AHCI RESEARCH GROUP
Publications
Papers published in international journals, conference and workshop proceedings, and books.
2025
Lakehal, A.; Alti, A.; Annane, B.
CORES: Context-Aware Emotion-Driven Recommendation System-Based LLM to Improve Virtual Shopping Experiences Journal Article
In: Future Internet, vol. 17, no. 2, 2025, ISSN: 1999-5903.
Tags: Context, Context-Aware, Customisation, Decision Making, e-Commerce, Emotion, Large Language Model, LLM, Recommendation, Virtual Environments, Virtual Reality, Virtual Shopping
@article{lakehal_cores_2025,
title = {CORES: Context-Aware Emotion-Driven Recommendation System-Based LLM to Improve Virtual Shopping Experiences},
author = {A. Lakehal and A. Alti and B. Annane},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85218626299&doi=10.3390%2ffi17020094&partnerID=40&md5=a0f68e273de08b2c33d03da4cb6c19bb},
doi = {10.3390/fi17020094},
issn = {1999-5903},
year = {2025},
date = {2025-01-01},
journal = {Future Internet},
volume = {17},
number = {2},
abstract = {In today’s business landscape, artificial intelligence (AI) plays a pivotal role in shopping processes and customization. As the demand for customization grows, virtual reality (VR) emerges as an innovative solution to improve users’ perception and decision making in virtual shopping experiences (VSEs). Despite its potential, limited research has explored the integration of contextual information and emotions in VR to deliver effective product recommendations. This paper presents CORES (context-aware emotion-driven recommendation system), a novel approach designed to enrich users’ experiences and to support decision making in VR. CORES combines advanced large language models (LLMs) and embedding-based context-aware recommendation strategies to provide customized products. To this end, emotions are collected from social platforms, and relevant contextual information is matched to enable effective recommendation. Additionally, CORES leverages transformers and retrieval-augmented generation (RAG) capabilities to explain recommended items, facilitate VR visualization, and generate insights using various prompt templates. CORES is applied to a VR shop of different items. An empirical study validates the efficiency and accuracy of this approach, achieving an average accuracy of 97% and an acceptable response time of 0.3267 s in dynamic shopping scenarios. © 2025 by the authors.},
keywords = {Context, Context-Aware, Customisation, Decision Making, e-Commerce, Emotion, Large Language Model, LLM, Recommendation, Virtual Environments, Virtual Reality, Virtual Shopping},
pubstate = {published},
tppubtype = {article}
}
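Note: the abstract above outlines a pipeline of embedding-based retrieval feeding an LLM through prompt templates (RAG). As a rough illustration of that flow, here is a minimal Python sketch; it is not the CORES code, and every name in it (ShopperContext, retrieve_candidates, build_prompt, the toy word-overlap retrieval) is a hypothetical stand-in for the components the paper names.

# Illustrative sketch only, not the CORES implementation. The retrieval,
# prompt template, and context names are hypothetical stand-ins.
from dataclasses import dataclass

@dataclass
class ShopperContext:
    emotion: str              # e.g. mined from social-media posts, per the abstract
    recent_views: list[str]   # contextual signal used for retrieval

def retrieve_candidates(catalogue: list[str], ctx: ShopperContext, k: int = 3) -> list[str]:
    """Toy stand-in for embedding similarity: rank items by word overlap with recent views."""
    def score(item: str) -> int:
        words = set(item.lower().split())
        return sum(len(words & set(v.lower().split())) for v in ctx.recent_views)
    return sorted(catalogue, key=score, reverse=True)[:k]

def build_prompt(ctx: ShopperContext, candidates: list[str]) -> str:
    """Fill a RAG-style prompt template with emotion, context, and retrieved items."""
    items = "\n".join(f"- {c}" for c in candidates)
    return (
        f"The shopper currently feels {ctx.emotion}.\n"
        f"Recently viewed: {', '.join(ctx.recent_views)}.\n"
        f"Candidate products retrieved by similarity:\n{items}\n"
        "Recommend one product and explain the choice in two sentences."
    )

ctx = ShopperContext(emotion="excited", recent_views=["red running shoes"])
catalogue = ["blue running shoes", "leather office chair", "trail running shoes"]
print(build_prompt(ctx, retrieve_candidates(catalogue, ctx)))
# The filled prompt would then go to an LLM; the paper reports the generated
# text is also used to explain the recommended item inside the VR shop.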
Mereu, J.; Artizzu, V.; Carcangiu, A.; Spano, L. D.; Simeoli, L.; Mattioli, A.; Manca, M.; Santoro, C.; Paternò, F.
Empowering End-User in Creating eXtended Reality Content with a Conversational Chatbot Proceedings Article
In: Zaina, L.; Campos, J. C.; Spano, D.; Luyten, K.; Palanque, P.; Veer, G.; Ebert, A.; Humayoun, S. R.; Memmesheimer, V. (Eds.): Lecture Notes in Computer Science, pp. 126–137, Springer Science and Business Media Deutschland GmbH, 2025, ISSN: 0302-9743; ISBN: 978-3-031-91759-2.
Tags: Context, End-User Development, Event-Condition-Action Rules, Extended Reality, Immersive Authoring, Large Language Model, Meta-Design, Multimodal Input, Virtualization
@inproceedings{mereu_empowering_2025,
title = {Empowering End-User in Creating eXtended Reality Content with a Conversational Chatbot},
author = {J. Mereu and V. Artizzu and A. Carcangiu and L. D. Spano and L. Simeoli and A. Mattioli and M. Manca and C. Santoro and F. Paternò},
editor = {Zaina, L. and Campos, J. C. and Spano, D. and Luyten, K. and Palanque, P. and Veer, G. and Ebert, A. and Humayoun, S. R. and Memmesheimer, V.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105007719800&doi=10.1007%2f978-3-031-91760-8_9&partnerID=40&md5=280b33b96bf2b250e515922072f92204},
doi = {10.1007/978-3-031-91760-8_9},
issn = {0302-9743},
isbn = {978-3-031-91759-2},
year = {2025},
date = {2025-01-01},
booktitle = {Lecture Notes in Computer Science},
volume = {15518 LNCS},
pages = {126–137},
publisher = {Springer Science and Business Media Deutschland GmbH},
abstract = {Recent advancements in eXtended Reality (XR) technologies have found application across diverse domains. However, creating complex interactions within XR environments remains challenging for non-technical users. In this work, we present EUD4XR, a project aiming to: i) empower end-user developers (EUDevs) to customize XR environments covering both virtual objects and physical devices; ii) involve an intelligent conversational agent that assists the user in defining behaviours. The agent can handle multimodal input to drive the EUDev during the rule-authoring process, using contextual knowledge of the virtual environment and its elements. By integrating conversational assistance, EUD4XR seeks to further lower the usage barriers for end-users to personalize XR experiences according to their needs. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2025.},
keywords = {Context, End-User Development, Event-Condition-Action Rules, Extended Reality, Immersive Authoring, Large Language Model, Meta-Design, Multimodal Input, Virtualization},
pubstate = {published},
tppubtype = {inproceedings}
}
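Note: both EUD4XR papers in this list revolve around event-condition-action (ECA) rules that a conversational agent helps end-user developers author. The following minimal Python sketch shows the general shape of such a rule and when it fires; the Rule class and its fields are hypothetical illustrations, not the EUD4XR data model.

# Minimal event-condition-action rule sketch; hypothetical names,
# not the EUD4XR implementation.
from dataclasses import dataclass, field

@dataclass
class Rule:
    event: str                                            # e.g. "user grabs the lamp"
    conditions: list[str] = field(default_factory=list)   # e.g. ["room is dark"]
    actions: list[str] = field(default_factory=list)      # e.g. ["turn lamp on"]

def fires(rule: Rule, event: str, state: set[str]) -> list[str]:
    """Return the rule's actions if the event matches and all conditions hold in the current state."""
    if event == rule.event and all(c in state for c in rule.conditions):
        return rule.actions
    return []

# Example: a rule a chatbot could assemble from a spoken request such as
# "when I grab the lamp and the room is dark, turn it on".
rule = Rule(event="user grabs the lamp",
            conditions=["room is dark"],
            actions=["turn lamp on"])
print(fires(rule, "user grabs the lamp", {"room is dark"}))  # ['turn lamp on']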
2024
Artizzu, V.; Carcangiu, A.; Manca, M.; Mattioli, A.; Mereu, J.; Paternò, F.; Santoro, C.; Simeoli, L.; Spano, L. D.
End-User Development for eXtended Reality using a multimodal Intelligent Conversational Agent Proceedings Article
In: Wang, N.; Bellucci, A.; Anthes, C.; Daeijavad, P.; Friedl-Knirsch, J.; Maurer, F.; Pointecker, F.; Spano, L. D. (Eds.): CEUR Workshop Proceedings, CEUR-WS, 2024, ISSN: 1613-0073.
Tags: Context, End-User Development, Event-Condition-Action Rules, Extended Reality, Immersive Authoring, Large Language Model, Meta-Design, Multimodal Input, User Interfaces
@inproceedings{artizzu_end-user_2024,
title = {End-User Development for eXtended Reality using a multimodal Intelligent Conversational Agent},
author = {V. Artizzu and A. Carcangiu and M. Manca and A. Mattioli and J. Mereu and F. Paternò and C. Santoro and L. Simeoli and L. D. Spano},
editor = {Wang, N. and Bellucci, A. and Anthes, C. and Daeijavad, P. and Friedl-Knirsch, J. and Maurer, F. and Pointecker, F. and Spano, L. D.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85196077262&partnerID=40&md5=3d5f022f30a1f0e3e5e81133d07823b5},
issn = {1613-0073},
year = {2024},
date = {2024-01-01},
booktitle = {CEUR Workshop Proceedings},
volume = {3704},
publisher = {CEUR-WS},
abstract = {In the past years, both the research community and commercial products have proposed various solutions aiming to support end-user developers (EUDevs), namely users without extensive programming skills, in building and customizing XR experiences. However, current tools may not fully eliminate the potential for user errors or misunderstandings. In this paper, we present EUD4XR, a methodology consisting of an intelligent conversational agent that provides contextual help to EUDevs during the authoring process. The key characteristic of this agent is its multimodality, combining the user’s voice, gaze, and pointing with the environment status. Moreover, the agent can also demonstrate concepts, suggest components, and help explain errors, further reducing misunderstandings for end-user developers of VR/XR. © 2024 Copyright for this paper by its authors.},
keywords = {Context, End-User Development, Event-Condition-Action Rules, Extended Reality, Immersive Authoring, Large Language Model, Meta-Design, Multimodal Input, User Interfaces},
pubstate = {published},
tppubtype = {inproceedings}
}
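Note: the abstract above emphasizes that the agent fuses the user's voice, gaze, and pointing with the environment status. As a hedged illustration of one small piece of that idea, again not the EUD4XR implementation, resolving a spoken deictic reference such as "that" against pointing and gaze targets could look like this in Python:

# Hypothetical sketch of multimodal reference resolution: a spoken command
# containing a deictic word ("this", "that", "it") is grounded against the
# object currently pointed at, falling back to the gazed-at object.
def resolve_target(utterance: str, gaze_target: str | None,
                   pointed_target: str | None) -> str | None:
    """Prefer pointing over gaze when grounding a deictic reference."""
    if any(w in utterance.lower().split() for w in ("this", "that", "it")):
        return pointed_target or gaze_target
    return None

# "Make that red" while pointing at the cube resolves to the cube,
# even though the user is looking at the sphere.
print(resolve_target("make that red", gaze_target="sphere", pointed_target="cube"))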