AHCI RESEARCH GROUP
Publications
Papers published in international journals, conference and workshop proceedings, and books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTeX record for each paper.
2025
Mereu, J.; Artizzu, V.; Carcangiu, A.; Spano, L. D.; Simeoli, L.; Mattioli, A.; Manca, M.; Santoro, C.; Paternò, F.
Empowering End-User in Creating eXtended Reality Content with a Conversational Chatbot Proceedings Article
In: Zaina, L.; Campos, J.C.; Spano, D.; Luyten, K.; Palanque, P.; Veer, G.; Ebert, A.; Humayoun, S.R.; Memmesheimer, V. (Eds.): Lecture Notes in Computer Science, pp. 126–137, Springer Science and Business Media Deutschland GmbH, 2025, ISSN: 0302-9743; ISBN: 978-3-031-91759-2.
Abstract | Links | BibTeX | Tags: Context, End-User Development, End-Users, Event condition action rules, Event-condition-action rules, Extended reality, Immersive authoring, Language Model, Large language model, Meta-design, multimodal input, Multimodal inputs, Virtualization
@inproceedings{mereu_empowering_2025,
title = {Empowering End-User in Creating eXtended Reality Content with a Conversational Chatbot},
author = {J. Mereu and V. Artizzu and A. Carcangiu and L. D. Spano and L. Simeoli and A. Mattioli and M. Manca and C. Santoro and F. Paternò},
editor = {Zaina, L. and Campos, J.C. and Spano, D. and Luyten, K. and Palanque, P. and Veer, G. and Ebert, A. and Humayoun, S.R. and Memmesheimer, V.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105007719800&doi=10.1007%2f978-3-031-91760-8_9&partnerID=40&md5=280b33b96bf2b250e515922072f92204},
doi = {10.1007/978-3-031-91760-8_9},
issn = {0302-9743},
isbn = {978-3-031-91759-2},
year = {2025},
date = {2025-01-01},
booktitle = {Lecture Notes in Computer Science},
volume = {15518 LNCS},
pages = {126–137},
publisher = {Springer Science and Business Media Deutschland GmbH},
abstract = {Recent advancements in eXtended Reality (XR) technologies have found application across diverse domains. However, creating complex interactions within XR environments remains challenging for non-technical users. In this work, we present EUD4XR, a project aiming to: i) empower end-user developers (EUDevs) to customize XR environments by supporting virtual objects and physical devices; ii) involve an intelligent conversational agent which assists the user in defining behaviours. The agent can handle multimodal input, to drive the EUDev during the rule authoring process, using contextual knowledge of the virtual environment and its elements. By integrating conversational assistance, EUD4XR seeks to lower further the usage barriers for end-users to personalize XR experiences according to their needs. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2025.},
keywords = {Context, End-User Development, End-Users, Event condition action rules, Event-condition-action rules, Extended reality, Immersive authoring, Language Model, Large language model, Meta-design, multimodal input, Multimodal inputs, Virtualization},
pubstate = {published},
tppubtype = {inproceedings}
}
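The EUD4XR entry above centres on end-users authoring event-condition-action (ECA) rules for XR scenes through a conversational agent. As a rough illustration of that rule model (not code from the paper; the Rule and XRContext names and the example rule are assumptions), a minimal Python sketch could look like this:

# Illustrative sketch only: a minimal event-condition-action (ECA) rule model
# of the kind EUD4XR is described as helping end-users author conversationally.
# All names (Rule, XRContext, the example rule) are assumptions, not from the paper.
from dataclasses import dataclass
from typing import Any, Callable, Dict

XRContext = Dict[str, Any]  # snapshot of the virtual scene (object states, sensor readings)

@dataclass
class Rule:
    event: str                              # e.g. "user_grabs:lamp"
    condition: Callable[[XRContext], bool]  # predicate over the scene state
    action: Callable[[XRContext], None]     # effect on virtual objects or physical devices

def dispatch(event: str, ctx: XRContext, rules: list[Rule]) -> None:
    """Fire every rule whose event matches and whose condition holds."""
    for rule in rules:
        if rule.event == event and rule.condition(ctx):
            rule.action(ctx)

# Hypothetical rule a chatbot might produce from:
# "When I grab the lamp and the room is dark, turn the lamp on."
rules = [
    Rule(
        event="user_grabs:lamp",
        condition=lambda ctx: ctx.get("room_brightness", 1.0) < 0.3,
        action=lambda ctx: ctx.update(lamp_on=True),
    )
]

ctx: XRContext = {"room_brightness": 0.1, "lamp_on": False}
dispatch("user_grabs:lamp", ctx, rules)
print(ctx["lamp_on"])  # True

In the paper's setting the agent would derive such a rule from spoken or typed descriptions plus multimodal references to scene elements; the sketch only shows the resulting ECA structure being dispatched.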
Mereu, J.
Using LLMs to enhance end-user development support in XR Proceedings Article
In: Paneva, V.; Tetteroo, D.; Frau, V.; Feger, S.; Spano, D.; Paternò, F.; Sauer, S.; Manca, M. (Eds.): CEUR Workshop Proceedings, CEUR-WS, 2025, ISSN: 1613-0073.
Abstract | Links | BibTeX | Tags: Artificial intelligence, Condition, Configuration, Development support, Development technique, End-User Development, End-Users, Event-condition-action, Event-Condition-Actions, Extended reality, Human computer interaction, Information Systems, Information use, Natural Language, Natural language processing systems, Natural languages, Rule, rules
@inproceedings{mereu_using_2025,
title = {Using LLMs to enhance end-user development support in XR},
author = {J. Mereu},
editor = {Paneva, V. and Tetteroo, D. and Frau, V. and Feger, S. and Spano, D. and Paternò, F. and Sauer, S. and Manca, M.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105008755984&partnerID=40&md5=bfaaa38c3bee309621426f8f35332107},
issn = {1613-0073},
year = {2025},
date = {2025-01-01},
booktitle = {CEUR Workshop Proceedings},
volume = {3978},
publisher = {CEUR-WS},
abstract = {This paper outlines the center stage of my PhD research, which aims to empower non-developer users to create and customize eXtended Reality (XR) environments through End-User Development (EUD) techniques combined with the latest AI tools. In particular, I describe my contributions to the EUD4XR project, detailing both the work completed and the ongoing developments. EUD4XR seeks to support end-users in customizing XR content with the assistance of a Large Language Model (LLM)-based conversational agent. © 2025 Copyright for this paper by its authors.},
keywords = {Artificial intelligence, Condition, Configuration, Development support, Development technique, End-User Development, End-Users, Event-condition-action, Event-Condition-Actions, Extended reality, Human computer interaction, Information Systems, Information use, Natural Language, Natural language processing systems, Natural languages, Rule, rules},
pubstate = {published},
tppubtype = {inproceedings}
}
Qian, P.; Redondo, C. V.; Wang, N.; Udora, C.; Men, J.; Tafazolli, R.
Enabling Generative AI based Multi-sensory XR Applications with Mobile Edge Computing Proceedings Article
In: Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331543709.
Abstract | Links | BibTeX | Tags: Bandwidth, Edge computing, End-Users, Extended reality (XR), Generative AI, Gigabits per second, Holographic Application, Holographic applications, Holography, Interactive applications, Mobile edge computing, Mobile systems, Mobile telecommunication systems, Multi-Sensory, Network architecture, Real- time, Semantic Web, Semantics
@inproceedings{qian_enabling_2025,
title = {Enabling Generative AI based Multi-sensory XR Applications with Mobile Edge Computing},
author = {P. Qian and C. V. Redondo and N. Wang and C. Udora and J. Men and R. Tafazolli},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105017962639&doi=10.1109%2FINFOCOMWKSHPS65812.2025.11152969&partnerID=40&md5=67f9f0030079cb49d844e01abc0d5971},
doi = {10.1109/INFOCOMWKSHPS65812.2025.11152969},
isbn = {9798331543709},
year = {2025},
date = {2025-01-01},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {With the rapid development of XR devices, holographic applications are expanding across various domains. However, it is a consensus that capturing and transmitting real-time holographic content still requires significant bandwidth. Even with the enhanced wireless capabilities of mobile systems, they still fall short of meeting the bandwidth and latency demands required for near-Gigabit per second interactive application scenarios. This paper proposes a network architecture that leverages MEC to address these challenges with the assistant of Generative AI. In this framework, the MEC server can leverage the power of the generative AI model to generate holographic objects with the input of user semantic commands, instead of requiring end-users to capture and transmit large raw holographic data. This approach significantly reduces uplink bandwidth requirements while enabling efficient real-time content generation. To validate this approach, we design an interactive and multisensory operational training scenario relying solely on semantic uplink transmissions from the end-users. The preliminary results based on the testbed implemented highlight the feasibility of deploying diverse holographic applications in wireless environments. © 2025 Elsevier B.V., All rights reserved.},
keywords = {Bandwidth, Edge computing, End-Users, Extended reality (XR), Generative AI, Gigabits per second, Holographic Application, Holographic applications, Holography, Interactive applications, Mobile edge computing, Mobile systems, Mobile telecommunication systems, Multi-Sensory, Network architecture, Real- time, Semantic Web, Semantics},
pubstate = {published},
tppubtype = {inproceedings}
}
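The Qian et al. entry describes an edge architecture in which end-users uplink only short semantic commands and the MEC server synthesises the holographic content with a generative model, avoiding near-Gbps raw hologram uplinks. A hedged Python sketch of that uplink pattern follows (the function names and the placeholder generator are assumptions, not the authors' implementation):

# Illustrative sketch only: the uplink pattern described in the paper's architecture,
# where the client sends a compact semantic command and the MEC server generates the
# holographic asset, instead of streaming raw captured holograms upstream.
import json

def client_uplink(command: str) -> bytes:
    """End-user device sends only a compact semantic command (a few dozen bytes)."""
    return json.dumps({"type": "semantic_command", "text": command}).encode()

def mec_generate(payload: bytes) -> dict:
    """Edge server turns the command into a holographic asset via a generative model.
    A placeholder stands in for the actual model call here."""
    command = json.loads(payload)["text"]
    return {"asset": f"hologram::{command}", "triangles": 250_000}  # generated at the edge

uplink = client_uplink("place a turbine engine on the workbench")
asset = mec_generate(uplink)
print(len(uplink), "bytes uplinked ->", asset["asset"])
# Capturing and uplinking the raw hologram itself would instead require
# near-Gbps throughput, which is the bottleneck the architecture avoids.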
2024
Cuervo-Rosillo, R.; Zarraonandia, T.; Díaz, P.
Using Generative AI to Support Non-Experts in the Creation of Immersive Experiences Proceedings Article
In: ACM International Conference Proceeding Series, Association for Computing Machinery, 2024, ISBN: 979-8-4007-1764-2.
Abstract | Links | BibTeX | Tags: Artificial intelligence, End-Users, generative artificial intelligence, Immersive, immersive experience, Immersive Experiences, Natural languages, Speech commands, User interfaces, Virtual Reality
@inproceedings{cuervo-rosillo_using_2024,
title = {Using Generative AI to Support Non-Experts in the Creation of Immersive Experiences},
author = {R. Cuervo-Rosillo and T. Zarraonandia and P. Díaz},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85195422750&doi=10.1145%2f3656650.3656733&partnerID=40&md5=00d53df1d6b30acc6d281bb86ead73ab},
doi = {10.1145/3656650.3656733},
isbn = {979-8-4007-1764-2},
year = {2024},
date = {2024-01-01},
booktitle = {ACM International Conference Proceeding Series},
publisher = {Association for Computing Machinery},
abstract = {This work focuses on exploring the use of Generative Artificial Intelligence to assist end-users in creating immersive experiences. We present a prototype that supports the creation and edition of virtual environments using speech commands expressed in natural language. © 2024 Owner/Author.},
keywords = {Artificial intelligence, End-Users, generative artificial intelligence, Immersive, immersive experience, Immersive Experiences, Natural languages, Speech commands, User interfaces, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
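The Cuervo-Rosillo et al. prototype edits virtual environments from speech commands expressed in natural language. As a loose illustration (the command grammar and the Scene class are assumptions, not the authors' design), a minimal Python sketch of mapping a transcribed command to a scene edit:

# Illustrative sketch only: mapping a transcribed speech command to an edit of a
# virtual environment, in the spirit of the prototype described above.
import re

class Scene:
    def __init__(self):
        self.objects: dict[str, dict] = {}

    def add(self, name: str, color: str = "grey") -> None:
        self.objects[name] = {"color": color}

def apply_command(scene: Scene, utterance: str) -> None:
    """Tiny stand-in for the natural-language understanding step (e.g. an intent parser)."""
    match = re.match(r"add a (\w+) (\w+)", utterance.lower())
    if match:
        color, obj = match.groups()
        scene.add(obj, color=color)

scene = Scene()
apply_command(scene, "Add a red chair")   # e.g. dictated by the end-user
print(scene.objects)                      # {'chair': {'color': 'red'}}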