AHCI RESEARCH GROUP
Publications
Papers published in international journals,
proceedings of conferences, workshops and books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTeX record for each paper.
2025
Gatti, E.; Giunchi, D.; Numan, N.; Steed, A.
Around the Virtual Campfire: Early UX Insights into AI-Generated Stories in VR Proceedings Article
In: Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR, pp. 136–141, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 979-833152157-8 (ISBN).
Abstract | Links | BibTeX | Tags: Generative AI, Images synthesis, Immersive, Interactive Environments, Language Model, Large language model, Storytelling, User input, User study, Users' experiences, Virtual environments, VR
@inproceedings{gatti_around_2025,
  title     = {Around the Virtual Campfire: Early {UX} Insights into {AI}-Generated Stories in {VR}},
  author    = {Gatti, E. and Giunchi, D. and Numan, N. and Steed, A.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105000263662&doi=10.1109%2fAIxVR63409.2025.00027&partnerID=40&md5=cd804d892d45554e936d0221508b3447},
  doi       = {10.1109/AIxVR63409.2025.00027},
  isbn      = {979-833152157-8},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR},
  pages     = {136--141},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {Virtual Reality (VR) presents an immersive platform for storytelling, allowing narratives to unfold in highly engaging, interactive environments. Leveraging AI capabilities and image synthesis offers new possibilities for creating scalable, generative VR content. In this work, we use an LLM-driven VR storytelling platform to explore how AI-generated visuals and narrative elements impact the user experience in VR storytelling. Previously, we presented AIsop, a system to integrate LLM-generated text and images and TTS audio into a storytelling experience, where the narrative unfolds based on user input. In this paper, we present two user studies focusing on how AI-generated visuals influence narrative perception and the overall VR experience. Our findings highlight the positive impact of AI-generated pictorial content on the storytelling experience, highlighting areas for enhancement and further research in interactive narrative design. © 2025 IEEE.},
  keywords  = {Generative AI, Images synthesis, Immersive, Interactive Environments, Language Model, Large language model, Storytelling, User input, User study, Users' experiences, Virtual environments, VR},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2024
Asra, S. A.; Wickramarathne, J.
Artificial Intelligence (AI) in Augmented Reality (AR), Virtual Reality (VR) and Mixed Reality (MR) Experiences: Enhancing Immersion and Interaction for User Experiences Proceedings Article
In: B., Luo; S.K., Sahoo; Y.H., Lee; C.H.T., Lee; M., Ong; A., Alphones (Ed.): IEEE Reg 10 Annu Int Conf Proc TENCON, pp. 1700–1705, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 21593442 (ISSN); 979-835035082-1 (ISBN).
Abstract | Links | BibTeX | Tags: AI, AR, Emersion experience, Immersive augmented realities, Mixed reality, MR, Primary sources, Real-world, Secondary sources, Training simulation, Users' experiences, Video game simulation, Video training, Virtual environments, VR
@inproceedings{asra_artificial_2024,
  title     = {Artificial Intelligence ({AI}) in Augmented Reality ({AR}), Virtual Reality ({VR}) and Mixed Reality ({MR}) Experiences: Enhancing Immersion and Interaction for User Experiences},
  author    = {Asra, S. A. and Wickramarathne, J.},
  editor    = {Luo, B. and Sahoo, S. K. and Lee, Y. H. and Lee, C. H. T. and Ong, M. and Alphones, A.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105000443498&doi=10.1109%2fTENCON61640.2024.10902724&partnerID=40&md5=2ff92b5e2529ae7fe797cd8026e8065d},
  doi       = {10.1109/TENCON61640.2024.10902724},
  isbn      = {979-835035082-1},
  issn      = {2159-3442},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {IEEE Reg 10 Annu Int Conf Proc TENCON},
  pages     = {1700--1705},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {The utilisation of Artificial Intelligence (AI) generated material is one of the most fascinating advancements in the rapidly growing fields of Virtual Reality (VR), Augmented Reality (AR), and Mixed Reality (MR). Two examples of how AI-generated material is revolutionising how we interact with AR, VR and MR are video games and training simulations. In this essay, we'll examine the intriguing potential of AI-generated content and how it's being used to the development of hybrid real-world/virtual experiences. Using this strategy, we acquired the information from primary and secondary sources. We surveyed AR, VR, and MR users to compile the data for the primary source. Then, utilising published papers as a secondary source, information was gathered. By elucidating the concept of context immersion, this research can lay the foundation for the advancement of information regarding immersive AR, VR, and MR contexts. We are able to offer recommendations for overcoming the weak parts and strengthening the good ones based on the questionnaire survey findings. © 2024 IEEE.},
  keywords  = {AI, AR, Emersion experience, Immersive augmented realities, Mixed reality, MR, Primary sources, Real-world, Secondary sources, Training simulation, Users' experiences, Video game simulation, Video training, Virtual environments, VR},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Wang, Y.; Zhang, Y.
Enhancing Cognitive Recall in Dementia Patients: Integrating Generative AI with Virtual Reality for Behavioral and Memory Rehabilitation Proceedings Article
In: ACM Int. Conf. Proc. Ser., pp. 86–91, Association for Computing Machinery, 2024, ISBN: 979-840071806-9 (ISBN).
Abstract | Links | BibTeX | Tags: AI, Cognitive rehabilitation, Cognitive stimulations, Dementia patients, Electronic health record, Firebase, Generalisation, Neurodegenerative diseases, Non visuals, Patient rehabilitation, Rehabilitation projects, Virtual environments, Virtual Reality, Virtual-reality environment, Visual memory, Visual-spatial, VR
@inproceedings{wang_enhancing_2024,
  title     = {Enhancing Cognitive Recall in Dementia Patients: Integrating Generative {AI} with Virtual Reality for Behavioral and Memory Rehabilitation},
  author    = {Wang, Y. and Zhang, Y.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85205444838&doi=10.1145%2f3686540.3686552&partnerID=40&md5=1577754660fddd936254fc78586e6a17},
  doi       = {10.1145/3686540.3686552},
  isbn      = {979-840071806-9},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {ACM Int. Conf. Proc. Ser.},
  pages     = {86--91},
  publisher = {Association for Computing Machinery},
  abstract  = {In this Project, we developed a cognitive rehabilitation program for dementia patients, leveraging generative AI and virtual reality (VR) to evoke personal memories [4]. Integrating Open AI, DreamStudio, and Unity, our system allows patients to input descriptions, generating visual memories in a VR environment [5]. In trials, 85% of AI-generated images matched patients' expectations, although some inaccuracies arose from AI generalizations. Further validation with dementia patients is needed to assess memory recovery impacts. This novel approach modernizes Cognitive Stimulation Therapy (CST), traditionally reliant on non-visual exercises, by incorporating AI and VR to enhance memory recall and visual-spatial skills. While the world is developing more and more into Artificial Intelligence (AI) and Virtual Reality (VR), our program successfully coordinates them to help stimulate dementia patients' brains and perform the memory recall and visual spatial aspects of CST. © 2024 Copyright held by the owner/author(s).},
  keywords  = {AI, Cognitive rehabilitation, Cognitive stimulations, Dementia patients, Electronic health record, Firebase, Generalisation, Neurodegenerative diseases, Non visuals, Patient rehabilitation, Rehabilitation projects, Virtual environments, Virtual Reality, Virtual-reality environment, Visual memory, Visual-spatial, VR},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Christiansen, F. R.; Hollensberg, L. Nø.; Jensen, N. B.; Julsgaard, K.; Jespersen, K. N.; Nikolov, I.
Exploring Presence in Interactions with LLM-Driven NPCs: A Comparative Study of Speech Recognition and Dialogue Options Proceedings Article
In: S.N., Spencer (Ed.): Proc. ACM Symp. Virtual Reality Softw. Technol. VRST, Association for Computing Machinery, 2024, ISBN: 979-840070535-9 (ISBN).
Abstract | Links | BibTeX | Tags: Comparatives studies, Computer simulation languages, Economic and social effects, Immersive System, Immersive systems, Language Model, Large language model, Large language models (LLM), Model-driven, Modern technologies, Non-playable character, NPC, Presence, Social Actors, Speech enhancement, Speech recognition, Text to speech, Virtual environments, Virtual Reality, VR
@inproceedings{christiansen_exploring_2024,
  title     = {Exploring Presence in Interactions with {LLM}-Driven {NPCs}: A Comparative Study of Speech Recognition and Dialogue Options},
  author    = {Christiansen, F. R. and Hollensberg, L. N{\o}. and Jensen, N. B. and Julsgaard, K. and Jespersen, K. N. and Nikolov, I.},
  editor    = {Spencer, S. N.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85212512351&doi=10.1145%2f3641825.3687716&partnerID=40&md5=56ec6982b399fd97196ea73e7c659c31},
  doi       = {10.1145/3641825.3687716},
  isbn      = {979-840070535-9},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {Proc. ACM Symp. Virtual Reality Softw. Technol. VRST},
  publisher = {Association for Computing Machinery},
  abstract  = {Combining modern technologies like large-language models (LLMs), speech-to-text, and text-to-speech can enhance immersion in virtual reality (VR) environments. However, challenges exist in effectively implementing LLMs and educating users. This paper explores implementing LLM-powered virtual social actors and facilitating user communication. We developed a murder mystery game where users interact with LLM-based non-playable characters (NPCs) through interrogation, clue-gathering, and exploration. Two versions were tested: one using speech recognition and another with traditional dialog boxes. While both provided similar social presence, users felt more immersed with speech recognition but found it overwhelming, while the dialog version was more challenging. Slow NPC response times were a source of frustration, highlighting the need for faster generation or better masking for a seamless experience. © 2024 Owner/Author.},
  keywords  = {Comparatives studies, Computer simulation languages, Economic and social effects, Immersive System, Immersive systems, Language Model, Large language model, Large language models (LLM), Model-driven, Modern technologies, Non-playable character, NPC, Presence, Social Actors, Speech enhancement, Speech recognition, Text to speech, Virtual environments, Virtual Reality, VR},
  pubstate  = {published},
  tppubtype = {inproceedings}
}