AHCI RESEARCH GROUP
Publications
Papers published in international journals,
proceedings of conferences, workshops and books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTeX record for each paper.
2025
Gatti, E.; Giunchi, D.; Numan, N.; Steed, A.
Around the Virtual Campfire: Early UX Insights into AI-Generated Stories in VR (Proceedings Article)
In: Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR, pp. 136–141, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331521578 (ISBN).
Abstract | Links | BibTeX | Tags: Generative AI, Images synthesis, Immersive, Interactive Environments, Language Model, Large language model, Storytelling, User input, User study, Users' experiences, Virtual environments, VR
@inproceedings{gatti_around_2025,
  title     = {Around the Virtual Campfire: Early {UX} Insights into {AI}-Generated Stories in {VR}},
  author    = {Gatti, E. and Giunchi, D. and Numan, N. and Steed, A.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105000263662&doi=10.1109%2FAIxVR63409.2025.00027&partnerID=40&md5=ab95e803af14233db6ed307222632542},
  doi       = {10.1109/AIxVR63409.2025.00027},
  isbn      = {9798331521578},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR},
  pages     = {136--141},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {Virtual Reality (VR) presents an immersive platform for storytelling, allowing narratives to unfold in highly engaging, interactive environments. Leveraging AI capabilities and image synthesis offers new possibilities for creating scalable, generative VR content. In this work, we use an LLM-driven VR storytelling platform to explore how AI-generated visuals and narrative elements impact the user experience in VR storytelling. Previously, we presented AIsop, a system to integrate LLM-generated text and images and TTS audio into a storytelling experience, where the narrative unfolds based on user input. In this paper, we present two user studies focusing on how AI-generated visuals influence narrative perception and the overall VR experience. Our findings highlight the positive impact of AI-generated pictorial content on the storytelling experience, highlighting areas for enhancement and further research in interactive narrative design. © 2025 Elsevier B.V., All rights reserved.},
  keywords  = {Generative AI, Images synthesis, Immersive, Interactive Environments, Language Model, Large language model, Storytelling, User input, User study, Users' experiences, Virtual environments, VR},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Almashmoum, M.; Payton, A.; Johnstone, E.; Cunningham, J.; Ainsworth, J.
Understanding the Views of Health Care Professionals on the Usability and Utility of Virtual Reality Multidisciplinary Team Meetings: Usability and Utility Study (Journal Article)
In: JMIR XR and Spatial Computing, vol. 2, 2025, ISSN: 28183045 (ISSN), (Publisher: JMIR Publications Inc.).
Abstract | Links | BibTeX | Tags: Artificial intelligence, digital environments, heuristic evaluation, knowledge sharing, multidisciplinary team meetings, simulation, Usability, Virtual environments, Virtual Reality, VR
@article{almashmoum_understanding_2025,
  title     = {Understanding the Views of Health Care Professionals on the Usability and Utility of Virtual Reality Multidisciplinary Team Meetings: Usability and Utility Study},
  author    = {Almashmoum, M. and Payton, A. and Johnstone, E. and Cunningham, J. and Ainsworth, J.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105012020235&doi=10.2196%2F60651&partnerID=40&md5=55e0ddff7b6b7dbde1ea4b1191f88d96},
  doi       = {10.2196/60651},
  issn      = {2818-3045},
  year      = {2025},
  date      = {2025-01-01},
  journal   = {JMIR XR and Spatial Computing},
  volume    = {2},
  abstract  = {Background: Multidisciplinary team (MDT) meetings are one of the facilitators that enhance knowledge sharing among health care professionals. However, organizing a face-to-face MDT meeting to discuss patient treatment plans can be time-consuming. Virtual reality software is widely used in health care nowadays to save time and protect lives. Therefore, the use of virtual reality multidisciplinary team (VRMDT) meeting software may help enhance knowledge sharing between health care professionals and make meetings more efficient. Objective: The objectives of this study were to introduce VRMDT software for enhancing knowledge sharing and to evaluate the feasibility and usability of the VRMDT for use by professionals in health care institutions. Methods: We invited participants from The University of Manchester Faculty for Biology, Medicine, and Health who had a health care background. As this was the first stage of software development, individuals who did not usually attend MDT meetings were also invited via email to participate in this study. Participants evaluated VRMDT using a Meta Quest 3 headset, and software developed using the Unity platform. The software contained an onboarding tutorial that taught the participants how to select items, load and rotate 3D Digital Imaging and Communications in Medicine files, talk to a generative artificial intelligence–supported avatar, and make notes. After the evaluation (approximately 15 min), participants received an electronic survey using the Qualtrics survey tool (Qualtrics International Inc) to score the usability and feasibility of the software by responding to the 10-item system usability scale, and 12-point heuristic evaluation questions with Neilsen severity rating. Results: A total of 12 participants, including 4 health informatics, 3 with a nursing background, 2 medical doctors, 1 radiologist, and 2 biostatisticians, participated in the study. The most common age bracket of participants was 20‐30 years (6/12, 50%). Most of the respondents had no experience with virtual reality, either in educational or entertainment settings. The VRMDT received a mean usability score of 72.7 (range between 68 and 80.3), earning an overall “good” rating grade. The mean score of single items in the heuristic evaluation questionnaires was less than 1 out of 4 (the overall mean was 0.6), which indicates that only minor problems were encountered when using this software. Overall, the participant’s feedback was good with highlighted issues including a poor internet connection and the quality of the generative artificial intelligence response. Conclusions: VRMDT software (developed by SentiraXR) was developed with several functions aimed at helping health care professionals to discuss medical conditions efficiently. Participants found that the VRMDT is a powerful, and useful tool for enhancing knowledge sharing among professionals who are involved in MDT meetings due to its functionality and multiuser interactive environments. Additionally, there may be the possibility of using it to train junior professionals to interpret medical reports. © 2025 Elsevier B.V., All rights reserved.},
  note      = {Publisher: JMIR Publications Inc.},
  keywords  = {Artificial intelligence, digital environments, heuristic evaluation, knowledge sharing, multidisciplinary team meetings, simulation, Usability, Virtual environments, Virtual Reality, VR},
  pubstate  = {published},
  tppubtype = {article}
}
2024
Asra, S. A.; Wickramarathne, J.
Artificial Intelligence (AI) in Augmented Reality (AR), Virtual Reality (VR) and Mixed Reality (MR) Experiences: Enhancing Immersion and Interaction for User Experiences Proceedings Article
In: B., Luo; S.K., Sahoo; Y.H., Lee; C.H.T., Lee; M., Ong; A., Alphones (Ed.): IEEE Reg 10 Annu Int Conf Proc TENCON, pp. 1700–1705, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 21593442 (ISSN); 979-835035082-1 (ISBN).
Abstract | Links | BibTeX | Tags: AI, AR, Emersion experience, Immersive augmented realities, Mixed reality, MR, Primary sources, Real-world, Secondary sources, Training simulation, Users' experiences, Video game simulation, Video training, Virtual environments, VR
@inproceedings{asra_artificial_2024,
  title     = {Artificial Intelligence ({AI}) in Augmented Reality ({AR}), Virtual Reality ({VR}) and Mixed Reality ({MR}) Experiences: Enhancing Immersion and Interaction for User Experiences},
  author    = {Asra, S. A. and Wickramarathne, J.},
  editor    = {Luo, B. and Sahoo, S. K. and Lee, Y. H. and Lee, C. H. T. and Ong, M. and Alphones, A.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105000443498&doi=10.1109%2fTENCON61640.2024.10902724&partnerID=40&md5=2ff92b5e2529ae7fe797cd8026e8065d},
  doi       = {10.1109/TENCON61640.2024.10902724},
  issn      = {2159-3442},
  isbn      = {979-835035082-1},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {IEEE Reg 10 Annu Int Conf Proc TENCON},
  pages     = {1700--1705},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {The utilisation of Artificial Intelligence (AI) generated material is one of the most fascinating advancements in the rapidly growing fields of Virtual Reality (VR), Augmented Reality (AR), and Mixed Reality (MR). Two examples of how AI-generated material is revolutionising how we interact with AR, VR and MR are video games and training simulations. In this essay, we'll examine the intriguing potential of AI-generated content and how it's being used to the development of hybrid real-world/virtual experiences. Using this strategy, we acquired the information from primary and secondary sources. We surveyed AR, VR, and MR users to compile the data for the primary source. Then, utilising published papers as a secondary source, information was gathered. By elucidating the concept of context immersion, this research can lay the foundation for the advancement of information regarding immersive AR, VR, and MR contexts. We are able to offer recommendations for overcoming the weak parts and strengthening the good ones based on the questionnaire survey findings. © 2024 IEEE.},
  keywords  = {AI, AR, Emersion experience, Immersive augmented realities, Mixed reality, MR, Primary sources, Real-world, Secondary sources, Training simulation, Users' experiences, Video game simulation, Video training, Virtual environments, VR},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Wang, Y.; Zhang, Y.
Enhancing Cognitive Recall in Dementia Patients: Integrating Generative AI with Virtual Reality for Behavioral and Memory Rehabilitation Proceedings Article
In: ACM Int. Conf. Proc. Ser., pp. 86–91, Association for Computing Machinery, 2024, ISBN: 979-840071806-9 (ISBN).
Abstract | Links | BibTeX | Tags: AI, Cognitive rehabilitation, Cognitive stimulations, Dementia patients, Electronic health record, Firebase, Generalisation, Neurodegenerative diseases, Non visuals, Patient rehabilitation, Rehabilitation projects, Virtual environments, Virtual Reality, Virtual-reality environment, Visual memory, Visual-spatial, VR
@inproceedings{wang_enhancing_2024,
  title     = {Enhancing Cognitive Recall in Dementia Patients: Integrating Generative {AI} with Virtual Reality for Behavioral and Memory Rehabilitation},
  author    = {Wang, Y. and Zhang, Y.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85205444838&doi=10.1145%2f3686540.3686552&partnerID=40&md5=1577754660fddd936254fc78586e6a17},
  doi       = {10.1145/3686540.3686552},
  isbn      = {979-840071806-9},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {ACM Int. Conf. Proc. Ser.},
  pages     = {86--91},
  publisher = {Association for Computing Machinery},
  abstract  = {In this Project, we developed a cognitive rehabilitation program for dementia patients, leveraging generative AI and virtual reality (VR) to evoke personal memories [4]. Integrating Open AI, DreamStudio, and Unity, our system allows patients to input descriptions, generating visual memories in a VR environment [5]. In trials, 85% of AI-generated images matched patients' expectations, although some inaccuracies arose from AI generalizations. Further validation with dementia patients is needed to assess memory recovery impacts. This novel approach modernizes Cognitive Stimulation Therapy (CST), traditionally reliant on non-visual exercises, by incorporating AI and VR to enhance memory recall and visual-spatial skills. While the world is developing more and more into Artificial Intelligence (AI) and Virtual Reality (VR), our program successfully coordinates them to help stimulate dementia patients' brains and perform the memory recall and visual spatial aspects of CST. © 2024 Copyright held by the owner/author(s).},
  keywords  = {AI, Cognitive rehabilitation, Cognitive stimulations, Dementia patients, Electronic health record, Firebase, Generalisation, Neurodegenerative diseases, Non visuals, Patient rehabilitation, Rehabilitation projects, Virtual environments, Virtual Reality, Virtual-reality environment, Visual memory, Visual-spatial, VR},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Christiansen, F. R.; Hollensberg, L. Nø.; Jensen, N. B.; Julsgaard, K.; Jespersen, K. N.; Nikolov, I.
Exploring Presence in Interactions with LLM-Driven NPCs: A Comparative Study of Speech Recognition and Dialogue Options Proceedings Article
In: Spencer, S. N. (Ed.): Proc. ACM Symp. Virtual Reality Softw. Technol. VRST, Association for Computing Machinery, 2024, ISBN: 9798400705359 (ISBN).
Abstract | Links | BibTeX | Tags: Comparatives studies, Computer simulation languages, Economic and social effects, Immersive System, Immersive systems, Language Model, Large language model, Large language models (LLM), Model-driven, Modern technologies, Non-playable character, NPC, Presence, Social Actors, Speech enhancement, Speech recognition, Text to speech, Virtual environments, Virtual Reality, VR
@inproceedings{christiansen_exploring_2024,
  title     = {Exploring Presence in Interactions with {LLM}-Driven {NPCs}: A Comparative Study of Speech Recognition and Dialogue Options},
  author    = {Christiansen, F. R. and Hollensberg, L. Nø. and Jensen, N. B. and Julsgaard, K. and Jespersen, K. N. and Nikolov, I.},
  editor    = {Spencer, S. N.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85212512351&doi=10.1145%2F3641825.3687716&partnerID=40&md5=96540f274db6d000f4092edc5a07e241},
  doi       = {10.1145/3641825.3687716},
  isbn      = {9798400705359},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {Proc. ACM Symp. Virtual Reality Softw. Technol. VRST},
  publisher = {Association for Computing Machinery},
  abstract  = {Combining modern technologies like large-language models (LLMs), speech-to-text, and text-to-speech can enhance immersion in virtual reality (VR) environments. However, challenges exist in effectively implementing LLMs and educating users. This paper explores implementing LLM-powered virtual social actors and facilitating user communication. We developed a murder mystery game where users interact with LLM-based non-playable characters (NPCs) through interrogation, clue-gathering, and exploration. Two versions were tested: one using speech recognition and another with traditional dialog boxes. While both provided similar social presence, users felt more immersed with speech recognition but found it overwhelming, while the dialog version was more challenging. Slow NPC response times were a source of frustration, highlighting the need for faster generation or better masking for a seamless experience. © 2024 Elsevier B.V., All rights reserved.},
  keywords  = {Comparatives studies, Computer simulation languages, Economic and social effects, Immersive System, Immersive systems, Language Model, Large language model, Large language models (LLM), Model-driven, Modern technologies, Non-playable character, NPC, Presence, Social Actors, Speech enhancement, Speech recognition, Text to speech, Virtual environments, Virtual Reality, VR},
  pubstate  = {published},
  tppubtype = {inproceedings}
}