AHCI RESEARCH GROUP
Publications
Papers published in international journals,
proceedings of conferences, workshops and books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTeX record for each paper.
2025
Weerasinghe, M.; Kljun, M.; Pucihar, K. Č.
A Cross-Device Interaction with the Smartphone and HMD for Vocabulary Learning Proceedings Article
In: L., Zaina; J.C., Campos; D., Spano; K., Luyten; P., Palanque; G., Veer; A., Ebert; S.R., Humayoun; V., Memmesheimer (Ed.): Lect. Notes Comput. Sci., pp. 269–282, Springer Science and Business Media Deutschland GmbH, 2025, ISBN: 03029743 (ISSN); 978-303191759-2 (ISBN).
Abstract | Links | BibTeX | Tags: Augmented Reality, Context-based, Context-based vocabulary learning, Cross-reality interaction, Engineering education, Head-mounted displays, Head-mounted-displays, Images synthesis, Keyword method, Mixed reality, Smart phones, Smartphones, Students, Text-to-image synthesis, Visualization, Vocabulary learning
@inproceedings{weerasinghe_cross-device_2025,
  title     = {A Cross-Device Interaction with the Smartphone and {HMD} for Vocabulary Learning},
  author    = {Weerasinghe, M. and Kljun, M. and Pucihar, K. {\v{C}}.},
  editor    = {Zaina, L. and Campos, J. C. and Spano, D. and Luyten, K. and Palanque, P. and Veer, G. and Ebert, A. and Humayoun, S. R. and Memmesheimer, V.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105007828696&doi=10.1007%2f978-3-031-91760-8_18&partnerID=40&md5=4ebf202715ba880dcfeb3232dba7e2c4},
  doi       = {10.1007/978-3-031-91760-8_18},
  isbn      = {978-3-031-91759-2},
  issn      = {0302-9743},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Lect. Notes Comput. Sci.},
  series    = {Lecture Notes in Computer Science},
  volume    = {15518},
  pages     = {269--282},
  publisher = {Springer Science and Business Media Deutschland GmbH},
  abstract  = {Cross-reality (XR) systems facilitate interaction between devices with differing levels of virtual content. By engaging with a variety of such devices, XR systems offer the flexibility to choose the most suitable modality for specific task or context. This capability enables rich applications in training and education, including vocabulary learning. Vocabulary acquisition is a vital part of language learning, employing techniques such as words rehearsing, flashcards, labelling environments with post-it notes, and mnemonic strategies such as the keyword method. Traditional mnemonics typically rely on visual stimuli or mental visualisations. Recent research highlights that AR can enhance vocabulary learning by combining real objects with augmented stimuli such as in labelling environments. Additionally,advancements in generative AI now enable high-quality, synthetically generated images from text descriptions, facilitating externalisation of personalised visual stimuli of mental visualisations. However, creating interfaces for effective real-world augmentation remains challenging, particularly given the limited text input capabilities of Head-Mounted Displays (HMDs). This work presents an XR system that combines smartphones and HMDs by leveraging Augmented Reality (AR) for contextually relevant information and a smartphone for efficient text input. The system enables users to visually annotate objects with personalised images of keyword associations generated with DALL-E 2. To evaluate the system, we conducted a user study with 16 university graduate students, assessing both usability and overall user experience. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2025.},
  keywords  = {Augmented Reality, Context-based, Context-based vocabulary learning, Cross-reality interaction, Engineering education, Head-mounted displays, Head-mounted-displays, Images synthesis, Keyword method, Mixed reality, Smart phones, Smartphones, Students, Text-to-image synthesis, Visualization, Vocabulary learning},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gatti, E.; Giunchi, D.; Numan, N.; Steed, A.
Around the Virtual Campfire: Early UX Insights into AI-Generated Stories in VR Proceedings Article
In: Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR, pp. 136–141, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 979-833152157-8 (ISBN).
Abstract | Links | BibTeX | Tags: Generative AI, Images synthesis, Immersive, Interactive Environments, Language Model, Large language model, Storytelling, User input, User study, Users' experiences, Virtual environments, VR
@inproceedings{gatti_around_2025,
  title     = {Around the Virtual Campfire: Early {UX} Insights into {AI-Generated} Stories in {VR}},
  author    = {Gatti, E. and Giunchi, D. and Numan, N. and Steed, A.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105000263662&doi=10.1109%2fAIxVR63409.2025.00027&partnerID=40&md5=cd804d892d45554e936d0221508b3447},
  doi       = {10.1109/AIxVR63409.2025.00027},
  isbn      = {979-8-3315-2157-8},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR},
  pages     = {136--141},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {Virtual Reality (VR) presents an immersive platform for storytelling, allowing narratives to unfold in highly engaging, interactive environments. Leveraging AI capabilities and image synthesis offers new possibilities for creating scalable, generative VR content. In this work, we use an LLM-driven VR storytelling platform to explore how AI-generated visuals and narrative elements impact the user experience in VR storytelling. Previously, we presented AIsop, a system to integrate LLM-generated text and images and TTS audio into a storytelling experience, where the narrative unfolds based on user input. In this paper, we present two user studies focusing on how AI-generated visuals influence narrative perception and the overall VR experience. Our findings highlight the positive impact of AI-generated pictorial content on the storytelling experience, highlighting areas for enhancement and further research in interactive narrative design. © 2025 IEEE.},
  keywords  = {Generative AI, Images synthesis, Immersive, Interactive Environments, Language Model, Large language model, Storytelling, User input, User study, Users' experiences, Virtual environments, VR},
  pubstate  = {published},
  tppubtype = {inproceedings}
}