AHCI RESEARCH GROUP
Publications
Papers published in international journals, conference and workshop proceedings, and books.
Scientific Publications
2025
Weerasinghe, M.; Kljun, M.; Pucihar, K. Č.
A Cross-Device Interaction with the Smartphone and HMD for Vocabulary Learning Proceedings Article
In: Zaina, L.; Campos, J.C.; Spano, D.; Luyten, K.; Palanque, P.; Veer, G.; Ebert, A.; Humayoun, S.R.; Memmesheimer, V. (Eds.): Lecture Notes in Computer Science, vol. 15518, pp. 269–282, Springer Science and Business Media Deutschland GmbH, 2025, ISSN: 0302-9743, ISBN: 978-3-031-91759-2.
Tags: Augmented Reality, Context-based vocabulary learning, Cross-reality interaction, Engineering education, Head-mounted displays, Image synthesis, Keyword method, Mixed reality, Smartphones, Students, Text-to-image synthesis, Visualization, Vocabulary learning
@inproceedings{weerasinghe_cross-device_2025,
title = {A Cross-Device Interaction with the Smartphone and HMD for Vocabulary Learning},
author = {M. Weerasinghe and M. Kljun and K. Č. Pucihar},
editor = {L. Zaina and J.C. Campos and D. Spano and K. Luyten and P. Palanque and G. Veer and A. Ebert and S.R. Humayoun and V. Memmesheimer},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105007828696&doi=10.1007%2f978-3-031-91760-8_18&partnerID=40&md5=4ebf202715ba880dcfeb3232dba7e2c4},
doi = {10.1007/978-3-031-91760-8_18},
issn = {0302-9743},
isbn = {978-3-031-91759-2},
year = {2025},
date = {2025-01-01},
booktitle = {Lecture Notes in Computer Science},
volume = {15518 LNCS},
pages = {269--282},
publisher = {Springer Science and Business Media Deutschland GmbH},
abstract = {Cross-reality (XR) systems facilitate interaction between devices with differing levels of virtual content. By engaging with a variety of such devices, XR systems offer the flexibility to choose the most suitable modality for a specific task or context. This capability enables rich applications in training and education, including vocabulary learning. Vocabulary acquisition is a vital part of language learning, employing techniques such as word rehearsal, flashcards, labelling environments with post-it notes, and mnemonic strategies such as the keyword method. Traditional mnemonics typically rely on visual stimuli or mental visualisations. Recent research highlights that AR can enhance vocabulary learning by combining real objects with augmented stimuli, as in labelled environments. Additionally, advancements in generative AI now enable high-quality, synthetically generated images from text descriptions, facilitating the externalisation of personalised visual stimuli from mental visualisations. However, creating interfaces for effective real-world augmentation remains challenging, particularly given the limited text input capabilities of Head-Mounted Displays (HMDs). This work presents an XR system that combines smartphones and HMDs, leveraging Augmented Reality (AR) for contextually relevant information and a smartphone for efficient text input. The system enables users to visually annotate objects with personalised images of keyword associations generated with DALL-E 2. To evaluate the system, we conducted a user study with 16 university graduate students, assessing both usability and overall user experience. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2025.},
keywords = {Augmented Reality, Context-based vocabulary learning, Cross-reality interaction, Engineering education, Head-mounted displays, Image synthesis, Keyword method, Mixed reality, Smartphones, Students, Text-to-image synthesis, Visualization, Vocabulary learning},
pubstate = {published},
tppubtype = {inproceedings}
}
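The pipeline this abstract describes, generating a mnemonic image from a learner's keyword association, maps onto a single text-to-image call. Below is a minimal Python sketch using OpenAI's images API with DALL-E 2, the model named in the abstract; the prompt wording, image size, and helper name are illustrative assumptions, not details of the authors' system, which splits input and display across a smartphone and an HMD.

# Minimal sketch: generate a keyword-association image with DALL-E 2.
# Prompt wording and parameters are illustrative assumptions.
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

def keyword_image(foreign_word: str, keyword: str, meaning: str) -> str:
    """Return the URL of an image linking a keyword to a word's meaning."""
    prompt = (
        f"A memorable scene combining '{keyword}' with '{meaning}', "
        f"as a mnemonic for the word '{foreign_word}'"
    )
    response = client.images.generate(
        model="dall-e-2",
        prompt=prompt,
        size="512x512",
        n=1,
    )
    return response.data[0].url

# Example: the Spanish word 'carta' (letter) via the keyword 'cart'
print(keyword_image("carta", "cart", "a letter"))

In the paper's setup the resulting image would be anchored to a physical object in the HMD view; here it is simply returned as a URL.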
Tovias, E.; Wu, L.
Leveraging Virtual Reality and AI for Enhanced Vocabulary Learning Proceedings Article
In: p. 308, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 979-8-3315-2164-6.
Tags: Avatars, E-Learning, Immersive, Interactive computer graphics, Interactive learning, Large language models, Learning experiences, Real-time interaction, Text-based methods, User experience, Virtual environments, Virtual Reality, Vocabulary learning
@inproceedings{tovias_leveraging_2025,
title = {Leveraging Virtual Reality and AI for Enhanced Vocabulary Learning},
author = {E. Tovias and L. Wu},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105017563813&doi=10.1109%2FICHMS65439.2025.11154184&partnerID=40&md5=7b79f93d6f8ec222b25a4bfeac408d3a},
doi = {10.1109/ICHMS65439.2025.11154184},
isbn = {979-8-3315-2164-6},
year = {2025},
date = {2025-01-01},
pages = {308},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {This study examines the integration of virtual reality (VR) and artificial intelligence (AI) to create more immersive, interactive learning experiences. By combining VR's engaging user experience with AI-powered avatars, this research explores how these tools can enhance vocabulary learning compared to traditional text-based methods. Utilizing a Meta Quest 3 headset, Unity for development, and OpenAI's API and ElevenLabs for dynamic dialogues, the system offers personalized, real-time interactions (Fig. 1). The integration of these technologies drives significant advancements in the development of highly immersive and effective learning environments.},
keywords = {Avatars, E-Learning, Immersive, Interactive computer graphics, Interactive learning, Large language models, Learning experiences, Real-time interaction, Text-based methods, User experience, Virtual environments, Virtual Reality, Vocabulary learning},
pubstate = {published},
tppubtype = {inproceedings}
}
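Stripped of the Unity and headset layers, the avatar loop this abstract sketches amounts to two service calls per turn: a chat completion for the reply and a text-to-speech request for the voice. A minimal Python sketch follows, assuming the openai SDK and the public ElevenLabs REST endpoint; the model names, voice ID, and tutor prompt are placeholders rather than details from the paper.

# Minimal sketch of one avatar dialogue turn: LLM reply, then TTS.
# Model names, the voice ID, and the tutor prompt are assumptions.
import os
import requests
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

def avatar_reply(user_utterance: str) -> str:
    """Generate the avatar's next line with a chat completion."""
    response = client.chat.completions.create(
        model="gpt-4o-mini",  # placeholder model choice
        messages=[
            {"role": "system",
             "content": "You are a friendly tutor teaching new vocabulary."},
            {"role": "user", "content": user_utterance},
        ],
    )
    return response.choices[0].message.content

def speak(text: str, voice_id: str = "VOICE_ID") -> bytes:
    """Synthesize speech through ElevenLabs' text-to-speech endpoint."""
    resp = requests.post(
        f"https://api.elevenlabs.io/v1/text-to-speech/{voice_id}",
        headers={"xi-api-key": os.environ["ELEVENLABS_API_KEY"]},
        json={"text": text, "model_id": "eleven_multilingual_v2"},
        timeout=30,
    )
    resp.raise_for_status()
    return resp.content  # audio bytes (MP3 by default)

reply = avatar_reply("What does 'ephemeral' mean?")
with open("reply.mp3", "wb") as f:
    f.write(speak(reply))

In the published system the response would be streamed to an avatar in the headset in real time; writing the audio to a file stands in for that playback here.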