AHCI RESEARCH GROUP
Publications
Papers published in international journals,
proceedings of conferences, workshops and books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTeX record for each paper.
2025
Bendarkawi, J.; Ponce, A.; Mata, S. C.; Aliu, A.; Liu, Y.; Zhang, L.; Liaqat, A.; Rao, V. N.; Monroy-Hernández, A.
ConversAR: Exploring Embodied LLM-Powered Group Conversations in Augmented Reality for Second Language Learners Proceedings Article
In: Conf Hum Fact Comput Syst Proc, Association for Computing Machinery, 2025, ISBN: 979-8-4007-1395-8.
Abstract | Links | BibTeX | Tags: Augmented Reality, Augmented Reality (AR), Embodied agent, Embodied Agents, Language learning, Language Model, Large language model, large language models (LLMs), Population dynamics, Second language, Second Language Acquisition, Second language learners, Social dynamics, Turn-taking
@inproceedings{bendarkawi_conversar_2025,
  title     = {{ConversAR}: Exploring Embodied {LLM}-Powered Group Conversations in {Augmented Reality} for Second Language Learners},
  author    = {J. Bendarkawi and A. Ponce and S. C. Mata and A. Aliu and Y. Liu and L. Zhang and A. Liaqat and V. N. Rao and A. Monroy-Hern{\'a}ndez},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005746128&doi=10.1145%2f3706599.3720162&partnerID=40&md5=8330d3e0cb735caffa828b848ab9a110},
  doi       = {10.1145/3706599.3720162},
  isbn      = {979-8-4007-1395-8},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Conf Hum Fact Comput Syst Proc},
  publisher = {Association for Computing Machinery},
  abstract  = {Group conversations are valuable for second language (L2) learners as they provide opportunities to practice listening and speaking, exercise complex turn-taking skills, and experience group social dynamics in a target language. However, most existing Augmented Reality (AR)-based conversational learning tools focus on dyadic interactions rather than group dialogues. Although research has shown that AR can help reduce speaking anxiety and create a comfortable space for practicing speaking skills in dyadic scenarios, especially with Large Language Model (LLM)-based conversational agents, the potential for group language practice using these technologies remains largely unexplored. We introduce ConversAR, a gpt-4o powered AR application, that enables L2 learners to practice contextualized group conversations. Our system features two embodied LLM agents with vision-based scene understanding and live captions. In a system evaluation with 10 participants, users reported reduced speaking anxiety and increased learner autonomy compared to perceptions of in-person practice methods with other learners. © 2025 Copyright held by the owner/author(s).},
  keywords  = {Augmented Reality, Augmented Reality (AR), Embodied agent, Embodied Agents, Language learning, Language Model, Large language model, large language models (LLMs), Population dynamics, Second language, Second Language Acquisition, Second language learners, Social dynamics, Turn-taking},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2024
White, M.; Banerjee, N. K.; Banerjee, S.
VRcabulary: A VR Environment for Reinforced Language Learning via Multi-Modular Design Proceedings Article
In: Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR, pp. 315–319, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-8-3503-7202-1.
Abstract | Links | BibTeX | Tags: Current, E-Learning, Foreign language, Immersive, Instructional modules, Language learning, Modular designs, Modulars, Multi-modular, Reinforcement, Second language, Virtual Reality, Virtual-reality environment
@inproceedings{white_vrcabulary_2024,
  title     = {{VRcabulary}: A {VR} Environment for Reinforced Language Learning via Multi-Modular Design},
  author    = {M. White and N. K. Banerjee and S. Banerjee},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85187241160&doi=10.1109%2fAIxVR59861.2024.00053&partnerID=40&md5=4d8ff8ac5c6aa8336a571ba906fe0f5d},
  doi       = {10.1109/AIxVR59861.2024.00053},
  isbn      = {979-8-3503-7202-1},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR},
  pages     = {315--319},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {We demonstrate VRcabulary, a hierarchical modular virtual reality (VR) environment for language learning (LL). Current VR LL apps lack the benefit of reinforcement presented in typical classroom environments. Apps either introduce content in the second language and lack retention testing, or provide gamification without an in-environment instructional component. To acquire reinforcement of knowledge, the learner needs to visit the app multiple times, increasing the potential for monotony. In VRcabulary, we introduce a multi-modular hierarchical design with 3 modules - an instructional module providing AI-generated audio playbacks of object names, a practice module enabling interaction based reinforcement of object names in response to audio playback, and an exam module enabling retention testing through interaction. To incentivize engagement by reducing monotony, we keep the designs of each modules distinct. We provide sequential object presentations in the instructional module and multiple object assortments in the practice and exam modules. We provide feedback and multiple trials in the practice module, but eliminate them from the exam module. We expect cross-module diversity of interaction in VRcabulary to enhance engagement in VR LL. © 2024 IEEE.},
  keywords  = {Current, E-Learning, Foreign language, Immersive, Instructional modules, Language learning, Modular designs, Modulars, Multi-modular, Reinforcement, Second language, Virtual Reality, Virtual-reality environment},
  pubstate  = {published},
  tppubtype = {inproceedings}
}