AHCI RESEARCH GROUP
Publications
Papers published in international journals,
proceedings of conferences, workshops and books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTeX record for each paper.
2025
Juarez, A.; Rábago, J.; Pliego, A.; Salazar, G.; Hinrichsen, C.; Castro, M.; Pachajoa, T.
Innovative Methodology for the Integration of Emerging Technologies in Global Education: Mixed Realities, AI, Metaverse, and SDGs Proceedings Article
In: Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798350355239 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Arts computing, Collaborative learning, E-Learning, Education computing, Educational Innovation, Educational innovations, Educational Technology, Emerging technologies, Engineering education, Global education, High educations, higher education, Innovative methodologies, Mexico, Metaverse, Metaverses, Mixed Realities, Mixed reality, Product design, Sebastian, Social aspects, Students, Sustainable development, Sustainable Development Goals, Teaching, Technical skills
@inproceedings{juarez_innovative_2025,
  title = {Innovative Methodology for the Integration of Emerging Technologies in Global Education: Mixed Realities, {AI}, Metaverse, and {SDGs}},
  author = {A. Juarez and J. Rábago and A. Pliego and G. Salazar and C. Hinrichsen and M. Castro and T. Pachajoa},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105011951378&doi=10.1109%2FIFE63672.2025.11024834&partnerID=40&md5=4e101ad649487ce729c3a5fa9e875559},
  doi = {10.1109/IFE63672.2025.11024834},
  isbn = {9798350355239},
  year = {2025},
  date = {2025-01-01},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract = {The academic collaboration among Tecnologico de Monterrey (Mexico), the University of San Sebastián (Chile), and the Catholic University of Colombia was an innovative effort to transform the teaching of the "Formal Representation of Space" through the use of emerging technologies. This project was based on the convergence of the theory of Community of Inquiry (CoI), International Collaborative Online Learning (COIL), and the integration of Mixed Realities, Metaverse, and generative artificial intelligence. The central objective of this collaboration was to improve the technical and creative skills of students of architecture, industrial design, digital art, communication, and music production through a pedagogical approach that utilizes 3D spatial visualization and intercultural interaction. The use of the Tec Virtual Campus's Metaverse and the Global Classroom program was instrumental in facilitating real-time collaboration among students from different countries, allowing for the creation of joint projects that reflect a deep understanding of the Sustainable Development Goals (SDGs). This effort resulted in an advanced methodology that improves students' technical skills and promotes a meaningful global commitment to sustainability and social responsibility, reflecting the transformative power of international collaborative education. © 2025 Elsevier B.V., All rights reserved.},
  keywords = {Artificial intelligence, Arts computing, Collaborative learning, E-Learning, Education computing, Educational Innovation, Educational innovations, Educational Technology, Emerging technologies, Engineering education, Global education, High educations, higher education, Innovative methodologies, Mexico, Metaverse, Metaverses, Mixed Realities, Mixed reality, Product design, Sebastian, Social aspects, Students, Sustainable development, Sustainable Development Goals, Teaching, Technical skills},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Kadri, M.; Boubakri, F. -E.; Azough, A.; Zidani, K. A.
Game-Based VR Anatomy Learning with Generative AI: Proof of Concept for GenAiVR-Lab Proceedings Article
In: pp. 100–105, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331534899 (ISBN).
Abstract | Links | BibTeX | Tags: Anatomy educations, Artificial intelligence, Bone, Bone fragments, Collaborative learning, E-Learning, Educational Evaluation, Game-Based, Game-based learning, Generative AI, Human computer interaction, Human skeleton, Laboratories, Learning systems, Medical students, Proof of concept, Virtual Reality, Virtual Reality Anatomy
@inproceedings{kadri_game-based_2025,
  title = {Game-Based {VR} Anatomy Learning with Generative {AI}: Proof of Concept for {GenAiVR-Lab}},
  author = {M. Kadri and F.-E. Boubakri and A. Azough and K. A. Zidani},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105015604062&doi=10.1109%2FSCME62582.2025.11104860&partnerID=40&md5=c557ca7975a9683e8c271fbb3a21c4e4},
  doi = {10.1109/SCME62582.2025.11104860},
  isbn = {9798331534899},
  year = {2025},
  date = {2025-01-01},
  pages = {100--105},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract = {Anatomy education often fails to engage learners or foster precise 3D spatial understanding of complex systems like the human skeleton. We present a Game-Based VR Anatomy Learning system with Generative AI, introduced as a Proof of Concept for our GenAiVR-Lab framework. This prototype validates the foundational pillars of our future development. In the Anatomy Lab scenario, 25 medical students explore a virtual skeleton and undertake a timed mission: assemble three bone fragments within two minutes. Incorrect picks are disabled with point deductions; learners may request a one-shot conversational hint from a ChatGPT-powered Virtual Anatomy Instructor; if time expires, a teammate continues with remaining time. We measured perception changes using pre- and post-test versions of four Perspective Questionnaires: Learning Perspective (LPQ), VR-AI Perspective (VRAIPQ), Generative AI Perspective (GAIPQ), and Game-Based Learning Perspective (GBLPQ). Results demonstrate significant improvements across all four perspectives, with mean scores increasing by approximately 1.3 points on the 5-point Likert scale and nearly all participants showing positive gains. Effect sizes ranged from 2.52 to 3.34, indicating large practical significance, with all measures reaching statistical significance. These findings demonstrate that collaborative game mechanics and generative AI guidance enhance engagement and spatial reasoning. We contrast this PoC with the full GenAiVR-Lab vision - integrating Retrieval-Augmented Generation for precise feedback, multimodal I/O, and adaptive pathways - and outline a roadmap for next-generation immersive anatomy education. © 2025 Elsevier B.V., All rights reserved.},
  keywords = {Anatomy educations, Artificial intelligence, Bone, Bone fragments, Collaborative learning, E-Learning, Educational Evaluation, Game-Based, Game-based learning, Generative AI, Human computer interaction, Human skeleton, Laboratories, Learning systems, Medical students, Proof of concept, Virtual Reality, Virtual Reality Anatomy},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Boubakri, F. -E.; Kadri, M.; Kaghat, F. Z.; Azough, A.; Tairi, H.
Exploring 3D Cardiac Anatomy with Text-Based AI Guidance in Virtual Reality Proceedings Article
In: pp. 43–48, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331534899 (ISBN).
Abstract | Links | BibTeX | Tags: 3D cardiac anatomy, 3d heart models, Anatomy education, Anatomy educations, Cardiac anatomy, Collaborative environments, Collaborative learning, Computer aided instruction, Curricula, Design and Development, E-Learning, Education computing, Generative AI, Heart, Immersive environment, Learning systems, Natural language processing systems, Social virtual reality, Students, Teaching, Three dimensional computer graphics, Virtual Reality
@inproceedings{boubakri_exploring_2025,
  title = {Exploring {3D} Cardiac Anatomy with Text-Based {AI} Guidance in Virtual Reality},
  author = {F.-E. Boubakri and M. Kadri and F. Z. Kaghat and A. Azough and H. Tairi},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105015676741&doi=10.1109%2FSCME62582.2025.11104869&partnerID=40&md5=c961694f97c50adc23b6826dddb265cd},
  doi = {10.1109/SCME62582.2025.11104869},
  isbn = {9798331534899},
  year = {2025},
  date = {2025-01-01},
  pages = {43--48},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract = {This paper presents the design and development of a social virtual reality (VR) classroom focused on cardiac anatomy education for students in grades K-12. The application allows multiple learners to explore a detailed 3D heart model within an immersive and collaborative environment. A crucial part of the system is the integration of a text-based conversational AI interface powered by ChatGPT, which provides immediate, interactive explanations and addresses student inquiries about heart anatomy. The system supports both guided and exploratory learning modes, encourages peer collaboration, and offers personalized support through natural language dialogue. We evaluated the system's effectiveness through a comprehensive study measuring learning perception (LPQ), VR perception (VRPQ), AI perception (AIPQ), and VR-related symptoms (VRSQ). Potential applications include making high-quality cardiac anatomy education more affordable for K-12 schools with limited resources, offering an adaptable AI-based tutoring system for students to learn at their own pace, and equipping educators with an easy-to-use tool to integrate into their science curriculum with minimal additional training. © 2025 Elsevier B.V., All rights reserved.},
  keywords = {3D cardiac anatomy, 3d heart models, Anatomy education, Anatomy educations, Cardiac anatomy, Collaborative environments, Collaborative learning, Computer aided instruction, Curricula, Design and Development, E-Learning, Education computing, Generative AI, Heart, Immersive environment, Learning systems, Natural language processing systems, Social virtual reality, Students, Teaching, Three dimensional computer graphics, Virtual Reality},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Zhao, P.; Wei, X.
The Role of 3D Virtual Humans in Communication and Assisting Students' Learning in Transparent Display Environments: Perspectives of Pre-Service Teachers Proceedings Article
In: Chui, K. T.; Jaikaeo, C.; Niramitranon, J.; Kaewmanee, W.; Ng, K. -K.; Ongkunaruk, P. (Ed.): pp. 319–323, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331595500 (ISBN).
Abstract | Links | BibTeX | Tags: 3D virtual human, Assistive technology, CDIO teaching model, Collaborative learning, Collaborative practices, Display environments, E-Learning, Educational Technology, Engineering education, feedback, Integration, Knowledge delivery, Knowledge transfer, Learning algorithms, Natural language processing systems, Preservice teachers, Psychology computing, Student learning, Students, Teaching, Teaching model, Transparent display environment, Transparent displays, Virtual Reality
@inproceedings{zhao_role_2025,
  title = {The Role of {3D} Virtual Humans in Communication and Assisting Students' Learning in Transparent Display Environments: Perspectives of Pre-Service Teachers},
  author = {P. Zhao and X. Wei},
  editor = {K. T. Chui and C. Jaikaeo and J. Niramitranon and W. Kaewmanee and K.-K. Ng and P. Ongkunaruk},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105015746241&doi=10.1109%2FISET65607.2025.00069&partnerID=40&md5=08c39b84fa6bd6ac13ddbed203d7b1d9},
  doi = {10.1109/ISET65607.2025.00069},
  isbn = {9798331595500},
  year = {2025},
  date = {2025-01-01},
  pages = {319--323},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract = {The integration of transparent display and 3D virtual human technologies into education is expanding rapidly; however, their systematic incorporation into the CDIO teaching model remains underexplored, particularly in supporting complex knowledge delivery and collaborative practice. This study developed an intelligent virtual teacher assistance system based on generative AI and conducted a teaching experiment combining transparent display and 3D virtual human technologies. Feedback was collected through focus group interviews with 24 pre-service teachers. Results show that the virtual human, through natural language and multimodal interaction, significantly enhanced classroom engagement and contextual understanding, while its real-time feedback and personalized guidance effectively supported CDIO-based collaborative learning. Nonetheless, challenges remain in contextual adaptability and emotional feedback accuracy. Accordingly, the study proposes a path for technical optimization through the integration of multimodal emotion recognition, adaptive instructional algorithms, and nonintrusive data collection, offering empirical and theoretical insights into educational technology integration within the CDIO framework and future intelligent learning tools. © 2025 Elsevier B.V., All rights reserved.},
  keywords = {3D virtual human, Assistive technology, CDIO teaching model, Collaborative learning, Collaborative practices, Display environments, E-Learning, Educational Technology, Engineering education, feedback, Integration, Knowledge delivery, Knowledge transfer, Learning algorithms, Natural language processing systems, Preservice teachers, Psychology computing, Student learning, Students, Teaching, Teaching model, Transparent display environment, Transparent displays, Virtual Reality},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Dang, B.; Huynh, L.; Gul, F.; Rosé, C.; Järvelä, S.; Nguyen, A.
Human–AI collaborative learning in mixed reality: Examining the cognitive and socio-emotional interactions Journal Article
In: British Journal of Educational Technology, vol. 56, no. 5, pp. 2078–2101, 2025, ISSN: 00071013 (ISSN); 14678535 (ISSN), (Publisher: John Wiley and Sons Inc).
Abstract | Links | BibTeX | Tags: Artificial intelligence agent, Collaborative learning, Educational robots, Embodied agent, Emotional intelligence, Emotional interactions, Generative adversarial networks, generative artificial intelligence, Hierarchical clustering, Human–AI collaboration, Interaction pattern, Mixed reality, ordered network analysis, Ordered network analyze, Social behavior, Social interactions, Social psychology, Students, Supervised learning, Teaching
@article{dang_humanai_2025,
  title = {Human–AI collaborative learning in mixed reality: Examining the cognitive and socio-emotional interactions},
  author = {B. Dang and L. Huynh and F. Gul and C. Rosé and S. Järvelä and A. Nguyen},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105007896240&doi=10.1111%2Fbjet.13607&partnerID=40&md5=1c80a5bfe5917e7a9b14ee5809da232f},
  doi = {10.1111/bjet.13607},
  issn = {0007-1013; 1467-8535},
  year = {2025},
  date = {2025-01-01},
  journal = {British Journal of Educational Technology},
  volume = {56},
  number = {5},
  pages = {2078--2101},
  abstract = {The rise of generative artificial intelligence (GAI), especially with multimodal large language models like GPT-4o, sparked transformative potential and challenges for learning and teaching. With potential as a cognitive offloading tool, GAI can enable learners to focus on higher-order thinking and creativity. Yet, this also raises questions about integration into traditional education due to the limited research on learners' interactions with GAI. Some studies with GAI focus on text-based human–AI interactions, while research on embodied GAI in immersive environments like mixed reality (MR) remains unexplored. To address this, this study investigates interaction dynamics between learners and embodied GAI agents in MR, examining cognitive and socio-emotional interactions during collaborative learning. We investigated the paired interactive patterns between a student and an embodied GAI agent in MR, based on data from 26 higher education students with 1317 recorded activities. Data were analysed using a multi-layered learning analytics approach, including quantitative content analysis, sequence analysis via hierarchical clustering and pattern analysis through ordered network analysis (ONA). Our findings identified two interaction patterns: type (1) AI-led Supported Exploratory Questioning (AISQ) and type (2) Learner-Initiated Inquiry (LII) group. Despite their distinction in characteristic, both types demonstrated comparable levels of socio-emotional engagement and exhibited meaningful cognitive engagement, surpassing the superficial content reproduction that can be observed in interactions with GPT models. This study contributes to the human–AI collaboration and learning studies, extending understanding to learning in MR environments and highlighting implications for designing AI-based educational tools. 
Practitioner notes What is already known about this topic Socio-emotional interactions are fundamental to cognitive processes and play a critical role in collaborative learning. Generative artificial intelligence (GAI) holds transformative potential for education but raises questions about how learners interact with such technology. Most existing research focuses on text-based interactions with GAI; there is limited empirical evidence on how embodied GAI agents within immersive environments like Mixed Reality (MR) influence the cognitive and socio-emotional interactions for learning and regulation. What this paper adds Provides first empirical insights into cognitive and socio-emotional interaction patterns between learners and embodied GAI agents in MR environments. Identifies two distinct interaction patterns: AISQ type (structured, guided, supportive) and LII type (inquiry-driven, exploratory, engaging), demonstrating how these patterns influence collaborative learning dynamics. Shows that both interaction types facilitate meaningful cognitive engagement, moving beyond superficial content reproduction commonly associated with GAI interactions. Implications for practice and/or policy Insights from the identified interaction patterns can inform the design of teaching strategies that effectively integrate embodied GAI agents to enhance both cognitive and socio-emotional engagement. Findings can guide the development of AI-based educational tools that capitalise on the capabilities of embodied GAI agents, supporting a balance between structured guidance and exploratory learning. Highlights the need for ethical considerations in adopting embodied GAI agents, particularly regarding the human-like realism of these agents and potential impacts on learner dependency and interaction norms. © 2025 Elsevier B.V., All rights reserved.},
  note = {Publisher: John Wiley and Sons Inc},
  keywords = {Artificial intelligence agent, Collaborative learning, Educational robots, Embodied agent, Emotional intelligence, Emotional interactions, Generative adversarial networks, generative artificial intelligence, Hierarchical clustering, Human–AI collaboration, Interaction pattern, Mixed reality, ordered network analysis, Ordered network analyze, Social behavior, Social interactions, Social psychology, Students, Supervised learning, Teaching},
  pubstate = {published},
  tppubtype = {article}
}
2024
Liu, Z.; Zhu, Z.; Zhu, L.; Jiang, E.; Hu, X.; Peppler, K.; Ramani, K.
ClassMeta: Designing Interactive Virtual Classmate to Promote VR Classroom Participation Proceedings Article
In: Conf Hum Fact Comput Syst Proc, Association for Computing Machinery, 2024, ISBN: 979-840070330-0 (ISBN).
Abstract | Links | BibTeX | Tags: 3D Avatars, Behavioral Research, Classroom learning, Collaborative learning, Computational Linguistics, Condition, E-Learning, Human behaviors, Language Model, Large language model, Learning experiences, Learning systems, pedagogical agent, Pedagogical agents, Students, Three dimensional computer graphics, Virtual Reality, VR classroom
@inproceedings{liu_classmeta_2024,
  title = {{ClassMeta}: Designing Interactive Virtual Classmate to Promote {VR} Classroom Participation},
  author = {Z. Liu and Z. Zhu and L. Zhu and E. Jiang and X. Hu and K. Peppler and K. Ramani},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85194868458&doi=10.1145%2f3613904.3642947&partnerID=40&md5=0592b2f977a2ad2e6366c6fa05808a6a},
  doi = {10.1145/3613904.3642947},
  isbn = {979-840070330-0},
  year = {2024},
  date = {2024-01-01},
  booktitle = {Proceedings of the {CHI} Conference on Human Factors in Computing Systems},
  publisher = {Association for Computing Machinery},
  abstract = {Peer influence plays a crucial role in promoting classroom participation, where behaviors from active students can contribute to a collective classroom learning experience. However, the presence of these active students depends on several conditions and is not consistently available across all circumstances. Recently, Large Language Models (LLMs) such as GPT have demonstrated the ability to simulate diverse human behaviors convincingly due to their capacity to generate contextually coherent responses based on their role settings. Inspired by this advancement in technology, we designed ClassMeta, a GPT-4 powered agent to help promote classroom participation by playing the role of an active student. These agents, which are embodied as 3D avatars in virtual reality, interact with actual instructors and students with both spoken language and body gestures. We conducted a comparative study to investigate the potential of ClassMeta for improving the overall learning experience of the class. © 2024 Copyright held by the owner/author(s)},
  keywords = {3D Avatars, Behavioral Research, Classroom learning, Collaborative learning, Computational Linguistics, Condition, E-Learning, Human behaviors, Language Model, Large language model, Learning experiences, Learning systems, pedagogical agent, Pedagogical agents, Students, Three dimensional computer graphics, Virtual Reality, VR classroom},
  pubstate = {published},
  tppubtype = {inproceedings}
}