AHCI RESEARCH GROUP
Publications
Papers published in international journals,
proceedings of conferences, workshops and books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTeX record for each paper.
2025
Alibrahim, Y.; Ibrahim, M.; Gurdayal, D.; Munshi, M.
AI speechbots and 3D segmentations in virtual reality improve radiology on-call training in resource-limited settings Journal Article
In: Intelligence-Based Medicine, vol. 11, 2025, ISSN: 26665212 (ISSN), (Publisher: Elsevier B.V.).
Abstract | Links | BibTeX | Tags: 3D segmentation, AI speechbots, Article, artificial intelligence chatbot, ChatGPT, computer assisted tomography, Deep learning, headache, human, Image segmentation, interventional radiology, Large language model, Likert scale, nausea, Proof of concept, prospective study, radiology, radiology on call training, resource limited setting, Teaching, Training, ultrasound, Virtual Reality, voice recognition
@article{alibrahim_ai_2025,
title = {{AI} speechbots and {3D} segmentations in virtual reality improve radiology on-call training in resource-limited settings},
author = {Alibrahim, Y. and Ibrahim, M. and Gurdayal, D. and Munshi, M.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105001472313&doi=10.1016%2Fj.ibmed.2025.100245&partnerID=40&md5=981139e173e781b67dba5a46be64de31},
doi = {10.1016/j.ibmed.2025.100245},
issn = {2666-5212},
year = {2025},
date = {2025-01-01},
journal = {Intelligence-Based Medicine},
volume = {11},
abstract = {Objective: Evaluate the use of large-language model (LLM) speechbot tools and deep learning-assisted generation of 3D reconstructions when integrated in a virtual reality (VR) setting to teach radiology on-call topics to radiology residents. Methods: Three first year radiology residents in Guyana were enrolled in an 8-week radiology course that focused on preparation for on-call duties. The course, delivered via VR headsets with custom software integrating LLM-powered speechbots trained on imaging reports and 3D reconstructions segmented with the help of a deep learning model. Each session focused on a specific radiology area, employing a didactic and case-based learning approach, enhanced with 3D reconstructions and an LLM-powered speechbot. Post-session, residents reassessed their knowledge and provided feedback on their VR and LLM-powered speechbot experiences. Results/discussion: Residents found that the 3D reconstructions segmented semi-automatically by deep learning algorithms and AI-driven self-learning via speechbot was highly valuable. The 3D reconstructions, especially in the interventional radiology session, were helpful and the benefit is augmented by VR where navigating the models is seamless and perception of depth is pronounced. Residents also found conversing with the AI-speechbot seamless and was valuable in their post session self-learning. The major drawback of VR was motion sickness, which was mild and improved over time. Conclusion: AI-assisted VR radiology education could be used to develop new and accessible ways of teaching a variety of radiology topics in a seamless and cost-effective way. This could be especially useful in supporting radiology education remotely in regions which lack local radiology expertise. © 2025 Elsevier B.V., All rights reserved.},
note = {Publisher: Elsevier B.V.},
keywords = {3D segmentation, AI speechbots, Article, artificial intelligence chatbot, ChatGPT, computer assisted tomography, Deep learning, headache, human, Image segmentation, interventional radiology, Large language model, Likert scale, nausea, Proof of concept, prospective study, radiology, radiology on call training, resource limited setting, Teaching, Training, ultrasound, Virtual Reality, voice recognition},
pubstate = {published},
tppubtype = {article}
}
Li, Y.; Pang, E. C. H.; Ng, C. S. Y.; Azim, M.; Leung, H.
Enhancing Linear Algebra Education with AI-Generated Content in the CityU Metaverse: A Comparative Study Proceedings Article
In: T., Hao; J.G., Wu; X., Luo; Y., Sun; Y., Mu; S., Ge; W., Xie (Ed.): Lect. Notes Comput. Sci., pp. 3–16, Springer Science and Business Media Deutschland GmbH, 2025, ISBN: 03029743 (ISSN); 978-981964406-3 (ISBN).
Abstract | Links | BibTeX | Tags: Comparatives studies, Digital age, Digital interactions, digital twin, Educational metaverse, Engineering education, Generative AI, Immersive, Matrix algebra, Metaverse, Metaverses, Personnel training, Students, Teaching, University campus, Virtual environments, virtual learning environment, Virtual learning environments, Virtual Reality, Virtualization
@inproceedings{li_enhancing_2025,
title = {Enhancing Linear Algebra Education with {AI}-Generated Content in the {CityU} Metaverse: A Comparative Study},
author = {Li, Y. and Pang, E. C. H. and Ng, C. S. Y. and Azim, M. and Leung, H.},
editor = {Hao, T. and Wu, J. G. and Luo, X. and Sun, Y. and Mu, Y. and Ge, S. and Xie, W.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105003632691&doi=10.1007%2f978-981-96-4407-0_1&partnerID=40&md5=c067ba5d4c15e9c0353bf315680531fc},
doi = {10.1007/978-981-96-4407-0_1},
isbn = {978-981-96-4406-3},
issn = {0302-9743},
year = {2025},
date = {2025-01-01},
booktitle = {Lecture Notes in Computer Science},
series = {Lecture Notes in Computer Science},
volume = {15589},
pages = {3--16},
publisher = {Springer Science and Business Media Deutschland GmbH},
abstract = {In today’s digital age, the metaverse is emerging as the forthcoming evolution of the internet. It provides an immersive space that marks a new frontier in the way digital interactions are facilitated and experienced. In this paper, we present the CityU Metaverse, which aims to construct a digital twin of our university campus. It is designed as an educational virtual world where learning applications can be embedded in this virtual campus, supporting not only remote and collaborative learning but also professional technical training to enhance educational experiences through immersive and interactive learning. To evaluate the effectiveness of this educational metaverse, we conducted an experiment focused on 3D linear transformation in linear algebra, with teaching content generated by generative AI, comparing our metaverse system with traditional teaching methods. Knowledge tests and surveys assessing learning interest revealed that students engaged with the CityU Metaverse, facilitated by AI-generated content, outperformed those in traditional settings and reported greater enjoyment during the learning process. The work provides valuable perspectives on the behaviors and interactions within the metaverse by analyzing user preferences and learning outcomes. © The Author(s), under exclusive license to Springer Nature Singapore Pte Ltd. 2025.},
keywords = {Comparatives studies, Digital age, Digital interactions, digital twin, Educational metaverse, Engineering education, Generative AI, Immersive, Matrix algebra, Metaverse, Metaverses, Personnel training, Students, Teaching, University campus, Virtual environments, virtual learning environment, Virtual learning environments, Virtual Reality, Virtualization},
pubstate = {published},
tppubtype = {inproceedings}
}
Gao, H.; Xie, Y.; Kasneci, E.
PerVRML: ChatGPT-Driven Personalized VR Environments for Machine Learning Education Journal Article
In: International Journal of Human-Computer Interaction, 2025, ISSN: 10447318 (ISSN); 15327590 (ISSN), (Publisher: Taylor and Francis Ltd.).
Abstract | Links | BibTeX | Tags: Backpropagation, ChatGPT, Curricula, Educational robots, Immersive learning, Interactive learning, Language Model, Large language model, large language models, Learning mode, Machine learning education, Machine-learning, Personalized learning, Support vector machines, Teaching, Virtual Reality, Virtual-reality environment, Virtualization
@article{gao_pervrml_2025,
title = {{PerVRML}: {ChatGPT}-Driven Personalized {VR} Environments for Machine Learning Education},
author = {Gao, H. and Xie, Y. and Kasneci, E.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005776517&doi=10.1080%2F10447318.2025.2504188&partnerID=40&md5=27accdeba3e1e2202fc1102053d54b7c},
doi = {10.1080/10447318.2025.2504188},
issn = {1044-7318, 1532-7590},
year = {2025},
date = {2025-01-01},
journal = {International Journal of Human-Computer Interaction},
abstract = {The advent of large language models (LLMs) such as ChatGPT has demonstrated significant potential for advancing educational technologies. Recently, growing interest has emerged in integrating ChatGPT with virtual reality (VR) to provide interactive and dynamic learning environments. This study explores the effectiveness of ChatGTP-driven VR in facilitating machine learning education through PerVRML. PerVRML incorporates a ChatGPT-powered avatar that provides real-time assistance and uses LLMs to personalize learning paths based on various sensor data from VR. A between-subjects design was employed to compare two learning modes: personalized and non-personalized. Quantitative data were collected from assessments, user experience surveys, and interaction metrics. The results indicate that while both learning modes supported learning effectively, ChatGPT-powered personalization significantly improved learning outcomes and had distinct impacts on user feedback. These findings underscore the potential of ChatGPT-enhanced VR to deliver adaptive and personalized educational experiences. © 2025 Elsevier B.V., All rights reserved.},
note = {Publisher: Taylor and Francis Ltd.},
keywords = {Backpropagation, ChatGPT, Curricula, Educational robots, Immersive learning, Interactive learning, Language Model, Large language model, large language models, Learning mode, Machine learning education, Machine-learning, Personalized learning, Support vector machines, Teaching, Virtual Reality, Virtual-reality environment, Virtualization},
pubstate = {published},
tppubtype = {article}
}
Nygren, T.; Samuelsson, M.; Hansson, P. -O.; Efimova, E.; Bachelder, S.
AI Versus Human Feedback in Mixed Reality Simulations: Comparing LLM and Expert Mentoring in Preservice Teacher Education on Controversial Issues Journal Article
In: International Journal of Artificial Intelligence in Education, 2025, ISSN: 15604306 (ISSN); 15604292 (ISSN), (Publisher: Springer).
Abstract | Links | BibTeX | Tags: AI-generated feedback, Controversial issue in social study education, Controversial issues in social studies education, Curricula, Domain knowledge, Economic and social effects, Expert systems, Generative AI, Human engineering, Knowledge engineering, Language Model, Large language model, large language models (LLMs), Mixed reality, Mixed reality simulation, Mixed reality simulation (MRS), Pedagogical content knowledge, Pedagogical content knowledge (PCK), Personnel training, Preservice teachers, Social studies education, Teacher training, Teacher training simulation, Teacher training simulations, Teaching, Training simulation
@article{nygren_ai_2025,
title = {{AI} Versus Human Feedback in Mixed Reality Simulations: Comparing {LLM} and Expert Mentoring in Preservice Teacher Education on Controversial Issues},
author = {Nygren, T. and Samuelsson, M. and Hansson, P.-O. and Efimova, E. and Bachelder, S.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105007244772&doi=10.1007%2Fs40593-025-00484-8&partnerID=40&md5=3404a614af6fe4d4d2cb284060600e3c},
doi = {10.1007/s40593-025-00484-8},
issn = {1560-4306, 1560-4292},
year = {2025},
date = {2025-01-01},
journal = {International Journal of Artificial Intelligence in Education},
abstract = {This study explores the potential role of AI-generated mentoring within simulated environments designed for teacher education, specifically focused on the challenges of teaching controversial issues. Using a mixed-methods approach, we empirically investigate the potential and challenges of AI-generated feedback compared to that provided by human experts when mentoring preservice teachers in the context of mixed reality simulations. Findings reveal that human experts offered more mixed and nuanced feedback than ChatGPT-4o and Perplexity, especially when identifying missed teaching opportunities and balancing classroom discussions. The AI models evaluated were publicly available pro versions of LLMs and were tested using detailed prompts and coding schemes aligned with educational theories. AI systems were not very good at identifying aspects of general, pedagogical or content knowledge based on Shulman’s theories but were still quite effective in generating feedback in line with human experts. The study highlights the promise of AI to enhance teacher training but underscores the importance of combining AI feedback with expert insights to address the complexities of real-world teaching. This research contributes to a growing understanding of AI's potential role and limitations in education. It suggests that, while AI can be valuable to scale mixed reality simulations, it should be carefully evaluated and balanced by human expertise in teacher education. © 2025 Elsevier B.V., All rights reserved.},
note = {Publisher: Springer},
keywords = {AI-generated feedback, Controversial issue in social study education, Controversial issues in social studies education, Curricula, Domain knowledge, Economic and social effects, Expert systems, Generative AI, Human engineering, Knowledge engineering, Language Model, Large language model, large language models (LLMs), Mixed reality, Mixed reality simulation, Mixed reality simulation (MRS), Pedagogical content knowledge, Pedagogical content knowledge (PCK), Personnel training, Preservice teachers, Social studies education, Teacher training, Teacher training simulation, Teacher training simulations, Teaching, Training simulation},
pubstate = {published},
tppubtype = {article}
}
Juarez, A.; Rábago, J.; Pliego, A.; Salazar, G.; Hinrichsen, C.; Castro, M.; Pachajoa, T.
Innovative Methodology for the Integration of Emerging Technologies in Global Education: Mixed Realities, AI, Metaverse, and SDGs Proceedings Article
In: Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798350355239 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Arts computing, Collaborative learning, E-Learning, Education computing, Educational Innovation, Educational innovations, Educational Technology, Emerging technologies, Engineering education, Global education, High educations, higher education, Innovative methodologies, Me-xico, Metaverse, Metaverses, Mixed Realities, Mixed reality, Product design, Sebastian, Social aspects, Students, Sustainable development, Sustainable Development Goals, Teaching, Technical skills
@inproceedings{juarez_innovative_2025,
title = {Innovative Methodology for the Integration of Emerging Technologies in Global Education: Mixed Realities, {AI}, Metaverse, and {SDGs}},
author = {Juarez, A. and Rábago, J. and Pliego, A. and Salazar, G. and Hinrichsen, C. and Castro, M. and Pachajoa, T.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105011951378&doi=10.1109%2FIFE63672.2025.11024834&partnerID=40&md5=4e101ad649487ce729c3a5fa9e875559},
doi = {10.1109/IFE63672.2025.11024834},
isbn = {9798350355239},
year = {2025},
date = {2025-01-01},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {The academic collaboration among Tecnologico de Monterrey (Mexico), the University of San Sebastián (Chile), and the Catholic University of Colombia was an innovative effort to transform the teaching of the "Formal Representation of Space" through the use of emerging technologies. This project was based on the convergence of the theory of Community of Inquiry (CoI), International Collaborative Online Learning (COIL), and the integration of Mixed Realities, Metaverse, and generative artificial intelligence. The central objective of this collaboration was to improve the technical and creative skills of students of architecture, industrial design, digital art, communication, and music production through a pedagogical approach that utilizes 3D spatial visualization and intercultural interaction. The use of the Tec Virtual Campus's Metaverse and the Global Classroom program was instrumental in facilitating real-time collaboration among students from different countries, allowing for the creation of joint projects that reflect a deep understanding of the Sustainable Development Goals (SDGs). This effort resulted in an advanced methodology that improves students' technical skills and promotes a meaningful global commitment to sustainability and social responsibility, reflecting the transformative power of international collaborative education. © 2025 Elsevier B.V., All rights reserved.},
keywords = {Artificial intelligence, Arts computing, Collaborative learning, E-Learning, Education computing, Educational Innovation, Educational innovations, Educational Technology, Emerging technologies, Engineering education, Global education, High educations, higher education, Innovative methodologies, Me-xico, Metaverse, Metaverses, Mixed Realities, Mixed reality, Product design, Sebastian, Social aspects, Students, Sustainable development, Sustainable Development Goals, Teaching, Technical skills},
pubstate = {published},
tppubtype = {inproceedings}
}
López-Ozieblo, R.; Jiandong, D. S.; Techanamurthy, U.; Geng, H.; Nurgissayeva, A.
Enhancing AI Literacy through Immersive VR: Evaluating Pedagogical Design and GenAI Integration Proceedings Article
In: pp. 718–723, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331511661 (ISBN).
Abstract | Links | BibTeX | Tags: AI Literacy, Artificial intelligence, Behavioral Research, Classlet platform, E-Learning, Educational settings, Emerging technologies, Engineering education, Experiential learning, GenAI avatar, GenAI Avatars, Immersive virtual reality, Interactive computer graphics, Pedagogical designs, Pedagogical Innovation, Regression analysis, Teaching, Virtual Reality, Virtual-reality environment
@inproceedings{lopez-ozieblo_enhancing_2025,
title = {Enhancing {AI} Literacy through Immersive {VR}: Evaluating Pedagogical Design and {GenAI} Integration},
author = {López-Ozieblo, R. and Jiandong, D. S. and Techanamurthy, U. and Geng, H. and Nurgissayeva, A.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105013538409&doi=10.1109%2FCSTE64638.2025.11092268&partnerID=40&md5=a963d754ceaa73f360d9678d346a7686},
doi = {10.1109/CSTE64638.2025.11092268},
isbn = {9798331511661},
year = {2025},
date = {2025-01-01},
pages = {718--723},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {As AI continues to reshape industries, enhancing AI literacy is crucial for empowering learners to interact confidently and critically with emerging technologies. Virtual Reality (VR) offers a way to bridge theoretical knowledge with practical application but integrating VR into educational settings struggles with technical and pedagogical challenges. This study investigates how immersive VR environments can be optimized to enhance AI literacy and identifies key factors driving students' intent to adopt these technologies. Using Classlet - a VR platform that integrates interactive multimodal tasks, narrative-driven activities, and GenAI avatar interactions - we created a virtual office where learners engaged in research tasks and simulation scenarios with instructor-customized prompts. Our mixed-methods approach, involving participants from Hong Kong and Malaysia, focused on AI literacy within contexts such as Fast Fashion and European society. Regression analyses revealed that overall intent is strongly predicted by composite enjoyment, perceived performance, and behavioral control (R2 = 0.803). Post-AI literacy self-assessments were predicted by AI self-efficacy and enjoyment ( R2 = 0.421). However, female participants reported lower scores on AI efficacy (p = 0.042), suggesting baseline differences that warrant further investigation. Qualitative insights show the immersive and engaging nature of the experience while highlighting the need for further GenAI prompt designs for elaborative and bidirectional interactions. © 2025 Elsevier B.V., All rights reserved.},
keywords = {AI Literacy, Artificial intelligence, Behavioral Research, Classlet platform, E-Learning, Educational settings, Emerging technologies, Engineering education, Experiential learning, GenAI avatar, GenAI Avatars, Immersive virtual reality, Interactive computer graphics, Pedagogical designs, Pedagogical Innovation, Regression analysis, Teaching, Virtual Reality, Virtual-reality environment},
pubstate = {published},
tppubtype = {inproceedings}
}
Anvitha, K.; Durjay, T.; Sathvika, K.; Gnanendra, G.; Annamalai, S.; Natarajan, S. K.
EduBot: A Compact AI-Driven Study Assistant for Contextual Knowledge Retrieval Proceedings Article
In: Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331507756 (ISBN).
Abstract | Links | BibTeX | Tags: Chatbots, Computer aided instruction, Contextual knowledge, Curricula, Digital Education, E-Learning, Education computing, Educational Technology, Engineering education, Indexing (of information), Information Retrieval, Intelligent systems, Knowledge retrieval, LangChain Framework, Language Model, Large language model, learning experience, Learning experiences, Learning systems, LLM, PDF - Driven Chatbot, Query processing, Students, Teaching, Traditional learning, Virtual Reality
@inproceedings{anvitha_edubot_2025,
title = {{EduBot}: A Compact {AI}-Driven Study Assistant for Contextual Knowledge Retrieval},
author = {Anvitha, K. and Durjay, T. and Sathvika, K. and Gnanendra, G. and Annamalai, S. and Natarajan, S. K.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105013615976&doi=10.1109%2FGINOTECH63460.2025.11077097&partnerID=40&md5=b08377283f2ea2ee406d38d1d23f1e42},
doi = {10.1109/GINOTECH63460.2025.11077097},
isbn = {9798331507756},
year = {2025},
date = {2025-01-01},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {In the evolving landscape of educational technology, intelligent systems are redefining traditional learning methods by enhancing accessibility, adaptability, and engagement in instructional processes. This paper presents EduBot, a PDF-Driven Chatbot developed using advanced Large Language Models (LLMs) and leveraging frameworks like LangChain, OpenAI's Chat-Gpt, and Pinecone. EduBot is designed as an interactive educational assistant, responding to student queries based on faculty-provided guidelines embedded in PDF documents. Through natural language processing, EduBot streamlines information retrieval, providing accurate, context-aware responses that foster a self- directed learning experience. By aligning with specific academic requirements and enhancing clarity in information delivery, EduBot stands as a promising tool in personalized digital learning support. This paper explores the design, implementation, and impact of EduBot, offering insights into its potential as a scalable solution for academic institutions The demand for accessible and adaptive educational tools is increasing as students seek more personalized and efficient ways to enhance their learning experience. EduBot is a cutting- edge PDF-driven chatbot designed to act as a virtual educational assistant, helping students to navigate and understand course materials by answering queries directly based on faculty guidelines. Built upon Large Language Models (LLMs), specifically utilizing frameworks such as LangChain and OpenAI's GPT-3.5, EduBot provides a sophisticated solution for integrating curated academic content into interactive learning. With its backend support from Pinecone for optimized data indexing, EduBot offers accurate and context-specific responses, facilitating a deeper level of engagement and comprehension. The average relevancy score is 80%. 
This paper outlines the design and deployment of EduBot, emphasizing its architecture, adaptability, and contributions to the educational landscape, where such AI- driven tools are poised to become indispensable in fostering autonomous, personalized learning environments. © 2025 Elsevier B.V., All rights reserved.},
keywords = {Chatbots, Computer aided instruction, Contextual knowledge, Curricula, Digital Education, E-Learning, Education computing, Educational Technology, Engineering education, Indexing (of information), Information Retrieval, Intelligent systems, Knowledge retrieval, LangChain Framework, Language Model, Large language model, learning experience, Learning experiences, Learning systems, LLM, PDF - Driven Chatbot, Query processing, Students, Teaching, Traditional learning, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Boubakri, F. -E.; Kadri, M.; Kaghat, F. Z.; Azough, A.; Tairi, H.
Exploring 3D Cardiac Anatomy with Text-Based AI Guidance in Virtual Reality Proceedings Article
In: pp. 43–48, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331534899 (ISBN).
Abstract | Links | BibTeX | Tags: 3D cardiac anatomy, 3d heart models, Anatomy education, Anatomy educations, Cardiac anatomy, Collaborative environments, Collaborative learning, Computer aided instruction, Curricula, Design and Development, E-Learning, Education computing, Generative AI, Heart, Immersive environment, Learning systems, Natural language processing systems, Social virtual reality, Students, Teaching, Three dimensional computer graphics, Virtual Reality
@inproceedings{boubakri_exploring_2025,
title = {Exploring {3D} Cardiac Anatomy with Text-Based {AI} Guidance in Virtual Reality},
author = {Boubakri, F.-E. and Kadri, M. and Kaghat, F. Z. and Azough, A. and Tairi, H.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105015676741&doi=10.1109%2FSCME62582.2025.11104869&partnerID=40&md5=c961694f97c50adc23b6826dddb265cd},
doi = {10.1109/SCME62582.2025.11104869},
isbn = {9798331534899},
year = {2025},
date = {2025-01-01},
pages = {43--48},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {This paper presents the design and development of a social virtual reality (VR) classroom focused on cardiac anatomy education for students in grades K-12. The application allows multiple learners to explore a detailed 3D heart model within an immersive and collaborative environment. A crucial part of the system is the integration of a text-based conversational AI interface powered by ChatGPT, which provides immediate, interactive explanations and addresses student inquiries about heart anatomy. The system supports both guided and exploratory learning modes, encourages peer collaboration, and offers personalized support through natural language dialogue. We evaluated the system's effectiveness through a comprehensive study measuring learning perception (LPQ), VR perception (VRPQ), AI perception (AIPQ), and VR-related symptoms (VRSQ). Potential applications include making high-quality cardiac anatomy education more affordable for K-12 schools with limited resources, offering an adaptable AI-based tutoring system for students to learn at their own pace, and equipping educators with an easy-to-use tool to integrate into their science curriculum with minimal additional training. © 2025 Elsevier B.V., All rights reserved.},
keywords = {3D cardiac anatomy, 3d heart models, Anatomy education, Anatomy educations, Cardiac anatomy, Collaborative environments, Collaborative learning, Computer aided instruction, Curricula, Design and Development, E-Learning, Education computing, Generative AI, Heart, Immersive environment, Learning systems, Natural language processing systems, Social virtual reality, Students, Teaching, Three dimensional computer graphics, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhao, P.; Wei, X.
The Role of 3D Virtual Humans in Communication and Assisting Students' Learning in Transparent Display Environments: Perspectives of Pre-Service Teachers Proceedings Article
In: Chui, K. T.; Jaikaeo, C.; Niramitranon, J.; Kaewmanee, W.; Ng, K. -K.; Ongkunaruk, P. (Ed.): pp. 319–323, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331595500 (ISBN).
Abstract | Links | BibTeX | Tags: 3D virtual human, Assistive technology, CDIO teaching model, Collaborative learning, Collaborative practices, Display environments, E-Learning, Educational Technology, Engineering education, feedback, Integration, Knowledge delivery, Knowledge transfer, Learning algorithms, Natural language processing systems, Preservice teachers, Psychology computing, Student learning, Students, Teaching, Teaching model, Transparent display environment, Transparent displays, Virtual Reality
@inproceedings{zhao_role_2025,
title = {The Role of {3D} Virtual Humans in Communication and Assisting Students' Learning in Transparent Display Environments: Perspectives of Pre-Service Teachers},
author = {Zhao, P. and Wei, X.},
editor = {Chui, K. T. and Jaikaeo, C. and Niramitranon, J. and Kaewmanee, W. and Ng, K.-K. and Ongkunaruk, P.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105015746241&doi=10.1109%2FISET65607.2025.00069&partnerID=40&md5=08c39b84fa6bd6ac13ddbed203d7b1d9},
doi = {10.1109/ISET65607.2025.00069},
isbn = {9798331595500},
year = {2025},
date = {2025-01-01},
pages = {319--323},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {The integration of transparent display and 3D virtual human technologies into education is expanding rapidly; however, their systematic incorporation into the CDIO teaching model remains underexplored, particularly in supporting complex knowledge delivery and collaborative practice. This study developed an intelligent virtual teacher assistance system based on generative AI and conducted a teaching experiment combining transparent display and 3D virtual human technologies. Feedback was collected through focus group interviews with 24 pre-service teachers. Results show that the virtual human, through natural language and multimodal interaction, significantly enhanced classroom engagement and contextual understanding, while its real-time feedback and personalized guidance effectively supported CDIO-based collaborative learning. Nonetheless, challenges remain in contextual adaptability and emotional feedback accuracy. Accordingly, the study proposes a path for technical optimization through the integration of multimodal emotion recognition, adaptive instructional algorithms, and nonintrusive data collection, offering empirical and theoretical insights into educational technology integration within the CDIO framework and future intelligent learning tools. © 2025 Elsevier B.V., All rights reserved.},
keywords = {3D virtual human, Assistive technology, CDIO teaching model, Collaborative learning, Collaborative practices, Display environments, E-Learning, Educational Technology, Engineering education, feedback, Integration, Knowledge delivery, Knowledge transfer, Learning algorithms, Natural language processing systems, Preservice teachers, Psychology computing, Student learning, Students, Teaching, Teaching model, Transparent display environment, Transparent displays, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Casas, L.; Mitchell, K.
Structured Teaching Prompt Articulation for Generative-AI Role Embodiment with Augmented Mirror Video Displays Proceedings Article
In: Spencer, S. N. (Ed.): Proc.: VRCAI - ACM SIGGRAPH Int. Conf. Virtual-Reality Contin. Appl. Ind., Association for Computing Machinery, Inc, 2025, ISBN: 9798400713484 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Augmented Reality, Computer interaction, Contrastive Learning, Cultural icon, Experiential learning, Generative adversarial networks, Generative AI, human-computer interaction, Immersive, Pedagogical practices, Role-based, Teachers', Teaching, Video display, Virtual environments, Virtual Reality
@inproceedings{casas_structured_2025,
title = {Structured Teaching Prompt Articulation for Generative-{AI} Role Embodiment with Augmented Mirror Video Displays},
author = {Casas, L. and Mitchell, K.},
editor = {Spencer, S. N.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85217997060&doi=10.1145%2F3703619.3706049&partnerID=40&md5=fb1b42dadbdc8ac44eeaafa93abc7f2c},
doi = {10.1145/3703619.3706049},
isbn = {9798400713484},
year = {2025},
date = {2025-01-01},
booktitle = {Proc.: VRCAI - ACM SIGGRAPH Int. Conf. Virtual-Reality Contin. Appl. Ind.},
publisher = {Association for Computing Machinery, Inc},
abstract = {We present a classroom enhanced with augmented reality video display in which students adopt snapshots of their corresponding virtual personas according to their teacher's live articulated spoken educational theme, linearly, such as historical figures, famous scientists, cultural icons, and laterally according to archetypal categories such as world dance styles. We define a structure of generative AI prompt guidance to assist teachers with focused specified visual role embodiment stylization. By leveraging role-based immersive embodiment, our proposed approach enriches pedagogical practices that prioritize experiential learning. © 2025 Elsevier B.V., All rights reserved.},
keywords = {Artificial intelligence, Augmented Reality, Computer interaction, Contrastive Learning, Cultural icon, Experiential learning, Generative adversarial networks, Generative AI, human-computer interaction, Immersive, Pedagogical practices, Role-based, Teachers', Teaching, Video display, Virtual environments, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Guo, P.; Zhang, Q.; Tian, C.; Xue, W.; Feng, X.
Digital Human Techniques for Education Reform Proceedings Article
In: ICETM - Proc. Int. Conf. Educ. Technol. Manag., pp. 173–178, Association for Computing Machinery, Inc, 2025, ISBN: 9798400717468 (ISBN).
Abstract | Links | BibTeX | Tags: Augmented Reality, Contrastive Learning, Digital elevation model, Digital human technique, Digital Human Techniques, Digital humans, Education Reform, Education reforms, Educational Technology, Express emotions, Federated learning, Human behaviors, Human form models, Human techniques, Immersive, Innovative technology, Modeling languages, Natural language processing systems, Teachers', Teaching, Virtual environments, Virtual humans
@inproceedings{guo_digital_2025,
title = {Digital Human Techniques for Education Reform},
author = {Guo, P. and Zhang, Q. and Tian, C. and Xue, W. and Feng, X.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105001671326&doi=10.1145%2F3711403.3711428&partnerID=40&md5=fe9030a088666939b363c6c8c2fc5f66},
doi = {10.1145/3711403.3711428},
isbn = {9798400717468},
year = {2025},
date = {2025-01-01},
booktitle = {ICETM - Proc. Int. Conf. Educ. Technol. Manag.},
pages = {173--178},
publisher = {Association for Computing Machinery, Inc},
abstract = {The rapid evolution of artificial intelligence, big data, and generative AI models has ushered in significant transformations across various sectors, including education. Digital Human Technique, an innovative technology grounded in advanced computer science and artificial intelligence, is reshaping educational paradigms by enabling virtual humans to simulate human behavior, express emotions, and interact with users. This paper explores the application of Digital Human Technique in education reform, focusing on creating immersive, intelligent classroom experiences that foster meaningful interactions between teachers and students. We define Digital Human Technique and delve into its key technical components such as character modeling and rendering, natural language processing, computer vision, and augmented reality technologies. Our methodology involves analyzing the role of educational digital humans created through these technologies, assessing their impact on educational processes, and examining various application scenarios in educational reform. Results indicate that Digital Human Technique significantly enhances the learning experience by enabling personalized teaching, increasing engagement, and fostering emotional connections. Educational digital humans serve as virtual teachers, interactive learning aids, and facilitators of emotional interaction, effectively addressing the challenges of traditional educational methods. They also promote a deeper understanding of complex concepts through simulated environments and interactive digital content. © 2025 Elsevier B.V., All rights reserved.},
keywords = {Augmented Reality, Contrastive Learning, Digital elevation model, Digital human technique, Digital Human Techniques, Digital humans, Education Reform, Education reforms, Educational Technology, Express emotions, Federated learning, Human behaviors, Human form models, Human techniques, Immersive, Innovative technology, Modeling languages, Natural language processing systems, Teachers', Teaching, Virtual environments, Virtual humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Ly, C.; Peng, E.; Liu, K.; Qin, A.; Howe, G.; Cheng, A. Y.; Cuadra, A.
Museum in the Classroom: Engaging Students with Augmented Reality Museum Artifacts and Generative AI Proceedings Article
In: Conf Hum Fact Comput Syst Proc, Association for Computing Machinery, 2025, ISBN: 9798400713958 (ISBN); 9798400713941 (ISBN).
Abstract | Links | BibTeX | Tags: Artifact or System, Child/parent, Children/Parents, Digitisation, Education/Learning, Engaging students, Engineering education, Field trips, Interactive learning, Learning experiences, Rich learning experiences, Students, Teachers', Teaching
@inproceedings{ly_museum_2025,
title = {Museum in the Classroom: Engaging Students with Augmented Reality Museum Artifacts and Generative {AI}},
author = {Ly, C. and Peng, E. and Liu, K. and Qin, A. and Howe, G. and Cheng, A. Y. and Cuadra, A.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005741934&doi=10.1145%2F3706599.3719787&partnerID=40&md5=0a405b14f099e1132e32dd10c121eb37},
doi = {10.1145/3706599.3719787},
isbn = {9798400713958; 9798400713941},
year = {2025},
date = {2025-01-01},
booktitle = {Conf Hum Fact Comput Syst Proc},
publisher = {Association for Computing Machinery},
abstract = {Museum field trips provide a rich learning experience for children. However, they are complex and expensive for teachers to organize. Fortunately, digitization of museum artifacts makes it possible to use museum resources within the classroom. Museum in the Classroom (MITC) explores how augmented reality (AR) and generative artificial intelligence (AI) can create an interactive learning experience around museum artifacts. This iPad app allows educators to select historical topics from a curated artifact library, generating AR-based exhibits that students can explore. MITC engages students through interactive AR artifacts, AI-driven chatbots, and AI-generated quiz questions, based on a real exhibition at the Cantor Arts Center at Stanford University. A formative study with middle schoolers (N = 20) demonstrated that the app increased engagement compared to traditional learning methods. MITC also fostered a playful and comfortable environment to interact with museum artifacts. Our findings suggest that combining AR and AI has the potential to enrich classroom learning and offer a scalable alternative to traditional museum visits. © 2025 Elsevier B.V., All rights reserved.},
keywords = {Artifact or System, Child/parent, Children/Parents, Digitisation, Education/Learning, Engaging students, Engineering education, Field trips, Interactive learning, Learning experiences, Rich learning experiences, Students, Teachers', Teaching},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhu, X. T.; Cheerman, H.; Cheng, M.; Kiami, S. R.; Chukoskie, L.; McGivney, E.
Designing VR Simulation System for Clinical Communication Training with LLMs-Based Embodied Conversational Agents Proceedings Article
In: Conf Hum Fact Comput Syst Proc, Association for Computing Machinery, 2025, ISBN: 9798400713958 (ISBN); 9798400713941 (ISBN).
Abstract | Links | BibTeX | Tags: Clinical communications, Clinical Simulation, Communications training, Curricula, Embodied conversational agent, Embodied Conversational Agents, Health professions, Intelligent virtual agents, Language Model, Medical education, Model-based OPC, Patient simulators, Personnel training, Students, Teaching, User centered design, Virtual environments, Virtual Reality, VR simulation, VR simulation systems
@inproceedings{zhu_designing_2025,
title = {Designing {VR} Simulation System for Clinical Communication Training with {LLMs}-Based Embodied Conversational Agents},
author = {Zhu, X. T. and Cheerman, H. and Cheng, M. and Kiami, S. R. and Chukoskie, L. and McGivney, E.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005754066&doi=10.1145%2F3706599.3719693&partnerID=40&md5=6ad72d5adf98c2ca2437b5a3f6508a88},
doi = {10.1145/3706599.3719693},
isbn = {9798400713958; 9798400713941},
year = {2025},
date = {2025-01-01},
booktitle = {Conf Hum Fact Comput Syst Proc},
publisher = {Association for Computing Machinery},
abstract = {VR simulation in Health Professions (HP) education demonstrates huge potential, but fixed learning content with little customization limits its application beyond lab environments. To address these limitations in the context of VR for patient communication training, we conducted a user-centered study involving semi-structured interviews with advanced HP students to understand their challenges in clinical communication training and perceptions of VR-based solutions. From this, we derived design insights emphasizing the importance of realistic scenarios, simple interactions, and unpredictable dialogues. Building on these insights, we developed the Virtual AI Patient Simulator (VAPS), a novel VR system powered by Large Language Models (LLMs) and Embodied Conversational Agents (ECAs), supporting dynamic and customizable patient interactions for immersive learning. We also provided an example of how clinical professors could use user-friendly design forms to create personalized scenarios that align with course objectives in VAPS and discuss future implications of integrating AI-driven technologies into VR education. © 2025 Elsevier B.V., All rights reserved.},
keywords = {Clinical communications, Clinical Simulation, Communications training, Curricula, Embodied conversational agent, Embodied Conversational Agents, Health professions, Intelligent virtual agents, Language Model, Medical education, Model-based OPC, Patient simulators, Personnel training, Students, Teaching, User centered design, Virtual environments, Virtual Reality, VR simulation, VR simulation systems},
pubstate = {published},
tppubtype = {inproceedings}
}
Dang, B.; Huynh, L.; Gul, F.; Rosé, C.; Järvelä, S.; Nguyen, A.
Human–AI collaborative learning in mixed reality: Examining the cognitive and socio-emotional interactions Journal Article
In: British Journal of Educational Technology, vol. 56, no. 5, pp. 2078–2101, 2025, ISSN: 00071013 (ISSN); 14678535 (ISSN), (Publisher: John Wiley and Sons Inc).
Abstract | Links | BibTeX | Tags: Artificial intelligence agent, Collaborative learning, Educational robots, Embodied agent, Emotional intelligence, Emotional interactions, Generative adversarial networks, generative artificial intelligence, Hierarchical clustering, Human–AI collaboration, Interaction pattern, Mixed reality, ordered network analysis, Ordered network analyze, Social behavior, Social interactions, Social psychology, Students, Supervised learning, Teaching
@article{dang_humanai_2025,
title = {Human–{AI} collaborative learning in mixed reality: Examining the cognitive and socio-emotional interactions},
author = {Dang, B. and Huynh, L. and Gul, F. and Rosé, C. and Järvelä, S. and Nguyen, A.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105007896240&doi=10.1111%2Fbjet.13607&partnerID=40&md5=1c80a5bfe5917e7a9b14ee5809da232f},
doi = {10.1111/bjet.13607},
issn = {00071013; 14678535},
year = {2025},
date = {2025-01-01},
journal = {British Journal of Educational Technology},
volume = {56},
number = {5},
pages = {2078--2101},
abstract = {The rise of generative artificial intelligence (GAI), especially with multimodal large language models like GPT-4o, sparked transformative potential and challenges for learning and teaching. With potential as a cognitive offloading tool, GAI can enable learners to focus on higher-order thinking and creativity. Yet, this also raises questions about integration into traditional education due to the limited research on learners' interactions with GAI. Some studies with GAI focus on text-based human–AI interactions, while research on embodied GAI in immersive environments like mixed reality (MR) remains unexplored. To address this, this study investigates interaction dynamics between learners and embodied GAI agents in MR, examining cognitive and socio-emotional interactions during collaborative learning. We investigated the paired interactive patterns between a student and an embodied GAI agent in MR, based on data from 26 higher education students with 1317 recorded activities. Data were analysed using a multi-layered learning analytics approach, including quantitative content analysis, sequence analysis via hierarchical clustering and pattern analysis through ordered network analysis (ONA). Our findings identified two interaction patterns: type (1) AI-led Supported Exploratory Questioning (AISQ) and type (2) Learner-Initiated Inquiry (LII) group. Despite their distinction in characteristic, both types demonstrated comparable levels of socio-emotional engagement and exhibited meaningful cognitive engagement, surpassing the superficial content reproduction that can be observed in interactions with GPT models. This study contributes to the human–AI collaboration and learning studies, extending understanding to learning in MR environments and highlighting implications for designing AI-based educational tools. 
Practitioner notes What is already known about this topic Socio-emotional interactions are fundamental to cognitive processes and play a critical role in collaborative learning. Generative artificial intelligence (GAI) holds transformative potential for education but raises questions about how learners interact with such technology. Most existing research focuses on text-based interactions with GAI; there is limited empirical evidence on how embodied GAI agents within immersive environments like Mixed Reality (MR) influence the cognitive and socio-emotional interactions for learning and regulation. What this paper adds Provides first empirical insights into cognitive and socio-emotional interaction patterns between learners and embodied GAI agents in MR environments. Identifies two distinct interaction patterns: AISQ type (structured, guided, supportive) and LII type (inquiry-driven, exploratory, engaging), demonstrating how these patterns influence collaborative learning dynamics. Shows that both interaction types facilitate meaningful cognitive engagement, moving beyond superficial content reproduction commonly associated with GAI interactions. Implications for practice and/or policy Insights from the identified interaction patterns can inform the design of teaching strategies that effectively integrate embodied GAI agents to enhance both cognitive and socio-emotional engagement. Findings can guide the development of AI-based educational tools that capitalise on the capabilities of embodied GAI agents, supporting a balance between structured guidance and exploratory learning. Highlights the need for ethical considerations in adopting embodied GAI agents, particularly regarding the human-like realism of these agents and potential impacts on learner dependency and interaction norms. © 2025 Elsevier B.V., All rights reserved.},
note = {Publisher: John Wiley and Sons Inc},
keywords = {Artificial intelligence agent, Collaborative learning, Educational robots, Embodied agent, Emotional intelligence, Emotional interactions, Generative adversarial networks, generative artificial intelligence, Hierarchical clustering, Human–AI collaboration, Interaction pattern, Mixed reality, ordered network analysis, Ordered network analyze, Social behavior, Social interactions, Social psychology, Students, Supervised learning, Teaching},
pubstate = {published},
tppubtype = {article}
}
Hassoulas, A.; Crawford, O.; Hemrom, S.; Almeida, A.; Coffey, M. J.; Hodgson, M.; Leveridge, B.; Karwa, D.; Lethbridge, A.; Williams, H.; Voisey, A.; Reed, K.; Patel, S.; Hart, K.; Shaw, H.
A pilot study investigating the efficacy of technology enhanced case based learning (CBL) in small group teaching Journal Article
In: Scientific Reports, vol. 15, no. 1, 2025, ISSN: 20452322 (ISSN), (Publisher: Nature Research).
Abstract | Links | BibTeX | Tags: coronavirus disease 2019, Covid-19, epidemiology, female, human, Humans, Learning, male, Medical, Medical student, Pilot Projects, pilot study, problem based learning, Problem-Based Learning, procedures, SARS-CoV-2, Severe acute respiratory syndrome coronavirus 2, Students, Teaching, Virtual Reality
@article{hassoulas_pilot_2025,
title = {A pilot study investigating the efficacy of technology enhanced case based learning ({CBL}) in small group teaching},
author = {Hassoulas, A. and Crawford, O. and Hemrom, S. and Almeida, A. and Coffey, M. J. and Hodgson, M. and Leveridge, B. and Karwa, D. and Lethbridge, A. and Williams, H. and Voisey, A. and Reed, K. and Patel, S. and Hart, K. and Shaw, H.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105004223025&doi=10.1038%2Fs41598-025-99764-5&partnerID=40&md5=f1b0da1e40a22712db764ea952e8833a},
doi = {10.1038/s41598-025-99764-5},
issn = {20452322},
year = {2025},
date = {2025-01-01},
journal = {Scientific Reports},
volume = {15},
number = {1},
abstract = {The recent paradigm shift in teaching provision within higher education, following the COVID-19 pandemic, has led to blended models of learning prevailing in the pedagogic literature and in education practice. This shift has also resulted in an abundance of tools and technologies coming to market. Whilst the value of integrating technology into teaching and assessment has been well-established in the literature, the magnitude of choice available to educators and to students can be overwhelming. The current pilot investigated the feasibility of integrating key technologies in delivering technology-enhanced learning (TEL) case-based learning (CBL) within a sample of year two medical students. The cohort was selected at random, as was the control group receiving conventional CBL. Both groups were matched on prior academic performance. The TEL-CBL group received (1) in-person tutorials delivered within an immersive learning suite, (2) access to 3D anatomy software to explore during their self-directed learning time, (3) virtual reality (VR) guided anatomy exploration during tutorials, (4) access to a generative AI-based simulated virtual patient repository to practice key skills such as communication and history taking, and (5) an immersive medical emergency simulation. Metrics assessed included formative academic performance, student learning experience, and confidence in relation to communication and clinical skills. The results revealed that the TEL-CBL group outperformed their peers in successive formative assessments (p < 0.05), engaged thoroughly with the technologies at their disposal, and reported that these technologies enhanced their learning experience. Furthermore, students reported that access to the GenAI-simulated virtual patient platform and the immersive medical emergency simulation improved their clinical confidence and gave them a useful insight into what they can expect during the clinical phase of their medical education. 
The results are discussed in relation to the advantages that key emerging technologies may play in enhancing student performance, experience and confidence. © 2025 Elsevier B.V., All rights reserved.},
note = {Publisher: Nature Research},
keywords = {coronavirus disease 2019, Covid-19, epidemiology, female, human, Humans, Learning, male, Medical, Medical student, Pilot Projects, pilot study, problem based learning, Problem-Based Learning, procedures, SARS-CoV-2, Severe acute respiratory syndrome coronavirus 2, Students, Teaching, Virtual Reality},
pubstate = {published},
tppubtype = {article}
}
2024
Harinee, S.; Raja, R. Vimal; Mugila, E.; Govindharaj, I.; Sanjaykumar, V.; Ragavendhiran, T.
Elevating Medical Training: A Synergistic Fusion of AI and VR for Immersive Anatomy Learning and Practical Procedure Mastery Proceedings Article
In: Int. Conf. Syst., Comput., Autom. Netw., ICSCAN, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 9798331510022 (ISBN).
Abstract | Links | BibTeX | Tags: 'current, Anatomy education, Anatomy educations, Computer interaction, Curricula, Embodied virtual assistant, Embodied virtual assistants, Generative AI, Human- Computer Interaction, Immersive, Intelligent virtual agents, Medical computing, Medical education, Medical procedure practice, Medical procedures, Medical training, Personnel training, Students, Teaching, Three dimensional computer graphics, Usability engineering, Virtual assistants, Virtual environments, Virtual Reality, Visualization
@inproceedings{harinee_elevating_2024,
title = {Elevating Medical Training: A Synergistic Fusion of {AI} and {VR} for Immersive Anatomy Learning and Practical Procedure Mastery},
author = {Harinee, S. and Raja, R. Vimal and Mugila, E. and Govindharaj, I. and Sanjaykumar, V. and Ragavendhiran, T.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105000334626&doi=10.1109%2FICSCAN62807.2024.10894451&partnerID=40&md5=ae7a491686ade8cebdc276f585a6f4f0},
doi = {10.1109/ICSCAN62807.2024.10894451},
isbn = {9798331510022},
year = {2024},
date = {2024-01-01},
booktitle = {Int. Conf. Syst., Comput., Autom. Netw., ICSCAN},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Virtual reality with its 3D visualization have brought an overwhelming change in the face of medical education, especially for courses like human anatomy. The proposed virtual reality system to bring massive improvements in the education received by a medical student studying for their degree courses. The project puts forward the text-to-speech and speech-to-text aligned system that simplifies the usage of a chatbot empowered by OpenAI GPT-4 and allows pupils to vocally speak with Avatar, the set virtual assistant. Contrary to the current methodologies, the setup of virtual reality is powered by avatars and thus covers an enhanced virtual assistant environment. Avatars offer students the set of repeated practicing of medical procedures on it, and the real uniqueness in the proposed product. The developed virtual reality environment is enhanced over other current training techniques where a student should interact and immerse in three-dimensional human organs for visualization in three dimensions and hence get better knowledge of the subjects in greater depth. A virtual assistant guides the whole process, giving insights and support to help the student bridge the gap from theory to practice. Then, the system is essentially Knowledge based and Analysis based approach. The combination of generative AI along with embodied virtual agents has great potential when it comes to customized virtual conversation assistant for much wider range of applications. The study brings out the value of acquiring hands-on skills through simulated medical procedures and opens new frontiers of research and development in AI, VR, and medical education. In addition to assessing the effectiveness of such novel functionalities, the study also explores user experience related dimensions such as usability, task loading, and the sense of presence in proposed virtual medical environment. © 2025 Elsevier B.V., All rights reserved.},
keywords = {'current, Anatomy education, Anatomy educations, Computer interaction, Curricula, Embodied virtual assistant, Embodied virtual assistants, Generative AI, Human- Computer Interaction, Immersive, Intelligent virtual agents, Medical computing, Medical education, Medical procedure practice, Medical procedures, Medical training, Personnel training, Students, Teaching, Three dimensional computer graphics, Usability engineering, Virtual assistants, Virtual environments, Virtual Reality, Visualization},
pubstate = {published},
tppubtype = {inproceedings}
}
Domenichini, D.; Bucchiarone, A.; Chiarello, F.; Schiavo, G.; Fantoni, G.
An AI-Driven Approach for Enhancing Engagement and Conceptual Understanding in Physics Education Proceedings Article
In: IEEE Global Eng. Edu. Conf., EDUCON, IEEE Computer Society, 2024, ISBN: 21659559 (ISSN); 979-835039402-3 (ISBN).
Abstract | Links | BibTeX | Tags: Adaptive Learning, Artificial intelligence, Artificial intelligence in education, Artificial Intelligence in Education (AIED), Conceptual Understanding, Educational System, Educational systems, Gamification, Generative AI, generative artificial intelligence, Learning Activity, Learning systems, Physics Education, Teachers', Teaching, Virtual Reality
@inproceedings{domenichini_ai-driven_2024,
title = {An {AI}-Driven Approach for Enhancing Engagement and Conceptual Understanding in Physics Education},
author = {Domenichini, D. and Bucchiarone, A. and Chiarello, F. and Schiavo, G. and Fantoni, G.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85199035695&doi=10.1109%2fEDUCON60312.2024.10578670&partnerID=40&md5=4cf9f89e97664ae6d618a90f2dbc23e0},
doi = {10.1109/EDUCON60312.2024.10578670},
issn = {21659559},
isbn = {979-835039402-3},
year = {2024},
date = {2024-01-01},
booktitle = {IEEE Global Eng. Edu. Conf., EDUCON},
publisher = {IEEE Computer Society},
abstract = {This Work in Progress paper introduces the design of an innovative educational system that leverages Artificial Intelligence (AI) to address challenges in physics education. The primary objective is to create a system that dynamically adapts to the individual needs and preferences of students while maintaining user-friendliness for teachers, allowing them to tailor their teaching methods. The emphasis is on fostering motivation and engagement, achieved through the implementation of a gamified virtual environment and a strong focus on personalization. Our aim is to develop a system capable of autonomously generating learning activities and constructing effective learning paths, all under the supervision and interaction of teachers. The generation of learning activities is guided by educational taxonomies that delineate and categorize the cognitive processes involved in these activities. The proposed educational system seeks to address challenges identified by Physics Education Research (PER), which offers valuable insights into how individuals learn physics and provides strategies to enhance the overall quality of physics education. Our specific focus revolves around two crucial aspects: concentrating on the conceptual understanding of physics concepts and processes, and fostering knowledge integration and coherence across various physics topics. These aspects are deemed essential for cultivating enduring knowledge and facilitating practical applications in the field of physics. © 2024 IEEE.},
keywords = {Adaptive Learning, Artificial intelligence, Artificial intelligence in education, Artificial Intelligence in Education (AIED), Conceptual Understanding, Educational System, Educational systems, Gamification, Generative AI, generative artificial intelligence, Learning Activity, Learning systems, Physics Education, Teachers', Teaching, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Jia, Y.; Sin, Z. P. T.; Wang, X. E.; Li, C.; Ng, P. H. F.; Huang, X.; Dong, J.; Wang, Y.; Baciu, G.; Cao, J.; Li, Q.
NivTA: Towards a Naturally Interactable Edu-Metaverse Teaching Assistant for CAVE Proceedings Article
In: Proc. - IEEE Int. Conf. Metaverse Comput., Netw., Appl., MetaCom, pp. 57–64, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 9798331515997 (ISBN).
Abstract | Links | BibTeX | Tags: Active learning, Adversarial machine learning, cave automatic virtual environment, Cave automatic virtual environments, Caves, Chatbots, Contrastive Learning, Digital elevation model, Federated learning, Interactive education, Language Model, Large language model agent, Learning Activity, LLM agents, Metaverses, Model agents, Natural user interface, Students, Teaching, Teaching assistants, Virtual environments, Virtual Reality, virtual teaching assistant, Virtual teaching assistants
@inproceedings{jia_nivta_2024,
title = {{NivTA}: Towards a Naturally Interactable Edu-Metaverse Teaching Assistant for {CAVE}},
author = {Jia, Y. and Sin, Z. P. T. and Wang, X. E. and Li, C. and Ng, P. H. F. and Huang, X. and Dong, J. and Wang, Y. and Baciu, G. and Cao, J. and Li, Q.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85211447638&doi=10.1109%2FMetaCom62920.2024.00023&partnerID=40&md5=123c4e543f778f928741ddbe174f4c79},
doi = {10.1109/MetaCom62920.2024.00023},
isbn = {9798331515997},
year = {2024},
date = {2024-01-01},
booktitle = {Proc. - IEEE Int. Conf. Metaverse Comput., Netw., Appl., MetaCom},
pages = {57--64},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Edu-metaverse is a specialized metaverse dedicated for interactive education in an immersive environment. Its main purpose is to immerse the learners in a digital environment and conduct learning activities that could mirror reality. Not only does it enable activities that may be difficult to perform in the real world, but it also extends the interaction to personalized and CL. This is a more effective pedagogical approach as it tends to enhance the motivation and engagement of students and it increases their active participation in lessons delivered. To this extend, we propose to realize an interactive virtual teaching assistant called NivTA. To make NivTA easily accessible and engaging by multiple users simultaneously, we also propose to use a CAVE virtual environment (CAVE-VR) as a "metaverse window"into concepts, ideas, topics, and learning activities. The students simply need to step into the CAVE-VR and interact with a life-size teaching assistant that they can engage with naturally, as if they are approaching a real person. Instead of text-based interaction currently developed for large language models (LLM), NivTA is given additional cues regarding the users so it can react more naturally via a specific prompt design. For example, the user can simply point to an educational concept and ask NivTA to explain what it is. To guide NivTA onto the educational concept, the prompt is also designed to feed in an educational KG to provide NivTA with the context of the student's question. The NivTA system is an integration of several components that are discussed in this paper. We further describe how the system is designed and implemented, along with potential applications and future work on interactive collaborative edu-metaverse environments dedicated for teaching and learning. © 2024 Elsevier B.V., All rights reserved.},
keywords = {Active learning, Adversarial machine learning, cave automatic virtual environment, Cave automatic virtual environments, Caves, Chatbots, Contrastive Learning, Digital elevation model, Federated learning, Interactive education, Language Model, Large language model agent, Learning Activity, LLM agents, Metaverses, Model agents, Natural user interface, Students, Teaching, Teaching assistants, Virtual environments, Virtual Reality, virtual teaching assistant, Virtual teaching assistants},
pubstate = {published},
tppubtype = {inproceedings}
}
Ivanova, M.; Grosseck, G.; Holotescu, C.
Unveiling Insights: A Bibliometric Analysis of Artificial Intelligence in Teaching Journal Article
In: Informatics, vol. 11, no. 1, 2024, ISSN: 22279709 (ISSN), (Publisher: Multidisciplinary Digital Publishing Institute (MDPI)).
Abstract | Links | BibTeX | Tags: Artificial intelligence, ChatGPT, Intelligent Environment, large language models, learning analytics, Teaching
@article{ivanova_unveiling_2024,
title = {Unveiling Insights: A Bibliometric Analysis of Artificial Intelligence in Teaching},
author = {Ivanova, M. and Grosseck, G. and Holotescu, C.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85188949348&doi=10.3390%2Finformatics11010010&partnerID=40&md5=528da77c867555fb0569ba6c14f887d1},
doi = {10.3390/informatics11010010},
issn = {2227-9709},
year = {2024},
date = {2024-01-01},
journal = {Informatics},
volume = {11},
number = {1},
abstract = {The penetration of intelligent applications in education is rapidly increasing, posing a number of questions of a different nature to the educational community. This paper is coming to analyze and outline the influence of artificial intelligence (AI) on teaching practice which is an essential problem considering its growing utilization and pervasion on a global scale. A bibliometric approach is applied to outdraw the “big picture” considering gathered bibliographic data from scientific databases Scopus and Web of Science. Data on relevant publications matching the query “artificial intelligence and teaching” over the past 5 years have been researched and processed through Biblioshiny in R environment in order to establish a descriptive structure of the scientific production, to determine the impact of scientific publications, to trace collaboration patterns and to identify key research areas and emerging trends. The results point out the growth in scientific production lately that is an indicator of increased interest in the investigated topic by researchers who mainly work in collaborative teams as some of them are from different countries and institutions. The identified key research areas include techniques used in educational applications, such as artificial intelligence, machine learning, and deep learning. Additionally, there is a focus on applicable technologies like ChatGPT, learning analytics, and virtual reality. The research also explores the context of application for these techniques and technologies in various educational settings, including teaching, higher education, active learning, e-learning, and online learning. Based on our findings, the trending research topics can be encapsulated by terms such as ChatGPT, chatbots, AI, generative AI, machine learning, emotion recognition, large language models, convolutional neural networks, and decision theory. These findings offer valuable insights into the current landscape of research interests in the field. © 2024 Elsevier B.V., All rights reserved.},
note = {Publisher: Multidisciplinary Digital Publishing Institute (MDPI)},
keywords = {Artificial intelligence, ChatGPT, Intelligent Environment, large language models, learning analytics, Teaching},
pubstate = {published},
tppubtype = {article}
}
Liu, M.; M'Hiri, F.
Beyond Traditional Teaching: Large Language Models as Simulated Teaching Assistants in Computer Science Proceedings Article
In: SIGCSE - Proc. ACM Tech. Symp. Comput. Sci. Educ., pp. 743–749, Association for Computing Machinery, Inc, 2024, ISBN: 9798400704246 (ISBN); 9798400704239 (ISBN).
Abstract | Links | BibTeX | Tags: Adaptive teaching, ChatGPT, Computational Linguistics, CS education, E-Learning, Education computing, Engineering education, GPT, Language Model, LLM, machine learning, Machine-learning, Novice programmer, novice programmers, Openai, Programming, Python, Students, Teaching, Virtual Reality
@inproceedings{liu_beyond_2024,
title = {Beyond Traditional Teaching: Large Language Models as Simulated Teaching Assistants in Computer Science},
author = {Liu, M. and M'Hiri, F.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85189289344&doi=10.1145%2F3626252.3630789&partnerID=40&md5=7b0f42d4bf0e1706de8691200e4a0e00},
doi = {10.1145/3626252.3630789},
isbn = {9798400704246; 9798400704239},
year = {2024},
date = {2024-01-01},
booktitle = {SIGCSE - Proc. ACM Tech. Symp. Comput. Sci. Educ.},
volume = {1},
pages = {743--749},
publisher = {Association for Computing Machinery, Inc},
abstract = {As the prominence of Large Language Models (LLMs) grows in various sectors, their potential in education warrants exploration. In this study, we investigate the feasibility of employing GPT-3.5 from OpenAI, as an LLM teaching assistant (TA) or a virtual TA in computer science (CS) courses. The objective is to enhance the accessibility of CS education while maintaining academic integrity by refraining from providing direct solutions to current-semester assignments. Targeting Foundations of Programming (COMP202), an undergraduate course that introduces students to programming with Python, we have developed a virtual TA using the LangChain framework, known for integrating language models with diverse data sources and environments. The virtual TA assists students with their code and clarifies complex concepts. For homework questions, it is designed to guide students with hints rather than giving out direct solutions. We assessed its performance first through a qualitative evaluation, then a survey-based comparative analysis, using a mix of questions commonly asked on the COMP202 discussion board and questions created by the authors. Our preliminary results indicate that the virtual TA outperforms human TAs on clarity and engagement, matching them on accuracy when the question is non-assignment-specific, for which human TAs still proved more reliable. These findings suggest that while virtual TAs, leveraging the capabilities of LLMs, hold great promise towards making CS education experience more accessible and engaging, their optimal use necessitates human supervision. We conclude by identifying several directions that could be explored in future implementations. © 2024 Elsevier B.V., All rights reserved.},
keywords = {Adaptive teaching, ChatGPT, Computational Linguistics, CS education, E-Learning, Education computing, Engineering education, GPT, Language Model, LLM, machine learning, Machine-learning, Novice programmer, novice programmers, Openai, Programming, Python, Students, Teaching, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Scott, A. J. S.; McCuaig, F.; Lim, V.; Watkins, W.; Wang, J.; Strachan, G.
Revolutionizing Nurse Practitioner Training: Integrating Virtual Reality and Large Language Models for Enhanced Clinical Education Proceedings Article
In: Strudwick, G.; Hardiker, N.R.; Rees, G.; Cook, R.; Cook, R.; Lee, Y.J. (Ed.): Stud. Health Technol. Informatics, pp. 671–672, IOS Press BV, 2024, ISSN: 0926-9630; ISBN: 978-1-64368-527-4 (ISBN).
Abstract | Links | BibTeX | Tags: 3D modeling, 3D models, 3d-modeling, adult, anamnesis, clinical decision making, clinical education, Clinical Simulation, Computational Linguistics, computer interface, Computer-Assisted Instruction, conference paper, Curriculum, Decision making, E-Learning, Education, Health care education, Healthcare Education, human, Humans, Language Model, Large language model, large language models, Mesh generation, Model animations, Modeling languages, nurse practitioner, Nurse Practitioners, Nursing, nursing education, nursing student, OSCE preparation, procedures, simulation, Teaching, therapy, Training, Training program, User-Computer Interface, Virtual Reality, Virtual reality training
@inproceedings{scott_revolutionizing_2024,
title = {Revolutionizing Nurse Practitioner Training: Integrating Virtual Reality and Large Language Models for Enhanced Clinical Education},
author = {Scott, A. J. S. and McCuaig, F. and Lim, V. and Watkins, W. and Wang, J. and Strachan, G.},
editor = {Strudwick, G. and Hardiker, N. R. and Rees, G. and Cook, R. and Cook, R. and Lee, Y. J.},
internal-note = {editor list as exported contains "Cook, R." twice — presumably an export duplication; verify against the publisher record},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85199593781&doi=10.3233%2fSHTI240272&partnerID=40&md5=90c7bd43ba978f942723e6cf1983ffb3},
doi = {10.3233/SHTI240272},
issn = {0926-9630},
isbn = {978-1-64368-527-4},
year = {2024},
date = {2024-01-01},
booktitle = {Stud. Health Technol. Informatics},
volume = {315},
pages = {671--672},
publisher = {IOS Press BV},
abstract = {This project introduces an innovative virtual reality (VR) training program for student Nurse Practitioners, incorporating advanced 3D modeling, animation, and Large Language Models (LLMs). Designed to simulate realistic patient interactions, the program aims to improve communication, history taking, and clinical decision-making skills in a controlled, authentic setting. This abstract outlines the methods, results, and potential impact of this cutting-edge educational tool on nursing education. © 2024 The Authors.},
keywords = {3D modeling, 3D models, 3d-modeling, adult, anamnesis, clinical decision making, clinical education, Clinical Simulation, Computational Linguistics, computer interface, Computer-Assisted Instruction, conference paper, Curriculum, Decision making, E-Learning, Education, Health care education, Healthcare Education, human, Humans, Language Model, Large language model, large language models, Mesh generation, Model animations, Modeling languages, nurse practitioner, Nurse Practitioners, Nursing, nursing education, nursing student, OSCE preparation, procedures, simulation, Teaching, therapy, Training, Training program, User-Computer Interface, Virtual Reality, Virtual reality training},
pubstate = {published},
tppubtype = {inproceedings}
}
Sikström, P.; Valentini, C.; Sivunen, A.; Kärkkäinen, T.
Pedagogical agents communicating and scaffolding students' learning: High school teachers' and students' perspectives Journal Article
In: Computers and Education, vol. 222, 2024, ISSN: 03601315 (ISSN), (Publisher: Elsevier Ltd).
Abstract | Links | BibTeX | Tags: Adversarial machine learning, Agents communication, Augmented Reality, Contrastive Learning, Federated learning, Human communications, Human-Machine Communication, Human-to-human communication script, Human–machine communication, Human–machine communication (HMC), pedagogical agent, Pedagogical agents, Scaffolds, Scaffolds (biology), Secondary education, Student learning, Students, Teachers', Teaching, User-centered design, User-centred, Virtual environments
@article{sikstrom_pedagogical_2024,
title = {Pedagogical agents communicating and scaffolding students' learning: High school teachers' and students' perspectives},
author = {Sikström, P. and Valentini, C. and Sivunen, A. and Kärkkäinen, T.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85202198552&doi=10.1016%2Fj.compedu.2024.105140&partnerID=40&md5=a38656368911012d1b66ad221e67e8c8},
doi = {10.1016/j.compedu.2024.105140},
issn = {0360-1315},
year = {2024},
date = {2024-01-01},
journal = {Computers and Education},
volume = {222},
abstract = {Pedagogical agents (PAs) communicate verbally and non-verbally with students in digital and virtual reality/augmented reality learning environments. PAs have been shown to be beneficial for learning, and generative artificial intelligence, such as large language models, can improve PAs' communication abilities significantly. K-12 education is underrepresented in learning technology research and teachers' and students' insights have not been considered when developing PA communication. The current study addresses this research gap by conducting and analyzing semi-structured, in-depth interviews with eleven high school teachers and sixteen high school students about their expectations for PAs' communication capabilities. The interviewees identified relational and task-related communication capabilities that a PA should perform to communicate effectively with students and scaffold their learning. PA communication that is simultaneously affirmative and relational can induce immediacy, foster the relationship and engagement with a PA, and support students' learning management. Additionally, the teachers and students described the activities and technological aspects that should be considered when designing conversational PAs. The study showed that teachers and students applied human-to-human communication scripts when outlining their desired PA communication characteristics. The study offers novel insights and recommendations to researchers and developers on the communicational, pedagogical, and technological aspects that must be considered when designing communicative PAs that scaffold students’ learning, and discusses the contributions on human–machine communication in education. © 2024 Elsevier B.V., All rights reserved.},
note = {Publisher: Elsevier Ltd},
keywords = {Adversarial machine learning, Agents communication, Augmented Reality, Contrastive Learning, Federated learning, Human communications, Human-Machine Communication, Human-to-human communication script, Human–machine communication, Human–machine communication (HMC), pedagogical agent, Pedagogical agents, Scaffolds, Scaffolds (biology), Secondary education, Student learning, Students, Teachers', Teaching, User-centered design, User-centred, Virtual environments},
pubstate = {published},
tppubtype = {article}
}