AHCI RESEARCH GROUP
Publications
Papers published in international journals,
proceedings of conferences, workshops and books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTeX record for each paper.
2025
Li, Y.; Pang, E. C. H.; Ng, C. S. Y.; Azim, M.; Leung, H.
Enhancing Linear Algebra Education with AI-Generated Content in the CityU Metaverse: A Comparative Study Proceedings Article
In: T., Hao; J.G., Wu; X., Luo; Y., Sun; Y., Mu; S., Ge; W., Xie (Ed.): Lect. Notes Comput. Sci., pp. 3–16, Springer Science and Business Media Deutschland GmbH, 2025, ISBN: 03029743 (ISSN); 978-981964406-3 (ISBN).
Abstract | Links | BibTeX | Tags: Comparatives studies, Digital age, Digital interactions, digital twin, Educational metaverse, Engineering education, Generative AI, Immersive, Matrix algebra, Metaverse, Metaverses, Personnel training, Students, Teaching, University campus, Virtual environments, virtual learning environment, Virtual learning environments, Virtual Reality, Virtualization
@inproceedings{li_enhancing_2025,
  title     = {Enhancing Linear Algebra Education with {AI}-Generated Content in the {CityU} {Metaverse}: A Comparative Study},
  author    = {Li, Y. and Pang, E. C. H. and Ng, C. S. Y. and Azim, M. and Leung, H.},
  editor    = {Hao, T. and Wu, J. G. and Luo, X. and Sun, Y. and Mu, Y. and Ge, S. and Xie, W.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105003632691&doi=10.1007%2f978-981-96-4407-0_1&partnerID=40&md5=c067ba5d4c15e9c0353bf315680531fc},
  doi       = {10.1007/978-981-96-4407-0_1},
  issn      = {0302-9743},
  isbn      = {978-981-96-4406-3},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Lect. Notes Comput. Sci.},
  volume    = {15589 LNCS},
  pages     = {3--16},
  publisher = {Springer Science and Business Media Deutschland GmbH},
  abstract  = {In today’s digital age, the metaverse is emerging as the forthcoming evolution of the internet. It provides an immersive space that marks a new frontier in the way digital interactions are facilitated and experienced. In this paper, we present the CityU Metaverse, which aims to construct a digital twin of our university campus. It is designed as an educational virtual world where learning applications can be embedded in this virtual campus, supporting not only remote and collaborative learning but also professional technical training to enhance educational experiences through immersive and interactive learning. To evaluate the effectiveness of this educational metaverse, we conducted an experiment focused on 3D linear transformation in linear algebra, with teaching content generated by generative AI, comparing our metaverse system with traditional teaching methods. Knowledge tests and surveys assessing learning interest revealed that students engaged with the CityU Metaverse, facilitated by AI-generated content, outperformed those in traditional settings and reported greater enjoyment during the learning process. The work provides valuable perspectives on the behaviors and interactions within the metaverse by analyzing user preferences and learning outcomes. © The Author(s), under exclusive license to Springer Nature Singapore Pte Ltd. 2025.},
  keywords  = {Comparatives studies, Digital age, Digital interactions, digital twin, Educational metaverse, Engineering education, Generative AI, Immersive, Matrix algebra, Metaverse, Metaverses, Personnel training, Students, Teaching, University campus, Virtual environments, virtual learning environment, Virtual learning environments, Virtual Reality, Virtualization},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Dang, B.; Huynh, L.; Gul, F.; Rosé, C.; Järvelä, S.; Nguyen, A.
Human–AI collaborative learning in mixed reality: Examining the cognitive and socio-emotional interactions Journal Article
In: British Journal of Educational Technology, 2025, ISSN: 00071013 (ISSN).
Abstract | Links | BibTeX | Tags: Artificial intelligence agent, Collaborative learning, Educational robots, Embodied agent, Emotional intelligence, Emotional interactions, Generative adversarial networks, generative artificial intelligence, Hierarchical clustering, Human–AI collaboration, Interaction pattern, Mixed reality, ordered network analysis, Ordered network analyze, Social behavior, Social interactions, Social psychology, Students, Supervised learning, Teaching
@article{dang_humanai_2025,
  title     = {Human–{AI} collaborative learning in mixed reality: Examining the cognitive and socio-emotional interactions},
  author    = {Dang, B. and Huynh, L. and Gul, F. and Rosé, C. and Järvelä, S. and Nguyen, A.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105007896240&doi=10.1111%2fbjet.13607&partnerID=40&md5=b58a641069461f8880d1ee0adcf42457},
  doi       = {10.1111/bjet.13607},
  issn      = {0007-1013},
  year      = {2025},
  date      = {2025-01-01},
  journal   = {British Journal of Educational Technology},
  abstract  = {The rise of generative artificial intelligence (GAI), especially with multimodal large language models like GPT-4o, sparked transformative potential and challenges for learning and teaching. With potential as a cognitive offloading tool, GAI can enable learners to focus on higher-order thinking and creativity. Yet, this also raises questions about integration into traditional education due to the limited research on learners' interactions with GAI. Some studies with GAI focus on text-based human–AI interactions, while research on embodied GAI in immersive environments like mixed reality (MR) remains unexplored. To address this, this study investigates interaction dynamics between learners and embodied GAI agents in MR, examining cognitive and socio-emotional interactions during collaborative learning. We investigated the paired interactive patterns between a student and an embodied GAI agent in MR, based on data from 26 higher education students with 1317 recorded activities. Data were analysed using a multi-layered learning analytics approach, including quantitative content analysis, sequence analysis via hierarchical clustering and pattern analysis through ordered network analysis (ONA). Our findings identified two interaction patterns: type (1) AI-led Supported Exploratory Questioning (AISQ) and type (2) Learner-Initiated Inquiry (LII) group. Despite their distinction in characteristic, both types demonstrated comparable levels of socio-emotional engagement and exhibited meaningful cognitive engagement, surpassing the superficial content reproduction that can be observed in interactions with GPT models. This study contributes to the human–AI collaboration and learning studies, extending understanding to learning in MR environments and highlighting implications for designing AI-based educational tools. 
Practitioner notes What is already known about this topic Socio-emotional interactions are fundamental to cognitive processes and play a critical role in collaborative learning. Generative artificial intelligence (GAI) holds transformative potential for education but raises questions about how learners interact with such technology. Most existing research focuses on text-based interactions with GAI; there is limited empirical evidence on how embodied GAI agents within immersive environments like Mixed Reality (MR) influence the cognitive and socio-emotional interactions for learning and regulation. What this paper adds Provides first empirical insights into cognitive and socio-emotional interaction patterns between learners and embodied GAI agents in MR environments. Identifies two distinct interaction patterns: AISQ type (structured, guided, supportive) and LII type (inquiry-driven, exploratory, engaging), demonstrating how these patterns influence collaborative learning dynamics. Shows that both interaction types facilitate meaningful cognitive engagement, moving beyond superficial content reproduction commonly associated with GAI interactions. Implications for practice and/or policy Insights from the identified interaction patterns can inform the design of teaching strategies that effectively integrate embodied GAI agents to enhance both cognitive and socio-emotional engagement. Findings can guide the development of AI-based educational tools that capitalise on the capabilities of embodied GAI agents, supporting a balance between structured guidance and exploratory learning. Highlights the need for ethical considerations in adopting embodied GAI agents, particularly regarding the human-like realism of these agents and potential impacts on learner dependency and interaction norms. © 2025 The Author(s). British Journal of Educational Technology published by John Wiley & Sons Ltd on behalf of British Educational Research Association.},
  keywords  = {Artificial intelligence agent, Collaborative learning, Educational robots, Embodied agent, Emotional intelligence, Emotional interactions, Generative adversarial networks, generative artificial intelligence, Hierarchical clustering, Human–AI collaboration, Interaction pattern, Mixed reality, ordered network analysis, Ordered network analyze, Social behavior, Social interactions, Social psychology, Students, Supervised learning, Teaching},
  pubstate  = {published},
  tppubtype = {article}
}
Zhu, X. T.; Cheerman, H.; Cheng, M.; Kiami, S. R.; Chukoskie, L.; McGivney, E.
Designing VR Simulation System for Clinical Communication Training with LLMs-Based Embodied Conversational Agents Proceedings Article
In: Conf Hum Fact Comput Syst Proc, Association for Computing Machinery, 2025, ISBN: 979-840071395-8 (ISBN).
Abstract | Links | BibTeX | Tags: Clinical communications, Clinical Simulation, Communications training, Curricula, Embodied conversational agent, Embodied Conversational Agents, Health professions, Intelligent virtual agents, Language Model, Medical education, Model-based OPC, Patient simulators, Personnel training, Students, Teaching, User centered design, Virtual environments, Virtual Reality, VR simulation, VR simulation systems
@inproceedings{zhu_designing_2025,
  title     = {Designing {VR} Simulation System for Clinical Communication Training with {LLMs}-Based Embodied Conversational Agents},
  author    = {Zhu, X. T. and Cheerman, H. and Cheng, M. and Kiami, S. R. and Chukoskie, L. and McGivney, E.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005754066&doi=10.1145%2f3706599.3719693&partnerID=40&md5=4468fbd54b43d6779259300afd08632e},
  doi       = {10.1145/3706599.3719693},
  isbn      = {979-8-4007-1395-8},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Conf Hum Fact Comput Syst Proc},
  publisher = {Association for Computing Machinery},
  abstract  = {VR simulation in Health Professions (HP) education demonstrates huge potential, but fixed learning content with little customization limits its application beyond lab environments. To address these limitations in the context of VR for patient communication training, we conducted a user-centered study involving semi-structured interviews with advanced HP students to understand their challenges in clinical communication training and perceptions of VR-based solutions. From this, we derived design insights emphasizing the importance of realistic scenarios, simple interactions, and unpredictable dialogues. Building on these insights, we developed the Virtual AI Patient Simulator (VAPS), a novel VR system powered by Large Language Models (LLMs) and Embodied Conversational Agents (ECAs), supporting dynamic and customizable patient interactions for immersive learning. We also provided an example of how clinical professors could use user-friendly design forms to create personalized scenarios that align with course objectives in VAPS and discuss future implications of integrating AI-driven technologies into VR education. © 2025 Copyright held by the owner/author(s).},
  keywords  = {Clinical communications, Clinical Simulation, Communications training, Curricula, Embodied conversational agent, Embodied Conversational Agents, Health professions, Intelligent virtual agents, Language Model, Medical education, Model-based OPC, Patient simulators, Personnel training, Students, Teaching, User centered design, Virtual environments, Virtual Reality, VR simulation, VR simulation systems},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Guo, P.; Zhang, Q.; Tian, C.; Xue, W.; Feng, X.
Digital Human Techniques for Education Reform Proceedings Article
In: ICETM - Proc. Int. Conf. Educ. Technol. Manag., pp. 173–178, Association for Computing Machinery, Inc, 2025, ISBN: 979-840071746-8 (ISBN).
Abstract | Links | BibTeX | Tags: Augmented Reality, Contrastive Learning, Digital elevation model, Digital human technique, Digital Human Techniques, Digital humans, Education Reform, Education reforms, Educational Technology, Express emotions, Federated learning, Human behaviors, Human form models, Human techniques, Immersive, Innovative technology, Modeling languages, Natural language processing systems, Teachers', Teaching, Virtual environments, Virtual humans
@inproceedings{guo_digital_2025,
  title     = {Digital Human Techniques for Education Reform},
  author    = {Guo, P. and Zhang, Q. and Tian, C. and Xue, W. and Feng, X.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105001671326&doi=10.1145%2f3711403.3711428&partnerID=40&md5=dd96647315af9409d119f68f9cf4e980},
  doi       = {10.1145/3711403.3711428},
  isbn      = {979-8-4007-1746-8},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {ICETM - Proc. Int. Conf. Educ. Technol. Manag.},
  pages     = {173--178},
  publisher = {Association for Computing Machinery, Inc},
  abstract  = {The rapid evolution of artificial intelligence, big data, and generative AI models has ushered in significant transformations across various sectors, including education. Digital Human Technique, an innovative technology grounded in advanced computer science and artificial intelligence, is reshaping educational paradigms by enabling virtual humans to simulate human behavior, express emotions, and interact with users. This paper explores the application of Digital Human Technique in education reform, focusing on creating immersive, intelligent classroom experiences that foster meaningful interactions between teachers and students. We define Digital Human Technique and delve into its key technical components such as character modeling and rendering, natural language processing, computer vision, and augmented reality technologies. Our methodology involves analyzing the role of educational digital humans created through these technologies, assessing their impact on educational processes, and examining various application scenarios in educational reform. Results indicate that Digital Human Technique significantly enhances the learning experience by enabling personalized teaching, increasing engagement, and fostering emotional connections. Educational digital humans serve as virtual teachers, interactive learning aids, and facilitators of emotional interaction, effectively addressing the challenges of traditional educational methods. They also promote a deeper understanding of complex concepts through simulated environments and interactive digital content. © 2024 Copyright held by the owner/author(s).},
  keywords  = {Augmented Reality, Contrastive Learning, Digital elevation model, Digital human technique, Digital Human Techniques, Digital humans, Education Reform, Education reforms, Educational Technology, Express emotions, Federated learning, Human behaviors, Human form models, Human techniques, Immersive, Innovative technology, Modeling languages, Natural language processing systems, Teachers', Teaching, Virtual environments, Virtual humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
}
Casas, L.; Mitchell, K.
Structured Teaching Prompt Articulation for Generative-AI Role Embodiment with Augmented Mirror Video Displays Proceedings Article
In: S.N., Spencer (Ed.): Proc.: VRCAI - ACM SIGGRAPH Int. Conf. Virtual-Reality Contin. Appl. Ind., Association for Computing Machinery, Inc, 2025, ISBN: 979-840071348-4 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Augmented Reality, Computer interaction, Contrastive Learning, Cultural icon, Experiential learning, Generative adversarial networks, Generative AI, human-computer interaction, Immersive, Pedagogical practices, Role-based, Teachers', Teaching, Video display, Virtual environments, Virtual Reality
@inproceedings{casas_structured_2025,
  title     = {Structured Teaching Prompt Articulation for Generative-{AI} Role Embodiment with Augmented Mirror Video Displays},
  author    = {Casas, L. and Mitchell, K.},
  editor    = {Spencer, S. N.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85217997060&doi=10.1145%2f3703619.3706049&partnerID=40&md5=7141c5dac7882232c6ee8e0bef0ba84e},
  doi       = {10.1145/3703619.3706049},
  isbn      = {979-8-4007-1348-4},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Proc.: VRCAI - ACM SIGGRAPH Int. Conf. Virtual-Reality Contin. Appl. Ind.},
  publisher = {Association for Computing Machinery, Inc},
  abstract  = {We present a classroom enhanced with augmented reality video display in which students adopt snapshots of their corresponding virtual personas according to their teacher's live articulated spoken educational theme, linearly, such as historical figures, famous scientists, cultural icons, and laterally according to archetypal categories such as world dance styles. We define a structure of generative AI prompt guidance to assist teachers with focused specified visual role embodiment stylization. By leveraging role-based immersive embodiment, our proposed approach enriches pedagogical practices that prioritize experiential learning. © 2024 ACM.},
  keywords  = {Artificial intelligence, Augmented Reality, Computer interaction, Contrastive Learning, Cultural icon, Experiential learning, Generative adversarial networks, Generative AI, human-computer interaction, Immersive, Pedagogical practices, Role-based, Teachers', Teaching, Video display, Virtual environments, Virtual Reality},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
}
Alibrahim, Y.; Ibrahim, M.; Gurdayal, D.; Munshi, M.
AI speechbots and 3D segmentations in virtual reality improve radiology on-call training in resource-limited settings Journal Article
In: Intelligence-Based Medicine, vol. 11, 2025, ISSN: 26665212 (ISSN).
Abstract | Links | BibTeX | Tags: 3D segmentation, AI speechbots, Article, artificial intelligence chatbot, ChatGPT, computer assisted tomography, Deep learning, headache, human, Image segmentation, interventional radiology, Large language model, Likert scale, nausea, Proof of concept, prospective study, radiology, radiology on call training, resource limited setting, Teaching, Training, ultrasound, Virtual Reality, voice recognition
@article{alibrahim_ai_2025,
  title     = {{AI} speechbots and {3D} segmentations in virtual reality improve radiology on-call training in resource-limited settings},
  author    = {Alibrahim, Y. and Ibrahim, M. and Gurdayal, D. and Munshi, M.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105001472313&doi=10.1016%2fj.ibmed.2025.100245&partnerID=40&md5=623a0ceaa07e5516a296420d25c3033b},
  doi       = {10.1016/j.ibmed.2025.100245},
  issn      = {2666-5212},
  year      = {2025},
  date      = {2025-01-01},
  journal   = {Intelligence-Based Medicine},
  volume    = {11},
  abstract  = {Objective: Evaluate the use of large-language model (LLM) speechbot tools and deep learning-assisted generation of 3D reconstructions when integrated in a virtual reality (VR) setting to teach radiology on-call topics to radiology residents. Methods: Three first year radiology residents in Guyana were enrolled in an 8-week radiology course that focused on preparation for on-call duties. The course, delivered via VR headsets with custom software integrating LLM-powered speechbots trained on imaging reports and 3D reconstructions segmented with the help of a deep learning model. Each session focused on a specific radiology area, employing a didactic and case-based learning approach, enhanced with 3D reconstructions and an LLM-powered speechbot. Post-session, residents reassessed their knowledge and provided feedback on their VR and LLM-powered speechbot experiences. Results/discussion: Residents found that the 3D reconstructions segmented semi-automatically by deep learning algorithms and AI-driven self-learning via speechbot was highly valuable. The 3D reconstructions, especially in the interventional radiology session, were helpful and the benefit is augmented by VR where navigating the models is seamless and perception of depth is pronounced. Residents also found conversing with the AI-speechbot seamless and was valuable in their post session self-learning. The major drawback of VR was motion sickness, which was mild and improved over time. Conclusion: AI-assisted VR radiology education could be used to develop new and accessible ways of teaching a variety of radiology topics in a seamless and cost-effective way. This could be especially useful in supporting radiology education remotely in regions which lack local radiology expertise. © 2025},
  keywords  = {3D segmentation, AI speechbots, Article, artificial intelligence chatbot, ChatGPT, computer assisted tomography, Deep learning, headache, human, Image segmentation, interventional radiology, Large language model, Likert scale, nausea, Proof of concept, prospective study, radiology, radiology on call training, resource limited setting, Teaching, Training, ultrasound, Virtual Reality, voice recognition},
  pubstate  = {published},
  tppubtype = {article}
}
Nygren, T.; Samuelsson, M.; Hansson, P. -O.; Efimova, E.; Bachelder, S.
AI Versus Human Feedback in Mixed Reality Simulations: Comparing LLM and Expert Mentoring in Preservice Teacher Education on Controversial Issues Journal Article
In: International Journal of Artificial Intelligence in Education, 2025, ISSN: 15604292 (ISSN).
Abstract | Links | BibTeX | Tags: AI-generated feedback, Controversial issue in social study education, Controversial issues in social studies education, Curricula, Domain knowledge, Economic and social effects, Expert systems, Generative AI, Human engineering, Knowledge engineering, Language Model, Large language model, large language models (LLMs), Mixed reality, Mixed reality simulation, Mixed reality simulation (MRS), Pedagogical content knowledge, Pedagogical content knowledge (PCK), Personnel training, Preservice teachers, Social studies education, Teacher training, Teacher training simulation, Teacher training simulations, Teaching, Training simulation
@article{nygren_ai_2025,
  title     = {{AI} Versus Human Feedback in Mixed Reality Simulations: Comparing {LLM} and Expert Mentoring in Preservice Teacher Education on Controversial Issues},
  author    = {Nygren, T. and Samuelsson, M. and Hansson, P. -O. and Efimova, E. and Bachelder, S.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105007244772&doi=10.1007%2fs40593-025-00484-8&partnerID=40&md5=d3cb14a8117045505cbbeb174b32b88d},
  doi       = {10.1007/s40593-025-00484-8},
  issn      = {1560-4292},
  year      = {2025},
  date      = {2025-01-01},
  journal   = {International Journal of Artificial Intelligence in Education},
  abstract  = {This study explores the potential role of AI-generated mentoring within simulated environments designed for teacher education, specifically focused on the challenges of teaching controversial issues. Using a mixed-methods approach, we empirically investigate the potential and challenges of AI-generated feedback compared to that provided by human experts when mentoring preservice teachers in the context of mixed reality simulations. Findings reveal that human experts offered more mixed and nuanced feedback than ChatGPT-4o and Perplexity, especially when identifying missed teaching opportunities and balancing classroom discussions. The AI models evaluated were publicly available pro versions of LLMs and were tested using detailed prompts and coding schemes aligned with educational theories. AI systems were not very good at identifying aspects of general, pedagogical or content knowledge based on Shulman’s theories but were still quite effective in generating feedback in line with human experts. The study highlights the promise of AI to enhance teacher training but underscores the importance of combining AI feedback with expert insights to address the complexities of real-world teaching. This research contributes to a growing understanding of AI's potential role and limitations in education. It suggests that, while AI can be valuable to scale mixed reality simulations, it should be carefully evaluated and balanced by human expertise in teacher education. © The Author(s) 2025.},
  keywords  = {AI-generated feedback, Controversial issue in social study education, Controversial issues in social studies education, Curricula, Domain knowledge, Economic and social effects, Expert systems, Generative AI, Human engineering, Knowledge engineering, Language Model, Large language model, large language models (LLMs), Mixed reality, Mixed reality simulation, Mixed reality simulation (MRS), Pedagogical content knowledge, Pedagogical content knowledge (PCK), Personnel training, Preservice teachers, Social studies education, Teacher training, Teacher training simulation, Teacher training simulations, Teaching, Training simulation},
  pubstate  = {published},
  tppubtype = {article}
}
Hassoulas, A.; Crawford, O.; Hemrom, S.; Almeida, A.; Coffey, M. J.; Hodgson, M.; Leveridge, B.; Karwa, D.; Lethbridge, A.; Williams, H.; Voisey, A.; Reed, K.; Patel, S.; Hart, K.; Shaw, H.
A pilot study investigating the efficacy of technology enhanced case based learning (CBL) in small group teaching Journal Article
In: Scientific Reports, vol. 15, no. 1, 2025, ISSN: 20452322 (ISSN).
Abstract | Links | BibTeX | Tags: coronavirus disease 2019, Covid-19, epidemiology, female, human, Humans, Learning, male, Medical, Medical student, Pilot Projects, pilot study, problem based learning, Problem-Based Learning, procedures, SARS-CoV-2, Severe acute respiratory syndrome coronavirus 2, Students, Teaching, Virtual Reality
@article{hassoulas_pilot_2025,
  title     = {A pilot study investigating the efficacy of technology enhanced case based learning ({CBL}) in small group teaching},
  author    = {Hassoulas, A. and Crawford, O. and Hemrom, S. and Almeida, A. and Coffey, M. J. and Hodgson, M. and Leveridge, B. and Karwa, D. and Lethbridge, A. and Williams, H. and Voisey, A. and Reed, K. and Patel, S. and Hart, K. and Shaw, H.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105004223025&doi=10.1038%2fs41598-025-99764-5&partnerID=40&md5=8588cac4c3ffe437e667ba4373e010ec},
  doi       = {10.1038/s41598-025-99764-5},
  issn      = {2045-2322},
  year      = {2025},
  date      = {2025-01-01},
  journal   = {Scientific Reports},
  volume    = {15},
  number    = {1},
  abstract  = {The recent paradigm shift in teaching provision within higher education, following the COVID-19 pandemic, has led to blended models of learning prevailing in the pedagogic literature and in education practice. This shift has also resulted in an abundance of tools and technologies coming to market. Whilst the value of integrating technology into teaching and assessment has been well-established in the literature, the magnitude of choice available to educators and to students can be overwhelming. The current pilot investigated the feasibility of integrating key technologies in delivering technology-enhanced learning (TEL) case-based learning (CBL) within a sample of year two medical students. The cohort was selected at random, as was the control group receiving conventional CBL. Both groups were matched on prior academic performance. The TEL-CBL group received (1) in-person tutorials delivered within an immersive learning suite, (2) access to 3D anatomy software to explore during their self-directed learning time, (3) virtual reality (VR) guided anatomy exploration during tutorials, (4) access to a generative AI-based simulated virtual patient repository to practice key skills such as communication and history taking, and (5) an immersive medical emergency simulation. Metrics assessed included formative academic performance, student learning experience, and confidence in relation to communication and clinical skills. The results revealed that the TEL-CBL group outperformed their peers in successive formative assessments (p < 0.05), engaged thoroughly with the technologies at their disposal, and reported that these technologies enhanced their learning experience. Furthermore, students reported that access to the GenAI-simulated virtual patient platform and the immersive medical emergency simulation improved their clinical confidence and gave them a useful insight into what they can expect during the clinical phase of their medical education. 
The results are discussed in relation to the advantages that key emerging technologies may play in enhancing student performance, experience and confidence. © The Author(s) 2025.},
  keywords  = {coronavirus disease 2019, Covid-19, epidemiology, female, human, Humans, Learning, male, Medical, Medical student, Pilot Projects, pilot study, problem based learning, Problem-Based Learning, procedures, SARS-CoV-2, Severe acute respiratory syndrome coronavirus 2, Students, Teaching, Virtual Reality},
  pubstate  = {published},
  tppubtype = {article}
}
Ly, C.; Peng, E.; Liu, K.; Qin, A.; Howe, G.; Cheng, A. Y.; Cuadra, A.
Museum in the Classroom: Engaging Students with Augmented Reality Museum Artifacts and Generative AI Proceedings Article
In: Conf Hum Fact Comput Syst Proc, Association for Computing Machinery, 2025, ISBN: 979-840071395-8 (ISBN).
Abstract | Links | BibTeX | Tags: Artifact or System, Child/parent, Children/Parents, Digitisation, Education/Learning, Engaging students, Engineering education, Field trips, Interactive learning, Learning experiences, Rich learning experiences, Students, Teachers', Teaching
@inproceedings{ly_museum_2025,
  title     = {Museum in the Classroom: Engaging Students with Augmented Reality Museum Artifacts and Generative {AI}},
  author    = {Ly, C. and Peng, E. and Liu, K. and Qin, A. and Howe, G. and Cheng, A. Y. and Cuadra, A.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005741934&doi=10.1145%2f3706599.3719787&partnerID=40&md5=08816dd8d41bc34a0dc2d355985e2cc4},
  doi       = {10.1145/3706599.3719787},
  isbn      = {979-8-4007-1395-8},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Conf Hum Fact Comput Syst Proc},
  publisher = {Association for Computing Machinery},
  abstract  = {Museum field trips provide a rich learning experience for children. However, they are complex and expensive for teachers to organize. Fortunately, digitization of museum artifacts makes it possible to use museum resources within the classroom. Museum in the Classroom (MITC) explores how augmented reality (AR) and generative artificial intelligence (AI) can create an interactive learning experience around museum artifacts. This iPad app allows educators to select historical topics from a curated artifact library, generating AR-based exhibits that students can explore. MITC engages students through interactive AR artifacts, AI-driven chatbots, and AI-generated quiz questions, based on a real exhibition at the Cantor Arts Center at Stanford University. A formative study with middle schoolers (N = 20) demonstrated that the app increased engagement compared to traditional learning methods. MITC also fostered a playful and comfortable environment to interact with museum artifacts. Our findings suggest that combining AR and AI has the potential to enrich classroom learning and offer a scalable alternative to traditional museum visits. © 2025 Copyright held by the owner/author(s).},
  keywords  = {Artifact or System, Child/parent, Children/Parents, Digitisation, Education/Learning, Engaging students, Engineering education, Field trips, Interactive learning, Learning experiences, Rich learning experiences, Students, Teachers', Teaching},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gao, H.; Xie, Y.; Kasneci, E.
PerVRML: ChatGPT-Driven Personalized VR Environments for Machine Learning Education Journal Article
In: International Journal of Human-Computer Interaction, 2025, ISSN: 10447318 (ISSN).
Abstract | Links | BibTeX | Tags: Backpropagation, ChatGPT, Curricula, Educational robots, Immersive learning, Interactive learning, Language Model, Large language model, large language models, Learning mode, Machine learning education, Machine-learning, Personalized learning, Support vector machines, Teaching, Virtual Reality, Virtual-reality environment, Virtualization
@article{gao_pervrml_2025,
title = {{PerVRML}: {ChatGPT}-Driven Personalized {VR} Environments for Machine Learning Education},
author = {Gao, H. and Xie, Y. and Kasneci, E.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005776517&doi=10.1080%2f10447318.2025.2504188&partnerID=40&md5=c2c59be3d20d02c6df7750c2330c8f6d},
doi = {10.1080/10447318.2025.2504188},
issn = {1044-7318},
year = {2025},
date = {2025-01-01},
journal = {International Journal of Human-Computer Interaction},
abstract = {The advent of large language models (LLMs) such as ChatGPT has demonstrated significant potential for advancing educational technologies. Recently, growing interest has emerged in integrating ChatGPT with virtual reality (VR) to provide interactive and dynamic learning environments. This study explores the effectiveness of ChatGPT-driven VR in facilitating machine learning education through PerVRML. PerVRML incorporates a ChatGPT-powered avatar that provides real-time assistance and uses LLMs to personalize learning paths based on various sensor data from VR. A between-subjects design was employed to compare two learning modes: personalized and non-personalized. Quantitative data were collected from assessments, user experience surveys, and interaction metrics. The results indicate that while both learning modes supported learning effectively, ChatGPT-powered personalization significantly improved learning outcomes and had distinct impacts on user feedback. These findings underscore the potential of ChatGPT-enhanced VR to deliver adaptive and personalized educational experiences. © 2025 Taylor & Francis Group, LLC.},
keywords = {Backpropagation, ChatGPT, Curricula, Educational robots, Immersive learning, Interactive learning, Language Model, Large language model, large language models, Learning mode, Machine learning education, Machine-learning, Personalized learning, Support vector machines, Teaching, Virtual Reality, Virtual-reality environment, Virtualization},
pubstate = {published},
tppubtype = {article}
}
2024
Harinee, S.; Raja, R. Vimal; Mugila, E.; Govindharaj, I.; Sanjaykumar, V.; Ragavendhiran, T.
Elevating Medical Training: A Synergistic Fusion of AI and VR for Immersive Anatomy Learning and Practical Procedure Mastery Proceedings Article
In: Int. Conf. Syst., Comput., Autom. Netw., ICSCAN, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-833151002-2.
Abstract | Links | BibTeX | Tags: 'current, Anatomy education, Anatomy educations, Computer interaction, Curricula, Embodied virtual assistant, Embodied virtual assistants, Generative AI, Human- Computer Interaction, Immersive, Intelligent virtual agents, Medical computing, Medical education, Medical procedure practice, Medical procedures, Medical training, Personnel training, Students, Teaching, Three dimensional computer graphics, Usability engineering, Virtual assistants, Virtual environments, Virtual Reality, Visualization
@inproceedings{harinee_elevating_2024,
title = {Elevating Medical Training: A Synergistic Fusion of {AI} and {VR} for Immersive Anatomy Learning and Practical Procedure Mastery},
author = {Harinee, S. and Raja, R. Vimal and Mugila, E. and Govindharaj, I. and Sanjaykumar, V. and Ragavendhiran, T.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105000334626&doi=10.1109%2fICSCAN62807.2024.10894451&partnerID=40&md5=100899b489c00335e0a652f2efd33e23},
doi = {10.1109/ICSCAN62807.2024.10894451},
isbn = {979-833151002-2},
year = {2024},
date = {2024-01-01},
booktitle = {Int. Conf. Syst., Comput., Autom. Netw., ICSCAN},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Virtual reality with its 3D visualization have brought an overwhelming change in the face of medical education, especially for courses like human anatomy. The proposed virtual reality system to bring massive improvements in the education received by a medical student studying for their degree courses. The project puts forward the text-to-speech and speech-to-text aligned system that simplifies the usage of a chatbot empowered by OpenAI GPT-4 and allows pupils to vocally speak with Avatar, the set virtual assistant. Contrary to the current methodologies, the setup of virtual reality is powered by avatars and thus covers an enhanced virtual assistant environment. Avatars offer students the set of repeated practicing of medical procedures on it, and the real uniqueness in the proposed product. The developed virtual reality environment is enhanced over other current training techniques where a student should interact and immerse in three-dimensional human organs for visualization in three dimensions and hence get better knowledge of the subjects in greater depth. A virtual assistant guides the whole process, giving insights and support to help the student bridge the gap from theory to practice. Then, the system is essentially Knowledge based and Analysis based approach. The combination of generative AI along with embodied virtual agents has great potential when it comes to customized virtual conversation assistant for much wider range of applications. The study brings out the value of acquiring hands-on skills through simulated medical procedures and opens new frontiers of research and development in AI, VR, and medical education. In addition to assessing the effectiveness of such novel functionalities, the study also explores user experience related dimensions such as usability, task loading, and the sense of presence in proposed virtual medical environment. © 2024 IEEE.},
keywords = {'current, Anatomy education, Anatomy educations, Computer interaction, Curricula, Embodied virtual assistant, Embodied virtual assistants, Generative AI, Human- Computer Interaction, Immersive, Intelligent virtual agents, Medical computing, Medical education, Medical procedure practice, Medical procedures, Medical training, Personnel training, Students, Teaching, Three dimensional computer graphics, Usability engineering, Virtual assistants, Virtual environments, Virtual Reality, Visualization},
pubstate = {published},
tppubtype = {inproceedings}
}
Domenichini, D.; Bucchiarone, A.; Chiarello, F.; Schiavo, G.; Fantoni, G.
An AI-Driven Approach for Enhancing Engagement and Conceptual Understanding in Physics Education Proceedings Article
In: IEEE Global Eng. Edu. Conf., EDUCON, IEEE Computer Society, 2024, ISSN: 2165-9559; ISBN: 979-835039402-3.
Abstract | Links | BibTeX | Tags: Adaptive Learning, Artificial intelligence, Artificial intelligence in education, Artificial Intelligence in Education (AIED), Conceptual Understanding, Educational System, Educational systems, Gamification, Generative AI, generative artificial intelligence, Learning Activity, Learning systems, Physics Education, Teachers', Teaching, Virtual Reality
@inproceedings{domenichini_ai-driven_2024,
title = {An {AI}-Driven Approach for Enhancing Engagement and Conceptual Understanding in Physics Education},
author = {Domenichini, D. and Bucchiarone, A. and Chiarello, F. and Schiavo, G. and Fantoni, G.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85199035695&doi=10.1109%2fEDUCON60312.2024.10578670&partnerID=40&md5=4cf9f89e97664ae6d618a90f2dbc23e0},
doi = {10.1109/EDUCON60312.2024.10578670},
issn = {2165-9559},
isbn = {979-835039402-3},
year = {2024},
date = {2024-01-01},
booktitle = {IEEE Global Eng. Edu. Conf., EDUCON},
publisher = {IEEE Computer Society},
abstract = {This Work in Progress paper introduces the design of an innovative educational system that leverages Artificial Intelligence (AI) to address challenges in physics education. The primary objective is to create a system that dynamically adapts to the individual needs and preferences of students while maintaining user-friendliness for teachers, allowing them to tailor their teaching methods. The emphasis is on fostering motivation and engagement, achieved through the implementation of a gamified virtual environment and a strong focus on personalization. Our aim is to develop a system capable of autonomously generating learning activities and constructing effective learning paths, all under the supervision and interaction of teachers. The generation of learning activities is guided by educational taxonomies that delineate and categorize the cognitive processes involved in these activities. The proposed educational system seeks to address challenges identified by Physics Education Research (PER), which offers valuable insights into how individuals learn physics and provides strategies to enhance the overall quality of physics education. Our specific focus revolves around two crucial aspects: concentrating on the conceptual understanding of physics concepts and processes, and fostering knowledge integration and coherence across various physics topics. These aspects are deemed essential for cultivating enduring knowledge and facilitating practical applications in the field of physics. © 2024 IEEE.},
keywords = {Adaptive Learning, Artificial intelligence, Artificial intelligence in education, Artificial Intelligence in Education (AIED), Conceptual Understanding, Educational System, Educational systems, Gamification, Generative AI, generative artificial intelligence, Learning Activity, Learning systems, Physics Education, Teachers', Teaching, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Liu, M.; M'Hiri, F.
Beyond Traditional Teaching: Large Language Models as Simulated Teaching Assistants in Computer Science Proceedings Article
In: SIGCSE - Proc. ACM Tech. Symp. Comput. Sci. Educ., pp. 743–749, Association for Computing Machinery, Inc, 2024, ISBN: 979-840070423-9.
Abstract | Links | BibTeX | Tags: Adaptive teaching, ChatGPT, Computational Linguistics, CS education, E-Learning, Education computing, Engineering education, GPT, Language Model, LLM, machine learning, Machine-learning, Novice programmer, novice programmers, Openai, Programming, Python, Students, Teaching, Virtual Reality
@inproceedings{liu_beyond_2024,
title = {Beyond Traditional Teaching: Large Language Models as Simulated Teaching Assistants in Computer Science},
author = {Liu, M. and M'Hiri, F.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85189289344&doi=10.1145%2f3626252.3630789&partnerID=40&md5=44ec79c8f005f4551c820c61f5b5d435},
doi = {10.1145/3626252.3630789},
isbn = {979-840070423-9},
year = {2024},
date = {2024-01-01},
booktitle = {SIGCSE - Proc. ACM Tech. Symp. Comput. Sci. Educ.},
volume = {1},
pages = {743--749},
publisher = {Association for Computing Machinery, Inc},
abstract = {As the prominence of Large Language Models (LLMs) grows in various sectors, their potential in education warrants exploration. In this study, we investigate the feasibility of employing GPT-3.5 from OpenAI, as an LLM teaching assistant (TA) or a virtual TA in computer science (CS) courses. The objective is to enhance the accessibility of CS education while maintaining academic integrity by refraining from providing direct solutions to current-semester assignments. Targeting Foundations of Programming (COMP202), an undergraduate course that introduces students to programming with Python, we have developed a virtual TA using the LangChain framework, known for integrating language models with diverse data sources and environments. The virtual TA assists students with their code and clarifies complex concepts. For homework questions, it is designed to guide students with hints rather than giving out direct solutions. We assessed its performance first through a qualitative evaluation, then a survey-based comparative analysis, using a mix of questions commonly asked on the COMP202 discussion board and questions created by the authors. Our preliminary results indicate that the virtual TA outperforms human TAs on clarity and engagement, matching them on accuracy when the question is non-assignment-specific, for which human TAs still proved more reliable. These findings suggest that while virtual TAs, leveraging the capabilities of LLMs, hold great promise towards making CS education experience more accessible and engaging, their optimal use necessitates human supervision. We conclude by identifying several directions that could be explored in future implementations. © 2024 ACM.},
keywords = {Adaptive teaching, ChatGPT, Computational Linguistics, CS education, E-Learning, Education computing, Engineering education, GPT, Language Model, LLM, machine learning, Machine-learning, Novice programmer, novice programmers, Openai, Programming, Python, Students, Teaching, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Ivanova, M.; Grosseck, G.; Holotescu, C.
Unveiling Insights: A Bibliometric Analysis of Artificial Intelligence in Teaching Journal Article
In: Informatics, vol. 11, no. 1, 2024, ISSN: 2227-9709.
Abstract | Links | BibTeX | Tags: Artificial intelligence, ChatGPT, Intelligent Environment, large language models, learning analytics, Teaching
@article{ivanova_unveiling_2024,
title = {Unveiling Insights: A Bibliometric Analysis of Artificial Intelligence in Teaching},
author = {Ivanova, M. and Grosseck, G. and Holotescu, C.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85188949348&doi=10.3390%2finformatics11010010&partnerID=40&md5=aaf44928fb594e2807234da0f3799437},
doi = {10.3390/informatics11010010},
issn = {2227-9709},
year = {2024},
date = {2024-01-01},
journal = {Informatics},
volume = {11},
number = {1},
abstract = {The penetration of intelligent applications in education is rapidly increasing, posing a number of questions of a different nature to the educational community. This paper is coming to analyze and outline the influence of artificial intelligence (AI) on teaching practice which is an essential problem considering its growing utilization and pervasion on a global scale. A bibliometric approach is applied to outdraw the “big picture” considering gathered bibliographic data from scientific databases Scopus and Web of Science. Data on relevant publications matching the query “artificial intelligence and teaching” over the past 5 years have been researched and processed through Biblioshiny in R environment in order to establish a descriptive structure of the scientific production, to determine the impact of scientific publications, to trace collaboration patterns and to identify key research areas and emerging trends. The results point out the growth in scientific production lately that is an indicator of increased interest in the investigated topic by researchers who mainly work in collaborative teams as some of them are from different countries and institutions. The identified key research areas include techniques used in educational applications, such as artificial intelligence, machine learning, and deep learning. Additionally, there is a focus on applicable technologies like ChatGPT, learning analytics, and virtual reality. The research also explores the context of application for these techniques and technologies in various educational settings, including teaching, higher education, active learning, e-learning, and online learning. Based on our findings, the trending research topics can be encapsulated by terms such as ChatGPT, chatbots, AI, generative AI, machine learning, emotion recognition, large language models, convolutional neural networks, and decision theory. These findings offer valuable insights into the current landscape of research interests in the field. © 2024 by the authors.},
keywords = {Artificial intelligence, ChatGPT, Intelligent Environment, large language models, learning analytics, Teaching},
pubstate = {published},
tppubtype = {article}
}
Jia, Y.; Sin, Z. P. T.; Wang, X. E.; Li, C.; Ng, P. H. F.; Huang, X.; Dong, J.; Wang, Y.; Baciu, G.; Cao, J.; Li, Q.
NivTA: Towards a Naturally Interactable Edu-Metaverse Teaching Assistant for CAVE Proceedings Article
In: Proc. - IEEE Int. Conf. Metaverse Comput., Netw., Appl., MetaCom, pp. 57–64, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-833151599-7.
Abstract | Links | BibTeX | Tags: Active learning, Adversarial machine learning, cave automatic virtual environment, Cave automatic virtual environments, Caves, Chatbots, Contrastive Learning, Digital elevation model, Federated learning, Interactive education, Language Model, Large language model agent, Learning Activity, LLM agents, Metaverses, Model agents, Natural user interface, Students, Teaching, Teaching assistants, Virtual environments, Virtual Reality, virtual teaching assistant, Virtual teaching assistants
@inproceedings{jia_nivta_2024,
title = {{NivTA}: Towards a Naturally Interactable Edu-Metaverse Teaching Assistant for {CAVE}},
author = {Jia, Y. and Sin, Z. P. T. and Wang, X. E. and Li, C. and Ng, P. H. F. and Huang, X. and Dong, J. and Wang, Y. and Baciu, G. and Cao, J. and Li, Q.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85211447638&doi=10.1109%2fMetaCom62920.2024.00023&partnerID=40&md5=efefd453c426e74705518254bdc49e87},
doi = {10.1109/MetaCom62920.2024.00023},
isbn = {979-833151599-7},
year = {2024},
date = {2024-01-01},
booktitle = {Proc. - IEEE Int. Conf. Metaverse Comput., Netw., Appl., MetaCom},
pages = {57--64},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Edu-metaverse is a specialized metaverse dedicated for interactive education in an immersive environment. Its main purpose is to immerse the learners in a digital environment and conduct learning activities that could mirror reality. Not only does it enable activities that may be difficult to perform in the real world, but it also extends the interaction to personalized and CL. This is a more effective pedagogical approach as it tends to enhance the motivation and engagement of students and it increases their active participation in lessons delivered. To this extend, we propose to realize an interactive virtual teaching assistant called NivTA. To make NivTA easily accessible and engaging by multiple users simultaneously, we also propose to use a CAVE virtual environment (CAVE-VR) as a "metaverse window"into concepts, ideas, topics, and learning activities. The students simply need to step into the CAVE-VR and interact with a life-size teaching assistant that they can engage with naturally, as if they are approaching a real person. Instead of text-based interaction currently developed for large language models (LLM), NivTA is given additional cues regarding the users so it can react more naturally via a specific prompt design. For example, the user can simply point to an educational concept and ask NivTA to explain what it is. To guide NivTA onto the educational concept, the prompt is also designed to feed in an educational KG to provide NivTA with the context of the student's question. The NivTA system is an integration of several components that are discussed in this paper. We further describe how the system is designed and implemented, along with potential applications and future work on interactive collaborative edu-metaverse environments dedicated for teaching and learning. © 2024 IEEE.},
keywords = {Active learning, Adversarial machine learning, cave automatic virtual environment, Cave automatic virtual environments, Caves, Chatbots, Contrastive Learning, Digital elevation model, Federated learning, Interactive education, Language Model, Large language model agent, Learning Activity, LLM agents, Metaverses, Model agents, Natural user interface, Students, Teaching, Teaching assistants, Virtual environments, Virtual Reality, virtual teaching assistant, Virtual teaching assistants},
pubstate = {published},
tppubtype = {inproceedings}
}
Sikström, P.; Valentini, C.; Sivunen, A.; Kärkkäinen, T.
Pedagogical agents communicating and scaffolding students' learning: High school teachers' and students' perspectives Journal Article
In: Computers and Education, vol. 222, 2024, ISSN: 0360-1315.
Abstract | Links | BibTeX | Tags: Adversarial machine learning, Agents communication, Augmented Reality, Contrastive Learning, Federated learning, Human communications, Human-Machine Communication, Human-to-human communication script, Human–machine communication, Human–machine communication (HMC), pedagogical agent, Pedagogical agents, Scaffolds, Scaffolds (biology), Secondary education, Student learning, Students, Teachers', Teaching, User-centered design, User-centred, Virtual environments
@article{sikstrom_pedagogical_2024,
title = {Pedagogical agents communicating and scaffolding students' learning: High school teachers' and students' perspectives},
author = {Sikström, P. and Valentini, C. and Sivunen, A. and Kärkkäinen, T.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85202198552&doi=10.1016%2fj.compedu.2024.105140&partnerID=40&md5=dfb4a7b6c1f6352c5cc6faac213e938f},
doi = {10.1016/j.compedu.2024.105140},
issn = {0360-1315},
year = {2024},
date = {2024-01-01},
journal = {Computers and Education},
volume = {222},
abstract = {Pedagogical agents (PAs) communicate verbally and non-verbally with students in digital and virtual reality/augmented reality learning environments. PAs have been shown to be beneficial for learning, and generative artificial intelligence, such as large language models, can improve PAs' communication abilities significantly. K-12 education is underrepresented in learning technology research and teachers' and students' insights have not been considered when developing PA communication. The current study addresses this research gap by conducting and analyzing semi-structured, in-depth interviews with eleven high school teachers and sixteen high school students about their expectations for PAs' communication capabilities. The interviewees identified relational and task-related communication capabilities that a PA should perform to communicate effectively with students and scaffold their learning. PA communication that is simultaneously affirmative and relational can induce immediacy, foster the relationship and engagement with a PA, and support students' learning management. Additionally, the teachers and students described the activities and technological aspects that should be considered when designing conversational PAs. The study showed that teachers and students applied human-to-human communication scripts when outlining their desired PA communication characteristics. The study offers novel insights and recommendations to researchers and developers on the communicational, pedagogical, and technological aspects that must be considered when designing communicative PAs that scaffold students’ learning, and discusses the contributions on human–machine communication in education. © 2024 The Authors},
keywords = {Adversarial machine learning, Agents communication, Augmented Reality, Contrastive Learning, Federated learning, Human communications, Human-Machine Communication, Human-to-human communication script, Human–machine communication, Human–machine communication (HMC), pedagogical agent, Pedagogical agents, Scaffolds, Scaffolds (biology), Secondary education, Student learning, Students, Teachers', Teaching, User-centered design, User-centred, Virtual environments},
pubstate = {published},
tppubtype = {article}
}
Scott, A. J. S.; McCuaig, F.; Lim, V.; Watkins, W.; Wang, J.; Strachan, G.
Revolutionizing Nurse Practitioner Training: Integrating Virtual Reality and Large Language Models for Enhanced Clinical Education Proceedings Article
In: Strudwick, G.; Hardiker, N. R.; Rees, G.; Cook, R.; Cook, R.; Lee, Y. J. (Ed.): Stud. Health Technol. Informatics, pp. 671–672, IOS Press BV, 2024, ISSN: 0926-9630; ISBN: 978-164368527-4.
Abstract | Links | BibTeX | Tags: 3D modeling, 3D models, 3d-modeling, adult, anamnesis, clinical decision making, clinical education, Clinical Simulation, Computational Linguistics, computer interface, Computer-Assisted Instruction, conference paper, Curriculum, Decision making, E-Learning, Education, Health care education, Healthcare Education, human, Humans, Language Model, Large language model, large language models, Mesh generation, Model animations, Modeling languages, nurse practitioner, Nurse Practitioners, Nursing, nursing education, nursing student, OSCE preparation, procedures, simulation, Teaching, therapy, Training, Training program, User-Computer Interface, Virtual Reality, Virtual reality training
@inproceedings{scott_revolutionizing_2024,
title = {Revolutionizing Nurse Practitioner Training: Integrating Virtual Reality and Large Language Models for Enhanced Clinical Education},
author = {Scott, A. J. S. and McCuaig, F. and Lim, V. and Watkins, W. and Wang, J. and Strachan, G.},
editor = {Strudwick, G. and Hardiker, N. R. and Rees, G. and Cook, R. and Cook, R. and Lee, Y. J.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85199593781&doi=10.3233%2fSHTI240272&partnerID=40&md5=90c7bd43ba978f942723e6cf1983ffb3},
doi = {10.3233/SHTI240272},
issn = {0926-9630},
isbn = {978-164368527-4},
year = {2024},
date = {2024-01-01},
booktitle = {Stud. Health Technol. Informatics},
volume = {315},
pages = {671--672},
publisher = {IOS Press BV},
abstract = {This project introduces an innovative virtual reality (VR) training program for student Nurse Practitioners, incorporating advanced 3D modeling, animation, and Large Language Models (LLMs). Designed to simulate realistic patient interactions, the program aims to improve communication, history taking, and clinical decision-making skills in a controlled, authentic setting. This abstract outlines the methods, results, and potential impact of this cutting-edge educational tool on nursing education. © 2024 The Authors.},
keywords = {3D modeling, 3D models, 3d-modeling, adult, anamnesis, clinical decision making, clinical education, Clinical Simulation, Computational Linguistics, computer interface, Computer-Assisted Instruction, conference paper, Curriculum, Decision making, E-Learning, Education, Health care education, Healthcare Education, human, Humans, Language Model, Large language model, large language models, Mesh generation, Model animations, Modeling languages, nurse practitioner, Nurse Practitioners, Nursing, nursing education, nursing student, OSCE preparation, procedures, simulation, Teaching, therapy, Training, Training program, User-Computer Interface, Virtual Reality, Virtual reality training},
pubstate = {published},
tppubtype = {inproceedings}
}