AHCI RESEARCH GROUP
Publications
Papers published in international journals,
proceedings of conferences, workshops and books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTeX record for each paper.
2025
Mekki, Y. M.; Simon, L. V.; Freeman, W. D.; Qadir, J.
Medical Education Metaverses (MedEd Metaverses): Opportunities, Use Case, and Guidelines Journal Article
In: Computer, vol. 58, no. 3, pp. 60–70, 2025, ISSN: 00189162 (ISSN).
Abstract | Links | BibTeX | Tags: Adaptive feedback, Augmented Reality, Immersive learning, Medical education, Metaverses, Performance tracking, Remote resources, Remote training, Resource efficiencies, Training efficiency, Virtual environments
@article{mekki_medical_2025,
  title = {Medical Education Metaverses ({MedEd} Metaverses): Opportunities, Use Case, and Guidelines},
  author = {Mekki, Y. M. and Simon, L. V. and Freeman, W. D. and Qadir, J.},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85218631349&doi=10.1109%2fMC.2024.3474033&partnerID=40&md5=65f46cf9b8d98eaf0fcd6843b9ebc41e},
  doi = {10.1109/MC.2024.3474033},
  issn = {0018-9162},
  year = {2025},
  date = {2025-01-01},
  journal = {Computer},
  volume = {58},
  number = {3},
  pages = {60--70},
  abstract = {This article explores how artificial intelligence (AI), particularly generative AI (GenAI), can enhance extended reality (XR) applications in medical education (MedEd) metaverses. We compare traditional augmented reality/virtual reality methods with AI-enabled XR metaverses, highlighting improvements in immersive learning, adaptive feedback, personalized performance tracking, remote training, and resource efficiency. © 1970-2012 IEEE.},
  keywords = {Adaptive feedback, Augmented Reality, Immersive learning, Medical education, Metaverses, Performance tracking, Remote resources, Remote training, Resource efficiencies, Training efficiency, Virtual environments},
  pubstate = {published},
  tppubtype = {article}
}
Pielage, L.; Schmidle, P.; Marschall, B.; Risse, B.
Interactive High-Quality Skin Lesion Generation using Diffusion Models for VR-based Dermatological Education Proceedings Article
In: Int Conf Intell User Interfaces Proc IUI, pp. 878–897, Association for Computing Machinery, 2025, ISBN: 979-840071306-4 (ISBN).
Abstract | Links | BibTeX | Tags: Deep learning, Dermatology, Diffusion Model, diffusion models, Digital elevation model, Generative AI, Graphical user interfaces, Guidance Strategies, Guidance strategy, Image generation, Image generations, Inpainting, Interactive Generation, Medical education, Medical Imaging, Simulation training, Skin lesion, Upsampling, Virtual environments, Virtual Reality
@inproceedings{pielage_interactive_2025,
  title = {Interactive High-Quality Skin Lesion Generation using Diffusion Models for {VR}-based Dermatological Education},
  author = {Pielage, L. and Schmidle, P. and Marschall, B. and Risse, B.},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105001923208&doi=10.1145%2f3708359.3712101&partnerID=40&md5=639eec55b08a54ce813f7c1016c621e7},
  doi = {10.1145/3708359.3712101},
  isbn = {979-840071306-4},
  year = {2025},
  date = {2025-01-01},
  booktitle = {Int Conf Intell User Interfaces Proc IUI},
  pages = {878--897},
  publisher = {Association for Computing Machinery},
  abstract = {Malignant melanoma is one of the most lethal forms of cancer when not detected early. As a result, cancer screening programs have been implemented internationally, all of which require visual inspection of skin lesions. Early melanoma detection is a crucial competence in medical and dermatological education, and it is primarily trained using 2D imagery. However, given the intrinsic 3D nature of skin lesions and the importance of incorporating additional contextual information about the patient (e.g., skin type, nearby lesions, etc.), this approach falls short of providing a comprehensive and scalable learning experience. A potential solution is the use of Virtual Reality (VR) scenarios, which can offer an effective strategy to train skin cancer screenings in a realistic 3D setting, thereby enhancing medical students' awareness of early melanoma detection. In this paper, we present a comprehensive pipeline and models for generating malignant melanomas and benign nevi, which can be utilized in VR-based medical training. We use diffusion models for the generation of skin lesions, which we have enhanced with various guiding strategies to give educators maximum flexibility in designing scenarios and seamlessly placing lesions on virtual agents. Additionally, we have developed a tool which comprises a graphical user interface (GUI) enabling the generation of new lesions and adapting existing ones using an intuitive and interactive inpainting strategy. The tool also offers a novel custom upsampling strategy to achieve a sufficient resolution required for diagnostic purposes. The generated skin lesions have been validated in a user study with trained dermatologists, confirming the overall high quality of the generated lesions and the utility for educational purposes. © 2025 Copyright held by the owner/author(s).},
  keywords = {Deep learning, Dermatology, Diffusion Model, diffusion models, Digital elevation model, Generative AI, Graphical user interfaces, Guidance Strategies, Guidance strategy, Image generation, Image generations, Inpainting, Interactive Generation, Medical education, Medical Imaging, Simulation training, Skin lesion, Upsampling, Virtual environments, Virtual Reality},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Zhu, X. T.; Cheerman, H.; Cheng, M.; Kiami, S. R.; Chukoskie, L.; McGivney, E.
Designing VR Simulation System for Clinical Communication Training with LLMs-Based Embodied Conversational Agents Proceedings Article
In: Conf Hum Fact Comput Syst Proc, Association for Computing Machinery, 2025, ISBN: 979-840071395-8 (ISBN).
Abstract | Links | BibTeX | Tags: Clinical communications, Clinical Simulation, Communications training, Curricula, Embodied conversational agent, Embodied Conversational Agents, Health professions, Intelligent virtual agents, Language Model, Medical education, Model-based OPC, Patient simulators, Personnel training, Students, Teaching, User centered design, Virtual environments, Virtual Reality, VR simulation, VR simulation systems
@inproceedings{zhu_designing_2025,
  title = {Designing {VR} Simulation System for Clinical Communication Training with {LLMs}-Based Embodied Conversational Agents},
  author = {Zhu, X. T. and Cheerman, H. and Cheng, M. and Kiami, S. R. and Chukoskie, L. and McGivney, E.},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005754066&doi=10.1145%2f3706599.3719693&partnerID=40&md5=4468fbd54b43d6779259300afd08632e},
  doi = {10.1145/3706599.3719693},
  isbn = {979-840071395-8},
  year = {2025},
  date = {2025-01-01},
  booktitle = {Conf Hum Fact Comput Syst Proc},
  publisher = {Association for Computing Machinery},
  abstract = {VR simulation in Health Professions (HP) education demonstrates huge potential, but fixed learning content with little customization limits its application beyond lab environments. To address these limitations in the context of VR for patient communication training, we conducted a user-centered study involving semi-structured interviews with advanced HP students to understand their challenges in clinical communication training and perceptions of VR-based solutions. From this, we derived design insights emphasizing the importance of realistic scenarios, simple interactions, and unpredictable dialogues. Building on these insights, we developed the Virtual AI Patient Simulator (VAPS), a novel VR system powered by Large Language Models (LLMs) and Embodied Conversational Agents (ECAs), supporting dynamic and customizable patient interactions for immersive learning. We also provided an example of how clinical professors could use user-friendly design forms to create personalized scenarios that align with course objectives in VAPS and discuss future implications of integrating AI-driven technologies into VR education. © 2025 Copyright held by the owner/author(s).},
  keywords = {Clinical communications, Clinical Simulation, Communications training, Curricula, Embodied conversational agent, Embodied Conversational Agents, Health professions, Intelligent virtual agents, Language Model, Medical education, Model-based OPC, Patient simulators, Personnel training, Students, Teaching, User centered design, Virtual environments, Virtual Reality, VR simulation, VR simulation systems},
  pubstate = {published},
  tppubtype = {inproceedings}
}
2024
Harinee, S.; Raja, R. Vimal; Mugila, E.; Govindharaj, I.; Sanjaykumar, V.; Ragavendhiran, T.
Elevating Medical Training: A Synergistic Fusion of AI and VR for Immersive Anatomy Learning and Practical Procedure Mastery Proceedings Article
In: Int. Conf. Syst., Comput., Autom. Netw., ICSCAN, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-833151002-2 (ISBN).
Abstract | Links | BibTeX | Tags: 'current, Anatomy education, Anatomy educations, Computer interaction, Curricula, Embodied virtual assistant, Embodied virtual assistants, Generative AI, Human- Computer Interaction, Immersive, Intelligent virtual agents, Medical computing, Medical education, Medical procedure practice, Medical procedures, Medical training, Personnel training, Students, Teaching, Three dimensional computer graphics, Usability engineering, Virtual assistants, Virtual environments, Virtual Reality, Visualization
@inproceedings{harinee_elevating_2024,
  title = {Elevating Medical Training: A Synergistic Fusion of {AI} and {VR} for Immersive Anatomy Learning and Practical Procedure Mastery},
  author = {Harinee, S. and Raja, R. Vimal and Mugila, E. and Govindharaj, I. and Sanjaykumar, V. and Ragavendhiran, T.},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105000334626&doi=10.1109%2fICSCAN62807.2024.10894451&partnerID=40&md5=100899b489c00335e0a652f2efd33e23},
  doi = {10.1109/ICSCAN62807.2024.10894451},
  isbn = {979-833151002-2},
  year = {2024},
  date = {2024-01-01},
  booktitle = {Int. Conf. Syst., Comput., Autom. Netw., ICSCAN},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract = {Virtual reality with its 3D visualization have brought an overwhelming change in the face of medical education, especially for courses like human anatomy. The proposed virtual reality system to bring massive improvements in the education received by a medical student studying for their degree courses. The project puts forward the text-to-speech and speech-to-text aligned system that simplifies the usage of a chatbot empowered by OpenAI GPT-4 and allows pupils to vocally speak with Avatar, the set virtual assistant. Contrary to the current methodologies, the setup of virtual reality is powered by avatars and thus covers an enhanced virtual assistant environment. Avatars offer students the set of repeated practicing of medical procedures on it, and the real uniqueness in the proposed product. The developed virtual reality environment is enhanced over other current training techniques where a student should interact and immerse in three-dimensional human organs for visualization in three dimensions and hence get better knowledge of the subjects in greater depth. A virtual assistant guides the whole process, giving insights and support to help the student bridge the gap from theory to practice. Then, the system is essentially Knowledge based and Analysis based approach. The combination of generative AI along with embodied virtual agents has great potential when it comes to customized virtual conversation assistant for much wider range of applications. The study brings out the value of acquiring hands-on skills through simulated medical procedures and opens new frontiers of research and development in AI, VR, and medical education. In addition to assessing the effectiveness of such novel functionalities, the study also explores user experience related dimensions such as usability, task loading, and the sense of presence in proposed virtual medical environment. © 2024 IEEE.},
  keywords = {'current, Anatomy education, Anatomy educations, Computer interaction, Curricula, Embodied virtual assistant, Embodied virtual assistants, Generative AI, Human- Computer Interaction, Immersive, Intelligent virtual agents, Medical computing, Medical education, Medical procedure practice, Medical procedures, Medical training, Personnel training, Students, Teaching, Three dimensional computer graphics, Usability engineering, Virtual assistants, Virtual environments, Virtual Reality, Visualization},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Salloum, A.; Alfaisal, R.; Salloum, S. A.
Revolutionizing Medical Education: Empowering Learning with ChatGPT Book Section
In: Studies in Big Data, vol. 144, pp. 79–90, Springer Science and Business Media Deutschland GmbH, 2024, ISBN: 21976503 (ISSN).
Abstract | Links | BibTeX | Tags: Abstracting, AI integration, ChatGPT, Education, Human like, Interactivity, Language Model, Learning platform, Learning platforms, Medical education, Metaverse, Metaverses, Paradigm shifts, Personalizations, Technological advancement
@incollection{salloum_revolutionizing_2024,
  title = {Revolutionizing Medical Education: Empowering Learning with {ChatGPT}},
  author = {Salloum, A. and Alfaisal, R. and Salloum, S. A.},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85191302844&doi=10.1007%2f978-3-031-52280-2_6&partnerID=40&md5=a5325b8e43460906174a3c7a2c383e1a},
  doi = {10.1007/978-3-031-52280-2_6},
  issn = {2197-6503},
  year = {2024},
  date = {2024-01-01},
  booktitle = {Studies in Big Data},
  volume = {144},
  pages = {79--90},
  publisher = {Springer Science and Business Media Deutschland GmbH},
  abstract = {The landscape of medical education is undergoing a paradigm shift driven by technological advancements. This abstract explores the potential of ChatGPT, an advanced AI language model developed by OpenAI, in revolutionizing medical education. ChatGPT’s capacity to understand and generate human-like text opens doors to interactive, personalized, and adaptive learning experiences that address the evolving demands of medical training. Medical education traditionally relies on didactic approaches that often lack interactivity and personalization. ChatGPT addresses this limitation by introducing a conversational AI-driven dimension to medical learning. Learners can engage with ChatGPT in natural language, seeking explanations, asking questions, and clarifying doubts. This adaptive interactivity mirrors the dynamic nature of medical practice and fosters critical thinking skills essential for medical professionals. Furthermore, ChatGPT augments educators’ roles by assisting in content creation, formative assessments, and immediate feedback delivery. This empowers educators to focus on higher-order facilitation and mentorship, enriching the learning journey. However, responsible integration of ChatGPT into medical education demands careful curation of accurate medical content and validation against trusted sources. Ethical considerations related to AI-generated content and potential biases also warrant attention. This abstract underscores the transformative potential of ChatGPT in reshaping medical education. By creating an environment of engagement, adaptability, and personalization, ChatGPT paves the way for a dynamic and empowered medical learning ecosystem that aligns with the demands of modern healthcare. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2024.},
  keywords = {Abstracting, AI integration, ChatGPT, Education, Human like, Interactivity, Language Model, Learning platform, Learning platforms, Medical education, Metaverse, Metaverses, Paradigm shifts, Personalizations, Technological advancement},
  pubstate = {published},
  tppubtype = {incollection}
}
Takata, T.; Yamada, R.; Rene, A. Oliveira Nzinga; Xu, K.; Fujimoto, M.
Development of a Virtual Patient Model for Kampo Medical Interview: New Approach for Enhancing Empathy and Understanding of Kampo Medicine Pathological Concepts Proceedings Article
In: Jt. Int. Conf. Soft Comput. Intell. Syst. Int. Symp. Adv. Intell. Syst., SCIS ISIS, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-835037333-2 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Clinical practices, Clinical training, Complementary and alternative medicines, Covid-19, Diagnosis, Educational approach, Empathy, Kampo medical interview, Medical education, Medical student, Medical students, New approaches, Virtual environments, Virtual patient, Virtual patient models, Virtual patients, Virtual Reality
@inproceedings{takata_development_2024,
  title = {Development of a Virtual Patient Model for {Kampo} Medical Interview: New Approach for Enhancing Empathy and Understanding of {Kampo} Medicine Pathological Concepts},
  author = {Takata, T. and Yamada, R. and Rene, A. Oliveira Nzinga and Xu, K. and Fujimoto, M.},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85214666311&doi=10.1109%2fSCISISIS61014.2024.10759962&partnerID=40&md5=2e149e0fe211f586049914e571c6e2fa},
  doi = {10.1109/SCISISIS61014.2024.10759962},
  isbn = {979-835037333-2},
  year = {2024},
  date = {2024-01-01},
  booktitle = {Jt. Int. Conf. Soft Comput. Intell. Syst. Int. Symp. Adv. Intell. Syst., SCIS ISIS},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract = {Global interest in complementary and alternative medicine has increased in recent years, with Kampo medicine in Japan gaining greater trust and use. Detailed patient interviews are essential in Kampo medicine, as the physician's empathy is critical to diagnostic precision. Typically, medical students develop empathy and deepen their understanding of Kampo's pathological concepts through clinical practice. However, the COVID-19 pandemic has imposed significant restrictions on clinical training. To address this challenge, we propose a novel educational approach to enhance empathy and understanding of Kampo medicine by developing a virtual patient application. This application leverages generative artificial intelligence to simulate realistic patient interactions, enabling students to practice Kampo medical interviews in a safe, controlled environment. The AI-generated conversations are designed to reflect the emotional nuances of real-life dialogue, with the virtual patients' facial expressions synchronized to these emotions, thus enhancing the realism of the training. The suggested method allows repeated practice at any time and fosters the development of essential diag-nostic and empathetic skills. While promising challenges remain in improving these simulations' accuracy, further refinements are still under consideration. © 2024 IEEE.},
  keywords = {Artificial intelligence, Clinical practices, Clinical training, Complementary and alternative medicines, Covid-19, Diagnosis, Educational approach, Empathy, Kampo medical interview, Medical education, Medical student, Medical students, New approaches, Virtual environments, Virtual patient, Virtual patient models, Virtual patients, Virtual Reality},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Chheang, V.; Sharmin, S.; Marquez-Hernandez, R.; Patel, M.; Rajasekaran, D.; Caulfield, G.; Kiafar, B.; Li, J.; Kullu, P.; Barmaki, R. L.
Towards Anatomy Education with Generative AI-based Virtual Assistants in Immersive Virtual Reality Environments Proceedings Article
In: Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR, pp. 21–30, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-835037202-1 (ISBN).
Abstract | Links | BibTeX | Tags: 3-D visualization systems, Anatomy education, Anatomy educations, Cognitive complexity, E-Learning, Embodied virtual assistant, Embodied virtual assistants, Generative AI, generative artificial intelligence, Human computer interaction, human-computer interaction, Immersive virtual reality, Interactive 3d visualizations, Knowledge Management, Medical education, Three dimensional computer graphics, Verbal communications, Virtual assistants, Virtual Reality, Virtual-reality environment
@inproceedings{chheang_towards_2024,
  title = {Towards Anatomy Education with Generative {AI}-based Virtual Assistants in Immersive Virtual Reality Environments},
  author = {Chheang, V. and Sharmin, S. and Marquez-Hernandez, R. and Patel, M. and Rajasekaran, D. and Caulfield, G. and Kiafar, B. and Li, J. and Kullu, P. and Barmaki, R. L.},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85187216893&doi=10.1109%2fAIxVR59861.2024.00011&partnerID=40&md5=33e8744309add5fe400f4f341326505f},
  doi = {10.1109/AIxVR59861.2024.00011},
  isbn = {979-835037202-1},
  year = {2024},
  date = {2024-01-01},
  booktitle = {Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR},
  pages = {21--30},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract = {Virtual reality (VR) and interactive 3D visualization systems have enhanced educational experiences and environments, particularly in complicated subjects such as anatomy education. VR-based systems surpass the potential limitations of traditional training approaches in facilitating interactive engagement among students. However, research on embodied virtual assistants that leverage generative artificial intelligence (AI) and verbal communication in the anatomy education context is underrepresented. In this work, we introduce a VR environment with a generative AI-embodied virtual assistant to support participants in responding to varying cognitive complexity anatomy questions and enable verbal communication. We assessed the technical efficacy and usability of the proposed environment in a pilot user study with 16 participants. We conducted a within-subject design for virtual assistant configuration (avatar- and screen-based), with two levels of cognitive complexity (knowledge- and analysis-based). The results reveal a significant difference in the scores obtained from knowledge- and analysis-based questions in relation to avatar configuration. Moreover, results provide insights into usability, cognitive task load, and the sense of presence in the proposed virtual assistant configurations. Our environment and results of the pilot study offer potential benefits and future research directions beyond medical education, using generative AI and embodied virtual agents as customized virtual conversational assistants. © 2024 IEEE.},
  keywords = {3-D visualization systems, Anatomy education, Anatomy educations, Cognitive complexity, E-Learning, Embodied virtual assistant, Embodied virtual assistants, Generative AI, generative artificial intelligence, Human computer interaction, human-computer interaction, Immersive virtual reality, Interactive 3d visualizations, Knowledge Management, Medical education, Three dimensional computer graphics, Verbal communications, Virtual assistants, Virtual Reality, Virtual-reality environment},
  pubstate = {published},
  tppubtype = {inproceedings}
}