AHCI RESEARCH GROUP
Publications
Papers published in international journals, conference and workshop proceedings, and books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTeX record for each paper.
2024
Harinee, S.; Raja, R. Vimal; Mugila, E.; Govindharaj, I.; Sanjaykumar, V.; Ragavendhiran, T.
Elevating Medical Training: A Synergistic Fusion of AI and VR for Immersive Anatomy Learning and Practical Procedure Mastery Proceedings Article
In: Int. Conf. Syst., Comput., Autom. Netw., ICSCAN, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-833151002-2 (ISBN).
Abstract | Links | BibTeX | Tags: Anatomy education, Anatomy educations, Computer interaction, Curricula, Embodied virtual assistant, Embodied virtual assistants, Generative AI, Human-Computer Interaction, Immersive, Intelligent virtual agents, Medical computing, Medical education, Medical procedure practice, Medical procedures, Medical training, Personnel training, Students, Teaching, Three dimensional computer graphics, Usability engineering, Virtual assistants, Virtual environments, Virtual Reality, Visualization
@inproceedings{harinee_elevating_2024,
  title     = {Elevating Medical Training: A Synergistic Fusion of {AI} and {VR} for Immersive Anatomy Learning and Practical Procedure Mastery},
  author    = {Harinee, S. and Vimal Raja, R. and Mugila, E. and Govindharaj, I. and Sanjaykumar, V. and Ragavendhiran, T.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105000334626&doi=10.1109%2fICSCAN62807.2024.10894451&partnerID=40&md5=100899b489c00335e0a652f2efd33e23},
  doi       = {10.1109/ICSCAN62807.2024.10894451},
  isbn      = {979-833151002-2},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {Int. Conf. Syst., Comput., Autom. Netw., ICSCAN},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {Virtual reality with its 3D visualization have brought an overwhelming change in the face of medical education, especially for courses like human anatomy. The proposed virtual reality system to bring massive improvements in the education received by a medical student studying for their degree courses. The project puts forward the text-to-speech and speech-to-text aligned system that simplifies the usage of a chatbot empowered by OpenAI GPT-4 and allows pupils to vocally speak with Avatar, the set virtual assistant. Contrary to the current methodologies, the setup of virtual reality is powered by avatars and thus covers an enhanced virtual assistant environment. Avatars offer students the set of repeated practicing of medical procedures on it, and the real uniqueness in the proposed product. The developed virtual reality environment is enhanced over other current training techniques where a student should interact and immerse in three-dimensional human organs for visualization in three dimensions and hence get better knowledge of the subjects in greater depth. A virtual assistant guides the whole process, giving insights and support to help the student bridge the gap from theory to practice. Then, the system is essentially Knowledge based and Analysis based approach. The combination of generative AI along with embodied virtual agents has great potential when it comes to customized virtual conversation assistant for much wider range of applications. The study brings out the value of acquiring hands-on skills through simulated medical procedures and opens new frontiers of research and development in AI, VR, and medical education. In addition to assessing the effectiveness of such novel functionalities, the study also explores user experience related dimensions such as usability, task loading, and the sense of presence in proposed virtual medical environment. © 2024 IEEE.},
  keywords  = {Anatomy education, Anatomy educations, Computer interaction, Curricula, Embodied virtual assistant, Embodied virtual assistants, Generative AI, Human-Computer Interaction, Immersive, Intelligent virtual agents, Medical computing, Medical education, Medical procedure practice, Medical procedures, Medical training, Personnel training, Students, Teaching, Three dimensional computer graphics, Usability engineering, Virtual assistants, Virtual environments, Virtual Reality, Visualization},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Chheang, V.; Sharmin, S.; Marquez-Hernandez, R.; Patel, M.; Rajasekaran, D.; Caulfield, G.; Kiafar, B.; Li, J.; Kullu, P.; Barmaki, R. L.
Towards Anatomy Education with Generative AI-based Virtual Assistants in Immersive Virtual Reality Environments Proceedings Article
In: Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR, pp. 21–30, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-835037202-1 (ISBN).
Abstract | Links | BibTeX | Tags: 3-D visualization systems, Anatomy education, Anatomy educations, Cognitive complexity, E-Learning, Embodied virtual assistant, Embodied virtual assistants, Generative AI, generative artificial intelligence, Human computer interaction, human-computer interaction, Immersive virtual reality, Interactive 3d visualizations, Knowledge Management, Medical education, Three dimensional computer graphics, Verbal communications, Virtual assistants, Virtual Reality, Virtual-reality environment
@inproceedings{chheang_towards_2024,
  title     = {Towards Anatomy Education with Generative {AI}-based Virtual Assistants in Immersive Virtual Reality Environments},
  author    = {Chheang, V. and Sharmin, S. and Marquez-Hernandez, R. and Patel, M. and Rajasekaran, D. and Caulfield, G. and Kiafar, B. and Li, J. and Kullu, P. and Barmaki, R. L.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85187216893&doi=10.1109%2fAIxVR59861.2024.00011&partnerID=40&md5=33e8744309add5fe400f4f341326505f},
  doi       = {10.1109/AIxVR59861.2024.00011},
  isbn      = {979-835037202-1},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR},
  pages     = {21--30},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {Virtual reality (VR) and interactive 3D visualization systems have enhanced educational experiences and environments, particularly in complicated subjects such as anatomy education. VR-based systems surpass the potential limitations of traditional training approaches in facilitating interactive engagement among students. However, research on embodied virtual assistants that leverage generative artificial intelligence (AI) and verbal communication in the anatomy education context is underrepresented. In this work, we introduce a VR environment with a generative AI-embodied virtual assistant to support participants in responding to varying cognitive complexity anatomy questions and enable verbal communication. We assessed the technical efficacy and usability of the proposed environment in a pilot user study with 16 participants. We conducted a within-subject design for virtual assistant configuration (avatar- and screen-based), with two levels of cognitive complexity (knowledge- and analysis-based). The results reveal a significant difference in the scores obtained from knowledge- and analysis-based questions in relation to avatar configuration. Moreover, results provide insights into usability, cognitive task load, and the sense of presence in the proposed virtual assistant configurations. Our environment and results of the pilot study offer potential benefits and future research directions beyond medical education, using generative AI and embodied virtual agents as customized virtual conversational assistants. © 2024 IEEE.},
  keywords  = {3-D visualization systems, Anatomy education, Anatomy educations, Cognitive complexity, E-Learning, Embodied virtual assistant, Embodied virtual assistants, Generative AI, generative artificial intelligence, Human-Computer Interaction, Immersive virtual reality, Interactive 3d visualizations, Knowledge Management, Medical education, Three dimensional computer graphics, Verbal communications, Virtual assistants, Virtual Reality, Virtual-reality environment},
  pubstate  = {published},
  tppubtype = {inproceedings}
}