AHCI RESEARCH GROUP
Publications
Papers published in international journals,
proceedings of conferences, workshops and books.
OUR RESEARCH
Scientific Publications
2025
Li, Y.; Pang, E. C. H.; Ng, C. S. Y.; Azim, M.; Leung, H.
Enhancing Linear Algebra Education with AI-Generated Content in the CityU Metaverse: A Comparative Study Proceedings Article
In: Hao, T.; Wu, J.G.; Luo, X.; Sun, Y.; Mu, Y.; Ge, S.; Xie, W. (Ed.): Lect. Notes Comput. Sci., pp. 3–16, Springer Science and Business Media Deutschland GmbH, 2025, ISSN: 0302-9743; ISBN: 978-981-96-4406-3.
Abstract | Links | BibTeX | Tags: Comparatives studies, Digital age, Digital interactions, digital twin, Educational metaverse, Engineering education, Generative AI, Immersive, Matrix algebra, Metaverse, Metaverses, Personnel training, Students, Teaching, University campus, Virtual environments, virtual learning environment, Virtual learning environments, Virtual Reality, Virtualization
@inproceedings{li_enhancing_2025,
title = {Enhancing Linear Algebra Education with AI-Generated Content in the CityU Metaverse: A Comparative Study},
author = {Y. Li and E. C. H. Pang and C. S. Y. Ng and M. Azim and H. Leung},
editor = {Hao T. and Wu J.G. and Luo X. and Sun Y. and Mu Y. and Ge S. and Xie W.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105003632691&doi=10.1007%2f978-981-96-4407-0_1&partnerID=40&md5=c067ba5d4c15e9c0353bf315680531fc},
doi = {10.1007/978-981-96-4407-0_1},
issn = {0302-9743},
isbn = {978-981-96-4406-3},
year = {2025},
date = {2025-01-01},
booktitle = {Lect. Notes Comput. Sci.},
volume = {15589 LNCS},
pages = {3–16},
publisher = {Springer Science and Business Media Deutschland GmbH},
abstract = {In today’s digital age, the metaverse is emerging as the forthcoming evolution of the internet. It provides an immersive space that marks a new frontier in the way digital interactions are facilitated and experienced. In this paper, we present the CityU Metaverse, which aims to construct a digital twin of our university campus. It is designed as an educational virtual world where learning applications can be embedded in this virtual campus, supporting not only remote and collaborative learning but also professional technical training to enhance educational experiences through immersive and interactive learning. To evaluate the effectiveness of this educational metaverse, we conducted an experiment focused on 3D linear transformation in linear algebra, with teaching content generated by generative AI, comparing our metaverse system with traditional teaching methods. Knowledge tests and surveys assessing learning interest revealed that students engaged with the CityU Metaverse, facilitated by AI-generated content, outperformed those in traditional settings and reported greater enjoyment during the learning process. The work provides valuable perspectives on the behaviors and interactions within the metaverse by analyzing user preferences and learning outcomes. © The Author(s), under exclusive license to Springer Nature Singapore Pte Ltd. 2025.},
keywords = {Comparatives studies, Digital age, Digital interactions, digital twin, Educational metaverse, Engineering education, Generative AI, Immersive, Matrix algebra, Metaverse, Metaverses, Personnel training, Students, Teaching, University campus, Virtual environments, virtual learning environment, Virtual learning environments, Virtual Reality, Virtualization},
pubstate = {published},
tppubtype = {inproceedings}
}
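For readers who want a concrete sense of the 3D linear transformations this lesson visualizes, the short numpy sketch below applies a rotation-plus-scaling matrix to the vertices of a unit cube. It is an illustrative example only, not code from the paper; the angle and scale factors are arbitrary.

import numpy as np

theta = np.deg2rad(45)                                   # arbitrary rotation angle
Rz = np.array([[np.cos(theta), -np.sin(theta), 0.0],
               [np.sin(theta),  np.cos(theta), 0.0],
               [0.0,            0.0,           1.0]])    # rotation about the z-axis
S = np.diag([2.0, 1.0, 0.5])                             # anisotropic scaling

# Vertices of the unit cube, one per row
cube = np.array([[x, y, z] for x in (0, 1) for y in (0, 1) for z in (0, 1)], dtype=float)
transformed = cube @ (S @ Rz).T                          # apply the combined linear map to every vertex
print(transformed.round(3))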
Sousa, R. T.; Oliveira, E. A. M.; Cintra, L. M. F.; Galvão Filho, A. R. G.
Transformative Technologies for Rehabilitation: Leveraging Immersive and AI-Driven Solutions to Reduce Recidivism and Promote Decent Work Proceedings Article
In: Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW, pp. 168–171, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331514846 (ISBN).
Abstract | Links | BibTeX | Tags: AI- Driven Rehabilitation, Artificial intelligence- driven rehabilitation, Emotional intelligence, Engineering education, Generative AI, generative artificial intelligence, Immersive, Immersive technologies, Immersive Technology, Language Model, Large language model, large language models, Skills development, Social Reintegration, Social skills, Sociology, Vocational training
@inproceedings{sousa_transformative_2025,
title = {Transformative Technologies for Rehabilitation: Leveraging Immersive and AI-Driven Solutions to Reduce Recidivism and Promote Decent Work},
author = {R. T. Sousa and E. A. M. Oliveira and L. M. F. Cintra and A. R. G. Galvão Filho},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005140551&doi=10.1109%2FVRW66409.2025.00042&partnerID=40&md5=a8dbe15493fd8361602d049f2b09efe3},
doi = {10.1109/VRW66409.2025.00042},
isbn = {9798331514846 (ISBN)},
year = {2025},
date = {2025-01-01},
booktitle = {Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW},
pages = {168–171},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {The reintegration of incarcerated individuals into society presents significant challenges, particularly in addressing barriers related to vocational training, social skill development, and emotional rehabilitation. Immersive technologies, such as Virtual Reality and Augmented Reality, combined with generative Artificial Intelligence (AI) and Large Language Models, offer innovative opportunities to enhance these areas. These technologies create practical, controlled environments for skill acquisition and behavioral training, while generative AI enables dynamic, personalized, and adaptive experiences. This paper explores the broader potential of these integrated technologies in supporting rehabilitation, reducing recidivism, and fostering sustainable employment opportunities and these initiatives align with the overarching equity objective of ensuring Decent Work for All, reinforcing the commitment to inclusive and equitable progress across diverse communities, through the transformative potential of immersive and AI-driven systems in correctional systems. © 2025 Elsevier B.V., All rights reserved.},
keywords = {AI- Driven Rehabilitation, Artificial intelligence- driven rehabilitation, Emotional intelligence, Engineering education, Generative AI, generative artificial intelligence, Immersive, Immersive technologies, Immersive Technology, Language Model, Large language model, large language models, Skills development, Social Reintegration, Social skills, Sociology, Vocational training},
pubstate = {published},
tppubtype = {inproceedings}
}
Lau, K. H. C.; Bozkir, E.; Gao, H.; Kasneci, E.
Evaluating Usability and Engagement of Large Language Models in Virtual Reality for Traditional Scottish Curling Proceedings Article
In: Del Bue, A.; Canton, C.; Pont-Tuset, J.; Tommasi, T. (Ed.): Lect. Notes Comput. Sci., pp. 177–195, Springer Science and Business Media Deutschland GmbH, 2025, ISSN: 0302-9743; ISBN: 978-3-031-91571-0.
Abstract | Links | BibTeX | Tags: Chatbots, Cultural heritages, Digital Cultural Heritage, Digital cultural heritages, Educational robots, Engineering education, Heritage education, Historic Preservation, Language Model, Large language model, large language models, Learning outcome, Model-based OPC, Usability engineering, User Engagement, Virtual Reality, Virtual-reality environment, Virtualization
@inproceedings{lau_evaluating_2025,
title = {Evaluating Usability and Engagement of Large Language Models in Virtual Reality for Traditional Scottish Curling},
author = {K. H. C. Lau and E. Bozkir and H. Gao and E. Kasneci},
editor = {Del Bue A. and Canton C. and Pont-Tuset J. and Tommasi T.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105006905979&doi=10.1007%2f978-3-031-91572-7_11&partnerID=40&md5=8a81fb09ff54e57b9429660a8898149a},
doi = {10.1007/978-3-031-91572-7_11},
issn = {0302-9743},
isbn = {978-3-031-91571-0},
year = {2025},
date = {2025-01-01},
booktitle = {Lect. Notes Comput. Sci.},
volume = {15628 LNCS},
pages = {177–195},
publisher = {Springer Science and Business Media Deutschland GmbH},
abstract = {This paper explores the innovative application of Large Language Models (LLMs) in Virtual Reality (VR) environments to promote heritage education, focusing on traditional Scottish curling presented in the game “Scottish Bonspiel VR”. Our study compares the effectiveness of LLM-based chatbots with pre-defined scripted chatbots, evaluating key criteria such as usability, user engagement, and learning outcomes. The results show that LLM-based chatbots significantly improve interactivity and engagement, creating a more dynamic and immersive learning environment. This integration helps document and preserve cultural heritage and enhances dissemination processes, which are crucial for safeguarding intangible cultural heritage (ICH) amid environmental changes. Furthermore, the study highlights the potential of novel technologies in education to provide immersive experiences that foster a deeper appreciation of cultural heritage. These findings support the wider application of LLMs and VR in cultural education to address global challenges and promote sustainable practices to preserve and enhance cultural heritage. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2025.},
keywords = {Chatbots, Cultural heritages, Digital Cultural Heritage, Digital cultural heritages, Educational robots, Engineering education, Heritage education, Historic Preservation, Language Model, Large language model, large language models, Learning outcome, Model-based OPC, Usability engineering, User Engagement, Virtual Reality, Virtual-reality environment, Virtualization},
pubstate = {published},
tppubtype = {inproceedings}
}
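The comparison in this study amounts to replacing a scripted response table with a live LLM call. The sketch below illustrates that swap in Python; it is not the authors' code, and the model name, prompts, and example content are assumptions (it uses the openai>=1.0 client, which needs an OPENAI_API_KEY).

from openai import OpenAI    # assumed: openai>=1.0 Python client

SCRIPTED = {
    "what is a bonspiel": "A bonspiel is a curling tournament, traditionally held on a frozen loch.",
}

def scripted_reply(question: str) -> str:
    # Pre-defined chatbot: fixed answers, falls back to a canned response.
    return SCRIPTED.get(question.lower().strip(" ?!"), "I'm not sure - try asking about the stones or the rules.")

def llm_reply(question: str) -> str:
    # LLM-based chatbot: generates an answer in character for the VR guide.
    client = OpenAI()
    resp = client.chat.completions.create(
        model="gpt-4o-mini",   # placeholder model name, not specified in this listing
        messages=[
            {"role": "system", "content": "You are a friendly guide inside a VR game about traditional Scottish curling."},
            {"role": "user", "content": question},
        ],
    )
    return resp.choices[0].message.content

print(scripted_reply("What is a bonspiel?"))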
Yokoyama, N.; Kimura, R.; Nakajima, T.
ViGen: Defamiliarizing Everyday Perception for Discovering Unexpected Insights Proceedings Article
In: Degen, H.; Ntoa, S. (Ed.): Lect. Notes Comput. Sci., pp. 397–417, Springer Science and Business Media Deutschland GmbH, 2025, ISSN: 0302-9743; ISBN: 978-3-031-93417-9.
Abstract | Links | BibTeX | Tags: Artful Expression, Artistic technique, Augmented Reality, Daily lives, Defamiliarization, Dynamic environments, Engineering education, Enhanced vision systems, Generative AI, generative artificial intelligence, Human augmentation, Human engineering, Human-AI Interaction, Human-artificial intelligence interaction, Semi-transparent
@inproceedings{yokoyama_vigen_2025,
title = {ViGen: Defamiliarizing Everyday Perception for Discovering Unexpected Insights},
author = {N. Yokoyama and R. Kimura and T. Nakajima},
editor = {Degen H. and Ntoa S.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105007760030&doi=10.1007%2f978-3-031-93418-6_26&partnerID=40&md5=dee6f54688284313a45579aab5f934d6},
doi = {10.1007/978-3-031-93418-6_26},
issn = {0302-9743},
isbn = {978-3-031-93417-9},
year = {2025},
date = {2025-01-01},
booktitle = {Lect. Notes Comput. Sci.},
volume = {15821 LNAI},
pages = {397–417},
publisher = {Springer Science and Business Media Deutschland GmbH},
abstract = {This paper proposes ViGen, an Augmented Reality (AR) and Artificial Intelligence (AI)-enhanced vision system designed to facilitate defamiliarization in daily life. Humans rely on sight to gather information, think, and act, yet the act of seeing often becomes passive in daily life. Inspired by Victor Shklovsky’s concept of defamiliarization and the artistic technique of photomontage, ViGen seeks to disrupt habitual perceptions. It achieves this by overlaying semi-transparent, AI-generated images, created based on the user’s view, through an AR display. The system is evaluated by several structured interviews, in which participants experience ViGen in three different scenarios. Results indicate that AI-generated visuals effectively supported defamiliarization by transforming ordinary scenes into unfamiliar ones. However, the user’s familiarity with a place plays a significant role. Also, while the feature that adjusts the transparency of overlaid images enhances safety, its limitations in dynamic environments suggest the need for further research across diverse cultural and geographic contexts. This study demonstrates the potential of AI-augmented vision systems to stimulate new ways of seeing, offering insights for further development in visual augmentation technologies. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2025.},
keywords = {Artful Expression, Artistic technique, Augmented Reality, Daily lives, Defamiliarization, Dynamic environments, Engineering education, Enhanced vision systems, Generative AI, generative artificial intelligence, Human augmentation, Human engineering, Human-AI Interaction, Human-artificial intelligence interaction, Semi-transparent},
pubstate = {published},
tppubtype = {inproceedings}
}
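The core rendering step ViGen describes, overlaying a semi-transparent AI-generated image on the user's view with adjustable transparency, can be sketched in a few lines of Pillow. This is a minimal stand-in for illustration; the file names and alpha value are placeholders, not details from the paper.

from PIL import Image

def defamiliarize(camera_frame_path: str, generated_path: str, alpha: float = 0.4) -> Image.Image:
    # alpha = 0 keeps the real scene, alpha = 1 shows only the generated image.
    base = Image.open(camera_frame_path).convert("RGBA")
    overlay = Image.open(generated_path).convert("RGBA").resize(base.size)
    return Image.blend(base, overlay, alpha)

blended = defamiliarize("camera_frame.png", "ai_overlay.png")
blended.save("defamiliarized_view.png")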
Weerasinghe, M.; Kljun, M.; Pucihar, K. Č.
A Cross-Device Interaction with the Smartphone and HMD for Vocabulary Learning Proceedings Article
In: Zaina, L.; Campos, J.C.; Spano, D.; Luyten, K.; Palanque, P.; Veer, G.; Ebert, A.; Humayoun, S.R.; Memmesheimer, V. (Ed.): Lect. Notes Comput. Sci., pp. 269–282, Springer Science and Business Media Deutschland GmbH, 2025, ISSN: 0302-9743; ISBN: 978-3-031-91759-2.
Abstract | Links | BibTeX | Tags: Augmented Reality, Context-based, Context-based vocabulary learning, Cross-reality interaction, Engineering education, Head-mounted displays, Head-mounted-displays, Images synthesis, Keyword method, Mixed reality, Smart phones, Smartphones, Students, Text-to-image synthesis, Visualization, Vocabulary learning
@inproceedings{weerasinghe_cross-device_2025,
title = {A Cross-Device Interaction with the Smartphone and HMD for Vocabulary Learning},
author = {M. Weerasinghe and M. Kljun and K. Č. Pucihar},
editor = {Zaina L. and Campos J.C. and Spano D. and Luyten K. and Palanque P. and Veer G. and Ebert A. and Humayoun S.R. and Memmesheimer V.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105007828696&doi=10.1007%2f978-3-031-91760-8_18&partnerID=40&md5=4ebf202715ba880dcfeb3232dba7e2c4},
doi = {10.1007/978-3-031-91760-8_18},
issn = {0302-9743},
isbn = {978-3-031-91759-2},
year = {2025},
date = {2025-01-01},
booktitle = {Lect. Notes Comput. Sci.},
volume = {15518 LNCS},
pages = {269–282},
publisher = {Springer Science and Business Media Deutschland GmbH},
abstract = {Cross-reality (XR) systems facilitate interaction between devices with differing levels of virtual content. By engaging with a variety of such devices, XR systems offer the flexibility to choose the most suitable modality for a specific task or context. This capability enables rich applications in training and education, including vocabulary learning. Vocabulary acquisition is a vital part of language learning, employing techniques such as words rehearsing, flashcards, labelling environments with post-it notes, and mnemonic strategies such as the keyword method. Traditional mnemonics typically rely on visual stimuli or mental visualisations. Recent research highlights that AR can enhance vocabulary learning by combining real objects with augmented stimuli such as in labelling environments. Additionally, advancements in generative AI now enable high-quality, synthetically generated images from text descriptions, facilitating externalisation of personalised visual stimuli of mental visualisations. However, creating interfaces for effective real-world augmentation remains challenging, particularly given the limited text input capabilities of Head-Mounted Displays (HMDs). This work presents an XR system that combines smartphones and HMDs by leveraging Augmented Reality (AR) for contextually relevant information and a smartphone for efficient text input. The system enables users to visually annotate objects with personalised images of keyword associations generated with DALL-E 2. To evaluate the system, we conducted a user study with 16 university graduate students, assessing both usability and overall user experience. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2025.},
keywords = {Augmented Reality, Context-based, Context-based vocabulary learning, Cross-reality interaction, Engineering education, Head-mounted displays, Head-mounted-displays, Images synthesis, Keyword method, Mixed reality, Smart phones, Smartphones, Students, Text-to-image synthesis, Visualization, Vocabulary learning},
pubstate = {published},
tppubtype = {inproceedings}
}
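As a rough illustration of the image-generation step described above (keyword-association images produced by DALL-E 2 from text typed on the smartphone), the sketch below calls the OpenAI images endpoint. The prompt template, image size, and example words are assumptions, not the authors' implementation.

from openai import OpenAI    # assumed: openai>=1.0 client with an OPENAI_API_KEY set

def keyword_image(word: str, keyword: str, association: str) -> str:
    prompt = f"A vivid, memorable scene linking the word '{word}' with '{keyword}': {association}"
    client = OpenAI()
    resp = client.images.generate(model="dall-e-2", prompt=prompt, n=1, size="512x512")
    return resp.data[0].url   # image URL that the HMD app would anchor on the labelled object

print(keyword_image("Haus (German for house)", "mouse", "a mouse building a tiny house"))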
Logothetis, I.; Diakogiannis, K.; Vidakis, N.
Interactive Learning Through Conversational Avatars and Immersive VR: Enhancing Diabetes Education and Self-Management Proceedings Article
In: Fang, X. (Ed.): Lect. Notes Comput. Sci., pp. 415–429, Springer Science and Business Media Deutschland GmbH, 2025, ISSN: 0302-9743; ISBN: 978-3-031-92577-1.
Abstract | Links | BibTeX | Tags: Artificial intelligence, Chronic disease, Computer aided instruction, Diabetes Education, Diagnosis, E-Learning, Education management, Engineering education, Gamification, Immersive virtual reality, Interactive computer graphics, Interactive learning, Large population, Learning systems, NUI, Self management, Serious game, Serious games, simulation, Virtual Reality
@inproceedings{logothetis_interactive_2025,
title = {Interactive Learning Through Conversational Avatars and Immersive VR: Enhancing Diabetes Education and Self-Management},
author = {I. Logothetis and K. Diakogiannis and N. Vidakis},
editor = {Fang X.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105008266480&doi=10.1007%2f978-3-031-92578-8_27&partnerID=40&md5=451274dfa3ef0b3f1b39c7d5a665ee3b},
doi = {10.1007/978-3-031-92578-8_27},
issn = {0302-9743},
isbn = {978-3-031-92577-1},
year = {2025},
date = {2025-01-01},
booktitle = {Lect. Notes Comput. Sci.},
volume = {15816 LNCS},
pages = {415–429},
publisher = {Springer Science and Business Media Deutschland GmbH},
abstract = {Diabetes is a chronic disease affecting a large population of the world. Education and self-management of diabetes are crucial. Technologies such as Virtual Reality (VR) have presented promising results in healthcare education, while studies suggest that Artificial Intelligence (AI) can help in learning by further engaging the learner. This study aims to educate users on the entire routine of managing diabetes. The serious game utilizes VR for realistic interaction with diabetes tools and generative AI through a conversational avatar that acts as an assistant instructor. In this way, it allows users to practice diagnostic and therapeutic interventions in a controlled virtual environment, helping to build their understanding and confidence in diabetes management. To measure the effects of the proposed serious game, presence, and perceived agency were measured. Preliminary results indicate that this setup aids in the engagement and immersion of learners, while the avatar can provide helpful information during gameplay. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2025.},
keywords = {Artificial intelligence, Chronic disease, Computer aided instruction, Diabetes Education, Diagnosis, E-Learning, Education management, Engineering education, Gamification, Immersive virtual reality, Interactive computer graphics, Interactive learning, Large population, Learning systems, NUI, Self management, Serious game, Serious games, simulation, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Monjoree, U.; Yan, W.
Assessing AI Models' Spatial Visualization in PSVT:R and Augmented Reality: Towards Enhancing AI's Spatial Intelligence Proceedings Article
In: pp. 727–734, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331524005 (ISBN).
Abstract | Links | BibTeX | Tags: 3D modeling, Architecture engineering, Artificial intelligence, Augmented Reality, Construction science, Engineering education, Engineering science, Generative AI, generative artificial intelligence, Image processing, Intelligence models, Linear transformations, Medicine, Rotation, Rotation process, Spatial Intelligence, Spatial rotation, Spatial visualization, Three dimensional computer graphics, Three dimensional space, Visualization
@inproceedings{monjoree_assessing_2025,
title = {Assessing AI Models' Spatial Visualization in PSVT:R and Augmented Reality: Towards Enhancing AI's Spatial Intelligence},
author = {U. Monjoree and W. Yan},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105011255775&doi=10.1109%2FCAI64502.2025.00131&partnerID=40&md5=0bd551863839b3025898e55265403969},
doi = {10.1109/CAI64502.2025.00131},
isbn = {9798331524005 (ISBN)},
year = {2025},
date = {2025-01-01},
pages = {727–734},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Spatial intelligence is important in many fields, such as Architecture, Engineering, and Construction (AEC), Science, Technology, Engineering, and Mathematics (STEM), and Medicine. Understanding three-dimensional (3D) spatial rotations can involve verbal descriptions and visual or interactive examples, illustrating how objects move and change orientation in 3D space. Recent studies show that artificial intelligence (AI) with language and vision capabilities still faces limitations in spatial reasoning. In this paper, we have studied the spatial capabilities of advanced generative AI to understand the rotations of objects in 3D space utilizing its image processing and language processing features. We examined the spatial intelligence of three generative AI models (GPT-4, Gemini 1.5 Pro, and Llama 3.2) to understand the spatial rotation process with spatial rotation diagrams based on the revised Purdue Spatial Visualization Test: Visualization of Rotations (Revised PSVT:R). Furthermore, we incorporated an added layer of a coordinate system axes on Revised PSVT:R to study the variations in generative AI models' performance. We additionally examined generative AI models' understanding of 3D rotations in Augmented Reality (AR) scene images that visualize spatial rotations of a physical object in 3D space and observed an increased accuracy of generative AI models' understanding of rotations by adding additional textual information depicting the rotation process or mathematical representations of the rotation (e.g., matrices) superimposed on the object. The results indicate that while GPT-4, Gemini 1.5 Pro, and Llama 3.2 as the main current generative AI model lack the understanding of a spatial rotation process, it has the potential to understand the rotation process with additional information that can be provided by methods such as AR. AR can superimpose textual information or mathematical representations of rotations on spatial transformation diagrams and create a more intelligible input for AI to comprehend or for training AI's spatial intelligence. Furthermore, by combining the potentials in spatial intelligence of AI with AR's interactive visualization abilities, we expect to offer enhanced guidance for students' spatial learning activities. Such spatial guidance can greatly benefit understanding spatial transformations and additionally support processes like assembly, construction, manufacturing, as well as learning in AEC, STEM, and Medicine that require precise 3D spatial understanding. © 2025 Elsevier B.V., All rights reserved.},
keywords = {3D modeling, Architecture engineering, Artificial intelligence, Augmented Reality, Construction science, Engineering education, Engineering science, Generative AI, generative artificial intelligence, Image processing, Intelligence models, Linear transformations, Medicine, Rotation, Rotation process, Spatial Intelligence, Spatial rotation, Spatial visualization, Three dimensional computer graphics, Three dimensional space, Visualization},
pubstate = {published},
tppubtype = {inproceedings}
}
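The paper's intervention is to superimpose a textual or matrix description of the depicted rotation on the PSVT:R or AR image before handing it to the model. The SciPy sketch below shows what composing such a rotation and formatting its matrix as a textual annotation could look like; the specific axes and angles are arbitrary, not taken from the test items.

from scipy.spatial.transform import Rotation as R

step1 = R.from_euler("x", 90, degrees=True)      # first rotation: 90 deg about x
step2 = R.from_euler("z", -90, degrees=True)     # second rotation: -90 deg about z
combined = step2 * step1                         # composition (applied right to left)

matrix = combined.as_matrix().round(0)
annotation = "Rotation matrix:\n" + "\n".join("  ".join(f"{v:+.0f}" for v in row) for row in matrix)
print(annotation)                                # text that could be superimposed on the AR scene image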
Juarez, A.; Rábago, J.; Pliego, A.; Salazar, G.; Hinrichsen, C.; Castro, M.; Pachajoa, T.
Innovative Methodology for the Integration of Emerging Technologies in Global Education: Mixed Realities, AI, Metaverse, and SDGs Proceedings Article
In: Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798350355239 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Arts computing, Collaborative learning, E-Learning, Education computing, Educational Innovation, Educational innovations, Educational Technology, Emerging technologies, Engineering education, Global education, High educations, higher education, Innovative methodologies, Me-xico, Metaverse, Metaverses, Mixed Realities, Mixed reality, Product design, Sebastian, Social aspects, Students, Sustainable development, Sustainable Development Goals, Teaching, Technical skills
@inproceedings{juarez_innovative_2025,
title = {Innovative Methodology for the Integration of Emerging Technologies in Global Education: Mixed Realities, AI, Metaverse, and SDGs},
author = {A. Juarez and J. Rábago and A. Pliego and G. Salazar and C. Hinrichsen and M. Castro and T. Pachajoa},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105011951378&doi=10.1109%2FIFE63672.2025.11024834&partnerID=40&md5=4e101ad649487ce729c3a5fa9e875559},
doi = {10.1109/IFE63672.2025.11024834},
isbn = {9798350355239 (ISBN)},
year = {2025},
date = {2025-01-01},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {The academic collaboration among Tecnologico de Monterrey (Mexico), the University of San Sebastián (Chile), and the Catholic University of Colombia was an innovative effort to transform the teaching of the "Formal Representation of Space" through the use of emerging technologies. This project was based on the convergence of the theory of Community of Inquiry (CoI), International Collaborative Online Learning (COIL), and the integration of Mixed Realities, Metaverse, and generative artificial intelligence. The central objective of this collaboration was to improve the technical and creative skills of students of architecture, industrial design, digital art, communication, and music production through a pedagogical approach that utilizes 3D spatial visualization and intercultural interaction. The use of the Tec Virtual Campus's Metaverse and the Global Classroom program was instrumental in facilitating real-time collaboration among students from different countries, allowing for the creation of joint projects that reflect a deep understanding of the Sustainable Development Goals (SDGs). This effort resulted in an advanced methodology that improves students' technical skills and promotes a meaningful global commitment to sustainability and social responsibility, reflecting the transformative power of international collaborative education. © 2025 Elsevier B.V., All rights reserved.},
keywords = {Artificial intelligence, Arts computing, Collaborative learning, E-Learning, Education computing, Educational Innovation, Educational innovations, Educational Technology, Emerging technologies, Engineering education, Global education, High educations, higher education, Innovative methodologies, Me-xico, Metaverse, Metaverses, Mixed Realities, Mixed reality, Product design, Sebastian, Social aspects, Students, Sustainable development, Sustainable Development Goals, Teaching, Technical skills},
pubstate = {published},
tppubtype = {inproceedings}
}
López-Ozieblo, R.; Jiandong, D. S.; Techanamurthy, U.; Geng, H.; Nurgissayeva, A.
Enhancing AI Literacy through Immersive VR: Evaluating Pedagogical Design and GenAI Integration Proceedings Article
In: pp. 718–723, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331511661 (ISBN).
Abstract | Links | BibTeX | Tags: AI Literacy, Artificial intelligence, Behavioral Research, Classlet platform, E-Learning, Educational settings, Emerging technologies, Engineering education, Experiential learning, GenAI avatar, GenAI Avatars, Immersive virtual reality, Interactive computer graphics, Pedagogical designs, Pedagogical Innovation, Regression analysis, Teaching, Virtual Reality, Virtual-reality environment
@inproceedings{lopez-ozieblo_enhancing_2025,
title = {Enhancing AI Literacy through Immersive VR: Evaluating Pedagogical Design and GenAI Integration},
author = {R. López-Ozieblo and D. S. Jiandong and U. Techanamurthy and H. Geng and A. Nurgissayeva},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105013538409&doi=10.1109%2FCSTE64638.2025.11092268&partnerID=40&md5=a963d754ceaa73f360d9678d346a7686},
doi = {10.1109/CSTE64638.2025.11092268},
isbn = {9798331511661 (ISBN)},
year = {2025},
date = {2025-01-01},
pages = {718–723},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {As AI continues to reshape industries, enhancing AI literacy is crucial for empowering learners to interact confidently and critically with emerging technologies. Virtual Reality (VR) offers a way to bridge theoretical knowledge with practical application but integrating VR into educational settings struggles with technical and pedagogical challenges. This study investigates how immersive VR environments can be optimized to enhance AI literacy and identifies key factors driving students' intent to adopt these technologies. Using Classlet - a VR platform that integrates interactive multimodal tasks, narrative-driven activities, and GenAI avatar interactions - we created a virtual office where learners engaged in research tasks and simulation scenarios with instructor-customized prompts. Our mixed-methods approach, involving participants from Hong Kong and Malaysia, focused on AI literacy within contexts such as Fast Fashion and European society. Regression analyses revealed that overall intent is strongly predicted by composite enjoyment, perceived performance, and behavioral control (R2 = 0.803). Post-AI literacy self-assessments were predicted by AI self-efficacy and enjoyment ( R2 = 0.421). However, female participants reported lower scores on AI efficacy (p = 0.042), suggesting baseline differences that warrant further investigation. Qualitative insights show the immersive and engaging nature of the experience while highlighting the need for further GenAI prompt designs for elaborative and bidirectional interactions. © 2025 Elsevier B.V., All rights reserved.},
keywords = {AI Literacy, Artificial intelligence, Behavioral Research, Classlet platform, E-Learning, Educational settings, Emerging technologies, Engineering education, Experiential learning, GenAI avatar, GenAI Avatars, Immersive virtual reality, Interactive computer graphics, Pedagogical designs, Pedagogical Innovation, Regression analysis, Teaching, Virtual Reality, Virtual-reality environment},
pubstate = {published},
tppubtype = {inproceedings}
}
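The regression results quoted above (e.g., overall intent predicted from composite enjoyment, perceived performance, and behavioral control with R2 = 0.803) correspond to an ordinary least squares model. The sketch below shows that kind of analysis with statsmodels on clearly labelled placeholder data; it reproduces the method, not the study's numbers.

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)                      # placeholder data, NOT the study's
n = 60
enjoyment   = rng.normal(4.0, 0.8, n)
performance = rng.normal(3.5, 0.9, n)
control     = rng.normal(3.8, 0.7, n)
intent = 0.5 * enjoyment + 0.3 * performance + 0.2 * control + rng.normal(0, 0.3, n)

X = sm.add_constant(np.column_stack([enjoyment, performance, control]))
fit = sm.OLS(intent, X).fit()
print(fit.rsquared)                                 # analogous to the composite R-squared reported
print(fit.params)                                   # intercept and coefficients for the three predictors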
Tomkou, D.; Fatouros, G.; Andreou, A.; Makridis, G.; Liarokapis, F.; Dardanis, D.; Kiourtis, A.; Soldatos, J.; Kyriazis, D.
Bridging Industrial Expertise and XR with LLM-Powered Conversational Agents Proceedings Article
In: pp. 1050–1056, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331543723 (ISBN).
Abstract | Links | BibTeX | Tags: Air navigation, Conversational Agents, Conversational AI, Embeddings, Engineering education, Extended reality, Knowledge Management, Knowledge transfer, Language Model, Large language model, large language models, Personnel training, Remote Assistance, Retrieval-Augmented Generation, Robotics, Semantics, Smart manufacturing
@inproceedings{tomkou_bridging_2025,
title = {Bridging Industrial Expertise and XR with LLM-Powered Conversational Agents},
author = {D. Tomkou and G. Fatouros and A. Andreou and G. Makridis and F. Liarokapis and D. Dardanis and A. Kiourtis and J. Soldatos and D. Kyriazis},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105013837767&doi=10.1109%2FDCOSS-IoT65416.2025.00158&partnerID=40&md5=45e35086d8be9d3e16afeade6598d238},
doi = {10.1109/DCOSS-IoT65416.2025.00158},
isbn = {9798331543723 (ISBN)},
year = {2025},
date = {2025-01-01},
pages = {1050–1056},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {This paper introduces a novel integration of Retrieval-Augmented Generation (RAG) enhanced Large Language Models (LLMs) with Extended Reality (XR) technologies to address knowledge transfer challenges in industrial environments. The proposed system embeds domain-specific industrial knowledge into XR environments through a natural language interface, enabling hands-free, context-aware expert guidance for workers. We present the architecture of the proposed system consisting of an LLM Chat Engine with dynamic tool orchestration and an XR application featuring voice-driven interaction. Performance evaluation of various chunking strategies, embedding models, and vector databases reveals that semantic chunking, balanced embedding models, and efficient vector stores deliver optimal performance for industrial knowledge retrieval. The system's potential is demonstrated through early implementation in multiple industrial use cases, including robotic assembly, smart infrastructure maintenance, and aerospace component servicing. Results indicate potential for enhancing training efficiency, remote assistance capabilities, and operational guidance in alignment with Industry 5.0's human-centric and resilient approach to industrial development. © 2025 Elsevier B.V., All rights reserved.},
keywords = {Air navigation, Conversational Agents, Conversational AI, Embeddings, Engineering education, Extended reality, Knowledge Management, Knowledge transfer, Language Model, Large language model, large language models, Personnel training, Remote Assistance, Retrieval-Augmented Generation, Robotics, Semantics, Smart manufacturing},
pubstate = {published},
tppubtype = {inproceedings}
}
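To make the retrieval step of such a RAG pipeline concrete (chunk the technical documentation, score chunks against the worker's query, pass the best matches to the LLM), here is a deliberately generic sketch. The token-overlap scoring stands in for the embedding models and vector stores the paper actually benchmarks, and the sample text is invented.

import re

def chunk(document: str, size: int = 200) -> list[str]:
    # Fixed-size chunking; the paper also evaluates semantic chunking strategies.
    return [document[i:i + size] for i in range(0, len(document), size)]

def score(query: str, passage: str) -> float:
    # Stand-in for embedding similarity: plain token overlap.
    q = set(re.findall(r"\w+", query.lower()))
    p = set(re.findall(r"\w+", passage.lower()))
    return len(q & p) / max(len(q), 1)

def retrieve(query: str, chunks: list[str], k: int = 3) -> list[str]:
    # The top-k chunks would be injected into the LLM prompt as grounding context.
    return sorted(chunks, key=lambda c: score(query, c), reverse=True)[:k]

manual = "Lock out the robot cell before entering. Wear insulated gloves when servicing the spindle."
print(retrieve("How do I enter the robot cell safely?", chunk(manual)))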
Anvitha, K.; Durjay, T.; Sathvika, K.; Gnanendra, G.; Annamalai, S.; Natarajan, S. K.
EduBot: A Compact AI-Driven Study Assistant for Contextual Knowledge Retrieval Proceedings Article
In: Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331507756 (ISBN).
Abstract | Links | BibTeX | Tags: Chatbots, Computer aided instruction, Contextual knowledge, Curricula, Digital Education, E-Learning, Education computing, Educational Technology, Engineering education, Indexing (of information), Information Retrieval, Intelligent systems, Knowledge retrieval, LangChain Framework, Language Model, Large language model, learning experience, Learning experiences, Learning systems, LLM, PDF - Driven Chatbot, Query processing, Students, Teaching, Traditional learning, Virtual Reality
@inproceedings{anvitha_edubot_2025,
title = {EduBot: A Compact AI-Driven Study Assistant for Contextual Knowledge Retrieval},
author = {K. Anvitha and T. Durjay and K. Sathvika and G. Gnanendra and S. Annamalai and S. K. Natarajan},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105013615976&doi=10.1109%2FGINOTECH63460.2025.11077097&partnerID=40&md5=b08377283f2ea2ee406d38d1d23f1e42},
doi = {10.1109/GINOTECH63460.2025.11077097},
isbn = {9798331507756 (ISBN)},
year = {2025},
date = {2025-01-01},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {In the evolving landscape of educational technology, intelligent systems are redefining traditional learning methods by enhancing accessibility, adaptability, and engagement in instructional processes. This paper presents EduBot, a PDF-Driven Chatbot developed using advanced Large Language Models (LLMs) and leveraging frameworks like LangChain, OpenAI's ChatGPT, and Pinecone. EduBot is designed as an interactive educational assistant, responding to student queries based on faculty-provided guidelines embedded in PDF documents. Through natural language processing, EduBot streamlines information retrieval, providing accurate, context-aware responses that foster a self-directed learning experience. By aligning with specific academic requirements and enhancing clarity in information delivery, EduBot stands as a promising tool in personalized digital learning support. This paper explores the design, implementation, and impact of EduBot, offering insights into its potential as a scalable solution for academic institutions. The demand for accessible and adaptive educational tools is increasing as students seek more personalized and efficient ways to enhance their learning experience. EduBot is a cutting-edge PDF-driven chatbot designed to act as a virtual educational assistant, helping students to navigate and understand course materials by answering queries directly based on faculty guidelines. Built upon Large Language Models (LLMs), specifically utilizing frameworks such as LangChain and OpenAI's GPT-3.5, EduBot provides a sophisticated solution for integrating curated academic content into interactive learning. With its backend support from Pinecone for optimized data indexing, EduBot offers accurate and context-specific responses, facilitating a deeper level of engagement and comprehension. The average relevancy score is 80%. This paper outlines the design and deployment of EduBot, emphasizing its architecture, adaptability, and contributions to the educational landscape, where such AI-driven tools are poised to become indispensable in fostering autonomous, personalized learning environments. © 2025 Elsevier B.V., All rights reserved.},
keywords = {Chatbots, Computer aided instruction, Contextual knowledge, Curricula, Digital Education, E-Learning, Education computing, Educational Technology, Engineering education, Indexing (of information), Information Retrieval, Intelligent systems, Knowledge retrieval, LangChain Framework, Language Model, Large language model, learning experience, Learning experiences, Learning systems, LLM, PDF - Driven Chatbot, Query processing, Students, Teaching, Traditional learning, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
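A minimal sketch of the PDF-grounded answering idea described above (extract the faculty-provided guidelines from a PDF and constrain the model to answer from that text only) is given below. It is not EduBot's code: the file name, truncation limit, and model choice are assumptions, and a real system would add the Pinecone-style retrieval the paper describes rather than pasting the whole document.

from pypdf import PdfReader
from openai import OpenAI

def answer_from_guidelines(question: str, pdf_path: str) -> str:
    guidelines = "\n".join(page.extract_text() or "" for page in PdfReader(pdf_path).pages)
    client = OpenAI()
    resp = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system",
             "content": "Answer student questions using only the course guidelines below. "
                        "If the answer is not in them, say so.\n\n" + guidelines[:8000]},
            {"role": "user", "content": question},
        ],
    )
    return resp.choices[0].message.content

print(answer_from_guidelines("What is the late-submission policy?", "course_guidelines.pdf"))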
Alex, G.
Leveraging Large Language Models for Automated XR Instructional Content Generation Proceedings Article
In: Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331585341 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Authoring Tool, Case-studies, Engineering education, Extended reality, IEEE Standards, Language Model, Large language model, Learning systems, Ontology, Ontology's, Simple++
@inproceedings{alex_leveraging_2025,
title = {Leveraging Large Language Models for Automated XR Instructional Content Generation},
author = {G. Alex},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105015398440&doi=10.1109%2FICE%2FITMC65658.2025.11106622&partnerID=40&md5=c125d3b7e58cfff4c24a9b15bb615912},
doi = {10.1109/ICE/ITMC65658.2025.11106622},
isbn = {9798331585341 (ISBN)},
year = {2025},
date = {2025-01-01},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {This paper presents a study in which authors examine the potential of leveraging large language models to generate instructional content for eXtended Reality environments. Considering the IEEE ARLEM standard as a framework for structuring data, it could be integrated and interpreted by existing authoring tools. In terms of methods, authors have adopted an exploratory approach in testing various strategies. A case study focusing on the use of an eXtended Reality authoring tool for teaching operating procedures is presented. Finally, this exploratory work shows that while simple prompts can produce scenarios with satisfactory quality, imposing a structured schema through more complex prompts leads to less reliable outcomes. © 2025 Elsevier B.V., All rights reserved.},
keywords = {Artificial intelligence, Authoring Tool, Case-studies, Engineering education, Extended reality, IEEE Standards, Language Model, Large language model, Learning systems, Ontology, Ontology's, Simple++},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Y.; Yan, Y.; Yang, G.
Bringing Microbiology to Life in Museum: Using Mobile VR and LLM-Powered Virtual Character for Children's Science Learning Proceedings Article
In: Chui, K. T.; Jaikaeo, C.; Niramitranon, J.; Kaewmanee, W.; Ng, K. -K.; Ongkunaruk, P. (Ed.): pp. 83–87, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331595500 (ISBN).
Abstract | Links | BibTeX | Tags: Computer aided instruction, E-Learning, Engineering education, Experimental groups, Immersive technologies, Informal learning, Language Model, Large language model, large language models, Learning systems, Microbiology, Mobile virtual reality, Museum, Museums, Science education, Science learning, Virtual addresses, Virtual character, Virtual Reality, Virtual reality system
@inproceedings{chen_bringing_2025,
title = {Bringing Microbiology to Life in Museum: Using Mobile VR and LLM-Powered Virtual Character for Children's Science Learning},
author = {Y. Chen and Y. Yan and G. Yang},
editor = {K. T. Chui and C. Jaikaeo and J. Niramitranon and W. Kaewmanee and K. -K. Ng and P. Ongkunaruk},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105015708152&doi=10.1109%2FISET65607.2025.00025&partnerID=40&md5=77ae9a4829656155010abc280a817a72},
doi = {10.1109/ISET65607.2025.00025},
isbn = {9798331595500 (ISBN)},
year = {2025},
date = {2025-01-01},
pages = {83–87},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Although the increasing advantages of immersive technology-enhanced museum informal learning in children's science education, the application of mobile virtual reality (MVR) technology combined with large language models (LLM) in this environment has not yet been fully explored. Furthermore, virtual character, as an intelligent learning assistant, is capable of providing personalized guidance and instant feedback to children through natural language interactions, but its potential in museum learning has yet to be fully tapped. To address these gaps, this study investigates the effectiveness of integrating MVR with LLM-powered virtual character in promoting children's microbiology learning during museum activities. In this paper, the technology-enhanced POE (Prediction-observation-explanation) learning model was studied, and the corresponding MVR system was designed and developed to carry out microbial learning activities. A quasiexperimental design was used with 60 children aged 10-12. The experimental group learned via an MVR system combining LLM-powered virtual character, while the control group used traditional methods. Results showed the experimental group significantly outperformed the control group in both academic achievement and learning motivation, including attention, confidence, and satisfaction. This provides evidence for using immersive technologies in informal learning and offers insights into applying LLM-powered virtual character in science education. © 2025 Elsevier B.V., All rights reserved.},
keywords = {Computer aided instruction, E-Learning, Engineering education, Experimental groups, Immersive technologies, Informal learning, Language Model, Large language model, large language models, Learning systems, Microbiology, Mobile virtual reality, Museum, Museums, Science education, Science learning, Virtual addresses, Virtual character, Virtual Reality, Virtual reality system},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhao, P.; Wei, X.
The Role of 3D Virtual Humans in Communication and Assisting Students' Learning in Transparent Display Environments: Perspectives of Pre-Service Teachers Proceedings Article
In: Chui, K. T.; Jaikaeo, C.; Niramitranon, J.; Kaewmanee, W.; Ng, K. -K.; Ongkunaruk, P. (Ed.): pp. 319–323, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331595500 (ISBN).
Abstract | Links | BibTeX | Tags: 3D virtual human, Assistive technology, CDIO teaching model, Collaborative learning, Collaborative practices, Display environments, E-Learning, Educational Technology, Engineering education, feedback, Integration, Knowledge delivery, Knowledge transfer, Learning algorithms, Natural language processing systems, Preservice teachers, Psychology computing, Student learning, Students, Teaching, Teaching model, Transparent display environment, Transparent displays, Virtual Reality
@inproceedings{zhao_role_2025,
title = {The Role of 3D Virtual Humans in Communication and Assisting Students' Learning in Transparent Display Environments: Perspectives of Pre-Service Teachers},
author = {P. Zhao and X. Wei},
editor = {K. T. Chui and C. Jaikaeo and J. Niramitranon and W. Kaewmanee and K. -K. Ng and P. Ongkunaruk},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105015746241&doi=10.1109%2FISET65607.2025.00069&partnerID=40&md5=08c39b84fa6bd6ac13ddbed203d7b1d9},
doi = {10.1109/ISET65607.2025.00069},
isbn = {9798331595500 (ISBN)},
year = {2025},
date = {2025-01-01},
pages = {319–323},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {The integration of transparent display and 3D virtual human technologies into education is expanding rapidly; however, their systematic incorporation into the CDIO teaching model remains underexplored, particularly in supporting complex knowledge delivery and collaborative practice. This study developed an intelligent virtual teacher assistance system based on generative AI and conducted a teaching experiment combining transparent display and 3D virtual human technologies. Feedback was collected through focus group interviews with 24 pre-service teachers. Results show that the virtual human, through natural language and multimodal interaction, significantly enhanced classroom engagement and contextual understanding, while its real-time feedback and personalized guidance effectively supported CDIO-based collaborative learning. Nonetheless, challenges remain in contextual adaptability and emotional feedback accuracy. Accordingly, the study proposes a path for technical optimization through the integration of multimodal emotion recognition, adaptive instructional algorithms, and nonintrusive data collection, offering empirical and theoretical insights into educational technology integration within the CDIO framework and future intelligent learning tools. © 2025 Elsevier B.V., All rights reserved.},
keywords = {3D virtual human, Assistive technology, CDIO teaching model, Collaborative learning, Collaborative practices, Display environments, E-Learning, Educational Technology, Engineering education, feedback, Integration, Knowledge delivery, Knowledge transfer, Learning algorithms, Natural language processing systems, Preservice teachers, Psychology computing, Student learning, Students, Teaching, Teaching model, Transparent display environment, Transparent displays, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Vadisetty, R.; Polamarasetti, A.; Goyal, M. K.; Rongali, S. K.; Prajapati, S. K.; Butani, J. B.
Cloud-Based Immersive Learning: The Role of Virtual Reality, Big Data, and Generative AI in Transformative Education Experiences Proceedings Article
In: Mishra, S.; Tripathy, H. K.; Mohanty, J. R. (Ed.): Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331523022 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Big Data, Cloud analytics, Cloud environments, Cloud-based, Cloud-based learning, E-Learning, Engineering education, Generative AI, generative artificial intelligence, Immersive learning, Learning analytic, learning analytics, Learning systems, Metadata, Personalized Education, Personalized learning, Real time analysis, Realistic simulation, Virtual environments, Virtual Reality
@inproceedings{vadisetty_cloud-based_2025,
title = {Cloud-Based Immersive Learning: The Role of Virtual Reality, Big Data, and Generative AI in Transformative Education Experiences},
author = {R. Vadisetty and A. Polamarasetti and M. K. Goyal and S. K. Rongali and S. K. Prajapati and J. B. Butani},
editor = {S. Mishra and H. K. Tripathy and J. R. Mohanty},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105018048438&doi=10.1109%2FASSIC64892.2025.11158636&partnerID=40&md5=6d832a0f4460d2eb93e357faba143a32},
doi = {10.1109/ASSIC64892.2025.11158636},
isbn = {9798331523022 (ISBN)},
year = {2025},
date = {2025-01-01},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Immersive learning transforms education by integrating Virtual Reality (VR), Big Data, and Generative Artificial Intelligence (AI) in cloud environments. This work discusses these technologies' contribution towards increased engagement, personalized learning, and recall through flexible and interactive experiences. Realistic simulations in a secure environment, real-time analysis via Big Data, and dynamically personalized information via Generative AI make immersive learning a reality. Nevertheless, scalability, security, and ease of integration are yet to be addressed. This article proposes an integrated model for cloud-based immersive learning, comparing conventional and AI-facilitated approaches through experimental evaluation. Besides, technical, ethical, and legislative considerations and future directions for inquiry are addressed. In conclusion, with its potential for personalized, scalable, and data-intensive instruction, AI-facilitated immersive learning is a transformational technology for educational delivery. © 2025 Elsevier B.V., All rights reserved.},
keywords = {Artificial intelligence, Big Data, Cloud analytics, Cloud environments, Cloud-based, Cloud-based learning, E-Learning, Engineering education, Generative AI, generative artificial intelligence, Immersive learning, Learning analytic, learning analytics, Learning systems, Metadata, Personalized Education, Personalized learning, Real time analysis, Realistic simulation, Virtual environments, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Ly, C.; Peng, E.; Liu, K.; Qin, A.; Howe, G.; Cheng, A. Y.; Cuadra, A.
Museum in the Classroom: Engaging Students with Augmented Reality Museum Artifacts and Generative AI Proceedings Article
In: Conf Hum Fact Comput Syst Proc, Association for Computing Machinery, 2025, ISBN: 9798400713958 (ISBN); 9798400713941 (ISBN).
Abstract | Links | BibTeX | Tags: Artifact or System, Child/parent, Children/Parents, Digitisation, Education/Learning, Engaging students, Engineering education, Field trips, Interactive learning, Learning experiences, Rich learning experiences, Students, Teachers', Teaching
@inproceedings{ly_museum_2025,
title = {Museum in the Classroom: Engaging Students with Augmented Reality Museum Artifacts and Generative AI},
author = {C. Ly and E. Peng and K. Liu and A. Qin and G. Howe and A. Y. Cheng and A. Cuadra},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005741934&doi=10.1145%2F3706599.3719787&partnerID=40&md5=0a405b14f099e1132e32dd10c121eb37},
doi = {10.1145/3706599.3719787},
isbn = {9798400713958 (ISBN); 9798400713941 (ISBN)},
year = {2025},
date = {2025-01-01},
booktitle = {Conf Hum Fact Comput Syst Proc},
publisher = {Association for Computing Machinery},
abstract = {Museum field trips provide a rich learning experience for children. However, they are complex and expensive for teachers to organize. Fortunately, digitization of museum artifacts makes it possible to use museum resources within the classroom. Museum in the Classroom (MITC) explores how augmented reality (AR) and generative artificial intelligence (AI) can create an interactive learning experience around museum artifacts. This iPad app allows educators to select historical topics from a curated artifact library, generating AR-based exhibits that students can explore. MITC engages students through interactive AR artifacts, AI-driven chatbots, and AI-generated quiz questions, based on a real exhibition at the Cantor Arts Center at Stanford University. A formative study with middle schoolers (N = 20) demonstrated that the app increased engagement compared to traditional learning methods. MITC also fostered a playful and comfortable environment to interact with museum artifacts. Our findings suggest that combining AR and AI has the potential to enrich classroom learning and offer a scalable alternative to traditional museum visits. © 2025 Elsevier B.V., All rights reserved.},
keywords = {Artifact or System, Child/parent, Children/Parents, Digitisation, Education/Learning, Engaging students, Engineering education, Field trips, Interactive learning, Learning experiences, Rich learning experiences, Students, Teachers', Teaching},
pubstate = {published},
tppubtype = {inproceedings}
}
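One of MITC's AI features is generating quiz questions about a selected artifact. The sketch below shows a generic way to request machine-readable quiz items and parse them; the prompt, model, and JSON shape are assumptions rather than the app's actual implementation.

import json
from openai import OpenAI

def quiz_for_artifact(artifact_description: str, n_questions: int = 3) -> list[dict]:
    client = OpenAI()
    resp = client.chat.completions.create(
        model="gpt-4o-mini",   # placeholder model name
        response_format={"type": "json_object"},
        messages=[
            {"role": "system",
             "content": ("Write middle-school level quiz questions about the museum artifact described by the user. "
                         "Return a JSON object of the form "
                         '{"questions": [{"question": "...", "choices": ["..."], "answer": "..."}]} '
                         f"with exactly {n_questions} items.")},
            {"role": "user", "content": artifact_description},
        ],
    )
    return json.loads(resp.choices[0].message.content)["questions"]

for q in quiz_for_artifact("A Tang dynasty glazed ceramic camel that traveled the Silk Road."):
    print(q["question"])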
2024
Sarshartehrani, F.; Mohammadrezaei, E.; Behravan, M.; Gracanin, D.
Enhancing E-Learning Experience Through Embodied AI Tutors in Immersive Virtual Environments: A Multifaceted Approach for Personalized Educational Adaptation Proceedings Article
In: Sottilare, R.A.; Schwarz, J. (Ed.): Lect. Notes Comput. Sci., pp. 272–287, Springer Science and Business Media Deutschland GmbH, 2024, ISSN: 0302-9743; ISBN: 978-3-031-60608-3.
Abstract | Links | BibTeX | Tags: Adaptive Learning, Artificial intelligence, Artificial intelligence in education, Computer aided instruction, Computer programming, E - learning, E-Learning, Education computing, Embodied artificial intelligence, Engineering education, Immersive Virtual Environments, Learner Engagement, Learning experiences, Learning systems, Multi-faceted approach, Personalized Instruction, Traditional boundaries, Virtual Reality
@inproceedings{sarshartehrani_enhancing_2024,
title = {Enhancing E-Learning Experience Through Embodied AI Tutors in Immersive Virtual Environments: A Multifaceted Approach for Personalized Educational Adaptation},
author = {F. Sarshartehrani and E. Mohammadrezaei and M. Behravan and D. Gracanin},
editor = {Sottilare R.A. and Schwarz J.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85196174389&doi=10.1007%2f978-3-031-60609-0_20&partnerID=40&md5=3801d0959781b1a191a3eb14f47bd8d8},
doi = {10.1007/978-3-031-60609-0_20},
issn = {0302-9743},
isbn = {978-3-031-60608-3},
year = {2024},
date = {2024-01-01},
booktitle = {Lect. Notes Comput. Sci.},
volume = {14727 LNCS},
pages = {272–287},
publisher = {Springer Science and Business Media Deutschland GmbH},
abstract = {As digital education transcends traditional boundaries, e-learning experiences are increasingly shaped by cutting-edge technologies like artificial intelligence (AI), virtual reality (VR), and adaptive learning systems. This study examines the integration of AI-driven personalized instruction within immersive VR environments, targeting enhanced learner engagement-a core metric in online education effectiveness. Employing a user-centric design, the research utilizes embodied AI tutors, calibrated to individual learners’ emotional intelligence and cognitive states, within a Python programming curriculum-a key area in computer science education. The methodology relies on intelligent tutoring systems and personalized learning pathways, catering to a diverse participant pool from Virginia Tech. Our data-driven approach, underpinned by the principles of educational psychology and computational pedagogy, indicates that AI-enhanced virtual learning environments significantly elevate user engagement and proficiency in programming education. Although the scope is limited to a single academic institution, the promising results advocate for the scalability of such AI-powered educational tools, with potential implications for distance learning, MOOCs, and lifelong learning platforms. This research contributes to the evolving narrative of smart education and the role of large language models (LLMs) in crafting bespoke educational experiences, suggesting a paradigm shift towards more interactive, personalized e-learning solutions that align with global educational technology trends. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2024.},
keywords = {Adaptive Learning, Artificial intelligence, Artificial intelligence in education, Computer aided instruction, Computer programming, E - learning, E-Learning, Education computing, Embodied artificial intelligence, Engineering education, Immersive Virtual Environments, Learner Engagement, Learning experiences, Learning systems, Multi-faceted approach, Personalized Instruction, Traditional boundaries, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Liu, M.; M'Hiri, F.
Beyond Traditional Teaching: Large Language Models as Simulated Teaching Assistants in Computer Science Proceedings Article
In: SIGCSE - Proc. ACM Tech. Symp. Comput. Sci. Educ., pp. 743–749, Association for Computing Machinery, Inc, 2024, ISBN: 9798400704246 (ISBN); 9798400704239 (ISBN).
Abstract | Links | BibTeX | Tags: Adaptive teaching, ChatGPT, Computational Linguistics, CS education, E-Learning, Education computing, Engineering education, GPT, Language Model, LLM, machine learning, Machine-learning, Novice programmer, novice programmers, Openai, Programming, Python, Students, Teaching, Virtual Reality
@inproceedings{liu_beyond_2024,
title = {Beyond Traditional Teaching: Large Language Models as Simulated Teaching Assistants in Computer Science},
author = {M. Liu and F. M'Hiri},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85189289344&doi=10.1145%2F3626252.3630789&partnerID=40&md5=7b0f42d4bf0e1706de8691200e4a0e00},
doi = {10.1145/3626252.3630789},
isbn = {9798400704246 (ISBN); 9798400704239 (ISBN)},
year = {2024},
date = {2024-01-01},
booktitle = {SIGCSE - Proc. ACM Tech. Symp. Comput. Sci. Educ.},
volume = {1},
pages = {743–749},
publisher = {Association for Computing Machinery, Inc},
abstract = {As the prominence of Large Language Models (LLMs) grows in various sectors, their potential in education warrants exploration. In this study, we investigate the feasibility of employing GPT-3.5 from OpenAI, as an LLM teaching assistant (TA) or a virtual TA in computer science (CS) courses. The objective is to enhance the accessibility of CS education while maintaining academic integrity by refraining from providing direct solutions to current-semester assignments. Targeting Foundations of Programming (COMP202), an undergraduate course that introduces students to programming with Python, we have developed a virtual TA using the LangChain framework, known for integrating language models with diverse data sources and environments. The virtual TA assists students with their code and clarifies complex concepts. For homework questions, it is designed to guide students with hints rather than giving out direct solutions. We assessed its performance first through a qualitative evaluation, then a survey-based comparative analysis, using a mix of questions commonly asked on the COMP202 discussion board and questions created by the authors. Our preliminary results indicate that the virtual TA outperforms human TAs on clarity and engagement, matching them on accuracy when the question is non-assignment-specific, for which human TAs still proved more reliable. These findings suggest that while virtual TAs, leveraging the capabilities of LLMs, hold great promise towards making CS education experience more accessible and engaging, their optimal use necessitates human supervision. We conclude by identifying several directions that could be explored in future implementations. © 2024 Elsevier B.V., All rights reserved.},
keywords = {Adaptive teaching, ChatGPT, Computational Linguistics, CS education, E-Learning, Education computing, Engineering education, GPT, Language Model, LLM, machine learning, Machine-learning, Novice programmer, novice programmers, Openai, Programming, Python, Students, Teaching, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
2023
Suryavanshi, D. P.; Kaveri, P. R.; Kadlag, P. S.
Advancing Digital Transformation in Indian Higher Education Institutions Proceedings Article
In: Intell. Comput. Control Eng. Bus. Syst., ICCEBS, Institute of Electrical and Electronics Engineers Inc., 2023, ISBN: 9798350394580 (ISBN).
Abstract | Links | BibTeX | Tags: Augmented Reality, Data Analysis, Data collection, Data handling, Developing countries, Digital revolution, Digital transformation, E-Learning, Educational Institution, Educational institutions, Engineering education, High educations, Higher education institutions, Information analysis, Learning systems, Literature studies, Metadata, Primary data, Stakeholder, Stakeholders, Technology Adoption
@inproceedings{suryavanshi_advancing_2023,
title = {Advancing Digital Transformation in Indian Higher Education Institutions},
author = {D. P. Suryavanshi and P. R. Kaveri and P. S. Kadlag},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85189153416&doi=10.1109%2FICCEBS58601.2023.10448947&partnerID=40&md5=84365ef7ef853f33b9415de8bbed4cc9},
doi = {10.1109/ICCEBS58601.2023.10448947},
isbn = {9798350394580 (ISBN)},
year = {2023},
date = {2023-01-01},
booktitle = {Intell. Comput. Control Eng. Bus. Syst., ICCEBS},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {The paper focuses on advancing the use of digital transformation in Indian higher education institutions; although India is a developing country, it is important for its educational institutions to practice transformation in various forms. The paper presents a detailed literature study and concludes with various opinions generated through primary data collection. The objective of the study is to identify the need for digital transformation in the education environment through two major methods: a literature study and stakeholder data analysis. Technological expectations were also studied using questionnaires. The study also analyzed earlier related work using the VOSviewer programme on the Scopus dataset for the years 1980 to 2004, in order to understand the year-by-year publications, research articles, and book chapters on digital transformation in higher education. The majority of stakeholders concur that using digital transformation technologies such as IoT, AI and ChatGPT, generative AI, and augmented reality in higher education is essential for implementing NEP 2020 and successfully integrating digital technologies. The paper provides a detailed discussion, including a literature review, of various aspects of digital transformation in education institutes. It also presents opinions from various stakeholders to understand the actual outcomes expected from the study. The current study uses a mixed research methodology because the questionnaire includes both quantitative and qualitative questions. A sample of 40 respondents was collected, representing the four main stakeholders in education: students, faculty, businesspeople, and educationalists. The responses were analysed in SPSS using percentages and means. The newly adopted educational policy NEP 2020 encourages the use of technology and skill-based learning. The importance of technology in teaching and learning processes has been emphasized in numerous research papers in order to improve the teaching-learning process and its outcomes. A thorough assessment of the literature was carried out using VOSviewer to evaluate the pertinent studies and pinpoint any gaps. © 2024 Elsevier B.V., All rights reserved.},
keywords = {Augmented Reality, Data Analysis, Data collection, Data handling, Developing countries, Digital revolution, Digital transformation, E-Learning, Educational Institution, Educational institutions, Engineering education, High educations, Higher education institutions, Information analysis, Learning systems, Literature studies, Metadata, Primary data, Stakeholder, Stakeholders, Technology Adoption},
pubstate = {published},
tppubtype = {inproceedings}
}
Ayre, D.; Dougherty, C.; Zhao, Y.
Implementation of an Artificial Intelligence (AI) Instructional Support System in a Virtual Reality (VR) Thermal-Fluids Laboratory Proceedings Article
In: ASME Int Mech Eng Congress Expos Proc, American Society of Mechanical Engineers (ASME), 2023, ISBN: 978-079188765-3 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, E-Learning, Education computing, Engineering education, Fluid mechanics, Generative AI, generative artificial intelligence, GPT, High educations, Instructional support, Laboratories, Laboratory class, Laboratory experiments, Physical laboratory, Professional aspects, Students, Support systems, Thermal fluids, Virtual Reality, Virtual-reality environment
@inproceedings{ayre_implementation_2023,
title = {Implementation of an Artificial Intelligence (AI) Instructional Support System in a Virtual Reality (VR) Thermal-Fluids Laboratory},
author = {D. Ayre and C. Dougherty and Y. Zhao},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85185393784&doi=10.1115%2fIMECE2023-112683&partnerID=40&md5=c2492592a016478a4b3591ff82a93be5},
doi = {10.1115/IMECE2023-112683},
isbn = {978-079188765-3 (ISBN)},
year = {2023},
date = {2023-01-01},
booktitle = {ASME Int Mech Eng Congress Expos Proc},
volume = {8},
publisher = {American Society of Mechanical Engineers (ASME)},
abstract = {Physical laboratory experiments have long been the cornerstone of higher education, providing future engineers with practical, real-life experience invaluable to their careers. However, demand for laboratory time has exceeded physical capabilities. Virtual reality (VR) labs have proven to retain many benefits of attending physical labs while also providing significant advantages only available in a VR environment. Previously, our group had developed a pilot VR lab that replicated six (6) unique thermal-fluids lab experiments, built using the Unity game engine. One of the VR labs was tested in a thermal-fluid mechanics laboratory class with favorable results, but students highlighted the need for additional assistance within the VR simulation. In response to this testing, we have incorporated an artificial intelligence (AI) assistant to aid students within the VR environment by developing an interaction model. Utilizing the Generative Pre-trained Transformer 4 (GPT-4) large language model (LLM) and augmented context retrieval, the AI assistant can provide reliable instruction and troubleshoot errors while students conduct the lab procedure, offering an experience similar to working with a real-life lab assistant. The updated VR lab was tested in two laboratory classes, and while the overall tone of student responses to an AI-powered assistant was excitement and enthusiasm, observations and other recorded data show that students are currently unsure how to utilize this new technology; these findings will help guide future refinement of AI components within the VR environment. © 2023 by ASME.},
keywords = {Artificial intelligence, E-Learning, Education computing, Engineering education, Fluid mechanics, Generative AI, generative artificial intelligence, GPT, High educations, Instructional support, Laboratories, Laboratory class, Laboratory experiments, Physical laboratory, Professional aspects, Students, Support systems, Thermal fluids, Virtual Reality, Virtual-reality environment},
pubstate = {published},
tppubtype = {inproceedings}
}
Gaikwad, T.; Kulkarni, A.
Smart Training Framework and Assessment Strategies Proceedings Article
In: IEEE Eng. Informatics, EI, Institute of Electrical and Electronics Engineers Inc., 2023, ISBN: 9798350338522 (ISBN).
Abstract | Links | BibTeX | Tags: AR training, Assessment strategies, Augmented Reality, Augmented reality training, Computational Linguistics, Edtech, Education computing, Education sectors, Engineering education, Language Model, Large language model, large language models, Prompt engineering, Risk assessment, Smart assessment, Students, Training assessment, Training framework
@inproceedings{gaikwad_smart_2023,
title = {Smart Training Framework and Assessment Strategies},
author = {T. Gaikwad and A. Kulkarni},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85193969838&doi=10.1109%2FIEEECONF58110.2023.10520594&partnerID=40&md5=bb9990750ee4ea498a5f551c8bd382ce},
doi = {10.1109/IEEECONF58110.2023.10520594},
isbn = {9798350338522 (ISBN)},
year = {2023},
date = {2023-01-01},
booktitle = {IEEE Eng. Informatics, EI},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {The rapidly evolving landscape of technological advancements is significantly transforming the education sector. This integration of technology in the education sector has given rise to the edtech industry, which keeps transforming as newer technologies are introduced. Training delivered to learners, along with the assessment of those learners, forms the fundamental components of the education sector. However, current methods of delivering training and assessing learners face numerous challenges, including skill shortages due to technological advancements, high costs, and the difficulty of conducting complex training in high-risk environments. Similarly, assessment methods struggle with inflexible assessment strategies and limited personalized feedback to learners. Addressing these challenges in training and assessment, this study proposes a smart training and assessment framework (STAF) that leverages the benefits of augmented reality (AR) and artificial intelligence (AI) based large language models (LLMs), which stand out as a monumental leap in reshaping the training and assessment sector. As part of this study, an AR-based training module was created and delivered to students. A survey of these students was conducted to gain insights into the adaptability of AR-based training and the potential to improve it. It is concluded that, along with AR in education, AI and LLMs with prompt engineering strategies should be integrated into the education domain for better interactivity and enhanced student performance. Currently, limited research has been conducted on the integration of LLMs into AR environments for the education sector, and this paper provides an in-depth exploration of the immense potential of LLM applications within the realm of training and assessment for improved learner performance. © 2024 Elsevier B.V., All rights reserved.},
keywords = {AR training, Assessment strategies, Augmented Reality, Augmented reality training, Computational Linguistics, Edtech, Education computing, Education sectors, Engineering education, Language Model, Large language model, large language models, Prompt engineering, Risk assessment, Smart assessment, Students, Training assessment, Training framework},
pubstate = {published},
tppubtype = {inproceedings}
}
Marquez, R.; Barrios, N.; Vera, R. E.; Mendez, M. E.; Tolosa, L.; Zambrano, F.; Li, Y.
A perspective on the synergistic potential of artificial intelligence and product-based learning strategies in biobased materials education Journal Article
In: Education for Chemical Engineers, vol. 44, pp. 164–180, 2023, ISSN: 17497728 (ISSN), (Publisher: Elsevier B.V.).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Bio-based, Bio-based materials, Biobased, ChatGPT, Chemical engineering, Chemical engineering education, Education computing, Engineering education, Formulation, Generative AI, Learning strategy, Learning systems, Material engineering, Materials, Students, Sustainable development, Teaching approaches, Traditional materials, Virtual Reality
@article{marquez_perspective_2023,
title = {A perspective on the synergistic potential of artificial intelligence and product-based learning strategies in biobased materials education},
author = {R. Marquez and N. Barrios and R. E. Vera and M. E. Mendez and L. Tolosa and F. Zambrano and Y. Li},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85162078243&doi=10.1016%2Fj.ece.2023.05.005&partnerID=40&md5=b672c7295c387979b6275d4245d25f97},
doi = {10.1016/j.ece.2023.05.005},
issn = {17497728 (ISSN)},
year = {2023},
date = {2023-01-01},
journal = {Education for Chemical Engineers},
volume = {44},
pages = {164–180},
abstract = {The integration of product-based learning strategies in Materials in Chemical Engineering education is crucial for students to gain the skills and competencies required to thrive in the emerging circular bioeconomy. Traditional materials engineering education has often relied on a transmission teaching approach, in which students are expected to passively receive information from instructors. However, this approach has proven inadequate under the current circumstances, in which information is readily available and innovative tools such as artificial intelligence and virtual reality environments are becoming widespread (e.g., metaverse). Instead, we consider that a critical goal of education should be to develop aptitudes and abilities that enable students to generate solutions and products that address societal demands. In this work, we propose innovative strategies, such as product-based learning methods and GPT (Generative Pre-trained Transformer) artificial intelligence text generation models, to modify the focus of a Materials in Chemical Engineering course from non-sustainable materials to sustainable ones, aiming to address the critical challenges of our society. This approach aims to achieve two objectives: first, to enable students to actively engage with raw materials and solve real-world challenges, and second, to foster creativity and entrepreneurship skills by providing them with the necessary tools to conduct brainstorming sessions and develop procedures following scientific methods. The incorporation of circular bioeconomy concepts, such as renewable resources, waste reduction, and resource efficiency, into the curriculum provides a framework for students to understand the environmental, social, and economic implications in Chemical Engineering. It also allows them to make informed decisions within the circular bioeconomy framework, benefiting society by promoting the development and adoption of sustainable technologies and practices. © 2023 Elsevier B.V., All rights reserved.},
note = {Publisher: Elsevier B.V.},
keywords = {Artificial intelligence, Bio-based, Bio-based materials, Biobased, ChatGPT, Chemical engineering, Chemical engineering education, Education computing, Engineering education, Formulation, Generative AI, Learning strategy, Learning systems, Material engineering, Materials, Students, Sustainable development, Teaching approaches, Traditional materials, Virtual Reality},
pubstate = {published},
tppubtype = {article}
}