AHCI RESEARCH GROUP
Publications
Papers published in international journals,
proceedings of conferences, workshops and books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTeX record for each paper.
2025
Dong, Y.
Enhancing Painting Exhibition Experiences with the Application of Augmented Reality-Based AI Video Generation Technology Proceedings Article
In: P., Zaphiris; A., Ioannou; A., Ioannou; R.A., Sottilare; J., Schwarz; M., Rauterberg (Ed.): Lect. Notes Comput. Sci., pp. 256–262, Springer Science and Business Media Deutschland GmbH, 2025, ISBN: 03029743 (ISSN); 978-303176814-9 (ISBN).
Abstract | Links | BibTeX | Tags: 3D modeling, AI-generated art, Art and Technology, Arts computing, Augmented Reality, Augmented reality technology, Digital Exhibition Design, Dynamic content, E-Learning, Education computing, Generation technologies, Interactive computer graphics, Knowledge Management, Multi dimensional, Planning designs, Three dimensional computer graphics, Video contents, Video generation
@inproceedings{dong_enhancing_2025,
  title     = {Enhancing Painting Exhibition Experiences with the Application of {Augmented Reality}-Based {AI} Video Generation Technology},
  author    = {Dong, Y.},
  editor    = {Zaphiris, P. and Ioannou, A. and Ioannou, A. and Sottilare, R. A. and Schwarz, J. and Rauterberg, M.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85213302959&doi=10.1007%2f978-3-031-76815-6_18&partnerID=40&md5=35484f5ed199a831f1a30f265a0d32d5},
  doi       = {10.1007/978-3-031-76815-6_18},
  issn      = {0302-9743},
  isbn      = {978-3-031-76814-9},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Lect. Notes Comput. Sci.},
  volume    = {15378 LNCS},
  pages     = {256--262},
  publisher = {Springer Science and Business Media Deutschland GmbH},
  abstract  = {Traditional painting exhibitions often rely on flat presentation methods, such as walls and stands, limiting their impact. Augmented Reality (AR) technology presents an opportunity to transform these experiences by turning static, flat artwork into dynamic, multi-dimensional presentations. However, creating and integrating video or dynamic content can be time-consuming and challenging, requiring meticulous planning, design, and production. In the context of urban renewal and community revitalization, particularly in China’s first-tier cities where real estate development has saturated the market, there is a growing trend to repurpose traditional commercial and office spaces with cultural and artistic exhibitions. These exhibitions not only enhance the spatial quality but also elevate the user experience, making the spaces more competitive. However, these non-traditional exhibition venues often lack the amenities of professional galleries, relying on walls, windows, and corners for displays, and requiring quick setup times. For visitors, who are often office workers or shoppers with limited time, the use of personal mobile devices for interaction is common. WeChat, China’s most widely used mobile application, provides a platform for convenient digital interactive experiences through mini-programs, which can support lightweight AR applications. AI video generation technologies, such as Conditional Generative Adversarial Networks (ControlNet) and Latent Consistency Models (LCM), have seen significant advancements. These technologies now allow for the creation of 3D models and video content from text and images. Tools like Meshy and Pika provide the ability to generate various video styles and offer precise control over video content. New AI video applications like Stable Video further expand the possibilities by rapidly converting static images into dynamic videos, facilitating easy adjustments and edits. 
This paper explores the application of AR-based AI video generation technology in enhancing the experience of painting exhibitions. By integrating these technologies, traditional paintings can be transformed into interactive, engaging displays that enrich the viewer’s experience. The study demonstrates the potential of these innovations to make art exhibitions more appealing and competitive in various public spaces, thereby improving both artistic expression and audience engagement. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2025.},
  keywords  = {3D modeling, AI-generated art, Art and Technology, Arts computing, Augmented Reality, Augmented reality technology, Digital Exhibition Design, Dynamic content, E-Learning, Education computing, Generation technologies, Interactive computer graphics, Knowledge Management, Multi dimensional, Planning designs, Three dimensional computer graphics, Video contents, Video generation},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Logothetis, I.; Diakogiannis, K.; Vidakis, N.
Interactive Learning Through Conversational Avatars and Immersive VR: Enhancing Diabetes Education and Self-Management Proceedings Article
In: X., Fang (Ed.): Lect. Notes Comput. Sci., pp. 415–429, Springer Science and Business Media Deutschland GmbH, 2025, ISBN: 03029743 (ISSN); 978-303192577-1 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Chronic disease, Computer aided instruction, Diabetes Education, Diagnosis, E-Learning, Education management, Engineering education, Gamification, Immersive virtual reality, Interactive computer graphics, Interactive learning, Large population, Learning systems, NUI, Self management, Serious game, Serious games, simulation, Virtual Reality
@inproceedings{logothetis_interactive_2025,
  title     = {Interactive Learning Through Conversational Avatars and Immersive {VR}: Enhancing Diabetes Education and Self-Management},
  author    = {Logothetis, I. and Diakogiannis, K. and Vidakis, N.},
  editor    = {Fang, X.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105008266480&doi=10.1007%2f978-3-031-92578-8_27&partnerID=40&md5=451274dfa3ef0b3f1b39c7d5a665ee3b},
  doi       = {10.1007/978-3-031-92578-8_27},
  issn      = {0302-9743},
  isbn      = {978-3-031-92577-1},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Lect. Notes Comput. Sci.},
  volume    = {15816 LNCS},
  pages     = {415--429},
  publisher = {Springer Science and Business Media Deutschland GmbH},
  abstract  = {Diabetes is a chronic disease affecting a large population of the world. Education and self-management of diabetes are crucial. Technologies such as Virtual Reality (VR) have presented promising results in healthcare education, while studies suggest that Artificial Intelligence (AI) can help in learning by further engaging the learner. This study aims to educate users on the entire routine of managing diabetes. The serious game utilizes VR for realistic interaction with diabetes tools and generative AI through a conversational avatar that acts as an assistant instructor. In this way, it allows users to practice diagnostic and therapeutic interventions in a controlled virtual environment, helping to build their understanding and confidence in diabetes management. To measure the effects of the proposed serious game, presence, and perceived agency were measured. Preliminary results indicate that this setup aids in the engagement and immersion of learners, while the avatar can provide helpful information during gameplay. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2025.},
  keywords  = {Artificial intelligence, Chronic disease, Computer aided instruction, Diabetes Education, Diagnosis, E-Learning, Education management, Engineering education, Gamification, Immersive virtual reality, Interactive computer graphics, Interactive learning, Large population, Learning systems, NUI, Self management, Serious game, Serious games, simulation, Virtual Reality},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2024
White, M.; Banerjee, N. K.; Banerjee, S.
VRcabulary: A VR Environment for Reinforced Language Learning via Multi-Modular Design Proceedings Article
In: Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR, pp. 315–319, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-835037202-1 (ISBN).
Abstract | Links | BibTeX | Tags: 'current, E-Learning, Foreign language, Immersive, Instructional modules, Language learning, Modular designs, Modulars, Multi-modular, Reinforcement, Second language, Virtual Reality, Virtual-reality environment
@inproceedings{white_vrcabulary_2024,
  title     = {{VRcabulary}: A {VR} Environment for Reinforced Language Learning via Multi-Modular Design},
  author    = {White, M. and Banerjee, N. K. and Banerjee, S.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85187241160&doi=10.1109%2fAIxVR59861.2024.00053&partnerID=40&md5=4d8ff8ac5c6aa8336a571ba906fe0f5d},
  doi       = {10.1109/AIxVR59861.2024.00053},
  isbn      = {979-8-3503-7202-1},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR},
  pages     = {315--319},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {We demonstrate VRcabulary, a hierarchical modular virtual reality (VR) environment for language learning (LL). Current VR LL apps lack the benefit of reinforcement presented in typical classroom environments. Apps either introduce content in the second language and lack retention testing, or provide gamification without an in-environment instructional component. To acquire reinforcement of knowledge, the learner needs to visit the app multiple times, increasing the potential for monotony. In VRcabulary, we introduce a multi-modular hierarchical design with 3 modules - an instructional module providing AI-generated audio playbacks of object names, a practice module enabling interaction based reinforcement of object names in response to audio playback, and an exam module enabling retention testing through interaction. To incentivize engagement by reducing monotony, we keep the designs of each modules distinct. We provide sequential object presentations in the instructional module and multiple object assortments in the practice and exam modules. We provide feedback and multiple trials in the practice module, but eliminate them from the exam module. We expect cross-module diversity of interaction in VRcabulary to enhance engagement in VR LL. © 2024 IEEE.},
  keywords  = {'current, E-Learning, Foreign language, Immersive, Instructional modules, Language learning, Modular designs, Modulars, Multi-modular, Reinforcement, Second language, Virtual Reality, Virtual-reality environment},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gaudi, T.; Kapralos, B.; Quevedo, A.
Structural and Functional Fidelity of Virtual Humans in Immersive Virtual Learning Environments Proceedings Article
In: IEEE Gaming, Entertain., Media Conf., GEM, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-835037453-7 (ISBN).
Abstract | Links | BibTeX | Tags: 3D modeling, Computer aided instruction, Digital representations, E-Learning, Engagement, fidelity, Immersive, Immersive virtual learning environment, Serious game, Serious games, Three dimensional computer graphics, Virtual character, virtual human, Virtual humans, Virtual instructors, Virtual learning environments, Virtual Reality, virtual simulation, Virtual simulations
@inproceedings{gaudi_structural_2024,
  title     = {Structural and Functional Fidelity of Virtual Humans in Immersive Virtual Learning Environments},
  author    = {Gaudi, T. and Kapralos, B. and Quevedo, A.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85199517136&doi=10.1109%2fGEM61861.2024.10585535&partnerID=40&md5=bf271019e077b5e464bcd62b1b28312b},
  doi       = {10.1109/GEM61861.2024.10585535},
  isbn      = {979-8-3503-7453-7},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {IEEE Gaming, Entertain., Media Conf., GEM},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {Central to many immersive virtual learning environments (iVLEs) are virtual humans, or characters that are digital representations, which can serve as virtual instructors to facilitate learning. Current technology is allowing the production of photo-realistic (high fidelity/highly realistic) avatars, whether using traditional approaches relying on 3D modeling, or modern tools leveraging generative AI and virtual character creation tools. However, fidelity (i.e., level of realism) is complex as it can be analyzed from various points of view referring to its structure, function, interactivity, and behavior among others. Given its relevance, fidelity can influence various aspects of iVLEs including engagement and ultimately learning outcomes. In this work-in-progress paper, we propose a study that will examine the effect of structural and functional fidelity of a virtual human assistant on engagement within a virtual simulation designed to teach the cognitive aspects (e.g., the steps of a procedure) of the heart auscultation procedure. © 2024 IEEE.},
  keywords  = {3D modeling, Computer aided instruction, Digital representations, E-Learning, Engagement, fidelity, Immersive, Immersive virtual learning environment, Serious game, Serious games, Three dimensional computer graphics, Virtual character, virtual human, Virtual humans, Virtual instructors, Virtual learning environments, Virtual Reality, virtual simulation, Virtual simulations},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Williams, R.
Deep HoriXons - 3D Virtual Generative AI Assisted Campus for Deep Learning AI and Cybersecurity Proceedings Article
In: M., Blowers; B.T., Wysocki (Ed.): Proc SPIE Int Soc Opt Eng, SPIE, 2024, ISBN: 0277786X (ISSN); 978-151067434-9 (ISBN).
Abstract | Links | BibTeX | Tags: 3D virtual campus, AI and cybersecurity education, AI talent pipeline, ChatGPT digital tutor, CompTIA Security+, Computer aided instruction, Cyber security, Cyber-security educations, Cybersecurity, Deep learning, E-Learning, Immersive, Learning systems, Virtual campus, Virtual learning environments, Virtual Reality
@inproceedings{williams_deep_2024,
  title     = {{Deep HoriXons} - {3D} Virtual Generative {AI} Assisted Campus for Deep Learning {AI} and Cybersecurity},
  author    = {Williams, R.},
  editor    = {Blowers, M. and Wysocki, B. T.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85196555361&doi=10.1117%2f12.3011374&partnerID=40&md5=ff7392a37a51044c79d4d2824c9cf46b},
  doi       = {10.1117/12.3011374},
  issn      = {0277-786X},
  isbn      = {978-1-5106-7434-9},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {Proc SPIE Int Soc Opt Eng},
  volume    = {13058},
  publisher = {SPIE},
  abstract  = {This abstract outlines two significant innovations in AI and cybersecurity education within the "Deep HoriXons" 3D virtual campus, addressing the urgent need for skilled professionals in these domains. First, the paper introduces "Deep HoriXons," an immersive 3D virtual learning environment designed to democratize and enhance the educational experience for AI and cybersecurity. This innovation is notable for its global accessibility and ability to simulate real-world scenarios, providing an interactive platform for experiential learning, which is a marked departure from traditional educational models. The second innovation discussed is the strategic integration of ChatGPT as a digital educator and tutor within this virtual environment. ChatGPT's role is pivotal in offering tailored, real-time educational support, making complex AI and cybersecurity concepts more accessible and engaging for learners. This application of ChatGPT is an innovation worth noting for its ability to adapt to individual learning styles, provide interactive scenario-based learning, and support a deeper understanding of technical subjects through dynamic, responsive interaction. Together, these innovations represent a significant advancement in the field of AI and cybersecurity education, addressing the critical talent shortage by making high-quality, interactive learning experiences accessible on a global scale. The paper highlights the importance of these innovations in creating a skilled workforce capable of tackling the evolving challenges in AI and cybersecurity, underscoring the need for ongoing research and development in this area. © 2024 SPIE.},
  keywords  = {3D virtual campus, AI and cybersecurity education, AI talent pipeline, ChatGPT digital tutor, CompTIA Security+, Computer aided instruction, Cyber security, Cyber-security educations, Cybersecurity, Deep learning, E-Learning, Immersive, Learning systems, Virtual campus, Virtual learning environments, Virtual Reality},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Liu, Z.; Zhu, Z.; Zhu, L.; Jiang, E.; Hu, X.; Peppler, K.; Ramani, K.
ClassMeta: Designing Interactive Virtual Classmate to Promote VR Classroom Participation Proceedings Article
In: Conf Hum Fact Comput Syst Proc, Association for Computing Machinery, 2024, ISBN: 979-840070330-0 (ISBN).
Abstract | Links | BibTeX | Tags: 3D Avatars, Behavioral Research, Classroom learning, Collaborative learning, Computational Linguistics, Condition, E-Learning, Human behaviors, Language Model, Large language model, Learning experiences, Learning systems, pedagogical agent, Pedagogical agents, Students, Three dimensional computer graphics, Virtual Reality, VR classroom
@inproceedings{liu_classmeta_2024,
  title     = {{ClassMeta}: Designing Interactive Virtual Classmate to Promote {VR} Classroom Participation},
  author    = {Liu, Z. and Zhu, Z. and Zhu, L. and Jiang, E. and Hu, X. and Peppler, K. and Ramani, K.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85194868458&doi=10.1145%2f3613904.3642947&partnerID=40&md5=0592b2f977a2ad2e6366c6fa05808a6a},
  doi       = {10.1145/3613904.3642947},
  isbn      = {979-8-4007-0330-0},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {Conf Hum Fact Comput Syst Proc},
  publisher = {Association for Computing Machinery},
  abstract  = {Peer influence plays a crucial role in promoting classroom participation, where behaviors from active students can contribute to a collective classroom learning experience. However, the presence of these active students depends on several conditions and is not consistently available across all circumstances. Recently, Large Language Models (LLMs) such as GPT have demonstrated the ability to simulate diverse human behaviors convincingly due to their capacity to generate contextually coherent responses based on their role settings. Inspired by this advancement in technology, we designed ClassMeta, a GPT-4 powered agent to help promote classroom participation by playing the role of an active student. These agents, which are embodied as 3D avatars in virtual reality, interact with actual instructors and students with both spoken language and body gestures. We conducted a comparative study to investigate the potential of ClassMeta for improving the overall learning experience of the class. © 2024 Copyright held by the owner/author(s)},
  keywords  = {3D Avatars, Behavioral Research, Classroom learning, Collaborative learning, Computational Linguistics, Condition, E-Learning, Human behaviors, Language Model, Large language model, Learning experiences, Learning systems, pedagogical agent, Pedagogical agents, Students, Three dimensional computer graphics, Virtual Reality, VR classroom},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Clocchiatti, A.; Fumero, N.; Soccini, A. M.
Character Animation Pipeline based on Latent Diffusion and Large Language Models Proceedings Article
In: Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR, pp. 398–405, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-835037202-1 (ISBN).
Abstract | Links | BibTeX | Tags: Animation, Animation pipeline, Artificial intelligence, Augmented Reality, Character animation, Computational Linguistics, Computer animation, Deep learning, Diffusion, E-Learning, Extended reality, Film production, Generative art, Language Model, Learning systems, Learning techniques, Natural language processing systems, Pipelines, Production pipelines, Virtual Reality
@inproceedings{clocchiatti_character_2024,
  title     = {Character Animation Pipeline based on Latent Diffusion and Large Language Models},
  author    = {Clocchiatti, A. and Fumero, N. and Soccini, A. M.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85187217072&doi=10.1109%2fAIxVR59861.2024.00067&partnerID=40&md5=d88b9ba7c80d49b60fd0d7acd5e7c4f0},
  doi       = {10.1109/AIxVR59861.2024.00067},
  isbn      = {979-8-3503-7202-1},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR},
  pages     = {398--405},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {Artificial intelligence and deep learning techniques are revolutionizing the film production pipeline. The majority of the current screenplay-to-animation pipelines focus on understanding the screenplay through natural language processing techniques, and on the generation of the animation through custom engines, missing the possibility to customize the characters. To address these issues, we propose a high-level pipeline for generating 2D characters and animations starting from screenplays, through a combination of Latent Diffusion Models and Large Language Models. Our approach uses ChatGPT to generate character descriptions starting from the screenplay. Then, using that data, it generates images of custom characters with Stable Diffusion and animates them according to their actions in different scenes. The proposed approach avoids well-known problems in generative AI tools such as temporal inconsistency and lack of control on the outcome. The results suggest that the pipeline is consistent and reliable, benefiting industries ranging from film production to virtual, augmented and extended reality content creation. © 2024 IEEE.},
  keywords  = {Animation, Animation pipeline, Artificial intelligence, Augmented Reality, Character animation, Computational Linguistics, Computer animation, Deep learning, Diffusion, E-Learning, Extended reality, Film production, Generative art, Language Model, Learning systems, Learning techniques, Natural language processing systems, Pipelines, Production pipelines, Virtual Reality},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Haramina, E.; Paladin, M.; Petričušić, Z.; Posarić, F.; Drobnjak, A.; Botički, I.
Learning Algorithms Concepts in a Virtual Reality Escape Room Proceedings Article
In: S., Babic; Z., Car; M., Cicin-Sain; D., Cisic; P., Ergovic; T.G., Grbac; V., Gradisnik; S., Gros; A., Jokic; A., Jovic; D., Jurekovic; T., Katulic; M., Koricic; V., Mornar; J., Petrovic; K., Skala; D., Skvorc; V., Sruk; M., Svaco; E., Tijan; N., Vrcek; B., Vrdoljak (Ed.): ICT Electron. Conv., MIPRO - Proc., pp. 2057–2062, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-835038249-5 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Computational complexity, Computer generated three dimensional environment, E-Learning, Education, Escape room, Extended reality, generative artificial intelligence, Learn+, Learning, Learning algorithms, Learning systems, Puzzle, puzzles, user experience, User study, User testing, Users' experiences, Virtual Reality
@inproceedings{haramina_learning_2024,
  title     = {Learning Algorithms Concepts in a Virtual Reality Escape Room},
  author    = {Haramina, E. and Paladin, M. and Petričušić, Z. and Posarić, F. and Drobnjak, A. and Botički, I.},
  editor    = {Babic, S. and Car, Z. and Cicin-Sain, M. and Cisic, D. and Ergovic, P. and Grbac, T. G. and Gradisnik, V. and Gros, S. and Jokic, A. and Jovic, A. and Jurekovic, D. and Katulic, T. and Koricic, M. and Mornar, V. and Petrovic, J. and Skala, K. and Skvorc, D. and Sruk, V. and Svaco, M. and Tijan, E. and Vrcek, N. and Vrdoljak, B.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85198221737&doi=10.1109%2fMIPRO60963.2024.10569447&partnerID=40&md5=8a94d92d989d1f0feb84eba890945de8},
  doi       = {10.1109/MIPRO60963.2024.10569447},
  isbn      = {979-8-3503-8249-5},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {ICT Electron. Conv., MIPRO - Proc.},
  pages     = {2057--2062},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {Although the standard way to learn algorithms is by coding, learning through games is another way to obtain knowledge while having fun. Virtual reality is a computer-generated three-dimensional environment in which the player is fully immersed by having external stimuli mostly blocked out. In the game presented in this paper, players are enhancing their algorithms skills by playing an escape room game. The goal is to complete the room within the designated time by solving puzzles. The puzzles change for every playthrough with the use of generative artificial intelligence to provide every player with a unique experience. There are multiple types of puzzles such as. time complexity, sorting algorithms, searching algorithms, and code execution. The paper presents the results of a study indicating students' preference for learning through gaming as a method of acquiring algorithms knowledge. © 2024 IEEE.},
  keywords  = {Artificial intelligence, Computational complexity, Computer generated three dimensional environment, E-Learning, Education, Escape room, Extended reality, generative artificial intelligence, Learn+, Learning, Learning algorithms, Learning systems, Puzzle, puzzles, user experience, User study, User testing, Users' experiences, Virtual Reality},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Liu, M.; M'Hiri, F.
Beyond Traditional Teaching: Large Language Models as Simulated Teaching Assistants in Computer Science Proceedings Article
In: SIGCSE - Proc. ACM Tech. Symp. Comput. Sci. Educ., pp. 743–749, Association for Computing Machinery, Inc, 2024, ISBN: 979-840070423-9 (ISBN).
Abstract | Links | BibTeX | Tags: Adaptive teaching, ChatGPT, Computational Linguistics, CS education, E-Learning, Education computing, Engineering education, GPT, Language Model, LLM, machine learning, Machine-learning, Novice programmer, novice programmers, Openai, Programming, Python, Students, Teaching, Virtual Reality
@inproceedings{liu_beyond_2024,
  title     = {Beyond Traditional Teaching: Large Language Models as Simulated Teaching Assistants in Computer Science},
  author    = {Liu, M. and M'Hiri, F.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85189289344&doi=10.1145%2f3626252.3630789&partnerID=40&md5=44ec79c8f005f4551c820c61f5b5d435},
  doi       = {10.1145/3626252.3630789},
  isbn      = {979-8-4007-0423-9},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {SIGCSE - Proc. ACM Tech. Symp. Comput. Sci. Educ.},
  volume    = {1},
  pages     = {743--749},
  publisher = {Association for Computing Machinery, Inc},
  abstract  = {As the prominence of Large Language Models (LLMs) grows in various sectors, their potential in education warrants exploration. In this study, we investigate the feasibility of employing GPT-3.5 from OpenAI, as an LLM teaching assistant (TA) or a virtual TA in computer science (CS) courses. The objective is to enhance the accessibility of CS education while maintaining academic integrity by refraining from providing direct solutions to current-semester assignments. Targeting Foundations of Programming (COMP202), an undergraduate course that introduces students to programming with Python, we have developed a virtual TA using the LangChain framework, known for integrating language models with diverse data sources and environments. The virtual TA assists students with their code and clarifies complex concepts. For homework questions, it is designed to guide students with hints rather than giving out direct solutions. We assessed its performance first through a qualitative evaluation, then a survey-based comparative analysis, using a mix of questions commonly asked on the COMP202 discussion board and questions created by the authors. Our preliminary results indicate that the virtual TA outperforms human TAs on clarity and engagement, matching them on accuracy when the question is non-assignment-specific, for which human TAs still proved more reliable. These findings suggest that while virtual TAs, leveraging the capabilities of LLMs, hold great promise towards making CS education experience more accessible and engaging, their optimal use necessitates human supervision. We conclude by identifying several directions that could be explored in future implementations. © 2024 ACM.},
  keywords  = {Adaptive teaching, ChatGPT, Computational Linguistics, CS education, E-Learning, Education computing, Engineering education, GPT, Language Model, LLM, machine learning, Machine-learning, Novice programmer, novice programmers, Openai, Programming, Python, Students, Teaching, Virtual Reality},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Chheang, V.; Sharmin, S.; Marquez-Hernandez, R.; Patel, M.; Rajasekaran, D.; Caulfield, G.; Kiafar, B.; Li, J.; Kullu, P.; Barmaki, R. L.
Towards Anatomy Education with Generative AI-based Virtual Assistants in Immersive Virtual Reality Environments Proceedings Article
In: Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR, pp. 21–30, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-835037202-1 (ISBN).
Abstract | Links | BibTeX | Tags: 3-D visualization systems, Anatomy education, Anatomy educations, Cognitive complexity, E-Learning, Embodied virtual assistant, Embodied virtual assistants, Generative AI, generative artificial intelligence, Human computer interaction, human-computer interaction, Immersive virtual reality, Interactive 3d visualizations, Knowledge Management, Medical education, Three dimensional computer graphics, Verbal communications, Virtual assistants, Virtual Reality, Virtual-reality environment
@inproceedings{chheang_towards_2024,
  title     = {Towards Anatomy Education with Generative {AI}-based Virtual Assistants in Immersive Virtual Reality Environments},
  author    = {Chheang, V. and Sharmin, S. and Marquez-Hernandez, R. and Patel, M. and Rajasekaran, D. and Caulfield, G. and Kiafar, B. and Li, J. and Kullu, P. and Barmaki, R. L.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85187216893&doi=10.1109%2fAIxVR59861.2024.00011&partnerID=40&md5=33e8744309add5fe400f4f341326505f},
  doi       = {10.1109/AIxVR59861.2024.00011},
  isbn      = {979-8-3503-7202-1},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR},
  pages     = {21--30},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {Virtual reality (VR) and interactive 3D visualization systems have enhanced educational experiences and environments, particularly in complicated subjects such as anatomy education. VR-based systems surpass the potential limitations of traditional training approaches in facilitating interactive engagement among students. However, research on embodied virtual assistants that leverage generative artificial intelligence (AI) and verbal communication in the anatomy education context is underrepresented. In this work, we introduce a VR environment with a generative AI-embodied virtual assistant to support participants in responding to varying cognitive complexity anatomy questions and enable verbal communication. We assessed the technical efficacy and usability of the proposed environment in a pilot user study with 16 participants. We conducted a within-subject design for virtual assistant configuration (avatar- and screen-based), with two levels of cognitive complexity (knowledge- and analysis-based). The results reveal a significant difference in the scores obtained from knowledge- and analysis-based questions in relation to avatar configuration. Moreover, results provide insights into usability, cognitive task load, and the sense of presence in the proposed virtual assistant configurations. Our environment and results of the pilot study offer potential benefits and future research directions beyond medical education, using generative AI and embodied virtual agents as customized virtual conversational assistants. © 2024 IEEE.},
  keywords  = {3-D visualization systems, Anatomy education, Anatomy educations, Cognitive complexity, E-Learning, Embodied virtual assistant, Embodied virtual assistants, Generative AI, generative artificial intelligence, Human computer interaction, human-computer interaction, Immersive virtual reality, Interactive 3d visualizations, Knowledge Management, Medical education, Three dimensional computer graphics, Verbal communications, Virtual assistants, Virtual Reality, Virtual-reality environment},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Krauss, C.; Bassbouss, L.; Upravitelev, M.; An, T. -S.; Altun, D.; Reray, L.; Balitzki, E.; Tamimi, T. El; Karagülle, M.
Opportunities and Challenges in Developing Educational AI-Assistants for the Metaverse Proceedings Article
In: R.A., Sottilare; J., Schwarz (Ed.): Lect. Notes Comput. Sci., pp. 219–238, Springer Science and Business Media Deutschland GmbH, 2024, ISBN: 03029743 (ISSN); 978-303160608-3 (ISBN).
Abstract | Links | BibTeX | Tags: 3D modeling, AI-assistant, AI-Assistants, Computational Linguistics, Computer aided instruction, Concept-based, E-Learning, Education, Interoperability, Language Model, Large language model, large language models, Learning Environments, Learning systems, Learning Technologies, Learning technology, LLM, Metaverse, Metaverses, Natural language processing systems, Proof of concept, User interfaces, Virtual assistants, Virtual Reality
@inproceedings{krauss_opportunities_2024,
title = {Opportunities and Challenges in Developing Educational {AI-Assistants} for the {Metaverse}},
author = {Krauss, C. and Bassbouss, L. and Upravitelev, M. and An, T.-S. and Altun, D. and Reray, L. and Balitzki, E. and El Tamimi, T. and Karagülle, M.},
editor = {Sottilare, R. A. and Schwarz, J.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85196214138&doi=10.1007%2f978-3-031-60609-0_16&partnerID=40&md5=9a66876cb30e9e5d287a86e6cfa66e05},
doi = {10.1007/978-3-031-60609-0_16},
issn = {0302-9743},
isbn = {978-3-031-60608-3},
year = {2024},
date = {2024-01-01},
booktitle = {Lect. Notes Comput. Sci.},
volume = {14727 LNCS},
pages = {219--238},
publisher = {Springer Science and Business Media Deutschland GmbH},
abstract = {The paper explores the opportunities and challenges for metaverse learning environments with AI-Assistants based on Large Language Models. A proof of concept based on popular but proprietary technologies is presented that enables a natural language exchange between the user and an AI-based medical expert in a highly immersive environment based on the Unreal Engine. The answers generated by ChatGPT are not only played back lip-synchronously, but also visualized in the VR environment using a 3D model of a skeleton. Usability and user experience play a particularly important role in the development of the highly immersive AI-Assistant. The proof of concept serves to illustrate the opportunities and challenges that lie in the merging of large language models, metaverse applications and educational ecosystems, which are self-contained research areas. Development strategies, tools and interoperability standards will be presented to facilitate future developments in this triangle of tension. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2024.},
keywords = {3D modeling, AI-assistant, AI-Assistants, Computational Linguistics, Computer aided instruction, Concept-based, E-Learning, Education, Interoperability, Language Model, Large language model, large language models, Learning Environments, Learning systems, Learning Technologies, Learning technology, LLM, Metaverse, Metaverses, Natural language processing systems, Proof of concept, User interfaces, Virtual assistants, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Sarshartehrani, F.; Mohammadrezaei, E.; Behravan, M.; Gracanin, D.
Enhancing E-Learning Experience Through Embodied AI Tutors in Immersive Virtual Environments: A Multifaceted Approach for Personalized Educational Adaptation Proceedings Article
In: R.A., Sottilare; J., Schwarz (Ed.): Lect. Notes Comput. Sci., pp. 272–287, Springer Science and Business Media Deutschland GmbH, 2024, ISBN: 03029743 (ISSN); 978-303160608-3 (ISBN).
Abstract | Links | BibTeX | Tags: Adaptive Learning, Artificial intelligence, Artificial intelligence in education, Computer aided instruction, Computer programming, E - learning, E-Learning, Education computing, Embodied artificial intelligence, Engineering education, Immersive Virtual Environments, Learner Engagement, Learning experiences, Learning systems, Multi-faceted approach, Personalized Instruction, Traditional boundaries, Virtual Reality
@inproceedings{sarshartehrani_enhancing_2024,
title = {Enhancing {E-Learning} Experience Through Embodied {AI} Tutors in Immersive Virtual Environments: A Multifaceted Approach for Personalized Educational Adaptation},
author = {Sarshartehrani, F. and Mohammadrezaei, E. and Behravan, M. and Gracanin, D.},
editor = {Sottilare, R. A. and Schwarz, J.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85196174389&doi=10.1007%2f978-3-031-60609-0_20&partnerID=40&md5=3801d0959781b1a191a3eb14f47bd8d8},
doi = {10.1007/978-3-031-60609-0_20},
issn = {0302-9743},
isbn = {978-3-031-60608-3},
year = {2024},
date = {2024-01-01},
booktitle = {Lect. Notes Comput. Sci.},
volume = {14727 LNCS},
pages = {272--287},
publisher = {Springer Science and Business Media Deutschland GmbH},
abstract = {As digital education transcends traditional boundaries, e-learning experiences are increasingly shaped by cutting-edge technologies like artificial intelligence (AI), virtual reality (VR), and adaptive learning systems. This study examines the integration of AI-driven personalized instruction within immersive VR environments, targeting enhanced learner engagement-a core metric in online education effectiveness. Employing a user-centric design, the research utilizes embodied AI tutors, calibrated to individual learners’ emotional intelligence and cognitive states, within a Python programming curriculum-a key area in computer science education. The methodology relies on intelligent tutoring systems and personalized learning pathways, catering to a diverse participant pool from Virginia Tech. Our data-driven approach, underpinned by the principles of educational psychology and computational pedagogy, indicates that AI-enhanced virtual learning environments significantly elevate user engagement and proficiency in programming education. Although the scope is limited to a single academic institution, the promising results advocate for the scalability of such AI-powered educational tools, with potential implications for distance learning, MOOCs, and lifelong learning platforms. This research contributes to the evolving narrative of smart education and the role of large language models (LLMs) in crafting bespoke educational experiences, suggesting a paradigm shift towards more interactive, personalized e-learning solutions that align with global educational technology trends. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2024.},
keywords = {Adaptive Learning, Artificial intelligence, Artificial intelligence in education, Computer aided instruction, Computer programming, E - learning, E-Learning, Education computing, Embodied artificial intelligence, Engineering education, Immersive Virtual Environments, Learner Engagement, Learning experiences, Learning systems, Multi-faceted approach, Personalized Instruction, Traditional boundaries, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Otoum, Y.; Gottimukkala, N.; Kumar, N.; Nayak, A.
Machine Learning in Metaverse Security: Current Solutions and Future Challenges Journal Article
In: ACM Computing Surveys, vol. 56, no. 8, 2024, ISSN: 03600300 (ISSN).
Abstract | Links | BibTeX | Tags: current, Block-chain, Blockchain, digital twin, E-Learning, Extended reality, Future challenges, Generative AI, machine learning, Machine-learning, Metaverse Security, Metaverses, Security and privacy, Spatio-temporal dynamics, Sustainable development
@article{otoum_machine_2024,
title = {Machine Learning in {Metaverse} Security: Current Solutions and Future Challenges},
author = {Otoum, Y. and Gottimukkala, N. and Kumar, N. and Nayak, A.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85193466017&doi=10.1145%2f3654663&partnerID=40&md5=b35485c5f2e943ec105ea11a80712cbe},
doi = {10.1145/3654663},
issn = {0360-0300},
year = {2024},
date = {2024-01-01},
journal = {ACM Computing Surveys},
volume = {56},
number = {8},
abstract = {The Metaverse, positioned as the next frontier of the Internet, has the ambition to forge a virtual shared realm characterized by immersion, hyper-spatiotemporal dynamics, and self-sustainability. Recent technological strides in AI, Extended Reality, 6G, and blockchain propel the Metaverse closer to realization, gradually transforming it from science fiction into an imminent reality. Nevertheless, the extensive deployment of the Metaverse faces substantial obstacles, primarily stemming from its potential to infringe on privacy and be susceptible to security breaches, whether inherent in its underlying technologies or arising from the evolving digital landscape. Metaverse security provisioning is poised to confront various foundational challenges owing to its distinctive attributes, encompassing immersive realism, hyper-spatiotemporally, sustainability, and heterogeneity. This article undertakes a comprehensive study of the security and privacy challenges facing the Metaverse, leveraging machine learning models for this purpose. In particular, our focus centers on an innovative distributed Metaverse architecture characterized by interactions across 3D worlds. Subsequently, we conduct a thorough review of the existing cutting-edge measures designed for Metaverse systems while also delving into the discourse surrounding security and privacy threats. As we contemplate the future of Metaverse systems, we outline directions for open research pursuits in this evolving landscape. © 2024 Copyright held by the owner/author(s). Publication rights licensed to ACM.},
keywords = {current, Block-chain, Blockchain, digital twin, E-Learning, Extended reality, Future challenges, Generative AI, machine learning, Machine-learning, Metaverse Security, Metaverses, Security and privacy, Spatio-temporal dynamics, Sustainable development},
pubstate = {published},
tppubtype = {article}
}
Scott, A. J. S.; McCuaig, F.; Lim, V.; Watkins, W.; Wang, J.; Strachan, G.
Revolutionizing Nurse Practitioner Training: Integrating Virtual Reality and Large Language Models for Enhanced Clinical Education Proceedings Article
In: G., Strudwick; N.R., Hardiker; G., Rees; R., Cook; R., Cook; Y.J., Lee (Ed.): Stud. Health Technol. Informatics, pp. 671–672, IOS Press BV, 2024, ISBN: 09269630 (ISSN); 978-164368527-4 (ISBN).
Abstract | Links | BibTeX | Tags: 3D modeling, 3D models, 3d-modeling, adult, anamnesis, clinical decision making, clinical education, Clinical Simulation, Computational Linguistics, computer interface, Computer-Assisted Instruction, conference paper, Curriculum, Decision making, E-Learning, Education, Health care education, Healthcare Education, human, Humans, Language Model, Large language model, large language models, Mesh generation, Model animations, Modeling languages, nurse practitioner, Nurse Practitioners, Nursing, nursing education, nursing student, OSCE preparation, procedures, simulation, Teaching, therapy, Training, Training program, User-Computer Interface, Virtual Reality, Virtual reality training
@inproceedings{scott_revolutionizing_2024,
title = {Revolutionizing Nurse Practitioner Training: Integrating Virtual Reality and Large Language Models for Enhanced Clinical Education},
author = {Scott, A. J. S. and McCuaig, F. and Lim, V. and Watkins, W. and Wang, J. and Strachan, G.},
editor = {Strudwick, G. and Hardiker, N. R. and Rees, G. and Cook, R. and Cook, R. and Lee, Y. J.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85199593781&doi=10.3233%2fSHTI240272&partnerID=40&md5=90c7bd43ba978f942723e6cf1983ffb3},
doi = {10.3233/SHTI240272},
issn = {0926-9630},
isbn = {978-1-64368-527-4},
year = {2024},
date = {2024-01-01},
booktitle = {Stud. Health Technol. Informatics},
volume = {315},
pages = {671--672},
publisher = {IOS Press BV},
abstract = {This project introduces an innovative virtual reality (VR) training program for student Nurse Practitioners, incorporating advanced 3D modeling, animation, and Large Language Models (LLMs). Designed to simulate realistic patient interactions, the program aims to improve communication, history taking, and clinical decision-making skills in a controlled, authentic setting. This abstract outlines the methods, results, and potential impact of this cutting-edge educational tool on nursing education. © 2024 The Authors.},
keywords = {3D modeling, 3D models, 3d-modeling, adult, anamnesis, clinical decision making, clinical education, Clinical Simulation, Computational Linguistics, computer interface, Computer-Assisted Instruction, conference paper, Curriculum, Decision making, E-Learning, Education, Health care education, Healthcare Education, human, Humans, Language Model, Large language model, large language models, Mesh generation, Model animations, Modeling languages, nurse practitioner, Nurse Practitioners, Nursing, nursing education, nursing student, OSCE preparation, procedures, simulation, Teaching, therapy, Training, Training program, User-Computer Interface, Virtual Reality, Virtual reality training},
pubstate = {published},
tppubtype = {inproceedings}
}
2023
Marín-Morales, J.; Llanes-Jurado, J.; Minissi, M. E.; Gómez-Zaragozá, L.; Altozano, A.; Alcaniz, M.
Gaze and Head Movement Patterns of Depressive Symptoms During Conversations with Emotional Virtual Humans Proceedings Article
In: Int. Conf. Affect. Comput. Intell. Interact., ACII, Institute of Electrical and Electronics Engineers Inc., 2023, ISBN: 979-835032743-4 (ISBN).
Abstract | Links | BibTeX | Tags: Biomarkers, Clustering, Clusterings, Computational Linguistics, Depressive disorder, Depressive symptom, E-Learning, Emotion elicitation, Eye movements, Gaze movements, K-means clustering, Language Model, Large language model, large language models, Learning systems, Mental health, Multivariant analysis, Signal processing, Statistical learning, virtual human, Virtual humans, Virtual Reality
@inproceedings{marin-morales_gaze_2023,
title = {Gaze and Head Movement Patterns of Depressive Symptoms During Conversations with Emotional Virtual Humans},
author = {Marín-Morales, J. and Llanes-Jurado, J. and Minissi, M. E. and Gómez-Zaragozá, L. and Altozano, A. and Alcaniz, M.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85184656388&doi=10.1109%2fACII59096.2023.10388134&partnerID=40&md5=143cdd8530e17a7b64bdf88f3a0496ab},
doi = {10.1109/ACII59096.2023.10388134},
isbn = {979-8-3503-2743-4},
year = {2023},
date = {2023-01-01},
booktitle = {Int. Conf. Affect. Comput. Intell. Interact., ACII},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Depressive symptoms involve dysfunctional social attitudes and heightened negative emotional states. Identifying biomarkers requires data collection in realistic environments that activate depression-specific phenomena. However, no previous research analysed biomarkers in combination with AI-powered conversational virtual humans (VH) for mental health assessment. This study aims to explore gaze and head movements patterns related to depressive symptoms during conversations with emotional VH. A total of 105 participants were evenly divided into a control group and a group of subjects with depressive symptoms (SDS). They completed six semi-guided conversations designed to evoke basic emotions. The VHs were developed using a cognitive-inspired framework, enabling real-time voice-based conversational interactions powered by a Large Language Model, and including emotional facial expressions and lip synchronization. They have embedded life-history, context, attitudes, emotions and motivations. Signal processing techniques were applied to obtain gaze and head movements features, and heatmaps were generated. Then, parametric and non-parametric statistical tests were applied to evaluate differences between groups. Additionally, a two-dimensional t-SNE embedding was created and combined with k-means clustering. Results indicate that SDS exhibited shorter blinks and longer saccades. The control group showed affiliative lateral head gyros and accelerations, while the SDS demonstrated stress-related back-and-forth movements. SDS also displayed the avoidance of eye contact. The exploratory multivariate statistical unsupervised learning achieved 72.3\% accuracy. 
The present study analyse biomarkers in affective processes with multiple social contextual factors and information modalities in ecological environments, and enhances our understanding of gaze and head movements patterns in individuals with depressive symptoms, ultimately contributing to the development of more effective assessments and intervention strategies. © 2023 IEEE.},
keywords = {Biomarkers, Clustering, Clusterings, Computational Linguistics, Depressive disorder, Depressive symptom, E-Learning, Emotion elicitation, Eye movements, Gaze movements, K-means clustering, Language Model, Large language model, large language models, Learning systems, Mental health, Multivariant analysis, Signal processing, Statistical learning, virtual human, Virtual humans, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Leng, Z.; Kwon, H.; Ploetz, T.
Generating Virtual On-body Accelerometer Data from Virtual Textual Descriptions for Human Activity Recognition Proceedings Article
In: ISWC - Proc. Int. Symp. Wearable Comput., pp. 39–43, Association for Computing Machinery, Inc, 2023, ISBN: 979-840070199-3 (ISBN).
Abstract | Links | BibTeX | Tags: Activity recognition, Computational Linguistics, E-Learning, Human activity recognition, Language Model, Large language model, large language models, Motion estimation, Motion Synthesis, On-body, Pattern recognition, Recognition models, Textual description, Training data, Virtual IMU Data, Virtual Reality, Wearable Sensors
@inproceedings{leng_generating_2023,
title = {Generating Virtual On-body Accelerometer Data from Virtual Textual Descriptions for Human Activity Recognition},
author = {Leng, Z. and Kwon, H. and Ploetz, T.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85175788497&doi=10.1145%2f3594738.3611361&partnerID=40&md5=ddecaf6d81f71511c8152ca14f33cd7f},
doi = {10.1145/3594738.3611361},
isbn = {979-8-4007-0199-3},
year = {2023},
date = {2023-01-01},
booktitle = {ISWC - Proc. Int. Symp. Wearable Comput.},
pages = {39--43},
publisher = {Association for Computing Machinery, Inc},
abstract = {The development of robust, generalized models for human activity recognition (HAR) has been hindered by the scarcity of large-scale, labeled data sets. Recent work has shown that virtual IMU data extracted from videos using computer vision techniques can lead to substantial performance improvements when training HAR models combined with small portions of real IMU data. Inspired by recent advances in motion synthesis from textual descriptions and connecting Large Language Models (LLMs) to various AI models, we introduce an automated pipeline that first uses ChatGPT to generate diverse textual descriptions of activities. These textual descriptions are then used to generate 3D human motion sequences via a motion synthesis model, T2M-GPT, and later converted to streams of virtual IMU data. We benchmarked our approach on three HAR datasets (RealWorld, PAMAP2, and USC-HAD) and demonstrate that the use of virtual IMU training data generated using our new approach leads to significantly improved HAR model performance compared to only using real IMU data. Our approach contributes to the growing field of cross-modality transfer methods and illustrate how HAR models can be improved through the generation of virtual training data that do not require any manual effort. © 2023 Owner/Author.},
keywords = {Activity recognition, Computational Linguistics, E-Learning, Human activity recognition, Language Model, Large language model, large language models, Motion estimation, Motion Synthesis, On-body, Pattern recognition, Recognition models, Textual description, Training data, Virtual IMU Data, Virtual Reality, Wearable Sensors},
pubstate = {published},
tppubtype = {inproceedings}
}
Bottega, J. A.; Kich, V. A.; Jesus, J. C.; Steinmetz, R.; Kolling, A. H.; Grando, R. B.; Guerra, R. S.; Gamarra, D. F. T.
Jubileo: An Immersive Simulation Framework for Social Robot Design Journal Article
In: Journal of Intelligent and Robotic Systems: Theory and Applications, vol. 109, no. 4, 2023, ISSN: 09210296 (ISSN).
Abstract | Links | BibTeX | Tags: Anthropomorphic Robots, Computational Linguistics, Cost effectiveness, E-Learning, English language learning, English languages, Human Robot Interaction, Human-robot interaction, Humanoid robot, Humans-robot interactions, Immersive, Language learning, Language Model, Large language model, large language models, Learning game, Machine design, Man machine systems, Open systems, Robot Operating System, Simulation framework, Simulation platform, Virtual Reality
@article{bottega_jubileo_2023,
title = {{Jubileo}: An Immersive Simulation Framework for Social Robot Design},
author = {Bottega, J. A. and Kich, V. A. and Jesus, J. C. and Steinmetz, R. and Kolling, A. H. and Grando, R. B. and Guerra, R. S. and Gamarra, D. F. T.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85178895874&doi=10.1007%2fs10846-023-01991-3&partnerID=40&md5=6392af1e9a500ef51c3e215bd9709ce5},
doi = {10.1007/s10846-023-01991-3},
issn = {0921-0296},
year = {2023},
date = {2023-01-01},
journal = {Journal of Intelligent and Robotic Systems: Theory and Applications},
volume = {109},
number = {4},
abstract = {This paper introduces Jubileo, an open-source simulated humanoid robot as a framework for the development of human-robot interaction applications. By leveraging the power of the Robot Operating System (ROS) and Unity in a virtual reality environment, this simulation establishes a strong connection to real robotics, faithfully replicating the robot’s physical components down to its motors and enabling communication with servo-actuators to control both the animatronic face and the joints of a real humanoid robot. To validate the capabilities of the framework, we propose English teaching games that integrate Virtual Reality (VR), game-based Human-Robot Interaction (HRI), and advanced large language models such as Generative Pre-trained Transformer (GPT). These games aim to foster linguistic competence within dynamic and interactive virtual environments. The incorporation of large language models bolsters the robot’s capability to generate human-like responses, thus facilitating a more realistic conversational experience. Moreover, the simulation framework reduces real-world testing risks and offers a cost-effective, efficient, and scalable platform for developing new HRI applications. The paper underscores the transformative potential of converging VR, large language models, and HRI, particularly in educational applications. © 2023, The Author(s), under exclusive licence to Springer Nature B.V.},
keywords = {Anthropomorphic Robots, Computational Linguistics, Cost effectiveness, E-Learning, English language learning, English languages, Human Robot Interaction, Human-robot interaction, Humanoid robot, Humans-robot interactions, Immersive, Language learning, Language Model, Large language model, large language models, Learning game, Machine design, Man machine systems, Open systems, Robot Operating System, Simulation framework, Simulation platform, Virtual Reality},
pubstate = {published},
tppubtype = {article}
}
Suryavanshi, D. P.; Kaveri, P. R.; Kadlag, P. S.
Advancing Digital Transformation in Indian Higher Education Institutions Proceedings Article
In: Intell. Comput. Control Eng. Bus. Syst., ICCEBS, Institute of Electrical and Electronics Engineers Inc., 2023, ISBN: 979-835039458-0 (ISBN).
Abstract | Links | BibTeX | Tags: Augmented Reality, Data Analysis, Data collection, Data handling, Developing countries, Digital revolution, Digital transformation, E-Learning, Educational Institution, Educational institutions, Engineering education, High educations, Higher education institutions, Information analysis, Learning systems, Literature studies, Metadata, Primary data, Stakeholder, Stakeholders, Technology Adoption
@inproceedings{suryavanshi_advancing_2023,
title = {Advancing Digital Transformation in {Indian} Higher Education Institutions},
author = {Suryavanshi, D. P. and Kaveri, P. R. and Kadlag, P. S.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85189153416&doi=10.1109%2fICCEBS58601.2023.10448947&partnerID=40&md5=8aff6f6dc84d011ed59e0f8cec9d9318},
doi = {10.1109/ICCEBS58601.2023.10448947},
isbn = {979-8-3503-9458-0},
year = {2023},
date = {2023-01-01},
booktitle = {Intell. Comput. Control Eng. Bus. Syst., ICCEBS},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {The paper focuses on advancing the use of Digital Transformation in Indian Higher Education Institutions, although India being a developing country it is important for the educational institution to practice transformation in various forms. The paper covers the detail literature study and conclude with various opinions that have been generated through primary data collection. The objective of the study is to identify the need of digital transformation for education environment by two major methods literature study and stakeholder data analysis. Technological expectation was also studied using questionnaires. The study also analyzed related studies that had been done in the past using the Vosviewer programme for the years 1980 to 2004 for Scopus dataset in order to understand the year-by-year publications, research articles, and book chapters in the subject of Digital Transformation in Higher Education. The majority of stakeholders concur that using digital transformation technologies like IoT, AI \& ChatGpt, Generative AI, Augmented reality in higher education is essential for implementing NEP 2020 and successfully integrating digital technologies. The paper covers a detail discussion including literature review on various aspects of digital transformation in education institutes. It also covers opinion from various stakeholders to understand actual outcomes expected from the study which was conducted. The current study uses a mixed research methodology because the questionnaire includes both quantitative and qualitative questions. A sample of 40 respondents was collected, representing the four main stakeholders in education: students, faculty, businesspeople, and educationalists. The responses were analysed using the SPSS Percentage and mean. The newly adopted educational policy NEP 2020 encourages the use of technology and skill-based learning. 
The importance of technology in teaching and learning processes has been emphasized in numerous research papers in order to improve the teaching-learning process and its outcomes. The thorough assessment of the literature was carried out utilizing the VOS viewer to evaluate the pertinent studies and pinpoint any gaps. © 2023 IEEE.},
keywords = {Augmented Reality, Data Analysis, Data collection, Data handling, Developing countries, Digital revolution, Digital transformation, E-Learning, Educational Institution, Educational institutions, Engineering education, High educations, Higher education institutions, Information analysis, Learning systems, Literature studies, Metadata, Primary data, Stakeholder, Stakeholders, Technology Adoption},
pubstate = {published},
tppubtype = {inproceedings}
}
DeChant, C.; Akinola, I.; Bauer, D.
Learning to summarize and answer questions about a virtual robot’s past actions Journal Article
In: Autonomous Robots, vol. 47, no. 8, pp. 1103–1118, 2023, ISSN: 09295593 (ISSN).
Abstract | Links | BibTeX | Tags: Action sequences, E-Learning, Interpretability, Language Model, Long horizon task, Long horizon tasks, Natural language processing systems, Natural languages, Question Answering, Representation learning, Robots, Summarization, Video frame, Virtual Reality, Virtual robots, Zero-shot learning
@article{dechant_learning_2023,
title = {Learning to summarize and answer questions about a virtual robot’s past actions},
author = {DeChant, C. and Akinola, I. and Bauer, D.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85176588341&doi=10.1007%2fs10514-023-10134-4&partnerID=40&md5=162b3343d5f000f2b79f59c339f99022},
doi = {10.1007/s10514-023-10134-4},
issn = {0929-5593},
year = {2023},
date = {2023-01-01},
journal = {Autonomous Robots},
volume = {47},
number = {8},
pages = {1103--1118},
abstract = {When robots perform long action sequences, users will want to easily and reliably find out what they have done. We therefore demonstrate the task of learning to summarize and answer questions about a robot agent’s past actions using natural language alone. A single system with a large language model at its core is trained to both summarize and answer questions about action sequences given ego-centric video frames of a virtual robot and a question prompt. To enable training of question answering, we develop a method to automatically generate English-language questions and answers about objects, actions, and the temporal order in which actions occurred during episodes of robot action in the virtual environment. Training one model to both summarize and answer questions enables zero-shot transfer of representations of objects learned through question answering to improved action summarization. © 2023, The Author(s).},
keywords = {Action sequences, E-Learning, Interpretability, Language Model, Long horizon task, Long horizon tasks, Natural language processing systems, Natural languages, Question Answering, Representation learning, Robots, Summarization, Video frame, Virtual Reality, Virtual robots, Zero-shot learning},
pubstate = {published},
tppubtype = {article}
}
Ayre, D.; Dougherty, C.; Zhao, Y.
IMPLEMENTATION OF AN ARTIFICIAL INTELLIGENCE (AI) INSTRUCTIONAL SUPPORT SYSTEM IN A VIRTUAL REALITY (VR) THERMAL-FLUIDS LABORATORY Proceedings Article
In: ASME Int Mech Eng Congress Expos Proc, American Society of Mechanical Engineers (ASME), 2023, ISBN: 978-079188765-3 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, E-Learning, Education computing, Engineering education, Fluid mechanics, Generative AI, generative artificial intelligence, GPT, High educations, Instructional support, Laboratories, Laboratory class, Laboratory experiments, Physical laboratory, Professional aspects, Students, Support systems, Thermal fluids, Virtual Reality, Virtual-reality environment
@inproceedings{ayre_implementation_2023,
title = {Implementation of an Artificial Intelligence ({AI}) Instructional Support System in a Virtual Reality ({VR}) Thermal-Fluids Laboratory},
author = {Ayre, D. and Dougherty, C. and Zhao, Y.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85185393784&doi=10.1115%2fIMECE2023-112683&partnerID=40&md5=c2492592a016478a4b3591ff82a93be5},
doi = {10.1115/IMECE2023-112683},
isbn = {978-0-7918-8765-3},
year = {2023},
date = {2023-01-01},
booktitle = {ASME Int Mech Eng Congress Expos Proc},
volume = {8},
publisher = {American Society of Mechanical Engineers (ASME)},
abstract = {Physical laboratory experiments have long been the cornerstone of higher education, providing future engineers practical real-life experience invaluable to their careers. However, demand for laboratory time has exceeded physical capabilities. Virtual reality (VR) labs have proven to retain many benefits of attending physical labs while also providing significant advantages only available in a VR environment. Previously, our group had developed a pilot VR lab that replicated six (6) unique thermal-fluids lab experiments developed using the Unity game engine. One of the VR labs was tested in a thermal-fluid mechanics laboratory class with favorable results, but students highlighted the need for additional assistance within the VR simulation. In response to this testing, we have incorporated an artificial intelligence (AI) assistant to aid students within the VR environment by developing an interaction model. Utilizing the Generative Pre-trained Transformer 4 (GPT-4) large language model (LLM) and augmented context retrieval, the AI assistant can provide reliable instruction and troubleshoot errors while students conduct the lab procedure to provide an experience similar to a real-life lab assistant. The updated VR lab was tested in two laboratory classes and while the overall tone of student response to an AI-powered assistant was excitement and enthusiasm, observations and other recorded data show that students are currently unsure of how to utilize this new technology, which will help guide future refinement of AI components within the VR environment. © 2023 by ASME.},
keywords = {Artificial intelligence, E-Learning, Education computing, Engineering education, Fluid mechanics, Generative AI, generative artificial intelligence, GPT, High educations, Instructional support, Laboratories, Laboratory class, Laboratory experiments, Physical laboratory, Professional aspects, Students, Support systems, Thermal fluids, Virtual Reality, Virtual-reality environment},
pubstate = {published},
tppubtype = {inproceedings}
}
2016
Augello, Agnese; Gentile, Manuel; Dignum, Frank
Social Agents for Learning in Virtual Environments Journal Article
In: Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), vol. 10056 LNCS, pp. 133–143, 2016, ISSN: 03029743.
Abstract | Links | BibTeX | Tags: Conversational Agents, E-Learning, Education, IVA, Serious game, Social Agents, Social Context, Social Practices, Virtual Reality
@article{augelloSocialAgentsLearning2016,
  title     = {Social Agents for Learning in Virtual Environments},
  author    = {Augello, Agnese and Gentile, Manuel and Dignum, Frank},
  editor    = {Jeuring, J. and Veltkamp, R. C. and Bottino, R.},
  doi       = {10.1007/978-3-319-50182-6_12},
  issn      = {0302-9743},
  year      = {2016},
  date      = {2016-01-01},
  journal   = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  volume    = {10056 LNCS},
  pages     = {133--143},
  abstract  = {Several serious games have been proposed to practice communication strategies in formal contexts. Intelligent virtual agents (IVA) can be used to show the player the effects of a conversational move. In this paper we discuss the key role of using social context for the virtual agents in these serious games. Social practices are exploited to bundle social interactions into standard packages and as a basis to model the deliberation processes of IVAs. We describe a social practice oriented IVA architecture used in the implementation of a serious game for the practicing of communication in medical interviews. © Springer International Publishing AG 2016.},
  keywords  = {Conversational Agents, E-Learning, Education, IVA, Serious game, Social Agents, Social Context, Social Practices, Virtual Reality},
  pubstate  = {published},
  tppubtype = {article}
}
Augello, Agnese; Gentile, Manuel; Dignum, Frank
Social agents for learning in virtual environments Journal Article
In: Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), vol. 10056 LNCS, pp. 133–143, 2016, ISSN: 03029743.
Abstract | Links | BibTeX | Tags: Conversational Agents, E-Learning, Education, IVA, Serious game, Social Agents, Social Context, Social Practices, Virtual Reality
@article{augello_social_2016,
  title     = {Social agents for learning in virtual environments},
  author    = {Augello, Agnese and Gentile, Manuel and Dignum, Frank},
  editor    = {Veltkamp, R. C. and Jeuring, J. and Bottino, R.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85006036432&doi=10.1007%2f978-3-319-50182-6_12&partnerID=40&md5=e7d1d4e46a92c9cf9d943639fc5dbbc9},
  doi       = {10.1007/978-3-319-50182-6_12},
  issn      = {0302-9743},
  year      = {2016},
  date      = {2016-01-01},
  journal   = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  volume    = {10056 LNCS},
  pages     = {133--143},
  abstract  = {Several serious games have been proposed to practice communication strategies in formal contexts. Intelligent virtual agents (IVA) can be used to show the player the effects of a conversational move. In this paper we discuss the key role of using social context for the virtual agents in these serious games. Social practices are exploited to bundle social interactions into standard packages and as a basis to model the deliberation processes of IVAs. We describe a social practice oriented IVA architecture used in the implementation of a serious game for the practicing of communication in medical interviews. © Springer International Publishing AG 2016.},
  keywords  = {Conversational Agents, E-Learning, Education, IVA, Serious game, Social Agents, Social Context, Social Practices, Virtual Reality},
  pubstate  = {published},
  tppubtype = {article}
}