AHCI RESEARCH GROUP
Publications
Papers published in international journals,
proceedings of conferences, workshops and books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTeX record for each paper.
2025
Tsai, Y. -J.; Liu, S. -T.; Hsu, S. -C.
The Development of an Interactive IoT Cross-Media Survey System and Real-Time Re-presentation of Mass Learning Proceedings Article
In: J., Wei; G., Margetis (Ed.): Lect. Notes Comput. Sci., pp. 145–157, Springer Science and Business Media Deutschland GmbH, 2025, ISBN: 03029743 (ISSN); 978-303193060-7 (ISBN).
Abstract | Links | BibTeX | Tags: Cross-media, Data Re-presentation, Internet of Things, IoT Cross-Media System, IoT cross-medium system, Learning outcome, Learning systems, Mass Learning, Media systems, Smart phones, Smartphone, Smartphones, STEM with A, Survey System, Survey systems, Surveying, Tangible User Interface, Tangible user interfaces, User interfaces, Virtual Reality
@inproceedings{tsai_development_2025,
  title     = {The Development of an Interactive {IoT} Cross-Media Survey System and Real-Time Re-presentation of Mass Learning},
  author    = {Tsai, Y.-J. and Liu, S.-T. and Hsu, S.-C.},
  editor    = {Wei, J. and Margetis, G.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105008756188&doi=10.1007%2f978-3-031-93061-4_10&partnerID=40&md5=c487828eeacfdf18cf4e726e6ce28146},
  doi       = {10.1007/978-3-031-93061-4_10},
  isbn      = {978-3-031-93060-7},
  issn      = {0302-9743},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Lecture Notes in Computer Science},
  volume    = {15823},
  pages     = {145--157},
  publisher = {Springer Science and Business Media Deutschland GmbH},
  abstract  = {In this study, we propose the Interactive IoT Cross-Media Survey System, integrating tangible interaction in a game-like manner with real-time data re-presentation. This system was implemented in the “STEM with A” Interactive Exploration Hall at National Tsing Hua University in 2020. It enabled participants to use their smartphones as tangible user interfaces to “scoop-up questions” from interactive sensing points within the exhibition areas. After completing the questions, participants could “pour-in” their responses and observe digital data re-presentation artworks generated from survey results, showcasing mass learning outcomes. Furthermore, the data re-presentation content was tailored to participants’ group characteristics, showing how their responses impact the group’s overall learning outcomes with each “pour-in response.” The study achieved several key outcomes: (1) transforming traditional surveys into a gamified survey system, enhancing participants’ engagement, (2) providing real-time, group-based data re-presentations, enabling participants to contribute to the group’s learning outcomes, and (3) implementing a grouping mechanism to foster collaboration within groups and healthy competition between them. This system provides flexible and customizable data re-presentation, making it suitable for diverse environments requiring real-time data-driven engagement. Future applications can integrate emerging technologies, such as generative AI to dynamically generate questions or virtual reality to offer immersive experiences. Additionally, data re-presentations can be designed as dynamic mass artistic creations, allowing participants to become co-creators of an evolving collective masterpiece. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2025.},
  keywords  = {Cross-media, Data Re-presentation, Internet of Things, IoT Cross-Media System, IoT cross-medium system, Learning outcome, Learning systems, Mass Learning, Media systems, Smart phones, Smartphone, Smartphones, STEM with A, Survey System, Survey systems, Surveying, Tangible User Interface, Tangible user interfaces, User interfaces, Virtual Reality},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Logothetis, I.; Diakogiannis, K.; Vidakis, N.
Interactive Learning Through Conversational Avatars and Immersive VR: Enhancing Diabetes Education and Self-Management Proceedings Article
In: X., Fang (Ed.): Lect. Notes Comput. Sci., pp. 415–429, Springer Science and Business Media Deutschland GmbH, 2025, ISBN: 03029743 (ISSN); 978-303192577-1 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Chronic disease, Computer aided instruction, Diabetes Education, Diagnosis, E-Learning, Education management, Engineering education, Gamification, Immersive virtual reality, Interactive computer graphics, Interactive learning, Large population, Learning systems, NUI, Self management, Serious game, Serious games, simulation, Virtual Reality
@inproceedings{logothetis_interactive_2025,
  title     = {Interactive Learning Through Conversational Avatars and Immersive {VR}: Enhancing Diabetes Education and Self-Management},
  author    = {Logothetis, I. and Diakogiannis, K. and Vidakis, N.},
  editor    = {Fang, X.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105008266480&doi=10.1007%2f978-3-031-92578-8_27&partnerID=40&md5=451274dfa3ef0b3f1b39c7d5a665ee3b},
  doi       = {10.1007/978-3-031-92578-8_27},
  isbn      = {978-3-031-92577-1},
  issn      = {0302-9743},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Lecture Notes in Computer Science},
  volume    = {15816},
  pages     = {415--429},
  publisher = {Springer Science and Business Media Deutschland GmbH},
  abstract  = {Diabetes is a chronic disease affecting a large population of the world. Education and self-management of diabetes are crucial. Technologies such as Virtual Reality (VR) have presented promising results in healthcare education, while studies suggest that Artificial Intelligence (AI) can help in learning by further engaging the learner. This study aims to educate users on the entire routine of managing diabetes. The serious game utilizes VR for realistic interaction with diabetes tools and generative AI through a conversational avatar that acts as an assistant instructor. In this way, it allows users to practice diagnostic and therapeutic interventions in a controlled virtual environment, helping to build their understanding and confidence in diabetes management. To measure the effects of the proposed serious game, presence, and perceived agency were measured. Preliminary results indicate that this setup aids in the engagement and immersion of learners, while the avatar can provide helpful information during gameplay. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2025.},
  keywords  = {Artificial intelligence, Chronic disease, Computer aided instruction, Diabetes Education, Diagnosis, E-Learning, Education management, Engineering education, Gamification, Immersive virtual reality, Interactive computer graphics, Interactive learning, Large population, Learning systems, NUI, Self management, Serious game, Serious games, simulation, Virtual Reality},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ozeki, R.; Yonekura, H.; Rizk, H.; Yamaguchi, H.
Cellular-based Indoor Localization with Adapted LLM and Label-aware Contrastive Learning Proceedings Article
In: pp. 138–145, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331586461 (ISBN).
Abstract | Links | BibTeX | Tags: Cellular Network, Cellulars, Computer interaction, Contrastive Learning, Deep learning, Human computer interaction, Indoor Localization, Indoor Navigation, Indoor positioning, Indoor positioning systems, Language Model, Large language model, Learning systems, Mobile computing, Mobile-computing, Signal processing, Smart Environment, Wireless networks
@inproceedings{ozeki_cellular-based_2025,
  title     = {Cellular-based Indoor Localization with Adapted {LLM} and Label-aware Contrastive Learning},
  author    = {Ozeki, R. and Yonekura, H. and Rizk, H. and Yamaguchi, H.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105010820397&doi=10.1109%2FSMARTCOMP65954.2025.00070&partnerID=40&md5=9e15d9f4225f00cd57bedc511aad27d9},
  doi       = {10.1109/SMARTCOMP65954.2025.00070},
  isbn      = {979-8-3315-8646-1},
  year      = {2025},
  date      = {2025-01-01},
  pages     = {138--145},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {Accurate indoor positioning is essential for mobile computing, human-computer interaction, and next-generation smart environments, enabling applications in indoor navigation, augmented reality, personalized services, healthcare, and emergency response. Cellular signal fingerprinting has emerged as a widely adopted solution, with deep learning models achieving state-of-the-art performance. However, existing approaches face critical deployment challenges, including labor-intensive fingerprinting, sparse reference points, and missing RSS values caused by environmental interference, hardware variability, and dynamic signal fluctuations. These limitations hinder their scalability, adaptability, and real-world usability in complex indoor environments. To address these challenges, we present GPT2Loc a novel indoor localization framework that integrates LLM with label-aware contrastive learning, improving accuracy while reducing reliance on extensive fingerprinting. LLMs effectively extract meaningful spatial features from incomplete and noisy RSS data, enabling robust localization even in sparsely finger-printed areas. Our label-aware contrastive learning approach further enhances generalization by aligning latent representations with spatial relationships, allowing GPT2Loc to interpolate user locations in unseen areas and mitigate signal inconsistencies. © 2025 Elsevier B.V., All rights reserved.},
  keywords  = {Cellular Network, Cellulars, Computer interaction, Contrastive Learning, Deep learning, Human computer interaction, Indoor Localization, Indoor Navigation, Indoor positioning, Indoor positioning systems, Language Model, Large language model, Learning systems, Mobile computing, Mobile-computing, Signal processing, Smart Environment, Wireless networks},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Shi, L.; Gu, Y.; Zheng, Y.; Kameda, S.; Lu, H.
LWD-IUM: A Lightweight Detector for Advancing Robotic Grasp in VR-Based Industrial and Underwater Metaverse Proceedings Article
In: pp. 1384–1391, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331508876 (ISBN).
Abstract | Links | BibTeX | Tags: 3D object, 3D object detection, Deep learning, generative artificial intelligence, Grasping and manipulation, Intelligent robots, Learning systems, Metaverses, Neural Networks, Object Detection, Object recognition, Objects detection, Real- time, Real-time, Robotic grasping, robotic grasping and manipulation, Robotic manipulation, Virtual Reality, Vision transformer, Visual servoing
@inproceedings{shi_lwd-ium_2025,
  title     = {{LWD-IUM}: A Lightweight Detector for Advancing Robotic Grasp in {VR}-Based Industrial and Underwater Metaverse},
  author    = {Shi, L. and Gu, Y. and Zheng, Y. and Kameda, S. and Lu, H.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105011354353&doi=10.1109%2FIWCMC65282.2025.11059637&partnerID=40&md5=77aa4cdb0a08a1db5d0027a71403da89},
  doi       = {10.1109/IWCMC65282.2025.11059637},
  isbn      = {979-8-3315-0887-6},
  year      = {2025},
  date      = {2025-01-01},
  pages     = {1384--1391},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {In the burgeoning field of virtual reality (VR) metaverse, the sophistication of interactions between robotic agents and their environment has become a critical concern. In this work, we present LWD-IUM, a novel light-weight detector designed to enhance robotic grasp capabilities in the VR metaverse. LWD-IUM applies deep learning techniques to discern and navigate the complex VR metaverse environment, aiding robotic agents in the identification and grasping of objects with high precision and efficiency. The algorithm is constructed with an advanced lightweight neural network structure based on self-attention mechanism that ensures optimal balance between computational cost and performance, making it highly suitable for real-time applications in VR. Evaluation on the KITTI 3D dataset demonstrated real-time detection capabilities (24-30 fps) of LWD-IUM, with its mean average precision (mAP) remaining 80% above standard 3D detectors, even with a 50% parameter reduction. In addition, we show that LWD-IUM outperforms existing models for object detection and grasping tasks through the real environment testing on a Baxter dual-arm collaborative robot. By pioneering advancements in robotic grasp in the VR metaverse, LWD-IUM promotes more immersive and realistic interactions, pushing the boundaries of what's possible in virtual experiences. © 2025 Elsevier B.V., All rights reserved.},
  keywords  = {3D object, 3D object detection, Deep learning, generative artificial intelligence, Grasping and manipulation, Intelligent robots, Learning systems, Metaverses, Neural Networks, Object Detection, Object recognition, Objects detection, Real- time, Real-time, Robotic grasping, robotic grasping and manipulation, Robotic manipulation, Virtual Reality, Vision transformer, Visual servoing},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mendoza, A. P.; Quiroga, K. J. Barrios; Celis, S. D. Solano; M., C. G. Quintero
NAIA: A Multi-Technology Virtual Assistant for Boosting Academic Environments—A Case Study Journal Article
In: IEEE Access, vol. 13, pp. 141461–141483, 2025, ISSN: 21693536 (ISSN), (Publisher: Institute of Electrical and Electronics Engineers Inc.).
Abstract | Links | BibTeX | Tags: Academic environment, Artificial intelligence, Case-studies, Computational Linguistics, Computer vision, Digital avatar, Digital avatars, Efficiency, Human computer interaction, Human-AI Interaction, Interactive computer graphics, Language Model, Large language model, large language model (LLM), Learning systems, Natural language processing systems, Personal digital assistants, Personnel training, Population statistics, Speech communication, Speech processing, Speech to text, speech to text (STT), Text to speech, text to speech (TTS), user experience, User interfaces, Virtual assistant, Virtual assistants, Virtual Reality
@article{mendoza_naia_2025,
  title     = {{NAIA}: A Multi-Technology Virtual Assistant for Boosting Academic Environments—A Case Study},
  author    = {Mendoza, A. P. and Barrios Quiroga, K. J. and Solano Celis, S. D. and {Quintero M.}, C. G.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105013598763&doi=10.1109%2FACCESS.2025.3597565&partnerID=40&md5=7ad6b037cfedb943fc026642c4854284},
  doi       = {10.1109/ACCESS.2025.3597565},
  issn      = {2169-3536},
  year      = {2025},
  date      = {2025-01-01},
  journal   = {IEEE Access},
  volume    = {13},
  pages     = {141461--141483},
  abstract  = {Virtual assistants have become essential tools for improving productivity and efficiency in various domains. This paper presents NAIA (Nimble Artificial Intelligence Assistant), an advanced multi-role and multi-task virtual assistant enhanced with artificial intelligence, designed to serve a university community case study. The system integrates AI technologies including Large Language Models (LLM), Computer Vision, and voice processing to create an immersive and efficient interaction through animated digital avatars. NAIA features five specialized roles: researcher, receptionist, personal skills trainer, personal assistant, and university guide, each equipped with specific capabilities to support different aspects of academic life. The system’s Computer Vision capabilities enable it to comment on users’ physical appearance and environment, enriching the interaction. Through natural language processing and voice interaction, NAIA aims to improve productivity and efficiency within the university environment while providing personalized assistance through a ubiquitous platform accessible across multiple devices. NAIA is evaluated through a user experience survey involving 30 participants with different demographic characteristics, this is the most accepted way by the community to evaluate this type of solution. Participants give their feedback after using one role of NAIA after using it for 30 minutes. The experiment showed that 90% of the participants considered NAIA-assisted tasks of higher quality and, on average, NAIA has a score of 4.27 out of 5 on user satisfaction. Participants particularly appreciated the assistant’s visual recognition, natural conversation flow, and user interaction capabilities. Results demonstrate NAIA’s capabilities and effectiveness across the five roles. © 2025 Elsevier B.V., All rights reserved.},
  note      = {Publisher: Institute of Electrical and Electronics Engineers Inc.},
  keywords  = {Academic environment, Artificial intelligence, Case-studies, Computational Linguistics, Computer vision, Digital avatar, Digital avatars, Efficiency, Human computer interaction, Human-AI Interaction, Interactive computer graphics, Language Model, Large language model, large language model (LLM), Learning systems, Natural language processing systems, Personal digital assistants, Personnel training, Population statistics, Speech communication, Speech processing, Speech to text, speech to text (STT), Text to speech, text to speech (TTS), user experience, User interfaces, Virtual assistant, Virtual assistants, Virtual Reality},
  pubstate  = {published},
  tppubtype = {article}
}
Anvitha, K.; Durjay, T.; Sathvika, K.; Gnanendra, G.; Annamalai, S.; Natarajan, S. K.
EduBot: A Compact AI-Driven Study Assistant for Contextual Knowledge Retrieval Proceedings Article
In: Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331507756 (ISBN).
Abstract | Links | BibTeX | Tags: Chatbots, Computer aided instruction, Contextual knowledge, Curricula, Digital Education, E-Learning, Education computing, Educational Technology, Engineering education, Indexing (of information), Information Retrieval, Intelligent systems, Knowledge retrieval, LangChain Framework, Language Model, Large language model, learning experience, Learning experiences, Learning systems, LLM, PDF - Driven Chatbot, Query processing, Students, Teaching, Traditional learning, Virtual Reality
@inproceedings{anvitha_edubot_2025,
  title     = {{EduBot}: A Compact {AI}-Driven Study Assistant for Contextual Knowledge Retrieval},
  author    = {Anvitha, K. and Durjay, T. and Sathvika, K. and Gnanendra, G. and Annamalai, S. and Natarajan, S. K.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105013615976&doi=10.1109%2FGINOTECH63460.2025.11077097&partnerID=40&md5=b08377283f2ea2ee406d38d1d23f1e42},
  doi       = {10.1109/GINOTECH63460.2025.11077097},
  isbn      = {979-8-3315-0775-6},
  year      = {2025},
  date      = {2025-01-01},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {In the evolving landscape of educational technology, intelligent systems are redefining traditional learning methods by enhancing accessibility, adaptability, and engagement in instructional processes. This paper presents EduBot, a PDF-Driven Chatbot developed using advanced Large Language Models (LLMs) and leveraging frameworks like LangChain, OpenAI's Chat-Gpt, and Pinecone. EduBot is designed as an interactive educational assistant, responding to student queries based on faculty-provided guidelines embedded in PDF documents. Through natural language processing, EduBot streamlines information retrieval, providing accurate, context-aware responses that foster a self- directed learning experience. By aligning with specific academic requirements and enhancing clarity in information delivery, EduBot stands as a promising tool in personalized digital learning support. This paper explores the design, implementation, and impact of EduBot, offering insights into its potential as a scalable solution for academic institutions The demand for accessible and adaptive educational tools is increasing as students seek more personalized and efficient ways to enhance their learning experience. EduBot is a cutting- edge PDF-driven chatbot designed to act as a virtual educational assistant, helping students to navigate and understand course materials by answering queries directly based on faculty guidelines. Built upon Large Language Models (LLMs), specifically utilizing frameworks such as LangChain and OpenAI's GPT-3.5, EduBot provides a sophisticated solution for integrating curated academic content into interactive learning. With its backend support from Pinecone for optimized data indexing, EduBot offers accurate and context-specific responses, facilitating a deeper level of engagement and comprehension. The average relevancy score is 80%. 
This paper outlines the design and deployment of EduBot, emphasizing its architecture, adaptability, and contributions to the educational landscape, where such AI- driven tools are poised to become indispensable in fostering autonomous, personalized learning environments. © 2025 Elsevier B.V., All rights reserved.},
  keywords  = {Chatbots, Computer aided instruction, Contextual knowledge, Curricula, Digital Education, E-Learning, Education computing, Educational Technology, Engineering education, Indexing (of information), Information Retrieval, Intelligent systems, Knowledge retrieval, LangChain Framework, Language Model, Large language model, learning experience, Learning experiences, Learning systems, LLM, PDF - Driven Chatbot, Query processing, Students, Teaching, Traditional learning, Virtual Reality},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Alex, G.
Leveraging Large Language Models for Automated XR Instructional Content Generation Proceedings Article
In: Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331585341 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Authoring Tool, Case-studies, Engineering education, Extended reality, IEEE Standards, Language Model, Large language model, Learning systems, Ontology, Ontology's, Simple++
@inproceedings{alex_leveraging_2025,
  title     = {Leveraging Large Language Models for Automated {XR} Instructional Content Generation},
  author    = {Alex, G.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105015398440&doi=10.1109%2FICE%2FITMC65658.2025.11106622&partnerID=40&md5=c125d3b7e58cfff4c24a9b15bb615912},
  doi       = {10.1109/ICE/ITMC65658.2025.11106622},
  isbn      = {979-8-3315-8534-1},
  year      = {2025},
  date      = {2025-01-01},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {This paper presents a study in which authors examine the potential of leveraging large language models to generate instructional content for eXtended Reality environments. Considering the IEEE ARLEM standard as a framework for structuring data, it could be integrated and interpreted by existing authoring tools. In terms of methods, authors have adopted an exploratory approach in testing various strategies. A case study focusing on the use of an eXtended Reality authoring tool for teaching operating procedures is presented. Finally, this exploratory work shows that while simple prompts can produce scenarios with satisfactory quality, imposing a structured schema through more complex prompts leads to less reliable outcomes. © 2025 Elsevier B.V., All rights reserved.},
  keywords  = {Artificial intelligence, Authoring Tool, Case-studies, Engineering education, Extended reality, IEEE Standards, Language Model, Large language model, Learning systems, Ontology, Ontology's, Simple++},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kadri, M.; Boubakri, F. -E.; Azough, A.; Zidani, K. A.
Game-Based VR Anatomy Learning with Generative AI: Proof of Concept for GenAiVR-Lab Proceedings Article
In: pp. 100–105, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331534899 (ISBN).
Abstract | Links | BibTeX | Tags: Anatomy educations, Artificial intelligence, Bone, Bone fragments, Collaborative learning, E-Learning, Educational Evaluation, Game-Based, Game-based learning, Generative AI, Human computer interaction, Human skeleton, Laboratories, Learning systems, Medical students, Proof of concept, Virtual Reality, Virtual Reality Anatomy
@inproceedings{kadri_game-based_2025,
  title     = {Game-Based {VR} Anatomy Learning with Generative {AI}: Proof of Concept for {GenAiVR-Lab}},
  author    = {Kadri, M. and Boubakri, F.-E. and Azough, A. and Zidani, K. A.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105015604062&doi=10.1109%2FSCME62582.2025.11104860&partnerID=40&md5=c557ca7975a9683e8c271fbb3a21c4e4},
  doi       = {10.1109/SCME62582.2025.11104860},
  isbn      = {979-8-3315-3489-9},
  year      = {2025},
  date      = {2025-01-01},
  pages     = {100--105},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {Anatomy education often fails to engage learners or foster precise 3D spatial understanding of complex systems like the human skeleton. We present a Game-Based VR Anatomy Learning system with Generative AI, introduced as a Proof of Concept for our GenAiVR-Lab framework. This prototype validates the foundational pillars of our future development. In the Anatomy Lab scenario, 25 medical students explore a virtual skeleton and undertake a timed mission: assemble three bone fragments within two minutes. Incorrect picks are disabled with point deductions; learners may request a one-shot conversational hint from a ChatGPT-powered Virtual Anatomy Instructor; if time expires, a teammate continues with remaining time. We measured perception changes using pre- and post-test versions of four Perspective Questionnaires: Learning Perspective (LPQ), VR-AI Perspective (VRAIPQ), Generative AI Perspective (GAIPQ), and Game-Based Learning Perspective (GBLPQ). Results demonstrate significant improvements across all four perspectives, with mean scores increasing by approximately 1.3 points on the 5-point Likert scale and nearly all participants showing positive gains. Effect sizes ranged from 2.52 to 3.34, indicating large practical significance, with all measures reaching statistical significance. These findings demonstrate that collaborative game mechanics and generative AI guidance enhance engagement and spatial reasoning. We contrast this PoC with the full GenAiVR-Lab vision - integrating Retrieval-Augmented Generation for precise feedback, multimodal I/O, and adaptive pathways - and outline a roadmap for next-generation immersive anatomy education. © 2025 Elsevier B.V., All rights reserved.},
  keywords  = {Anatomy educations, Artificial intelligence, Bone, Bone fragments, Collaborative learning, E-Learning, Educational Evaluation, Game-Based, Game-based learning, Generative AI, Human computer interaction, Human skeleton, Laboratories, Learning systems, Medical students, Proof of concept, Virtual Reality, Virtual Reality Anatomy},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Boubakri, F. -E.; Kadri, M.; Kaghat, F. Z.; Azough, A.; Tairi, H.
Exploring 3D Cardiac Anatomy with Text-Based AI Guidance in Virtual Reality Proceedings Article
In: pp. 43–48, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331534899 (ISBN).
Abstract | Links | BibTeX | Tags: 3D cardiac anatomy, 3d heart models, Anatomy education, Anatomy educations, Cardiac anatomy, Collaborative environments, Collaborative learning, Computer aided instruction, Curricula, Design and Development, E-Learning, Education computing, Generative AI, Heart, Immersive environment, Learning systems, Natural language processing systems, Social virtual reality, Students, Teaching, Three dimensional computer graphics, Virtual Reality
@inproceedings{boubakri_exploring_2025,
  title     = {Exploring {3D} Cardiac Anatomy with Text-Based {AI} Guidance in Virtual Reality},
  author    = {Boubakri, F.-E. and Kadri, M. and Kaghat, F. Z. and Azough, A. and Tairi, H.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105015676741&doi=10.1109%2FSCME62582.2025.11104869&partnerID=40&md5=c961694f97c50adc23b6826dddb265cd},
  doi       = {10.1109/SCME62582.2025.11104869},
  isbn      = {979-8-3315-3489-9},
  year      = {2025},
  date      = {2025-01-01},
  pages     = {43--48},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {This paper presents the design and development of a social virtual reality (VR) classroom focused on cardiac anatomy education for students in grades K-12. The application allows multiple learners to explore a detailed 3D heart model within an immersive and collaborative environment. A crucial part of the system is the integration of a text-based conversational AI interface powered by ChatGPT, which provides immediate, interactive explanations and addresses student inquiries about heart anatomy. The system supports both guided and exploratory learning modes, encourages peer collaboration, and offers personalized support through natural language dialogue. We evaluated the system's effectiveness through a comprehensive study measuring learning perception (LPQ), VR perception (VRPQ), AI perception (AIPQ), and VR-related symptoms (VRSQ). Potential applications include making high-quality cardiac anatomy education more affordable for K-12 schools with limited resources, offering an adaptable AI-based tutoring system for students to learn at their own pace, and equipping educators with an easy-to-use tool to integrate into their science curriculum with minimal additional training. © 2025 Elsevier B.V., All rights reserved.},
  keywords  = {3D cardiac anatomy, 3d heart models, Anatomy education, Anatomy educations, Cardiac anatomy, Collaborative environments, Collaborative learning, Computer aided instruction, Curricula, Design and Development, E-Learning, Education computing, Generative AI, Heart, Immersive environment, Learning systems, Natural language processing systems, Social virtual reality, Students, Teaching, Three dimensional computer graphics, Virtual Reality},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Chen, Y.; Yan, Y.; Yang, G.
Bringing Microbiology to Life in Museum: Using Mobile VR and LLM-Powered Virtual Character for Children's Science Learning Proceedings Article
In: Chui, K. T.; Jaikaeo, C.; Niramitranon, J.; Kaewmanee, W.; Ng, K. -K.; Ongkunaruk, P. (Ed.): pp. 83–87, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331595500 (ISBN).
Abstract | Links | BibTeX | Tags: Computer aided instruction, E-Learning, Engineering education, Experimental groups, Immersive technologies, Informal learning, Language Model, Large language model, large language models, Learning systems, Microbiology, Mobile virtual reality, Museum, Museums, Science education, Science learning, Virtual addresses, Virtual character, Virtual Reality, Virtual reality system
@inproceedings{chen_bringing_2025,
  title     = {Bringing Microbiology to Life in Museum: Using Mobile {VR} and {LLM}-Powered Virtual Character for Children's Science Learning},
  author    = {Chen, Y. and Yan, Y. and Yang, G.},
  editor    = {Chui, K. T. and Jaikaeo, C. and Niramitranon, J. and Kaewmanee, W. and Ng, K.-K. and Ongkunaruk, P.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105015708152&doi=10.1109%2FISET65607.2025.00025&partnerID=40&md5=77ae9a4829656155010abc280a817a72},
  doi       = {10.1109/ISET65607.2025.00025},
  isbn      = {979-8-3315-9550-0},
  year      = {2025},
  date      = {2025-01-01},
  pages     = {83--87},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {Although the increasing advantages of immersive technology-enhanced museum informal learning in children's science education, the application of mobile virtual reality (MVR) technology combined with large language models (LLM) in this environment has not yet been fully explored. Furthermore, virtual character, as an intelligent learning assistant, is capable of providing personalized guidance and instant feedback to children through natural language interactions, but its potential in museum learning has yet to be fully tapped. To address these gaps, this study investigates the effectiveness of integrating MVR with LLM-powered virtual character in promoting children's microbiology learning during museum activities. In this paper, the technology-enhanced POE (Prediction-observation-explanation) learning model was studied, and the corresponding MVR system was designed and developed to carry out microbial learning activities. A quasiexperimental design was used with 60 children aged 10-12. The experimental group learned via an MVR system combining LLM-powered virtual character, while the control group used traditional methods. Results showed the experimental group significantly outperformed the control group in both academic achievement and learning motivation, including attention, confidence, and satisfaction. This provides evidence for using immersive technologies in informal learning and offers insights into applying LLM-powered virtual character in science education. © 2025 Elsevier B.V., All rights reserved.},
  keywords  = {Computer aided instruction, E-Learning, Engineering education, Experimental groups, Immersive technologies, Informal learning, Language Model, Large language model, large language models, Learning systems, Microbiology, Mobile virtual reality, Museum, Museums, Science education, Science learning, Virtual addresses, Virtual character, Virtual Reality, Virtual reality system},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Vadisetty, R.; Polamarasetti, A.; Goyal, M. K.; Rongali, S. K.; Prajapati, S. K.; Butani, J. B.
Cloud-Based Immersive Learning: The Role of Virtual Reality, Big Data, and Generative AI in Transformative Education Experiences Proceedings Article
In: Mishra, S.; Tripathy, H. K.; Mohanty, J. R. (Ed.): Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331523022 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Big Data, Cloud analytics, Cloud environments, Cloud-based, Cloud-based learning, E-Learning, Engineering education, Generative AI, generative artificial intelligence, Immersive learning, Learning analytic, learning analytics, Learning systems, Metadata, Personalized Education, Personalized learning, Real time analysis, Realistic simulation, Virtual environments, Virtual Reality
@inproceedings{vadisetty_cloud-based_2025,
title = {Cloud-Based Immersive Learning: The Role of Virtual Reality, Big Data, and Generative {AI} in Transformative Education Experiences},
author = {Vadisetty, R. and Polamarasetti, A. and Goyal, M. K. and Rongali, S. K. and Prajapati, S. K. and Butani, J. B.},
editor = {Mishra, S. and Tripathy, H. K. and Mohanty, J. R.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105018048438&doi=10.1109%2FASSIC64892.2025.11158636&partnerID=40&md5=6d832a0f4460d2eb93e357faba143a32},
doi = {10.1109/ASSIC64892.2025.11158636},
isbn = {9798331523022},
year = {2025},
date = {2025-01-01},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Immersive learning transforms education by integrating Virtual Reality (VR), Big Data, and Generative Artificial Intelligence (AI) in cloud environments. This work discusses these technologies' contribution towards increased engagement, personalized learning, and recall through flexible and interactive experiences. Realistic simulations in a secure environment, real-time analysis via Big Data, and dynamically personalized information via Generative AI make immersive learning a reality. Nevertheless, scalability, security, and ease of integration are yet to be addressed. This article proposes an integrated model for cloud-based immersive learning, comparing conventional and AI-facilitated approaches through experimental evaluation. Besides, technical, ethical, and legislative considerations and future directions for inquiry are addressed. In conclusion, with its potential for personalized, scalable, and data-intensive instruction, AI-facilitated immersive learning is a transformational technology for educational delivery. © 2025 Elsevier B.V., All rights reserved.},
keywords = {Artificial intelligence, Big Data, Cloud analytics, Cloud environments, Cloud-based, Cloud-based learning, E-Learning, Engineering education, Generative AI, generative artificial intelligence, Immersive learning, Learning analytic, learning analytics, Learning systems, Metadata, Personalized Education, Personalized learning, Real time analysis, Realistic simulation, Virtual environments, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Vadisetty, R.; Polamarasetti, A.; Goyal, M. K.; Rongali, S. K.; Prajapati, S. K.; Butani, J. B.
Generative AI for Creating Immersive Learning Environments: Virtual Reality and Beyond Proceedings Article
In: Mishra, S.; Tripathy, H. K.; Mohanty, J. R. (Ed.): Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331523022 (ISBN).
Abstract | Links | BibTeX | Tags: AI in Education, Artificial intelligence in education, Augmented Reality, Augmented Reality (AR), Computer aided instruction, E-Learning, Educational spaces, Generative adversarial networks, Generative AI, generative artificial intelligence, Immersive, Immersive learning, Learning Environments, Learning systems, Personalized learning, Virtual and augmented reality, Virtual environments, Virtual Reality, Virtual Reality (VR)
@inproceedings{vadisetty_generative_2025,
title = {Generative {AI} for Creating Immersive Learning Environments: Virtual Reality and Beyond},
author = {Vadisetty, R. and Polamarasetti, A. and Goyal, M. K. and Rongali, S. K. and Prajapati, S. K. and Butani, J. B.},
editor = {Mishra, S. and Tripathy, H. K. and Mohanty, J. R.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105018128093&doi=10.1109%2FASSIC64892.2025.11158626&partnerID=40&md5=b29a005f42262bf50c58d7708e2ed91a},
doi = {10.1109/ASSIC64892.2025.11158626},
isbn = {9798331523022},
year = {2025},
date = {2025-01-01},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Generative Artificial Intelligence (AI) revolutionizes immersive educational spaces with dynamic, personalized, and interactive experiences. In this article, Generative AI addresses its role in Virtual and Augmented Realities through automated creation, personalized learning pathways, and heightened engagement. With Generative AI, educational simulations can adapt to learner performance, produce interactive characters, and present real-time feedback through models such as Generative Adversarial Networks (GANs) and Transformerbased AI. Considering its potential, computational limitations, ethics, and authentic content concerns must be considered. In its examination, current implementations, benefits, and impediments, such as AI-powered flexible learning, are discussed in detail in this work. In conclusion, Generative AI's role in changing immersive instruction and opening doors for amplified and augmented educational offerings is stressed. © 2025 Elsevier B.V., All rights reserved.},
keywords = {AI in Education, Artificial intelligence in education, Augmented Reality, Augmented Reality (AR), Computer aided instruction, E-Learning, Educational spaces, Generative adversarial networks, Generative AI, generative artificial intelligence, Immersive, Immersive learning, Learning Environments, Learning systems, Personalized learning, Virtual and augmented reality, Virtual environments, Virtual Reality, Virtual Reality (VR)},
pubstate = {published},
tppubtype = {inproceedings}
}
Salinas, C. S.; Magudia, K.; Sangal, A.; Ren, L.; Segars, W. P.
In-silico CT simulations of deep learning generated heterogeneous phantoms Journal Article
In: Biomedical Physics and Engineering Express, vol. 11, no. 4, 2025, ISSN: 20571976 (ISSN), (Publisher: Institute of Physics).
Abstract | Links | BibTeX | Tags: adult, algorithm, Algorithms, anatomical concepts, anatomical location, anatomical variation, Article, Biological organs, bladder, Bone, bone marrow, CGAN, colon, comparative study, computer assisted tomography, Computer graphics, computer model, Computer Simulation, Computer-Assisted, Computerized tomography, CT organ texture, CT organ textures, CT scanners, CT synthesis, CT-scan, Deep learning, fluorodeoxyglucose f 18, Generative Adversarial Network, Generative AI, histogram, human, human tissue, Humans, III-V semiconductors, image analysis, Image processing, Image segmentation, Image texture, Imaging, imaging phantom, intra-abdominal fat, kidney blood vessel, Learning systems, liver, lung, major clinical study, male, mean absolute error, Medical Imaging, neoplasm, Phantoms, procedures, prostate muscle, radiological parameters, signal noise ratio, Signal to noise ratio, Signal-To-Noise Ratio, simulation, Simulation platform, small intestine, Statistical tests, stomach, structural similarity index, subcutaneous fat, Textures, three dimensional double u net conditional generative adversarial network, Three-Dimensional, three-dimensional imaging, Tomography, Virtual CT scanner, Virtual Reality, Virtual trial, virtual trials, whole body CT, X-Ray Computed, x-ray computed tomography
@article{salinas_-silico_2025,
title = {In-silico {CT} simulations of deep learning generated heterogeneous phantoms},
author = {Salinas, C. S. and Magudia, K. and Sangal, A. and Ren, L. and Segars, W. P.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105010297226&doi=10.1088%2F2057-1976%2Fade9c9&partnerID=40&md5=47f211fd93f80e407dcd7e4c490976c2},
doi = {10.1088/2057-1976/ade9c9},
issn = {2057-1976},
year = {2025},
date = {2025-01-01},
journal = {Biomedical Physics and Engineering Express},
volume = {11},
number = {4},
abstract = {Current virtual imaging phantoms primarily emphasize geometric accuracy of anatomical structures. However, to enhance realism, it is also important to incorporate intra-organ detail. Because biological tissues are heterogeneous in composition, virtual phantoms should reflect this by including realistic intra-organ texture and material variation. We propose training two 3D Double U-Net conditional generative adversarial networks (3D DUC-GAN) to generate sixteen unique textures that encompass organs found within the torso. The model was trained on 378 CT image-segmentation pairs taken from a publicly available dataset with 18 additional pairs reserved for testing. Textured phantoms were generated and imaged using DukeSim, a virtual CT simulation platform. Results showed that the deep learning model was able to synthesize realistic heterogeneous phantoms from a set of homogeneous phantoms. These phantoms were compared with original CT scans and had a mean absolute difference of 46.15 ± 1.06 HU. The structural similarity index (SSIM) and peak signal-to-noise ratio (PSNR) were 0.86 ± 0.004 and 28.62 ± 0.14, respectively. The maximum mean discrepancy between the generated and actual distribution was 0.0016. These metrics marked an improvement of 27%, 5.9%, 6.2%, and 28% respectively, compared to current homogeneous texture methods. The generated phantoms that underwent a virtual CT scan had a closer visual resemblance to the true CT scan compared to the previous method. The resulting heterogeneous phantoms offer a significant step toward more realistic in silico trials, enabling enhanced simulation of imaging procedures with greater fidelity to true anatomical variation. © 2025 Elsevier B.V., All rights reserved.},
note = {Publisher: Institute of Physics},
keywords = {adult, algorithm, Algorithms, anatomical concepts, anatomical location, anatomical variation, Article, Biological organs, bladder, Bone, bone marrow, CGAN, colon, comparative study, computer assisted tomography, Computer graphics, computer model, Computer Simulation, Computer-Assisted, Computerized tomography, CT organ texture, CT organ textures, CT scanners, CT synthesis, CT-scan, Deep learning, fluorodeoxyglucose f 18, Generative Adversarial Network, Generative AI, histogram, human, human tissue, Humans, III-V semiconductors, image analysis, Image processing, Image segmentation, Image texture, Imaging, imaging phantom, intra-abdominal fat, kidney blood vessel, Learning systems, liver, lung, major clinical study, male, mean absolute error, Medical Imaging, neoplasm, Phantoms, procedures, prostate muscle, radiological parameters, signal noise ratio, Signal to noise ratio, Signal-To-Noise Ratio, simulation, Simulation platform, small intestine, Statistical tests, stomach, structural similarity index, subcutaneous fat, Textures, three dimensional double u net conditional generative adversarial network, Three-Dimensional, three-dimensional imaging, Tomography, Virtual CT scanner, Virtual Reality, Virtual trial, virtual trials, whole body CT, X-Ray Computed, x-ray computed tomography},
pubstate = {published},
tppubtype = {article}
}
Lu, J.; Gao, J.; Feng, F.; He, Z.; Zheng, M.; Liu, K.; He, J.; Liao, B.; Xu, S.; Sun, K.; Mo, Y.; Peng, Q.; Luo, J.; Li, Q.; Lu, G.; Wang, Z.; Dong, J.; He, K.; Cheng, S.; Cao, J.; Jiao, H.; Zhang, P.; Ma, S.; Zhu, L.; Shi, C.; Zhang, Y.; Chen, Y.; Wang, W.; Zhu, S.; Li, X.; Wang, Q.; Liu, J.; Wang, C.; Lin, W.; Zhai, E.; Wu, J.; Liu, Q.; Fu, B.; Cai, D.
Alibaba Stellar: A New Generation RDMA Network for Cloud AI Proceedings Article
In: pp. 453–466, Association for Computing Machinery, Inc, 2025, ISBN: 9798400715242 (ISBN).
Abstract | Links | BibTeX | Tags: Access network, Cloud computing, Congestion control (communication), Containers, data center networking, Data center networkings, Language Model, Learning systems, Machine learning applications, Memory architecture, Network support, Network support for AI and machine learning application, network support for AI and machine learning applications, Performance, Program processors, Remote direct memory access, Stellars, Transport and congestion control, Virtual Reality, Virtualization
@inproceedings{lu_alibaba_2025,
title = {{Alibaba Stellar}: A New Generation {RDMA} Network for Cloud {AI}},
author = {J. Lu and J. Gao and F. Feng and Z. He and M. Zheng and K. Liu and J. He and B. Liao and S. Xu and K. Sun and Y. Mo and Q. Peng and J. Luo and Q. Li and G. Lu and Z. Wang and J. Dong and K. He and S. Cheng and J. Cao and H. Jiao and P. Zhang and S. Ma and L. Zhu and C. Shi and Y. Zhang and Y. Chen and W. Wang and S. Zhu and X. Li and Q. Wang and J. Liu and C. Wang and W. Lin and E. Zhai and J. Wu and Q. Liu and B. Fu and D. Cai},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105016208536&doi=10.1145%2F3718958.3750539&partnerID=40&md5=901fdd27c510072633f0390a0abfb653},
doi = {10.1145/3718958.3750539},
isbn = {9798400715242},
year = {2025},
date = {2025-01-01},
pages = {453--466},
publisher = {Association for Computing Machinery, Inc},
abstract = {The rapid adoption of Large Language Models (LLMs) in cloud environments has intensified the demand for high-performance AI training and inference, where Remote Direct Memory Access (RDMA) plays a critical role. However, existing RDMA virtualization solutions, such as Single-Root Input/Output Virtualization (SR-IOV), face significant limitations in scalability, performance, and stability. These issues include lengthy container initialization times, hardware resource constraints, and inefficient traffic steering. To address these challenges, we propose Stellar, a new generation RDMA network for cloud AI. Stellar introduces three key innovations: Para-Virtualized Direct Memory Access (PVDMA) for on-demand memory pinning, extended Memory Translation Table (eMTT) for optimized GPU Direct RDMA (GDR) performance, and RDMA Packet Spray for efficient multi-path utilization. Deployed in our large-scale AI clusters, Stellar spins up virtual devices in seconds, reduces container initialization time by 15 times, and improves LLM training speed by up to 14%. Our evaluations demonstrate that Stellar significantly outperforms existing solutions, offering a scalable, stable, and high-performance RDMA network for cloud AI. © 2025 Elsevier B.V., All rights reserved.},
keywords = {Access network, Cloud computing, Congestion control (communication), Containers, data center networking, Data center networkings, Language Model, Learning systems, Machine learning applications, Memory architecture, Network support, Network support for AI and machine learning application, network support for AI and machine learning applications, Performance, Program processors, Remote direct memory access, Stellars, Transport and congestion control, Virtual Reality, Virtualization},
pubstate = {published},
tppubtype = {inproceedings}
}
Wei, Q.; Huang, J.; Gao, Y.; Dong, W.
One Model to Fit Them All: Universal IMU-based Human Activity Recognition with LLM-assisted Cross-dataset Representation Journal Article
In: Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies, vol. 9, no. 3, 2025, ISSN: 24749567 (ISSN), (Publisher: Association for Computing Machinery).
Abstract | Links | BibTeX | Tags: Broad application, Contrastive Learning, Cross-dataset, Data collection, Human activity recognition, Human activity recognition systems, Human computer interaction, Intelligent interactions, Language Model, Large datasets, Large language model, large language models, Learning systems, Neural-networks, Pattern recognition, Spatial relationships, Ubiquitous computing, Virtual Reality
@article{wei_one_2025,
title = {One Model to Fit Them All: Universal {IMU-based} Human Activity Recognition with {LLM-assisted} Cross-dataset Representation},
author = {Wei, Q. and Huang, J. and Gao, Y. and Dong, W.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105015431117&doi=10.1145%2F3749509&partnerID=40&md5=2a6f26a05856c48ba3aaaf356b375dc0},
doi = {10.1145/3749509},
issn = {2474-9567},
year = {2025},
date = {2025-01-01},
journal = {Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies},
volume = {9},
number = {3},
abstract = {Human Activity Recognition (HAR) is essential for pervasive computing and intelligent interaction, with broad applications across various fields. However, there is still no one model capable of fitting various HAR datasets, severely limiting its applicability in practical scenarios. To address this, we propose oneHAR, an LLM-assisted universal IMU-based HAR system designed to achieve "one model to fit them all" — just one model that can adapt to diverse HAR datasets without any dataset-specific operation. In particular, we propose Cross-Dataset neural network (CDNet) for the "one model," which models both the temporal context and spatial relationships of IMU data to capture cross-dataset representations, encompassing differences in device, participant, data collection position, and environment, etc. Additionally, we introduce LLM-driven data synthesis, which enhances the training process by generating virtual IMU data through three carefully designed strategies. Furthermore, LLM-assisted adaptive position processing optimizes the inference process by flexibly handling a variable combination of positional inputs. Our model demonstrates strong generalization across five public IMU-based HAR datasets, outperforming the best baselines by up to 46.9% in the unseen-dataset scenario, and 6.5% in the cross-dataset scenario. © 2025 Elsevier B.V., All rights reserved.},
note = {Publisher: Association for Computing Machinery},
keywords = {Broad application, Contrastive Learning, Cross-dataset, Data collection, Human activity recognition, Human activity recognition systems, Human computer interaction, Intelligent interactions, Language Model, Large datasets, Large language model, large language models, Learning systems, Neural-networks, Pattern recognition, Spatial relationships, Ubiquitous computing, Virtual Reality},
pubstate = {published},
tppubtype = {article}
}
Wang, Z.; Aris, A.; Zhang, P.
Mobile-Driven Deep Learning Algorithm for Personalized Clothing Design Using Multi-Feature Attributes Journal Article
In: International Journal of Interactive Mobile Technologies, vol. 19, no. 18, pp. 146–160, 2025, ISSN: 18657923 (ISSN), (Publisher: International Federation of Engineering Education Societies (IFEES)).
Abstract | Links | BibTeX | Tags: Clothing design, Convolutional Neural Networks, Data privacy, Data visualization, Deep learning, E-Learning, Electronic commerce, Fashion design, Feature attributes, Hosiery manufacture, Learning algorithms, Learning platform, Learning systems, Mobile Learning, Mobile learning platform, Mobile-driven deep learning, Multi-feature attribute, multi-feature attributes, Multifeatures, Personalized clothing design, Personalized clothings, StyleFitNet, Textiles, Virtual Reality
@article{wang_mobile-driven_2025,
title = {Mobile-Driven Deep Learning Algorithm for Personalized Clothing Design Using Multi-Feature Attributes},
author = {Wang, Z. and Aris, A. and Zhang, P.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105017860148&doi=10.3991%2Fijim.v19i18.57239&partnerID=40&md5=de3ca359dd178d8ea59cf8da73a9c486},
doi = {10.3991/ijim.v19i18.57239},
issn = {1865-7923},
year = {2025},
date = {2025-01-01},
journal = {International Journal of Interactive Mobile Technologies},
volume = {19},
number = {18},
pages = {146--160},
abstract = {Personalized fashion recommendation systems face significant challenges in balancing accurate style prediction, real-time mobile performance, and user privacy compliance. This study presents StyleFitNet, a novel mobile-driven deep learning framework that integrates multiple user feature attributes, including body measurements, fabric preferences, and temporal style evolution, to generate personalized clothing designs. The hybrid convolutional neural networks (CNNs)-recurrent neural networks (RNNs) architecture addresses key limitations of conventional recommendation systems by simultaneously processing spatial features and sequential preference patterns. A comprehensive evaluation demonstrates the system’s superiority in recommendation accuracy, design diversity, and user satisfaction compared to existing approaches. The implementation features GDPR-compliant data handling and a 3D virtual fitting room, significantly reducing return rates while maintaining robust privacy protections. Findings highlight the model’s ability to adapt to evolving fashion trends while preserving individual style preferences, offering both technical and business advantages for e-commerce platforms. The study concludes that StyleFitNet establishes a new standard for artificial intelligence (AI)-driven fashion recommendations, successfully merging advanced personalization with ethical data practices. Key implications include the demonstrated viability of hybrid deep learning models for mobile deployment and the importance of temporal analysis in preference modelling. Future research directions include cross-cultural validation and the integration of generative AI for enhanced visualization. © 2025 Elsevier B.V., All rights reserved.},
note = {Publisher: International Federation of Engineering Education Societies (IFEES)},
keywords = {Clothing design, Convolutional Neural Networks, Data privacy, Data visualization, Deep learning, E-Learning, Electronic commerce, Fashion design, Feature attributes, Hosiery manufacture, Learning algorithms, Learning platform, Learning systems, Mobile Learning, Mobile learning platform, Mobile-driven deep learning, Multi-feature attribute, multi-feature attributes, Multifeatures, Personalized clothing design, Personalized clothings, StyleFitNet, Textiles, Virtual Reality},
pubstate = {published},
tppubtype = {article}
}
2024
Liew, Z. Q.; Xu, M.; Lim, W. Y. Bryan; Niyato, D.; Kim, D. I.
AI-Generated Bidding for Immersive AIGC Services in Mobile Edge-Empowered Metaverse Proceedings Article
In: Int. Conf. Inf. Networking, pp. 305–309, IEEE Computer Society, 2024, ISBN: 19767684 (ISSN); 979-835033094-6 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence generated bid, Artificial intelligence generated content, Bidding mechanism, Bidding models, Budget constraint, Budget control, Budget-constraint bidding, Constrained optimization, Content services, Immersive, Learning systems, Metaverses, Mobile edge computing, Reinforcement Learning, Semantics, Virtual tour
@inproceedings{liew_ai-generated_2024,
title = {{AI-Generated} Bidding for Immersive {AIGC} Services in Mobile Edge-Empowered Metaverse},
author = {Liew, Z. Q. and Xu, M. and Lim, W. Y. Bryan and Niyato, D. and Kim, D. I.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85198324990&doi=10.1109%2fICOIN59985.2024.10572159&partnerID=40&md5=271f5c45e8e95f01b42acaee89599bd5},
doi = {10.1109/ICOIN59985.2024.10572159},
issn = {1976-7684},
isbn = {979-835033094-6},
year = {2024},
date = {2024-01-01},
booktitle = {Int. Conf. Inf. Networking},
pages = {305--309},
publisher = {IEEE Computer Society},
abstract = {Recent advancements in Artificial Intelligence Generated Content (AIGC) provide personalized and immersive content generation services for applications such as interactive advertisements, virtual tours, and metaverse. With the use of mobile edge computing (MEC), buyers can bid for the AIGC service to enhance their user experience in real-time. However, designing strategies to optimize the quality of the services won can be challenging for budget-constrained buyers. The performance of classical bidding mechanisms is limited by the fixed rules in the strategies. To this end, we propose AI-generated bidding (AIGB) to optimize the bidding strategies for AIGC. AIGB model uses reinforcement learning model to generate bids for the services by learning from the historical data and environment states such as remaining budget, budget consumption rate, and quality of the won services. To obtain quality AIGC service, we propose a semantic aware reward function for the AIGB model. The proposed model is tested with a real-world dataset and experiments show that our model outperforms the classical bidding mechanism in terms of the number of services won and the similarity score. © 2024 IEEE.},
keywords = {Artificial intelligence generated bid, Artificial intelligence generated content, Bidding mechanism, Bidding models, Budget constraint, Budget control, Budget-constraint bidding, Constrained optimization, Content services, Immersive, Learning systems, Metaverses, Mobile edge computing, Reinforcement Learning, Semantics, Virtual tour},
pubstate = {published},
tppubtype = {inproceedings}
}
Cronin, I.
Understanding Generative AI Business Applications: A Guide to Technical Principles and Real-World Applications Book
Apress Media LLC, 2024, ISBN: 979-886880282-9 (ISBN); 979-886880281-2 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Augmented Reality, Autonomous system, Autonomous systems, Business applications, Computer vision, Decision making, Gaussian Splatting, Gaussians, Generative AI, Language processing, Learning algorithms, Learning systems, machine learning, Machine-learning, Natural Language Processing, Natural Language Processing (NLP), Natural language processing systems, Natural languages, Splatting
@book{cronin_understanding_2024,
title = {Understanding Generative {AI} Business Applications: A Guide to Technical Principles and Real-World Applications},
author = {Cronin, Irena},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105001777571&doi=10.1007%2f979-8-8688-0282-9&partnerID=40&md5=c0714ff3e1ad755596426ea092b830d6},
doi = {10.1007/979-8-8688-0282-9},
isbn = {979-886880282-9; 979-886880281-2},
year = {2024},
date = {2024-01-01},
publisher = {Apress Media LLC},
abstract = {This guide covers the fundamental technical principles and various business applications of Generative AI for planning, developing, and evaluating AI-driven products. It equips you with the knowledge you need to harness the potential of Generative AI for enhancing business creativity and productivity. The book is organized into three sections: text-based, senses-based, and rationale-based. Each section provides an in-depth exploration of the specific methods and applications of Generative AI. In the text-based section, you will find detailed discussions on designing algorithms to automate and enhance written communication, including insights into the technical aspects of transformer-based Natural Language Processing (NLP) and chatbot architecture, such as GPT-4, Claude 2, Google Bard, and others. The senses-based section offers a glimpse into the algorithms and data structures that underpin visual, auditory, and multisensory experiences, including NeRF, 3D Gaussian Splatting, Stable Diffusion, AR and VR technologies, and more. The rationale-based section illuminates the decision-making capabilities of AI, with a focus on machine learning and data analytics techniques that empower applications such as simulation models, agents, and autonomous systems. In summary, this book serves as a guide for those seeking to navigate the dynamic landscape of Generative AI. Whether you’re a seasoned AI professional or a business leader looking to harness the power of creative automation, these pages offer a roadmap to leverage Generative AI for your organization’s success. © 2024 by Irena Cronin.},
keywords = {Artificial intelligence, Augmented Reality, Autonomous system, Autonomous systems, Business applications, Computer vision, Decision making, Gaussian Splatting, Gaussians, Generative AI, Language processing, Learning algorithms, Learning systems, machine learning, Machine-learning, Natural Language Processing, Natural Language Processing (NLP), Natural language processing systems, Natural languages, Splatting},
pubstate = {published},
tppubtype = {book}
}
Clocchiatti, A.; Fumero, N.; Soccini, A. M.
Character Animation Pipeline based on Latent Diffusion and Large Language Models Proceedings Article
In: Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR, pp. 398–405, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 9798350372021 (ISBN).
Abstract | Links | BibTeX | Tags: Animation, Animation pipeline, Artificial intelligence, Augmented Reality, Character animation, Computational Linguistics, Computer animation, Deep learning, Diffusion, E-Learning, Extended reality, Film production, Generative art, Language Model, Learning systems, Learning techniques, Natural language processing systems, Pipelines, Production pipelines, Virtual Reality
@inproceedings{clocchiatti_character_2024,
title = {Character Animation Pipeline based on Latent Diffusion and Large Language Models},
author = {Clocchiatti, A. and Fumero, N. and Soccini, A. M.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85187217072&doi=10.1109%2FAIxVR59861.2024.00067&partnerID=40&md5=c51a20d28df6b65ef2587a75aadafae4},
doi = {10.1109/AIxVR59861.2024.00067},
isbn = {9798350372021},
year = {2024},
date = {2024-01-01},
booktitle = {Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR},
pages = {398--405},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Artificial intelligence and deep learning techniques are revolutionizing the film production pipeline. The majority of the current screenplay-to-animation pipelines focus on understanding the screenplay through natural language processing techniques, and on the generation of the animation through custom engines, missing the possibility to customize the characters. To address these issues, we propose a high-level pipeline for generating 2D characters and animations starting from screenplays, through a combination of Latent Diffusion Models and Large Language Models. Our approach uses ChatGPT to generate character descriptions starting from the screenplay. Then, using that data, it generates images of custom characters with Stable Diffusion and animates them according to their actions in different scenes. The proposed approach avoids well-known problems in generative AI tools such as temporal inconsistency and lack of control on the outcome. The results suggest that the pipeline is consistent and reliable, benefiting industries ranging from film production to virtual, augmented and extended reality content creation. © 2024 Elsevier B.V., All rights reserved.},
keywords = {Animation, Animation pipeline, Artificial intelligence, Augmented Reality, Character animation, Computational Linguistics, Computer animation, Deep learning, Diffusion, E-Learning, Extended reality, Film production, Generative art, Language Model, Learning systems, Learning techniques, Natural language processing systems, Pipelines, Production pipelines, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Chaccour, C.; Saad, W.; Debbah, M.; Poor, H. V.
Joint Sensing, Communication, and AI: A Trifecta for Resilient THz User Experiences Journal Article
In: IEEE Transactions on Wireless Communications, vol. 23, no. 9, pp. 11444–11460, 2024, ISSN: 15361276 (ISSN); 15582248 (ISSN), (Publisher: Institute of Electrical and Electronics Engineers Inc.).
Abstract | Links | BibTeX | Tags: Artificial intelligence, artificial intelligence (AI), Behavioral Research, Channel state information, Computer hardware, Cramer-Rao bounds, Extended reality (XR), Hardware, Joint sensing and communication, Learning systems, machine learning, machine learning (ML), Machine-learning, Multi agent systems, reliability, Resilience, Sensor data fusion, Tera Hertz, Terahertz, terahertz (THz), Terahertz communication, Wireless communications, Wireless sensor networks, X reality
@article{chaccour_joint_2024,
title = {Joint Sensing, Communication, and {AI}: A Trifecta for Resilient {THz} User Experiences},
author = {Chaccour, C. and Saad, W. and Debbah, M. and Poor, H. Vincent},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85190170739&doi=10.1109%2FTWC.2024.3382192&partnerID=40&md5=561f4cdf229d462bb636c787487201bd},
doi = {10.1109/TWC.2024.3382192},
issn = {1536-1276; 1558-2248},
year = {2024},
date = {2024-01-01},
journal = {IEEE Transactions on Wireless Communications},
volume = {23},
number = {9},
pages = {11444--11460},
abstract = {In this paper a novel joint sensing, communication, and artificial intelligence (AI) framework is proposed so as to optimize extended reality (XR) experiences over terahertz (THz) wireless systems. Within this framework, active reconfigurable intelligent surfaces (RISs) are incorporated as pivotal elements, serving as enhanced base stations in the THz band to enhance Line-of-Sight (LoS) communication. The proposed framework consists of three main components. First, a tensor decomposition framework is proposed to extract unique sensing parameters for XR users and their environment by exploiting the THz channel sparsity. Essentially, the THz band's quasi-opticality is exploited and the sensing parameters are extracted from the uplink communication signal, thereby allowing for the use of the same waveform, spectrum, and hardware for both communication and sensing functionalities. Then, the Cramér-Rao lower bound is derived to assess the accuracy of the estimated sensing parameters. Second, a non-autoregressive multi-resolution generative AI framework integrated with an adversarial transformer is proposed to predict missing and future sensing information. The proposed framework offers robust and comprehensive historical sensing information and anticipatory forecasts of future environmental changes, which are generalizable to fluctuations in both known and unforeseen user behaviors and environmental conditions. Third, a multi-agent deep recurrent hysteretic Q-neural network is developed to control the handover policy of RIS subarrays, leveraging the informative nature of sensing information to minimize handover cost, maximize the individual quality of personal experiences (QoPEs), and improve the robustness and resilience of THz links. 
Simulation results show a high generalizability of the proposed unsupervised generative artificial intelligence (AI) framework to fluctuations in user behavior and velocity, leading to a 61% improvement in instantaneous reliability compared to schemes with known channel state information. © 2024 Elsevier B.V., All rights reserved.},
note = {Publisher: Institute of Electrical and Electronics Engineers Inc.},
keywords = {Artificial intelligence, artificial intelligence (AI), Behavioral Research, Channel state information, Computer hardware, Cramer-Rao bounds, Extended reality (XR), Hardware, Joint sensing and communication, Learning systems, machine learning, machine learning (ML), Machine-learning, Multi agent systems, reliability, Resilience, Sensor data fusion, Tera Hertz, Terahertz, terahertz (THz), Terahertz communication, Wireless communications, Wireless sensor networks, X reality},
pubstate = {published},
tppubtype = {article}
}
Krauss, C.; Bassbouss, L.; Upravitelev, M.; An, T. -S.; Altun, D.; Reray, L.; Balitzki, E.; Tamimi, T. El; Karagülle, M.
Opportunities and Challenges in Developing Educational AI-Assistants for the Metaverse Proceedings Article
In: R.A., Sottilare; J., Schwarz (Ed.): Lect. Notes Comput. Sci., pp. 219–238, Springer Science and Business Media Deutschland GmbH, 2024, ISBN: 03029743 (ISSN); 978-303160608-3 (ISBN).
Abstract | Links | BibTeX | Tags: 3D modeling, AI-assistant, AI-Assistants, Computational Linguistics, Computer aided instruction, Concept-based, E-Learning, Education, Interoperability, Language Model, Large language model, large language models, Learning Environments, Learning systems, Learning Technologies, Learning technology, LLM, Metaverse, Metaverses, Natural language processing systems, Proof of concept, User interfaces, Virtual assistants, Virtual Reality
@inproceedings{krauss_opportunities_2024,
title = {Opportunities and Challenges in Developing Educational {AI}-Assistants for the {Metaverse}},
author = {Krauss, C. and Bassbouss, L. and Upravitelev, M. and An, T.-S. and Altun, D. and Reray, L. and Balitzki, E. and El Tamimi, T. and Karagülle, M.},
editor = {Sottilare, R. A. and Schwarz, J.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85196214138&doi=10.1007%2f978-3-031-60609-0_16&partnerID=40&md5=9a66876cb30e9e5d287a86e6cfa66e05},
doi = {10.1007/978-3-031-60609-0_16},
issn = {0302-9743},
isbn = {978-3-031-60608-3},
year = {2024},
date = {2024-01-01},
booktitle = {Lect. Notes Comput. Sci.},
volume = {14727 LNCS},
pages = {219--238},
publisher = {Springer Science and Business Media Deutschland GmbH},
abstract = {The paper explores the opportunities and challenges for metaverse learning environments with AI-Assistants based on Large Language Models. A proof of concept based on popular but proprietary technologies is presented that enables a natural language exchange between the user and an AI-based medical expert in a highly immersive environment based on the Unreal Engine. The answers generated by ChatGPT are not only played back lip-synchronously, but also visualized in the VR environment using a 3D model of a skeleton. Usability and user experience play a particularly important role in the development of the highly immersive AI-Assistant. The proof of concept serves to illustrate the opportunities and challenges that lie in the merging of large language models, metaverse applications and educational ecosystems, which are self-contained research areas. Development strategies, tools and interoperability standards will be presented to facilitate future developments in this triangle of tension. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2024.},
keywords = {3D modeling, AI-assistant, AI-Assistants, Computational Linguistics, Computer aided instruction, Concept-based, E-Learning, Education, Interoperability, Language Model, Large language model, large language models, Learning Environments, Learning systems, Learning Technologies, Learning technology, LLM, Metaverse, Metaverses, Natural language processing systems, Proof of concept, User interfaces, Virtual assistants, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Williams, R.
Deep HoriXons - 3D Virtual Generative AI Assisted Campus for Deep Learning AI and Cybersecurity Proceedings Article
In: Blowers, M.; Wysocki, B.T. (Ed.): Proc SPIE Int Soc Opt Eng, SPIE, 2024, ISBN: 0277786X (ISSN); 978-151067434-9 (ISBN).
Abstract | Links | BibTeX | Tags: 3D virtual campus, AI and cybersecurity education, AI talent pipeline, ChatGPT digital tutor, CompTIA Security+, Computer aided instruction, Cyber security, Cyber-security educations, Cybersecurity, Deep learning, E-Learning, Immersive, Learning systems, Virtual campus, Virtual learning environments, Virtual Reality
@inproceedings{williams_deep_2024,
title = {Deep {HoriXons} - {3D} Virtual Generative {AI} Assisted Campus for Deep Learning {AI} and Cybersecurity},
author = {Williams, R.},
editor = {Blowers, M. and Wysocki, B. T.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85196555361&doi=10.1117%2f12.3011374&partnerID=40&md5=ff7392a37a51044c79d4d2824c9cf46b},
doi = {10.1117/12.3011374},
issn = {0277-786X},
isbn = {978-1-5106-7434-9},
year = {2024},
date = {2024-01-01},
booktitle = {Proc SPIE Int Soc Opt Eng},
volume = {13058},
publisher = {SPIE},
abstract = {This abstract outlines two significant innovations in AI and cybersecurity education within the "Deep HoriXons" 3D virtual campus, addressing the urgent need for skilled professionals in these domains. First, the paper introduces "Deep HoriXons," an immersive 3D virtual learning environment designed to democratize and enhance the educational experience for AI and cybersecurity. This innovation is notable for its global accessibility and ability to simulate real-world scenarios, providing an interactive platform for experiential learning, which is a marked departure from traditional educational models. The second innovation discussed is the strategic integration of ChatGPT as a digital educator and tutor within this virtual environment. ChatGPT's role is pivotal in offering tailored, real-time educational support, making complex AI and cybersecurity concepts more accessible and engaging for learners. This application of ChatGPT is an innovation worth noting for its ability to adapt to individual learning styles, provide interactive scenario-based learning, and support a deeper understanding of technical subjects through dynamic, responsive interaction. Together, these innovations represent a significant advancement in the field of AI and cybersecurity education, addressing the critical talent shortage by making high-quality, interactive learning experiences accessible on a global scale. The paper highlights the importance of these innovations in creating a skilled workforce capable of tackling the evolving challenges in AI and cybersecurity, underscoring the need for ongoing research and development in this area. © 2024 SPIE.},
keywords = {3D virtual campus, AI and cybersecurity education, AI talent pipeline, ChatGPT digital tutor, CompTIA Security+, Computer aided instruction, Cyber security, Cyber-security educations, Cybersecurity, Deep learning, E-Learning, Immersive, Learning systems, Virtual campus, Virtual learning environments, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Sarshartehrani, F.; Mohammadrezaei, E.; Behravan, M.; Gracanin, D.
Enhancing E-Learning Experience Through Embodied AI Tutors in Immersive Virtual Environments: A Multifaceted Approach for Personalized Educational Adaptation Proceedings Article
In: Sottilare, R.A.; Schwarz, J. (Ed.): Lect. Notes Comput. Sci., pp. 272–287, Springer Science and Business Media Deutschland GmbH, 2024, ISBN: 03029743 (ISSN); 978-303160608-3 (ISBN).
Abstract | Links | BibTeX | Tags: Adaptive Learning, Artificial intelligence, Artificial intelligence in education, Computer aided instruction, Computer programming, E - learning, E-Learning, Education computing, Embodied artificial intelligence, Engineering education, Immersive Virtual Environments, Learner Engagement, Learning experiences, Learning systems, Multi-faceted approach, Personalized Instruction, Traditional boundaries, Virtual Reality
@inproceedings{sarshartehrani_enhancing_2024,
title = {Enhancing {E-Learning} Experience Through Embodied {AI} Tutors in Immersive Virtual Environments: A Multifaceted Approach for Personalized Educational Adaptation},
author = {Sarshartehrani, F. and Mohammadrezaei, E. and Behravan, M. and Gracanin, D.},
editor = {Sottilare, R. A. and Schwarz, J.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85196174389&doi=10.1007%2f978-3-031-60609-0_20&partnerID=40&md5=3801d0959781b1a191a3eb14f47bd8d8},
doi = {10.1007/978-3-031-60609-0_20},
issn = {0302-9743},
isbn = {978-3-031-60608-3},
year = {2024},
date = {2024-01-01},
booktitle = {Lect. Notes Comput. Sci.},
volume = {14727 LNCS},
pages = {272--287},
publisher = {Springer Science and Business Media Deutschland GmbH},
abstract = {As digital education transcends traditional boundaries, e-learning experiences are increasingly shaped by cutting-edge technologies like artificial intelligence (AI), virtual reality (VR), and adaptive learning systems. This study examines the integration of AI-driven personalized instruction within immersive VR environments, targeting enhanced learner engagement-a core metric in online education effectiveness. Employing a user-centric design, the research utilizes embodied AI tutors, calibrated to individual learners’ emotional intelligence and cognitive states, within a Python programming curriculum-a key area in computer science education. The methodology relies on intelligent tutoring systems and personalized learning pathways, catering to a diverse participant pool from Virginia Tech. Our data-driven approach, underpinned by the principles of educational psychology and computational pedagogy, indicates that AI-enhanced virtual learning environments significantly elevate user engagement and proficiency in programming education. Although the scope is limited to a single academic institution, the promising results advocate for the scalability of such AI-powered educational tools, with potential implications for distance learning, MOOCs, and lifelong learning platforms. This research contributes to the evolving narrative of smart education and the role of large language models (LLMs) in crafting bespoke educational experiences, suggesting a paradigm shift towards more interactive, personalized e-learning solutions that align with global educational technology trends. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2024.},
keywords = {Adaptive Learning, Artificial intelligence, Artificial intelligence in education, Computer aided instruction, Computer programming, E - learning, E-Learning, Education computing, Embodied artificial intelligence, Engineering education, Immersive Virtual Environments, Learner Engagement, Learning experiences, Learning systems, Multi-faceted approach, Personalized Instruction, Traditional boundaries, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Haramina, E.; Paladin, M.; Petričušić, Z.; Posarić, F.; Drobnjak, A.; Botički, I.
Learning Algorithms Concepts in a Virtual Reality Escape Room Proceedings Article
In: Babic, S.; Car, Z.; Cicin-Sain, M.; Cisic, D.; Ergovic, P.; Grbac, T. G.; Gradisnik, V.; Gros, S.; Jokic, A.; Jovic, A.; Jurekovic, D.; Katulic, T.; Koricic, M.; Mornar, V.; Petrovic, J.; Skala, K.; Skvorc, D.; Sruk, V.; Svaco, M.; Tijan, E.; Vrcek, N.; Vrdoljak, B. (Ed.): ICT Electron. Conv., MIPRO - Proc., pp. 2057–2062, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 9798350382495 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Computational complexity, Computer generated three dimensional environment, E-Learning, Education, Escape room, Extended reality, generative artificial intelligence, Learn+, Learning, Learning algorithms, Learning systems, Puzzle, puzzles, user experience, User study, User testing, Users' experiences, Virtual Reality
@inproceedings{haramina_learning_2024,
title = {Learning Algorithms Concepts in a Virtual Reality Escape Room},
author = {Haramina, E. and Paladin, M. and Petričušić, Z. and Posarić, F. and Drobnjak, A. and Botički, I.},
editor = {Babic, S. and Car, Z. and Cicin-Sain, M. and Cisic, D. and Ergovic, P. and Grbac, T. G. and Gradisnik, V. and Gros, S. and Jokic, A. and Jovic, A. and Jurekovic, D. and Katulic, T. and Koricic, M. and Mornar, V. and Petrovic, J. and Skala, K. and Skvorc, D. and Sruk, V. and Svaco, M. and Tijan, E. and Vrcek, N. and Vrdoljak, B.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85198221737&doi=10.1109%2FMIPRO60963.2024.10569447&partnerID=40&md5=ee56896e4128fd5a8bef03825469a46f},
doi = {10.1109/MIPRO60963.2024.10569447},
isbn = {979-8-3503-8249-5},
year = {2024},
date = {2024-01-01},
booktitle = {ICT Electron. Conv., MIPRO - Proc.},
pages = {2057--2062},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Although the standard way to learn algorithms is by coding, learning through games is another way to obtain knowledge while having fun. Virtual reality is a computer-generated three-dimensional environment in which the player is fully immersed by having external stimuli mostly blocked out. In the game presented in this paper, players are enhancing their algorithms skills by playing an escape room game. The goal is to complete the room within the designated time by solving puzzles. The puzzles change for every playthrough with the use of generative artificial intelligence to provide every player with a unique experience. There are multiple types of puzzles such as. time complexity, sorting algorithms, searching algorithms, and code execution. The paper presents the results of a study indicating students' preference for learning through gaming as a method of acquiring algorithms knowledge. © 2024 Elsevier B.V., All rights reserved.},
keywords = {Artificial intelligence, Computational complexity, Computer generated three dimensional environment, E-Learning, Education, Escape room, Extended reality, generative artificial intelligence, Learn+, Learning, Learning algorithms, Learning systems, Puzzle, puzzles, user experience, User study, User testing, Users' experiences, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Domenichini, D.; Bucchiarone, A.; Chiarello, F.; Schiavo, G.; Fantoni, G.
An AI-Driven Approach for Enhancing Engagement and Conceptual Understanding in Physics Education Proceedings Article
In: IEEE Global Eng. Edu. Conf., EDUCON, IEEE Computer Society, 2024, ISBN: 21659559 (ISSN); 979-835039402-3 (ISBN).
Abstract | Links | BibTeX | Tags: Adaptive Learning, Artificial intelligence, Artificial intelligence in education, Artificial Intelligence in Education (AIED), Conceptual Understanding, Educational System, Educational systems, Gamification, Generative AI, generative artificial intelligence, Learning Activity, Learning systems, Physics Education, Teachers', Teaching, Virtual Reality
@inproceedings{domenichini_ai-driven_2024,
title = {An {AI}-Driven Approach for Enhancing Engagement and Conceptual Understanding in Physics Education},
author = {Domenichini, D. and Bucchiarone, A. and Chiarello, F. and Schiavo, G. and Fantoni, G.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85199035695&doi=10.1109%2fEDUCON60312.2024.10578670&partnerID=40&md5=4cf9f89e97664ae6d618a90f2dbc23e0},
doi = {10.1109/EDUCON60312.2024.10578670},
issn = {2165-9559},
isbn = {979-8-3503-9402-3},
year = {2024},
date = {2024-01-01},
booktitle = {IEEE Global Eng. Edu. Conf., EDUCON},
publisher = {IEEE Computer Society},
abstract = {This Work in Progress paper introduces the design of an innovative educational system that leverages Artificial Intelligence (AI) to address challenges in physics education. The primary objective is to create a system that dynamically adapts to the individual needs and preferences of students while maintaining user-friendliness for teachers, allowing them to tailor their teaching methods. The emphasis is on fostering motivation and engagement, achieved through the implementation of a gamified virtual environment and a strong focus on personalization. Our aim is to develop a system capable of autonomously generating learning activities and constructing effective learning paths, all under the supervision and interaction of teachers. The generation of learning activities is guided by educational taxonomies that delineate and categorize the cognitive processes involved in these activities. The proposed educational system seeks to address challenges identified by Physics Education Research (PER), which offers valuable insights into how individuals learn physics and provides strategies to enhance the overall quality of physics education. Our specific focus revolves around two crucial aspects: concentrating on the conceptual understanding of physics concepts and processes, and fostering knowledge integration and coherence across various physics topics. These aspects are deemed essential for cultivating enduring knowledge and facilitating practical applications in the field of physics. © 2024 IEEE.},
keywords = {Adaptive Learning, Artificial intelligence, Artificial intelligence in education, Artificial Intelligence in Education (AIED), Conceptual Understanding, Educational System, Educational systems, Gamification, Generative AI, generative artificial intelligence, Learning Activity, Learning systems, Physics Education, Teachers', Teaching, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}