AHCI RESEARCH GROUP
Publications
Papers published in international journals,
proceedings of conferences, workshops and books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTeX record for each paper.
2025
Kurai, R.; Hiraki, T.; Hiroi, Y.; Hirao, Y.; Perusquía-Hernández, M.; Uchiyama, H.; Kiyokawa, K.
MagicCraft: Natural Language-Driven Generation of Dynamic and Interactive 3D Objects for Commercial Metaverse Platforms Journal Article
In: IEEE Access, vol. 13, pp. 132459–132474, 2025, ISSN: 21693536 (ISSN), (Publisher: Institute of Electrical and Electronics Engineers Inc.).
Abstract | Links | BibTeX | Tags: 3D models, 3D object, 3D Object Generation, 3d-modeling, AI-Assisted Design, Artificial intelligence, Behavioral Research, Content creation, Generative AI, Immersive, Metaverse, Metaverses, Natural language processing systems, Natural languages, Object oriented programming, Three dimensional computer graphics, user experience, User interfaces
@article{kurai_magiccraft_2025,
title = {{MagicCraft}: Natural Language-Driven Generation of Dynamic and Interactive {3D} Objects for Commercial Metaverse Platforms},
author = {R. Kurai and T. Hiraki and Y. Hiroi and Y. Hirao and M. Perusquía-Hernández and H. Uchiyama and K. Kiyokawa},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105010187256&doi=10.1109%2FACCESS.2025.3587232&partnerID=40&md5=9b7a8115c62a8f9da4956dbbbb53dc4e},
doi = {10.1109/ACCESS.2025.3587232},
issn = {2169-3536},
year = {2025},
date = {2025-01-01},
journal = {IEEE Access},
volume = {13},
pages = {132459--132474},
abstract = {Metaverse platforms are rapidly evolving to provide immersive spaces for user interaction and content creation. However, the generation of dynamic and interactive 3D objects remains challenging due to the need for advanced 3D modeling and programming skills. To address this challenge, we present MagicCraft, a system that generates functional 3D objects from natural language prompts for metaverse platforms. MagicCraft uses generative AI models to manage the entire content creation pipeline: converting user text descriptions into images, transforming images into 3D models, predicting object behavior, and assigning necessary attributes and scripts. It also provides an interactive interface for users to refine generated objects by adjusting features such as orientation, scale, seating positions, and grip points. Implemented on Cluster, a commercial metaverse platform, MagicCraft was evaluated by 7 expert CG designers and 51 general users. Results show that MagicCraft significantly reduces the time and skill required to create 3D objects. Users with no prior experience in 3D modeling or programming successfully created complex, interactive objects and deployed them in the metaverse. Expert feedback highlighted the system's potential to improve content creation workflows and support rapid prototyping. By integrating AI-generated content into metaverse platforms, MagicCraft makes 3D content creation more accessible. © 2025 Elsevier B.V., All rights reserved.},
note = {Publisher: Institute of Electrical and Electronics Engineers Inc.},
internal-note = {abstract copyright line says Elsevier but publisher/DOI are IEEE — scraper artifact, verify against source},
keywords = {3D models, 3D object, 3D Object Generation, 3d-modeling, AI-Assisted Design, Artificial intelligence, Behavioral Research, Content creation, Generative AI, Immersive, Metaverse, Metaverses, Natural language processing systems, Natural languages, Object oriented programming, Three dimensional computer graphics, user experience, User interfaces},
pubstate = {published},
tppubtype = {article}
}
Mendoza, A. P.; Barrios Quiroga, K. J.; Solano Celis, S. D.; Quintero M., C. G.
NAIA: A Multi-Technology Virtual Assistant for Boosting Academic Environments—A Case Study Journal Article
In: IEEE Access, vol. 13, pp. 141461–141483, 2025, ISSN: 21693536 (ISSN), (Publisher: Institute of Electrical and Electronics Engineers Inc.).
Abstract | Links | BibTeX | Tags: Academic environment, Artificial intelligence, Case-studies, Computational Linguistics, Computer vision, Digital avatar, Digital avatars, Efficiency, Human computer interaction, Human-AI Interaction, Interactive computer graphics, Language Model, Large language model, large language model (LLM), Learning systems, Natural language processing systems, Personal digital assistants, Personnel training, Population statistics, Speech communication, Speech processing, Speech to text, speech to text (STT), Text to speech, text to speech (TTS), user experience, User interfaces, Virtual assistant, Virtual assistants, Virtual Reality
@article{mendoza_naia_2025,
title = {{NAIA}: A Multi-Technology Virtual Assistant for Boosting Academic Environments—A Case Study},
author = {A. P. Mendoza and K. J. Barrios Quiroga and S. D. Solano Celis and C. G. {Quintero M.}},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105013598763&doi=10.1109%2FACCESS.2025.3597565&partnerID=40&md5=7ad6b037cfedb943fc026642c4854284},
doi = {10.1109/ACCESS.2025.3597565},
issn = {2169-3536},
year = {2025},
date = {2025-01-01},
journal = {IEEE Access},
volume = {13},
pages = {141461--141483},
abstract = {Virtual assistants have become essential tools for improving productivity and efficiency in various domains. This paper presents NAIA (Nimble Artificial Intelligence Assistant), an advanced multi-role and multi-task virtual assistant enhanced with artificial intelligence, designed to serve a university community case study. The system integrates AI technologies including Large Language Models (LLM), Computer Vision, and voice processing to create an immersive and efficient interaction through animated digital avatars. NAIA features five specialized roles: researcher, receptionist, personal skills trainer, personal assistant, and university guide, each equipped with specific capabilities to support different aspects of academic life. The system’s Computer Vision capabilities enable it to comment on users’ physical appearance and environment, enriching the interaction. Through natural language processing and voice interaction, NAIA aims to improve productivity and efficiency within the university environment while providing personalized assistance through a ubiquitous platform accessible across multiple devices. NAIA is evaluated through a user experience survey involving 30 participants with different demographic characteristics, this is the most accepted way by the community to evaluate this type of solution. Participants give their feedback after using one role of NAIA after using it for 30 minutes. The experiment showed that 90% of the participants considered NAIA-assisted tasks of higher quality and, on average, NAIA has a score of 4.27 out of 5 on user satisfaction. Participants particularly appreciated the assistant’s visual recognition, natural conversation flow, and user interaction capabilities. Results demonstrate NAIA’s capabilities and effectiveness across the five roles. © 2025 Elsevier B.V., All rights reserved.},
note = {Publisher: Institute of Electrical and Electronics Engineers Inc.},
keywords = {Academic environment, Artificial intelligence, Case-studies, Computational Linguistics, Computer vision, Digital avatar, Digital avatars, Efficiency, Human computer interaction, Human-AI Interaction, Interactive computer graphics, Language Model, Large language model, large language model (LLM), Learning systems, Natural language processing systems, Personal digital assistants, Personnel training, Population statistics, Speech communication, Speech processing, Speech to text, speech to text (STT), Text to speech, text to speech (TTS), user experience, User interfaces, Virtual assistant, Virtual assistants, Virtual Reality},
pubstate = {published},
tppubtype = {article}
}
Liu, Y.; Li, Z.
Research on the Design System of Bamboo-Woven Products Based on Traditional Bamboo-Weaving Craft VR Experience Journal Article
In: Forest Products Journal, vol. 75, no. 3, pp. 238–250, 2025, ISSN: 00157473 (ISSN), (Publisher: Forest Products Society).
Abstract | Links | BibTeX | Tags: 3D modeling, Artificial intelligence, Bamboo, Design models, Design systems, evaluation, Experience design, Function evaluation, Human computer interaction, Learn+, Low-costs, Novel techniques, Product design, Product experience, Products, Reliability analysis, Systems analysis, Systems Engineering, Techniques, user experience, User interfaces, Virtual Reality, Virtual reality experiences, Weaving, Weaving technique, Woven products
@article{liu_research_2025,
title = {Research on the Design System of Bamboo-Woven Products Based on Traditional Bamboo-Weaving Craft {VR} Experience},
author = {Y. Liu and Z. Li},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105013654324&doi=10.13073%2FFPJ-D-25-00005&partnerID=40&md5=d03d78771ee8194ffc8450d259b5f129},
doi = {10.13073/FPJ-D-25-00005},
issn = {0015-7473},
year = {2025},
date = {2025-01-01},
journal = {Forest Products Journal},
volume = {75},
number = {3},
pages = {238--250},
abstract = {Virtual reality (VR) is a simulated experience capable of replicating or creating an entirely new environment. Through VR experience, designers can learn bamboo-weaving techniques at a low cost and showcase their design models of bamboo-woven products virtually, allowing these products to be put into production after experience and evaluation. This study introduces novel techniques to transform and innovate traditional bamboo-woven products to establish a comprehensive VR-based product experience design system. This system follows a pioneering pathway, including the following steps: VR weaving skill experience, generative artificial intelligence design (AIGC)–driven bamboo design creativity, 3D modeling technology support, and VR product evaluation. Moreover, the framework conducts user experience research from three dimensions: visual design, system function design, and human–computer interaction design. Usability assessments and statistical analysis were employed before and after the VR experience to assess the system’s reliability. The findings indicate that designers and users can remarkably use and evaluate the new system, offering a practical technical pathway for the modern design exploration of traditional bamboo products. © 2025 Elsevier B.V., All rights reserved.},
note = {Publisher: Forest Products Society},
keywords = {3D modeling, Artificial intelligence, Bamboo, Design models, Design systems, evaluation, Experience design, Function evaluation, Human computer interaction, Learn+, Low-costs, Novel techniques, Product design, Product experience, Products, Reliability analysis, Systems analysis, Systems Engineering, Techniques, user experience, User interfaces, Virtual Reality, Virtual reality experiences, Weaving, Weaving technique, Woven products},
pubstate = {published},
tppubtype = {article}
}
Saengthongkam, S.; Ali, S.; Chokphantavee, S.; Chokphantavee, S.; Noisri, S.; Vanichchanunt, P.; Butcharoen, S.; Boontevee, S.; Phanomchoeng, G.; Deepaisarn, S.; Wuttisittikulkij, L.
AI-Powered Virtual Assistants in the Metaverse: Leveraging Retrieval-Augmented Generation for Smarter Interactions Proceedings Article
In: Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331522230 (ISBN).
Abstract | Links | BibTeX | Tags: AI, Artificial intelligence, chatbot, Chatbots, Cosine similarity, Intelligent Agents, Load testing, Metaverse, Metaverses, On the spots, Performance, Search engines, Similarity scores, user experience, User query, User support, Users' satisfactions, Virtual assistants, Virtual Reality
@inproceedings{saengthongkam_ai-powered_2025,
title = {{AI-Powered} Virtual Assistants in the Metaverse: Leveraging Retrieval-Augmented Generation for Smarter Interactions},
author = {S. Saengthongkam and S. Ali and S. Chokphantavee and S. Chokphantavee and S. Noisri and P. Vanichchanunt and S. Butcharoen and S. Boontevee and G. Phanomchoeng and S. Deepaisarn and L. Wuttisittikulkij},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105014379689&doi=10.1109%2FECTI-CON64996.2025.11101141&partnerID=40&md5=3f81fb234377399184ad031c8aa65333},
doi = {10.1109/ECTI-CON64996.2025.11101141},
isbn = {9798331522230},
year = {2025},
date = {2025-01-01},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
internal-note = {required booktitle field missing; DOI suggests ECTI-CON 2025 proceedings — verify and add},
abstract = {The metaverse has evolved at an unprecedented pace, creating new demands for on the spot user support and more engaging digital encounters. This paper describes a chatbot system built for the NT metaverse that combines retrieval-based search with advanced generative AI methods to provide accurate, context-driven responses. At the core of our approach is Retrieval Augmented Generation (RAG), which adeptly interprets diverse user queries while sustaining high performance under concurrent usage, as evidenced by a cosine similarity score of 0.79. In addition to maintaining efficiency during load testing, the system manages compound queries with ease, enhancing user satisfaction in complex virtual environments. Although these results are promising, future upgrades such as integrating voice-based interactions, multilingual support, and adaptive learning could further expand the chatbot's utility. Overall, this study demonstrates the tangible benefits of AI-driven conversational agents in digital realms, laying the groundwork for richer, more intelligent user experiences in emerging metaverse platforms. © 2025 Elsevier B.V., All rights reserved.},
keywords = {AI, Artificial intelligence, chatbot, Chatbots, Cosine similarity, Intelligent Agents, Load testing, Metaverse, Metaverses, On the spots, Performance, Search engines, Similarity scores, user experience, User query, User support, Users' satisfactions, Virtual assistants, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Tovias, E.; Wu, L.
Leveraging Virtual Reality and AI for Enhanced Vocabulary Learning Proceedings Article
In: pp. 308, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331521646 (ISBN).
Abstract | Links | BibTeX | Tags: Avatar, Avatars, E-Learning, Immersive, Interactive computer graphics, Interactive learning, Language Model, Large language model, large language models, Learning experiences, Real time interactions, Text-based methods, user experience, Users' experiences, Virtual environments, Virtual Reality, Vocabulary learning
@inproceedings{tovias_leveraging_2025,
title = {Leveraging Virtual Reality and {AI} for Enhanced Vocabulary Learning},
author = {E. Tovias and L. Wu},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105017563813&doi=10.1109%2FICHMS65439.2025.11154184&partnerID=40&md5=7b79f93d6f8ec222b25a4bfeac408d3a},
doi = {10.1109/ICHMS65439.2025.11154184},
isbn = {9798331521646},
year = {2025},
date = {2025-01-01},
pages = {308},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
internal-note = {required booktitle field missing; DOI suggests IEEE ICHMS 2025 proceedings — verify and add},
abstract = {This study examines the integration of virtual reality (VR) and Artificial Intelligence (AI) to create more immersive, interactive learning experiences. By combining VR's engaging user experience with AI-powered avatars, this research explores how these tools can enhance vocabulary learning compared to traditional text-based methods. Utilizing a Meta Quest 3 headset, Unity for development, and OpenAI's API & ElevenLabs for dynamic dialogues, this system offers personalized, real-time interactions (Fig. 1). The integration of these technologies fosters a bright future, driving significant advancements in the development of highly immersive and effective learning environments. © 2025 Elsevier B.V., All rights reserved.},
keywords = {Avatar, Avatars, E-Learning, Immersive, Interactive computer graphics, Interactive learning, Language Model, Large language model, large language models, Learning experiences, Real time interactions, Text-based methods, user experience, Users' experiences, Virtual environments, Virtual Reality, Vocabulary learning},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhang, H.; Zha, S.; Cai, J.; Wohn, D. Y.; Carroll, J. M.
Generative AI in Virtual Reality Communities: A Preliminary Analysis of the VRChat Discord Community Proceedings Article
In: Conf Hum Fact Comput Syst Proc, Association for Computing Machinery, 2025, ISBN: 9798400713958 (ISBN); 9798400713941 (ISBN).
Abstract | Links | BibTeX | Tags: AI assistant, AI Technologies, Coding framework, Ethical technology, Human-ai collaboration, Immersive, On-line communities, online community, Preliminary analysis, Property, Qualitative analysis, user experience, Users' experiences
@inproceedings{zhang_generative_2025,
title = {Generative {AI} in Virtual Reality Communities: A Preliminary Analysis of the {VRChat} {Discord} Community},
author = {H. Zhang and S. Zha and J. Cai and D. Y. Wohn and J. M. Carroll},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005770564&doi=10.1145%2F3706599.3720120&partnerID=40&md5=3eda146676e43237a27192e89c4f74c3},
doi = {10.1145/3706599.3720120},
isbn = {9798400713958; 9798400713941},
year = {2025},
date = {2025-01-01},
booktitle = {Conf Hum Fact Comput Syst Proc},
publisher = {Association for Computing Machinery},
abstract = {As immersive social platforms like VRChat increasingly adopt generative AI (GenAI) technologies, it becomes critical to understand how community members perceive, negotiate, and utilize these tools. In this preliminary study, we conducted a qualitative analysis of VRChat-related Discord discussions, employing a deductive coding framework to identify key themes related to AI-assisted content creation, intellectual property disputes, and evolving community norms. Our findings offer preliminary insights into the complex interplay between the community’s enthusiasm for AI-driven creativity and deep-rooted ethical and legal concerns. Users weigh issues of fair use, data ethics, intellectual property, and the role of community governance in establishing trust. By highlighting the tensions and trade-offs as users embrace new creative opportunities while seeking transparency, fair attribution, and equitable policies, this research offers valuable insights for designers, platform administrators, and policymakers aiming to foster responsible, inclusive, and ethically sound AI integration in future immersive virtual environments. © 2025 Elsevier B.V., All rights reserved.},
keywords = {AI assistant, AI Technologies, Coding framework, Ethical technology, Human-ai collaboration, Immersive, On-line communities, online community, Preliminary analysis, Property, Qualitative analysis, user experience, Users' experiences},
pubstate = {published},
tppubtype = {inproceedings}
}
El Saddik, A.; Ahmad, J.; Khan, M.; Abouzahir, S.; Gueaieb, W.
Unleashing Creativity in the Metaverse: Generative AI and Multimodal Content Journal Article
In: ACM Transactions on Multimedia Computing, Communications and Applications, vol. 21, no. 7, pp. 1–43, 2025, ISSN: 15516857 (ISSN); 15516865 (ISSN), (Publisher: Association for Computing Machinery).
Abstract | Links | BibTeX | Tags: Adversarial networks, Artificial intelligence, Content generation, Context information, Creatives, Diffusion Model, diffusion models, Generative adversarial networks, Generative AI, Human engineering, Information instructions, Interactive computer graphics, Interactive computer systems, Interactive devices, Interoperability, Metaverse, Metaverses, Multi-modal, multimodal, Simple++, Three dimensional computer graphics, user experience, User interfaces, Virtual Reality
@article{el_saddik_unleashing_2025,
title = {Unleashing Creativity in the Metaverse: Generative {AI} and Multimodal Content},
author = {A. {El Saddik} and J. Ahmad and M. Khan and S. Abouzahir and W. Gueaieb},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105011860002&doi=10.1145%2F3713075&partnerID=40&md5=20064843ced240c42e9353d747672cb3},
doi = {10.1145/3713075},
issn = {1551-6857; 1551-6865},
year = {2025},
date = {2025-01-01},
journal = {ACM Transactions on Multimedia Computing, Communications and Applications},
volume = {21},
number = {7},
pages = {1--43},
abstract = {The metaverse presents an emerging creative expression and collaboration frontier where generative artificial intelligence (GenAI) can play a pivotal role with its ability to generate multimodal content from simple prompts. These prompts allow the metaverse to interact with GenAI, where context information, instructions, input data, or even output indications constituting the prompt can come from within the metaverse. However, their integration poses challenges regarding interoperability, lack of standards, scalability, and maintaining a high-quality user experience. This article explores how GenAI can productively assist in enhancing creativity within the contexts of the metaverse and unlock new opportunities. We provide a technical, in-depth overview of the different generative models for image, video, audio, and 3D content within the metaverse environments. We also explore the bottlenecks, opportunities, and innovative applications of GenAI from the perspectives of end users, developers, service providers, and AI researchers. This survey commences by highlighting the potential of GenAI for enhancing the metaverse experience through dynamic content generation to populate massive virtual worlds. Subsequently, we shed light on the ongoing research practices and trends in multimodal content generation, enhancing realism and creativity and alleviating bottlenecks related to standardization, computational cost, privacy, and safety. Last, we share insights into promising research directions toward the integration of GenAI with the metaverse for creative enhancement, improved immersion, and innovative interactive applications. © 2025 Elsevier B.V., All rights reserved.},
note = {Publisher: Association for Computing Machinery},
keywords = {Adversarial networks, Artificial intelligence, Content generation, Context information, Creatives, Diffusion Model, diffusion models, Generative adversarial networks, Generative AI, Human engineering, Information instructions, Interactive computer graphics, Interactive computer systems, Interactive devices, Interoperability, Metaverse, Metaverses, Multi-modal, multimodal, Simple++, Three dimensional computer graphics, user experience, User interfaces, Virtual Reality},
pubstate = {published},
tppubtype = {article}
}
Yu, A.; Lee, G.; Liu, Y.; Zhang, M.; Jung, S.; Park, J.; Rhee, J.; Cho, K.
Development and Evaluation of an Immersive Metaverse-Based Meditation System for Psychological Well-Being Using LLM-Driven Scenario Generation Journal Article
In: Systems, vol. 13, no. 9, 2025, ISSN: 20798954 (ISSN), (Publisher: Multidisciplinary Digital Publishing Institute (MDPI)).
Abstract | Links | BibTeX | Tags: AI in healthcare, Artificial intelligence, Digital mindfulness intervention, feedback, Health care, Health technology, Immersive, Language Model, Mental health, Mental health technology, Metaverse, Metaverses, Mindfulness meditation, Scenarios generation, user experience, Virtual Reality
@article{yu_development_2025,
title = {Development and Evaluation of an Immersive {Metaverse-Based} Meditation System for Psychological Well-Being Using {LLM-Driven} Scenario Generation},
author = {A. Yu and G. Lee and Y. Liu and M. Zhang and S. Jung and J. Park and J. Rhee and K. Cho},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105017237114&doi=10.3390%2Fsystems13090798&partnerID=40&md5=553f0ecfd99de729fe0ff9c777c07dda},
doi = {10.3390/systems13090798},
issn = {2079-8954},
year = {2025},
date = {2025-01-01},
journal = {Systems},
volume = {13},
number = {9},
internal-note = {no pages; MDPI article number appears to be 798 per DOI suffix — verify and record as eid/pages},
abstract = {The increasing prevalence of mental health disorders highlights the need for innovative and accessible interventions. Although existing digital meditation applications offer valuable basic guidance, they often lack interactivity, real-time personalized feedback, and dynamic simulation of real-life scenarios necessary for comprehensive experiential training applicable to daily stressors. To address these limitations, this study developed a novel immersive meditation system specifically designed for deployment within a metaverse environment. The system provides mindfulness practice through two distinct modules within the virtual world. The experience-based module delivers AI-driven social interactions within simulated everyday scenarios, with narrative content dynamically generated by large language models (LLMs), followed by guided inner reflection, thereby forming a scenario–experience–reflection cycle. The breathing-focused module provides real-time feedback through a breath-synchronization interface to enhance respiratory awareness. The feasibility and preliminary effects of this metaverse-based system were explored in a two-week, single-group, pre-test/post-test study involving 31 participants. The participants completed a battery of validated psychological questionnaires assessing psychological distress, mindfulness, acceptance, self-compassion, and self-esteem before and after engaging in the intervention. This study provides exploratory evidence supporting the feasibility and potential of immersive metaverse environments and LLM-based scenario generation for structured mental health interventions, providing initial insights into their psychological impact and user experience. © 2025 Elsevier B.V., All rights reserved.},
note = {Publisher: Multidisciplinary Digital Publishing Institute (MDPI)},
keywords = {AI in healthcare, Artificial intelligence, Digital mindfulness intervention, feedback, Health care, Health technology, Immersive, Language Model, Mental health, Mental health technology, Metaverse, Metaverses, Mindfulness meditation, Scenarios generation, user experience, Virtual Reality},
pubstate = {published},
tppubtype = {article}
}
2024
Haramina, E.; Paladin, M.; Petričušić, Z.; Posarić, F.; Drobnjak, A.; Botički, I.
Learning Algorithms Concepts in a Virtual Reality Escape Room Proceedings Article
In: Babic, S.; Car, Z.; Cicin-Sain, M.; Cisic, D.; Ergovic, P.; Grbac, T. G.; Gradisnik, V.; Gros, S.; Jokic, A.; Jovic, A.; Jurekovic, D.; Katulic, T.; Koricic, M.; Mornar, V.; Petrovic, J.; Skala, K.; Skvorc, D.; Sruk, V.; Svaco, M.; Tijan, E.; Vrcek, N.; Vrdoljak, B. (Ed.): ICT Electron. Conv., MIPRO - Proc., pp. 2057–2062, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 9798350382495 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Computational complexity, Computer generated three dimensional environment, E-Learning, Education, Escape room, Extended reality, generative artificial intelligence, Learn+, Learning, Learning algorithms, Learning systems, Puzzle, puzzles, user experience, User study, User testing, Users' experiences, Virtual Reality
@inproceedings{haramina_learning_2024,
title = {Learning Algorithms Concepts in a Virtual Reality Escape Room},
author = {E. Haramina and M. Paladin and Z. Petričušić and F. Posarić and A. Drobnjak and I. Botički},
editor = {S. Babic and Z. Car and M. Cicin-Sain and D. Cisic and P. Ergovic and T. G. Grbac and V. Gradisnik and S. Gros and A. Jokic and A. Jovic and D. Jurekovic and T. Katulic and M. Koricic and V. Mornar and J. Petrovic and K. Skala and D. Skvorc and V. Sruk and M. Svaco and E. Tijan and N. Vrcek and B. Vrdoljak},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85198221737&doi=10.1109%2FMIPRO60963.2024.10569447&partnerID=40&md5=ee56896e4128fd5a8bef03825469a46f},
doi = {10.1109/MIPRO60963.2024.10569447},
isbn = {9798350382495},
year = {2024},
date = {2024-01-01},
booktitle = {ICT Electron. Conv., MIPRO - Proc.},
pages = {2057--2062},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Although the standard way to learn algorithms is by coding, learning through games is another way to obtain knowledge while having fun. Virtual reality is a computer-generated three-dimensional environment in which the player is fully immersed by having external stimuli mostly blocked out. In the game presented in this paper, players are enhancing their algorithms skills by playing an escape room game. The goal is to complete the room within the designated time by solving puzzles. The puzzles change for every playthrough with the use of generative artificial intelligence to provide every player with a unique experience. There are multiple types of puzzles such as. time complexity, sorting algorithms, searching algorithms, and code execution. The paper presents the results of a study indicating students' preference for learning through gaming as a method of acquiring algorithms knowledge. © 2024 Elsevier B.V., All rights reserved.},
keywords = {Artificial intelligence, Computational complexity, Computer generated three dimensional environment, E-Learning, Education, Escape room, Extended reality, generative artificial intelligence, Learn+, Learning, Learning algorithms, Learning systems, Puzzle, puzzles, user experience, User study, User testing, Users' experiences, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Bayat, R.; Maio, E.; Fiorenza, J.; Migliorini, M.; Lamberti, F.
Exploring Methodologies to Create a Unified VR User-Experience in the Field of Virtual Museum Experiences Proceedings Article
In: IEEE Gaming, Entertain., Media Conf., GEM, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 9798350374537 (ISBN).
Abstract | Links | BibTeX | Tags: Cultural heritages, Meta-museum, Meta-museums, Metaverse, Metaverses, Research frontiers, Research opportunities, user experience, User experience design, User interfaces, User-Experience Design, Users' experiences, Virtual avatar, Virtual machine, Virtual museum, Virtual Reality, Virtual reality experiences
@inproceedings{bayat_exploring_2024,
title = {Exploring Methodologies to Create a Unified {VR} User-Experience in the Field of Virtual Museum Experiences},
author = {R. Bayat and E. Maio and J. Fiorenza and M. Migliorini and F. Lamberti},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85199517817&doi=10.1109%2FGEM61861.2024.10585452&partnerID=40&md5=ced2ae6561acc66c71806ccf609ac9d1},
doi = {10.1109/GEM61861.2024.10585452},
isbn = {9798350374537},
year = {2024},
date = {2024-01-01},
booktitle = {IEEE Gaming, Entertain., Media Conf., GEM},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {The emergence of Virtual Reality (VR) and meta-verse have opened doors to new research opportunities and frontiers in User Experience (UX). Within the cultural heritage domain, one of the key concepts is that of the Virtual Museums (VMs), whose definition has been extended through time by many research works and applications. However, most of the studies performed so far focused on only one application, and studied its UX without taking into account the experience with other VR experiences possibly available in the VM. The purpose of this work is to give a contribution for an optimal design to create a unified UX across multiple VR experiences. More specifically, the research included the development of two applications, respectively a VM in a metaverse platform and a virtual learning workshop as an individual application. With this premise, the study will also consider two fundamental elements for an effective UX design: a Virtual Environment (VE) and an Intelligent Virtual Avatar (IVA). In particular, the latest was developed following current trends about generative AI, integrating an IVA powered by a Large Language Model (LLM). © 2024 Elsevier B.V., All rights reserved.},
keywords = {Cultural heritages, Meta-museum, Meta-museums, Metaverse, Metaverses, Research frontiers, Research opportunities, user experience, User experience design, User interfaces, User-Experience Design, Users' experiences, Virtual avatar, Virtual machine, Virtual museum, Virtual Reality, Virtual reality experiences},
pubstate = {published},
tppubtype = {inproceedings}
}
Min, Y.; Jeong, J. -W.
Public Speaking Q&A Practice with LLM-Generated Personas in Virtual Reality Proceedings Article
In: Eck, U.; Sra, M.; Stefanucci, J.; Sugimoto, M.; Tatzgern, M.; Williams, I. (Ed.): Proc. - IEEE Int. Symp. Mixed Augment. Real. Adjunct, ISMAR-Adjunct, pp. 493–496, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 9798331506919 (ISBN).
Abstract | Links | BibTeX | Tags: Digital elevation model, Economic and social effects, Language Model, Large language model-based persona generation, LLM-based Persona Generation, Model-based OPC, Personnel training, Power, Practice systems, Presentation Anxiety, Public speaking, Q&A practice, user experience, Users' experiences, Virtual environments, Virtual Reality, VR training
@inproceedings{min_public_2024,
title = {Public Speaking {Q\&A} Practice with {LLM}-Generated Personas in Virtual Reality},
author = {Min, Y. and Jeong, J.-W.},
editor = {Eck, U. and Sra, M. and Stefanucci, J. and Sugimoto, M. and Tatzgern, M. and Williams, I.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85214393734&doi=10.1109%2FISMAR-Adjunct64951.2024.00143&partnerID=40&md5=62583f87d8d870d6e44a13fe311a761d},
doi = {10.1109/ISMAR-Adjunct64951.2024.00143},
isbn = {9798331506919 (ISBN)},
year = {2024},
date = {2024-01-01},
booktitle = {Proc. - IEEE Int. Symp. Mixed Augment. Real. Adjunct, ISMAR-Adjunct},
pages = {493--496},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {This paper introduces a novel VR-based Q\&A practice system that harnesses the power of Large Language Models (LLMs). We support Q\&A practice for upcoming public speaking by providing an immersive VR training environment populated with LLM-generated audiences, each capable of posing diverse and realistic questions based on different personas. We conducted a pilot user study involving 20 participants who engaged in VR-based Q\&A practice sessions. The sessions featured a variety of questions regarding presentation material provided by the participants, all of which were generated by LLM-based personas. Through post-surveys and interviews, we evaluated the effectiveness of the proposed method. The participants valued the system for engagement and focus while also identifying several areas for improvement. Our study demonstrated the potential of integrating VR and LLMs to create a powerful, immersive tool for Q\&A practice. © 2025 Elsevier B.V., All rights reserved.},
keywords = {Digital elevation model, Economic and social effects, Language Model, Large language model-based persona generation, LLM-based Persona Generation, Model-based OPC, Personnel training, Power, Practice systems, Presentation Anxiety, Public speaking, Q\&A practice, user experience, Users' experiences, Virtual environments, Virtual Reality, VR training},
pubstate = {published},
tppubtype = {inproceedings}
}
Tang, Y.; Situ, J.; Huang, Y.
Beyond User Experience: Technical and Contextual Metrics for Large Language Models in Extended Reality Proceedings Article
In: UbiComp Companion - Companion ACM Int. Jt. Conf. Pervasive Ubiquitous Comput., pp. 640–643, Association for Computing Machinery, Inc, 2024, ISBN: 9798400710582 (ISBN).
Abstract | Links | BibTeX | Tags: Augmented Reality, Computer simulation languages, Evaluation Metrics, Extended reality, Language Model, Large language model, large language models, Mixed reality, Modeling performance, Natural language processing systems, Physical world, Spatial computing, spatial data, user experience, Users' experiences, Virtual environments, Virtual Reality
@inproceedings{tang_beyond_2024,
title = {Beyond User Experience: Technical and Contextual Metrics for Large Language Models in Extended Reality},
author = {Tang, Y. and Situ, J. and Huang, Y.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85206203437&doi=10.1145%2F3675094.3678995&partnerID=40&md5=335c61d650590b084fed80992b7b0072},
doi = {10.1145/3675094.3678995},
isbn = {9798400710582 (ISBN)},
year = {2024},
date = {2024-01-01},
booktitle = {UbiComp Companion - Companion ACM Int. Jt. Conf. Pervasive Ubiquitous Comput.},
pages = {640--643},
publisher = {Association for Computing Machinery, Inc},
abstract = {Spatial Computing involves interacting with the physical world through spatial data manipulation, closely linked with Extended Reality (XR), which includes Virtual Reality (VR), Augmented Reality (AR), and Mixed Reality (MR). Large Language Models (LLMs) significantly enhance XR applications by improving user interactions through natural language understanding and content generation. Typical evaluations of these applications focus on user experience (UX) metrics, such as task performance, user satisfaction, and psychological assessments, but often neglect the technical performance of the LLMs themselves. This paper identifies significant gaps in current evaluation practices for LLMs within XR environments, attributing them to the novelty of the field, the complexity of spatial contexts, and the multimodal nature of interactions in XR. To address these gaps, the paper proposes specific metrics tailored to evaluate LLM performance in XR contexts, including spatial contextual awareness, coherence, proactivity, multimodal integration, hallucination, and question-answering accuracy. These proposed metrics aim to complement existing UX evaluations, providing a comprehensive assessment framework that captures both the technical and user-centric aspects of LLM performance in XR applications. The conclusion underscores the necessity for a dual-focused approach that combines technical and UX metrics to ensure effective and user-friendly LLM-integrated XR systems. © 2024 Elsevier B.V., All rights reserved.},
keywords = {Augmented Reality, Computer simulation languages, Evaluation Metrics, Extended reality, Language Model, Large language model, large language models, Mixed reality, Modeling performance, Natural language processing systems, Physical world, Spatial computing, spatial data, user experience, Users' experiences, Virtual environments, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}