AHCI RESEARCH GROUP
Publications
Papers published in international journals,
proceedings of conferences, workshops and books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTex record for each paper.
2025
Lv, J.; Slowik, A.; Rani, S.; Kim, B.-G.; Chen, C.-M.; Kumari, S.; Li, K.; Lyu, X.; Jiang, H.
Multimodal Metaverse Healthcare: A Collaborative Representation and Adaptive Fusion Approach for Generative Artificial-Intelligence-Driven Diagnosis Journal Article
In: Research, vol. 8, 2025, ISSN: 2096-5168.
Abstract | Links | BibTeX | Tags: Adaptive fusion, Collaborative representations, Diagnosis, Electronic health record, Generative adversarial networks, Health care application, Healthcare environments, Immersive, Learning frameworks, Metaverses, Multi-modal, Multi-modal learning, Performance
@article{lv_multimodal_2025,
  title     = {Multimodal Metaverse Healthcare: A Collaborative Representation and Adaptive Fusion Approach for Generative Artificial-Intelligence-Driven Diagnosis},
  author    = {Lv, J. and Slowik, A. and Rani, S. and Kim, B.-G. and Chen, C.-M. and Kumari, S. and Li, K. and Lyu, X. and Jiang, H.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-86000613924&doi=10.34133%2fresearch.0616&partnerID=40&md5=fdc8ae3b29db905105dada9a5657b54b},
  doi       = {10.34133/research.0616},
  issn      = {2096-5168},
  year      = {2025},
  journal   = {Research},
  volume    = {8},
  abstract  = {The metaverse enables immersive virtual healthcare environments, presenting opportunities for enhanced care delivery. A key challenge lies in effectively combining multimodal healthcare data and generative artificial intelligence abilities within metaverse-based healthcare applications, which is a problem that needs to be addressed. This paper proposes a novel multimodal learning framework for metaverse healthcare, MMLMH, based on collaborative intra- and intersample representation and adaptive fusion. Our framework introduces a collaborative representation learning approach that captures shared and modality-specific features across text, audio, and visual health data. By combining modality-specific and shared encoders with carefully formulated intrasample and intersample collaboration mechanisms, MMLMH achieves superior feature representation for complex health assessments. The framework’s adaptive fusion approach, utilizing attention mechanisms and gated neural networks, demonstrates robust performance across varying noise levels and data quality conditions. Experiments on metaverse healthcare datasets demonstrate MMLMH’s superior performance over baseline methods across multiple evaluation metrics. Longitudinal studies and visualization further illustrate MMLMH’s adaptability to evolving virtual environments and balanced performance across diagnostic accuracy, patient–system interaction efficacy, and data integration complexity. The proposed framework has a unique advantage in that a similar level of performance is maintained across various patient populations and virtual avatars, which could lead to greater personalization of healthcare experiences in the metaverse. MMLMH’s successful functioning in such complicated circumstances suggests that it can combine and process information streams from several sources. They can be successfully utilized in next-generation healthcare delivery through virtual reality. © 2025 Jianhui Lv et al.},
  keywords  = {Adaptive fusion, Collaborative representations, Diagnosis, Electronic health record, Generative adversarial networks, Health care application, Healthcare environments, Immersive, Learning frameworks, Metaverses, Multi-modal, Multi-modal learning, Performance},
  pubstate  = {published},
  tppubtype = {article}
}
Kurai, R.; Hiraki, T.; Hiroi, Y.; Hirao, Y.; Perusquia-Hernandez, M.; Uchiyama, H.; Kiyokawa, K.
An implementation of MagicCraft: Generating Interactive 3D Objects and Their Behaviors from Text for Commercial Metaverse Platforms Proceedings Article
In: Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW, pp. 1284–1285, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 979-833151484-6 (ISBN).
Abstract | Links | BibTeX | Tags: 3D modeling, 3D models, 3D object, 3D Object Generation, 3d-modeling, AI-Assisted Design, Generative AI, Immersive, Metaverse, Metaverses, Model skill, Object oriented programming, Programming skills
@inproceedings{kurai_implementation_2025,
  title     = {An implementation of {MagicCraft}: Generating Interactive {3D} Objects and Their Behaviors from Text for Commercial Metaverse Platforms},
  author    = {Kurai, R. and Hiraki, T. and Hiroi, Y. and Hirao, Y. and Perusquia-Hernandez, M. and Uchiyama, H. and Kiyokawa, K.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005153642&doi=10.1109%2fVRW66409.2025.00288&partnerID=40&md5=53fa1ac92c3210f0ffa090ffa1af7e6e},
  doi       = {10.1109/VRW66409.2025.00288},
  isbn      = {979-8-3315-1484-6},
  year      = {2025},
  booktitle = {Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW},
  pages     = {1284--1285},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {Metaverse platforms are rapidly evolving to provide immersive spaces. However, the generation of dynamic and interactive 3D objects remains a challenge due to the need for advanced 3D modeling and programming skills. We present MagicCraft, a system that generates functional 3D objects from natural language prompts. MagicCraft uses generative AI models to manage the entire content creation pipeline: converting user text descriptions into images, transforming images into 3D models, predicting object behavior, and assigning necessary attributes and scripts. It also provides an interactive interface for users to refine generated objects by adjusting features like orientation, scale, seating positions, and grip points. © 2025 IEEE.},
  keywords  = {3D modeling, 3D models, 3D object, 3D Object Generation, 3d-modeling, AI-Assisted Design, Generative AI, Immersive, Metaverse, Metaverses, Model skill, Object oriented programming, Programming skills},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Grubert, J.; Schmalstieg, D.; Dickhaut, K.
Towards Supporting Literary Studies Using Virtual Reality and Generative Artificial Intelligence Proceedings Article
In: Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW, pp. 147–149, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 979-833151484-6 (ISBN).
Abstract | Links | BibTeX | Tags: Cultural-historical, generative artificial intelligence, Immersive, literary studies, Literary study, Literary texts, Literature analysis, Textual-analysis, Virtual Reality, Visual elements
@inproceedings{grubert_towards_2025,
  title     = {Towards Supporting Literary Studies Using Virtual Reality and Generative Artificial Intelligence},
  author    = {Grubert, J. and Schmalstieg, D. and Dickhaut, K.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005144426&doi=10.1109%2fVRW66409.2025.00037&partnerID=40&md5=0315225b4c49f5f87bca94f82f41281c},
  doi       = {10.1109/VRW66409.2025.00037},
  isbn      = {979-8-3315-1484-6},
  year      = {2025},
  booktitle = {Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW},
  pages     = {147--149},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {Literary studies critically examine fictional texts, exploring their structures, themes, stylistic features, and cultural-historical contexts. A central challenge in this field lies in bridging textual analysis with the spatial and sensory dimensions of settings described or implied in texts. Traditional methodologies often require scholars to mentally reconstruct these environments, leading to incomplete or inconsistent interpretations. Readers may be biased by their personal context or experiences, or may lack detailed knowledge of the relevant historical facts. This paper argues for the integration of virtual reality and generative artificial intelligence as supporting instruments to enhance literary research. The former enables immersive, spatially accurate reconstructions of historical environments, while the latter provides tools such as text-to-image and text-to-3D generation which let us dynamically render visual elements quoted in literary texts. Together, these technologies have the potential to significantly enhance traditional literature analysis methodologies, enabling novel approaches for contextualizing and analyzing literature in its spatial and cultural milieu. © 2025 IEEE.},
  keywords  = {Cultural-historical, generative artificial intelligence, Immersive, literary studies, Literary study, Literary texts, Literature analysis, Textual-analysis, Virtual Reality, Visual elements},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Chang, K. -Y.; Lee, C. -F.
Enhancing Virtual Restorative Environment with Generative AI: Personalized Immersive Stress-Relief Experiences Proceedings Article
In: V.G., Duffy (Ed.): Lect. Notes Comput. Sci., pp. 132–144, Springer Science and Business Media Deutschland GmbH, 2025, ISBN: 03029743 (ISSN); 978-303193501-5 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence generated content, Artificial Intelligence Generated Content (AIGC), Electroencephalography, Electroencephalography (EEG), Generative AI, Immersive, Immersive environment, Mental health, Physical limitations, Restorative environment, Stress relief, Virtual reality exposure therapies, Virtual reality exposure therapy, Virtual Reality Exposure Therapy (VRET), Virtualization
@inproceedings{chang_enhancing_2025,
  title     = {Enhancing Virtual Restorative Environment with Generative {AI}: Personalized Immersive Stress-Relief Experiences},
  author    = {Chang, K.-Y. and Lee, C.-F.},
  editor    = {Duffy, V. G.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105007759157&doi=10.1007%2f978-3-031-93502-2_9&partnerID=40&md5=ee620a5da9b65e90ccb1eaa75ec8b724},
  doi       = {10.1007/978-3-031-93502-2_9},
  isbn      = {978-3-031-93501-5},
  issn      = {0302-9743},
  year      = {2025},
  booktitle = {Lect. Notes Comput. Sci.},
  series    = {Lecture Notes in Computer Science},
  volume    = {15791},
  pages     = {132--144},
  publisher = {Springer Science and Business Media Deutschland GmbH},
  abstract  = {In today’s fast-paced world, stress and mental health challenges are becoming more common. Restorative environments help people relax and recover emotionally, and Virtual Reality Exposure Therapy (VRET) offers a way to experience these benefits beyond physical limitations. However, most VRET applications rely on pre-designed content, limiting their adaptability to individual needs. This study explores how Generative AI can enhance VRET by creating personalized, immersive environments that better match users’ preferences and improve relaxation. To evaluate the impact of AI-generated restorative environments, we combined EEG measurements with user interviews. Thirty university students participated in the study, experiencing two different modes: static mode and walking mode. The EEG results showed an increase in Theta (θ) and High Beta (β) brain waves, suggesting a state of deep immersion accompanied by heightened cognitive engagement and mental effort. While participants found the experience enjoyable and engaging, the AI-generated environments tended to create excitement and focus rather than conventional relaxation. These findings suggest that for AI-generated environments in VRET to be more effective for stress relief, future designs should reduce cognitive load while maintaining immersion. This study provides insights into how AI can enhance relaxation experiences and introduces a new perspective on personalized digital stress-relief solutions. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2025.},
  keywords  = {Artificial intelligence generated content, Artificial Intelligence Generated Content (AIGC), Electroencephalography, Electroencephalography (EEG), Generative AI, Immersive, Immersive environment, Mental health, Physical limitations, Restorative environment, Stress relief, Virtual reality exposure therapies, Virtual reality exposure therapy, Virtual Reality Exposure Therapy (VRET), Virtualization},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Tortora, A.; Amaro, I.; Greca, A. Della; Barra, P.
Exploring the Role of Generative Artificial Intelligence in Virtual Reality: Opportunities and Future Perspectives Proceedings Article
In: J.Y.C., Chen; G., Fragomeni (Ed.): Lect. Notes Comput. Sci., pp. 125–142, Springer Science and Business Media Deutschland GmbH, 2025, ISBN: 03029743 (ISSN); 978-303193699-9 (ISBN).
Abstract | Links | BibTeX | Tags: Ethical technology, Future perspectives, Generative AI, Image modeling, Immersive, immersive experience, Immersive Experiences, Information Management, Language Model, Personnel training, Professional training, Real- time, Sensitive data, Training design, Users' experiences, Virtual Reality
@inproceedings{tortora_exploring_2025,
  title     = {Exploring the Role of Generative Artificial Intelligence in Virtual Reality: Opportunities and Future Perspectives},
  author    = {Tortora, A. and Amaro, I. and Della Greca, A. and Barra, P.},
  editor    = {Chen, J. Y. C. and Fragomeni, G.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105007788684&doi=10.1007%2f978-3-031-93700-2_9&partnerID=40&md5=7b69183bbf8172f9595f939254fb6831},
  doi       = {10.1007/978-3-031-93700-2_9},
  isbn      = {978-3-031-93699-9},
  issn      = {0302-9743},
  year      = {2025},
  booktitle = {Lect. Notes Comput. Sci.},
  series    = {Lecture Notes in Computer Science},
  volume    = {15788},
  pages     = {125--142},
  publisher = {Springer Science and Business Media Deutschland GmbH},
  abstract  = {In recent years, generative AI, such as language and image models, have started to revolutionize virtual reality (VR) by offering new opportunities for immersive and personalized interaction. This paper explores the potential of these Intelligent Augmentation technologies in the context of VR, analyzing how the generation of text and images in real time can enhance the user experience through dynamic and personalized environments and contents. The integration of generative AI in VR scenarios holds promise in multiple fields, including education, professional training, design, and healthcare. However, their implementation involves significant challenges, such as privacy management, data security, and ethical issues related to cognitive manipulation and representation of reality. Through an overview of current applications and future prospects, this paper highlights the crucial role of generative AI in enhancing VR, helping to outline a path for the ethical and sustainable development of these immersive technologies. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2025.},
  keywords  = {Ethical technology, Future perspectives, Generative AI, Image modeling, Immersive, immersive experience, Immersive Experiences, Information Management, Language Model, Personnel training, Professional training, Real- time, Sensitive data, Training design, Users' experiences, Virtual Reality},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Li, Y.; Pang, E. C. H.; Ng, C. S. Y.; Azim, M.; Leung, H.
Enhancing Linear Algebra Education with AI-Generated Content in the CityU Metaverse: A Comparative Study Proceedings Article
In: T., Hao; J.G., Wu; X., Luo; Y., Sun; Y., Mu; S., Ge; W., Xie (Ed.): Lect. Notes Comput. Sci., pp. 3–16, Springer Science and Business Media Deutschland GmbH, 2025, ISBN: 03029743 (ISSN); 978-981964406-3 (ISBN).
Abstract | Links | BibTeX | Tags: Comparatives studies, Digital age, Digital interactions, digital twin, Educational metaverse, Engineering education, Generative AI, Immersive, Matrix algebra, Metaverse, Metaverses, Personnel training, Students, Teaching, University campus, Virtual environments, virtual learning environment, Virtual learning environments, Virtual Reality, Virtualization
@inproceedings{li_enhancing_2025,
  title     = {Enhancing Linear Algebra Education with {AI}-Generated Content in the {CityU} Metaverse: A Comparative Study},
  author    = {Li, Y. and Pang, E. C. H. and Ng, C. S. Y. and Azim, M. and Leung, H.},
  editor    = {Hao, T. and Wu, J. G. and Luo, X. and Sun, Y. and Mu, Y. and Ge, S. and Xie, W.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105003632691&doi=10.1007%2f978-981-96-4407-0_1&partnerID=40&md5=c067ba5d4c15e9c0353bf315680531fc},
  doi       = {10.1007/978-981-96-4407-0_1},
  isbn      = {978-981-96-4406-3},
  issn      = {0302-9743},
  year      = {2025},
  booktitle = {Lect. Notes Comput. Sci.},
  series    = {Lecture Notes in Computer Science},
  volume    = {15589},
  pages     = {3--16},
  publisher = {Springer Science and Business Media Deutschland GmbH},
  abstract  = {In today’s digital age, the metaverse is emerging as the forthcoming evolution of the internet. It provides an immersive space that marks a new frontier in the way digital interactions are facilitated and experienced. In this paper, we present the CityU Metaverse, which aims to construct a digital twin of our university campus. It is designed as an educational virtual world where learning applications can be embedded in this virtual campus, supporting not only remote and collaborative learning but also professional technical training to enhance educational experiences through immersive and interactive learning. To evaluate the effectiveness of this educational metaverse, we conducted an experiment focused on 3D linear transformation in linear algebra, with teaching content generated by generative AI, comparing our metaverse system with traditional teaching methods. Knowledge tests and surveys assessing learning interest revealed that students engaged with the CityU Metaverse, facilitated by AI-generated content, outperformed those in traditional settings and reported greater enjoyment during the learning process. The work provides valuable perspectives on the behaviors and interactions within the metaverse by analyzing user preferences and learning outcomes. © The Author(s), under exclusive license to Springer Nature Singapore Pte Ltd. 2025.},
  keywords  = {Comparatives studies, Digital age, Digital interactions, digital twin, Educational metaverse, Engineering education, Generative AI, Immersive, Matrix algebra, Metaverse, Metaverses, Personnel training, Students, Teaching, University campus, Virtual environments, virtual learning environment, Virtual learning environments, Virtual Reality, Virtualization},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Casas, L.; Hannah, S.; Mitchell, K.
HoloJig: Interactive Spoken Prompt Specified Generative AI Environments Journal Article
In: IEEE Computer Graphics and Applications, vol. 45, no. 2, pp. 69–77, 2025, ISSN: 02721716 (ISSN).
Abstract | Links | BibTeX | Tags: 3-D rendering, Article, Collaborative workspace, customer experience, Economic and social effects, generative artificial intelligence, human, Immersive, Immersive environment, parallax, Real- time, simulation, Simulation training, speech, Time based, Virtual environments, Virtual Reality, Virtual reality experiences, Virtual spaces, VR systems
@article{casas_holojig_2025,
  title     = {{HoloJig}: Interactive Spoken Prompt Specified Generative {AI} Environments},
  author    = {Casas, L. and Hannah, S. and Mitchell, K.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105001182100&doi=10.1109%2fMCG.2025.3553780&partnerID=40&md5=ec5dc44023314b6f9221169357d81dcd},
  doi       = {10.1109/MCG.2025.3553780},
  issn      = {0272-1716},
  year      = {2025},
  journal   = {IEEE Computer Graphics and Applications},
  volume    = {45},
  number    = {2},
  pages     = {69--77},
  abstract  = {HoloJig offers an interactive, speech-to-virtual reality (VR), VR experience that generates diverse environments in real time based on live spoken descriptions. Unlike traditional VR systems that rely on prebuilt assets, HoloJig dynamically creates personalized and immersive virtual spaces with depth-based parallax 3-D rendering, allowing users to define the characteristics of their immersive environment through verbal prompts. This generative approach opens up new possibilities for interactive experiences, including simulations, training, collaborative workspaces, and entertainment. In addition to speech-to-VR environment generation, a key innovation of HoloJig is its progressive visual transition mechanism, which smoothly dissolves between previously generated and newly requested environments, mitigating the delay caused by neural computations. This feature ensures a seamless and continuous user experience, even as new scenes are being rendered on remote servers. © 1981-2012 IEEE.},
  keywords  = {3-D rendering, Article, Collaborative workspace, customer experience, Economic and social effects, generative artificial intelligence, human, Immersive, Immersive environment, parallax, Real- time, simulation, Simulation training, speech, Time based, Virtual environments, Virtual Reality, Virtual reality experiences, Virtual spaces, VR systems},
  pubstate  = {published},
  tppubtype = {article}
}
Bosser, A. -G.; Cascarano, P.; Lacoche, J.; Hajahmadi, S.; Stanescu, A.; Sörös, G.
Preface to the First Workshop on GenAI-XR: Generative Artificial Intelligence meets Extended Reality Proceedings Article
In: Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW, pp. 129–130, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 979-833151484-6 (ISBN).
Abstract | Links | BibTeX | Tags: Adaptive environment, Adaptive Environments, Art educations, Artificial Intelligence and Extended Reality Integration, Context-aware systems, Entertainment industry, Extended reality, Immersive, Indexterm: generative artificial intelligence, IndexTerms: Generative Artificial Intelligence, Industry professionals, Innovative method, Personalized interaction, Personalized Interactions
@inproceedings{bosser_preface_2025,
  title     = {Preface to the First Workshop on {GenAI-XR}: Generative Artificial Intelligence meets Extended Reality},
  author    = {Bosser, A.-G. and Cascarano, P. and Lacoche, J. and Hajahmadi, S. and Stanescu, A. and Sörös, G.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005161220&doi=10.1109%2fVRW66409.2025.00033&partnerID=40&md5=41f0fb56b31c0beb94368c3379e5d75a},
  doi       = {10.1109/VRW66409.2025.00033},
  isbn      = {979-8-3315-1484-6},
  year      = {2025},
  booktitle = {Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW},
  pages     = {129--130},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {The GenAI-XR workshop aims to explore the intersection of Generative Artificial Intelligence (GenAI) and Extended Reality (XR), examining their combined potential to revolutionize various sectors including entertainment, arts, education, factory work, healthcare, architecture, and others. The workshop will provide a platform for researchers, industry professionals, and practitioners to discuss innovative methods of integrating GenAI into XR environments, enhancing immersive experiences, and personalizing interactions in real time. Through presentation and discussion sessions, participants will gain insights into the latest developments, challenges, and future directions at the intersection of GenAI and XR. © 2025 IEEE.},
  keywords  = {Adaptive environment, Adaptive Environments, Art educations, Artificial Intelligence and Extended Reality Integration, Context-aware systems, Entertainment industry, Extended reality, Immersive, Indexterm: generative artificial intelligence, IndexTerms: Generative Artificial Intelligence, Industry professionals, Innovative method, Personalized interaction, Personalized Interactions},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Otsuka, T.; Li, D.; Siriaraya, P.; Nakajima, S.
Development of A Relaxation Support System Utilizing Stereophonic AR Proceedings Article
In: Int. Conf. Comput., Netw. Commun., ICNC, pp. 463–467, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 979-833152096-0 (ISBN).
Abstract | Links | BibTeX | Tags: Augmented Reality, Environmental sounds, Generative AI, Immersive, Mental Well-being, Soundscapes, Spatial Audio, Stereo image processing, Support method, Support systems, Well being
@inproceedings{otsuka_development_2025,
  title     = {Development of A Relaxation Support System Utilizing Stereophonic {AR}},
  author    = {Otsuka, T. and Li, D. and Siriaraya, P. and Nakajima, S.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105006602014&doi=10.1109%2fICNC64010.2025.10993739&partnerID=40&md5=abdaca1aefc88381072c1e8090697638},
  doi       = {10.1109/ICNC64010.2025.10993739},
  isbn      = {979-8-3315-2096-0},
  year      = {2025},
  booktitle = {Int. Conf. Comput., Netw. Commun., ICNC},
  pages     = {463--467},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {Given the high prevalence of stress and anxiety in today's society, there is an urgent need to explore effective methods to help people manage stress. This research aims to develop a relaxation support system using stereophonic augmented reality (AR), designed to help alleviate stress by recreating relaxing environments with immersive stereo soundscapes, including stories created from generative AI and environmental sounds while users are going for a walk. This paper presents a preliminary evaluation of the effectiveness of the proposed relaxation support method. © 2025 IEEE.},
  keywords  = {Augmented Reality, Environmental sounds, Generative AI, Immersive, Mental Well-being, Soundscapes, Spatial Audio, Stereo image processing, Support method, Support systems, Well being},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Shen, Y.; Li, B.; Huang, J.; Wang, Z.
GaussianShopVR: Facilitating Immersive 3D Authoring Using Gaussian Splatting in VR Proceedings Article
In: Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW, pp. 1292–1293, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 979-833151484-6 (ISBN).
Abstract | Links | BibTeX | Tags: 3D authoring, 3D modeling, Digital replicas, Gaussian distribution, Gaussian Splatting editing, Gaussians, Graphical user interfaces, High quality, Immersive, Immersive environment, Interactive computer graphics, Rendering (computer graphics), Rendering pipelines, Splatting, Three dimensional computer graphics, User profile, Virtual Reality, Virtual reality user interface, Virtualization, VR user interface
@inproceedings{shen_gaussianshopvr_2025,
  title     = {{GaussianShopVR}: Facilitating Immersive {3D} Authoring Using {Gaussian} Splatting in {VR}},
  author    = {Shen, Y. and Li, B. and Huang, J. and Wang, Z.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005138672&doi=10.1109%2fVRW66409.2025.00292&partnerID=40&md5=9b644bd19394a289d3027ab9a2dfed6a},
  doi       = {10.1109/VRW66409.2025.00292},
  isbn      = {979-8-3315-1484-6},
  year      = {2025},
  booktitle = {Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW},
  pages     = {1292--1293},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {Virtual reality (VR) applications require massive high-quality 3D assets to create immersive environments. Generating mesh-based 3D assets typically involves a significant amount of manpower and effort, which makes VR applications less accessible. 3D Gaussian Splatting (3DGS) has attracted much attention for its ability to quickly create digital replicas of real-life scenes and its compatibility with traditional rendering pipelines. However, it remains a challenge to edit 3DGS in a flexible and controllable manner. We propose GaussianShopVR, a system that leverages VR user interfaces to specify target areas to achieve flexible and controllable editing of reconstructed 3DGS. In addition, selected areas can provide 3D information to generative AI models to facilitate the editing. GaussianShopVR integrates object hierarchy management while keeping the backpropagated gradient flow to allow local editing with context information. © 2025 IEEE.},
  keywords  = {3D authoring, 3D modeling, Digital replicas, Gaussian distribution, Gaussian Splatting editing, Gaussians, Graphical user interfaces, High quality, Immersive, Immersive environment, Interactive computer graphics, Rendering (computer graphics), Rendering pipelines, Splatting, Three dimensional computer graphics, User profile, Virtual Reality, Virtual reality user interface, Virtualization, VR user interface},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Li, J.; Neshaei, S. P.; Müller, L.; Rietsche, R.; Davis, R. L.; Wambsganss, T.
SpatiaLearn: Exploring XR Learning Environments for Reflective Writing Proceedings Article
In: Conf Hum Fact Comput Syst Proc, Association for Computing Machinery, 2025, ISBN: 979-840071395-8 (ISBN).
Abstract | Links | BibTeX | Tags: Adaptive Education, Conversational Agents, Conversational Tutoring, Critical thinking, Extended reality (XR), Immersive, Learning Environments, Metacognitive awareness, Reflective writing, Spatial computing
@inproceedings{li_spatialearn_2025,
  title     = {{SpatiaLearn}: Exploring {XR} Learning Environments for Reflective Writing},
  author    = {Li, J. and Neshaei, S. P. and Müller, L. and Rietsche, R. and Davis, R. L. and Wambsganss, T.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005757843&doi=10.1145%2f3706599.3719742&partnerID=40&md5=6e9ce83d3508cb377e209edd6884c505},
  doi       = {10.1145/3706599.3719742},
  isbn      = {979-8-4007-1395-8},
  year      = {2025},
  booktitle = {Conf Hum Fact Comput Syst Proc},
  publisher = {Association for Computing Machinery},
  abstract  = {Reflective writing promotes deeper learning by enhancing metacognitive awareness and critical thinking, but learners often struggle with structuring their reflections and maintaining focus. Generative AI and advances in spatial computing offer promising solutions. Extended reality (XR) environments create immersive, distraction-free settings, while conversational agents use dialog-based scaffolding guides to structure learners’ thoughts. However, research on combining dialog-based scaffolding with XR for reflective writing remains limited. To address this, we introduce SpatiaLearn, an adaptive XR tool that enhances reflective writing through conversational guidance in both traditional and immersive environments. A within-subjects study (N = 19) compared participants’ performance in traditional laptop and XR environments. Qualitative analysis shows the spatial interface enhances engagement but raises challenges like unfamiliar interactions and health concerns, requiring task adaptation for XR. This study advances the design of immersive tools for reflective writing, highlighting both the opportunities and challenges of spatial interfaces. © 2025 Copyright held by the owner/author(s).},
  keywords  = {Adaptive Education, Conversational Agents, Conversational Tutoring, Critical thinking, Extended reality (XR), Immersive, Learning Environments, Metacognitive awareness, Reflective writing, Spatial computing},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Timmerman, K.; Mertens, R.; Yoncalik, A.; Spriet, L.
Cities Unseen: Experiencing the Imagined Proceedings Article
In: Proc. Int. Conf. Tangible, Embed., Embodied Interact., TEI, Association for Computing Machinery, Inc, 2025, ISBN: 979-840071197-8 (ISBN).
Abstract | Links | BibTeX | Tags: Art installation, Embodiment, Immersive, Immersive Storytelling, Multiplayers, Presence, Real time interactions, Sensory Involvement, Through the lens, Urban environments, Virtual environments, Virtual Reality
@inproceedings{timmerman_cities_2025,
title = {{Cities Unseen}: Experiencing the Imagined},
author = {K. Timmerman and R. Mertens and A. Yoncalik and L. Spriet},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105000440301&doi=10.1145%2f3689050.3707685&partnerID=40&md5=6aa0968146eab8f91fba6eaeb30f7f9c},
doi = {10.1145/3689050.3707685},
isbn = {979-840071197-8},
year = {2025},
date = {2025-01-01},
booktitle = {Proc. Int. Conf. Tangible, Embed., Embodied Interact., TEI},
publisher = {Association for Computing Machinery, Inc},
abstract = {"Cities Unseen: Experiencing the Imagined" is an art installation that reinterprets Italo Calvino's "Invisible Cities" through the lens of virtual reality. The project employs a physical suitcase as a portal, allowing participants to enter and explore virtual urban environments using the Oculus Quest 3. The cityscapes will be developed with generative AI, converting Calvino's descriptions into prompts, creating an immersive space for philosophical reflection on the nature of travel and the boundaries between reality and imagination. By integrating Unity's shared spatial anchors and advanced multiplayer features, "Cities Unseen" supports real-time interaction among participants, emphasizing the social and collaborative dimensions of virtual travel. © 2025 Copyright held by the owner/author(s).},
keywords = {Art installation, Embodiment, Immersive, Immersive Storytelling, Multiplayers, Presence, Real time interactions, Sensory Involvement, Through the lens, Urban environments, Virtual environments, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Sousa, R. T.; Oliveira, E. A. M.; Cintra, L. M. F.; Filho, A. R. G.
Transformative Technologies for Rehabilitation: Leveraging Immersive and AI-Driven Solutions to Reduce Recidivism and Promote Decent Work Proceedings Article
In: Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW, pp. 168–171, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 979-833151484-6 (ISBN).
Abstract | Links | BibTeX | Tags: AI- Driven Rehabilitation, Artificial intelligence- driven rehabilitation, Emotional intelligence, Engineering education, Generative AI, generative artificial intelligence, Immersive, Immersive technologies, Immersive Technology, Language Model, Large language model, large language models, Skills development, Social Reintegration, Social skills, Sociology, Vocational training
@inproceedings{sousa_transformative_2025,
title = {Transformative Technologies for Rehabilitation: Leveraging Immersive and {AI-Driven} Solutions to Reduce Recidivism and Promote {Decent Work}},
author = {R. T. Sousa and E. A. M. Oliveira and L. M. F. Cintra and A. R. G. Filho},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005140551&doi=10.1109%2fVRW66409.2025.00042&partnerID=40&md5=89da6954863a272d48c0d8da3760bfb6},
doi = {10.1109/VRW66409.2025.00042},
isbn = {979-833151484-6},
year = {2025},
date = {2025-01-01},
booktitle = {Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW},
pages = {168--171},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {The reintegration of incarcerated individuals into society presents significant challenges, particularly in addressing barriers related to vocational training, social skill development, and emotional rehabilitation. Immersive technologies, such as Virtual Reality and Augmented Reality, combined with generative Artificial Intelligence (AI) and Large Language Models, offer innovative opportunities to enhance these areas. These technologies create practical, controlled environments for skill acquisition and behavioral training, while generative AI enables dynamic, personalized, and adaptive experiences. This paper explores the broader potential of these integrated technologies in supporting rehabilitation, reducing recidivism, and fostering sustainable employment opportunities and these initiatives align with the overarching equity objective of ensuring Decent Work for All, reinforcing the commitment to inclusive and equitable progress across diverse communities, through the transformative potential of immersive and AI-driven systems in correctional systems. © 2025 IEEE.},
keywords = {AI- Driven Rehabilitation, Artificial intelligence- driven rehabilitation, Emotional intelligence, Engineering education, Generative AI, generative artificial intelligence, Immersive, Immersive technologies, Immersive Technology, Language Model, Large language model, large language models, Skills development, Social Reintegration, Social skills, Sociology, Vocational training},
pubstate = {published},
tppubtype = {inproceedings}
}
Gatti, E.; Giunchi, D.; Numan, N.; Steed, A.
Around the Virtual Campfire: Early UX Insights into AI-Generated Stories in VR Proceedings Article
In: Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR, pp. 136–141, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 979-833152157-8 (ISBN).
Abstract | Links | BibTeX | Tags: Generative AI, Images synthesis, Immersive, Interactive Environments, Language Model, Large language model, Storytelling, User input, User study, Users' experiences, Virtual environments, VR
@inproceedings{gatti_around_2025,
title = {Around the Virtual Campfire: Early {UX} Insights into {AI-Generated} Stories in {VR}},
author = {E. Gatti and D. Giunchi and N. Numan and A. Steed},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105000263662&doi=10.1109%2fAIxVR63409.2025.00027&partnerID=40&md5=cd804d892d45554e936d0221508b3447},
doi = {10.1109/AIxVR63409.2025.00027},
isbn = {979-833152157-8},
year = {2025},
date = {2025-01-01},
booktitle = {Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR},
pages = {136--141},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Virtual Reality (VR) presents an immersive platform for storytelling, allowing narratives to unfold in highly engaging, interactive environments. Leveraging AI capabilities and image synthesis offers new possibilities for creating scalable, generative VR content. In this work, we use an LLM-driven VR storytelling platform to explore how AI-generated visuals and narrative elements impact the user experience in VR storytelling. Previously, we presented AIsop, a system to integrate LLM-generated text and images and TTS audio into a storytelling experience, where the narrative unfolds based on user input. In this paper, we present two user studies focusing on how AI-generated visuals influence narrative perception and the overall VR experience. Our findings highlight the positive impact of AI-generated pictorial content on the storytelling experience, highlighting areas for enhancement and further research in interactive narrative design. © 2025 IEEE.},
keywords = {Generative AI, Images synthesis, Immersive, Interactive Environments, Language Model, Large language model, Storytelling, User input, User study, Users' experiences, Virtual environments, VR},
pubstate = {published},
tppubtype = {inproceedings}
}
Guo, P.; Zhang, Q.; Tian, C.; Xue, W.; Feng, X.
Digital Human Techniques for Education Reform Proceedings Article
In: ICETM - Proc. Int. Conf. Educ. Technol. Manag., pp. 173–178, Association for Computing Machinery, Inc, 2025, ISBN: 979-840071746-8 (ISBN).
Abstract | Links | BibTeX | Tags: Augmented Reality, Contrastive Learning, Digital elevation model, Digital human technique, Digital Human Techniques, Digital humans, Education Reform, Education reforms, Educational Technology, Express emotions, Federated learning, Human behaviors, Human form models, Human techniques, Immersive, Innovative technology, Modeling languages, Natural language processing systems, Teachers', Teaching, Virtual environments, Virtual humans
@inproceedings{guo_digital_2025,
title = {Digital Human Techniques for Education Reform},
author = {P. Guo and Q. Zhang and C. Tian and W. Xue and X. Feng},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105001671326&doi=10.1145%2f3711403.3711428&partnerID=40&md5=dd96647315af9409d119f68f9cf4e980},
doi = {10.1145/3711403.3711428},
isbn = {979-840071746-8},
year = {2025},
date = {2025-01-01},
booktitle = {ICETM - Proc. Int. Conf. Educ. Technol. Manag.},
pages = {173--178},
publisher = {Association for Computing Machinery, Inc},
abstract = {The rapid evolution of artificial intelligence, big data, and generative AI models has ushered in significant transformations across various sectors, including education. Digital Human Technique, an innovative technology grounded in advanced computer science and artificial intelligence, is reshaping educational paradigms by enabling virtual humans to simulate human behavior, express emotions, and interact with users. This paper explores the application of Digital Human Technique in education reform, focusing on creating immersive, intelligent classroom experiences that foster meaningful interactions between teachers and students. We define Digital Human Technique and delve into its key technical components such as character modeling and rendering, natural language processing, computer vision, and augmented reality technologies. Our methodology involves analyzing the role of educational digital humans created through these technologies, assessing their impact on educational processes, and examining various application scenarios in educational reform. Results indicate that Digital Human Technique significantly enhances the learning experience by enabling personalized teaching, increasing engagement, and fostering emotional connections. Educational digital humans serve as virtual teachers, interactive learning aids, and facilitators of emotional interaction, effectively addressing the challenges of traditional educational methods. They also promote a deeper understanding of complex concepts through simulated environments and interactive digital content. © 2024 Copyright held by the owner/author(s).},
keywords = {Augmented Reality, Contrastive Learning, Digital elevation model, Digital human technique, Digital Human Techniques, Digital humans, Education Reform, Education reforms, Educational Technology, Express emotions, Federated learning, Human behaviors, Human form models, Human techniques, Immersive, Innovative technology, Modeling languages, Natural language processing systems, Teachers', Teaching, Virtual environments, Virtual humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Oliveira, E. A. Masasi De; Sousa, R. T.; Bastos, A. A.; Cintra, L. Martins De Freitas; Filho, A. R. G.
Immersive Virtual Museums with Spatially-Aware Retrieval-Augmented Generation Proceedings Article
In: IMX - Proc. ACM Int. Conf. Interact. Media Experiences, pp. 437–440, Association for Computing Machinery, Inc, 2025, ISBN: 979-840071391-0 (ISBN).
Abstract | Links | BibTeX | Tags: Association reactions, Behavioral Research, Generation systems, Geographics, Human computer interaction, Human engineering, Immersive, Information Retrieval, Interactive computer graphics, Language Model, Large language model, large language models, Museums, Retrieval-Augmented Generation, Search engines, Spatially aware, User interfaces, Virtual environments, Virtual museum, Virtual museum., Virtual Reality, Visual Attention, Visual languages
@inproceedings{masasi_de_oliveira_immersive_2025,
title = {Immersive Virtual Museums with {Spatially-Aware} {Retrieval-Augmented} Generation},
author = {E. A. Masasi De Oliveira and R. T. Sousa and A. A. Bastos and L. Martins De Freitas Cintra and A. R. G. Filho},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105007979183&doi=10.1145%2f3706370.3731643&partnerID=40&md5=db10b41217dd8a0b0705c3fb4a615666},
doi = {10.1145/3706370.3731643},
isbn = {979-840071391-0},
year = {2025},
date = {2025-01-01},
booktitle = {IMX - Proc. ACM Int. Conf. Interact. Media Experiences},
pages = {437--440},
publisher = {Association for Computing Machinery, Inc},
abstract = {Virtual Reality has significantly expanded possibilities for immersive museum experiences, overcoming traditional constraints such as space, preservation, and geographic limitations. However, existing virtual museum platforms typically lack dynamic, personalized, and contextually accurate interactions. To address this, we propose Spatially-Aware Retrieval-Augmented Generation (SA-RAG), an innovative framework integrating visual attention tracking with Retrieval-Augmented Generation systems and advanced Large Language Models. By capturing users' visual attention in real time, SA-RAG dynamically retrieves contextually relevant data, enhancing the accuracy, personalization, and depth of user interactions within immersive virtual environments. The system's effectiveness is initially demonstrated through our preliminary tests within a realistic VR museum implemented using Unreal Engine. Although promising, comprehensive human evaluations involving broader user groups are planned for future studies to rigorously validate SA-RAG's effectiveness, educational enrichment potential, and accessibility improvements in virtual museums. The framework also presents opportunities for broader applications in immersive educational and storytelling domains. © 2025 Copyright held by the owner/author(s).},
keywords = {Association reactions, Behavioral Research, Generation systems, Geographics, Human computer interaction, Human engineering, Immersive, Information Retrieval, Interactive computer graphics, Language Model, Large language model, large language models, Museums, Retrieval-Augmented Generation, Search engines, Spatially aware, User interfaces, Virtual environments, Virtual museum, Virtual museum., Virtual Reality, Visual Attention, Visual languages},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhang, H.; Zha, S.; Cai, J.; Wohn, D. Y.; Carroll, J. M.
Generative AI in Virtual Reality Communities: A Preliminary Analysis of the VRChat Discord Community Proceedings Article
In: Conf Hum Fact Comput Syst Proc, Association for Computing Machinery, 2025, ISBN: 979-840071395-8 (ISBN).
Abstract | Links | BibTeX | Tags: AI assistant, AI Technologies, Coding framework, Ethical technology, Human-ai collaboration, Immersive, On-line communities, online community, Preliminary analysis, Property, Qualitative analysis, user experience, Users' experiences
@inproceedings{zhang_generative_2025,
title = {Generative {AI} in Virtual Reality Communities: A Preliminary Analysis of the {VRChat} {Discord} Community},
author = {H. Zhang and S. Zha and J. Cai and D. Y. Wohn and J. M. Carroll},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005770564&doi=10.1145%2f3706599.3720120&partnerID=40&md5=9bdfc4e70b9b361d67791932f5a56413},
doi = {10.1145/3706599.3720120},
isbn = {979-840071395-8},
year = {2025},
date = {2025-01-01},
booktitle = {Conf Hum Fact Comput Syst Proc},
publisher = {Association for Computing Machinery},
abstract = {As immersive social platforms like VRChat increasingly adopt generative AI (GenAI) technologies, it becomes critical to understand how community members perceive, negotiate, and utilize these tools. In this preliminary study, we conducted a qualitative analysis of VRChat-related Discord discussions, employing a deductive coding framework to identify key themes related to AI-assisted content creation, intellectual property disputes, and evolving community norms. Our findings offer preliminary insights into the complex interplay between the community’s enthusiasm for AI-driven creativity and deep-rooted ethical and legal concerns. Users weigh issues of fair use, data ethics, intellectual property, and the role of community governance in establishing trust. By highlighting the tensions and trade-offs as users embrace new creative opportunities while seeking transparency, fair attribution, and equitable policies, this research offers valuable insights for designers, platform administrators, and policymakers aiming to foster responsible, inclusive, and ethically sound AI integration in future immersive virtual environments. © 2025 Copyright held by the owner/author(s).},
keywords = {AI assistant, AI Technologies, Coding framework, Ethical technology, Human-ai collaboration, Immersive, On-line communities, online community, Preliminary analysis, Property, Qualitative analysis, user experience, Users' experiences},
pubstate = {published},
tppubtype = {inproceedings}
}
Casas, L.; Mitchell, K.
Structured Teaching Prompt Articulation for Generative-AI Role Embodiment with Augmented Mirror Video Displays Proceedings Article
In: Spencer, S. N. (Ed.): Proc.: VRCAI - ACM SIGGRAPH Int. Conf. Virtual-Reality Contin. Appl. Ind., Association for Computing Machinery, Inc, 2025, ISBN: 979-840071348-4 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Augmented Reality, Computer interaction, Contrastive Learning, Cultural icon, Experiential learning, Generative adversarial networks, Generative AI, human-computer interaction, Immersive, Pedagogical practices, Role-based, Teachers', Teaching, Video display, Virtual environments, Virtual Reality
@inproceedings{casas_structured_2025,
title = {Structured Teaching Prompt Articulation for {Generative-AI} Role Embodiment with Augmented Mirror Video Displays},
author = {L. Casas and K. Mitchell},
editor = {Spencer, S. N.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85217997060&doi=10.1145%2f3703619.3706049&partnerID=40&md5=7141c5dac7882232c6ee8e0bef0ba84e},
doi = {10.1145/3703619.3706049},
isbn = {979-840071348-4},
year = {2025},
date = {2025-01-01},
booktitle = {Proc.: VRCAI - ACM SIGGRAPH Int. Conf. Virtual-Reality Contin. Appl. Ind.},
publisher = {Association for Computing Machinery, Inc},
abstract = {We present a classroom enhanced with augmented reality video display in which students adopt snapshots of their corresponding virtual personas according to their teacher's live articulated spoken educational theme, linearly, such as historical figures, famous scientists, cultural icons, and laterally according to archetypal categories such as world dance styles. We define a structure of generative AI prompt guidance to assist teachers with focused specified visual role embodiment stylization. By leveraging role-based immersive embodiment, our proposed approach enriches pedagogical practices that prioritize experiential learning. © 2024 ACM.},
keywords = {Artificial intelligence, Augmented Reality, Computer interaction, Contrastive Learning, Cultural icon, Experiential learning, Generative adversarial networks, Generative AI, human-computer interaction, Immersive, Pedagogical practices, Role-based, Teachers', Teaching, Video display, Virtual environments, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Lopes, M. K. S.; Falk, T. H.
Generative AI for Personalized Multisensory Immersive Experiences: Challenges and Opportunities for Stress Reduction Proceedings Article
In: Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW, pp. 143–146, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 979-833151484-6 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence tools, Environment personalization, Forest bathing, Generative AI, Immersive, Multi-Sensory, Multi-sensory virtual reality, Multisensory, Personalizations, Relaxation, Virtual Reality, Virtualization
@inproceedings{lopes_generative_2025,
title = {Generative {AI} for Personalized Multisensory Immersive Experiences: Challenges and Opportunities for Stress Reduction},
author = {M. K. S. Lopes and T. H. Falk},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005149501&doi=10.1109%2fVRW66409.2025.00036&partnerID=40&md5=e0a94aebd683f257899474d4c486d784},
doi = {10.1109/VRW66409.2025.00036},
isbn = {979-833151484-6},
year = {2025},
date = {2025-01-01},
booktitle = {Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW},
pages = {143--146},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Stress management and relaxation are critical areas of interest in mental health and well-being. Forest bathing is a practice that has been shown to have a positive effect on reducing stress by stimulating all the senses in an immersive nature experience. Since access to nature is not universally available to everyone, virtual reality has emerged as a promising tool to simulate this type of experience. Furthermore, generative artificial intelligence (GenAI) tools offer new opportunities to create highly personalized and immersive experiences that can enhance relaxation and reduce stress. This study explores the potential of personalized multisensory VR environments, designed using GenAI tools, to optimize relaxation and stress relief via two experiments that are currently underway. The first evaluates the effectiveness of non-personalized versus personalized VR scenes generated using AI tools to promote increased relaxation. The second explores the potential benefits of providing the user with additional personalization tools, from adding new virtual elements to the AI-generated scene, to adding AI-generated sounds and scent/haptics customization. Ultimately, this research aims to identify which customizable elements may lead to improved therapeutic benefits for multisensory VR experiences. © 2025 IEEE.},
keywords = {Artificial intelligence tools, Environment personalization, Forest bathing, Generative AI, Immersive, Multi-Sensory, Multi-sensory virtual reality, Multisensory, Personalizations, Relaxation, Virtual Reality, Virtualization},
pubstate = {published},
tppubtype = {inproceedings}
}
Vachha, C.; Kang, Y.; Dive, Z.; Chidambaram, A.; Gupta, A.; Jun, E.; Hartmann, B.
Dreamcrafter: Immersive Editing of 3D Radiance Fields Through Flexible, Generative Inputs and Outputs Proceedings Article
In: Conf Hum Fact Comput Syst Proc, Association for Computing Machinery, 2025, ISBN: 979-840071394-1 (ISBN).
Abstract | Links | BibTeX | Tags: 3D modeling, 3D scenes, AI assisted creativity tool, Animation, Computer vision, Direct manipulation, Drawing (graphics), Gaussian Splatting, Gaussians, Generative AI, Graphic, Graphics, High level languages, Immersive, Interactive computer graphics, Splatting, Three dimensional computer graphics, Virtual Reality, Worldbuilding interface
@inproceedings{vachha_dreamcrafter_2025,
title = {{Dreamcrafter}: Immersive Editing of {3D} Radiance Fields Through Flexible, Generative Inputs and Outputs},
author = {C. Vachha and Y. Kang and Z. Dive and A. Chidambaram and A. Gupta and E. Jun and B. Hartmann},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005725679&doi=10.1145%2f3706598.3714312&partnerID=40&md5=68cf2a08d3057fd9756e25d53959872b},
doi = {10.1145/3706598.3714312},
isbn = {979-840071394-1},
year = {2025},
date = {2025-01-01},
booktitle = {Conf Hum Fact Comput Syst Proc},
publisher = {Association for Computing Machinery},
abstract = {Authoring 3D scenes is a central task for spatial computing applications. Competing visions for lowering existing barriers are (1) focus on immersive, direct manipulation of 3D content or (2) leverage AI techniques that capture real scenes (3D Radiance Fields such as, NeRFs, 3D Gaussian Splatting) and modify them at a higher level of abstraction, at the cost of high latency. We unify the complementary strengths of these approaches and investigate how to integrate generative AI advances into real-time, immersive 3D Radiance Field editing. We introduce Dreamcrafter, a VR-based 3D scene editing system that: (1) provides a modular architecture to integrate generative AI algorithms; (2) combines different levels of control for creating objects, including natural language and direct manipulation; and (3) introduces proxy representations that support interaction during high-latency operations. We contribute empirical findings on control preferences and discuss how generative AI interfaces beyond text input enhance creativity in scene editing and world building. © 2025 Copyright held by the owner/author(s).},
keywords = {3D modeling, 3D scenes, AI assisted creativity tool, Animation, Computer vision, Direct manipulation, Drawing (graphics), Gaussian Splatting, Gaussians, Generative AI, Graphic, Graphics, High level languages, Immersive, Interactive computer graphics, Splatting, Three dimensional computer graphics, Virtual Reality, Worldbuilding interface},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, J.; Wu, X.; Lan, T.; Li, B.
LLMER: Crafting Interactive Extended Reality Worlds with JSON Data Generated by Large Language Models Journal Article
In: IEEE Transactions on Visualization and Computer Graphics, vol. 31, no. 5, pp. 2715–2724, 2025, ISSN: 10772626 (ISSN).
Abstract | Links | BibTeX | Tags: % reductions, 3D modeling, algorithm, Algorithms, Augmented Reality, Coding errors, Computer graphics, Computer interaction, computer interface, Computer simulation languages, Extended reality, generative artificial intelligence, human, Human users, human-computer interaction, Humans, Imaging, Immersive, Language, Language Model, Large language model, large language models, Metadata, Natural Language Processing, Natural language processing systems, Natural languages, procedures, Script generation, Spatio-temporal data, Three dimensional computer graphics, Three-Dimensional, three-dimensional imaging, User-Computer Interface, Virtual Reality
@article{chen_llmer_2025,
title = {{LLMER}: Crafting Interactive Extended Reality Worlds with {JSON} Data Generated by Large Language Models},
author = {J. Chen and X. Wu and T. Lan and B. Li},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105003825793&doi=10.1109%2fTVCG.2025.3549549&partnerID=40&md5=da4681d0714548e3a7e0c8c3295d2348},
doi = {10.1109/TVCG.2025.3549549},
issn = {1077-2626},
year = {2025},
date = {2025-01-01},
journal = {IEEE Transactions on Visualization and Computer Graphics},
volume = {31},
number = {5},
pages = {2715--2724},
abstract = {The integration of Large Language Models (LLMs) like GPT-4 with Extended Reality (XR) technologies offers the potential to build truly immersive XR environments that interact with human users through natural language, e.g., generating and animating 3D scenes from audio inputs. However, the complexity of XR environments makes it difficult to accurately extract relevant contextual data and scene/object parameters from an overwhelming volume of XR artifacts. It leads to not only increased costs with pay-per-use models, but also elevated levels of generation errors. Moreover, existing approaches focusing on coding script generation are often prone to generation errors, resulting in flawed or invalid scripts, application crashes, and ultimately a degraded user experience. To overcome these challenges, we introduce LLMER, a novel framework that creates interactive XR worlds using JSON data generated by LLMs. Unlike prior approaches focusing on coding script generation, LLMER translates natural language inputs into JSON data, significantly reducing the likelihood of application crashes and processing latency. It employs a multi-stage strategy to supply only the essential contextual information adapted to the user's request and features multiple modules designed for various XR tasks. Our preliminary user study reveals the effectiveness of the proposed system, with over 80% reduction in consumed tokens and around 60% reduction in task completion time compared to state-of-the-art approaches. The analysis of users' feedback also illuminates a series of directions for further optimization. © 1995-2012 IEEE.},
keywords = {% reductions, 3D modeling, algorithm, Algorithms, Augmented Reality, Coding errors, Computer graphics, Computer interaction, computer interface, Computer simulation languages, Extended reality, generative artificial intelligence, human, Human users, human-computer interaction, Humans, Imaging, Immersive, Language, Language Model, Large language model, large language models, Metadata, Natural Language Processing, Natural language processing systems, Natural languages, procedures, Script generation, Spatio-temporal data, Three dimensional computer graphics, Three-Dimensional, three-dimensional imaging, User-Computer Interface, Virtual Reality},
pubstate = {published},
tppubtype = {article}
}
Zhao, S.; Huang, Y.; He, X.; Tong, X.; Li, X.; Wu, D.
Reviving Mural Art through Generative AI: A Comparative Study of AI-Generated and Hand-Crafted Recreations Proceedings Article
In: Conf Hum Fact Comput Syst Proc, Association for Computing Machinery, 2025, ISBN: 979-840071394-1 (ISBN).
Abstract | Links | BibTeX | Tags: Biographies, Comparatives studies, Culture heritage, Dunhuang mural, Generative AI, Historic Preservation, Immersive, Interactive platform, Labour-intensive, Large-scales, Mural recreation, User study, Virtual Reality
@inproceedings{zhao_reviving_2025,
title = {Reviving Mural Art through Generative {AI}: A Comparative Study of {AI-Generated} and {Hand-Crafted} Recreations},
author = {S. Zhao and Y. Huang and X. He and X. Tong and X. Li and D. Wu},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005707513&doi=10.1145%2f3706598.3714157&partnerID=40&md5=aab480eed29cdb31cc7720f915857b52},
doi = {10.1145/3706598.3714157},
isbn = {979-840071394-1},
year = {2025},
date = {2025-01-01},
booktitle = {Conf Hum Fact Comput Syst Proc},
publisher = {Association for Computing Machinery},
abstract = {Virtual reality (VR) provides an immersive and interactive platform for presenting ancient murals, enhancing users' understanding and appreciation of these invaluable culture treasures. However, traditional hand-crafted methods for recreating murals in VR are labor-intensive, time-consuming, and require significant expertise, limiting their scalability for large-scale mural scenes. To address these challenges, we propose a comprehensive pipeline that leverages generative AI to automate the mural recreation process. This pipeline is validated by the reconstruction of Foguang Temple scene in Dunhuang Murals. A user study comparing the AI-generated scene with a hand-crafted one reveals no significant differences in presence, authenticity, engagement and enjoyment, and emotion. Additionally, our findings identify areas for improvement in AI-generated recreations, such as enhancing historical fidelity and offering customization. This work paves the way for more scalable, efficient, and accessible methods of revitalizing cultural heritage in VR, offering new opportunities for mural preservation, demonstration, and dissemination using VR. © 2025 Copyright held by the owner/author(s).},
keywords = {Biographies, Comparatives studies, Culture heritage, Dunhuang mural, Generative AI, Historic Preservation, Immersive, Interactive platform, Labour-intensive, Large-scales, Mural recreation, User study, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Barbu, M.; Iordache, D. -D.; Petre, I.; Barbu, D. -C.; Băjenaru, L.
Framework Design for Reinforcing the Potential of XR Technologies in Transforming Inclusive Education Journal Article
In: Applied Sciences (Switzerland), vol. 15, no. 3, 2025, ISSN: 20763417 (ISSN).
Abstract | Links | BibTeX | Tags: Adaptive Learning, Adversarial machine learning, Artificial intelligence technologies, Augmented Reality, Contrastive Learning, Educational Technology, Extended reality (XR), Federated learning, Framework designs, Generative adversarial networks, Immersive, immersive experience, Immersive learning, Inclusive education, Learning platform, Special education needs
@article{barbu_framework_2025,
title = {Framework Design for Reinforcing the Potential of {XR} Technologies in Transforming Inclusive Education},
author = {M. Barbu and D.-D. Iordache and I. Petre and D.-C. Barbu and L. Băjenaru},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85217742383&doi=10.3390%2fapp15031484&partnerID=40&md5=3148ff2a8a8fa1bef8094199cd6d32e3},
doi = {10.3390/app15031484},
issn = {2076-3417},
year = {2025},
date = {2025-01-01},
journal = {Applied Sciences (Switzerland)},
volume = {15},
number = {3},
abstract = {This study presents a novel approach to inclusive education by integrating augmented reality (XR) and generative artificial intelligence (AI) technologies into an immersive and adaptive learning platform designed for students with special educational needs. Building upon existing solutions, the approach uniquely combines XR and generative AI to facilitate personalized, accessible, and interactive learning experiences tailored to individual requirements. The framework incorporates an intuitive Unity XR-based interface alongside a generative AI module to enable near real-time customization of content and interactions. Additionally, the study examines related generative AI initiatives that promote inclusion through enhanced communication tools, educational support, and customizable assistive technologies. The motivation for this study arises from the pressing need to address the limitations of traditional educational methods, which often fail to meet the diverse needs of learners with special educational requirements. The integration of XR and generative AI offers transformative potential by creating adaptive, immersive, and inclusive learning environments. This approach ensures real-time adaptability to individual progress and accessibility, addressing critical barriers such as static content and lack of inclusivity in existing systems. The research outlines a pathway toward more inclusive and equitable education, significantly enhancing opportunities for learners with diverse needs and contributing to broader social integration and equity in education. © 2025 by the authors.},
keywords = {Adaptive Learning, Adversarial machine learning, Artificial intelligence technologies, Augmented Reality, Contrastive Learning, Educational Technology, Extended reality (XR), Federated learning, Framework designs, Generative adversarial networks, Immersive, immersive experience, Immersive learning, Inclusive education, Learning platform, Special education needs},
pubstate = {published},
tppubtype = {article}
}
Stacchio, L.; Balloni, E.; Frontoni, E.; Paolanti, M.; Zingaretti, P.; Pierdicca, R.
MineVRA: Exploring the Role of Generative AI-Driven Content Development in XR Environments through a Context-Aware Approach Journal Article
In: IEEE Transactions on Visualization and Computer Graphics, vol. 31, no. 5, pp. 3602–3612, 2025, ISSN: 10772626 (ISSN).
Abstract | Links | BibTeX | Tags: adult, Article, Artificial intelligence, Computer graphics, Computer vision, Content Development, Contents development, Context-Aware, Context-aware approaches, Extended reality, female, Generative adversarial networks, Generative AI, generative artificial intelligence, human, Human-in-the-loop, Immersive, Immersive environment, male, Multi-modal, User need, Virtual environments, Virtual Reality
@article{stacchio_minevra_2025,
title = {{MineVRA}: Exploring the Role of Generative {AI}-Driven Content Development in {XR} Environments through a Context-Aware Approach},
author = {L. Stacchio and E. Balloni and E. Frontoni and M. Paolanti and P. Zingaretti and R. Pierdicca},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105003746367&doi=10.1109%2fTVCG.2025.3549160&partnerID=40&md5=70b162b574eebbb0cb71db871aa787e1},
doi = {10.1109/TVCG.2025.3549160},
issn = {1077-2626},
year = {2025},
date = {2025-01-01},
journal = {IEEE Transactions on Visualization and Computer Graphics},
volume = {31},
number = {5},
pages = {3602--3612},
abstract = {The convergence of Artificial Intelligence (AI), Computer Vision (CV), Computer Graphics (CG), and Extended Reality (XR) is driving innovation in immersive environments. A key challenge in these environments is the creation of personalized 3D assets, traditionally achieved through manual modeling, a time-consuming process that often fails to meet individual user needs. More recently, Generative AI (GenAI) has emerged as a promising solution for automated, context-aware content generation. In this paper, we present MineVRA (Multimodal generative artificial iNtelligence for contExt-aware Virtual Reality Assets), a novel Human-In-The-Loop (HITL) XR framework that integrates GenAI to facilitate coherent and adaptive 3D content generation in immersive scenarios. To evaluate the effectiveness of this approach, we conducted a comparative user study analyzing the performance and user satisfaction of GenAI-generated 3D objects compared to those generated by Sketchfab in different immersive contexts. The results suggest that GenAI can significantly complement traditional 3D asset libraries, with valuable design implications for the development of human-centered XR environments. © 1995-2012 IEEE.},
keywords = {adult, Article, Artificial intelligence, Computer graphics, Computer vision, Content Development, Contents development, Context-Aware, Context-aware approaches, Extended reality, female, Generative adversarial networks, Generative AI, generative artificial intelligence, human, Human-in-the-loop, Immersive, Immersive environment, male, Multi-modal, User need, Virtual environments, Virtual Reality},
pubstate = {published},
tppubtype = {article}
}
Yadav, R.; Huzooree, G.; Yadav, M.; Gangodawilage, D. S. K.
Generative AI for personalized learning content creation Book Section
In: Transformative AI Practices for Personalized Learning Strategies, pp. 107–130, IGI Global, 2025, ISBN: 979-836938746-7 (ISBN); 979-836938744-3 (ISBN).
Abstract | Links | BibTeX | Tags: Adaptive feedback, Advanced Analytics, AI systems, Contrastive Learning, Educational contents, Educational experiences, Enhanced learning, Ethical technology, Federated learning, Immersive, Learning content creation, Personalized learning, Student engagement, Students, Supervised learning, Tools and applications, Virtual Reality
@incollection{yadav_generative_2025,
title = {Generative {AI} for personalized learning content creation},
author = {R. Yadav and G. Huzooree and M. Yadav and D. S. K. Gangodawilage},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005387236&doi=10.4018%2f979-8-3693-8744-3.ch005&partnerID=40&md5=904e58b9c6de83dcd431c1706dda02b3},
doi = {10.4018/979-8-3693-8744-3.ch005},
isbn = {979-8-3693-8746-7; 979-8-3693-8744-3},
year = {2025},
date = {2025-01-01},
booktitle = {Transformative {AI} Practices for Personalized Learning Strategies},
pages = {107--130},
publisher = {IGI Global},
abstract = {Generative AI has emerged as a transformative force in personalized learning, offering unprecedented opportunities to tailor educational content to individual needs. By leveraging advanced algorithms and data analysis, AI systems can dynamically generate customized materials, provide adaptive feedback, and foster student engagement. This chapter explores the intersection of generative AI and personalized learning, discussing its techniques, tools, and applications in creating immersive and adaptive educational experiences. Key benefits include enhanced learning outcomes, efficiency, and scalability. However, challenges such as data privacy, algorithmic bias, and equitable access must be addressed to ensure responsible implementation. Future trends, including the integration of immersive technologies like Virtual Reality (VR) and predictive analytics, highlight AI's potential to revolutionize education. By navigating ethical considerations and fostering transparency, generative AI can become a powerful ally in creating inclusive, engaging, and student- centered learning environments. © 2025, IGI Global Scientific Publishing. All rights reserved.},
keywords = {Adaptive feedback, Advanced Analytics, AI systems, Contrastive Learning, Educational contents, Educational experiences, Enhanced learning, Ethical technology, Federated learning, Immersive, Learning content creation, Personalized learning, Student engagement, Students, Supervised learning, Tools and applications, Virtual Reality},
pubstate = {published},
tppubtype = {incollection}
}