AHCI RESEARCH GROUP
Publications
Papers published in international journals,
proceedings of conferences, workshops and books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTeX record for each paper.
2025
Yadav, R.; Huzooree, G.; Yadav, M.; Gangodawilage, D. S. K.
Generative AI for personalized learning content creation Book Section
In: Transformative AI Practices for Personalized Learning Strategies, pp. 107–130, IGI Global, 2025, ISBN: 979-836938746-7 (ISBN); 979-836938744-3 (ISBN).
Abstract | Links | BibTeX | Tags: Adaptive feedback, Advanced Analytics, AI systems, Contrastive Learning, Educational contents, Educational experiences, Enhanced learning, Ethical technology, Federated learning, Immersive, Learning content creation, Personalized learning, Student engagement, Students, Supervised learning, Tools and applications, Virtual Reality
@incollection{yadav_generative_2025,
  title     = {Generative {AI} for personalized learning content creation},
  author    = {Yadav, R. and Huzooree, G. and Yadav, M. and Gangodawilage, D. S. K.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005387236&doi=10.4018%2f979-8-3693-8744-3.ch005&partnerID=40&md5=904e58b9c6de83dcd431c1706dda02b3},
  doi       = {10.4018/979-8-3693-8744-3.ch005},
  isbn      = {979-836938746-7; 979-836938744-3},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Transformative AI Practices for Personalized Learning Strategies},
  pages     = {107--130},
  publisher = {IGI Global},
  abstract  = {Generative AI has emerged as a transformative force in personalized learning, offering unprecedented opportunities to tailor educational content to individual needs. By leveraging advanced algorithms and data analysis, AI systems can dynamically generate customized materials, provide adaptive feedback, and foster student engagement. This chapter explores the intersection of generative AI and personalized learning, discussing its techniques, tools, and applications in creating immersive and adaptive educational experiences. Key benefits include enhanced learning outcomes, efficiency, and scalability. However, challenges such as data privacy, algorithmic bias, and equitable access must be addressed to ensure responsible implementation. Future trends, including the integration of immersive technologies like Virtual Reality (VR) and predictive analytics, highlight AI's potential to revolutionize education. By navigating ethical considerations and fostering transparency, generative AI can become a powerful ally in creating inclusive, engaging, and student- centered learning environments. © 2025, IGI Global Scientific Publishing. All rights reserved.},
  keywords  = {Adaptive feedback, Advanced Analytics, AI systems, Contrastive Learning, Educational contents, Educational experiences, Enhanced learning, Ethical technology, Federated learning, Immersive, Learning content creation, Personalized learning, Student engagement, Students, Supervised learning, Tools and applications, Virtual Reality},
  pubstate  = {published},
  tppubtype = {incollection},
}
Gatti, E.; Giunchi, D.; Numan, N.; Steed, A.
Around the Virtual Campfire: Early UX Insights into AI-Generated Stories in VR Proceedings Article
In: Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR, pp. 136–141, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331521578 (ISBN).
Abstract | Links | BibTeX | Tags: Generative AI, Images synthesis, Immersive, Interactive Environments, Language Model, Large language model, Storytelling, User input, User study, Users' experiences, Virtual environments, VR
@inproceedings{gatti_around_2025,
  title     = {Around the Virtual Campfire: Early {UX} Insights into {AI}-Generated Stories in {VR}},
  author    = {Gatti, E. and Giunchi, D. and Numan, N. and Steed, A.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105000263662&doi=10.1109%2FAIxVR63409.2025.00027&partnerID=40&md5=ab95e803af14233db6ed307222632542},
  doi       = {10.1109/AIxVR63409.2025.00027},
  isbn      = {9798331521578},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR},
  pages     = {136--141},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {Virtual Reality (VR) presents an immersive platform for storytelling, allowing narratives to unfold in highly engaging, interactive environments. Leveraging AI capabilities and image synthesis offers new possibilities for creating scalable, generative VR content. In this work, we use an LLM-driven VR storytelling platform to explore how AI-generated visuals and narrative elements impact the user experience in VR storytelling. Previously, we presented AIsop, a system to integrate LLM-generated text and images and TTS audio into a storytelling experience, where the narrative unfolds based on user input. In this paper, we present two user studies focusing on how AI-generated visuals influence narrative perception and the overall VR experience. Our findings highlight the positive impact of AI-generated pictorial content on the storytelling experience, highlighting areas for enhancement and further research in interactive narrative design. © 2025 Elsevier B.V., All rights reserved.},
  keywords  = {Generative AI, Images synthesis, Immersive, Interactive Environments, Language Model, Large language model, Storytelling, User input, User study, Users' experiences, Virtual environments, VR},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Casas, L.; Hannah, S.; Mitchell, K.
HoloJig: Interactive Spoken Prompt Specified Generative AI Environments Journal Article
In: IEEE Computer Graphics and Applications, vol. 45, no. 2, pp. 69–77, 2025, ISSN: 02721716 (ISSN); 15581756 (ISSN), (Publisher: IEEE Computer Society).
Abstract | Links | BibTeX | Tags: 3-D rendering, Article, Collaborative workspace, customer experience, Economic and social effects, generative artificial intelligence, human, Immersive, Immersive environment, parallax, Real- time, simulation, Simulation training, speech, Time based, Virtual environments, Virtual Reality, Virtual reality experiences, Virtual spaces, VR systems
@article{casas_holojig_2025,
  title     = {{HoloJig}: Interactive Spoken Prompt Specified Generative {AI} Environments},
  author    = {Casas, L. and Hannah, S. and Mitchell, K.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105001182100&doi=10.1109%2FMCG.2025.3553780&partnerID=40&md5=9fafa25e4b6ddc9d2fe32d813fbabb20},
  doi       = {10.1109/MCG.2025.3553780},
  issn      = {0272-1716; 1558-1756},
  year      = {2025},
  date      = {2025-01-01},
  journal   = {IEEE Computer Graphics and Applications},
  volume    = {45},
  number    = {2},
  pages     = {69--77},
  abstract  = {HoloJig offers an interactive, speech-to-virtual reality (VR), VR experience that generates diverse environments in real time based on live spoken descriptions. Unlike traditional VR systems that rely on prebuilt assets, HoloJig dynamically creates personalized and immersive virtual spaces with depth-based parallax 3-D rendering, allowing users to define the characteristics of their immersive environment through verbal prompts. This generative approach opens up new possibilities for interactive experiences, including simulations, training, collaborative workspaces, and entertainment. In addition to speech-to-VR environment generation, a key innovation of HoloJig is its progressive visual transition mechanism, which smoothly dissolves between previously generated and newly requested environments, mitigating the delay caused by neural computations. This feature ensures a seamless and continuous user experience, even as new scenes are being rendered on remote servers. © 2025 Elsevier B.V., All rights reserved.},
  note      = {Publisher: IEEE Computer Society},
  keywords  = {3-D rendering, Article, Collaborative workspace, customer experience, Economic and social effects, generative artificial intelligence, human, Immersive, Immersive environment, parallax, Real- time, simulation, Simulation training, speech, Time based, Virtual environments, Virtual Reality, Virtual reality experiences, Virtual spaces, VR systems},
  pubstate  = {published},
  tppubtype = {article},
}
Li, Y.; Pang, E. C. H.; Ng, C. S. Y.; Azim, M.; Leung, H.
Enhancing Linear Algebra Education with AI-Generated Content in the CityU Metaverse: A Comparative Study Proceedings Article
In: T., Hao; J.G., Wu; X., Luo; Y., Sun; Y., Mu; S., Ge; W., Xie (Ed.): Lect. Notes Comput. Sci., pp. 3–16, Springer Science and Business Media Deutschland GmbH, 2025, ISBN: 03029743 (ISSN); 978-981964406-3 (ISBN).
Abstract | Links | BibTeX | Tags: Comparatives studies, Digital age, Digital interactions, digital twin, Educational metaverse, Engineering education, Generative AI, Immersive, Matrix algebra, Metaverse, Metaverses, Personnel training, Students, Teaching, University campus, Virtual environments, virtual learning environment, Virtual learning environments, Virtual Reality, Virtualization
@inproceedings{li_enhancing_2025,
  title     = {Enhancing Linear Algebra Education with {AI}-Generated Content in the {CityU} Metaverse: A Comparative Study},
  author    = {Li, Y. and Pang, E. C. H. and Ng, C. S. Y. and Azim, M. and Leung, H.},
  editor    = {Hao, T. and Wu, J. G. and Luo, X. and Sun, Y. and Mu, Y. and Ge, S. and Xie, W.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105003632691&doi=10.1007%2f978-981-96-4407-0_1&partnerID=40&md5=c067ba5d4c15e9c0353bf315680531fc},
  doi       = {10.1007/978-981-96-4407-0_1},
  issn      = {0302-9743},
  isbn      = {978-981964406-3},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Lect. Notes Comput. Sci.},
  volume    = {15589 LNCS},
  pages     = {3--16},
  publisher = {Springer Science and Business Media Deutschland GmbH},
  abstract  = {In today’s digital age, the metaverse is emerging as the forthcoming evolution of the internet. It provides an immersive space that marks a new frontier in the way digital interactions are facilitated and experienced. In this paper, we present the CityU Metaverse, which aims to construct a digital twin of our university campus. It is designed as an educational virtual world where learning applications can be embedded in this virtual campus, supporting not only remote and collaborative learning but also professional technical training to enhance educational experiences through immersive and interactive learning. To evaluate the effectiveness of this educational metaverse, we conducted an experiment focused on 3D linear transformation in linear algebra, with teaching content generated by generative AI, comparing our metaverse system with traditional teaching methods. Knowledge tests and surveys assessing learning interest revealed that students engaged with the CityU Metaverse, facilitated by AI-generated content, outperformed those in traditional settings and reported greater enjoyment during the learning process. The work provides valuable perspectives on the behaviors and interactions within the metaverse by analyzing user preferences and learning outcomes. © The Author(s), under exclusive license to Springer Nature Singapore Pte Ltd. 2025.},
  keywords  = {Comparatives studies, Digital age, Digital interactions, digital twin, Educational metaverse, Engineering education, Generative AI, Immersive, Matrix algebra, Metaverse, Metaverses, Personnel training, Students, Teaching, University campus, Virtual environments, virtual learning environment, Virtual learning environments, Virtual Reality, Virtualization},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Stacchio, L.; Balloni, E.; Frontoni, E.; Paolanti, M.; Zingaretti, P.; Pierdicca, R.
MineVRA: Exploring the Role of Generative AI-Driven Content Development in XR Environments through a Context-Aware Approach Journal Article
In: IEEE Transactions on Visualization and Computer Graphics, vol. 31, no. 5, pp. 3602–3612, 2025, ISSN: 10772626 (ISSN), (Publisher: IEEE Computer Society).
Abstract | Links | BibTeX | Tags: adult, Article, Artificial intelligence, Computer graphics, Computer vision, Content Development, Contents development, Context-Aware, Context-aware approaches, Extended reality, female, Generative adversarial networks, Generative AI, generative artificial intelligence, human, Human-in-the-loop, Immersive, Immersive environment, male, Multi-modal, User need, Virtual environments, Virtual Reality
@article{stacchio_minevra_2025,
  title     = {{MineVRA}: Exploring the Role of Generative {AI}-Driven Content Development in {XR} Environments through a Context-Aware Approach},
  author    = {Stacchio, L. and Balloni, E. and Frontoni, E. and Paolanti, M. and Zingaretti, P. and Pierdicca, R.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105003746367&doi=10.1109%2FTVCG.2025.3549160&partnerID=40&md5=3356eb968b3e6a0d3c9b75716b05fac4},
  doi       = {10.1109/TVCG.2025.3549160},
  issn      = {1077-2626},
  year      = {2025},
  date      = {2025-01-01},
  journal   = {IEEE Transactions on Visualization and Computer Graphics},
  volume    = {31},
  number    = {5},
  pages     = {3602--3612},
  abstract  = {The convergence of Artificial Intelligence (AI), Computer Vision (CV), Computer Graphics (CG), and Extended Reality (XR) is driving innovation in immersive environments. A key challenge in these environments is the creation of personalized 3D assets, traditionally achieved through manual modeling, a time-consuming process that often fails to meet individual user needs. More recently, Generative AI (GenAI) has emerged as a promising solution for automated, context-aware content generation. In this paper, we present MineVRA (Multimodal generative artificial iNtelligence for contExt-aware Virtual Reality Assets), a novel Human-In-The-Loop (HITL) XR framework that integrates GenAI to facilitate coherent and adaptive 3D content generation in immersive scenarios. To evaluate the effectiveness of this approach, we conducted a comparative user study analyzing the performance and user satisfaction of GenAI-generated 3D objects compared to those generated by Sketchfab in different immersive contexts. The results suggest that GenAI can significantly complement traditional 3D asset libraries, with valuable design implications for the development of human-centered XR environments. © 2025 Elsevier B.V., All rights reserved.},
  note      = {Publisher: IEEE Computer Society},
  keywords  = {adult, Article, Artificial intelligence, Computer graphics, Computer vision, Content Development, Contents development, Context-Aware, Context-aware approaches, Extended reality, female, Generative adversarial networks, Generative AI, generative artificial intelligence, human, Human-in-the-loop, Immersive, Immersive environment, male, Multi-modal, User need, Virtual environments, Virtual Reality},
  pubstate  = {published},
  tppubtype = {article},
}
Chen, J.; Wu, X.; Lan, T.; Li, B.
LLMER: Crafting Interactive Extended Reality Worlds with JSON Data Generated by Large Language Models Journal Article
In: IEEE Transactions on Visualization and Computer Graphics, vol. 31, no. 5, pp. 2715–2724, 2025, ISSN: 10772626 (ISSN), (Publisher: IEEE Computer Society).
Abstract | Links | BibTeX | Tags: % reductions, 3D modeling, algorithm, Algorithms, Augmented Reality, Coding errors, Computer graphics, Computer interaction, computer interface, Computer simulation languages, Extended reality, generative artificial intelligence, human, Human users, human-computer interaction, Humans, Imaging, Immersive, Language, Language Model, Large language model, large language models, Metadata, Natural Language Processing, Natural language processing systems, Natural languages, procedures, Script generation, Spatio-temporal data, Three dimensional computer graphics, Three-Dimensional, three-dimensional imaging, User-Computer Interface, Virtual Reality
@article{chen_llmer_2025,
  title     = {{LLMER}: Crafting Interactive Extended Reality Worlds with {JSON} Data Generated by Large Language Models},
  author    = {Chen, J. and Wu, X. and Lan, T. and Li, B.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105003825793&doi=10.1109%2FTVCG.2025.3549549&partnerID=40&md5=50597473616678390f143a33082a13d3},
  doi       = {10.1109/TVCG.2025.3549549},
  issn      = {1077-2626},
  year      = {2025},
  date      = {2025-01-01},
  journal   = {IEEE Transactions on Visualization and Computer Graphics},
  volume    = {31},
  number    = {5},
  pages     = {2715--2724},
  abstract  = {The integration of Large Language Models (LLMs) like GPT-4 with Extended Reality (XR) technologies offers the potential to build truly immersive XR environments that interact with human users through natural language, e.g., generating and animating 3D scenes from audio inputs. However, the complexity of XR environments makes it difficult to accurately extract relevant contextual data and scene/object parameters from an overwhelming volume of XR artifacts. It leads to not only increased costs with pay-per-use models, but also elevated levels of generation errors. Moreover, existing approaches focusing on coding script generation are often prone to generation errors, resulting in flawed or invalid scripts, application crashes, and ultimately a degraded user experience. To overcome these challenges, we introduce LLMER, a novel framework that creates interactive XR worlds using JSON data generated by LLMs. Unlike prior approaches focusing on coding script generation, LLMER translates natural language inputs into JSON data, significantly reducing the likelihood of application crashes and processing latency. It employs a multi-stage strategy to supply only the essential contextual information adapted to the user's request and features multiple modules designed for various XR tasks. Our preliminary user study reveals the effectiveness of the proposed system, with over 80% reduction in consumed tokens and around 60% reduction in task completion time compared to state-of-the-art approaches. The analysis of users' feedback also illuminates a series of directions for further optimization. © 2025 Elsevier B.V., All rights reserved.},
  note      = {Publisher: IEEE Computer Society},
  keywords  = {% reductions, 3D modeling, algorithm, Algorithms, Augmented Reality, Coding errors, Computer graphics, Computer interaction, computer interface, Computer simulation languages, Extended reality, generative artificial intelligence, human, Human users, human-computer interaction, Humans, Imaging, Immersive, Language, Language Model, Large language model, large language models, Metadata, Natural Language Processing, Natural language processing systems, Natural languages, procedures, Script generation, Spatio-temporal data, Three dimensional computer graphics, Three-Dimensional, three-dimensional imaging, User-Computer Interface, Virtual Reality},
  pubstate  = {published},
  tppubtype = {article},
}
Shen, Y.; Li, B.; Huang, J.; Wang, Z.
GaussianShopVR: Facilitating Immersive 3D Authoring Using Gaussian Splatting in VR Proceedings Article
In: Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW, pp. 1292–1293, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331514846 (ISBN).
Abstract | Links | BibTeX | Tags: 3D authoring, 3D modeling, Digital replicas, Gaussian distribution, Gaussian Splatting editing, Gaussians, Graphical user interfaces, High quality, Immersive, Immersive environment, Interactive computer graphics, Rendering (computer graphics), Rendering pipelines, Splatting, Three dimensional computer graphics, User profile, Virtual Reality, Virtual reality user interface, Virtualization, VR user interface
@inproceedings{shen_gaussianshopvr_2025,
  title     = {{GaussianShopVR}: Facilitating Immersive {3D} Authoring Using {Gaussian} Splatting in {VR}},
  author    = {Shen, Y. and Li, B. and Huang, J. and Wang, Z.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005138672&doi=10.1109%2FVRW66409.2025.00292&partnerID=40&md5=2290016d250649f8d7f262212b1f59cb},
  doi       = {10.1109/VRW66409.2025.00292},
  isbn      = {9798331514846},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW},
  pages     = {1292--1293},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {Virtual reality (VR) applications require massive high-quality 3D assets to create immersive environments. Generating mesh-based 3D assets typically involves a significant amount of manpower and effort, which makes VR applications less accessible. 3D Gaussian Splatting (3DGS) has attracted much attention for its ability to quickly create digital replicas of real-life scenes and its compatibility with traditional rendering pipelines. However, it remains a challenge to edit 3DGS in a flexible and controllable manner. We propose GaussianShopVR, a system that leverages VR user interfaces to specify target areas to achieve flexible and controllable editing of reconstructed 3DGS. In addition, selected areas can provide 3D information to generative AI models to facilitate the editing. GaussianShopVR integrates object hierarchy management while keeping the backpropagated gradient flow to allow local editing with context information. © 2025 Elsevier B.V., All rights reserved.},
  keywords  = {3D authoring, 3D modeling, Digital replicas, Gaussian distribution, Gaussian Splatting editing, Gaussians, Graphical user interfaces, High quality, Immersive, Immersive environment, Interactive computer graphics, Rendering (computer graphics), Rendering pipelines, Splatting, Three dimensional computer graphics, User profile, Virtual Reality, Virtual reality user interface, Virtualization, VR user interface},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Sousa, R. T.; Oliveira, E. A. M.; Cintra, L. M. F.; Filho, A. R. G. Galvão
Transformative Technologies for Rehabilitation: Leveraging Immersive and AI-Driven Solutions to Reduce Recidivism and Promote Decent Work Proceedings Article
In: Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW, pp. 168–171, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331514846 (ISBN).
Abstract | Links | BibTeX | Tags: AI- Driven Rehabilitation, Artificial intelligence- driven rehabilitation, Emotional intelligence, Engineering education, Generative AI, generative artificial intelligence, Immersive, Immersive technologies, Immersive Technology, Language Model, Large language model, large language models, Skills development, Social Reintegration, Social skills, Sociology, Vocational training
@inproceedings{sousa_transformative_2025,
  title     = {Transformative Technologies for Rehabilitation: Leveraging Immersive and {AI}-Driven Solutions to Reduce Recidivism and Promote Decent Work},
  author    = {Sousa, R. T. and Oliveira, E. A. M. and Cintra, L. M. F. and Galvão Filho, A. R. G.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005140551&doi=10.1109%2FVRW66409.2025.00042&partnerID=40&md5=a8dbe15493fd8361602d049f2b09efe3},
  doi       = {10.1109/VRW66409.2025.00042},
  isbn      = {9798331514846},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW},
  pages     = {168--171},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {The reintegration of incarcerated individuals into society presents significant challenges, particularly in addressing barriers related to vocational training, social skill development, and emotional rehabilitation. Immersive technologies, such as Virtual Reality and Augmented Reality, combined with generative Artificial Intelligence (AI) and Large Language Models, offer innovative opportunities to enhance these areas. These technologies create practical, controlled environments for skill acquisition and behavioral training, while generative AI enables dynamic, personalized, and adaptive experiences. This paper explores the broader potential of these integrated technologies in supporting rehabilitation, reducing recidivism, and fostering sustainable employment opportunities and these initiatives align with the overarching equity objective of ensuring Decent Work for All, reinforcing the commitment to inclusive and equitable progress across diverse communities, through the transformative potential of immersive and AI-driven systems in correctional systems. © 2025 Elsevier B.V., All rights reserved.},
  keywords  = {AI- Driven Rehabilitation, Artificial intelligence- driven rehabilitation, Emotional intelligence, Engineering education, Generative AI, generative artificial intelligence, Immersive, Immersive technologies, Immersive Technology, Language Model, Large language model, large language models, Skills development, Social Reintegration, Social skills, Sociology, Vocational training},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Grubert, J.; Schmalstieg, D.; Dickhaut, K.
Towards Supporting Literary Studies Using Virtual Reality and Generative Artificial Intelligence Proceedings Article
In: Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW, pp. 147–149, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331514846 (ISBN).
Abstract | Links | BibTeX | Tags: Cultural-historical, generative artificial intelligence, Immersive, literary studies, Literary study, Literary texts, Literature analysis, Textual-analysis, Virtual Reality, Visual elements
@inproceedings{grubert_towards_2025,
  title     = {Towards Supporting Literary Studies Using Virtual Reality and Generative Artificial Intelligence},
  author    = {Grubert, J. and Schmalstieg, D. and Dickhaut, K.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005144426&doi=10.1109%2FVRW66409.2025.00037&partnerID=40&md5=2a1f62e6193cb0a54105d77c5ba85aa9},
  doi       = {10.1109/VRW66409.2025.00037},
  isbn      = {9798331514846},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW},
  pages     = {147--149},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {Literary studies critically examine fictional texts, exploring their structures, themes, stylistic features, and cultural-historical contexts. A central challenge in this field lies in bridging textual analysis with the spatial and sensory dimensions of settings described or implied in texts. Traditional methodologies often require scholars to mentally reconstruct these environments, leading to incomplete or inconsistent interpretations. Readers may be biased by their personal context or experiences, or may lack detailed knowledge of the relevant historical facts. This paper argues for the integration of virtual reality and generative artificial intelligence as supporting instruments to enhance literary research. The former enables immersive, spatially accurate reconstructions of historical environments, while the latter provides tools such as text-to-image and text-to-3D generation which let us dynamically render visual elements quoted in literary texts. Together, these technologies have the potential to significantly enhance traditional literature analysis methodologies, enabling novel approaches for contextualizing and analyzing literature in its spatial and cultural milieu. © 2025 Elsevier B.V., All rights reserved.},
  keywords  = {Cultural-historical, generative artificial intelligence, Immersive, literary studies, Literary study, Literary texts, Literature analysis, Textual-analysis, Virtual Reality, Visual elements},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Lopes, M. K. S.; Falk, T. H.
Generative AI for Personalized Multisensory Immersive Experiences: Challenges and Opportunities for Stress Reduction Proceedings Article
In: Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW, pp. 143–146, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331514846 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence tools, Environment personalization, Forest bathing, Generative AI, Immersive, Multi-Sensory, Multi-sensory virtual reality, Multisensory, Personalizations, Relaxation, Virtual Reality, Virtualization
@inproceedings{lopes_generative_2025,
  title     = {Generative {AI} for Personalized Multisensory Immersive Experiences: Challenges and Opportunities for Stress Reduction},
  author    = {Lopes, M. K. S. and Falk, T. H.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005149501&doi=10.1109%2FVRW66409.2025.00036&partnerID=40&md5=9507cf2dcec341c434e08e8b6f92bfda},
  doi       = {10.1109/VRW66409.2025.00036},
  isbn      = {9798331514846},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW},
  pages     = {143--146},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {Stress management and relaxation are critical areas of interest in mental health and well-being. Forest bathing is a practice that has been shown to have a positive effect on reducing stress by stimulating all the senses in an immersive nature experience. Since access to nature is not universally available to everyone, virtual reality has emerged as a promising tool to simulate this type of experience. Furthermore, generative artificial intelligence (GenAI) tools offer new opportunities to create highly personalized and immersive experiences that can enhance relaxation and reduce stress. This study explores the potential of personalized multisensory VR environments, designed using GenAI tools, to optimize relaxation and stress relief via two experiments that are currently underway. The first evaluates the effectiveness of non-personalized versus personalized VR scenes generated using AI tools to promote increased relaxation. The second explores the potential benefits of providing the user with additional personalization tools, from adding new virtual elements to the AI-generated scene, to adding AI-generated sounds and scent/haptics customization. Ultimately, this research aims to identify which customizable elements may lead to improved therapeutic benefits for multisensory VR experiences. © 2025 Elsevier B.V., All rights reserved.},
  keywords  = {Artificial intelligence tools, Environment personalization, Forest bathing, Generative AI, Immersive, Multi-Sensory, Multi-sensory virtual reality, Multisensory, Personalizations, Relaxation, Virtual Reality, Virtualization},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Kurai, R.; Hiraki, T.; Hiroi, Y.; Hirao, Y.; Perusquía-Hernández, M.; Uchiyama, H.; Kiyokawa, K.
An implementation of MagicCraft: Generating Interactive 3D Objects and Their Behaviors from Text for Commercial Metaverse Platforms Proceedings Article
In: Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW, pp. 1284–1285, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331514846 (ISBN).
Abstract | Links | BibTeX | Tags: 3D modeling, 3D models, 3D object, 3D Object Generation, 3d-modeling, AI-Assisted Design, Generative AI, Immersive, Metaverse, Metaverses, Model skill, Object oriented programming, Programming skills
@inproceedings{kurai_implementation_2025,
  title     = {An implementation of {MagicCraft}: Generating Interactive {3D} Objects and Their Behaviors from Text for Commercial Metaverse Platforms},
  author    = {Kurai, R. and Hiraki, T. and Hiroi, Y. and Hirao, Y. and Perusquía-Hernández, M. and Uchiyama, H. and Kiyokawa, K.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005153642&doi=10.1109%2FVRW66409.2025.00288&partnerID=40&md5=fd6d3b8d0dcbc5b9ccd4c31069bb4f4a},
  doi       = {10.1109/VRW66409.2025.00288},
  isbn      = {9798331514846},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW},
  pages     = {1284--1285},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {Metaverse platforms are rapidly evolving to provide immersive spaces. However, the generation of dynamic and interactive 3D objects remains a challenge due to the need for advanced 3D modeling and programming skills. We present MagicCraft, a system that generates functional 3D objects from natural language prompts. MagicCraft uses generative AI models to manage the entire content creation pipeline: converting user text descriptions into images, transforming images into 3D models, predicting object behavior, and assigning necessary attributes and scripts. It also provides an interactive interface for users to refine generated objects by adjusting features like orientation, scale, seating positions, and grip points. © 2025 Elsevier B.V., All rights reserved.},
  keywords  = {3D modeling, 3D models, 3D object, 3D Object Generation, 3d-modeling, AI-Assisted Design, Generative AI, Immersive, Metaverse, Metaverses, Model skill, Object oriented programming, Programming skills},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Bosser, A. -G.; Cascarano, P.; Lacoche, J.; Hajahmadi, S.; Stanescu, A.; Sörös, G.
Preface to the First Workshop on GenAI-XR: Generative Artificial Intelligence meets Extended Reality Proceedings Article
In: Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW, pp. 129–130, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331514846 (ISBN).
Abstract | Links | BibTeX | Tags: Adaptive environment, Adaptive Environments, Art educations, Artificial Intelligence and Extended Reality Integration, Context-aware systems, Entertainment industry, Extended reality, Immersive, Indexterm: generative artificial intelligence, IndexTerms: Generative Artificial Intelligence, Industry professionals, Innovative method, Personalized interaction, Personalized Interactions
@inproceedings{bosser_preface_2025,
title = {Preface to the First Workshop on {GenAI-XR}: Generative Artificial Intelligence meets Extended Reality},
author = {A. -G. Bosser and P. Cascarano and J. Lacoche and S. Hajahmadi and A. Stanescu and G. Sörös},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005161220&doi=10.1109%2FVRW66409.2025.00033&partnerID=40&md5=2d463f32f31df557a5ba291a71ecb6ed},
doi = {10.1109/VRW66409.2025.00033},
isbn = {9798331514846},
year = {2025},
date = {2025-01-01},
booktitle = {Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW},
pages = {129--130},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {The GenAI-XR workshop aims to explore the intersection of Generative Artificial Intelligence (GenAI) and Extended Reality (XR), examining their combined potential to revolutionize various sectors including entertainment, arts, education, factory work, healthcare, architecture, and others. The workshop will provide a platform for researchers, industry professionals, and practitioners to discuss innovative methods of integrating GenAI into XR environments, enhancing immersive experiences, and personalizing interactions in real time. Through presentation and discussion sessions, participants will gain insights into the latest developments, challenges, and future directions at the intersection of GenAI and XR. © 2025 Elsevier B.V., All rights reserved.},
keywords = {Adaptive environment, Adaptive Environments, Art educations, Artificial Intelligence and Extended Reality Integration, Context-aware systems, Entertainment industry, Extended reality, Immersive, Indexterm: generative artificial intelligence, IndexTerms: Generative Artificial Intelligence, Industry professionals, Innovative method, Personalized interaction, Personalized Interactions},
pubstate = {published},
tppubtype = {inproceedings}
}
Otsuka, T.; Li, D.; Siriaraya, P.; Nakajima, S.
Development of A Relaxation Support System Utilizing Stereophonic AR Proceedings Article
In: Int. Conf. Comput., Netw. Commun., ICNC, pp. 463–467, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331520960 (ISBN).
Abstract | Links | BibTeX | Tags: Augmented Reality, Environmental sounds, Generative AI, Immersive, Mental Well-being, Soundscapes, Spatial Audio, Stereo image processing, Support method, Support systems, Well being
@inproceedings{otsuka_development_2025,
title = {Development of A Relaxation Support System Utilizing Stereophonic {AR}},
author = {T. Otsuka and D. Li and P. Siriaraya and S. Nakajima},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105006602014&doi=10.1109%2FICNC64010.2025.10993739&partnerID=40&md5=72d1dea49b65a396c9a6788ce1ed3274},
doi = {10.1109/ICNC64010.2025.10993739},
isbn = {9798331520960},
year = {2025},
date = {2025-01-01},
booktitle = {Int. Conf. Comput., Netw. Commun., ICNC},
pages = {463--467},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Given the high prevalence of stress and anxiety in today's society, there is an urgent need to explore effective methods to help people manage stress. This research aims to develop a relaxation support system using stereophonic augmented reality (AR), designed to help alleviate stress by recreating relaxing environments with immersive stereo soundscapes, including stories created from generative AI and environmental sounds while users are going for a walk. This paper presents a preliminary evaluation of the effectiveness of the proposed relaxation support method. © 2025 Elsevier B.V., All rights reserved.},
keywords = {Augmented Reality, Environmental sounds, Generative AI, Immersive, Mental Well-being, Soundscapes, Spatial Audio, Stereo image processing, Support method, Support systems, Well being},
pubstate = {published},
tppubtype = {inproceedings}
}
Chang, K. -Y.; Lee, C. -F.
Enhancing Virtual Restorative Environment with Generative AI: Personalized Immersive Stress-Relief Experiences Proceedings Article
In: V.G., Duffy (Ed.): Lect. Notes Comput. Sci., pp. 132–144, Springer Science and Business Media Deutschland GmbH, 2025, ISBN: 03029743 (ISSN); 978-303193501-5 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence generated content, Artificial Intelligence Generated Content (AIGC), Electroencephalography, Electroencephalography (EEG), Generative AI, Immersive, Immersive environment, Mental health, Physical limitations, Restorative environment, Stress relief, Virtual reality exposure therapies, Virtual reality exposure therapy, Virtual Reality Exposure Therapy (VRET), Virtualization
@inproceedings{chang_enhancing_2025,
title = {Enhancing Virtual Restorative Environment with Generative {AI}: Personalized Immersive Stress-Relief Experiences},
author = {K. -Y. Chang and C. -F. Lee},
editor = {Duffy, V. G.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105007759157&doi=10.1007%2f978-3-031-93502-2_9&partnerID=40&md5=ee620a5da9b65e90ccb1eaa75ec8b724},
doi = {10.1007/978-3-031-93502-2_9},
issn = {0302-9743},
isbn = {978-303193501-5},
year = {2025},
date = {2025-01-01},
booktitle = {Lect. Notes Comput. Sci.},
volume = {15791 LNCS},
pages = {132--144},
publisher = {Springer Science and Business Media Deutschland GmbH},
abstract = {In today’s fast-paced world, stress and mental health challenges are becoming more common. Restorative environments help people relax and recover emotionally, and Virtual Reality Exposure Therapy (VRET) offers a way to experience these benefits beyond physical limitations. However, most VRET applications rely on pre-designed content, limiting their adaptability to individual needs. This study explores how Generative AI can enhance VRET by creating personalized, immersive environments that better match users’ preferences and improve relaxation. To evaluate the impact of AI-generated restorative environments, we combined EEG measurements with user interviews. Thirty university students participated in the study, experiencing two different modes: static mode and walking mode. The EEG results showed an increase in Theta (θ) and High Beta (β) brain waves, suggesting a state of deep immersion accompanied by heightened cognitive engagement and mental effort. While participants found the experience enjoyable and engaging, the AI-generated environments tended to create excitement and focus rather than conventional relaxation. These findings suggest that for AI-generated environments in VRET to be more effective for stress relief, future designs should reduce cognitive load while maintaining immersion. This study provides insights into how AI can enhance relaxation experiences and introduces a new perspective on personalized digital stress-relief solutions. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2025.},
keywords = {Artificial intelligence generated content, Artificial Intelligence Generated Content (AIGC), Electroencephalography, Electroencephalography (EEG), Generative AI, Immersive, Immersive environment, Mental health, Physical limitations, Restorative environment, Stress relief, Virtual reality exposure therapies, Virtual reality exposure therapy, Virtual Reality Exposure Therapy (VRET), Virtualization},
pubstate = {published},
tppubtype = {inproceedings}
}
Tortora, A.; Amaro, I.; Greca, A. Della; Barra, P.
Exploring the Role of Generative Artificial Intelligence in Virtual Reality: Opportunities and Future Perspectives Proceedings Article
In: J.Y.C., Chen; G., Fragomeni (Ed.): Lect. Notes Comput. Sci., pp. 125–142, Springer Science and Business Media Deutschland GmbH, 2025, ISBN: 03029743 (ISSN); 978-303193699-9 (ISBN).
Abstract | Links | BibTeX | Tags: Ethical technology, Future perspectives, Generative AI, Image modeling, Immersive, immersive experience, Immersive Experiences, Information Management, Language Model, Personnel training, Professional training, Real- time, Sensitive data, Training design, Users' experiences, Virtual Reality
@inproceedings{tortora_exploring_2025,
title = {Exploring the Role of Generative Artificial Intelligence in Virtual Reality: Opportunities and Future Perspectives},
author = {A. Tortora and I. Amaro and A. Della Greca and P. Barra},
editor = {Chen, J. Y. C. and Fragomeni, G.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105007788684&doi=10.1007%2f978-3-031-93700-2_9&partnerID=40&md5=7b69183bbf8172f9595f939254fb6831},
doi = {10.1007/978-3-031-93700-2_9},
issn = {0302-9743},
isbn = {978-303193699-9},
year = {2025},
date = {2025-01-01},
booktitle = {Lect. Notes Comput. Sci.},
volume = {15788 LNCS},
pages = {125--142},
publisher = {Springer Science and Business Media Deutschland GmbH},
abstract = {In recent years, generative AI, such as language and image models, have started to revolutionize virtual reality (VR) by offering new opportunities for immersive and personalized interaction. This paper explores the potential of these Intelligent Augmentation technologies in the context of VR, analyzing how the generation of text and images in real time can enhance the user experience through dynamic and personalized environments and contents. The integration of generative AI in VR scenarios holds promise in multiple fields, including education, professional training, design, and healthcare. However, their implementation involves significant challenges, such as privacy management, data security, and ethical issues related to cognitive manipulation and representation of reality. Through an overview of current applications and future prospects, this paper highlights the crucial role of generative AI in enhancing VR, helping to outline a path for the ethical and sustainable development of these immersive technologies. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2025.},
keywords = {Ethical technology, Future perspectives, Generative AI, Image modeling, Immersive, immersive experience, Immersive Experiences, Information Management, Language Model, Personnel training, Professional training, Real- time, Sensitive data, Training design, Users' experiences, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Kurai, R.; Hiraki, T.; Hiroi, Y.; Hirao, Y.; Perusquía-Hernández, M.; Uchiyama, H.; Kiyokawa, K.
MagicCraft: Natural Language-Driven Generation of Dynamic and Interactive 3D Objects for Commercial Metaverse Platforms Journal Article
In: IEEE Access, vol. 13, pp. 132459–132474, 2025, ISSN: 21693536 (ISSN), (Publisher: Institute of Electrical and Electronics Engineers Inc.).
Abstract | Links | BibTeX | Tags: 3D models, 3D object, 3D Object Generation, 3d-modeling, AI-Assisted Design, Artificial intelligence, Behavioral Research, Content creation, Generative AI, Immersive, Metaverse, Metaverses, Natural language processing systems, Natural languages, Object oriented programming, Three dimensional computer graphics, user experience, User interfaces
@article{kurai_magiccraft_2025,
title = {{MagicCraft}: Natural Language-Driven Generation of Dynamic and Interactive {3D} Objects for Commercial Metaverse Platforms},
author = {R. Kurai and T. Hiraki and Y. Hiroi and Y. Hirao and M. Perusquía-Hernández and H. Uchiyama and K. Kiyokawa},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105010187256&doi=10.1109%2FACCESS.2025.3587232&partnerID=40&md5=9b7a8115c62a8f9da4956dbbbb53dc4e},
doi = {10.1109/ACCESS.2025.3587232},
issn = {2169-3536},
year = {2025},
date = {2025-01-01},
journal = {IEEE Access},
volume = {13},
pages = {132459--132474},
abstract = {Metaverse platforms are rapidly evolving to provide immersive spaces for user interaction and content creation. However, the generation of dynamic and interactive 3D objects remains challenging due to the need for advanced 3D modeling and programming skills. To address this challenge, we present MagicCraft, a system that generates functional 3D objects from natural language prompts for metaverse platforms. MagicCraft uses generative AI models to manage the entire content creation pipeline: converting user text descriptions into images, transforming images into 3D models, predicting object behavior, and assigning necessary attributes and scripts. It also provides an interactive interface for users to refine generated objects by adjusting features such as orientation, scale, seating positions, and grip points. Implemented on Cluster, a commercial metaverse platform, MagicCraft was evaluated by 7 expert CG designers and 51 general users. Results show that MagicCraft significantly reduces the time and skill required to create 3D objects. Users with no prior experience in 3D modeling or programming successfully created complex, interactive objects and deployed them in the metaverse. Expert feedback highlighted the system's potential to improve content creation workflows and support rapid prototyping. By integrating AI-generated content into metaverse platforms, MagicCraft makes 3D content creation more accessible. © 2025 Elsevier B.V., All rights reserved.},
note = {Publisher: Institute of Electrical and Electronics Engineers Inc.},
keywords = {3D models, 3D object, 3D Object Generation, 3d-modeling, AI-Assisted Design, Artificial intelligence, Behavioral Research, Content creation, Generative AI, Immersive, Metaverse, Metaverses, Natural language processing systems, Natural languages, Object oriented programming, Three dimensional computer graphics, user experience, User interfaces},
pubstate = {published},
tppubtype = {article}
}
Paterakis, I.; Manoudaki, N.
Osmosis: Generative AI and XR for the real-time transformation of urban architectural environments Journal Article
In: International Journal of Architectural Computing, 2025, ISSN: 14780771 (ISSN), (Publisher: SAGE Publications Inc.).
Abstract | Links | BibTeX | Tags: Architectural design, Architectural environment, Artificial intelligence, Biodigital design, Case-studies, Computational architecture, Computer architecture, Extended reality, generative artificial intelligence, Immersive, Immersive environment, immersive environments, Natural language processing systems, Real- time, Urban environments, urban planning
@article{paterakis_osmosis_2025,
title = {Osmosis: Generative {AI} and {XR} for the real-time transformation of urban architectural environments},
author = {I. Paterakis and N. Manoudaki},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105014516125&doi=10.1177%2F14780771251356526&partnerID=40&md5=4bbcb09440d91899cb7d2d5d0c852507},
doi = {10.1177/14780771251356526},
issn = {1478-0771},
year = {2025},
date = {2025-01-01},
journal = {International Journal of Architectural Computing},
abstract = {This work contributes to the evolving discourse on biodigital architecture by examining how generative artificial intelligence (AI) and extended reality (XR) systems can be combined to create immersive urban environments. Focusing on the case study of “Osmosis”, a series of large-scale public installations, this work proposes a methodological framework for real-time architectural composition in XR using diffusion models and interaction. The project reframes the architectural façade as a semi permeable membrane, through which digital content diffuses in response to environmental and user inputs. By integrating natural language prompts, multimodal input, and AI-generated visual synthesis with projection mapping, Osmosis advances a vision for urban architecture that is interactive, data-driven, and sensorially rich. The work explores new design territories where stochastic form-making and real-time responsiveness intersect, and positions AI as an augmentation of architectural creativity rather than its replacement. © 2025 Elsevier B.V., All rights reserved.},
note = {Publisher: SAGE Publications Inc.},
keywords = {Architectural design, Architectural environment, Artificial intelligence, Biodigital design, Case-studies, Computational architecture, Computer architecture, Extended reality, generative artificial intelligence, Immersive, Immersive environment, immersive environments, Natural language processing systems, Real- time, Urban environments, urban planning},
pubstate = {published},
tppubtype = {article}
}
Li, Y.; Wang, S.; Sun, X.; Yang, L.; Zhu, T.; Chen, Y.; Zhao, K.; Zhao, Y.; Li, M.; Lc, R.
Reality as Imagined: Design and Evaluation of a TeleAbsence-Driven Extended Reality Experience for (Re) Interpreting Urban Cultural Heritage Narratives Across Time Journal Article
In: International Journal of Human-Computer Interaction, 2025, ISSN: 10447318 (ISSN); 15327590 (ISSN), (Publisher: Taylor and Francis Ltd.).
Abstract | Links | BibTeX | Tags: Across time, Artificial intelligence, Computer interaction, Cultural heritages, Design and evaluations, Extended reality, Generative AI, Hong-kong, Human computer interaction, human–computer interaction, Immersive, Mixed reality, TeleAbsence, Urban cultural heritage narrative, Urban cultural heritage narratives
@article{li_reality_2025,
title = {Reality as Imagined: Design and Evaluation of a {TeleAbsence}-Driven Extended Reality Experience for (Re) Interpreting Urban Cultural Heritage Narratives Across Time},
author = {Y. Li and S. Wang and X. Sun and L. Yang and T. Zhu and Y. Chen and K. Zhao and Y. Zhao and M. Li and R. Lc},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105016721876&doi=10.1080%2F10447318.2025.2554296&partnerID=40&md5=1ecd1a643f4ba85ae08d549db04a8c9b},
doi = {10.1080/10447318.2025.2554296},
issn = {1044-7318; 1532-7590},
year = {2025},
date = {2025-01-01},
journal = {International Journal of Human-Computer Interaction},
abstract = {Visitors to Urban Cultural Heritage (UCH) often encounter official narratives but not the imaginations and relationships shaping its intangible aspects. Existing immersive experiences emphasize historical realities, overlooking personal and collective imaginations that shift with rapid development. To address this, we designed an Extended Reality (XR) experience around eight Hong Kong landmarks, enabling transitions between virtual and mixed-reality environments where users explore UCH narratives across past, present, and future. These narratives integrate (1) historical documentation with 360° visualizations and (2) images created in workshops supported by Generative AI tools. A mixed-method study with 24 participants examined their experiences and reflections. Results revealed deep immersion in both real and imagined worlds, as well as personal reinterpretations of UCH. This work demonstrates how XR can blend reality and imagination within one immersive experience and highlights design implications for archiving human imagination as an intangible form of cultural heritage. © 2025 Elsevier B.V., All rights reserved.},
note = {Publisher: Taylor and Francis Ltd.},
keywords = {Across time, Artificial intelligence, Computer interaction, Cultural heritages, Design and evaluations, Extended reality, Generative AI, Hong-kong, Human computer interaction, human–computer interaction, Immersive, Mixed reality, TeleAbsence, Urban cultural heritage narrative, Urban cultural heritage narratives},
pubstate = {published},
tppubtype = {article}
}
Tovias, E.; Wu, L.
Leveraging Virtual Reality and AI for Enhanced Vocabulary Learning Proceedings Article
In: pp. 308, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331521646 (ISBN).
Abstract | Links | BibTeX | Tags: Avatar, Avatars, E-Learning, Immersive, Interactive computer graphics, Interactive learning, Language Model, Large language model, large language models, Learning experiences, Real time interactions, Text-based methods, user experience, Users' experiences, Virtual environments, Virtual Reality, Vocabulary learning
@inproceedings{tovias_leveraging_2025,
title = {Leveraging Virtual Reality and {AI} for Enhanced Vocabulary Learning},
author = {E. Tovias and L. Wu},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105017563813&doi=10.1109%2FICHMS65439.2025.11154184&partnerID=40&md5=7b79f93d6f8ec222b25a4bfeac408d3a},
doi = {10.1109/ICHMS65439.2025.11154184},
isbn = {9798331521646},
year = {2025},
date = {2025-01-01},
pages = {308},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
internal-note = {Required booktitle field is missing from the source record; the DOI prefix suggests an IEEE ICHMS 2025 proceedings volume — verify and add},
abstract = {This study examines the integration of virtual reality (VR) and Artificial Intelligence (AI) to create more immersive, interactive learning experiences. By combining VR's engaging user experience with AI-powered avatars, this research explores how these tools can enhance vocabulary learning compared to traditional text-based methods. Utilizing a Meta Quest 3 headset, Unity for development, and OpenAI's API & ElevenLabs for dynamic dialogues, this system offers personalized, real-time interactions (Fig. 1). The integration of these technologies fosters a bright future, driving significant advancements in the development of highly immersive and effective learning environments. © 2025 Elsevier B.V., All rights reserved.},
keywords = {Avatar, Avatars, E-Learning, Immersive, Interactive computer graphics, Interactive learning, Language Model, Large language model, large language models, Learning experiences, Real time interactions, Text-based methods, user experience, Users' experiences, Virtual environments, Virtual Reality, Vocabulary learning},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, R. -G.; Tsai, C. H.; Tseng, M. C.; Hong, R. -C.; Syu, H.; Chou, C. -C.
Immersive Smart Meter Data Analytics: Leveraging eXtended Reality with LSTM and LLMs Proceedings Article
In: pp. 32–36, International Workshop on Computer Science and Engineering (WCSE), 2025.
Abstract | Links | BibTeX | Tags: Data Analytics, Data visualization, Decision making, Energy management, Energy-consumption, Exponential growth, Extended reality (XR), Forecasting, Human computer interaction, Immersive, Language Model, Large language model, large language models (LLMs), Long short-term memory, Long Short-Term Memory (LSTM), Short term memory, Smart Grid technologies, Smart Meters, Smart power grids, Visual analytics
@inproceedings{wang_immersive_2025,
title = {Immersive Smart Meter Data Analytics: Leveraging {eXtended Reality} with {LSTM} and {LLMs}},
author = {R. -G. Wang and C. H. Tsai and M. C. Tseng and R. -C. Hong and H. Syu and C. -C. Chou},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105017965008&doi=10.18178%2Fwcse.2025.06.006&partnerID=40&md5=866ab1ca8cdf0372c020f0131f1d68c1},
doi = {10.18178/wcse.2025.06.006},
year = {2025},
date = {2025-01-01},
pages = {32--36},
publisher = {International Workshop on Computer Science and Engineering (WCSE)},
abstract = {The rapid advancement of smart grid technologies has led to an exponential growth in smart meter data, creating new opportunities for more accurate energy consumption forecasting and immersive data visualization. This study proposes an integrated framework that combines eXtended Reality (XR), Long Short-Term Memory (LSTM) networks, and Large Language Models (LLMs) to enhance smart meter data analytics. The process begins with the application of LSTM to capture temporal dependencies in historical electricity usage data. Subsequently, the Large Language Models (LLMs) are employed to refine these textual forecasts, offering better predictions and explanations that are easily understandable by end-users. Finally, the enriched insights are presented through an XR environment, enabling users to interact with smart meter analytics in an immersive and intuitive way. By visualizing data trends, predictions, and explanatory narratives in a spatial computing interface, users can explore complex information more effectively. This multi-modal approach facilitates better decision-making for energy management, promotes user engagement, and supports smart city initiatives aiming for sustainable energy consumption. The integration of XR, LSTM, and LLMs technologies demonstrates a promising direction for future research and practical applications in smart energy systems. © 2025 Elsevier B.V., All rights reserved.},
keywords = {Data Analytics, Data visualization, Decision making, Energy management, Energy-consumption, Exponential growth, Extended reality (XR), Forecasting, Human computer interaction, Immersive, Language Model, Large language model, large language models (LLMs), Long short-term memory, Long Short-Term Memory (LSTM), Short term memory, Smart Grid technologies, Smart Meters, Smart power grids, Visual analytics},
pubstate = {published},
tppubtype = {inproceedings}
}
Vadisetty, R.; Polamarasetti, A.; Goyal, M. K.; Rongali, S. K.; Prajapati, S. K.; Butani, J. B.
Generative AI for Creating Immersive Learning Environments: Virtual Reality and Beyond Proceedings Article
In: Mishra, S.; Tripathy, H. K.; Mohanty, J. R. (Ed.): Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331523022 (ISBN).
Abstract | Links | BibTeX | Tags: AI in Education, Artificial intelligence in education, Augmented Reality, Augmented Reality (AR), Computer aided instruction, E-Learning, Educational spaces, Generative adversarial networks, Generative AI, generative artificial intelligence, Immersive, Immersive learning, Learning Environments, Learning systems, Personalized learning, Virtual and augmented reality, Virtual environments, Virtual Reality, Virtual Reality (VR)
@inproceedings{vadisetty_generative_2025,
title = {Generative {AI} for Creating Immersive Learning Environments: Virtual Reality and Beyond},
author = {R. Vadisetty and A. Polamarasetti and M. K. Goyal and S. K. Rongali and S. K. Prajapati and J. B. Butani},
editor = {S. Mishra and H. K. Tripathy and J. R. Mohanty},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105018128093&doi=10.1109%2FASSIC64892.2025.11158626&partnerID=40&md5=b29a005f42262bf50c58d7708e2ed91a},
doi = {10.1109/ASSIC64892.2025.11158626},
isbn = {9798331523022},
year = {2025},
date = {2025-01-01},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Generative Artificial Intelligence (AI) revolutionizes immersive educational spaces with dynamic, personalized, and interactive experiences. In this article, Generative AI addresses its role in Virtual and Augmented Realities through automated creation, personalized learning pathways, and heightened engagement. With Generative AI, educational simulations can adapt to learner performance, produce interactive characters, and present real-time feedback through models such as Generative Adversarial Networks (GANs) and Transformerbased AI. Considering its potential, computational limitations, ethics, and authentic content concerns must be considered. In its examination, current implementations, benefits, and impediments, such as AI-powered flexible learning, are discussed in detail in this work. In conclusion, Generative AI's role in changing immersive instruction and opening doors for amplified and augmented educational offerings is stressed. © 2025 Elsevier B.V., All rights reserved.},
keywords = {AI in Education, Artificial intelligence in education, Augmented Reality, Augmented Reality (AR), Computer aided instruction, E-Learning, Educational spaces, Generative adversarial networks, Generative AI, generative artificial intelligence, Immersive, Immersive learning, Learning Environments, Learning systems, Personalized learning, Virtual and augmented reality, Virtual environments, Virtual Reality, Virtual Reality (VR)},
pubstate = {published},
tppubtype = {inproceedings}
}
Casas, L.; Mitchell, K.
Structured Teaching Prompt Articulation for Generative-AI Role Embodiment with Augmented Mirror Video Displays Proceedings Article
In: Spencer, S. N. (Ed.): Proc.: VRCAI - ACM SIGGRAPH Int. Conf. Virtual-Reality Contin. Appl. Ind., Association for Computing Machinery, Inc, 2025, ISBN: 9798400713484 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Augmented Reality, Computer interaction, Contrastive Learning, Cultural icon, Experiential learning, Generative adversarial networks, Generative AI, human-computer interaction, Immersive, Pedagogical practices, Role-based, Teachers', Teaching, Video display, Virtual environments, Virtual Reality
@inproceedings{casas_structured_2025,
title = {Structured Teaching Prompt Articulation for {Generative-AI} Role Embodiment with Augmented Mirror Video Displays},
author = {L. Casas and K. Mitchell},
editor = {S. N. Spencer},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85217997060&doi=10.1145%2F3703619.3706049&partnerID=40&md5=fb1b42dadbdc8ac44eeaafa93abc7f2c},
doi = {10.1145/3703619.3706049},
isbn = {9798400713484},
year = {2025},
date = {2025-01-01},
booktitle = {Proc.: VRCAI - ACM SIGGRAPH Int. Conf. Virtual-Reality Contin. Appl. Ind.},
publisher = {Association for Computing Machinery, Inc},
abstract = {We present a classroom enhanced with augmented reality video display in which students adopt snapshots of their corresponding virtual personas according to their teacher's live articulated spoken educational theme, linearly, such as historical figures, famous scientists, cultural icons, and laterally according to archetypal categories such as world dance styles. We define a structure of generative AI prompt guidance to assist teachers with focused specified visual role embodiment stylization. By leveraging role-based immersive embodiment, our proposed approach enriches pedagogical practices that prioritize experiential learning. © 2025 Elsevier B.V., All rights reserved.},
keywords = {Artificial intelligence, Augmented Reality, Computer interaction, Contrastive Learning, Cultural icon, Experiential learning, Generative adversarial networks, Generative AI, human-computer interaction, Immersive, Pedagogical practices, Role-based, Teachers', Teaching, Video display, Virtual environments, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Lv, J.; Słowik, A.; Rani, S.; Kim, B. -G.; Chen, C. -M.; Kumari, S.; Li, K.; Lyu, X.; Jiang, H.
Multimodal Metaverse Healthcare: A Collaborative Representation and Adaptive Fusion Approach for Generative Artificial-Intelligence-Driven Diagnosis Journal Article
In: Research, vol. 8, 2025, ISSN: 20965168 (ISSN); 26395274 (ISSN), (Publisher: American Association for the Advancement of Science).
Abstract | Links | BibTeX | Tags: Adaptive fusion, Collaborative representations, Diagnosis, Electronic health record, Generative adversarial networks, Health care application, Healthcare environments, Immersive, Learning frameworks, Metaverses, Multi-modal, Multi-modal learning, Performance
@article{lv_multimodal_2025,
title = {Multimodal {Metaverse} Healthcare: A Collaborative Representation and Adaptive Fusion Approach for Generative Artificial-Intelligence-Driven Diagnosis},
author = {J. Lv and A. Słowik and S. Rani and B. -G. Kim and C. -M. Chen and S. Kumari and K. Li and X. Lyu and H. Jiang},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-86000613924&doi=10.34133%2Fresearch.0616&partnerID=40&md5=ce118b548f94bde494051760a217c33c},
doi = {10.34133/research.0616},
issn = {2096-5168; 2639-5274},
year = {2025},
date = {2025-01-01},
journal = {Research},
volume = {8},
abstract = {The metaverse enables immersive virtual healthcare environments, presenting opportunities for enhanced care delivery. A key challenge lies in effectively combining multimodal healthcare data and generative artificial intelligence abilities within metaverse-based healthcare applications, which is a problem that needs to be addressed. This paper proposes a novel multimodal learning framework for metaverse healthcare, MMLMH, based on collaborative intra- and intersample representation and adaptive fusion. Our framework introduces a collaborative representation learning approach that captures shared and modality-specific features across text, audio, and visual health data. By combining modality-specific and shared encoders with carefully formulated intrasample and intersample collaboration mechanisms, MMLMH achieves superior feature representation for complex health assessments. The framework’s adaptive fusion approach, utilizing attention mechanisms and gated neural networks, demonstrates robust performance across varying noise levels and data quality conditions. Experiments on metaverse healthcare datasets demonstrate MMLMH’s superior performance over baseline methods across multiple evaluation metrics. Longitudinal studies and visualization further illustrate MMLMH’s adaptability to evolving virtual environments and balanced performance across diagnostic accuracy, patient–system interaction efficacy, and data integration complexity. The proposed framework has a unique advantage in that a similar level of performance is maintained across various patient populations and virtual avatars, which could lead to greater personalization of healthcare experiences in the metaverse. MMLMH’s successful functioning in such complicated circumstances suggests that it can combine and process information streams from several sources. They can be successfully utilized in next-generation healthcare delivery through virtual reality. © 2025 Elsevier B.V., All rights reserved.},
note = {Publisher: American Association for the Advancement of Science},
keywords = {Adaptive fusion, Collaborative representations, Diagnosis, Electronic health record, Generative adversarial networks, Health care application, Healthcare environments, Immersive, Learning frameworks, Metaverses, Multi-modal, Multi-modal learning, Performance},
pubstate = {published},
tppubtype = {article}
}
Barbu, M.; Iordache, D. -D.; Petre, I.; Barbu, D. -C.; Bajenaru, L.
Framework Design for Reinforcing the Potential of XR Technologies in Transforming Inclusive Education Journal Article
In: Applied Sciences (Switzerland), vol. 15, no. 3, 2025, ISSN: 20763417 (ISSN), (Publisher: Multidisciplinary Digital Publishing Institute (MDPI)).
Abstract | Links | BibTeX | Tags: Adaptive Learning, Adversarial machine learning, Artificial intelligence technologies, Augmented Reality, Contrastive Learning, Educational Technology, Extended reality (XR), Federated learning, Framework designs, Generative adversarial networks, Immersive, immersive experience, Immersive learning, Inclusive education, Learning platform, Special education needs
@article{barbu_framework_2025,
title = {Framework Design for Reinforcing the Potential of {XR} Technologies in Transforming Inclusive Education},
author = {M. Barbu and D.-D. Iordache and I. Petre and D.-C. Barbu and L. Bajenaru},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85217742383&doi=10.3390%2Fapp15031484&partnerID=40&md5=9ff9c99c76855723172055c73049fb5a},
doi = {10.3390/app15031484},
issn = {2076-3417},
year = {2025},
date = {2025-01-01},
journal = {Applied Sciences (Switzerland)},
volume = {15},
number = {3},
abstract = {This study presents a novel approach to inclusive education by integrating augmented reality (XR) and generative artificial intelligence (AI) technologies into an immersive and adaptive learning platform designed for students with special educational needs. Building upon existing solutions, the approach uniquely combines XR and generative AI to facilitate personalized, accessible, and interactive learning experiences tailored to individual requirements. The framework incorporates an intuitive Unity XR-based interface alongside a generative AI module to enable near real-time customization of content and interactions. Additionally, the study examines related generative AI initiatives that promote inclusion through enhanced communication tools, educational support, and customizable assistive technologies. The motivation for this study arises from the pressing need to address the limitations of traditional educational methods, which often fail to meet the diverse needs of learners with special educational requirements. The integration of XR and generative AI offers transformative potential by creating adaptive, immersive, and inclusive learning environments. This approach ensures real-time adaptability to individual progress and accessibility, addressing critical barriers such as static content and lack of inclusivity in existing systems. The research outlines a pathway toward more inclusive and equitable education, significantly enhancing opportunities for learners with diverse needs and contributing to broader social integration and equity in education. © 2025 Elsevier B.V., All rights reserved.},
note = {Publisher: Multidisciplinary Digital Publishing Institute (MDPI)},
keywords = {Adaptive Learning, Adversarial machine learning, Artificial intelligence technologies, Augmented Reality, Contrastive Learning, Educational Technology, Extended reality (XR), Federated learning, Framework designs, Generative adversarial networks, Immersive, immersive experience, Immersive learning, Inclusive education, Learning platform, Special education needs},
pubstate = {published},
tppubtype = {article}
}
Timmerman, K.; Mertens, R.; Yoncalik, A.; Spriet, L.
Cities Unseen: Experiencing the Imagined Proceedings Article
In: Proceedings of the International Conference on Tangible, Embedded, and Embodied Interaction (TEI), Association for Computing Machinery, Inc, 2025, ISBN: 979-8-4007-1197-8.
Abstract | Links | BibTeX | Tags: Art installation, Embodiment, Immersive, Immersive Storytelling, Multiplayers, Presence, Real time interactions, Sensory Involvement, Through the lens, Urban environments, Virtual environments, Virtual Reality
@inproceedings{timmerman_cities_2025,
title = {Cities Unseen: Experiencing the Imagined},
author = {K. Timmerman and R. Mertens and A. Yoncalik and L. Spriet},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105000440301&doi=10.1145%2F3689050.3707685&partnerID=40&md5=051c0ab91b175ddbd001d36be07b3400},
doi = {10.1145/3689050.3707685},
isbn = {979-8-4007-1197-8},
year = {2025},
date = {2025-01-01},
booktitle = {Proceedings of the International Conference on Tangible, Embedded, and Embodied Interaction ({TEI})},
publisher = {Association for Computing Machinery, Inc},
abstract = {"Cities Unseen: Experiencing the Imagined" is an art installation that reinterprets Italo Calvino's "Invisible Cities" through the lens of virtual reality. The project employs a physical suitcase as a portal, allowing participants to enter and explore virtual urban environments using the Oculus Quest 3. The cityscapes will be developed with generative AI, converting Calvino's descriptions into prompts, creating an immersive space for philosophical reflection on the nature of travel and the boundaries between reality and imagination. By integrating Unity's shared spatial anchors and advanced multiplayer features, "Cities Unseen" supports real-time interaction among participants, emphasizing the social and collaborative dimensions of virtual travel. © 2025 Elsevier B.V., All rights reserved.},
keywords = {Art installation, Embodiment, Immersive, Immersive Storytelling, Multiplayers, Presence, Real time interactions, Sensory Involvement, Through the lens, Urban environments, Virtual environments, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}