AHCI RESEARCH GROUP
Publications
Papers published in international journals,
proceedings of conferences, workshops and books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTex record for each paper.
2025
Wei, X.; Chen, Y.; Zhao, P.; Wang, L.; Lee, L. -K.; Liu, R.
In: Interactive Learning Environments, 2025, ISSN: 10494820 (ISSN).
Abstract | Links | BibTeX | Tags: 5E learning model, generative artificial intelligence, Immersive virtual reality, Pedagogical agents, primary students, Science education
@article{wei_effects_2025,
  title     = {Effects of immersive virtual reality on primary students’ science performance in classroom settings: a generative {AI} pedagogical agents-enhanced {5E} approach},
  author    = {Wei, X. and Chen, Y. and Zhao, P. and Wang, L. and Lee, L.-K. and Liu, R.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105007642085&doi=10.1080%2f10494820.2025.2514101&partnerID=40&md5=94fee41fcdce74ebb9e91c6430ed9507},
  doi       = {10.1080/10494820.2025.2514101},
  issn      = {1049-4820},
  year      = {2025},
  date      = {2025-01-01},
  journal   = {Interactive Learning Environments},
  abstract  = {Immersive virtual reality (IVR) holds the potential to transform science education by offering opportunities to enhance learners’ engagement, motivation, and conceptual understanding. However, the integration of generative AI pedagogical agents (GPAs) into IVR environments remains underexplored. Specifically, the application of GPAs as a scaffold within the framework of the 5E learning model in science education has not been fully examined. To address these gaps, this study explored the impact of a GPA-enhanced 5E (GPA-5E) learning approach in IVR on primary students’ academic achievement, self-efficacy, collective efficacy, and their perceptions of the proposed method. Adopting a mixed-methods design, eighty sixth-grade students from two complete classes were assigned to either an experimental group engaging IVR science learning with a GPA-5E approach or a control group following the traditional 5E method. The results indicated that the GPA-5E approach in IVR science learning significantly improved students’ academic achievement, self-efficacy, and collective efficacy compared to the traditional method. Students in the experimental group also reported positive perceptions of the GPA-5E method, emphasizing its benefits in IVR science learning. These findings underscore the potential of integrating GPA-enhanced scaffolds within IVR environments to enrich pedagogical strategies and improve student outcomes in science education. © 2025 Informa UK Limited, trading as Taylor & Francis Group.},
  keywords  = {5E learning model, generative artificial intelligence, Immersive virtual reality, Pedagogical agents, primary students, Science education},
  pubstate  = {published},
  tppubtype = {article}
}
Grubert, J.; Schmalstieg, D.; Dickhaut, K.
Towards Supporting Literary Studies Using Virtual Reality and Generative Artificial Intelligence Proceedings Article
In: Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW, pp. 147–149, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 979-833151484-6 (ISBN).
Abstract | Links | BibTeX | Tags: Cultural-historical, generative artificial intelligence, Immersive, literary studies, Literary study, Literary texts, Literature analysis, Textual-analysis, Virtual Reality, Visual elements
@inproceedings{grubert_towards_2025,
  title     = {Towards Supporting Literary Studies Using Virtual Reality and Generative Artificial Intelligence},
  author    = {Grubert, J. and Schmalstieg, D. and Dickhaut, K.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005144426&doi=10.1109%2fVRW66409.2025.00037&partnerID=40&md5=0315225b4c49f5f87bca94f82f41281c},
  doi       = {10.1109/VRW66409.2025.00037},
  isbn      = {979-8-3315-1484-6},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW},
  pages     = {147--149},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {Literary studies critically examine fictional texts, exploring their structures, themes, stylistic features, and cultural-historical contexts. A central challenge in this field lies in bridging textual analysis with the spatial and sensory dimensions of settings described or implied in texts. Traditional methodologies often require scholars to mentally reconstruct these environments, leading to incomplete or inconsistent interpretations. Readers may be biased by their personal context or experiences, or may lack detailed knowledge of the relevant historical facts. This paper argues for the integration of virtual reality and generative artificial intelligence as supporting instruments to enhance literary research. The former enables immersive, spatially accurate reconstructions of historical environments, while the latter provides tools such as text-to-image and text-to-3D generation which let us dynamically render visual elements quoted in literary texts. Together, these technologies have the potential to significantly enhance traditional literature analysis methodologies, enabling novel approaches for contextualizing and analyzing literature in its spatial and cultural milieu. © 2025 IEEE.},
  keywords  = {Cultural-historical, generative artificial intelligence, Immersive, literary studies, Literary study, Literary texts, Literature analysis, Textual-analysis, Virtual Reality, Visual elements},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Wang, W. -S.; Lin, C. -J.; Lee, H. -Y.; Huang, Y. -M.; Wu, T. -T.
Integrating feedback mechanisms and ChatGPT for VR-based experiential learning: impacts on reflective thinking and AIoT physical hands-on tasks Journal Article
In: Interactive Learning Environments, vol. 33, no. 2, pp. 1770–1787, 2025, ISSN: 10494820 (ISSN).
Abstract | Links | BibTeX | Tags: AIoT, feedback mechanisms, generative artificial intelligence, physical hands-on tasks, reflective thinking, Virtual Reality
@article{wang_integrating_2025,
  title     = {Integrating feedback mechanisms and {ChatGPT} for {VR}-based experiential learning: impacts on reflective thinking and {AIoT} physical hands-on tasks},
  author    = {Wang, W.-S. and Lin, C.-J. and Lee, H.-Y. and Huang, Y.-M. and Wu, T.-T.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105001238541&doi=10.1080%2f10494820.2024.2375644&partnerID=40&md5=136ac17a3a460dfd98cf7baa6439062e},
  doi       = {10.1080/10494820.2024.2375644},
  issn      = {1049-4820},
  year      = {2025},
  date      = {2025-01-01},
  journal   = {Interactive Learning Environments},
  volume    = {33},
  number    = {2},
  pages     = {1770--1787},
  abstract  = {This study investigates the application of Virtual Reality (VR) in the educational field, particularly its integration with GAI technologies such as ChatGPT to enhance the learning experience. The research indicates that while VR provides an immersive learning environment fostering student interaction and interest, the lack of a structured learning framework and personalized feedback may limit its educational effectiveness and potentially affect the transfer of VR-learned knowledge to physical hands-on tasks. Hence, it calls for the provision of more targeted and personalized feedback in VR learning environments. Through a randomized controlled trial (RCT), this study collected data from 77 university students, integrating experiential learning in VR for acquiring AIoT knowledge and practical skills, and compared the effects of traditional feedback versus GPT feedback on promoting reflective thinking, learning motivation, cognitive levels, and AIoT hands-on abilities among the students. The results show that the group receiving GPT feedback significantly outperformed the control group across these learning indicators, demonstrating the effectiveness of GAI technologies in providing personalized learning support, facilitating deep learning, and enhancing educational outcomes. This study offers new insights into the integration of GAI technology in VR learning environments, paving new pathways for the development and application of future educational technologies. © 2024 Informa UK Limited, trading as Taylor & Francis Group.},
  keywords  = {AIoT, feedback mechanisms, generative artificial intelligence, physical hands-on tasks, reflective thinking, Virtual Reality},
  pubstate  = {published},
  tppubtype = {article}
}
Yokoyama, N.; Kimura, R.; Nakajima, T.
ViGen: Defamiliarizing Everyday Perception for Discovering Unexpected Insights Proceedings Article
In: H., Degen; S., Ntoa (Ed.): Lect. Notes Comput. Sci., pp. 397–417, Springer Science and Business Media Deutschland GmbH, 2025, ISBN: 03029743 (ISSN); 978-303193417-9 (ISBN).
Abstract | Links | BibTeX | Tags: Artful Expression, Artistic technique, Augmented Reality, Daily lives, Defamiliarization, Dynamic environments, Engineering education, Enhanced vision systems, Generative AI, generative artificial intelligence, Human augmentation, Human engineering, Human-AI Interaction, Human-artificial intelligence interaction, Semi-transparent
@inproceedings{yokoyama_vigen_2025,
  title     = {{ViGen}: Defamiliarizing Everyday Perception for Discovering Unexpected Insights},
  author    = {Yokoyama, N. and Kimura, R. and Nakajima, T.},
  editor    = {Degen, H. and Ntoa, S.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105007760030&doi=10.1007%2f978-3-031-93418-6_26&partnerID=40&md5=dee6f54688284313a45579aab5f934d6},
  doi       = {10.1007/978-3-031-93418-6_26},
  issn      = {0302-9743},
  isbn      = {978-3-031-93417-9},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Lect. Notes Comput. Sci.},
  volume    = {15821 LNAI},
  pages     = {397--417},
  publisher = {Springer Science and Business Media Deutschland GmbH},
  abstract  = {This paper proposes ViGen, an Augmented Reality (AR) and Artificial Intelligence (AI)-enhanced vision system designed to facilitate defamiliarization in daily life. Humans rely on sight to gather information, think, and act, yet the act of seeing often becomes passive in daily life. Inspired by Victor Shklovsky’s concept of defamiliarization and the artistic technique of photomontage, ViGen seeks to disrupt habitual perceptions. It achieves this by overlaying semi-transparent, AI-generated images, created based on the user’s view, through an AR display. The system is evaluated by several structured interviews, in which participants experience ViGen in three different scenarios. Results indicate that AI-generated visuals effectively supported defamiliarization by transforming ordinary scenes into unfamiliar ones. However, the user’s familiarity with a place plays a significant role. Also, while the feature that adjusts the transparency of overlaid images enhances safety, its limitations in dynamic environments suggest the need for further research across diverse cultural and geographic contexts. This study demonstrates the potential of AI-augmented vision systems to stimulate new ways of seeing, offering insights for further development in visual augmentation technologies. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2025.},
  keywords  = {Artful Expression, Artistic technique, Augmented Reality, Daily lives, Defamiliarization, Dynamic environments, Engineering education, Enhanced vision systems, Generative AI, generative artificial intelligence, Human augmentation, Human engineering, Human-AI Interaction, Human-artificial intelligence interaction, Semi-transparent},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Casas, L.; Hannah, S.; Mitchell, K.
HoloJig: Interactive Spoken Prompt Specified Generative AI Environments Journal Article
In: IEEE Computer Graphics and Applications, vol. 45, no. 2, pp. 69–77, 2025, ISSN: 02721716 (ISSN).
Abstract | Links | BibTeX | Tags: 3-D rendering, Article, Collaborative workspace, customer experience, Economic and social effects, generative artificial intelligence, human, Immersive, Immersive environment, parallax, Real- time, simulation, Simulation training, speech, Time based, Virtual environments, Virtual Reality, Virtual reality experiences, Virtual spaces, VR systems
@article{casas_holojig_2025,
  title     = {{HoloJig}: Interactive Spoken Prompt Specified Generative {AI} Environments},
  author    = {Casas, L. and Hannah, S. and Mitchell, K.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105001182100&doi=10.1109%2fMCG.2025.3553780&partnerID=40&md5=ec5dc44023314b6f9221169357d81dcd},
  doi       = {10.1109/MCG.2025.3553780},
  issn      = {0272-1716},
  year      = {2025},
  date      = {2025-01-01},
  journal   = {IEEE Computer Graphics and Applications},
  volume    = {45},
  number    = {2},
  pages     = {69--77},
  abstract  = {HoloJig offers an interactive, speech-to-virtual reality (VR), VR experience that generates diverse environments in real time based on live spoken descriptions. Unlike traditional VR systems that rely on prebuilt assets, HoloJig dynamically creates personalized and immersive virtual spaces with depth-based parallax 3-D rendering, allowing users to define the characteristics of their immersive environment through verbal prompts. This generative approach opens up new possibilities for interactive experiences, including simulations, training, collaborative workspaces, and entertainment. In addition to speech-to-VR environment generation, a key innovation of HoloJig is its progressive visual transition mechanism, which smoothly dissolves between previously generated and newly requested environments, mitigating the delay caused by neural computations. This feature ensures a seamless and continuous user experience, even as new scenes are being rendered on remote servers. © 1981-2012 IEEE.},
  keywords  = {3-D rendering, Article, Collaborative workspace, customer experience, Economic and social effects, generative artificial intelligence, human, Immersive, Immersive environment, parallax, Real- time, simulation, Simulation training, speech, Time based, Virtual environments, Virtual Reality, Virtual reality experiences, Virtual spaces, VR systems},
  pubstate  = {published},
  tppubtype = {article}
}
Dang, B.; Huynh, L.; Gul, F.; Rosé, C.; Järvelä, S.; Nguyen, A.
Human–AI collaborative learning in mixed reality: Examining the cognitive and socio-emotional interactions Journal Article
In: British Journal of Educational Technology, 2025, ISSN: 00071013 (ISSN).
Abstract | Links | BibTeX | Tags: Artificial intelligence agent, Collaborative learning, Educational robots, Embodied agent, Emotional intelligence, Emotional interactions, Generative adversarial networks, generative artificial intelligence, Hierarchical clustering, Human–AI collaboration, Interaction pattern, Mixed reality, ordered network analysis, Ordered network analyze, Social behavior, Social interactions, Social psychology, Students, Supervised learning, Teaching
@article{dang_humanai_2025,
  title     = {Human–{AI} collaborative learning in mixed reality: Examining the cognitive and socio-emotional interactions},
  author    = {Dang, B. and Huynh, L. and Gul, F. and Rosé, C. and Järvelä, S. and Nguyen, A.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105007896240&doi=10.1111%2fbjet.13607&partnerID=40&md5=b58a641069461f8880d1ee0adcf42457},
  doi       = {10.1111/bjet.13607},
  issn      = {0007-1013},
  year      = {2025},
  date      = {2025-01-01},
  journal   = {British Journal of Educational Technology},
  abstract  = {The rise of generative artificial intelligence (GAI), especially with multimodal large language models like GPT-4o, sparked transformative potential and challenges for learning and teaching. With potential as a cognitive offloading tool, GAI can enable learners to focus on higher-order thinking and creativity. Yet, this also raises questions about integration into traditional education due to the limited research on learners' interactions with GAI. Some studies with GAI focus on text-based human–AI interactions, while research on embodied GAI in immersive environments like mixed reality (MR) remains unexplored. To address this, this study investigates interaction dynamics between learners and embodied GAI agents in MR, examining cognitive and socio-emotional interactions during collaborative learning. We investigated the paired interactive patterns between a student and an embodied GAI agent in MR, based on data from 26 higher education students with 1317 recorded activities. Data were analysed using a multi-layered learning analytics approach, including quantitative content analysis, sequence analysis via hierarchical clustering and pattern analysis through ordered network analysis (ONA). Our findings identified two interaction patterns: type (1) AI-led Supported Exploratory Questioning (AISQ) and type (2) Learner-Initiated Inquiry (LII) group. Despite their distinction in characteristic, both types demonstrated comparable levels of socio-emotional engagement and exhibited meaningful cognitive engagement, surpassing the superficial content reproduction that can be observed in interactions with GPT models. This study contributes to the human–AI collaboration and learning studies, extending understanding to learning in MR environments and highlighting implications for designing AI-based educational tools. 
Practitioner notes What is already known about this topic Socio-emotional interactions are fundamental to cognitive processes and play a critical role in collaborative learning. Generative artificial intelligence (GAI) holds transformative potential for education but raises questions about how learners interact with such technology. Most existing research focuses on text-based interactions with GAI; there is limited empirical evidence on how embodied GAI agents within immersive environments like Mixed Reality (MR) influence the cognitive and socio-emotional interactions for learning and regulation. What this paper adds Provides first empirical insights into cognitive and socio-emotional interaction patterns between learners and embodied GAI agents in MR environments. Identifies two distinct interaction patterns: AISQ type (structured, guided, supportive) and LII type (inquiry-driven, exploratory, engaging), demonstrating how these patterns influence collaborative learning dynamics. Shows that both interaction types facilitate meaningful cognitive engagement, moving beyond superficial content reproduction commonly associated with GAI interactions. Implications for practice and/or policy Insights from the identified interaction patterns can inform the design of teaching strategies that effectively integrate embodied GAI agents to enhance both cognitive and socio-emotional engagement. Findings can guide the development of AI-based educational tools that capitalise on the capabilities of embodied GAI agents, supporting a balance between structured guidance and exploratory learning. Highlights the need for ethical considerations in adopting embodied GAI agents, particularly regarding the human-like realism of these agents and potential impacts on learner dependency and interaction norms. © 2025 The Author(s). British Journal of Educational Technology published by John Wiley & Sons Ltd on behalf of British Educational Research Association.},
  keywords  = {Artificial intelligence agent, Collaborative learning, Educational robots, Embodied agent, Emotional intelligence, Emotional interactions, Generative adversarial networks, generative artificial intelligence, Hierarchical clustering, Human–AI collaboration, Interaction pattern, Mixed reality, ordered network analysis, Ordered network analyze, Social behavior, Social interactions, Social psychology, Students, Supervised learning, Teaching},
  pubstate  = {published},
  tppubtype = {article}
}
Sousa, R. T.; Oliveira, E. A. M.; Cintra, L. M. F.; Filho, A. R. G.
Transformative Technologies for Rehabilitation: Leveraging Immersive and AI-Driven Solutions to Reduce Recidivism and Promote Decent Work Proceedings Article
In: Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW, pp. 168–171, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 979-833151484-6 (ISBN).
Abstract | Links | BibTeX | Tags: AI- Driven Rehabilitation, Artificial intelligence- driven rehabilitation, Emotional intelligence, Engineering education, Generative AI, generative artificial intelligence, Immersive, Immersive technologies, Immersive Technology, Language Model, Large language model, large language models, Skills development, Social Reintegration, Social skills, Sociology, Vocational training
@inproceedings{sousa_transformative_2025,
  title     = {Transformative Technologies for Rehabilitation: Leveraging Immersive and {AI}-Driven Solutions to Reduce Recidivism and Promote Decent Work},
  author    = {Sousa, R. T. and Oliveira, E. A. M. and Cintra, L. M. F. and Filho, A. R. G.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005140551&doi=10.1109%2fVRW66409.2025.00042&partnerID=40&md5=89da6954863a272d48c0d8da3760bfb6},
  doi       = {10.1109/VRW66409.2025.00042},
  isbn      = {979-8-3315-1484-6},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW},
  pages     = {168--171},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {The reintegration of incarcerated individuals into society presents significant challenges, particularly in addressing barriers related to vocational training, social skill development, and emotional rehabilitation. Immersive technologies, such as Virtual Reality and Augmented Reality, combined with generative Artificial Intelligence (AI) and Large Language Models, offer innovative opportunities to enhance these areas. These technologies create practical, controlled environments for skill acquisition and behavioral training, while generative AI enables dynamic, personalized, and adaptive experiences. This paper explores the broader potential of these integrated technologies in supporting rehabilitation, reducing recidivism, and fostering sustainable employment opportunities and these initiatives align with the overarching equity objective of ensuring Decent Work for All, reinforcing the commitment to inclusive and equitable progress across diverse communities, through the transformative potential of immersive and AI-driven systems in correctional systems. © 2025 IEEE.},
  keywords  = {AI- Driven Rehabilitation, Artificial intelligence- driven rehabilitation, Emotional intelligence, Engineering education, Generative AI, generative artificial intelligence, Immersive, Immersive technologies, Immersive Technology, Language Model, Large language model, large language models, Skills development, Social Reintegration, Social skills, Sociology, Vocational training},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Shi, J.; Jain, R.; Chi, S.; Doh, H.; Chi, H. -G.; Quinn, A. J.; Ramani, K.
CARING-AI: Towards Authoring Context-aware Augmented Reality INstruction through Generative Artificial Intelligence Proceedings Article
In: Conf Hum Fact Comput Syst Proc, Association for Computing Machinery, 2025, ISBN: 979-840071394-1 (ISBN).
Abstract | Links | BibTeX | Tags: 'current, Application scenario, AR application, Augmented Reality, Context-Aware, Contextual information, Generative adversarial networks, generative artificial intelligence, Humanoid avatars, In-situ learning, Learning experiences, Power
@inproceedings{shi_caring-ai_2025,
  title     = {{CARING-AI}: Towards Authoring Context-aware Augmented Reality {INstruction} through Generative Artificial Intelligence},
  author    = {Shi, J. and Jain, R. and Chi, S. and Doh, H. and Chi, H.-G. and Quinn, A. J. and Ramani, K.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005725461&doi=10.1145%2f3706598.3713348&partnerID=40&md5=e88afd8426e020155599ef3b2a044774},
  doi       = {10.1145/3706598.3713348},
  isbn      = {979-8-4007-1394-1},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Conf Hum Fact Comput Syst Proc},
  publisher = {Association for Computing Machinery},
  abstract  = {Context-aware AR instruction enables adaptive and in-situ learning experiences. However, hardware limitations and expertise requirements constrain the creation of such instructions. With recent developments in Generative Artificial Intelligence (Gen-AI), current research tries to tackle these constraints by deploying AI-generated content (AIGC) in AR applications. However, our preliminary study with six AR practitioners revealed that the current AIGC lacks contextual information to adapt to varying application scenarios and is therefore limited in authoring. To utilize the strong generative power of GenAI to ease the authoring of AR instruction while capturing the context, we developed CARING-AI, an AR system to author context-aware humanoid-avatar-based instructions with GenAI. By navigating in the environment, users naturally provide contextual information to generate humanoid-avatar animation as AR instructions that blend in the context spatially and temporally. We showcased three application scenarios of CARING-AI: Asynchronous Instructions, Remote Instructions, and Ad Hoc Instructions based on a design space of AIGC in AR Instructions. With two user studies (N=12), we assessed the system usability of CARING-AI and demonstrated the easiness and effectiveness of authoring with Gen-AI. © 2025 Copyright held by the owner/author(s).},
  keywords  = {'current, Application scenario, AR application, Augmented Reality, Context-Aware, Contextual information, Generative adversarial networks, generative artificial intelligence, Humanoid avatars, In-situ learning, Learning experiences, Power},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Behravan, M.; Matković, K.; Gračanin, D.
Generative AI for Context-Aware 3D Object Creation Using Vision-Language Models in Augmented Reality Proceedings Article
In: Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR, pp. 73–81, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 979-833152157-8 (ISBN).
Abstract | Links | BibTeX | Tags: 3D object, 3D Object Generation, Artificial intelligence systems, Augmented Reality, Capture images, Context-Aware, Generative adversarial networks, Generative AI, generative artificial intelligence, Generative model, Language Model, Object creation, Vision language model, vision language models, Visual languages
@inproceedings{behravan_generative_2025,
  title     = {Generative {AI} for Context-Aware {3D} Object Creation Using Vision-Language Models in Augmented Reality},
  author    = {Behravan, M. and Matković, K. and Gračanin, D.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105000292700&doi=10.1109%2fAIxVR63409.2025.00018&partnerID=40&md5=b40fa769a6b427918c3fcd86f7c52a75},
  doi       = {10.1109/AIxVR63409.2025.00018},
  isbn      = {979-8-3315-2157-8},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR},
  pages     = {73--81},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {We present a novel Artificial Intelligence (AI) system that functions as a designer assistant in augmented reality (AR) environments. Leveraging Vision Language Models (VLMs) like LLaVA and advanced text-to-3D generative models, users can capture images of their surroundings with an Augmented Reality (AR) headset. The system analyzes these images to recommend contextually relevant objects that enhance both functionality and visual appeal. The recommended objects are generated as 3D models and seamlessly integrated into the AR environment for interactive use. Our system utilizes open-source AI models running on local systems to enhance data security and reduce operational costs. Key features include context-aware object suggestions, optimal placement guidance, aesthetic matching, and an intuitive user interface for real-time interaction. Evaluations using the COCO 2017 dataset and real-world AR testing demonstrated high accuracy in object detection and contextual fit rating of 4.1 out of 5. By addressing the challenge of providing context-aware object recommendations in AR, our system expands the capabilities of AI applications in this domain. It enables users to create personalized digital spaces efficiently, leveraging AI for contextually relevant suggestions. © 2025 IEEE.},
  keywords  = {3D object, 3D Object Generation, Artificial intelligence systems, Augmented Reality, Capture images, Context-Aware, Generative adversarial networks, Generative AI, generative artificial intelligence, Generative model, Language Model, Object creation, Vision language model, vision language models, Visual languages},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Angelopoulos, J.; Manettas, C.; Alexopoulos, K.
Industrial Maintenance Optimization Based on the Integration of Large Language Models (LLM) and Augmented Reality (AR) Proceedings Article
In: K., Alexopoulos; S., Makris; P., Stavropoulos (Ed.): Lect. Notes Mech. Eng., pp. 197–205, Springer Science and Business Media Deutschland GmbH, 2025, ISBN: 21954356 (ISSN); 978-303186488-9 (ISBN).
Abstract | Links | BibTeX | Tags: Augmented Reality, Competition, Cost reduction, Critical path analysis, Crushed stone plants, Generative AI, generative artificial intelligence, Human expertise, Industrial equipment, Industrial maintenance, Language Model, Large language model, Maintenance, Maintenance optimization, Maintenance procedures, Manufacturing data processing, Potential errors, Problem oriented languages, Scheduled maintenance, Shopfloors, Solar power plants
@inproceedings{angelopoulos_industrial_2025,
  title     = {Industrial Maintenance Optimization Based on the Integration of Large Language Models ({LLM}) and Augmented Reality ({AR})},
  author    = {Angelopoulos, J. and Manettas, C. and Alexopoulos, K.},
  editor    = {Alexopoulos, K. and Makris, S. and Stavropoulos, P.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105001421726&doi=10.1007%2f978-3-031-86489-6_20&partnerID=40&md5=63be31b9f4dda4aafd6a641630506c09},
  doi       = {10.1007/978-3-031-86489-6_20},
  issn      = {2195-4356},
  isbn      = {978-3-031-86488-9},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Lect. Notes Mech. Eng.},
  pages     = {197--205},
  publisher = {Springer Science and Business Media Deutschland GmbH},
  abstract  = {Traditional maintenance procedures often rely on manual data processing and human expertise, leading to inefficiencies and potential errors. In the context of Industry 4.0 several digital technologies, such as Artificial Intelligence (AI), Big Data Analytics (BDA), and eXtended Reality (XR) have been developed and are constantly being integrated in a plethora of manufacturing activities (including industrial maintenance), in an attempt to minimize human error, facilitate shop floor technicians, reduce costs as well as reduce equipment downtimes. The latest developments in the field of AI point towards Large Language Models (LLM) which can communicate with human operators in an intuitive manner. On the other hand, Augmented Reality, as part of XR technologies, offers useful functionalities for improving user perception and interaction with modern, complex industrial equipment. Therefore, the context of this research work lies in the development and training of an LLM in order to provide suggestions and actionable items for the mitigation of unforeseen events (e.g. equipment breakdowns), in order to facilitate shop-floor technicians during their everyday tasks. Paired with AR visualizations over the physical environment, the technicians will get instructions for performing tasks and checks on the industrial equipment in a manner similar to human-to-human communication. The functionality of the proposed framework extends to the integration of modules for exchanging information with the engineering department towards the scheduling of Maintenance and Repair Operations (MRO) as well as the creation of a repository of historical data in order to constantly retrain and optimize the LLM. © The Author(s) 2025.},
  keywords  = {Augmented Reality, Competition, Cost reduction, Critical path analysis, Crushed stone plants, Generative AI, generative artificial intelligence, Human expertise, Industrial equipment, Industrial maintenance, Language Model, Large language model, Maintenance, Maintenance optimization, Maintenance procedures, Manufacturing data processing, Potential errors, Problem oriented languages, Scheduled maintenance, Shopfloors, Solar power plants},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Chen, J.; Wu, X.; Lan, T.; Li, B.
LLMER: Crafting Interactive Extended Reality Worlds with JSON Data Generated by Large Language Models Journal Article
In: IEEE Transactions on Visualization and Computer Graphics, vol. 31, no. 5, pp. 2715–2724, 2025, ISSN: 10772626 (ISSN).
Abstract | Links | BibTeX | Tags: % reductions, 3D modeling, algorithm, Algorithms, Augmented Reality, Coding errors, Computer graphics, Computer interaction, computer interface, Computer simulation languages, Extended reality, generative artificial intelligence, human, Human users, human-computer interaction, Humans, Imaging, Immersive, Language, Language Model, Large language model, large language models, Metadata, Natural Language Processing, Natural language processing systems, Natural languages, procedures, Script generation, Spatio-temporal data, Three dimensional computer graphics, Three-Dimensional, three-dimensional imaging, User-Computer Interface, Virtual Reality
@article{chen_llmer_2025,
title = {{LLMER}: Crafting Interactive Extended Reality Worlds with {JSON} Data Generated by Large Language Models},
author = {Chen, J. and Wu, X. and Lan, T. and Li, B.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105003825793&doi=10.1109%2fTVCG.2025.3549549&partnerID=40&md5=da4681d0714548e3a7e0c8c3295d2348},
doi = {10.1109/TVCG.2025.3549549},
issn = {1077-2626},
year = {2025},
date = {2025-01-01},
journal = {IEEE Transactions on Visualization and Computer Graphics},
volume = {31},
number = {5},
pages = {2715--2724},
abstract = {The integration of Large Language Models (LLMs) like GPT-4 with Extended Reality (XR) technologies offers the potential to build truly immersive XR environments that interact with human users through natural language, e.g., generating and animating 3D scenes from audio inputs. However, the complexity of XR environments makes it difficult to accurately extract relevant contextual data and scene/object parameters from an overwhelming volume of XR artifacts. It leads to not only increased costs with pay-per-use models, but also elevated levels of generation errors. Moreover, existing approaches focusing on coding script generation are often prone to generation errors, resulting in flawed or invalid scripts, application crashes, and ultimately a degraded user experience. To overcome these challenges, we introduce LLMER, a novel framework that creates interactive XR worlds using JSON data generated by LLMs. Unlike prior approaches focusing on coding script generation, LLMER translates natural language inputs into JSON data, significantly reducing the likelihood of application crashes and processing latency. It employs a multi-stage strategy to supply only the essential contextual information adapted to the user's request and features multiple modules designed for various XR tasks. Our preliminary user study reveals the effectiveness of the proposed system, with over 80% reduction in consumed tokens and around 60% reduction in task completion time compared to state-of-the-art approaches. The analysis of users' feedback also illuminates a series of directions for further optimization. © 1995-2012 IEEE.},
keywords = {% reductions, 3D modeling, algorithm, Algorithms, Augmented Reality, Coding errors, Computer graphics, Computer interaction, computer interface, Computer simulation languages, Extended reality, generative artificial intelligence, human, Human users, human-computer interaction, Humans, Imaging, Immersive, Language, Language Model, Large language model, large language models, Metadata, Natural Language Processing, Natural language processing systems, Natural languages, procedures, Script generation, Spatio-temporal data, Three dimensional computer graphics, Three-Dimensional, three-dimensional imaging, User-Computer Interface, Virtual Reality},
pubstate = {published},
tppubtype = {article}
}
Suzuki, R.; Gonzalez-Franco, M.; Sra, M.; Lindlbauer, D.
Everyday AR through AI-in-the-Loop Proceedings Article
In: Conf Hum Fact Comput Syst Proc, Association for Computing Machinery, 2025, ISBN: 979-840071395-8 (ISBN).
Abstract | Links | BibTeX | Tags: Augmented Reality, Augmented reality content, Augmented reality hardware, Computer vision, Content creation, Context-Aware, Generative AI, generative artificial intelligence, Human-AI Interaction, Human-artificial intelligence interaction, Language Model, Large language model, large language models, machine learning, Machine-learning, Mixed reality, Virtual Reality, Virtualization
@inproceedings{suzuki_everyday_2025,
title = {Everyday {AR} through {AI}-in-the-Loop},
author = {Suzuki, R. and Gonzalez-Franco, M. and Sra, M. and Lindlbauer, D.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005752990&doi=10.1145%2f3706599.3706741&partnerID=40&md5=56b5e447819dde7aa4a29f8e3899e535},
doi = {10.1145/3706599.3706741},
isbn = {979-840071395-8},
year = {2025},
date = {2025-01-01},
booktitle = {Conf Hum Fact Comput Syst Proc},
publisher = {Association for Computing Machinery},
abstract = {This workshop brings together experts and practitioners from augmented reality (AR) and artificial intelligence (AI) to shape the future of AI-in-the-loop everyday AR experiences. With recent advancements in both AR hardware and AI capabilities, we envision that everyday AR—always-available and seamlessly integrated into users’ daily environments—is becoming increasingly feasible. This workshop will explore how AI can drive such everyday AR experiences. We discuss a range of topics, including adaptive and context-aware AR, generative AR content creation, always-on AI assistants, AI-driven accessible design, and real-world-oriented AI agents. Our goal is to identify the opportunities and challenges in AI-enabled AR, focusing on creating novel AR experiences that seamlessly blend the digital and physical worlds. Through the workshop, we aim to foster collaboration, inspire future research, and build a community to advance the research field of AI-enhanced AR. © 2025 Copyright held by the owner/author(s).},
keywords = {Augmented Reality, Augmented reality content, Augmented reality hardware, Computer vision, Content creation, Context-Aware, Generative AI, generative artificial intelligence, Human-AI Interaction, Human-artificial intelligence interaction, Language Model, Large language model, large language models, machine learning, Machine-learning, Mixed reality, Virtual Reality, Virtualization},
pubstate = {published},
tppubtype = {inproceedings}
}
Stacchio, L.; Balloni, E.; Frontoni, E.; Paolanti, M.; Zingaretti, P.; Pierdicca, R.
MineVRA: Exploring the Role of Generative AI-Driven Content Development in XR Environments through a Context-Aware Approach Journal Article
In: IEEE Transactions on Visualization and Computer Graphics, vol. 31, no. 5, pp. 3602–3612, 2025, ISSN: 10772626 (ISSN).
Abstract | Links | BibTeX | Tags: adult, Article, Artificial intelligence, Computer graphics, Computer vision, Content Development, Contents development, Context-Aware, Context-aware approaches, Extended reality, female, Generative adversarial networks, Generative AI, generative artificial intelligence, human, Human-in-the-loop, Immersive, Immersive environment, male, Multi-modal, User need, Virtual environments, Virtual Reality
@article{stacchio_minevra_2025,
title = {{MineVRA}: Exploring the Role of Generative {AI}-Driven Content Development in {XR} Environments through a Context-Aware Approach},
author = {Stacchio, L. and Balloni, E. and Frontoni, E. and Paolanti, M. and Zingaretti, P. and Pierdicca, R.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105003746367&doi=10.1109%2fTVCG.2025.3549160&partnerID=40&md5=70b162b574eebbb0cb71db871aa787e1},
doi = {10.1109/TVCG.2025.3549160},
issn = {1077-2626},
year = {2025},
date = {2025-01-01},
journal = {IEEE Transactions on Visualization and Computer Graphics},
volume = {31},
number = {5},
pages = {3602--3612},
abstract = {The convergence of Artificial Intelligence (AI), Computer Vision (CV), Computer Graphics (CG), and Extended Reality (XR) is driving innovation in immersive environments. A key challenge in these environments is the creation of personalized 3D assets, traditionally achieved through manual modeling, a time-consuming process that often fails to meet individual user needs. More recently, Generative AI (GenAI) has emerged as a promising solution for automated, context-aware content generation. In this paper, we present MineVRA (Multimodal generative artificial iNtelligence for contExt-aware Virtual Reality Assets), a novel Human-In-The-Loop (HITL) XR framework that integrates GenAI to facilitate coherent and adaptive 3D content generation in immersive scenarios. To evaluate the effectiveness of this approach, we conducted a comparative user study analyzing the performance and user satisfaction of GenAI-generated 3D objects compared to those generated by Sketchfab in different immersive contexts. The results suggest that GenAI can significantly complement traditional 3D asset libraries, with valuable design implications for the development of human-centered XR environments. © 1995-2012 IEEE.},
keywords = {adult, Article, Artificial intelligence, Computer graphics, Computer vision, Content Development, Contents development, Context-Aware, Context-aware approaches, Extended reality, female, Generative adversarial networks, Generative AI, generative artificial intelligence, human, Human-in-the-loop, Immersive, Immersive environment, male, Multi-modal, User need, Virtual environments, Virtual Reality},
pubstate = {published},
tppubtype = {article}
}
Wang, C.; Sundstedt, V.; Garro, V.
Generative Artificial Intelligence for Immersive Analytics Proceedings Article
In: T., Bashford-Rogers; D., Meneveaux; M., Ammi; M., Ziat; S., Jänicke; H., Purchase; P., Radeva; A., Furnari; K., Bouatouch; A.A., Sousa (Ed.): Proc. Int. Jt. Conf. Comput. Vis. Imaging Comput. Graph. Theory Appl., pp. 938–946, Science and Technology Publications, Lda, 2025, ISBN: 21845921 (ISSN).
Abstract | Links | BibTeX | Tags: Extended reality, generative artificial intelligence, Immersive analytics, Visualization
@inproceedings{wang_generative_2025,
title = {Generative Artificial Intelligence for Immersive Analytics},
author = {Wang, C. and Sundstedt, V. and Garro, V.},
editor = {Bashford-Rogers, T. and Meneveaux, D. and Ammi, M. and Ziat, M. and Jänicke, S. and Purchase, H. and Radeva, P. and Furnari, A. and Bouatouch, K. and Sousa, A. A.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105001960708&doi=10.5220%2f0013308400003912&partnerID=40&md5=cb416a11c795ea8081730f6f339a0b4b},
doi = {10.5220/0013308400003912},
issn = {2184-5921},
year = {2025},
date = {2025-01-01},
booktitle = {Proc. Int. Jt. Conf. Comput. Vis. Imaging Comput. Graph. Theory Appl.},
volume = {1},
pages = {938--946},
publisher = {Science and Technology Publications, Lda},
abstract = {Generative artificial intelligence (GenAI) models have advanced various applications with their ability to generate diverse forms of information, including text, images, audio, video, and 3D models. In visual computing, their primary applications have focused on creating graphic content and enabling data visualization on traditional desktop interfaces, which help automate visual analytics (VA) processes. With the rise of affordable immersive technologies, such as virtual reality (VR), augmented reality (AR), and mixed reality (MR), immersive analytics (IA) has been an emerging field offering unique opportunities for deeper engagement and understanding of complex data in immersive environments (IEs). However, IA system development remains resource-intensive and requires significant expertise, while integrating GenAI capabilities into IA is still under early exploration. Therefore, based on an analysis of recent publications in these fields, this position paper investigates how GenAI can support future IA systems for more effective data exploration with immersive experiences. Specifically, we discuss potential directions and key issues concerning future GenAI-supported IA applications. © 2025 by SCITEPRESS–Science and Technology Publications, Lda.},
keywords = {Extended reality, generative artificial intelligence, Immersive analytics, Visualization},
pubstate = {published},
tppubtype = {inproceedings}
}
Wei, X.; Wang, L.; Lee, L. -K.; Liu, R.
Multiple Generative AI Pedagogical Agents in Augmented Reality Environments: A Study on Implementing the 5E Model in Science Education Journal Article
In: Journal of Educational Computing Research, vol. 63, no. 2, pp. 336–371, 2025, ISSN: 07356331 (ISSN).
Abstract | Links | BibTeX | Tags: 5E learning model, Augmented Reality, elementary science education, generative artificial intelligence, Pedagogical agents
@article{wei_multiple_2025,
title = {Multiple Generative {AI} Pedagogical Agents in Augmented Reality Environments: A Study on Implementing the {5E} Model in Science Education},
author = {Wei, X. and Wang, L. and Lee, L.-K. and Liu, R.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85211165915&doi=10.1177%2f07356331241305519&partnerID=40&md5=ab592abf16398732391a5dd3bd4ca7ed},
doi = {10.1177/07356331241305519},
issn = {0735-6331},
year = {2025},
date = {2025-01-01},
journal = {Journal of Educational Computing Research},
volume = {63},
number = {2},
pages = {336--371},
abstract = {Notwithstanding the growing advantages of incorporating Augmented Reality (AR) in science education, the pedagogical use of AR combined with Pedagogical Agents (PAs) remains underexplored. Additionally, few studies have examined the integration of Generative Artificial Intelligence (GAI) into science education to create GAI-enhanced PAs (GPAs) that enrich the learning experiences. To address these gaps, this study designed and implemented a GPA-enhanced 5E model within AR environments to scaffold students’ science learning. A mixed-methods design was conducted to investigate the effectiveness of the proposed approach on students’ academic achievement, cognitive load, and their perceptions of GPAs as learning aids through using the 5E model. Sixty sixth-grade students from two complete classes were randomly assigned to either an experimental group engaged in AR science learning with a GPA-enhanced 5E approach or a control group that followed the traditional 5E method. The findings revealed that the GPA-enhanced 5E approach in AR environments significantly improved students’ academic achievement and decreased cognitive load. Furthermore, students in the experimental group reported positive perceptions of the GPA-enhanced 5E method during the AR science lessons. The findings offer valuable insights for instructional designers and educators who leverage advanced educational technologies to support science learning aligned with constructivist principles. © The Author(s) 2024.},
keywords = {5E learning model, Augmented Reality, elementary science education, generative artificial intelligence, Pedagogical agents},
pubstate = {published},
tppubtype = {article}
}
Abdelmagid, A. S.; Jabli, N. M.; Al-Mohaya, A. Y.; Teleb, A. A.
In: Sustainability (Switzerland), vol. 17, no. 12, 2025, ISSN: 20711050 (ISSN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, digitization, e-entrepreneurship, entrepreneur, generative artificial intelligence, green digital economy, green economy, higher education, Learning, Metaverse, Sustainable development
@article{abdelmagid_integrating_2025,
title = {Integrating Interactive Metaverse Environments and Generative Artificial Intelligence to Promote the Green Digital Economy and e-Entrepreneurship in Higher Education},
author = {Abdelmagid, A. S. and Jabli, N. M. and Al-Mohaya, A. Y. and Teleb, A. A.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105008981835&doi=10.3390%2fsu17125594&partnerID=40&md5=0eaea40f26536c05c29c7b3f0d42d37d},
doi = {10.3390/su17125594},
issn = {2071-1050},
year = {2025},
date = {2025-01-01},
journal = {Sustainability (Switzerland)},
volume = {17},
number = {12},
abstract = {The rapid evolution of the Fourth Industrial Revolution has significantly transformed educational practices, necessitating the integration of advanced technologies into higher education to address contemporary sustainability challenges. This study explores the integration of interactive metaverse environments and generative artificial intelligence (GAI) in promoting the green digital economy and developing e-entrepreneurship skills among graduate students. Grounded in a quasi-experimental design, the research was conducted with a sample of 25 postgraduate students enrolled in the “Computers in Education” course at King Khalid University. A 3D immersive learning environment (FrameVR) was combined with GAI platforms (ChatGPT version 4.0, Elai.io version 2.5, Tome version 1.3) to create an innovative educational experience. Data were collected using validated instruments, including the Green Digital Economy Scale, the e-Entrepreneurship Scale, and a digital product evaluation rubric. The findings revealed statistically significant improvements in students’ awareness of green digital concepts, entrepreneurial competencies, and their ability to produce sustainable digital products. The study highlights the potential of immersive virtual learning environments and AI-driven content creation tools in enhancing digital literacy and sustainability-oriented innovation. It also underscores the urgent need to update educational strategies and curricula to prepare future professionals capable of navigating and shaping green digital economies. This research provides a practical and replicable model for universities seeking to embed sustainability through emerging technologies, supporting broader goals such as SDG 4 (Quality Education) and SDG 9 (Industry, Innovation, and Infrastructure). © 2025 by the authors.},
keywords = {Artificial intelligence, digitization, e-entrepreneurship, entrepreneur, generative artificial intelligence, green digital economy, green economy, higher education, Learning, Metaverse, Sustainable development},
pubstate = {published},
tppubtype = {article}
}
2024
Domenichini, D.; Bucchiarone, A.; Chiarello, F.; Schiavo, G.; Fantoni, G.
An AI-Driven Approach for Enhancing Engagement and Conceptual Understanding in Physics Education Proceedings Article
In: IEEE Global Eng. Edu. Conf., EDUCON, IEEE Computer Society, 2024, ISBN: 21659559 (ISSN); 979-835039402-3 (ISBN).
Abstract | Links | BibTeX | Tags: Adaptive Learning, Artificial intelligence, Artificial intelligence in education, Artificial Intelligence in Education (AIED), Conceptual Understanding, Educational System, Educational systems, Gamification, Generative AI, generative artificial intelligence, Learning Activity, Learning systems, Physics Education, Teachers', Teaching, Virtual Reality
@inproceedings{domenichini_ai-driven_2024,
title = {An {AI}-Driven Approach for Enhancing Engagement and Conceptual Understanding in Physics Education},
author = {Domenichini, D. and Bucchiarone, A. and Chiarello, F. and Schiavo, G. and Fantoni, G.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85199035695&doi=10.1109%2fEDUCON60312.2024.10578670&partnerID=40&md5=4cf9f89e97664ae6d618a90f2dbc23e0},
doi = {10.1109/EDUCON60312.2024.10578670},
issn = {2165-9559},
isbn = {979-835039402-3},
year = {2024},
date = {2024-01-01},
booktitle = {IEEE Global Eng. Edu. Conf., EDUCON},
publisher = {IEEE Computer Society},
abstract = {This Work in Progress paper introduces the design of an innovative educational system that leverages Artificial Intelligence (AI) to address challenges in physics education. The primary objective is to create a system that dynamically adapts to the individual needs and preferences of students while maintaining user-friendliness for teachers, allowing them to tailor their teaching methods. The emphasis is on fostering motivation and engagement, achieved through the implementation of a gamified virtual environment and a strong focus on personalization. Our aim is to develop a system capable of autonomously generating learning activities and constructing effective learning paths, all under the supervision and interaction of teachers. The generation of learning activities is guided by educational taxonomies that delineate and categorize the cognitive processes involved in these activities. The proposed educational system seeks to address challenges identified by Physics Education Research (PER), which offers valuable insights into how individuals learn physics and provides strategies to enhance the overall quality of physics education. Our specific focus revolves around two crucial aspects: concentrating on the conceptual understanding of physics concepts and processes, and fostering knowledge integration and coherence across various physics topics. These aspects are deemed essential for cultivating enduring knowledge and facilitating practical applications in the field of physics. © 2024 IEEE.},
keywords = {Adaptive Learning, Artificial intelligence, Artificial intelligence in education, Artificial Intelligence in Education (AIED), Conceptual Understanding, Educational System, Educational systems, Gamification, Generative AI, generative artificial intelligence, Learning Activity, Learning systems, Physics Education, Teachers', Teaching, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Gao, H.; Lindquist, M.; Vergel, R. S.
AI-Driven Avatars in Immersive 3D Environments for Education Workflow and Case Study of the Temple of Demeter, Greece Journal Article
In: Journal of Digital Landscape Architecture, vol. 2024, no. 9, pp. 640–651, 2024, ISSN: 23674253 (ISSN).
Abstract | Links | BibTeX | Tags: AI-driven avatars, generative artificial intelligence, heritage preservation, immersive experience, Virtual Reality
@article{gao_ai-driven_2024,
title = {{AI}-Driven Avatars in Immersive {3D} Environments for Education: Workflow and Case Study of the Temple of {Demeter}, {Greece}},
author = {Gao, H. and Lindquist, M. and Vergel, R. S.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85195600841&doi=10.14627%2f537752059&partnerID=40&md5=a9d79077ada5560e8985a5f76aaf5756},
doi = {10.14627/537752059},
issn = {2367-4253},
year = {2024},
date = {2024-01-01},
journal = {Journal of Digital Landscape Architecture},
volume = {2024},
number = {9},
pages = {640--651},
abstract = {Generative Artificial Intelligence (AI) and Virtual Reality (VR) technologies are changing education and offer new opportunities for how people interact with environments. Technological advances over the past few decades have lowered barriers to creating virtual environments; however, there are still challenges, particularly when creating realistic virtual environments of real places. Realism is important as the fidelity of virtual environments influences user experience. In addition, methods and techniques that can facilitate ease of interacting with these environments are needed to streamline the user experience. One promising way to do this is incorporating AI-driven avatars into realistic scenes, allowing users to use natural language immersive learning experiences focused on sustainability education that incorporate realistic scenes to interact with and learn about the landscapes. To this end, we have developed workflows for design scenarios and natural interaction with AI avatars. This study created point cloud digital 3D models from photogrammetry and incorporated these into Unreal Engine 5. We then integrated generative AI avatars into the environment, enabling natural language interaction between users and an AI tutor. The integration facilitates interactive participation and enables high-precision digital reproduction of the physical environment. The novel proposed workflow is presented via a case study for a virtual study abroad experience in Naxos, Greece, using an AI-driven tutor to educate about the historical aspects of the island. This project provides the beneficial learning experience of study abroad experiences without the economic and environmental costs of sending students on field excursions. We recommend constructing immersive education experiences using real-world environments and natural AI-driven conversations and demonstrating its potential to revolutionize social interaction, historical heritage preservation, and sustainable pedagogy. © Wichmann Verlag, VDE VERLAG GMBH · Berlin · Offenbach.},
keywords = {AI-driven avatars, generative artificial intelligence, heritage preservation, immersive experience, Virtual Reality},
pubstate = {published},
tppubtype = {article}
}
Haramina, E.; Paladin, M.; Petričušić, Z.; Posarić, F.; Drobnjak, A.; Botički, I.
Learning Algorithms Concepts in a Virtual Reality Escape Room Proceedings Article
In: S., Babic; Z., Car; M., Cicin-Sain; D., Cisic; P., Ergovic; T.G., Grbac; V., Gradisnik; S., Gros; A., Jokic; A., Jovic; D., Jurekovic; T., Katulic; M., Koricic; V., Mornar; J., Petrovic; K., Skala; D., Skvorc; V., Sruk; M., Svaco; E., Tijan; N., Vrcek; B., Vrdoljak (Ed.): ICT Electron. Conv., MIPRO - Proc., pp. 2057–2062, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-835038249-5 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Computational complexity, Computer generated three dimensional environment, E-Learning, Education, Escape room, Extended reality, generative artificial intelligence, Learn+, Learning, Learning algorithms, Learning systems, Puzzle, puzzles, user experience, User study, User testing, Users' experiences, Virtual Reality
@inproceedings{haramina_learning_2024,
title = {Learning Algorithms Concepts in a Virtual Reality Escape Room},
author = {Haramina, E. and Paladin, M. and Petričušić, Z. and Posarić, F. and Drobnjak, A. and Botički, I.},
editor = {Babic, S. and Car, Z. and Cicin-Sain, M. and Cisic, D. and Ergovic, P. and Grbac, T. G. and Gradisnik, V. and Gros, S. and Jokic, A. and Jovic, A. and Jurekovic, D. and Katulic, T. and Koricic, M. and Mornar, V. and Petrovic, J. and Skala, K. and Skvorc, D. and Sruk, V. and Svaco, M. and Tijan, E. and Vrcek, N. and Vrdoljak, B.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85198221737&doi=10.1109%2fMIPRO60963.2024.10569447&partnerID=40&md5=8a94d92d989d1f0feb84eba890945de8},
doi = {10.1109/MIPRO60963.2024.10569447},
isbn = {979-835038249-5},
year = {2024},
date = {2024-01-01},
booktitle = {ICT Electron. Conv., MIPRO - Proc.},
pages = {2057--2062},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Although the standard way to learn algorithms is by coding, learning through games is another way to obtain knowledge while having fun. Virtual reality is a computer-generated three-dimensional environment in which the player is fully immersed by having external stimuli mostly blocked out. In the game presented in this paper, players are enhancing their algorithms skills by playing an escape room game. The goal is to complete the room within the designated time by solving puzzles. The puzzles change for every playthrough with the use of generative artificial intelligence to provide every player with a unique experience. There are multiple types of puzzles such as time complexity, sorting algorithms, searching algorithms, and code execution. The paper presents the results of a study indicating students' preference for learning through gaming as a method of acquiring algorithms knowledge. © 2024 IEEE.},
keywords = {Artificial intelligence, Computational complexity, Computer generated three dimensional environment, E-Learning, Education, Escape room, Extended reality, generative artificial intelligence, Learn+, Learning, Learning algorithms, Learning systems, Puzzle, puzzles, user experience, User study, User testing, Users' experiences, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Venkatachalam, N.; Rayana, M.; Vignesh, S. Bala; Prathamesh, S.
Voice-Driven Panoramic Imagery: Real-Time Generative AI for Immersive Experiences Proceedings Article
In: Int. Conf. Intell. Data Commun. Technol. Internet Things, IDCIoT, pp. 1133–1138, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-835032753-3 (ISBN).
Abstract | Links | BibTeX | Tags: Adaptive Visual Experience, First person, First-Person view, generative artificial intelligence, Generative Artificial Intelligence (AI), Image processing, Immersive, Immersive visual scene, Immersive Visual Scenes, Language processing, Natural Language Processing, Natural Language Processing (NLP), Natural language processing systems, Natural languages, Panoramic Images, Patient treatment, Personalized environment, Personalized Environments, Phobia Treatment, Prompt, prompts, Psychological intervention, Psychological Interventions, Real-Time Synthesis, User interaction, User interfaces, Virtual experience, Virtual Experiences, Virtual Reality, Virtual Reality (VR), Virtual-reality headsets, Visual experiences, Visual languages, Visual scene, Voice command, Voice commands, VR Headsets
@inproceedings{venkatachalam_voice-driven_2024,
title = {Voice-Driven Panoramic Imagery: Real-Time Generative {AI} for Immersive Experiences},
author = {Venkatachalam, N. and Rayana, M. and Vignesh, S. Bala and Prathamesh, S.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85190121845&doi=10.1109%2fIDCIoT59759.2024.10467441&partnerID=40&md5=6594fbab013d9156b79a887f0d7209cb},
doi = {10.1109/IDCIoT59759.2024.10467441},
isbn = {979-835032753-3},
year = {2024},
date = {2024-01-01},
booktitle = {Int. Conf. Intell. Data Commun. Technol. Internet Things, IDCIoT},
pages = {1133--1138},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {This research study introduces an innovative system that aims to synthesize 360-degree panoramic images in real time based on vocal prompts from the user, leveraging state-of-the-art Generative AI with a combination of advanced NLP models. The primary objective of this system is to transform spoken descriptions into immersive and interactive visual scenes, specifically designed to provide users with first-person field views. This cutting-edge technology has the potential to revolutionize the realm of virtual reality (VR) experiences, enabling users to effortlessly create and navigate through personalized environments. The fundamental goal of this system is to enable the generation of real-time images that are seamlessly compatible with VR headsets, offering a truly immersive and adaptive visual experience. Beyond its technological advancements, this research also highlights its significant potential for creating a positive social impact. One notable application lies in psychological interventions, particularly in the context of phobia treatment and therapeutic settings. Here, patients can safely confront and work through their fears within these synthesized environments, potentially offering new avenues for therapy. Furthermore, the system serves educational and entertainment purposes by bringing users' imaginations to life, providing an unparalleled platform for exploring the boundaries of virtual experiences. Overall, this research represents a promising stride towards a more immersive and adaptable future in VR technology, with the potential to enhance various aspects of human lives, from mental health treatment to entertainment and education. © 2024 IEEE.},
keywords = {Adaptive Visual Experience, First person, First-Person view, generative artificial intelligence, Generative Artificial Intelligence (AI), Image processing, Immersive, Immersive visual scene, Immersive Visual Scenes, Language processing, Natural Language Processing, Natural Language Processing (NLP), Natural language processing systems, Natural languages, Panoramic Images, Patient treatment, Personalized environment, Personalized Environments, Phobia Treatment, Prompt, prompts, Psychological intervention, Psychological Interventions, Real-Time Synthesis, User interaction, User interfaces, Virtual experience, Virtual Experiences, Virtual Reality, Virtual Reality (VR), Virtual-reality headsets, Visual experiences, Visual languages, Visual scene, Voice command, Voice commands, VR Headsets},
pubstate = {published},
tppubtype = {inproceedings}
}
Do, M. D.; Dahlem, N.; Paulus, M.; Krick, M.; Steffny, L.; Werth, D.
“Furnish Your Reality” - Intelligent Mobile AR Application for Personalized Furniture Proceedings Article
In: J., Wei; G., Margetis (Ed.): Lect. Notes Comput. Sci., pp. 196–210, Springer Science and Business Media Deutschland GmbH, 2024, ISBN: 03029743 (ISSN); 978-303160457-7 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Augmented Reality, Augmented reality applications, Electronic commerce, Generative AI, generative artificial intelligence, Human computer interaction, Human computer interfaces, LiDAR, Mobile augmented reality, Mobile human computer interface, Mobile Human Computer Interfaces, Personalized product design, Personalized products, Phygital customer journey, Physical environments, Product design, Recommender system, Recommender systems, Sales, User centered design, User interfaces, User-centered design
@inproceedings{do_furnish_2024,
title = {{``Furnish Your Reality''} - Intelligent Mobile {AR} Application for Personalized Furniture},
author = {Do, M. D. and Dahlem, N. and Paulus, M. and Krick, M. and Steffny, L. and Werth, D.},
editor = {Wei, J. and Margetis, G.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85196202642&doi=10.1007%2f978-3-031-60458-4_14&partnerID=40&md5=017510be06c286789867235cfd98bb36},
doi = {10.1007/978-3-031-60458-4_14},
issn = {0302-9743},
isbn = {978-3-031-60457-7},
year = {2024},
date = {2024-01-01},
booktitle = {Lect. Notes Comput. Sci.},
series = {Lecture Notes in Computer Science},
volume = {14737},
pages = {196--210},
publisher = {Springer Science and Business Media Deutschland GmbH},
abstract = {Today’s online retailers are faced with the challenge of providing a convenient solution for their customers to browse through a wide range of products. Simultaneously, they must meet individual customer needs by creating unique, personalized, one-of-a-kind items. Technological advances in areas such as Augmented Reality (AR), Artificial Intelligence (AI) or sensors (e.g. LiDAR), have the potential to address these challenges by enhancing the customer experience in new ways. One option is to implement “phygital” commerce solutions, which combines the benefits of physical and digital environments to improve the customer journey. This work presents a concept for a mobile AR application that integrates LiDAR and an AI-powered recommender system to create a unique phygital customer journey in the context of furniture shopping. The combination of AR, LiDAR and AI enables an accurate immersive experience along with personalized product designs. This concept aims to deliver benefits in terms of usability, convenience, time savings and user experience, while bridging the gap between mass-produced and personalized products. The new possibilities for merging virtual with physical environments hold immense potential, but this work also highlights challenges for customers as well as for online platform providers and future researchers. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2024.},
keywords = {Artificial intelligence, Augmented Reality, Augmented reality applications, Electronic commerce, Generative AI, generative artificial intelligence, Human computer interaction, Human computer interfaces, LiDAR, Mobile augmented reality, Mobile human computer interface, Mobile Human Computer Interfaces, Personalized product design, Personalized products, Phygital customer journey, Physical environments, Product design, Recommender system, Recommender systems, Sales, User centered design, User interfaces, User-centered design},
pubstate = {published},
tppubtype = {inproceedings}
}
Ma, H.; Yao, X.; Wang, X.
Metaverses for Parallel Transportation: From General 3D Traffic Environment Construction to Virtual-Real I2TS Management and Control Proceedings Article
In: Proc. - IEEE Int. Conf. Digit. Twins Parallel Intell., DTPI, pp. 598–603, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-835034925-2 (ISBN).
Abstract | Links | BibTeX | Tags: Advanced traffic management systems, Data fusion, generative artificial intelligence, Highway administration, Information Management, Intelligent transportation systems, Interactive Intelligent Transportation System, Metaverses, Mixed Traffic, Parallel Traffic System, Social Diversity and Uncertainty, Traffic control, Traffic Metaverse, Traffic systems, Uncertainty, Virtual addresses, Virtual environments
@inproceedings{ma_metaverses_2024,
title = {Metaverses for Parallel Transportation: From General {3D} Traffic Environment Construction to Virtual-Real {I2TS} Management and Control},
author = {Ma, H. and Yao, X. and Wang, X.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85214916181&doi=10.1109%2fDTPI61353.2024.10778876&partnerID=40&md5=94a6bf4b06a2a45f7c483936beee840f},
doi = {10.1109/DTPI61353.2024.10778876},
isbn = {979-835034925-2},
year = {2024},
date = {2024-01-01},
booktitle = {Proc. - IEEE Int. Conf. Digit. Twins Parallel Intell., DTPI},
pages = {598--603},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Metaverse technologies have enabled the creation of highly realistic artificial traffic system via real-time multi-source data fusion, while generative artificial intelligence (GAI) has facilitated the construction of large-scale traffic scenarios and the evaluation of strategies. This integration allows for the modeling of traffic environments that blend virtual and real-world interactions, providing digital proving grounds for the management and control (M&C) of intelligent transportation systems (ITS). This paper comprehensively reviews the evolution of traffic modeling tools, from traditional 2D and 3D traffic simulations to the construction of generative 3D traffic environments based on digital twin (DT) technologies and the metaverse. Furthermore, to address the challenges posed by social diversity and uncertainty in mixed traffic, as well as the limitations of traditional methods, we propose a virtual-real interaction M&C strategy based on GAI. This strategy integrates the metaverse into parallel traffic systems (PTS), enabling bidirectional interaction and collaboration between virtual and physical environments. Through specific case studies, this research demonstrates the potential of combining the metaverse with PTS to enhance the efficiency of mixed traffic systems. © 2024 IEEE.},
keywords = {Advanced traffic management systems, Data fusion, generative artificial intelligence, Highway administration, Information Management, Intelligent transportation systems, Interactive Intelligent Transportation System, Metaverses, Mixed Traffic, Parallel Traffic System, Social Diversity and Uncertainty, Traffic control, Traffic Metaverse, Traffic systems, Uncertainty, Virtual addresses, Virtual environments},
pubstate = {published},
tppubtype = {inproceedings}
}
Chheang, V.; Sharmin, S.; Marquez-Hernandez, R.; Patel, M.; Rajasekaran, D.; Caulfield, G.; Kiafar, B.; Li, J.; Kullu, P.; Barmaki, R. L.
Towards Anatomy Education with Generative AI-based Virtual Assistants in Immersive Virtual Reality Environments Proceedings Article
In: Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR, pp. 21–30, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-835037202-1 (ISBN).
Abstract | Links | BibTeX | Tags: 3-D visualization systems, Anatomy education, Anatomy educations, Cognitive complexity, E-Learning, Embodied virtual assistant, Embodied virtual assistants, Generative AI, generative artificial intelligence, Human computer interaction, human-computer interaction, Immersive virtual reality, Interactive 3d visualizations, Knowledge Management, Medical education, Three dimensional computer graphics, Verbal communications, Virtual assistants, Virtual Reality, Virtual-reality environment
@inproceedings{chheang_towards_2024,
title = {Towards Anatomy Education with Generative {AI-based} Virtual Assistants in Immersive Virtual Reality Environments},
author = {Chheang, V. and Sharmin, S. and Marquez-Hernandez, R. and Patel, M. and Rajasekaran, D. and Caulfield, G. and Kiafar, B. and Li, J. and Kullu, P. and Barmaki, R. L.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85187216893&doi=10.1109%2fAIxVR59861.2024.00011&partnerID=40&md5=33e8744309add5fe400f4f341326505f},
doi = {10.1109/AIxVR59861.2024.00011},
isbn = {979-835037202-1},
year = {2024},
date = {2024-01-01},
booktitle = {Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR},
pages = {21--30},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Virtual reality (VR) and interactive 3D visualization systems have enhanced educational experiences and environments, particularly in complicated subjects such as anatomy education. VR-based systems surpass the potential limitations of traditional training approaches in facilitating interactive engagement among students. However, research on embodied virtual assistants that leverage generative artificial intelligence (AI) and verbal communication in the anatomy education context is underrepresented. In this work, we introduce a VR environment with a generative AI-embodied virtual assistant to support participants in responding to varying cognitive complexity anatomy questions and enable verbal communication. We assessed the technical efficacy and usability of the proposed environment in a pilot user study with 16 participants. We conducted a within-subject design for virtual assistant configuration (avatar- and screen-based), with two levels of cognitive complexity (knowledge- and analysis-based). The results reveal a significant difference in the scores obtained from knowledge- and analysis-based questions in relation to avatar configuration. Moreover, results provide insights into usability, cognitive task load, and the sense of presence in the proposed virtual assistant configurations. Our environment and results of the pilot study offer potential benefits and future research directions beyond medical education, using generative AI and embodied virtual agents as customized virtual conversational assistants. © 2024 IEEE.},
keywords = {3-D visualization systems, Anatomy education, Anatomy educations, Cognitive complexity, E-Learning, Embodied virtual assistant, Embodied virtual assistants, Generative AI, generative artificial intelligence, Human computer interaction, human-computer interaction, Immersive virtual reality, Interactive 3d visualizations, Knowledge Management, Medical education, Three dimensional computer graphics, Verbal communications, Virtual assistants, Virtual Reality, Virtual-reality environment},
pubstate = {published},
tppubtype = {inproceedings}
}
Greca, A. D.; Amaro, I.; Barra, P.; Rosapepe, E.; Tortora, G.
Enhancing therapeutic engagement in Mental Health through Virtual Reality and Generative AI: A co-creation approach to trust building Proceedings Article
In: Cannataro, M.; Zheng, H.; Gao, L.; Cheng, J.; Miranda, J. L.; Zumpano, E.; Hu, X.; Cho, Y.-R.; Park, T. (Ed.): Proc. - IEEE Int. Conf. Bioinform. Biomed., BIBM, pp. 6805–6811, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-835038622-6.
Abstract | Links | BibTeX | Tags: Co-creation, Electronic health record, Fundamental component, Generative adversarial networks, Generative AI, generative artificial intelligence, Immersive, Mental health, Personalized therapies, Personalized Therapy, Three-dimensional object, Trust, Trust building, Virtual environments, Virtual Reality, Virtual Reality (VR)
@inproceedings{greca_enhancing_2024,
title = {Enhancing therapeutic engagement in Mental Health through Virtual Reality and Generative {AI}: A co-creation approach to trust building},
author = {Greca, A. D. and Amaro, I. and Barra, P. and Rosapepe, E. and Tortora, G.},
editor = {Cannataro, M. and Zheng, H. and Gao, L. and Cheng, J. and Miranda, J. L. and Zumpano, E. and Hu, X. and Cho, Y.-R. and Park, T.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85217278235&doi=10.1109%2fBIBM62325.2024.10822177&partnerID=40&md5=ed42f7ca6a0e52e9945402e2c439a7f0},
doi = {10.1109/BIBM62325.2024.10822177},
isbn = {979-835038622-6},
year = {2024},
date = {2024-01-01},
booktitle = {Proc. - IEEE Int. Conf. Bioinform. Biomed., BIBM},
pages = {6805--6811},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Trust is a fundamental component of effective therapeutic relationships, significantly influencing patient engagement and treatment outcomes in mental health care. This paper presents a preliminary study aimed at enhancing trust through the co-creation of virtual therapeutic environments using generative artificial intelligence (AI). We propose a multimodal AI model, integrated into a virtual reality (VR) platform developed in Unity, which generates three-dimensional (3D) objects from textual descriptions. This approach allows patients to actively participate in shaping their therapeutic environment, fostering a collaborative atmosphere that enhances trust between patients and therapists. The methodology is structured into four phases, combining non-immersive and immersive experiences to co-create personalized therapeutic spaces and 3D objects symbolizing emotional or psychological states. Preliminary results demonstrate the system's potential in improving the therapeutic process through the real-time creation of virtual objects that reflect patient needs, with high-quality mesh generation and semantic coherence. This work offers new possibilities for patient-centered care in mental health services, suggesting that virtual co-creation can improve therapeutic efficacy by promoting trust and emotional engagement. © 2024 IEEE.},
keywords = {Co-creation, Electronic health record, Fundamental component, Generative adversarial networks, Generative AI, generative artificial intelligence, Immersive, Mental health, Personalized therapies, Personalized Therapy, Three-dimensional object, Trust, Trust building, Virtual environments, Virtual Reality, Virtual Reality (VR)},
pubstate = {published},
tppubtype = {inproceedings}
}
Vallasciani, G.; Stacchio, L.; Cascarano, P.; Marfia, G.
CreAIXR: Fostering Creativity with Generative AI in XR environments Proceedings Article
In: Proc. - IEEE Int. Conf. Metaverse Comput., Netw., Appl., MetaCom, pp. 1–8, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-833151599-7 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Creative thinking, Creatives, Creativity, Extended reality, Generative adversarial networks, generative artificial intelligence, Immersive, Modern technologies, Research questions, Stable Diffusion, Web technologies
@inproceedings{vallasciani_creaixr_2024,
title = {{CreAIXR}: Fostering Creativity with Generative {AI} in {XR} environments},
author = {Vallasciani, G. and Stacchio, L. and Cascarano, P. and Marfia, G.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85211481990&doi=10.1109%2fMetaCom62920.2024.00034&partnerID=40&md5=002e25a2d4ddb170e21029b27c157b28},
doi = {10.1109/MetaCom62920.2024.00034},
isbn = {979-833151599-7},
year = {2024},
date = {2024-01-01},
booktitle = {Proc. - IEEE Int. Conf. Metaverse Comput., Netw., Appl., MetaCom},
pages = {1--8},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Fostering creativity is paramount for cultivating innovative minds capable of addressing complex challenges. Modern technologies like eXtended Reality (XR) and Artificial Intelligence (AI) may nurture grounds supporting creative thinking by providing immersive and manipulable environments. An open research question is how such technologies may best lead to such a possible result. To help move one step closer to an answer, we present a portable XR platform, namely CreAIXR, where objects may be creatively defined and manipulated with AI paradigms. CreAIXR leverages web technologies, XR, and generative AI where creatives are immersed in a composable experience, allowing them to collaborate and customize an immersive environment through XR paradigms and generative AI. We here describe this system along with its validation through experiments carried out with a group of individuals having a background in the field of visual arts. © 2024 IEEE.},
keywords = {Artificial intelligence, Creative thinking, Creatives, Creativity, Extended reality, Generative adversarial networks, generative artificial intelligence, Immersive, Modern technologies, Research questions, Stable Diffusion, Web technologies},
pubstate = {published},
tppubtype = {inproceedings}
}