AHCI RESEARCH GROUP
Publications
Papers published in international journals, in the proceedings of conferences and workshops, and in books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTeX record for each paper.
2025
Miller, C. H.
Digital Storytelling: A Creator’s Guide to Interactive Entertainment: Volume I, Fifth Edition Book
CRC Press, 2025, ISBN: 978-104034442-2; 978-103285888-3.
Abstract | Links | BibTeX | Tags: Case-studies, Chatbots, Creatives, Digital storytelling, Entertainment, Immersive environment, Interactive documentary, Interactive entertainment, Social media, Use of video, Video-games, Virtual Reality
@book{miller_digital_2025,
title = {Digital Storytelling: A Creator’s Guide to Interactive Entertainment: Volume I, Fifth Edition},
author = {C. H. Miller},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105004122515&doi=10.1201%2f9781003520092&partnerID=40&md5=894bfbb310cbd095a54409f9ac5174da},
doi = {10.1201/9781003520092},
isbn = {978-104034442-2 (ISBN); 978-103285888-3 (ISBN)},
year = {2025},
date = {2025-01-01},
volume = {1},
publisher = {CRC Press},
series = {Digital Storytelling: a Creator's Guide to Interactive Entertainment: Volume I, Fifth Edition},
abstract = {Digital Storytelling: A Creator’s Guide to Interactive Entertainment, Volume I, fifth edition delves into the fascinating and groundbreaking stories enabled by interactive digital media, examining both fictional and non-fiction narratives. This fifth edition explores monumental developments, particularly the emergence of generative AI, and highlights exciting projects utilizing this technology. Additionally, it covers social media; interactive documentaries; immersive environments; and innovative uses of video games, chatbots, and virtual reality. Carolyn Handler Miller provides insights into storytelling essentials like character development, plot, structure, dialogue, and emotion, while examining how digital media and interactivity influence these elements. This book also dives into advanced topics, such as narratives using AR, VR, and XR, alongside new forms of immersive media, including large screens, escape rooms, and theme park experiences. With numerous case studies, this edition illustrates the creative possibilities of digital storytelling and its applications beyond entertainment, such as education, training, information, and promotion. Interviews with industry leaders further enhance the understanding of this evolving universe, making it a valuable resource for both professionals and enthusiasts. Key Features: • This book includes up-to-the-minute developments in digital storytelling. • It offers case studies of noteworthy examples of digital storytelling. • It includes a glossary clearly defining new or difficult terms. • Each chapter opens with several thought-provoking questions about the chapter’s topic. • Each chapter concludes with several creative and engaging exercises to promote the reader’s understanding of the chapter’s topic. © 2025 Carolyn Handler Miller.},
keywords = {Case-studies, Chatbots, Creatives, Digital storytelling, Entertainment, Immersive environment, Interactive documentary, Interactive entertainment, Social media, Use of video, Video-games, Virtual Reality},
pubstate = {published},
tppubtype = {book}
}
Casas, L.; Hannah, S.; Mitchell, K.
HoloJig: Interactive Spoken Prompt Specified Generative AI Environments Journal Article
In: IEEE Computer Graphics and Applications, vol. 45, no. 2, pp. 69–77, 2025, ISSN: 0272-1716; 1558-1756 (Publisher: IEEE Computer Society).
Abstract | Links | BibTeX | Tags: 3-D rendering, Article, Collaborative workspace, customer experience, Economic and social effects, generative artificial intelligence, human, Immersive, Immersive environment, parallax, Real- time, simulation, Simulation training, speech, Time based, Virtual environments, Virtual Reality, Virtual reality experiences, Virtual spaces, VR systems
@article{casas_holojig_2025,
title = {HoloJig: Interactive Spoken Prompt Specified Generative AI Environments},
author = {L. Casas and S. Hannah and K. Mitchell},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105001182100&doi=10.1109%2FMCG.2025.3553780&partnerID=40&md5=9fafa25e4b6ddc9d2fe32d813fbabb20},
doi = {10.1109/MCG.2025.3553780},
issn = {02721716 (ISSN); 15581756 (ISSN)},
year = {2025},
date = {2025-01-01},
journal = {IEEE Computer Graphics and Applications},
volume = {45},
number = {2},
pages = {69–77},
abstract = {HoloJig offers an interactive, speech-to-virtual-reality (VR) experience that generates diverse environments in real time based on live spoken descriptions. Unlike traditional VR systems that rely on prebuilt assets, HoloJig dynamically creates personalized and immersive virtual spaces with depth-based parallax 3-D rendering, allowing users to define the characteristics of their immersive environment through verbal prompts. This generative approach opens up new possibilities for interactive experiences, including simulations, training, collaborative workspaces, and entertainment. In addition to speech-to-VR environment generation, a key innovation of HoloJig is its progressive visual transition mechanism, which smoothly dissolves between previously generated and newly requested environments, mitigating the delay caused by neural computations. This feature ensures a seamless and continuous user experience, even as new scenes are being rendered on remote servers. © 2025 Elsevier B.V., All rights reserved.},
note = {Publisher: IEEE Computer Society},
keywords = {3-D rendering, Article, Collaborative workspace, customer experience, Economic and social effects, generative artificial intelligence, human, Immersive, Immersive environment, parallax, Real- time, simulation, Simulation training, speech, Time based, Virtual environments, Virtual Reality, Virtual reality experiences, Virtual spaces, VR systems},
pubstate = {published},
tppubtype = {article}
}
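A note on the transition mechanism described in the HoloJig abstract above: the sketch below is our own minimal illustration, not the authors' code, of how a VR client can hide generation latency by continuing to show the current environment and cross-dissolving once the newly requested one arrives. generate_environment() is a hypothetical stand-in for the remote speech-driven generative model, and the blend operates on NumPy image arrays.

import threading
import numpy as np

def generate_environment(prompt: str, size=(512, 512, 3)) -> np.ndarray:
    # Placeholder for the remote model: pretend it returns an RGB panorama for the prompt.
    rng = np.random.default_rng(abs(hash(prompt)) % (2**32))
    return rng.random(size).astype(np.float32)

class EnvironmentTransition:
    def __init__(self, initial: np.ndarray, fade_frames: int = 90):
        self.current = initial      # environment shown right now
        self.incoming = None        # newly requested environment, once generated
        self.alpha = 0.0            # 0 = all current, 1 = all incoming
        self.fade_frames = fade_frames

    def request(self, prompt: str) -> None:
        # Generate on a worker thread so the VR render loop never stalls.
        def worker():
            self.incoming = generate_environment(prompt)
        threading.Thread(target=worker, daemon=True).start()

    def frame(self) -> np.ndarray:
        # Called once per rendered frame; the dissolve only starts after the new scene is ready.
        if self.incoming is not None:
            self.alpha = min(1.0, self.alpha + 1.0 / self.fade_frames)
            blended = (1.0 - self.alpha) * self.current + self.alpha * self.incoming
            if self.alpha >= 1.0:
                self.current, self.incoming, self.alpha = self.incoming, None, 0.0
            return blended
        return self.current

In use, request() would be called when a new spoken prompt is recognized and frame() once per rendered frame, so the user keeps seeing the previous environment until the remote result is available.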
Chen, J.; Grubert, J.; Kristensson, P. O.
Analyzing Multimodal Interaction Strategies for LLM-Assisted Manipulation of 3D Scenes Proceedings Article
In: Proc. - IEEE Conf. Virtual Real. 3D User Interfaces, VR, pp. 206–216, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331536459.
Abstract | Links | BibTeX | Tags: 3D modeling, 3D reconstruction, 3D scene editing, 3D scenes, Computer simulation languages, Editing systems, Immersive environment, Interaction pattern, Interaction strategy, Language Model, Large language model, large language models, Multimodal Interaction, Scene editing, Three dimensional computer graphics, Virtual environments, Virtual Reality
@inproceedings{chen_analyzing_2025,
title = {Analyzing Multimodal Interaction Strategies for LLM-Assisted Manipulation of 3D Scenes},
author = {J. Chen and J. Grubert and P. O. Kristensson},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105002716635&doi=10.1109%2FVR59515.2025.00045&partnerID=40&md5=9db6769cd401503605578c4b711152b9},
doi = {10.1109/VR59515.2025.00045},
isbn = {9798331536459 (ISBN)},
year = {2025},
date = {2025-01-01},
booktitle = {Proc. - IEEE Conf. Virtual Real. 3D User Interfaces, VR},
pages = {206–216},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {As more applications of large language models (LLMs) for 3D content in immersive environments emerge, it is crucial to study user behavior to identify interaction patterns and potential barriers to guide the future design of immersive content creation and editing systems which involve LLMs. In an empirical user study with 12 participants, we combine quantitative usage data with post-experience questionnaire feedback to reveal common interaction patterns and key barriers in LLM-assisted 3D scene editing systems. We identify opportunities for improving natural language interfaces in 3D design tools and propose design recommendations. Through an empirical study, we demonstrate that LLM-assisted interactive systems can be used productively in immersive environments. © 2025 Elsevier B.V., All rights reserved.},
keywords = {3D modeling, 3D reconstruction, 3D scene editing, 3D scenes, Computer simulation languages, Editing systems, Immersive environment, Interaction pattern, Interaction strategy, Language Model, Large language model, large language models, Multimodal Interaction, Scene editing, Three dimensional computer graphics, Virtual environments, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Stacchio, L.; Balloni, E.; Frontoni, E.; Paolanti, M.; Zingaretti, P.; Pierdicca, R.
MineVRA: Exploring the Role of Generative AI-Driven Content Development in XR Environments through a Context-Aware Approach Journal Article
In: IEEE Transactions on Visualization and Computer Graphics, vol. 31, no. 5, pp. 3602–3612, 2025, ISSN: 1077-2626 (Publisher: IEEE Computer Society).
Abstract | Links | BibTeX | Tags: adult, Article, Artificial intelligence, Computer graphics, Computer vision, Content Development, Contents development, Context-Aware, Context-aware approaches, Extended reality, female, Generative adversarial networks, Generative AI, generative artificial intelligence, human, Human-in-the-loop, Immersive, Immersive environment, male, Multi-modal, User need, Virtual environments, Virtual Reality
@article{stacchio_minevra_2025,
title = {MineVRA: Exploring the Role of Generative AI-Driven Content Development in XR Environments through a Context-Aware Approach},
author = {L. Stacchio and E. Balloni and E. Frontoni and M. Paolanti and P. Zingaretti and R. Pierdicca},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105003746367&doi=10.1109%2FTVCG.2025.3549160&partnerID=40&md5=3356eb968b3e6a0d3c9b75716b05fac4},
doi = {10.1109/TVCG.2025.3549160},
issn = {10772626 (ISSN)},
year = {2025},
date = {2025-01-01},
journal = {IEEE Transactions on Visualization and Computer Graphics},
volume = {31},
number = {5},
pages = {3602–3612},
abstract = {The convergence of Artificial Intelligence (AI), Computer Vision (CV), Computer Graphics (CG), and Extended Reality (XR) is driving innovation in immersive environments. A key challenge in these environments is the creation of personalized 3D assets, traditionally achieved through manual modeling, a time-consuming process that often fails to meet individual user needs. More recently, Generative AI (GenAI) has emerged as a promising solution for automated, context-aware content generation. In this paper, we present MineVRA (Multimodal generative artificial iNtelligence for contExt-aware Virtual Reality Assets), a novel Human-In-The-Loop (HITL) XR framework that integrates GenAI to facilitate coherent and adaptive 3D content generation in immersive scenarios. To evaluate the effectiveness of this approach, we conducted a comparative user study analyzing the performance and user satisfaction of GenAI-generated 3D objects compared to those generated by Sketchfab in different immersive contexts. The results suggest that GenAI can significantly complement traditional 3D asset libraries, with valuable design implications for the development of human-centered XR environments. © 2025 Elsevier B.V., All rights reserved.},
note = {Publisher: IEEE Computer Society},
keywords = {adult, Article, Artificial intelligence, Computer graphics, Computer vision, Content Development, Contents development, Context-Aware, Context-aware approaches, Extended reality, female, Generative adversarial networks, Generative AI, generative artificial intelligence, human, Human-in-the-loop, Immersive, Immersive environment, male, Multi-modal, User need, Virtual environments, Virtual Reality},
pubstate = {published},
tppubtype = {article}
}
Shen, Y.; Li, B.; Huang, J.; Wang, Z.
GaussianShopVR: Facilitating Immersive 3D Authoring Using Gaussian Splatting in VR Proceedings Article
In: Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW, pp. 1292–1293, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331514846.
Abstract | Links | BibTeX | Tags: 3D authoring, 3D modeling, Digital replicas, Gaussian distribution, Gaussian Splatting editing, Gaussians, Graphical user interfaces, High quality, Immersive, Immersive environment, Interactive computer graphics, Rendering (computer graphics), Rendering pipelines, Splatting, Three dimensional computer graphics, User profile, Virtual Reality, Virtual reality user interface, Virtualization, VR user interface
@inproceedings{shen_gaussianshopvr_2025,
title = {GaussianShopVR: Facilitating Immersive 3D Authoring Using Gaussian Splatting in VR},
author = {Y. Shen and B. Li and J. Huang and Z. Wang},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005138672&doi=10.1109%2FVRW66409.2025.00292&partnerID=40&md5=2290016d250649f8d7f262212b1f59cb},
doi = {10.1109/VRW66409.2025.00292},
isbn = {9798331514846 (ISBN)},
year = {2025},
date = {2025-01-01},
booktitle = {Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW},
pages = {1292–1293},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Virtual reality (VR) applications require massive high-quality 3D assets to create immersive environments. Generating mesh-based 3D assets typically involves a significant amount of manpower and effort, which makes VR applications less accessible. 3D Gaussian Splatting (3DGS) has attracted much attention for its ability to quickly create digital replicas of real-life scenes and its compatibility with traditional rendering pipelines. However, it remains a challenge to edit 3DGS in a flexible and controllable manner. We propose GaussianShopVR, a system that leverages VR user interfaces to specify target areas to achieve flexible and controllable editing of reconstructed 3DGS. In addition, selected areas can provide 3D information to generative AI models to facilitate the editing. GaussianShopVR integrates object hierarchy management while keeping the backpropagated gradient flow to allow local editing with context information. © 2025 Elsevier B.V., All rights reserved.},
keywords = {3D authoring, 3D modeling, Digital replicas, Gaussian distribution, Gaussian Splatting editing, Gaussians, Graphical user interfaces, High quality, Immersive, Immersive environment, Interactive computer graphics, Rendering (computer graphics), Rendering pipelines, Splatting, Three dimensional computer graphics, User profile, Virtual Reality, Virtual reality user interface, Virtualization, VR user interface},
pubstate = {published},
tppubtype = {inproceedings}
}
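To make the local-editing idea in the GaussianShopVR abstract concrete, here is a minimal sketch under our own assumptions rather than the paper's code: the user's VR selection is modelled as a sphere, and only Gaussians whose centers fall inside it receive gradient updates, so backpropagated gradients still reach the selected region while the rest of the reconstruction stays frozen. loss_fn stands in for whatever differentiable rendering loss drives the edit.

import torch

def local_edit_step(means, colors, selection_center, selection_radius, loss_fn, lr=1e-2):
    # means: (N, 3) Gaussian centers, colors: (N, 3) per-Gaussian color, both with requires_grad=True.
    # selection_center: (3,) tensor from the VR controller; selection_radius: scalar in scene units.
    inside = (means.detach() - selection_center).norm(dim=1) < selection_radius  # (N,) bool mask

    optimizer = torch.optim.Adam([means, colors], lr=lr)
    optimizer.zero_grad()
    loss = loss_fn(means, colors)   # e.g. an image-space loss against the edit target
    loss.backward()                 # gradients flow through the full differentiable pipeline

    # Zero the gradients of unselected Gaussians so only the chosen region is modified.
    with torch.no_grad():
        means.grad[~inside] = 0.0
        colors.grad[~inside] = 0.0
    optimizer.step()
    return loss.item(), int(inside.sum())

A real system would mask the remaining per-Gaussian parameters (rotation, scale, opacity) the same way and keep one optimizer across steps; the masking pattern is the point of the sketch.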
Chang, K. -Y.; Lee, C. -F.
Enhancing Virtual Restorative Environment with Generative AI: Personalized Immersive Stress-Relief Experiences Proceedings Article
In: Duffy, V. G. (Ed.): Lect. Notes Comput. Sci., pp. 132–144, Springer Science and Business Media Deutschland GmbH, 2025, ISSN: 0302-9743; ISBN: 978-303193501-5.
Abstract | Links | BibTeX | Tags: Artificial intelligence generated content, Artificial Intelligence Generated Content (AIGC), Electroencephalography, Electroencephalography (EEG), Generative AI, Immersive, Immersive environment, Mental health, Physical limitations, Restorative environment, Stress relief, Virtual reality exposure therapies, Virtual reality exposure therapy, Virtual Reality Exposure Therapy (VRET), Virtualization
@inproceedings{chang_enhancing_2025,
title = {Enhancing Virtual Restorative Environment with Generative AI: Personalized Immersive Stress-Relief Experiences},
author = {K. -Y. Chang and C. -F. Lee},
editor = {Duffy V.G.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105007759157&doi=10.1007%2f978-3-031-93502-2_9&partnerID=40&md5=ee620a5da9b65e90ccb1eaa75ec8b724},
doi = {10.1007/978-3-031-93502-2_9},
isbn = {03029743 (ISSN); 978-303193501-5 (ISBN)},
year = {2025},
date = {2025-01-01},
booktitle = {Lect. Notes Comput. Sci.},
volume = {15791 LNCS},
pages = {132–144},
publisher = {Springer Science and Business Media Deutschland GmbH},
abstract = {In today’s fast-paced world, stress and mental health challenges are becoming more common. Restorative environments help people relax and recover emotionally, and Virtual Reality Exposure Therapy (VRET) offers a way to experience these benefits beyond physical limitations. However, most VRET applications rely on pre-designed content, limiting their adaptability to individual needs. This study explores how Generative AI can enhance VRET by creating personalized, immersive environments that better match users’ preferences and improve relaxation. To evaluate the impact of AI-generated restorative environments, we combined EEG measurements with user interviews. Thirty university students participated in the study, experiencing two different modes: static mode and walking mode. The EEG results showed an increase in Theta (θ) and High Beta (β) brain waves, suggesting a state of deep immersion accompanied by heightened cognitive engagement and mental effort. While participants found the experience enjoyable and engaging, the AI-generated environments tended to create excitement and focus rather than conventional relaxation. These findings suggest that for AI-generated environments in VRET to be more effective for stress relief, future designs should reduce cognitive load while maintaining immersion. This study provides insights into how AI can enhance relaxation experiences and introduces a new perspective on personalized digital stress-relief solutions. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2025.},
keywords = {Artificial intelligence generated content, Artificial Intelligence Generated Content (AIGC), Electroencephalography, Electroencephalography (EEG), Generative AI, Immersive, Immersive environment, Mental health, Physical limitations, Restorative environment, Stress relief, Virtual reality exposure therapies, Virtual reality exposure therapy, Virtual Reality Exposure Therapy (VRET), Virtualization},
pubstate = {published},
tppubtype = {inproceedings}
}
Paterakis, I.; Manoudaki, N.
Osmosis: Generative AI and XR for the real-time transformation of urban architectural environments Journal Article
In: International Journal of Architectural Computing, 2025, ISSN: 1478-0771 (Publisher: SAGE Publications Inc.).
Abstract | Links | BibTeX | Tags: Architectural design, Architectural environment, Artificial intelligence, Biodigital design, Case-studies, Computational architecture, Computer architecture, Extended reality, generative artificial intelligence, Immersive, Immersive environment, immersive environments, Natural language processing systems, Real- time, Urban environments, urban planning
@article{paterakis_osmosis_2025,
title = {Osmosis: Generative AI and XR for the real-time transformation of urban architectural environments},
author = {I. Paterakis and N. Manoudaki},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105014516125&doi=10.1177%2F14780771251356526&partnerID=40&md5=4bbcb09440d91899cb7d2d5d0c852507},
doi = {10.1177/14780771251356526},
issn = {14780771 (ISSN)},
year = {2025},
date = {2025-01-01},
journal = {International Journal of Architectural Computing},
abstract = {This work contributes to the evolving discourse on biodigital architecture by examining how generative artificial intelligence (AI) and extended reality (XR) systems can be combined to create immersive urban environments. Focusing on the case study of “Osmosis”, a series of large-scale public installations, this work proposes a methodological framework for real-time architectural composition in XR using diffusion models and interaction. The project reframes the architectural façade as a semi-permeable membrane, through which digital content diffuses in response to environmental and user inputs. By integrating natural language prompts, multimodal input, and AI-generated visual synthesis with projection mapping, Osmosis advances a vision for urban architecture that is interactive, data-driven, and sensorially rich. The work explores new design territories where stochastic form-making and real-time responsiveness intersect, and positions AI as an augmentation of architectural creativity rather than its replacement. © 2025 Elsevier B.V., All rights reserved.},
note = {Publisher: SAGE Publications Inc.},
keywords = {Architectural design, Architectural environment, Artificial intelligence, Biodigital design, Case-studies, Computational architecture, Computer architecture, Extended reality, generative artificial intelligence, Immersive, Immersive environment, immersive environments, Natural language processing systems, Real- time, Urban environments, urban planning},
pubstate = {published},
tppubtype = {article}
}
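For readers curious what the prompt-to-facade loop described in the Osmosis abstract could look like in practice, the following is a speculative sketch using the Hugging Face diffusers library; the installation's actual pipeline is not published, and the model id, sampler settings, and the final projection-mapping warp are illustrative assumptions.

import torch
from diffusers import StableDiffusionPipeline

# Load a text-to-image diffusion model once at startup (GPU assumed).
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

def facade_frame(prompt: str, seed: int = 0):
    # A fixed seed keeps successive frames visually coherent while the prompt slowly drifts.
    generator = torch.Generator(device="cuda").manual_seed(seed)
    image = pipe(prompt, num_inference_steps=20, generator=generator).images[0]
    return image  # PIL image; a projection-mapping tool would warp it onto the facade geometry

Example call: facade_frame("coral-like membrane dissolving over a concrete facade, night, volumetric light").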
Boubakri, F. -E.; Kadri, M.; Kaghat, F. Z.; Azough, A.; Tairi, H.
Exploring 3D Cardiac Anatomy with Text-Based AI Guidance in Virtual Reality Proceedings Article
In: pp. 43–48, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331534899.
Abstract | Links | BibTeX | Tags: 3D cardiac anatomy, 3d heart models, Anatomy education, Anatomy educations, Cardiac anatomy, Collaborative environments, Collaborative learning, Computer aided instruction, Curricula, Design and Development, E-Learning, Education computing, Generative AI, Heart, Immersive environment, Learning systems, Natural language processing systems, Social virtual reality, Students, Teaching, Three dimensional computer graphics, Virtual Reality
@inproceedings{boubakri_exploring_2025,
title = {Exploring 3D Cardiac Anatomy with Text-Based AI Guidance in Virtual Reality},
author = {F. -E. Boubakri and M. Kadri and F. Z. Kaghat and A. Azough and H. Tairi},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105015676741&doi=10.1109%2FSCME62582.2025.11104869&partnerID=40&md5=c961694f97c50adc23b6826dddb265cd},
doi = {10.1109/SCME62582.2025.11104869},
isbn = {9798331534899 (ISBN)},
year = {2025},
date = {2025-01-01},
pages = {43–48},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {This paper presents the design and development of a social virtual reality (VR) classroom focused on cardiac anatomy education for students in grades K-12. The application allows multiple learners to explore a detailed 3D heart model within an immersive and collaborative environment. A crucial part of the system is the integration of a text-based conversational AI interface powered by ChatGPT, which provides immediate, interactive explanations and addresses student inquiries about heart anatomy. The system supports both guided and exploratory learning modes, encourages peer collaboration, and offers personalized support through natural language dialogue. We evaluated the system's effectiveness through a comprehensive study measuring learning perception (LPQ), VR perception (VRPQ), AI perception (AIPQ), and VR-related symptoms (VRSQ). Potential applications include making high-quality cardiac anatomy education more affordable for K-12 schools with limited resources, offering an adaptable AI-based tutoring system for students to learn at their own pace, and equipping educators with an easy-to-use tool to integrate into their science curriculum with minimal additional training. © 2025 Elsevier B.V., All rights reserved.},
keywords = {3D cardiac anatomy, 3d heart models, Anatomy education, Anatomy educations, Cardiac anatomy, Collaborative environments, Collaborative learning, Computer aided instruction, Curricula, Design and Development, E-Learning, Education computing, Generative AI, Heart, Immersive environment, Learning systems, Natural language processing systems, Social virtual reality, Students, Teaching, Three dimensional computer graphics, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
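As a rough illustration of the text-based tutoring loop described in the record above (the paper does not publish its integration code), the sketch below wraps a chat-completion call behind a single function the VR classroom could invoke whenever a student types a question. It assumes the OpenAI Python client (openai>=1.0); the model name and system prompt are placeholders, not values from the paper.

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

SYSTEM_PROMPT = (
    "You are a cardiac-anatomy tutor inside a VR classroom for K-12 students. "
    "Answer briefly, at grade level, and refer to parts of the shared 3D heart model."
)

def ask_tutor(question, history=None):
    # history: optional list of prior {"role": ..., "content": ...} messages for context.
    messages = [{"role": "system", "content": SYSTEM_PROMPT}]
    messages += history or []
    messages.append({"role": "user", "content": question})
    response = client.chat.completions.create(model="gpt-4o-mini", messages=messages)
    return response.choices[0].message.content

# Example: ask_tutor("What does the left ventricle do?") returns text the VR UI can display.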
2024
Rozo-Torres, A.; Sarmiento, W. J.
Coffee Masterclass: An Experience of Co-Creation with Prompt Engineering and Generative AI for Immersive Environments Development Proceedings Article
In: Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW, pp. 1170–1171, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 9798350374490.
Abstract | Links | BibTeX | Tags: Artificial intelligence, Artificial intelligence tools, Co-creation, Computer graphics, Computing methodologies, Design and development process, Development teams, Graphic system, Graphics interface, Graphics systems and interfaces, Immersive, Immersive environment, Mixed/augmented reality
@inproceedings{rozo-torres_coffee_2024,
title = {Coffee Masterclass: An Experience of Co-Creation with Prompt Engineering and Generative AI for Immersive Environments Development},
author = {A. Rozo-Torres and W. J. Sarmiento},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85195576520&doi=10.1109%2FVRW62533.2024.00379&partnerID=40&md5=d41d501a87c60c9e22870b6f870845c3},
doi = {10.1109/VRW62533.2024.00379},
isbn = {9798350374490 (ISBN)},
year = {2024},
date = {2024-01-01},
booktitle = {Proc. - IEEE Conf. Virtual Real. 3D User Interfaces Abstr. Workshops, VRW},
pages = {1170–1171},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {This work presents the design and development process of an immersive experience built through a co-creation approach between humans and generative artificial intelligence tools. From the user's point of view, Coffee Masterclass is an immersive experience that introduces anyone to the art and pleasure of preparing specialty coffees. Behind the scenes, however, Coffee Masterclass is the result of incorporating prompt engineering outputs at each stage of the building process. The co-creation approach spans the entire development process, from the narrative to the visual content generated through code writing, all co-created between the creative team and GenAI. This work details the approach, including how the generative artificial intelligence tools were used at each stage of immersive experience development. It shows the advantage of having people with skills in prompt engineering and in interacting with Large Language Models on the development team, and it offers recommendations for other teams that adopt generative artificial intelligence tools in future developments. © 2024 Elsevier B.V., All rights reserved.},
keywords = {Artificial intelligence, Artificial intelligence tools, Co-creation, Computer graphics, Computing methodologies, Design and development process, Development teams, Graphic system, Graphics interface, Graphics systems and interfaces, Immersive, Immersive environment, Mixed/augmented reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Shabanijou, M.; Sharma, V.; Ray, S.; Lu, R.; Xiong, P.
Large Language Model Empowered Spatio-Visual Queries for Extended Reality Environments Proceedings Article
In: Ding, W.; Lu, C. -T.; Wang, F.; Di, L.; Wu, K.; Huan, J.; Nambiar, R.; Li, J.; Ilievski, F.; Baeza-Yates, R.; Hu, X. (Ed.): Proc. - IEEE Int. Conf. Big Data, BigData, pp. 5843–5846, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 9798350362480.
Abstract | Links | BibTeX | Tags: 3D modeling, Digital elevation model, Emerging applications, Immersive environment, Language Model, Metaverses, Modeling languages, Natural language interfaces, Query languages, spatial data, Spatial queries, Structured Query Language, Technological advances, Users perspective, Virtual environments, Visual languages, Visual query
@inproceedings{shabanijou_large_2024,
title = {Large Language Model Empowered Spatio-Visual Queries for Extended Reality Environments},
author = {M. Shabanijou and V. Sharma and S. Ray and R. Lu and P. Xiong},
editor = {W. Ding and C. -T. Lu and F. Wang and L. Di and K. Wu and J. Huan and R. Nambiar and J. Li and F. Ilievski and R. Baeza-Yates and X. Hu},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85218011140&doi=10.1109%2FBigData62323.2024.10825084&partnerID=40&md5=db4266a744037ba69c0989ee47160ae8},
doi = {10.1109/BigData62323.2024.10825084},
isbn = {9798350362480 (ISBN)},
year = {2024},
date = {2024-01-01},
booktitle = {Proc. - IEEE Int. Conf. Big Data, BigData},
pages = {5843–5846},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {With technological advances in the creation and capture of 3D spatial data, new applications are emerging. Digital Twins, metaverse, and extended reality (XR)-based immersive environments can be enriched by leveraging geocoded 3D spatial data. Unlike 2D spatial queries, queries involving 3D immersive environments need to take the query user's viewpoint into account: spatio-visual queries return objects that are visible from the user's perspective. In this paper, we propose enhancing 3D spatio-visual queries with large language models (LLMs). These queries allow a user to interact with the visible objects through a natural language interface. We have implemented a proof-of-concept prototype and conducted a preliminary evaluation. Our results demonstrate the potential of truly interactive immersive environments. © 2025 Elsevier B.V., All rights reserved.},
keywords = {3D modeling, Digital elevation model, Emerging applications, Immersive environment, Language Model, Metaverses, Modeling languages, Natural language interfaces, Query languages, spatial data, Spatial queries, Structured Query Language, Technological advances, Users perspective, Virtual environments, Visual languages, Visual query},
pubstate = {published},
tppubtype = {inproceedings}
}
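The abstract above describes the key step of a spatio-visual query: restricting a natural-language question to the objects visible from the user's viewpoint. The sketch below is our simplified reading of that idea, not the authors' prototype: visibility is reduced to a field-of-view and distance test (occlusion is ignored), and call_llm() is a hypothetical placeholder for the language-model backend.

import numpy as np

def visible_objects(objects, eye, view_dir, fov_deg=90.0, max_dist=50.0):
    # objects: list of dicts with "name" and "position" (3-vector); eye/view_dir: user pose.
    view_dir = np.asarray(view_dir, dtype=float)
    view_dir /= np.linalg.norm(view_dir)
    cos_half_fov = np.cos(np.radians(fov_deg) / 2.0)
    result = []
    for obj in objects:
        to_obj = np.asarray(obj["position"], dtype=float) - np.asarray(eye, dtype=float)
        dist = np.linalg.norm(to_obj)
        # Keep objects that are close enough and within the viewing cone.
        if 0 < dist <= max_dist and np.dot(to_obj / dist, view_dir) >= cos_half_fov:
            result.append(obj)
    return result

def spatio_visual_prompt(question, objects, eye, view_dir):
    visible = visible_objects(objects, eye, view_dir)
    context = "; ".join(f'{o["name"]} at {tuple(o["position"])}' for o in visible)
    return f"Objects visible to the user: {context}. Question: {question}"

# The assembled prompt would then go to the language model, e.g. answer = call_llm(spatio_visual_prompt(...)).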
Liu, P.; Kitson, A.; Picard-Deland, C.; Carr, M.; Liu, S.; LC, R.; Chen, C.
Virtual Dream Reliving: Exploring Generative AI in Immersive Environment for Dream Re-experiencing Proceedings Article
In: Conf Hum Fact Comput Syst Proc, Association for Computing Machinery, 2024, ISBN: 9798400703317 (ISBN).
Abstract | Links | BibTeX | Tags: Dream Re-experiencing, Dreamwork Engineering, Fundamental component, Generative AI, Immersive environment, Interactivity, Key elements, Personal Insight, Scientific Creativity, Virtual Reality, Virtual-reality environment
@inproceedings{liu_virtual_2024,
title = {Virtual Dream Reliving: Exploring Generative AI in Immersive Environment for Dream Re-experiencing},
author = {P. Liu and A. Kitson and C. Picard-Deland and M. Carr and S. Liu and R. LC and C. Chen},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85194182384&doi=10.1145%2F3613905.3644054&partnerID=40&md5=89a198e59bf383f9142703189035f0fd},
doi = {10.1145/3613905.3644054},
isbn = {9798400703317 (ISBN)},
year = {2024},
date = {2024-01-01},
booktitle = {Conf Hum Fact Comput Syst Proc},
publisher = {Association for Computing Machinery},
abstract = {Dreaming is a fundamental component of the human experience. Modern-day psychologists and neuroscientists use "dreamwork" to describe a variety of strategies that deepen and engage with dreams. Re-experiencing the dream as if reliving the memory, feelings, and bodily sensations from the dream is a key element shared by many dreamwork practices. In this paper, we propose the concept of "dreamwork engineering" by creating a system enabling dream re-experiencing in a virtual reality environment through generative AI. Through an autoethnographic study, the first author documented his own dreams and relived his dream experiences for two weeks. Based on our results, we propose a technology-aided dreamwork framework, where technology could potentially augment traditional dreamwork methods through spatiality and movement, interactivity and abstract anchor. We further highlight the collaborative role of technology in dreamwork and advocate that the scientific community could also benefit from dreaming and dreamwork for scientific creativity. © 2025 Elsevier B.V., All rights reserved.},
keywords = {Dream Re-experiencing, Dreamwork Engineering, Fundamental component, Generative AI, Immersive environment, Interactivity, Key elements, Personal Insight, Scientific Creativity, Virtual Reality, Virtual-reality environment},
pubstate = {published},
tppubtype = {inproceedings}
}