AHCI RESEARCH GROUP
Publications
Papers published in international journals,
proceedings of conferences, workshops and books.
Scientific Publications
2025
Dong, Y.
Enhancing Painting Exhibition Experiences with the Application of Augmented Reality-Based AI Video Generation Technology Proceedings Article
In: Zaphiris, P.; Ioannou, A.; Sottilare, R.A.; Schwarz, J.; Rauterberg, M. (Eds.): Lect. Notes Comput. Sci., pp. 256–262, Springer Science and Business Media Deutschland GmbH, 2025, ISSN: 0302-9743, ISBN: 978-3-031-76814-9.
@inproceedings{dong_enhancing_2025,
title = {Enhancing Painting Exhibition Experiences with the Application of Augmented Reality-Based AI Video Generation Technology},
author = {Y. Dong},
editor = {Zaphiris, P. and Ioannou, A. and Sottilare, R.A. and Schwarz, J. and Rauterberg, M.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85213302959&doi=10.1007%2f978-3-031-76815-6_18&partnerID=40&md5=35484f5ed199a831f1a30f265a0d32d5},
doi = {10.1007/978-3-031-76815-6_18},
issn = {0302-9743},
isbn = {978-3-031-76814-9},
year = {2025},
date = {2025-01-01},
booktitle = {Lect. Notes Comput. Sci.},
volume = {15378 LNCS},
pages = {256–262},
publisher = {Springer Science and Business Media Deutschland GmbH},
abstract = {Traditional painting exhibitions often rely on flat presentation methods, such as walls and stands, limiting their impact. Augmented Reality (AR) technology presents an opportunity to transform these experiences by turning static, flat artwork into dynamic, multi-dimensional presentations. However, creating and integrating video or dynamic content can be time-consuming and challenging, requiring meticulous planning, design, and production. In the context of urban renewal and community revitalization, particularly in China’s first-tier cities where real estate development has saturated the market, there is a growing trend to repurpose traditional commercial and office spaces with cultural and artistic exhibitions. These exhibitions not only enhance the spatial quality but also elevate the user experience, making the spaces more competitive. However, these non-traditional exhibition venues often lack the amenities of professional galleries, relying on walls, windows, and corners for displays, and requiring quick setup times. For visitors, who are often office workers or shoppers with limited time, the use of personal mobile devices for interaction is common. WeChat, China’s most widely used mobile application, provides a platform for convenient digital interactive experiences through mini-programs, which can support lightweight AR applications. AI video generation technologies, such as Conditional Generative Adversarial Networks (ControlNet) and Latent Consistency Models (LCM), have seen significant advancements. These technologies now allow for the creation of 3D models and video content from text and images. Tools like Meshy and Pika provide the ability to generate various video styles and offer precise control over video content. New AI video applications like Stable Video further expand the possibilities by rapidly converting static images into dynamic videos, facilitating easy adjustments and edits. This paper explores the application of AR-based AI video generation technology in enhancing the experience of painting exhibitions. By integrating these technologies, traditional paintings can be transformed into interactive, engaging displays that enrich the viewer’s experience. The study demonstrates the potential of these innovations to make art exhibitions more appealing and competitive in various public spaces, thereby improving both artistic expression and audience engagement. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2025.},
keywords = {3D modeling, AI-generated art, Art and Technology, Arts computing, Augmented Reality, Augmented reality technology, Digital Exhibition Design, Dynamic content, E-Learning, Education computing, Generation technologies, Interactive computer graphics, Knowledge Management, Multi dimensional, Planning designs, Three dimensional computer graphics, Video contents, Video generation},
pubstate = {published},
tppubtype = {inproceedings}
}
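The image-to-video step this abstract attributes to tools like Stable Video can be approximated with the open-source Stable Video Diffusion pipeline. A minimal sketch, assuming the Hugging Face diffusers library and a CUDA GPU; the paper does not disclose its actual implementation, and the file names are placeholders:

import torch
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image, export_to_video

# Public image-to-video checkpoint, standing in for the paper's unnamed tooling.
pipe = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt",
    torch_dtype=torch.float16,
    variant="fp16",
)
pipe.to("cuda")

# A photo of a flat painting becomes a short animated clip for the AR overlay.
painting = load_image("painting.jpg").resize((1024, 576))
frames = pipe(painting, decode_chunk_size=8, motion_bucket_id=127).frames[0]
export_to_video(frames, "painting_animated.mp4", fps=7)

In the scenario the abstract describes, the resulting clip would then be anchored to the physical canvas by the AR layer, e.g. an image tracker inside a WeChat mini-program.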
Song, T.; Liu, Z.; Zhao, R.; Fu, J.
ElderEase AR: Enhancing Elderly Daily Living with the Multimodal Large Language Model and Augmented Reality Proceedings Article
In: ICVRT - Proc. Int. Conf. Virtual Real. Technol., pp. 60–67, Association for Computing Machinery, Inc, 2025, ISBN: 979-8-4007-1018-6.
@inproceedings{song_elderease_2025,
title = {ElderEase AR: Enhancing Elderly Daily Living with the Multimodal Large Language Model and Augmented Reality},
author = {T. Song and Z. Liu and R. Zhao and J. Fu},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105001924899&doi=10.1145%2f3711496.3711505&partnerID=40&md5=4df693735547b505172657a73359f3ca},
doi = {10.1145/3711496.3711505},
isbn = {979-8-4007-1018-6},
year = {2025},
date = {2025-01-01},
booktitle = {ICVRT - Proc. Int. Conf. Virtual Real. Technol.},
pages = {60–67},
publisher = {Association for Computing Machinery, Inc},
abstract = {Elderly individuals often face challenges in independent living due to age-related cognitive and physical decline. To address these issues, we propose an innovative Augmented Reality (AR) system, “ElderEase AR”, designed to assist elderly users in their daily lives by leveraging a Multimodal Large Language Model (MLLM). This system enables elderly users to capture images of their surroundings and ask related questions, providing context-aware feedback. We evaluated the system’s perceived ease-of-use and feasibility through a pilot study involving 30 elderly users, aiming to enhance their independence and quality of life. Our system integrates advanced AR technology with an intelligent agent trained on multimodal datasets. Through prompt engineering, the agent is tailored to respond in a manner that aligns with the speaking style of elderly users. Experimental results demonstrate high accuracy in object recognition and question answering, with positive feedback from user trials. Specifically, the system accurately identified objects in various environments and provided relevant answers to user queries. This study highlights the powerful potential of AR and AI technologies in creating support tools for the elderly. It suggests directions for future improvements and applications, such as enhancing the system’s adaptability to different user needs and expanding its functionality to cover more aspects of daily living. © 2024 Copyright held by the owner/author(s).},
keywords = {Age-related, Assisted living, Augmented Reality, Augmented reality technology, Daily Life Support, Daily living, Daily-life supports, Elderly, Elderly users, Independent living, Independent living systems, Language Model, Modeling languages, Multi agent systems, Multi-modal, Multimodal large language model},
pubstate = {published},
tppubtype = {inproceedings}
}
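The core loop this abstract describes (capture an image of the surroundings, ask a question, get a context-aware answer phrased for an elderly user) reduces to a single multimodal chat call. A minimal sketch, assuming an OpenAI-compatible multimodal endpoint; the paper does not name its MLLM, so the model id, system prompt, and file name below are illustrative only:

import base64
from openai import OpenAI  # any OpenAI-compatible multimodal endpoint

client = OpenAI()

def ask_about_scene(image_path: str, question: str) -> str:
    """Send a captured AR frame plus the user's question to a multimodal LLM."""
    with open(image_path, "rb") as f:
        image_b64 = base64.b64encode(f.read()).decode("utf-8")
    response = client.chat.completions.create(
        model="gpt-4o",  # placeholder; the paper does not disclose its model
        messages=[
            # Prompt engineering in the spirit of the paper: answers aligned
            # with the speaking style and needs of elderly users.
            {"role": "system",
             "content": ("You assist an elderly user. Answer in short, plain, "
                         "patient sentences and avoid technical jargon.")},
            {"role": "user", "content": [
                {"type": "text", "text": question},
                {"type": "image_url",
                 "image_url": {"url": f"data:image/jpeg;base64,{image_b64}"}},
            ]},
        ],
    )
    return response.choices[0].message.content

print(ask_about_scene("kitchen.jpg", "What is this appliance and how do I turn it on?"))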
2024
Nebeling, M.; Oki, M.; Gelsomini, M.; Hayes, G. R.; Billinghurst, M.; Suzuki, K.; Graf, R.
Designing Inclusive Future Augmented Realities Proceedings Article
In: Conf Hum Fact Comput Syst Proc, Association for Computing Machinery, 2024, ISBN: 979-8-4007-0331-7.
@inproceedings{nebeling_designing_2024,
title = {Designing Inclusive Future Augmented Realities},
author = {M. Nebeling and M. Oki and M. Gelsomini and G. R. Hayes and M. Billinghurst and K. Suzuki and R. Graf},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85194176929&doi=10.1145%2f3613905.3636313&partnerID=40&md5=411b65058a4c96149182237aa586fa75},
doi = {10.1145/3613905.3636313},
isbn = {979-8-4007-0331-7},
year = {2024},
date = {2024-01-01},
booktitle = {Conf Hum Fact Comput Syst Proc},
publisher = {Association for Computing Machinery},
abstract = {Augmented and mixed reality technology is rapidly advancing, driven by innovations in display, sensing, and AI technologies. This evolution, particularly in the era of generative AI with large language and text-to-image models such as GPT and Stable Diffusion, has the potential, not only to make it easier to create, but also to adapt and personalize, new content. Our workshop explores the pivotal role of augmented and mixed reality to shape a user's interactions with their physical surroundings. We aim to explore how inclusive future augmented realities can be designed, with increasing support for automation, such that environments can welcome users with different needs, emphasizing accessibility and inclusion through layers of augmentations. Our aim is not only to remove barriers by providing accommodations, but also to create a sense of belonging by directly engaging users. Our workshop consists of three main activities: (1) Through brainstorming and discussion of examples provided by the workshop organizers and participants, we critically review the landscape of accessible and inclusive design and their vital role in augmented and mixed reality experiences. (2) Through rapid prototyping activities including bodystorming and low-fidelity, mixed-media prototypes, participants explore how augmented and mixed reality can transform physical space into a more personal place, enhancing accessibility and inclusion based on novel interface and interaction techniques that are desirable, but not necessarily technically feasible just yet. In the workshop, we plan to focus on physical space to facilitate rapid prototyping without technical constraints, but techniques developed in the workshop are likely applicable to immersive virtual environments as well. (3) Finally, we collaborate to outline a research agenda for designing future augmented realities that promote equal opportunities, benefiting diverse user populations. Our workshop inspires innovation in augmented and mixed reality, reshaping physical environments to be more accessible and inclusive through immersive design. © 2024 Owner/Author.},
keywords = {Accessible and inclusive design, Augmented Reality, Augmented reality technology, Display technologies, Generative AI, Inclusive design, Interactive computer graphics, Mixed reality, Mixed reality technologies, Rapid prototyping, Rapid-prototyping, Sensing technology, Spatial computing},
pubstate = {published},
tppubtype = {inproceedings}
}
2023
Yeo, J. Q.; Wang, Y.; Tanary, S.; Cheng, J.; Lau, M.; Ng, A. B.; Guan, F.
AICRID: AI-Empowered CR For Interior Design Proceedings Article
In: Bruder, G.; Olivier, A.H.; Cunningham, A.; Peng, E.Y.; Grubert, J.; Williams, I. (Eds.): Proc. - IEEE Int. Symp. Mixed Augment. Real. Adjunct, ISMAR-Adjunct, pp. 837–841, Institute of Electrical and Electronics Engineers Inc., 2023, ISBN: 979-8-3503-2891-2.
@inproceedings{yeo_aicrid_2023,
title = {AICRID: AI-Empowered CR For Interior Design},
author = {J. Q. Yeo and Y. Wang and S. Tanary and J. Cheng and M. Lau and A. B. Ng and F. Guan},
editor = {Bruder, G. and Olivier, A.H. and Cunningham, A. and Peng, E.Y. and Grubert, J. and Williams, I.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85180375829&doi=10.1109%2fISMAR-Adjunct60411.2023.00184&partnerID=40&md5=b14d89dbd38a4dfe3f85b90800d42e78},
doi = {10.1109/ISMAR-Adjunct60411.2023.00184},
isbn = {979-8-3503-2891-2},
year = {2023},
date = {2023-01-01},
booktitle = {Proc. - IEEE Int. Symp. Mixed Augment. Real. Adjunct, ISMAR-Adjunct},
pages = {837–841},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Augmented Reality (AR) technologies have been utilized for interior design for years. Normally 3D furniture models need to be created manually or by scanning with specialized devices and this is usually a costly process. Additionally, users need controllers or hands for manipulating the virtual furniture which may lead to fatigue for long-time usage. Artificial Intelligence (AI) technologies have made it possible to generate 3D models from texts, images or both and show potential to automate interactions through the user's voice. We propose a novel design, AICRID in short, which aims to automate the 3D model generation and to facilitate the interactions for interior design AR by leveraging on AI technologies. Specifically, our design will allow the users to directly generate 3D furniture models with generative AI, enabling them to directly interact with the virtual objects through their voices. © 2023 IEEE.},
keywords = {3D modeling, 3D models, 3d-modeling, Architectural design, Artificial intelligence, Artificial intelligence technologies, Augmented Reality, Augmented reality technology, Interior Design, Interior designs, machine learning, Machine-learning, Model generation, Novel design, Text images, User need, Visualization},
pubstate = {published},
tppubtype = {inproceedings}
}
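The AICRID flow this abstract proposes (voice command, generative text-to-3D, AR placement) can be outlined as a three-stage pipeline. A hypothetical skeleton only: the paper gives no implementation, so the text-to-3D and AR stages below are labeled stubs, and only the speech capture uses a real library (SpeechRecognition):

import speech_recognition as sr  # real library; the rest of the pipeline is stubbed

def transcribe_command() -> str:
    """Capture a spoken request such as 'add a walnut coffee table'."""
    recognizer = sr.Recognizer()
    with sr.Microphone() as mic:
        audio = recognizer.listen(mic)
    return recognizer.recognize_google(audio)  # any speech-to-text service would do

def generate_furniture_model(prompt: str) -> str:
    """Stub for a text-to-3D service; the paper cites generative AI but no API."""
    return "generated/furniture.glb"  # placeholder path, not a real call

def place_in_ar_scene(mesh_path: str) -> None:
    """Stub: hand the generated mesh to the AR runtime for anchoring and preview."""
    print(f"anchoring {mesh_path} in the AR scene")

place_in_ar_scene(generate_furniture_model(transcribe_command()))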