AHCI RESEARCH GROUP
Publications
Papers published in international journals,
proceedings of conferences, workshops and books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTeX record for each paper.
2024
Song, Y.; Wu, K.; Ding, J.
In: Computers and Education: X Reality, vol. 4, 2024, ISSN: 29496780 (ISSN).
Abstract | Links | BibTeX | Tags: Game-based learning, Generative AI, Immersion, Interaction, Virtual Reality (VR)
@article{song_developing_2024,
  title     = {Developing an immersive game-based learning platform with generative artificial intelligence and virtual reality technologies -- ``{LearningverseVR}''},
  author    = {Song, Y. and Wu, K. and Ding, J.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85205973323&doi=10.1016%2fj.cexr.2024.100069&partnerID=40&md5=91dd3ac3d01b4730f923f8541d5877f2},
  doi       = {10.1016/j.cexr.2024.100069},
  issn      = {2949-6780},
  year      = {2024},
  date      = {2024-01-01},
  journal   = {Computers and Education: X Reality},
  volume    = {4},
  abstract  = {The rapid evolution of generative artificial intelligence (AI) and virtual reality (VR) technologies are revolutionising various fields, including education and gaming industries. However, studies on how to enhance immersive game-based learning with AI and VR technologies remain scant. Given this, the article presents the creation of “LearningverseVR,” an immersive game-based learning platform developed using generative AI and VR technologies, which is based on “Learningverse,” a metaverse platform developed by the lead author and her research team. The “LearningverseVR” platform uses Unity as the client and Python, Flask and MySQL as the backend. Unity's multiplayer service provides multiplayer online functionality, supporting learners to engage in immersive and interactive learning activities. The design framework of the platform consists of two main components: Game-based learning with generative AI and immersion with VR technologies. First, generative AI is used to create NPCs with diverse personalities and life backgrounds, and enable learners to interact with NPCs without scripted dialogues, creating an interactive and immersive game-based learning environment. Secondly, such a learning experience is enhanced by leveraging the Large Language Model (LLM) ecosystem with VR technology. The creation of the “LearningverseVR” platform provides novel perspectives on digital game-based learning. © 2024 The Authors},
  keywords  = {Game-based learning, Generative AI, Immersion, Interaction, Virtual Reality (VR)},
  pubstate  = {published},
  tppubtype = {article}
}
Janaka, N.
Towards Intelligent Wearable Assistants Proceedings Article
In: UbiComp Companion - Companion ACM Int. Jt. Conf. Pervasive Ubiquitous Comput., pp. 618–621, Association for Computing Machinery, Inc, 2024, ISBN: 979-840071058-2 (ISBN).
Abstract | Links | BibTeX | Tags: AI assistance, Augmented Reality, context-aware system, Context-aware systems, HMD, Interaction, interactions, Interruption, interruptions, MR, Notification, notifications, Smart glass, smart glasses, wearable, Wearable assistant, Wearable computers, XR
@inproceedings{janaka_towards_2024,
  title     = {Towards Intelligent Wearable Assistants},
  author    = {Janaka, N.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85206157248&doi=10.1145%2f3675094.3678989&partnerID=40&md5=539933fdbb3b5289b179cbe9e8f7c083},
  doi       = {10.1145/3675094.3678989},
  isbn      = {979-8-4007-1058-2},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {UbiComp Companion - Companion ACM Int. Jt. Conf. Pervasive Ubiquitous Comput.},
  pages     = {618--621},
  publisher = {Association for Computing Machinery, Inc},
  abstract  = {This summary outlines my research toward developing intelligent wearable assistants that provide personalized, context-aware computing assistance. Previous work explored information presentation using smart glasses, socially-aware interactions, and applications for learning, communication, and documentation. Current research aims to develop tools for interaction research, including data collection, multimodal evaluation metrics, and a platform for creating context-aware AI assistants. Future goals include extending assistants to physical spaces via telepresence, optimizing learning with generative AI, and investigating collaborative human-AI learning. Ultimately, this research seeks to redefine how humans receive seamless support through proactive, intelligent wearable assistants that comprehend users and environments, augmenting capabilities while reducing reliance on manual labor. © 2024 Copyright held by the owner/author(s).},
  keywords  = {AI assistance, Augmented Reality, context-aware system, Context-aware systems, HMD, Interaction, interactions, Interruption, interruptions, MR, Notification, notifications, Smart glass, smart glasses, wearable, Wearable assistant, Wearable computers, XR},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hellström, T.; Kaiser, N.; Bensch, S.
A Taxonomy of Embodiment in the AI Era Journal Article
In: Electronics (Switzerland), vol. 13, no. 22, 2024, ISSN: 20799292 (ISSN).
Abstract | Links | BibTeX | Tags: Avatar, Cognition, Digital Twins, Interaction, Robotics
@article{hellstrom_taxonomy_2024,
  title     = {A Taxonomy of Embodiment in the {AI} Era},
  author    = {Hellstr{\"o}m, T. and Kaiser, N. and Bensch, S.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85210288412&doi=10.3390%2felectronics13224441&partnerID=40&md5=a0394b3323e9fe1a58021065aaee5569},
  doi       = {10.3390/electronics13224441},
  issn      = {2079-9292},
  year      = {2024},
  date      = {2024-01-01},
  journal   = {Electronics (Switzerland)},
  volume    = {13},
  number    = {22},
  abstract  = {This paper presents a taxonomy of agents’ embodiment in physical and virtual environments. It categorizes embodiment based on five entities: the agent being embodied, the possible mediator of the embodiment, the environment in which sensing and acting take place, the degree of body, and the intertwining of body, mind, and environment. The taxonomy is applied to a wide range of embodiment of humans, artifacts, and programs, including recent technological and scientific innovations related to virtual reality, augmented reality, telepresence, the metaverse, digital twins, and large language models. The presented taxonomy is a powerful tool to analyze, clarify, and compare complex cases of embodiment. For example, it makes the choice between a dualistic and non-dualistic perspective of an agent’s embodiment explicit and clear. The taxonomy also aided us to formulate the term “embodiment by proxy” to denote how seemingly non-embodied agents may affect the world by using humans as “extended arms”. We also introduce the concept “off-line embodiment” to describe large language models’ ability to create an illusion of human perception. © 2024 by the authors.},
  keywords  = {Avatar, Cognition, Digital Twins, Interaction, Robotics},
  pubstate  = {published},
  tppubtype = {article}
}
2022
Wang, A.; Gao, Z.; Lee, L. H.; Braud, T.; Hui, P.
Decentralized, not Dehumanized in the Metaverse: Bringing Utility to NFTs through Multimodal Interaction Proceedings Article
In: ACM Int. Conf. Proc. Ser., pp. 662–667, Association for Computing Machinery, 2022, ISBN: 978-145039390-4 (ISBN).
Abstract | Links | BibTeX | Tags: AI-generated art, Arts computing, Behavioral Research, Computation theory, Continuum mechanics, Decentralised, Human behaviors, Interaction, Multi-modal, multimodal, Multimodal Interaction, NFTs, Non-fungible token, Text-to-image, The metaverse
@inproceedings{wang_decentralized_2022,
  title     = {Decentralized, not Dehumanized in the {Metaverse}: Bringing Utility to {NFTs} through Multimodal Interaction},
  author    = {Wang, A. and Gao, Z. and Lee, L. H. and Braud, T. and Hui, P.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85142799074&doi=10.1145%2f3536221.3558176&partnerID=40&md5=f9dee1e9e60afc71c4533cbdee0b98a7},
  doi       = {10.1145/3536221.3558176},
  isbn      = {978-1-4503-9390-4},
  year      = {2022},
  date      = {2022-01-01},
  booktitle = {ACM Int. Conf. Proc. Ser.},
  pages     = {662--667},
  publisher = {Association for Computing Machinery},
  abstract  = {User Interaction for NFTs (Non-fungible Tokens) is gaining increasing attention. Although NFTs have been traditionally single-use and monolithic, recent applications aim to connect multimodal interaction with human behavior. This paper reviews the related technological approaches and business practices in NFT art. We highlight that multimodal interaction is a currently under-studied issue in mainstream NFT art, and conjecture that multimodal interaction is a crucial enabler for decentralization in the NFT community. We present a continuum theory and propose a framework combining a bottom-up approach with AI multimodal process. Through this framework, we put forward integrating human behavior data into generative NFT units, as "multimodal interactive NFT." Our work displays the possibilities of NFTs in the art world, beyond the traditional 2D and 3D static content. © 2022 ACM.},
  keywords  = {AI-generated art, Arts computing, Behavioral Research, Computation theory, Continuum mechanics, Decentralised, Human behaviors, Interaction, Multi-modal, multimodal, Multimodal Interaction, NFTs, Non-fungible token, Text-to-image, The metaverse},
  pubstate  = {published},
  tppubtype = {inproceedings}
}