AHCI RESEARCH GROUP
Publications
Papers published in international journals,
proceedings of conferences, workshops and books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTeX record for each paper.
2024
He, Z.; Li, S.; Song, Y.; Cai, Z.
Towards Building Condition-Based Cross-Modality Intention-Aware Human-AI Cooperation under VR Environment Proceedings Article
In: Conf Hum Fact Comput Syst Proc, Association for Computing Machinery, 2024, ISBN: 979-840070330-0 (ISBN).
Abstract | Links | BibTeX | Tags: Action Generation, Building conditions, Condition, Critical challenges, Cross modality, Human-AI Cooperation, Information presentation, Intention Detection, Language Model, Multi-modal, Purchasing, User interfaces, Virtual Reality
@inproceedings{he_towards_2024,
  title     = {Towards Building Condition-Based Cross-Modality Intention-Aware Human-{AI} Cooperation under {VR} Environment},
  author    = {He, Z. and Li, S. and Song, Y. and Cai, Z.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85194829231&doi=10.1145%2f3613904.3642360&partnerID=40&md5=44d237a6e2a686af74ffb684ef887ab6},
  doi       = {10.1145/3613904.3642360},
  isbn      = {979-8-4007-0330-0},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {Proceedings of the {CHI} Conference on Human Factors in Computing Systems},
  publisher = {Association for Computing Machinery},
  abstract  = {To address critical challenges in effectively identifying user intent and forming relevant information presentations and recommendations in VR environments, we propose an innovative condition-based multi-modal human-AI cooperation framework. It highlights the intent tuples (intent, condition, intent prompt, action prompt) and 2-Large-Language-Models (2-LLMs) architecture. This design, utilizes ``condition'' as the core to describe tasks, dynamically match user interactions with intentions, and empower generations of various tailored multi-modal AI responses. The architecture of 2-LLMs separates the roles of intent detection and action generation, decreasing the prompt length and helping with generating appropriate responses. We implemented a VR-based intelligent furniture purchasing system based on the proposed framework and conducted a three-phase comparative user study. The results conclusively demonstrate the system's superiority in time efficiency and accuracy, intention conveyance improvements, effective product acquisitions, and user satisfaction and cooperation preference. Our framework provides a promising approach towards personalized and efficient user experiences in VR. © 2024 Copyright held by the owner/author(s)},
  keywords  = {Action Generation, Building conditions, Condition, Critical challenges, Cross modality, Human-AI Cooperation, Information presentation, Intention Detection, Language Model, Multi-modal, Purchasing, User interfaces, Virtual Reality},
  pubstate  = {published},
  tppubtype = {inproceedings},
  internal-note = {Cleaned from Scopus export: ISBN hyphenated and "(ISBN)" suffix removed; authors normalised to "Last, First" form; acronyms braced in title; booktitle expanded from "Conf Hum Fact Comput Syst Proc" per DOI 10.1145/3613904.3642360 (CHI '24); curly quotes in abstract replaced with TeX quotes for classic-BibTeX safety},
}
Kumaar, D. Prasanna; Abisha, D.; Christy, J. Ida; Evanjalin, R. Navedha; Rajesh, P.; Haris, K. Mohamed
WRISTVIEW: Augmented Reality and Generative AI Integration for Enhanced Online Shopping Experiences Proceedings Article
In: Int. Conf. I-SMAC (IoT Soc., Mob., Anal. Cloud), I-SMAC - Proc., pp. 1115–1120, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-835037642-5 (ISBN).
Abstract | Links | BibTeX | Tags: Augmented Reality, Chatbots, Conversational AI, Customer satisfaction, Decision making, E-commerce Innovation, Growing demand, Immersive, Interactivity, Online shopping, Personalizations, Personalized Shopping Experience, Purchasing, Sales, Virtual environments, Virtual Try-On
@inproceedings{prasanna_kumaar_wristview_2024,
  title     = {{WRISTVIEW}: Augmented Reality and Generative {AI} Integration for Enhanced Online Shopping Experiences},
  author    = {Prasanna Kumaar, D. and Abisha, D. and Ida Christy, J. and Navedha Evanjalin, R. and Rajesh, P. and Mohamed Haris, K.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85208587489&doi=10.1109%2fI-SMAC61858.2024.10714789&partnerID=40&md5=e7742c8808cb551b17efe9ac3efeb961},
  doi       = {10.1109/I-SMAC61858.2024.10714789},
  isbn      = {979-8-3503-7642-5},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {Int. Conf. I-SMAC (IoT Soc., Mob., Anal. Cloud), I-SMAC - Proc.},
  pages     = {1115--1120},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {The traditional retail experience for purchasing watches lacks the interactivity and personalization that modern consumers seek. With the increasing shift towards online shopping platforms, there is a growing demand for an engaging and immersive virtual experience that allows customers to explore and try on watches. The absence of expert guidance during the online shopping process often results in less informed decision-making. To address these challenges, WRISTVIEW presents an innovative solution that integrates augmented reality (AR) and a conversational Generative AI (GENAI) chatbot. The AR component enables users to virtually try on watches, providing a realistic and interactive experience. The GENAI chatbot enhances this experience by offering expert advice, answering queries, and guiding users through the watch shopping journey, thereby creating a more personalized and informative shopping process. The objective of this research is to bridge the gap between the traditional in-store watch shopping experience and the online environment, ensuring that customers can make well-informed and satisfying purchase decisions in a virtual setting. The development, implementation, and potential impact of combining AR and GENAI technologies to transform the online watch shopping experience are discussed. © 2024 IEEE.},
  keywords  = {Augmented Reality, Chatbots, Conversational AI, Customer satisfaction, Decision making, E-commerce Innovation, Growing demand, Immersive, Interactivity, Online shopping, Personalizations, Personalized Shopping Experience, Purchasing, Sales, Virtual environments, Virtual Try-On},
  pubstate  = {published},
  tppubtype = {inproceedings},
  internal-note = {Cleaned from Scopus export: ISBN hyphenated and "(ISBN)" suffix removed; authors normalised to "Last, First" form; page range en-dash replaced with "--"; acronyms braced in title; abbreviated booktitle retained pending confirmation of the full IEEE proceedings title},
}