AHCI RESEARCH GROUP
Publications
Papers published in international journals,
proceedings of conferences, workshops and books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTeX record for each paper.
2025
Fang, A.; Chhabria, H.; Maram, A.; Zhu, H.
Social Simulation for Everyday Self-Care: Design Insights from Leveraging VR, AR, and LLMs for Practicing Stress Relief Proceedings Article
In: Conf Hum Fact Comput Syst Proc, Association for Computing Machinery, 2025, ISBN: 979-840071394-1 (ISBN).
Abstract | Links | BibTeX | Tags: design, Design insights, Language Model, Large language model, large language models, Mental health, Peer support, Professional supports, Self-care, Social simulations, Speed dating, Virtual environments, Virtual Reality, Well being
@inproceedings{fang_social_2025,
  title     = {Social Simulation for Everyday Self-Care: Design Insights from Leveraging {VR}, {AR}, and {LLMs} for Practicing Stress Relief},
  author    = {Fang, A. and Chhabria, H. and Maram, A. and Zhu, H.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005770377&doi=10.1145%2f3706598.3713115&partnerID=40&md5=87d43f04dfd3231cb189fa89570824c5},
  doi       = {10.1145/3706598.3713115},
  isbn      = {979-840071394-1},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Conf Hum Fact Comput Syst Proc},
  publisher = {Association for Computing Machinery},
  abstract  = {Stress is an inevitable part of day-to-day life yet many find themselves unable to manage it themselves, particularly when professional or peer support are not always readily available. As self-care becomes increasingly vital for mental well-being, this paper explores the potential of social simulation as a safe, virtual environment for practicing in-the-moment stress relief for everyday social situations. Leveraging the immersive capabilities of VR, AR, and LLMs to create realistic interactions and environments, we developed eight interactive prototypes for various social stress related scenarios (e.g. public speaking, interpersonal conflict) across design dimensions of modality, interactivity, and mental health guidance in order to conduct prototype-driven semi-structured interviews with 19 participants. Our qualitative findings reveal that people currently lack effective means to support themselves through everyday stress and perceive social simulation - even at low immersion and interaction levels - to fill a gap for practical, controlled training of mental health practices. We outline key design needs for developing social simulation for self-care needs, and identify important considerations including risks of trauma from hyper-realism, distrust of LLM-recommended timing for mental health recommendations, and the value of accessibility for self-care interventions. © 2025 Copyright held by the owner/author(s).},
  keywords  = {design, Design insights, Language Model, Large language model, large language models, Mental health, Peer support, Professional supports, Self-care, Social simulations, Speed dating, Virtual environments, Virtual Reality, Well being},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2024
Park, G. W.; Panda, P.; Tankelevitch, L.; Rintel, S.
CoExplorer: Generative AI Powered 2D and 3D Adaptive Interfaces to Support Intentionality in Video Meetings Proceedings Article
In: Conf Hum Fact Comput Syst Proc, Association for Computing Machinery, 2024, ISBN: 979-840070331-7 (ISBN).
Abstract | Links | BibTeX | Tags: Adaptive interface, Adaptive user interface, design, Effectiveness, Facilitation, Generative AI, Goal, Goals, Intent recognition, Meeting, meetings, planning, Probes, Speech recognition, User interfaces, Video conferencing, Videoconferencing, Virtual Reality, Windowing
@inproceedings{park_coexplorer_2024,
  title     = {{CoExplorer}: Generative {AI} Powered {2D} and {3D} Adaptive Interfaces to Support Intentionality in Video Meetings},
  author    = {Park, G. W. and Panda, P. and Tankelevitch, L. and Rintel, S.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85194194151&doi=10.1145%2f3613905.3650797&partnerID=40&md5=19d083f7aee201cc73fe0c2386f04bb6},
  doi       = {10.1145/3613905.3650797},
  isbn      = {979-840070331-7},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {Conf Hum Fact Comput Syst Proc},
  publisher = {Association for Computing Machinery},
  abstract  = {Current online meeting technologies lack holistic support for reducing the effort of planning and running meetings. We present CoExplorer2D and CoExplorerVR, generative AI (GenAI)-driven technology probes for exploring the significant transformative potential of GenAI to augment these aspects of meetings. In each system, before the meeting, these systems generate tools that allow synthesis and ranking of attendees' key issues for discussion, and likely phases that a meeting would require to cover these issues. During the meeting, these systems use speech recognition to generate 2D or VR window layouts with appropriate applications and files for each phase, and recognize the attendees' progress through the meeting's phases. We argue that these probes show the potential of GenAI to contribute to reducing the effort required for planning and running meetings, providing participants with a more engaging and effective meeting experiences. © 2024 Association for Computing Machinery. All rights reserved.},
  keywords  = {Adaptive interface, Adaptive user interface, design, Effectiveness, Facilitation, Generative AI, Goal, Goals, Intent recognition, Meeting, meetings, planning, Probes, Speech recognition, User interfaces, Video conferencing, Videoconferencing, Virtual Reality, Windowing},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Xu, S.; Wei, Y.; Zheng, P.; Zhang, J.; Yu, C.
LLM enabled generative collaborative design in a mixed reality environment Journal Article
In: Journal of Manufacturing Systems, vol. 74, pp. 703–715, 2024, ISSN: 02786125 (ISSN).
Abstract | Links | BibTeX | Tags: Collaborative design, Collaborative design process, Communication barriers, Computational Linguistics, design, Design frameworks, generative artificial intelligence, Iterative methods, Language Model, Large language model, Mixed reality, Mixed-reality environment, Multi-modal, Visual languages
@article{xu_llm_2024,
  title     = {{LLM} enabled generative collaborative design in a mixed reality environment},
  author    = {Xu, S. and Wei, Y. and Zheng, P. and Zhang, J. and Yu, C.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85192244873&doi=10.1016%2fj.jmsy.2024.04.030&partnerID=40&md5=3f050c429cf5a4120d10a432311f46cb},
  doi       = {10.1016/j.jmsy.2024.04.030},
  issn      = {0278-6125},
  year      = {2024},
  date      = {2024-01-01},
  journal   = {Journal of Manufacturing Systems},
  volume    = {74},
  pages     = {703--715},
  abstract  = {In the collaborative design process, diverse stakeholder backgrounds often introduce inefficiencies in collaboration, such as delays in design delivery and decreased creativity, primarily due to misunderstandings and communication barriers caused by this diversity. To respond, this study proposes an AI-augmented Multimodal Collaborative Design (AI-MCD) framework. This framework utilizes Large Language Models (LLM) to establish an iterative prompting mechanism that provides professional design prompts for Generative AI (GAI) to generate precise visual schemes. On this basis, the GAI cooperates with Mixed Reality (MR) technology to form an interactive and immersive environment for enabling full participation in the design process. By integrating these technologies, the study aims to help stakeholders form a unified cognition and optimize the traditional collaborative design process. Through a case study involving the development of heart education products for children, the effectiveness of the framework is emphasized, and the practical application and effectiveness of the proposed method innovation are demonstrated. © 2024 The Society of Manufacturing Engineers},
  keywords  = {Collaborative design, Collaborative design process, Communication barriers, Computational Linguistics, design, Design frameworks, generative artificial intelligence, Iterative methods, Language Model, Large language model, Mixed reality, Mixed-reality environment, Multi-modal, Visual languages},
  pubstate  = {published},
  tppubtype = {article}
}