AHCI RESEARCH GROUP
Publications
Papers published in international journals,
proceedings of conferences, workshops and books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTeX record for each paper.
2024
Park, G. W.; Panda, P.; Tankelevitch, L.; Rintel, S.
CoExplorer: Generative AI Powered 2D and 3D Adaptive Interfaces to Support Intentionality in Video Meetings Proceedings Article
In: Conf Hum Fact Comput Syst Proc, Association for Computing Machinery, 2024, ISBN: 979-840070331-7 (ISBN).
Abstract | Links | BibTeX | Tags: Adaptive interface, Adaptive user interface, design, Effectiveness, Facilitation, Generative AI, Goal, Goals, Intent recognition, Meeting, meetings, planning, Probes, Speech recognition, User interfaces, Video conferencing, Videoconferencing, Virtual Reality, Windowing
@inproceedings{park_coexplorer_2024,
  title     = {{CoExplorer}: Generative {AI} Powered {2D} and {3D} Adaptive Interfaces to Support Intentionality in Video Meetings},
  author    = {G. W. Park and P. Panda and L. Tankelevitch and S. Rintel},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85194194151&doi=10.1145%2f3613905.3650797&partnerID=40&md5=19d083f7aee201cc73fe0c2386f04bb6},
  doi       = {10.1145/3613905.3650797},
  isbn      = {979-840070331-7},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {Conf Hum Fact Comput Syst Proc},
  publisher = {Association for Computing Machinery},
  abstract  = {Current online meeting technologies lack holistic support for reducing the effort of planning and running meetings. We present CoExplorer2D and CoExplorerVR, generative AI (GenAI)-driven technology probes for exploring the significant transformative potential of GenAI to augment these aspects of meetings. In each system, before the meeting, these systems generate tools that allow synthesis and ranking of attendees' key issues for discussion, and likely phases that a meeting would require to cover these issues. During the meeting, these systems use speech recognition to generate 2D or VR window layouts with appropriate applications and files for each phase, and recognize the attendees' progress through the meeting's phases. We argue that these probes show the potential of GenAI to contribute to reducing the effort required for planning and running meetings, providing participants with a more engaging and effective meeting experiences. © 2024 Association for Computing Machinery. All rights reserved.},
  keywords  = {Adaptive interface, Adaptive user interface, design, Effectiveness, Facilitation, Generative AI, Goal, Goals, Intent recognition, Meeting, meetings, planning, Probes, Speech recognition, User interfaces, Video conferencing, Videoconferencing, Virtual Reality, Windowing},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Li, Z.; Gebhardt, C.; Inglin, Y.; Steck, N.; Streli, P.; Holz, C.
SituationAdapt: Contextual UI Optimization in Mixed Reality with Situation Awareness via LLM Reasoning Proceedings Article
In: UIST - Proc. Annual ACM Symp. User Interface Softw. Technol., Association for Computing Machinery, Inc, 2024, ISBN: 979-840070628-8 (ISBN).
Abstract | Links | BibTeX | Tags: Adaptive user interface, Adaptive User Interfaces, Environmental cues, Language Model, Large language model, large language models, Mixed reality, Mobile settings, Office space, Optimisations, Optimization module, Situation awareness
@inproceedings{li_situationadapt_2024,
  title     = {{SituationAdapt}: Contextual {UI} Optimization in {Mixed Reality} with Situation Awareness via {LLM} Reasoning},
  author    = {Z. Li and C. Gebhardt and Y. Inglin and N. Steck and P. Streli and C. Holz},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85211434729&doi=10.1145%2f3654777.3676470&partnerID=40&md5=596480194c46ab2753cffeb8cce22243},
  doi       = {10.1145/3654777.3676470},
  isbn      = {979-840070628-8},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {UIST - Proc. Annual ACM Symp. User Interface Softw. Technol.},
  publisher = {Association for Computing Machinery, Inc},
  abstract  = {Mixed Reality is increasingly used in mobile settings beyond controlled home and office spaces. This mobility introduces the need for user interface layouts that adapt to varying contexts. However, existing adaptive systems are designed only for static environments. In this paper, we introduce SituationAdapt, a system that adjusts Mixed Reality UIs to real-world surroundings by considering environmental and social cues in shared settings. Our system consists of perception, reasoning, and optimization modules for UI adaptation. Our perception module identifies objects and individuals around the user, while our reasoning module leverages a Vision-and-Language Model to assess the placement of interactive UI elements. This ensures that adapted layouts do not obstruct relevant environmental cues or interfere with social norms. Our optimization module then generates Mixed Reality interfaces that account for these considerations as well as temporal constraints. For evaluation, we first validate our reasoning module's capability of assessing UI contexts in comparison to human expert users. In an online user study, we then establish SituationAdapt's capability of producing context-aware layouts for Mixed Reality, where it outperformed previous adaptive layout methods. We conclude with a series of applications and scenarios to demonstrate SituationAdapt's versatility. © 2024 ACM.},
  keywords  = {Adaptive user interface, Adaptive User Interfaces, Environmental cues, Language Model, Large language model, large language models, Mixed reality, Mobile settings, Office space, Optimisations, Optimization module, Situation awareness},
  pubstate  = {published},
  tppubtype = {inproceedings}
}