AHCI RESEARCH GROUP
Publications
Papers published in international journals,
conference and workshop proceedings, and books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to filter the papers by research topic.
You can expand the Abstract, Links and BibTeX record for each paper.
2024
Park, G. W.; Panda, P.; Tankelevitch, L.; Rintel, S.
CoExplorer: Generative AI Powered 2D and 3D Adaptive Interfaces to Support Intentionality in Video Meetings Proceedings Article
In: Conf Hum Fact Comput Syst Proc, Association for Computing Machinery, 2024, ISBN: 979-840070331-7.
@inproceedings{park_coexplorer_2024,
title = {CoExplorer: Generative AI Powered 2D and 3D Adaptive Interfaces to Support Intentionality in Video Meetings},
author = {G. W. Park and P. Panda and L. Tankelevitch and S. Rintel},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85194194151&doi=10.1145%2f3613905.3650797&partnerID=40&md5=19d083f7aee201cc73fe0c2386f04bb6},
doi = {10.1145/3613905.3650797},
isbn = {979-840070331-7},
year = {2024},
date = {2024-01-01},
booktitle = {Conf Hum Fact Comput Syst Proc},
publisher = {Association for Computing Machinery},
abstract = {Current online meeting technologies lack holistic support for reducing the effort of planning and running meetings. We present CoExplorer2D and CoExplorerVR, generative AI (GenAI)-driven technology probes for exploring the significant transformative potential of GenAI to augment these aspects of meetings. Before the meeting, these systems generate tools that allow synthesis and ranking of attendees' key issues for discussion, and likely phases that a meeting would require to cover these issues. During the meeting, these systems use speech recognition to generate 2D or VR window layouts with appropriate applications and files for each phase, and recognize the attendees' progress through the meeting's phases. We argue that these probes show the potential of GenAI to contribute to reducing the effort required for planning and running meetings, providing participants with a more engaging and effective meeting experience. © 2024 Association for Computing Machinery. All rights reserved.},
keywords = {Adaptive interface, Adaptive user interface, design, Effectiveness, Facilitation, Generative AI, Goal, Goals, Intent recognition, Meeting, meetings, planning, Probes, Speech recognition, User interfaces, Video conferencing, Videoconferencing, Virtual Reality, Windowing},
pubstate = {published},
tppubtype = {inproceedings}
}
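The CoExplorer entry above describes a speech-to-phase-to-layout loop: speech recognition tracks which phase of the meeting the attendees have reached, and the system opens an appropriate window layout for that phase. Purely as a hypothetical illustration of that loop (none of the phase names, keywords or layouts below come from the paper, and the actual probes use GenAI rather than keyword matching), a minimal Python sketch might look like this:

# Hypothetical sketch, not from the paper: a keyword-based phase tracker
# that maps a recognised meeting phase to a 2D window layout, loosely
# mirroring the "speech -> phase -> layout" loop described in the abstract.

from dataclasses import dataclass


@dataclass
class Phase:
    name: str
    keywords: set[str]   # cues an ASR/GenAI pipeline might surface
    layout: list[str]    # windows/files to show for this phase (invented)


PHASES = [
    Phase("agenda review", {"agenda", "goals", "plan"},
          ["agenda.md", "issue-ranking.pdf"]),
    Phase("discussion", {"issue", "option", "tradeoff"},
          ["whiteboard", "shared-notes.md"]),
    Phase("wrap-up", {"action", "next", "summary"},
          ["action-items.md", "calendar"]),
]


def detect_phase(transcript_snippet: str) -> Phase:
    """Pick the phase whose keywords best match the recent transcript."""
    words = set(transcript_snippet.lower().split())
    return max(PHASES, key=lambda p: len(p.keywords & words))


def apply_layout(phase: Phase) -> None:
    """Stand-in for real 2D or VR window management."""
    print(f"[{phase.name}] opening: {', '.join(phase.layout)}")


if __name__ == "__main__":
    for snippet in [
        "let's look at the agenda and today's goals",
        "what are the tradeoffs of each option for this issue",
        "okay, action items and next steps before we close",
    ]:
        apply_layout(detect_phase(snippet))

In the actual probes, the phase model and layouts are generated by a generative model from the attendees' stated issues, and the layout step would call real window-management APIs rather than print a list.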
2022
Casoria, Luigi; Gallo, Luigi; Caggianese, Giuseppe
Safeguarding Face-To-Face Communication in Augmented Reality: An Adaptive Interface Proceedings Article
In: 2022 IEEE International Conference on Metrology for Extended Reality, Artificial Intelligence and Neural Engineering (MetroXRAINE), pp. 127–132, IEEE, 2022, ISBN: 978-1-66548-574-6, (event-place: Rome, Italy).
@inproceedings{casoria_safeguarding_2022,
title = {Safeguarding Face-To-Face Communication in Augmented Reality: An Adaptive Interface},
author = {Luigi Casoria and Luigi Gallo and Giuseppe Caggianese},
url = {https://ieeexplore.ieee.org/document/9967661/},
doi = {10.1109/MetroXRAINE54828.2022.9967661},
isbn = {978-1-66548-574-6},
year = {2022},
date = {2022-10-01},
urldate = {2023-03-15},
booktitle = {2022 IEEE International Conference on Metrology for Extended Reality, Artificial Intelligence and Neural Engineering (MetroXRAINE)},
pages = {127–132},
publisher = {IEEE},
abstract = {Recent advances in wearable augmented reality devices foster the vision of ubiquitous interaction in an immersive, digitally augmented, physical world. Assuming that such devices could one day replace smartphones for accessing information, creating interfaces safeguarding face-to-face communication is challenging. This work presents the design of an interface that adapts the information visualisation to the presence of a possible interlocutor while allowing a high level of user control. The aim was to define an interface for wearable devices adaptive to interactions coming from the surrounding environment and expressly designed for application domains in which it will be necessary to continuously monitor information. Examples include applications that require monitoring patient data in a medical setting or the progress of a production process in an industrial environment. We focused on human-to-human communication, minimising the use of mid-air interaction to hide the synthetic information that might interrupt the conversation flow. Two different visualisation modalities allowing the coexistence of real and virtual worlds are proposed and evaluated in a preliminary study with six participants, who showed a general appreciation for the solution that maximises the display of information while requiring less user intervention.},
note = {event-place: Rome, Italy},
keywords = {Adaptive interface, Augmented Reality, Data visualization, Mobile computing, Patient monitoring, Ubiquitous computing, User interface},
pubstate = {published},
tppubtype = {inproceedings}
}
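The paper above proposes an AR interface that reduces or hides the virtual overlay when a face-to-face interlocutor is present, while keeping the user in control. As a hypothetical sketch of such an adaptation rule only (the detection inputs, display modes and threshold below are invented and are not the authors' implementation):

# Hypothetical sketch, not the authors' implementation: a small adaptation
# rule in the spirit of the abstract above -- reduce or hide the AR overlay
# when an interlocutor is detected, unless the user has pinned the display.

from enum import Enum


class DisplayMode(Enum):
    FULL = "full overlay"             # all monitored data visible
    MINIMISED = "peripheral summary"  # reduced, conversation-safe view
    HIDDEN = "overlay hidden"


def choose_mode(face_detected: bool, gaze_on_user_s: float,
                user_pinned: bool) -> DisplayMode:
    """Adapt the AR visualisation to a possible face-to-face conversation.

    face_detected and gaze_on_user_s stand in for whatever interlocutor
    sensing a real headset provides; user_pinned keeps the user in control,
    as the paper emphasises.
    """
    if user_pinned:
        return DisplayMode.FULL
    if face_detected and gaze_on_user_s > 1.5:  # arbitrary threshold
        return DisplayMode.HIDDEN
    if face_detected:
        return DisplayMode.MINIMISED
    return DisplayMode.FULL


if __name__ == "__main__":
    print(choose_mode(face_detected=False, gaze_on_user_s=0.0, user_pinned=False))
    print(choose_mode(face_detected=True, gaze_on_user_s=0.4, user_pinned=False))
    print(choose_mode(face_detected=True, gaze_on_user_s=2.0, user_pinned=False))
    print(choose_mode(face_detected=True, gaze_on_user_s=2.0, user_pinned=True))

A headset implementation would replace these boolean and gaze inputs with its own face- and gaze-tracking signals and would animate the transitions between visualisation modes instead of switching abruptly.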