AHCI RESEARCH GROUP
Publications
Papers published in international journals, 
proceedings of conferences, workshops and books.
				OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTeX record for each paper.
2025
Wei, Q.; Huang, J.; Gao, Y.; Dong, W.
One Model to Fit Them All: Universal IMU-based Human Activity Recognition with LLM-assisted Cross-dataset Representation Journal Article
In: Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies, vol. 9, no. 3, 2025, ISSN: 24749567 (ISSN), (Publisher: Association for Computing Machinery).
Abstract | Links | BibTeX | Tags: Broad application, Contrastive Learning, Cross-dataset, Data collection, Human activity recognition, Human activity recognition systems, Human computer interaction, Intelligent interactions, Language Model, Large datasets, Large language model, large language models, Learning systems, Neural-networks, Pattern recognition, Spatial relationships, Ubiquitous computing, Virtual Reality
@article{wei_one_2025,
  title    = {One Model to Fit Them All: Universal {IMU}-based Human Activity Recognition with {LLM}-assisted Cross-dataset Representation},
  author   = {Wei, Q. and Huang, J. and Gao, Y. and Dong, W.},
  url      = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105015431117&doi=10.1145%2F3749509&partnerID=40&md5=2a6f26a05856c48ba3aaaf356b375dc0},
  doi      = {10.1145/3749509},
  issn     = {2474-9567},
  year     = {2025},
  date     = {2025-01-01},
  journal  = {Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies},
  volume   = {9},
  number   = {3},
  abstract = {Human Activity Recognition (HAR) is essential for pervasive computing and intelligent interaction, with broad applications across various fields. However, there is still no one model capable of fitting various HAR datasets, severely limiting its applicability in practical scenarios. To address this, we propose oneHAR, an LLM-assisted universal IMU-based HAR system designed to achieve "one model to fit them all" — just one model that can adapt to diverse HAR datasets without any dataset-specific operation. In particular, we propose Cross-Dataset neural network (CDNet) for the "one model," which models both the temporal context and spatial relationships of IMU data to capture cross-dataset representations, encompassing differences in device, participant, data collection position, and environment, etc. Additionally, we introduce LLM-driven data synthesis, which enhances the training process by generating virtual IMU data through three carefully designed strategies. Furthermore, LLM-assisted adaptive position processing optimizes the inference process by flexibly handling a variable combination of positional inputs. Our model demonstrates strong generalization across five public IMU-based HAR datasets, outperforming the best baselines by up to 46.9% in the unseen-dataset scenario, and 6.5% in the cross-dataset scenario.},
  note     = {Publisher: Association for Computing Machinery},
  keywords = {Broad application, Contrastive Learning, Cross-dataset, Data collection, Human activity recognition, Human activity recognition systems, Human computer interaction, Intelligent interactions, Language Model, Large datasets, Large language model, large language models, Learning systems, Neural-networks, Pattern recognition, Spatial relationships, Ubiquitous computing, Virtual Reality},
  pubstate = {published},
  tppubtype = {article}
}
2022
Casoria, Luigi; Gallo, Luigi; Caggianese, Giuseppe
Safeguarding Face-To-Face Communication in Augmented Reality: An Adaptive Interface Proceedings Article
In: 2022 IEEE International Conference on Metrology for Extended Reality, Artificial Intelligence and Neural Engineering (MetroXRAINE), pp. 127–132, IEEE, 2022, ISBN: 978-1-66548-574-6.
Abstract | Links | BibTeX | Tags: Adaptive interface, Augmented Reality, Data visualization, Mobile computing, Patient monitoring, Ubiquitous computing, User interface
@inproceedings{casoriaSafeguardingFaceToFaceCommunication2022,
  title     = {Safeguarding Face-To-Face Communication in Augmented Reality: An Adaptive Interface},
  author    = {Casoria, Luigi and Gallo, Luigi and Caggianese, Giuseppe},
  doi       = {10.1109/MetroXRAINE54828.2022.9967661},
  isbn      = {978-1-66548-574-6},
  year      = {2022},
  date      = {2022-10-01},
  urldate   = {2023-03-15},
  booktitle = {2022 IEEE International Conference on Metrology for Extended Reality, Artificial Intelligence and Neural Engineering (MetroXRAINE)},
  pages     = {127--132},
  publisher = {IEEE},
  abstract  = {Recent advances in wearable augmented reality devices foster the vision of ubiquitous interaction in an immersive, digitally augmented, physical world. Assuming that such devices could one day replace smartphones for accessing information, creating interfaces safeguarding face-to-face communication is challenging. This work presents the design of an interface that adapts the information visualisation to the presence of a possible interlocutor while allowing a high level of user control. The aim was to define an interface for wearable devices adaptive to interactions coming from the surrounding environment and expressly thought for application domains in which it will be necessary to continuously monitor information. For instance, those applications that require monitoring patient data in medical applications or the progress of a production process in an industrial environment. We focused on human-to-human communication, minimising the use of mid-air interaction to hide the synthetic information that might interrupt the conversation flow. Two different visualisation modalities allowing the coexistence of real and virtual worlds are proposed and evaluated in a preliminary study with six participants who showed a generalised appreciation for the solution which maximises the display of information requiring less user intervention.},
  keywords  = {Adaptive interface, Augmented Reality, Data visualization, Mobile computing, Patient monitoring, Ubiquitous computing, User interface},
  internal-note = {Possible duplicate of entry key casoria_safeguarding_2022 (same DOI); kept because existing citations may reference either key — consider consolidating},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Casoria, Luigi; Gallo, Luigi; Caggianese, Giuseppe
Safeguarding Face-To-Face Communication in Augmented Reality: An Adaptive Interface Proceedings Article
In: 2022 IEEE International Conference on Metrology for Extended Reality, Artificial Intelligence and Neural Engineering (MetroXRAINE), pp. 127–132, IEEE, 2022, ISBN: 978-1-66548-574-6, (event-place: Rome, Italy).
Abstract | Links | BibTeX | Tags: Adaptive interface, Augmented Reality, Data visualization, Mobile computing, Patient monitoring, Ubiquitous computing, User interface
@inproceedings{casoria_safeguarding_2022,
  title     = {Safeguarding Face-To-Face Communication in Augmented Reality: An Adaptive Interface},
  author    = {Casoria, Luigi and Gallo, Luigi and Caggianese, Giuseppe},
  url       = {https://ieeexplore.ieee.org/document/9967661/},
  doi       = {10.1109/MetroXRAINE54828.2022.9967661},
  isbn      = {978-1-66548-574-6},
  year      = {2022},
  date      = {2022-10-01},
  urldate   = {2023-03-15},
  booktitle = {2022 IEEE International Conference on Metrology for Extended Reality, Artificial Intelligence and Neural Engineering (MetroXRAINE)},
  pages     = {127--132},
  publisher = {IEEE},
  abstract  = {Recent advances in wearable augmented reality devices foster the vision of ubiquitous interaction in an immersive, digitally augmented, physical world. Assuming that such devices could one day replace smartphones for accessing information, creating interfaces safeguarding face-to-face communication is challenging. This work presents the design of an interface that adapts the information visualisation to the presence of a possible interlocutor while allowing a high level of user control. The aim was to define an interface for wearable devices adaptive to interactions coming from the surrounding environment and expressly thought for application domains in which it will be necessary to continuously monitor information. For instance, those applications that require monitoring patient data in medical applications or the progress of a production process in an industrial environment. We focused on human-to-human communication, minimising the use of mid-air interaction to hide the synthetic information that might interrupt the conversation flow. Two different visualisation modalities allowing the coexistence of real and virtual worlds are proposed and evaluated in a preliminary study with six participants who showed a generalised appreciation for the solution which maximises the display of information requiring less user intervention.},
  note      = {event-place: Rome, Italy},
  keywords  = {Adaptive interface, Augmented Reality, Data visualization, Mobile computing, Patient monitoring, Ubiquitous computing, User interface},
  pubstate  = {published},
  tppubtype = {inproceedings}
}