AHCI RESEARCH GROUP
Publications
Papers published in international journals,
proceedings of conferences, workshops and books.
2024
Guo, Y.; Hou, K.; Yan, Z.; Chen, H.; Xing, G.; Jiang, X.
Sensor2Scene: Foundation Model-Driven Interactive Realities Proceedings Article
In: Proc. - IEEE Int. Workshop Found. Model. Cyber-Phys. Syst. Internet Things, FMSys, pp. 13–19, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-835036345-6.
@inproceedings{guo_sensor2scene_2024,
title = {Sensor2Scene: Foundation Model-Driven Interactive Realities},
author = {Y. Guo and K. Hou and Z. Yan and H. Chen and G. Xing and X. Jiang},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85199893762&doi=10.1109%2fFMSys62467.2024.00007&partnerID=40&md5=c3bf1739e8c1dc6227d61609ddc66910},
doi = {10.1109/FMSys62467.2024.00007},
isbn = {979-835036345-6},
year = {2024},
date = {2024-01-01},
booktitle = {Proc. - IEEE Int. Workshop Found. Model. Cyber-Phys. Syst. Internet Things, FMSys},
pages = {13–19},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Augmented Reality (AR) is acclaimed for its potential to bridge the physical and virtual worlds. Yet, current integration between these realms often lacks a deep understanding of the physical environment and the subsequent scene generation that reflects this understanding. This research introduces Sensor2Scene, a novel system framework designed to enhance user interactions with sensor data through AR. At its core, an AI agent leverages large language models (LLMs) to decode subtle information from sensor data, constructing detailed scene descriptions for visualization. To enable these scenes to be rendered in AR, we decompose the scene creation process into tasks of text-to-3D model generation and spatial composition, allowing new AR scenes to be sketched from the descriptions. We evaluated our framework using an LLM evaluator based on five metrics on various datasets to examine the correlation between sensor readings and corresponding visualizations, and demonstrated the system's effectiveness with scenes generated end-to-end. The results highlight the potential of LLMs to understand IoT sensor data. Furthermore, generative models can aid in transforming these interpretations into visual formats, thereby enhancing user interaction. This work not only displays the capabilities of Sensor2Scene but also lays a foundation for advancing AR with the goal of creating more immersive and contextually rich experiences. © 2024 IEEE.},
keywords = {3D modeling, Augmented Reality, Computational Linguistics, Data integration, Data visualization, Foundation models, Generative model, Language Model, Large language model, large language models, Model-driven, Sensor Data Integration, Sensors data, Text-to-3d generative model, Text-to-3D Generative Models, Three dimensional computer graphics, User interaction, User Interaction in AR, User interaction in augmented reality, User interfaces, Virtual Reality, Visualization},
pubstate = {published},
tppubtype = {inproceedings}
}
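
The pipeline described in the abstract above (an LLM interpreting sensor readings into a scene description, which is then decomposed into text-to-3D generation tasks and a spatial composition step) can be illustrated with a minimal sketch. Everything below is a hypothetical illustration, not the authors' code: the llm callable, the JSON response format, the stub_llm helper, and the SceneObject layout are all assumptions made for the example.

# Minimal sketch of a Sensor2Scene-style pipeline (hypothetical; not the paper's implementation).
# An LLM turns raw sensor readings into a scene description, which is then split into
# text-to-3D generation prompts and a simple spatial composition step.

import json
from dataclasses import dataclass
from typing import Callable, List, Tuple


@dataclass
class SensorReading:
    sensor: str      # e.g. "temperature", "co2", "occupancy"
    value: float
    unit: str


@dataclass
class SceneObject:
    prompt: str                      # text prompt for a text-to-3D generator
    position: Tuple[float, float, float]  # (x, y, z) placement in the AR scene


def describe_scene(readings: List[SensorReading], llm: Callable[[str], str]) -> dict:
    """Ask the LLM to interpret the readings and propose a scene as JSON."""
    prompt = (
        "You are an AR scene designer. Given these sensor readings, return JSON with a "
        "'summary' string and an 'objects' list, where each object has a 'prompt' "
        "(text-to-3D description) and a 'position' [x, y, z].\n"
        + "\n".join(f"- {r.sensor}: {r.value} {r.unit}" for r in readings)
    )
    return json.loads(llm(prompt))


def compose_scene(description: dict) -> List[SceneObject]:
    """Turn the LLM's JSON description into placeable scene objects."""
    return [
        SceneObject(prompt=obj["prompt"], position=tuple(obj["position"]))
        for obj in description["objects"]
    ]


def stub_llm(prompt: str) -> str:
    # Stand-in for a real LLM call so the sketch runs end to end without any external service.
    return json.dumps({
        "summary": "A warm, stuffy room: show a glowing heat haze and an open window.",
        "objects": [
            {"prompt": "translucent red heat haze volume", "position": [0.0, 1.5, -2.0]},
            {"prompt": "open window with fluttering curtains", "position": [1.0, 1.0, -3.0]},
        ],
    })


if __name__ == "__main__":
    readings = [
        SensorReading("temperature", 29.5, "°C"),
        SensorReading("co2", 1400, "ppm"),
    ]
    scene = compose_scene(describe_scene(readings, stub_llm))
    for obj in scene:
        # In a full system each prompt would be sent to a text-to-3D model and the
        # resulting mesh anchored at obj.position in the AR view.
        print(obj.prompt, "->", obj.position)
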
Lombardo, A.; Morabito, G.; Quattropani, S.; Ricci, C.; Siino, M.; Tinnirello, I.
AI-GeneSI: Exploiting generative AI for autonomous generation of the southbound interface in the IoT Proceedings Article
In: IEEE World Forum Internet Things, WF-IoT, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-835037301-1.
@inproceedings{lombardo_ai-genesi_2024,
title = {AI-GeneSI: Exploiting generative AI for autonomous generation of the southbound interface in the IoT},
author = {A. Lombardo and G. Morabito and S. Quattropani and C. Ricci and M. Siino and I. Tinnirello},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85216509327&doi=10.1109%2fWF-IoT62078.2024.10811300&partnerID=40&md5=cb20d5004d1f99b73b536dd0738cabd5},
doi = {10.1109/WF-IoT62078.2024.10811300},
isbn = {979-835037301-1},
year = {2024},
date = {2024-01-01},
booktitle = {IEEE World Forum Internet Things, WF-IoT},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Virtual objects, which are representations in the digital world of physical entities, use the data collected by one or several sensor nodes to operate. To overcome the diversity and heterogeneity of protocols implemented by different sensor nodes and the way in which sensor data is represented, it is convenient to exploit appropriate components referred to as 'southbound interfaces' in this paper. The objective of the southbound interface is to convert between the communication protocols implemented by sensor nodes and virtual objects and to harmonize data representations. The implementation of the southbound interfaces is not a complex task; however, it is extremely specific to the current setting, which results in low reusability of the code, and it is time-consuming. In this paper, a methodology named AI-GeneSI is proposed to exploit Large Language Models (LLMs) to generate the code to communicate with the southbound interface. Such code is utilized to create and deploy a microservice which implements the southbound interface functions. A prototype of the proposed methodology has been implemented to demonstrate the feasibility of the proposed approach. © 2024 IEEE.},
keywords = {Autonomous generation, Codes (symbols), Communications protocols, Complex task, Data representations, Digital world, Interface functions, Language Model, Reusability, Sensor nodes, Sensors data, Virtual objects, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
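
As a rough illustration of the AI-GeneSI idea in the abstract above (an LLM generating the southbound-interface code that translates a sensor node's native protocol into the virtual object's data format, to be deployed as a microservice), here is a minimal sketch. The prompt template, the llm callable, the stub_llm helper, and the generated-adapter file layout are assumptions for illustration, not the paper's actual methodology or tooling.

# Minimal sketch of an AI-GeneSI-style code generator (hypothetical; not the paper's implementation).
# Given a description of a sensor node's protocol and a target data schema, an LLM is asked to emit
# a small adapter ("southbound interface") that could later be wrapped as a microservice.

from pathlib import Path
from typing import Callable

PROMPT_TEMPLATE = """Write a Python function translate(raw: bytes) -> dict that parses
messages from the following sensor node and returns them in the target schema.

Sensor node protocol:
{protocol_description}

Target schema (JSON fields):
{target_schema}

Return only the function definition, no explanations."""


def generate_southbound_interface(
    protocol_description: str,
    target_schema: str,
    llm: Callable[[str], str],
    out_path: Path = Path("southbound_adapter.py"),
) -> Path:
    """Prompt the LLM for adapter code and write it to a module ready for deployment."""
    prompt = PROMPT_TEMPLATE.format(
        protocol_description=protocol_description,
        target_schema=target_schema,
    )
    generated_code = llm(prompt)
    out_path.write_text(generated_code)
    # In a full pipeline this module would be packaged (e.g. in a container image) and
    # exposed as a microservice; here we only produce the source file.
    return out_path


def stub_llm(prompt: str) -> str:
    # Stand-in for a real LLM call so the sketch runs without any external service.
    return (
        "def translate(raw: bytes) -> dict:\n"
        "    # Example: b'temp=23.5;hum=40' -> {'temperature': 23.5, 'humidity': 40.0}\n"
        "    fields = dict(p.split('=') for p in raw.decode().split(';'))\n"
        "    return {'temperature': float(fields['temp']), 'humidity': float(fields['hum'])}\n"
    )


if __name__ == "__main__":
    path = generate_southbound_interface(
        protocol_description="ASCII lines of the form 'temp=<float>;hum=<float>'",
        target_schema="temperature (float, °C), humidity (float, %)",
        llm=stub_llm,
    )
    print("Generated adapter written to", path)
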