AHCI RESEARCH GROUP
Publications
Papers published in international journals,
proceedings of conferences, workshops and books.
2024
Peretti, A.; Mazzola, M.; Capra, L.; Piazzola, M.; Carlevaro, C.
Seamless Human-Robot Interaction Through a Distributed Zero-Trust Architecture and Advanced User Interfaces Proceedings Article
In: Secchi, C.; Marconi, L. (Eds.): Springer Proc. Adv. Robot., pp. 92–95, Springer Nature, 2024, ISSN: 2511-1256, ISBN: 978-3-031-76427-1.
Tags: Advanced user interfaces, Digital Twins, HRC, Human Robot Interaction, Human-Robot Collaboration, Humans-robot interactions, Industrial robots, Industry 4.0, Intelligent robots, Interaction platform, Language Model, Large language model, LLM, Problem oriented languages, Robot Operating System, Robot operating system 2, Robot-robot collaboration, ROS2, RRC, Wages, XR, ZTA
@inproceedings{peretti_seamless_2024,
title = {Seamless Human-Robot Interaction Through a Distributed Zero-Trust Architecture and Advanced User Interfaces},
author = {A. Peretti and M. Mazzola and L. Capra and M. Piazzola and C. Carlevaro},
editor = {Secchi, C. and Marconi, L.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85216090556&doi=10.1007%2f978-3-031-76428-8_18&partnerID=40&md5=9f58281f8a8c034fb45fed610ce64bd2},
doi = {10.1007/978-3-031-76428-8_18},
issn = {2511-1256},
isbn = {978-3-031-76427-1},
year = {2024},
date = {2024-01-01},
booktitle = {Springer. Proc. Adv. Robot.},
volume = {33 SPAR},
pages = {92–95},
publisher = {Springer Nature},
abstract = {The proposed work presents a novel interaction platform designed to address the shortage of skilled workers in the labor market, facilitating the seamless integration of robotics and advanced user interfaces such as eXtended Reality (XR) to optimize Human-Robot Collaboration (HRC) as well as Robot-Robot Collaboration (RRC) in an Industry 4.0 scenario. One of the most challenging problems is to optimize and simplify the collaboration of humans and robots so as to reduce or avoid system slowdowns, blockages, and situations that are dangerous for both users and robots. The advent of Large Language Models (LLMs) has been a breakthrough across the IT landscape because they perform well in scenarios ranging from human-like text generation to the management of autonomous systems. Owing to their malleability, LLMs play a primary role in Human-Robot Collaboration processes. For this reason, the platform comprises three key technical components: a distributed zero-trust architecture, a virtual avatar, and digital twins of robots powered by the Robot Operating System 2 (ROS2) platform. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2024.},
keywords = {Advanced user interfaces, Digital Twins, HRC, Human Robot Interaction, Human-Robot Collaboration, Humans-robot interactions, Industrial robots, Industry 4.0, Intelligent robots, Interaction platform, Language Model, Large language model, LLM, Problem oriented languages, Robot Operating System, Robot operating system 2, Robot-robot collaboration, ROS2, RRC, Wages, XR, ZTA},
pubstate = {published},
tppubtype = {inproceedings}
}
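The record above describes a distributed zero-trust architecture in which LLM-driven interfaces talk to ROS2-powered digital twins. As a rough illustration only, the Python (rclpy) sketch below shows one way such a bridge could gate LLM-generated intents before they reach a twin; the topic names, the "tag|command" payload, and the HMAC check standing in for a full zero-trust policy are assumptions made for this example, not details from the paper.

# Hypothetical sketch: an rclpy node that relays LLM-generated intents to a
# robot digital twin only when the message carries a valid authentication tag.
# Topic names, the HMAC-based check, and the "tag|command" payload format are
# illustrative assumptions, not the architecture from the paper.
import hashlib
import hmac

import rclpy
from rclpy.node import Node
from std_msgs.msg import String

SHARED_SECRET = b"replace-with-a-provisioned-secret"  # placeholder credential


class ZeroTrustTwinBridge(Node):
    def __init__(self):
        super().__init__("zero_trust_twin_bridge")
        # Commands forwarded to the digital twin (and, eventually, the real robot).
        self.cmd_pub = self.create_publisher(String, "/twin/cmd", 10)
        # Intents produced by the LLM-backed interaction layer.
        self.create_subscription(String, "/llm/intent", self.on_intent, 10)

    def on_intent(self, msg: String) -> None:
        # Expected payload: "<hex tag>|<command text>".
        tag, _, command = msg.data.partition("|")
        expected = hmac.new(SHARED_SECRET, command.encode(), hashlib.sha256).hexdigest()
        if not hmac.compare_digest(tag, expected):
            self.get_logger().warn("Rejected intent with invalid authentication tag")
            return  # zero-trust stance: drop anything that cannot prove its origin
        out = String()
        out.data = command
        self.cmd_pub.publish(out)


def main() -> None:
    rclpy.init()
    rclpy.spin(ZeroTrustTwinBridge())
    rclpy.shutdown()


if __name__ == "__main__":
    main()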
Sonawani, S.; Weigend, F.; Amor, H. B.
SiSCo: Signal Synthesis for Effective Human-Robot Communication Via Large Language Models Proceedings Article
In: IEEE Int Conf Intell Rob Syst, pp. 7107–7114, Institute of Electrical and Electronics Engineers Inc., 2024, ISSN: 2153-0858, ISBN: 979-8-3503-7770-5.
Tags: Communications channels, Extensive resources, Human engineering, Human Robot Interaction, Human-Robot Collaboration, Human-robot communication, Humans-robot interactions, Industrial robots, Intelligent robots, Language Model, Man machine systems, Microrobots, Robust communication, Signal synthesis, Specialized knowledge, Visual communication, Visual cues, Visual languages
@inproceedings{sonawani_sisco_2024,
title = {SiSCo: Signal Synthesis for Effective Human-Robot Communication Via Large Language Models},
author = {S. Sonawani and F. Weigend and H. B. Amor},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85216466596&doi=10.1109%2fIROS58592.2024.10802561&partnerID=40&md5=ccd14b4f0b5d527b179394dffd4e2c73},
doi = {10.1109/IROS58592.2024.10802561},
issn = {2153-0858},
isbn = {979-8-3503-7770-5},
year = {2024},
date = {2024-01-01},
booktitle = {IEEE Int Conf Intell Rob Syst},
pages = {7107–7114},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Effective human-robot collaboration hinges on robust communication channels, with visual signaling playing a pivotal role due to its intuitive appeal. Yet, the creation of visually intuitive cues often demands extensive resources and specialized knowledge. The emergence of Large Language Models (LLMs) offers promising avenues for enhancing human-robot interactions and revolutionizing the way we generate context-aware visual cues. To this end, we introduce SiSCo, a novel framework that combines the computational power of LLMs with mixed-reality technologies to streamline the creation of visual cues for human-robot collaboration. Our results show that SiSCo improves the efficiency of communication in human-robot teaming tasks, reducing task completion time by approximately 73% and increasing task success rates by 18% compared to baseline natural language signals. Additionally, SiSCo reduces cognitive load for participants by 46%, as measured by the NASA-TLX subscale, and receives above-average user ratings for on-the-fly signals generated for unseen objects. To encourage further development and broader community engagement, we provide full access to SiSCo's implementation and related materials on our GitHub repository. © 2024 IEEE.},
keywords = {Communications channels, Extensive resources, Human engineering, Human Robot Interaction, Human-Robot Collaboration, Human-robot communication, Humans-robot interactions, Industrial robots, Intelligent robots, Language Model, Man machine systems, Microrobots, Robust communication, Signal synthesis, Specialized knowledge, Visual communication, Visual cues, Visual languages},
pubstate = {published},
tppubtype = {inproceedings}
}
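SiSCo pairs an LLM with mixed-reality output to generate visual cues on the fly; the authors provide their actual implementation on GitHub. Purely as an illustration of the general pattern, the sketch below asks an LLM for a machine-readable cue specification and parses the reply. The call_llm stub, the prompt wording, and the JSON schema are hypothetical and are not taken from SiSCo.

# Illustrative sketch (not the SiSCo implementation): ask an LLM for a compact,
# machine-readable visual-cue specification and parse it for a mixed-reality renderer.
# `call_llm`, the JSON schema, and the prompt wording are assumptions.
import json
from typing import Dict


def call_llm(prompt: str) -> str:
    """Placeholder for whatever LLM backend is available; returns raw model text."""
    raise NotImplementedError("wire this to your LLM provider of choice")


def synthesize_cue(task: str, target_object: str) -> Dict:
    prompt = (
        "You generate visual signals for human-robot collaboration.\n"
        f"Task: {task}\nObject: {target_object}\n"
        'Reply with JSON only: {"shape": ..., "color": ..., "label": ..., '
        '"anchor": "object" or "workspace"}.'
    )
    reply = call_llm(prompt)
    cue = json.loads(reply)  # a production system would validate the schema here
    return cue


# Example (once call_llm is wired up):
#   cue = synthesize_cue("hand over the wrench", "wrench")
#   renderer.show(cue)  # hypothetical mixed-reality renderer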
Fujii, A.; Fukuda, K.
Initial Study on Robot Emotional Expression Using Manpu Proceedings Article
In: ACM/IEEE Int. Conf. Hum.-Rob. Interact., pp. 463–467, IEEE Computer Society, 2024, ISSN: 2167-2148, ISBN: 979-8-4007-0323-2.
Tags: Comic engineering, Comic symbol, Comic symbols, Display devices, Emotional expressions, Express emotions, Generic expression, Human Robot Interaction, Human robots, Human-robot interaction, Humans-robot interactions, Machine design, Man machine systems, Manpu, Mixed reality, Symbiotics, Symbolic methods
@inproceedings{fujii_initial_2024,
title = {Initial Study on Robot Emotional Expression Using Manpu},
author = {A. Fujii and K. Fukuda},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85188120223&doi=10.1145%2f3610978.3640652&partnerID=40&md5=4277cbd98c0474e2e7ba19d352e6f46e},
doi = {10.1145/3610978.3640652},
issn = {2167-2148},
isbn = {979-8-4007-0323-2},
year = {2024},
date = {2024-01-01},
booktitle = {ACM/IEEE Int. Conf. Hum.-Rob. Interact.},
pages = {463–467},
publisher = {IEEE Computer Society},
abstract = {In recent years, robots have started to play an active role in various places in society. The ability of robots not only to convey information but also to interact emotionally is necessary to realize a human-robot symbiotic society. Many studies have been conducted on the emotional expression of robots. However, as robots come in a wide variety of designs, it is difficult to construct a generic expression method, and some robots are not equipped with expression devices such as faces or displays. To address these problems, this research aims to develop technology that enables robots to express emotions using manpu (a symbolic method used in comic books, expressing not only the emotions of humans and animals but also the states of objects) and mixed reality technology. As the first step of this research, we categorize manpu and use large language models to generate manpu expressions according to the dialogue information. © 2024 Copyright held by the owner/author(s)},
keywords = {Comic engineering, Comic symbol, Comic symbols, Display devices, Emotional expressions, Express emotions, Generic expression, Human Robot Interaction, Human robots, Human-robot interaction, Humans-robot interactions, Machine design, Man machine systems, Manpu, Mixed reality, Symbiotics, Symbolic methods},
pubstate = {published},
tppubtype = {inproceedings}
}
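The study above categorizes manpu and has a large language model pick an expression that matches the dialogue, to be rendered by a mixed-reality layer. The sketch below is a hypothetical version of that selection step only; the category list, the prompt, and the call_llm stub are assumptions made for illustration.

# Illustrative sketch (not the paper's system): map a dialogue turn to one of a
# fixed set of manpu categories via an LLM, so a mixed-reality layer can render
# the chosen symbol next to the robot. Categories, prompt, and call_llm are assumptions.
MANPU_CATEGORIES = ["sweat_drop", "anger_mark", "sparkle", "question_mark", "light_bulb"]


def call_llm(prompt: str) -> str:
    """Placeholder for any LLM backend; returns the model's raw text reply."""
    raise NotImplementedError


def select_manpu(dialogue_turn: str) -> str:
    prompt = (
        "Choose the single comic symbol (manpu) that best conveys the robot's state "
        f"for this dialogue turn: \"{dialogue_turn}\".\n"
        f"Answer with exactly one of: {', '.join(MANPU_CATEGORIES)}."
    )
    choice = call_llm(prompt).strip()
    # Fall back to a neutral symbol if the model strays from the allowed set.
    return choice if choice in MANPU_CATEGORIES else "question_mark"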
2023
Bottega, J. A.; Kich, V. A.; Jesus, J. C.; Steinmetz, R.; Kolling, A. H.; Grando, R. B.; Guerra, R. S.; Gamarra, D. F. T.
Jubileo: An Immersive Simulation Framework for Social Robot Design Journal Article
In: Journal of Intelligent and Robotic Systems: Theory and Applications, vol. 109, no. 4, 2023, ISSN: 0921-0296.
Tags: Anthropomorphic Robots, Computational Linguistics, Cost effectiveness, E-Learning, English language learning, English languages, Human Robot Interaction, Human-robot interaction, Humanoid robot, Humans-robot interactions, Immersive, Language learning, Language Model, Large language model, large language models, Learning game, Machine design, Man machine systems, Open systems, Robot Operating System, Simulation framework, Simulation platform, Virtual Reality
@article{bottega_jubileo_2023,
title = {Jubileo: An Immersive Simulation Framework for Social Robot Design},
author = {J. A. Bottega and V. A. Kich and J. C. Jesus and R. Steinmetz and A. H. Kolling and R. B. Grando and R. S. Guerra and D. F. T. Gamarra},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85178895874&doi=10.1007%2fs10846-023-01991-3&partnerID=40&md5=6392af1e9a500ef51c3e215bd9709ce5},
doi = {10.1007/s10846-023-01991-3},
issn = {0921-0296},
year = {2023},
date = {2023-01-01},
journal = {Journal of Intelligent and Robotic Systems: Theory and Applications},
volume = {109},
number = {4},
abstract = {This paper introduces Jubileo, an open-source simulated humanoid robot as a framework for the development of human-robot interaction applications. By leveraging the power of the Robot Operating System (ROS) and Unity in a virtual reality environment, this simulation establishes a strong connection to real robotics, faithfully replicating the robot’s physical components down to its motors and enabling communication with servo-actuators to control both the animatronic face and the joints of a real humanoid robot. To validate the capabilities of the framework, we propose English teaching games that integrate Virtual Reality (VR), game-based Human-Robot Interaction (HRI), and advanced large language models such as Generative Pre-trained Transformer (GPT). These games aim to foster linguistic competence within dynamic and interactive virtual environments. The incorporation of large language models bolsters the robot’s capability to generate human-like responses, thus facilitating a more realistic conversational experience. Moreover, the simulation framework reduces real-world testing risks and offers a cost-effective, efficient, and scalable platform for developing new HRI applications. The paper underscores the transformative potential of converging VR, large language models, and HRI, particularly in educational applications. © 2023, The Author(s), under exclusive licence to Springer Nature B.V.},
keywords = {Anthropomorphic Robots, Computational Linguistics, Cost effectiveness, E-Learning, English language learning, English languages, Human Robot Interaction, Human-robot interaction, Humanoid robot, Humans-robot interactions, Immersive, Language learning, Language Model, Large language model, large language models, Learning game, Machine design, Man machine systems, Open systems, Robot Operating System, Simulation framework, Simulation platform, Virtual Reality},
pubstate = {published},
tppubtype = {article}
}
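Jubileo couples ROS, Unity, and a GPT-style model so the simulated humanoid can hold English-teaching dialogues in VR. As a rough sketch under those assumptions, the rclpy node below forwards a player utterance to an LLM and publishes the generated reply for the robot's face and speech pipeline; the topic names and the call_llm stub are illustrative, not taken from the framework.

# Illustrative sketch (not the Jubileo code): an rclpy node that turns player
# utterances from the VR game into LLM-generated robot replies. Topic names and
# the call_llm stub are assumptions made for this example.
import rclpy
from rclpy.node import Node
from std_msgs.msg import String


def call_llm(prompt: str) -> str:
    """Placeholder for a GPT-style backend used to generate the robot's reply."""
    raise NotImplementedError


class TutorDialogueNode(Node):
    def __init__(self):
        super().__init__("tutor_dialogue")
        self.reply_pub = self.create_publisher(String, "/jubileo/robot_reply", 10)
        self.create_subscription(String, "/jubileo/player_utterance", self.on_utterance, 10)

    def on_utterance(self, msg: String) -> None:
        prompt = (
            "You are a friendly humanoid robot teaching English to a beginner. "
            f"Reply briefly and simply to: {msg.data}"
        )
        reply = String()
        reply.data = call_llm(prompt)
        self.reply_pub.publish(reply)  # consumed by the face animation / TTS pipeline


def main() -> None:
    rclpy.init()
    rclpy.spin(TutorDialogueNode())
    rclpy.shutdown()


if __name__ == "__main__":
    main()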