AHCI RESEARCH GROUP
Publications
Papers published in international journals, conference proceedings, workshops, and books.
2024
Gkournelos, C.; Konstantinou, C.; Angelakis, P.; Michalos, G.; Makris, S.
Enabling Seamless Human-Robot Collaboration in Manufacturing Using LLMs (Proceedings Article)
In: Wagner, A.; Alexopoulos, K.; Makris, S. (Eds.): Lect. Notes Mech. Eng., pp. 81–89, Springer Science and Business Media Deutschland GmbH, 2024, ISSN: 2195-4356, ISBN: 978-3-031-57495-5.
@inproceedings{gkournelos_enabling_2024,
title = {Enabling Seamless Human-Robot Collaboration in Manufacturing Using LLMs},
author = {C. Gkournelos and C. Konstantinou and P. Angelakis and G. Michalos and S. Makris},
editor = {Wagner, A. and Alexopoulos, K. and Makris, S.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85199196139&doi=10.1007%2f978-3-031-57496-2_9&partnerID=40&md5=cd0b33b3c9e9f9e53f1e99882945e134},
doi = {10.1007/978-3-031-57496-2_9},
issn = {2195-4356},
isbn = {978-3-031-57495-5},
year = {2024},
date = {2024-01-01},
booktitle = {Lect. Notes Mech. Eng.},
pages = {81–89},
publisher = {Springer Science and Business Media Deutschland GmbH},
abstract = {In the era of Industry 5.0, there is a growing interest in harnessing the potential of human-robot collaboration (HRC) in manufacturing environments. This research paper focuses on the integration of Large Language Models (LLMs) to augment HRC capabilities, particularly in addressing configuration issues when programming robots to collaborate with human operators. By harnessing the capabilities of LLMs in combination with a user-friendly augmented reality (AR) interface, the proposed approach empowers human operators to seamlessly collaborate with robots, facilitating smooth and efficient assembly processes. This research introduces CollabAI, an AI assistant for task management and natural communication based on a fine-tuned GPT model focusing on collaborative manufacturing. Real-world experiments were conducted in two manufacturing settings from the automotive and machinery industries. The findings have implications for various industries seeking to increase productivity and foster a new era of efficient and effective collaboration in manufacturing environments. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2024.},
keywords = {Artificial intelligence, Augmented Reality, Collaboration capabilities, Computational Linguistics, Human operator, Human-Robot Collaboration, Industrial research, Industrial robots, Intelligent robots, Language Model, Large language model, large language models, Manufacturing environments, Programming robots, Reality interface, Research papers, Robot programming, User friendly},
pubstate = {published},
tppubtype = {inproceedings}
}
Peretti, A.; Mazzola, M.; Capra, L.; Piazzola, M.; Carlevaro, C.
Seamless Human-Robot Interaction Through a Distributed Zero-Trust Architecture and Advanced User Interfaces (Proceedings Article)
In: Secchi, C.; Marconi, L. (Eds.): Springer Proc. Adv. Robot., vol. 33, pp. 92–95, Springer Nature, 2024, ISSN: 2511-1256, ISBN: 978-3-031-76427-1.
@inproceedings{peretti_seamless_2024,
title = {Seamless Human-Robot Interaction Through a Distributed Zero-Trust Architecture and Advanced User Interfaces},
author = {A. Peretti and M. Mazzola and L. Capra and M. Piazzola and C. Carlevaro},
editor = {Secchi, C. and Marconi, L.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85216090556&doi=10.1007%2f978-3-031-76428-8_18&partnerID=40&md5=9f58281f8a8c034fb45fed610ce64bd2},
doi = {10.1007/978-3-031-76428-8_18},
issn = {2511-1256},
isbn = {978-3-031-76427-1},
year = {2024},
date = {2024-01-01},
booktitle = {Springer Proc. Adv. Robot.},
volume = {33},
pages = {92–95},
publisher = {Springer Nature},
abstract = {The proposed work presents a novel interaction platform designed to address the shortage of skilled workers in the labor market, facilitating the seamless integration of robotics and advanced user interfaces such as eXtended Reality (XR) to optimize Human-Robot Collaboration (HRC) as well as Robot-Robot Collaboration (RRC) in an Industry 4.0 scenario. One of the most challenging problems is optimizing and simplifying the collaboration of humans and robots to decrease or avoid system slowdowns, blocks, or dangerous situations for both users and robots. The advent of Large Language Models (LLMs) has been a breakthrough for the whole IT environment because they perform well in different scenarios, from human text generation to autonomous systems management. Due to their malleability, LLMs play a primary role in Human-Robot Collaboration processes. For this reason, the platform comprises three key technical components: a distributed zero-trust architecture, a virtual avatar, and digital twins of robots powered by the Robot Operating System 2 (ROS2) platform. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2024.},
keywords = {Advanced user interfaces, Digital Twins, HRC, Human Robot Interaction, Human-Robot Collaboration, Humans-robot interactions, Industrial robots, Industry 4.0, Intelligent robots, Interaction platform, Language Model, Large language model, LLM, Problem oriented languages, Robot Operating System, Robot operating system 2, Robot-robot collaboration, ROS2, RRC, Wages, XR, ZTA},
pubstate = {published},
tppubtype = {inproceedings}
}
Zheng, P.; Li, C.; Fan, J.; Wang, L.
A vision-language-guided and deep reinforcement learning-enabled approach for unstructured human-robot collaborative manufacturing task fulfilment (Journal Article)
In: CIRP Annals, vol. 73, no. 1, pp. 341–344, 2024, ISSN: 0007-8506.
@article{zheng_vision-language-guided_2024,
title = {A vision-language-guided and deep reinforcement learning-enabled approach for unstructured human-robot collaborative manufacturing task fulfilment},
author = {P. Zheng and C. Li and J. Fan and L. Wang},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85190754943&doi=10.1016%2fj.cirp.2024.04.003&partnerID=40&md5=59c453e1931e912472e76b86b77a881b},
doi = {10.1016/j.cirp.2024.04.003},
issn = {0007-8506},
year = {2024},
date = {2024-01-01},
journal = {CIRP Annals},
volume = {73},
number = {1},
pages = {341–344},
abstract = {Human-Robot Collaboration (HRC) has emerged as a pivotal element in contemporary human-centric smart manufacturing scenarios. However, the fulfilment of HRC tasks in unstructured scenes brings many challenges to be overcome. In this work, a mixed reality head-mounted display is modelled as an effective data collection, communication, and state representation interface/tool for HRC task settings. By integrating vision-language cues with a large language model, a vision-language-guided HRC task planning approach is first proposed. Then, a deep reinforcement learning-enabled mobile manipulator motion control policy is generated to fulfil HRC task primitives. Its feasibility is demonstrated in several HRC unstructured manufacturing tasks with comparative results. © 2024 The Author(s)},
keywords = {Collaboration task, Collaborative manufacturing, Deep learning, Helmet mounted displays, Human robots, Human-centric, Human-guided robot learning, Human-Robot Collaboration, Interface states, Manipulators, Manufacturing system, Manufacturing tasks, Mixed reality, Mixed reality head-mounted displays, Reinforcement Learning, Reinforcement learnings, Robot vision, Smart manufacturing},
pubstate = {published},
tppubtype = {article}
}
Sonawani, S.; Weigend, F.; Amor, H. B.
SiSCo: Signal Synthesis for Effective Human-Robot Communication Via Large Language Models (Proceedings Article)
In: IEEE Int Conf Intell Rob Syst, pp. 7107–7114, Institute of Electrical and Electronics Engineers Inc., 2024, ISSN: 2153-0858, ISBN: 979-8-3503-7770-5.
@inproceedings{sonawani_sisco_2024,
title = {SiSCo: Signal Synthesis for Effective Human-Robot Communication Via Large Language Models},
author = {S. Sonawani and F. Weigend and H. B. Amor},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85216466596&doi=10.1109%2fIROS58592.2024.10802561&partnerID=40&md5=ccd14b4f0b5d527b179394dffd4e2c73},
doi = {10.1109/IROS58592.2024.10802561},
issn = {2153-0858},
isbn = {979-8-3503-7770-5},
year = {2024},
date = {2024-01-01},
booktitle = {IEEE Int Conf Intell Rob Syst},
pages = {7107–7114},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Effective human-robot collaboration hinges on robust communication channels, with visual signaling playing a pivotal role due to its intuitive appeal. Yet, the creation of visually intuitive cues often demands extensive resources and specialized knowledge. The emergence of Large Language Models (LLMs) offers promising avenues for enhancing human-robot interactions and revolutionizing the way we generate context-aware visual cues. To this end, we introduce SiSCo, a novel framework that combines the computational power of LLMs with mixed-reality technologies to streamline the creation of visual cues for human-robot collaboration. Our results show that SiSCo improves the efficiency of communication in human-robot teaming tasks, reducing task completion time by approximately 73% and increasing task success rates by 18% compared to baseline natural language signals. Additionally, SiSCo reduces cognitive load for participants by 46%, as measured by the NASA-TLX subscale, and receives above-average user ratings for on-the-fly signals generated for unseen objects. To encourage further development and broader community engagement, we provide full access to SiSCo's implementation and related materials on our GitHub repository. © 2024 IEEE.},
keywords = {Communications channels, Extensive resources, Human engineering, Human Robot Interaction, Human-Robot Collaboration, Human-robot communication, Humans-robot interactions, Industrial robots, Intelligent robots, Language Model, Man machine systems, Microrobots, Robust communication, Signal synthesis, Specialized knowledge, Visual communication, Visual cues, Visual languages},
pubstate = {published},
tppubtype = {inproceedings}
}