AHCI RESEARCH GROUP
Publications
Papers published in international journals, proceedings of conferences, workshops and books.
2024
Zheng, P.; Li, C.; Fan, J.; Wang, L.
A vision-language-guided and deep reinforcement learning-enabled approach for unstructured human-robot collaborative manufacturing task fulfilment Journal Article
In: CIRP Annals, vol. 73, no. 1, pp. 341–344, 2024, ISSN: 0007-8506.
@article{zheng_vision-language-guided_2024,
title = {A vision-language-guided and deep reinforcement learning-enabled approach for unstructured human-robot collaborative manufacturing task fulfilment},
author = {P. Zheng and C. Li and J. Fan and L. Wang},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85190754943&doi=10.1016%2fj.cirp.2024.04.003&partnerID=40&md5=59c453e1931e912472e76b86b77a881b},
doi = {10.1016/j.cirp.2024.04.003},
issn = {0007-8506},
year = {2024},
date = {2024-01-01},
journal = {CIRP Annals},
volume = {73},
number = {1},
pages = {341–344},
abstract = {Human-Robot Collaboration (HRC) has emerged as a pivot in contemporary human-centric smart manufacturing scenarios. However, the fulfilment of HRC tasks in unstructured scenes brings many challenges to be overcome. In this work, mixed reality head-mounted display is modelled as an effective data collection, communication, and state representation interface/tool for HRC task settings. By integrating vision-language cues with large language model, a vision-language-guided HRC task planning approach is firstly proposed. Then, a deep reinforcement learning-enabled mobile manipulator motion control policy is generated to fulfil HRC task primitives. Its feasibility is demonstrated in several HRC unstructured manufacturing tasks with comparative results. © 2024 The Author(s)},
keywords = {Collaboration task, Collaborative manufacturing, Deep learning, Helmet mounted displays, Human robots, Human-centric, Human-guided robot learning, Human-Robot Collaboration, Interface states, Manipulators, Manufacturing system, Manufacturing tasks, Mixed reality, Mixed reality head-mounted displays, Reinforcement Learning, Reinforcement learnings, Robot vision, Smart manufacturing},
pubstate = {published},
tppubtype = {article}
}
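The abstract above outlines a two-stage pipeline: a large language model turns vision-language cues captured through the mixed reality headset into a sequence of task primitives, and a deep reinforcement learning policy then drives the mobile manipulator through each primitive. The following is a minimal Python sketch of that control flow only; it is not the paper's implementation, and every class, function, prompt, and stand-in callable in it is hypothetical.

# Illustrative sketch of the pipeline described in the abstract above.
# Not the authors' code: SceneCue, plan_task_primitives, the prompt, and
# the stand-in LLM/policy callables are all hypothetical.
from dataclasses import dataclass
from typing import Callable, List

@dataclass
class SceneCue:
    """One vision-language observation, e.g. from an MR head-mounted display."""
    object_label: str        # what the vision model detected
    human_instruction: str   # what the human operator said or indicated

def plan_task_primitives(cues: List[SceneCue],
                         llm: Callable[[str], str]) -> List[str]:
    """Ask a language model to decompose the scene into task primitives."""
    prompt = "Decompose into robot task primitives, one per line:\n"
    prompt += "\n".join(f"- see {c.object_label}; human says: {c.human_instruction}"
                        for c in cues)
    reply = llm(prompt)  # a real system would call a hosted LLM here
    return [line.strip("- ").strip() for line in reply.splitlines() if line.strip()]

def execute_primitive(primitive: str, rl_policy: Callable[[str], bool]) -> bool:
    """Hand one primitive to a pretrained RL motion-control policy."""
    return rl_policy(primitive)

if __name__ == "__main__":
    # Stand-ins so the sketch runs end to end without any robot or model.
    fake_llm = lambda prompt: "- pick up bracket\n- hand bracket to human"
    fake_policy = lambda primitive: True   # pretend every motion succeeds
    cues = [SceneCue("bracket", "please pass me that part")]
    for p in plan_task_primitives(cues, fake_llm):
        print(p, "->", "ok" if execute_primitive(p, fake_policy) else "failed")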
Fujii, A.; Fukuda, K.
Initial Study on Robot Emotional Expression Using Manpu Proceedings Article
In: ACM/IEEE Int. Conf. Hum.-Rob. Interact., pp. 463–467, IEEE Computer Society, 2024, ISSN: 2167-2148; ISBN: 979-8-4007-0323-2.
@inproceedings{fujii_initial_2024,
title = {Initial Study on Robot Emotional Expression Using Manpu},
author = {A. Fujii and K. Fukuda},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85188120223&doi=10.1145%2f3610978.3640652&partnerID=40&md5=4277cbd98c0474e2e7ba19d352e6f46e},
doi = {10.1145/3610978.3640652},
issn = {2167-2148},
isbn = {979-8-4007-0323-2},
year = {2024},
date = {2024-01-01},
booktitle = {ACM/IEEE Int. Conf. Hum.-Rob. Interact.},
pages = {463–467},
publisher = {IEEE Computer Society},
abstract = {In recent years, robots have started to play an active role in various places in society. The ability of robots not only to convey information but also to interact emotionally, is necessary to realize a human-robot symbiotic society. Many studies have been conducted on the emotional expression of robots. However, as robots come in a wide variety of designs, it is difficult to construct a generic expression method, and some robots are not equipped with expression devices such as faces or displays. To address these problems, this research aims to develop technology that enables robots to express emotions, using Manpu (a symbolic method used in comic books, expressing not only the emotions of humans and animals but also the states of objects) and mixed reality technology. As the first step of the research, we categorize manpu and use large language models to generate manpu expressions according to the dialogue information. © 2024 Copyright held by the owner/author(s)},
keywords = {Comic engineering, Comic symbol, Comic symbols, Display devices, Emotional expressions, Express emotions, Generic expression, Human Robot Interaction, Human robots, Human-robot interaction, Humans-robot interactions, Machine design, Man machine systems, Manpu, Mixed reality, Symbiotics, Symbolic methods},
pubstate = {published},
tppubtype = {inproceedings}
}
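The abstract above describes selecting a manpu (comic symbol) that matches the emotional content of a dialogue turn, using a large language model, with the symbol rendered near the robot through mixed reality. Below is a minimal Python sketch of that idea; the emotion categories, symbol table, and stand-in LLM are hypothetical, not the authors' implementation.

# Illustrative sketch of LLM-driven manpu selection, per the abstract above.
# Not the authors' code: the category table and all names are hypothetical.
from typing import Callable, Dict

# Toy manpu vocabulary keyed by emotion category (real taxonomies are larger).
MANPU_BY_EMOTION: Dict[str, str] = {
    "joy": "sparkles",
    "anger": "popping vein",
    "surprise": "exclamation mark",
    "confusion": "sweat drop",
}

def select_manpu(utterance: str, llm: Callable[[str], str]) -> str:
    """Classify the utterance's emotion with an LLM, then map it to a manpu."""
    prompt = (f"Answer with one word from {sorted(MANPU_BY_EMOTION)}: "
              f"which emotion does this utterance express?\n{utterance}")
    emotion = llm(prompt).strip().lower()
    return MANPU_BY_EMOTION.get(emotion, "ellipsis")  # fallback symbol

if __name__ == "__main__":
    fake_llm = lambda prompt: "surprise"   # stand-in for a real LLM call
    symbol = select_manpu("Oh! I did not expect that.", fake_llm)
    print(symbol)  # -> "exclamation mark"; an MR overlay would render it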