AHCI RESEARCH GROUP
Publications
Papers published in international journals, conference and workshop proceedings, and books.
OUR RESEARCH
Scientific Publications
2025
Dong, W.; Li, S.; Zheng, P.; Liu, L.; Chen, S.
A 3DGS and LLM-based physical-to-virtual approach for human-robot interactive manufacturing Journal Article
In: Manufacturing Letters, vol. 44, pp. 121–128, 2025, ISSN: 2213-8463 (Publisher: Elsevier Ltd).
@article{dong_3dgs_2025,
title = {A 3DGS and LLM-based physical-to-virtual approach for human-robot interactive manufacturing},
author = {W. Dong and S. Li and P. Zheng and L. Liu and S. Chen},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105014947667&doi=10.1016%2Fj.mfglet.2025.06.016&partnerID=40&md5=8fd8b07c1f2c71e46b396d2e244bf701},
doi = {10.1016/j.mfglet.2025.06.016},
issn = {2213-8463},
year = {2025},
date = {2025-01-01},
journal = {Manufacturing Letters},
volume = {44},
pages = {121–128},
abstract = {With the exploration of digital transformation in the industry, the introduction of the industrial metaverse is bringing unprecedented opportunities and challenges to the manufacturing industry. In the industrial metaverse, humans can interact safely and naturally with robots in high-fidelity digital environments, enabling non-technical operators to quickly validate industrial scenarios and help optimize decision-making and production processes. However, the complexity of Three-Dimensional (3D) modeling poses a challenge to achieving this goal. Additionally, programming-based Human Robot Interaction (HRI) also presents obstacles, as operators need significant time to learn how to control robots. Therefore, this paper proposes a 3D Gaussian Splatting (3DGS) and Large Language Model (LLM)-based physical-to-virtual approach for human-robot interactive manufacturing, which further facilitates digital interaction for non-technical operators in manufacturing environments. Specifically, 3DGS is first used for rapid visualization and reconstruction of the overall scene, achieving novel-view rendering and providing a Gaussian ellipsoid representation. Then, mesh extraction algorithms based on the Gaussian representation are used to build a physical-to-virtual transfer framework. Finally, the LLM is used to understand natural language commands and generate virtual-robot Python programs to complete robot assembly tasks. The framework is implemented in the Isaac Sim simulator, and a case study shows that it can quickly and accurately complete physical-to-virtual transfer and accomplish robot assembly manufacturing tasks in the simulator with low code. © 2025 Elsevier B.V. All rights reserved.},
note = {Publisher: Elsevier Ltd},
keywords = {3D modeling, Gaussian distribution, Gaussians, High level languages, Human computer interaction, Human Robot Interaction, Human robots, Humans-robot interactions, Industrial robots, Language Model, Large language model, Man machine systems, Metaverses, Model-based OPC, Natural language processing systems, Physical-to-virtual, Robot programming, Robotic assembly, Splatting, Three dimensional computer graphics, Three-dimensional gaussian splatting},
pubstate = {published},
tppubtype = {article}
}
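The entry above ends with an LLM translating natural-language commands into low-code robot programs that run in Isaac Sim. The sketch below illustrates only that last step, in Python; the system prompt, the pick/place API, and the MockRobot class are illustrative assumptions, not the paper's actual Isaac Sim integration.

# Minimal sketch of the command-to-program step: an LLM turns a natural-language
# assembly instruction into calls against a small, whitelisted robot API.
# All names here (SYSTEM_PROMPT, pick/place, MockRobot) are hypothetical.

SYSTEM_PROMPT = """You are a robot programmer. Translate the user's assembly
instruction into calls against this API only:
  pick(object_name: str) -> None
  place(target_name: str) -> None
Return Python code, nothing else."""

def build_llm_request(instruction: str) -> dict:
    """Package a natural-language command as a chat-completion request."""
    return {
        "messages": [
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": instruction},
        ]
    }

# A plausible LLM response to "Put the gear on the base plate":
generated_code = "pick('gear')\nplace('base_plate')"

def run_generated(code: str, robot) -> None:
    """Execute generated code in a namespace restricted to the robot API."""
    allowed = {"pick": robot.pick, "place": robot.place}
    exec(code, {"__builtins__": {}}, allowed)

class MockRobot:
    def pick(self, obj):  print(f"[sim] picking {obj}")
    def place(self, tgt): print(f"[sim] placing on {tgt}")

if __name__ == "__main__":
    run_generated(generated_code, MockRobot())

Confining exec to a whitelisted namespace is one way to keep LLM-generated code within the intended robot API; how the paper sandboxes its generated programs is not stated in the abstract.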
2024
Zheng, P.; Li, C.; Fan, J.; Wang, L.
A vision-language-guided and deep reinforcement learning-enabled approach for unstructured human-robot collaborative manufacturing task fulfilment Journal Article
In: CIRP Annals, vol. 73, no. 1, pp. 341–344, 2024, ISSN: 0007-8506.
@article{zheng_vision-language-guided_2024,
title = {A vision-language-guided and deep reinforcement learning-enabled approach for unstructured human-robot collaborative manufacturing task fulfilment},
author = {P. Zheng and C. Li and J. Fan and L. Wang},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85190754943&doi=10.1016%2fj.cirp.2024.04.003&partnerID=40&md5=59c453e1931e912472e76b86b77a881b},
doi = {10.1016/j.cirp.2024.04.003},
issn = {0007-8506},
year = {2024},
date = {2024-01-01},
journal = {CIRP Annals},
volume = {73},
number = {1},
pages = {341–344},
abstract = {Human-Robot Collaboration (HRC) has emerged as pivotal in contemporary human-centric smart manufacturing scenarios. However, fulfilling HRC tasks in unstructured scenes brings many challenges to overcome. In this work, a mixed reality head-mounted display is modelled as an effective data-collection, communication, and state-representation interface/tool for HRC task settings. By integrating vision-language cues with a large language model, a vision-language-guided HRC task planning approach is first proposed. Then, a deep reinforcement learning-enabled mobile manipulator motion control policy is generated to fulfil HRC task primitives. Its feasibility is demonstrated in several unstructured HRC manufacturing tasks with comparative results. © 2024 The Author(s)},
keywords = {Collaboration task, Collaborative manufacturing, Deep learning, Helmet mounted displays, Human robots, Human-centric, Human-guided robot learning, Human-Robot Collaboration, Interface states, Manipulators, Manufacturing system, Manufacturing tasks, Mixed reality, Mixed reality head-mounted displays, Reinforcement Learning, Reinforcement learnings, Robot vision, Smart manufacturing},
pubstate = {published},
tppubtype = {article}
}
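The abstract above outlines a two-stage architecture: vision-language cues plus a large language model produce a task plan, and a deep reinforcement learning policy drives the mobile manipulator through each primitive. The Python sketch below shows only that control flow; the fixed plan, observation format, and 6-DoF action stub are hypothetical stand-ins for components the abstract does not specify.

# Sketch of the plan-then-execute loop: a vision-language planner maps scene
# and instruction to task primitives, and a learned policy executes each one.
# Planner output, observations, and actions are illustrative placeholders.

from dataclasses import dataclass
from typing import List
import random

@dataclass
class Primitive:
    name: str    # e.g. "reach", "grasp", "handover"
    target: str  # object or location the primitive acts on

def plan_from_vision_language(scene_caption: str, instruction: str) -> List[Primitive]:
    """Stand-in for the vision-language-guided planner; the paper conditions
    an LLM on cues from the MR headset, while this stub returns a fixed plan."""
    return [Primitive("reach", "bolt"), Primitive("grasp", "bolt"),
            Primitive("handover", "operator")]

class MotionPolicy:
    """Stand-in for the deep-RL mobile-manipulator control policy."""
    def act(self, observation: dict) -> list:
        return [random.uniform(-1.0, 1.0) for _ in range(6)]  # 6-DoF action stub

def execute(primitives: List[Primitive], policy: MotionPolicy, steps: int = 3) -> None:
    """Roll the policy for a few steps per primitive and log the actions."""
    for p in primitives:
        for t in range(steps):
            obs = {"primitive": p.name, "target": p.target, "t": t}
            action = policy.act(obs)
            print(f"{p.name} -> {p.target}: step {t}, action {[f'{a:.2f}' for a in action]}")

if __name__ == "__main__":
    plan = plan_from_vision_language("operator at bench, bolt on tray", "hand me the bolt")
    execute(plan, MotionPolicy())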
Fujii, A.; Fukuda, K.
Initial Study on Robot Emotional Expression Using Manpu Proceedings Article
In: ACM/IEEE Int. Conf. Hum.-Rob. Interact., pp. 463–467, IEEE Computer Society, 2024, ISSN: 2167-2148, ISBN: 979-8-4007-0323-2.
@inproceedings{fujii_initial_2024,
title = {Initial Study on Robot Emotional Expression Using Manpu},
author = {A. Fujii and K. Fukuda},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85188120223&doi=10.1145%2f3610978.3640652&partnerID=40&md5=4277cbd98c0474e2e7ba19d352e6f46e},
doi = {10.1145/3610978.3640652},
issn = {2167-2148},
isbn = {979-8-4007-0323-2},
year = {2024},
date = {2024-01-01},
booktitle = {ACM/IEEE Int. Conf. Hum.-Rob. Interact.},
pages = {463–467},
publisher = {IEEE Computer Society},
abstract = {In recent years, robots have started to play an active role in various places in society. The ability of robots not only to convey information but also to interact emotionally is necessary to realize a human-robot symbiotic society. Many studies have been conducted on the emotional expression of robots. However, as robots come in a wide variety of designs, it is difficult to construct a generic expression method, and some robots are not equipped with expression devices such as faces or displays. To address these problems, this research aims to develop technology that enables robots to express emotions using Manpu (a symbolic method used in comic books that expresses not only the emotions of humans and animals but also the states of objects) and mixed reality technology. As the first step of the research, we categorize Manpu and use large language models to generate Manpu expressions according to dialogue information. © 2024 Copyright held by the owner/author(s)},
keywords = {Comic engineering, Comic symbol, Comic symbols, Display devices, Emotional expressions, Express emotions, Generic expression, Human Robot Interaction, Human robots, Human-robot interaction, Humans-robot interactions, Machine design, Man machine systems, Manpu, Mixed reality, Symbiotics, Symbolic methods},
pubstate = {published},
tppubtype = {inproceedings}
}
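The abstract above describes categorizing Manpu symbols and letting a large language model choose one from dialogue, then rendering it via mixed reality. A minimal Python sketch of that select-and-overlay flow follows; the catalog, the keyword lookup standing in for the LLM, and the MR stub are all assumptions, not the authors' implementation.

# Sketch of the Manpu selection pipeline: map dialogue to an emotion category,
# look up a Manpu symbol for it, and overlay the symbol on the robot in MR.
# The catalog entries and keyword table are illustrative, not from the paper.

MANPU_CATALOG = {
    "joy":      "sparkles around the head",
    "surprise": "vertical lines above the eyes",
    "anger":    "cross-shaped vein mark",
    "effort":   "sweat drop",
}

EMOTION_KEYWORDS = {
    "great": "joy", "thanks": "joy", "what": "surprise",
    "heavy": "effort", "stop": "anger",
}

def select_manpu(utterance: str) -> str:
    """Map dialogue text to a Manpu symbol; the paper delegates this step to a
    large language model, which this keyword match only approximates."""
    for word, emotion in EMOTION_KEYWORDS.items():
        if word in utterance.lower():
            return MANPU_CATALOG[emotion]
    return MANPU_CATALOG["surprise"]  # fallback when no keyword matches

def overlay_in_mixed_reality(symbol: str) -> None:
    """Stub for the MR rendering step that anchors the symbol near the robot."""
    print(f"[MR] drawing '{symbol}' next to the robot")

if __name__ == "__main__":
    overlay_in_mixed_reality(select_manpu("Thanks, that was great!"))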