AHCI RESEARCH GROUP
Publications
Papers published in international journals, conference and workshop proceedings, and books.
Scientific Publications
2025
Ding, S.; Yalla, J. P.; Chen, Y.
Demo Abstract: RAG-Driven 3D Question Answering in Edge-Assisted Virtual Reality Proceedings Article
In: Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 9798331543709.
@inproceedings{ding_demo_2025,
title = {Demo Abstract: RAG-Driven 3D Question Answering in Edge-Assisted Virtual Reality},
author = {S. Ding and J. P. Yalla and Y. Chen},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105017970015&doi=10.1109%2FINFOCOMWKSHPS65812.2025.11152992&partnerID=40&md5=0e079de018ae9c4a564b98c304a9ea6c},
doi = {10.1109/INFOCOMWKSHPS65812.2025.11152992},
isbn = {9798331543709},
year = {2025},
date = {2025-01-01},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {The rapid development of large language models (LLMs) has created new opportunities in 3D question answering (3D-QA) for virtual reality (VR). 3D-QA enhances user interaction by answering questions about virtual environments. However, performing 3D-QA in VR systems using LLM-based approaches is computation-intensive. Furthermore, general LLMs tend to generate inaccurate responses as they lack context-specific information in VR environments. To mitigate these limitations, we propose OfficeVR-QA, a 3D-QA framework for edge-assisted VR to alleviate the resource constraints of VR devices with the help of edge servers, demonstrated in a virtual office environment. To improve the accuracy of the generated answers, the edge server of OfficeVR-QA hosts retrieval-augmented generation (RAG) that augments LLMs with external knowledge retrieved from a local knowledge database extracted from VR environments and users. During an interactive demo, OfficeVR-QA will continuously update the local knowledge database in real time by transmitting participants' position and orientation data to the edge server, enabling adaptive responses to changes in the participants' states. Participants will navigate a VR office environment, interact with a VR user interface to ask questions, and observe the accuracy of dynamic responses based on their real-time state changes.},
keywords = {Edge computing, Edge server, Interface states, Knowledge database, Language Model, Local knowledge, Office environments, Question Answering, Real-time, User interaction, User interfaces, Virtual environments, Virtual Reality, Virtual reality system, Virtual-reality environment},
pubstate = {published},
tppubtype = {inproceedings}
}
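A minimal sketch of the retrieval-augmented answering loop the abstract above describes, under stated assumptions: PoseUpdate, KnowledgeDB, the toy word-overlap retrieval, and the llm callable are all illustrative names, not the authors' implementation. The VR client streams pose updates to the edge server, the server refreshes its local knowledge database, and retrieved facts are prepended to the LLM prompt.

# Hedged sketch of an OfficeVR-QA-style edge loop. All names and the
# toy retrieval are assumptions for illustration; the paper's system
# would use a proper embedding index and an LLM hosted on the edge server.
from dataclasses import dataclass, field

@dataclass
class PoseUpdate:
    """Position/orientation a VR client streams to the edge server."""
    user_id: str
    position: tuple   # (x, y, z) in the virtual office
    yaw_deg: float

@dataclass
class KnowledgeDB:
    """Local knowledge base extracted from the VR scene and user state."""
    facts: dict = field(default_factory=dict)  # fact_id -> text

    def update_user_state(self, pose: PoseUpdate) -> None:
        # Real-time update keyed by user, mirroring the demo's
        # position/orientation stream.
        self.facts[f"user:{pose.user_id}"] = (
            f"User {pose.user_id} is at {pose.position}, "
            f"facing {pose.yaw_deg} degrees."
        )

    def retrieve(self, question: str, k: int = 3) -> list:
        # Toy lexical retrieval by word overlap, standing in for a
        # real vector search over the scene knowledge base.
        q_words = set(question.lower().split())
        ranked = sorted(
            self.facts.values(),
            key=lambda t: -len(q_words & set(t.lower().split())),
        )
        return ranked[:k]

def answer(question: str, db: KnowledgeDB, llm) -> str:
    """Augment the prompt with retrieved context, then call the LLM
    (here any callable mapping a prompt string to an answer string)."""
    context = "\n".join(db.retrieve(question))
    return llm(f"Context:\n{context}\n\nQuestion: {question}\nAnswer:")

In the demo flow described above, each incoming pose packet would call update_user_state before answer is invoked, which is what lets responses adapt to the participant's current position and orientation.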
2023
DeChant, C.; Akinola, I.; Bauer, D.
Learning to summarize and answer questions about a virtual robot’s past actions Journal Article
In: Autonomous Robots, vol. 47, no. 8, pp. 1103–1118, 2023, ISSN: 0929-5593, 1573-7527, (Publisher: Springer).
@article{dechant_learning_2023,
title = {Learning to summarize and answer questions about a virtual robot’s past actions},
author = {C. DeChant and I. Akinola and D. Bauer},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85176588341&doi=10.1007%2Fs10514-023-10134-4&partnerID=40&md5=a1f0792eceb7cdfb7ffe862db74cd24d},
doi = {10.1007/s10514-023-10134-4},
issn = {0929-5593; 1573-7527},
year = {2023},
date = {2023-01-01},
journal = {Autonomous Robots},
volume = {47},
number = {8},
pages = {1103–1118},
abstract = {When robots perform long action sequences, users will want to easily and reliably find out what they have done. We therefore demonstrate the task of learning to summarize and answer questions about a robot agent’s past actions using natural language alone. A single system with a large language model at its core is trained to both summarize and answer questions about action sequences given ego-centric video frames of a virtual robot and a question prompt. To enable training of question answering, we develop a method to automatically generate English-language questions and answers about objects, actions, and the temporal order in which actions occurred during episodes of robot action in the virtual environment. Training one model to both summarize and answer questions enables zero-shot transfer of representations of objects learned through question answering to improved action summarization.},
note = {Publisher: Springer},
keywords = {Action sequences, E-Learning, Interpretability, Language Model, Long horizon tasks, Natural language processing systems, Natural languages, Question Answering, Representation learning, Robots, Summarization, Video frame, Virtual Reality, Virtual robots, Zero-shot learning},
pubstate = {published},
tppubtype = {article}
}
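The automatic question generation described in the abstract above lends itself to a short sketch. Assuming each episode is logged as an ordered list of (action, object) pairs (an assumed format for illustration; the paper derives its questions from episodes of robot action in the virtual environment), template questions about objects, actions, and temporal order follow directly:

# Hedged sketch of template-based QA generation over a logged robot
# episode; the tuple format and question templates are assumptions,
# not the paper's exact generation procedure.
def generate_qa(episode):
    """episode: ordered (action, object) pairs, e.g. [("pick up", "mug")]."""
    qa = []
    for action, obj in episode:
        # Object question: which object did this action apply to?
        qa.append((f"What did the robot {action}?", obj))
        # Action question: what was done to this object?
        qa.append((f"What did the robot do with the {obj}?", action))
    # Temporal-order questions over every ordered pair of steps.
    for i, (a1, o1) in enumerate(episode):
        for a2, o2 in episode[i + 1:]:
            qa.append((f"Which happened first: '{a1} {o1}' or '{a2} {o2}'?",
                       f"{a1} {o1}"))
    return qa

episode = [("pick up", "mug"), ("open", "drawer"), ("push", "chair")]
for question, gold in generate_qa(episode):
    print(f"{question} -> {gold}")

Pairs like these supply the question-answering supervision whose learned object representations, per the abstract, transfer zero-shot to improved action summarization.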