AHCI RESEARCH GROUP
Publications
Papers published in international journals,
proceedings of conferences, workshops and books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTeX record for each paper.
2025
Logothetis, I.; Diakogiannis, K.; Vidakis, N.
Interactive Learning Through Conversational Avatars and Immersive VR: Enhancing Diabetes Education and Self-Management Proceedings Article
In: X., Fang (Ed.): Lect. Notes Comput. Sci., pp. 415–429, Springer Science and Business Media Deutschland GmbH, 2025, ISBN: 03029743 (ISSN); 978-303192577-1 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Chronic disease, Computer aided instruction, Diabetes Education, Diagnosis, E-Learning, Education management, Engineering education, Gamification, Immersive virtual reality, Interactive computer graphics, Interactive learning, Large population, Learning systems, NUI, Self management, Serious game, Serious games, simulation, Virtual Reality
@inproceedings{logothetis_interactive_2025,
  title     = {Interactive Learning Through Conversational Avatars and Immersive {VR}: Enhancing Diabetes Education and Self-Management},
  author    = {Logothetis, I. and Diakogiannis, K. and Vidakis, N.},
  editor    = {Fang, X.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105008266480&doi=10.1007%2f978-3-031-92578-8_27&partnerID=40&md5=451274dfa3ef0b3f1b39c7d5a665ee3b},
  doi       = {10.1007/978-3-031-92578-8_27},
  isbn      = {978-3-031-92577-1},
  issn      = {0302-9743},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Lect. Notes Comput. Sci.},
  series    = {Lecture Notes in Computer Science},
  volume    = {15816},
  pages     = {415--429},
  publisher = {Springer Science and Business Media Deutschland GmbH},
  abstract  = {Diabetes is a chronic disease affecting a large population of the world. Education and self-management of diabetes are crucial. Technologies such as Virtual Reality (VR) have presented promising results in healthcare education, while studies suggest that Artificial Intelligence (AI) can help in learning by further engaging the learner. This study aims to educate users on the entire routine of managing diabetes. The serious game utilizes VR for realistic interaction with diabetes tools and generative AI through a conversational avatar that acts as an assistant instructor. In this way, it allows users to practice diagnostic and therapeutic interventions in a controlled virtual environment, helping to build their understanding and confidence in diabetes management. To measure the effects of the proposed serious game, presence, and perceived agency were measured. Preliminary results indicate that this setup aids in the engagement and immersion of learners, while the avatar can provide helpful information during gameplay. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2025.},
  keywords  = {Artificial intelligence, Chronic disease, Computer aided instruction, Diabetes Education, Diagnosis, E-Learning, Education management, Engineering education, Gamification, Immersive virtual reality, Interactive computer graphics, Interactive learning, Large population, Learning systems, NUI, Self management, Serious game, Serious games, simulation, Virtual Reality},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
AlJadaan, O.; Ibrahim, O.; Al Ani, N. N.; Jabas, A.; Alfaress, M.
Navigating Intellectual Property Rights in the Age of AI and the Metaverse Book Section
In: Studies in Systems, Decision and Control, vol. 234, pp. 219–228, Springer Science and Business Media Deutschland GmbH, 2025, ISBN: 21984182 (ISSN).
Abstract | Links | BibTeX | Tags: AI-generated content, Artificial intelligence, Authorship, Blockchain, Copyright law, Creative industries, Cross-jurisdictional issues, Digital assets, Ethical implications, Intellectual property, IP enforcement, Legal frameworks, Metaverse, Non-fungible tokens (NFTs), Ownership, Patent law, Smart contracts, Virtual goods
@incollection{aljadaan_navigating_2025,
  title     = {Navigating Intellectual Property Rights in the Age of {AI} and the Metaverse},
  author    = {AlJadaan, O. and Ibrahim, O. and {Al Ani}, N. N. and Jabas, A. and Alfaress, M.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105003758740&doi=10.1007%2f978-3-031-84636-6_18&partnerID=40&md5=f23526f6ff8a70c783074256ca6351a8},
  doi       = {10.1007/978-3-031-84636-6_18},
  issn      = {2198-4182},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Studies in Systems, Decision and Control},
  volume    = {234},
  pages     = {219--228},
  publisher = {Springer Science and Business Media Deutschland GmbH},
  abstract  = {The paper examines the challenges that Intellectual Property (IP) rights pose within artificial intelligence (AI) and further into the Metaverse, the constantly developing virtual world wherein one is allowed to create, trade, and own digital assets. The increasing development of AI-generated content and virtual environments has also challenged the traditional notion of authorship and ownership and, thus, to the existing regimes regulating IP. This paper discusses the current legal framework, case studies on AI-generated content, and IP use in Metaverse through smart contracts and blockchain technology. Moreover, this paper examines the ethics involved in such development and public policy reforms that can be implemented in the IP law relating to this evolving technology. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2025.},
  keywords  = {AI-generated content, Artificial intelligence, Authorship, Blockchain, Copyright law, Creative industries, Cross-jurisdictional issues, Digital assets, Ethical implications, Intellectual property, IP enforcement, Legal frameworks, Metaverse, Non-fungible tokens (NFTs), Ownership, Patent law, Smart contracts, Virtual goods},
  pubstate  = {published},
  tppubtype = {incollection},
}
Mereu, J.
Using LLMs to enhance end-user development support in XR Proceedings Article
In: V., Paneva; D., Tetteroo; V., Frau; S., Feger; D., Spano; F., Paterno; S., Sauer; M., Manca (Ed.): CEUR Workshop Proc., CEUR-WS, 2025, ISBN: 16130073 (ISSN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Condition, Configuration, Development support, Development technique, End-User Development, End-Users, Event-condition-action, Event-Condition-Actions, Extended reality, Human computer interaction, Information Systems, Information use, Natural Language, Natural language processing systems, Natural languages, Rule, rules
@inproceedings{mereu_using_2025,
  title     = {Using {LLMs} to enhance end-user development support in {XR}},
  author    = {Mereu, J.},
  editor    = {Paneva, V. and Tetteroo, D. and Frau, V. and Feger, S. and Spano, D. and Paterno, F. and Sauer, S. and Manca, M.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105008755984&partnerID=40&md5=bfaaa38c3bee309621426f8f35332107},
  issn      = {1613-0073},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {CEUR Workshop Proc.},
  volume    = {3978},
  publisher = {CEUR-WS},
  abstract  = {This paper outlines the center stage of my PhD research, which aims to empower non-developer users to create and customize eXtended Reality (XR) environments through End-User Development (EUD) techniques combined with the latest AI tools. In particular, I describe my contributions to the EUD4XR project, detailing both the work completed and the ongoing developments. EUD4XR seeks to support end-users in customizing XR content with the assistance of a Large Language Model (LLM)-based conversational agent. © 2025 Copyright for this paper by its authors.},
  keywords  = {Artificial intelligence, Condition, Configuration, Development support, Development technique, End-User Development, End-Users, Event-condition-action, Event-Condition-Actions, Extended reality, Human computer interaction, Information Systems, Information use, Natural Language, Natural language processing systems, Natural languages, Rule, rules},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Casas, L.; Mitchell, K.
Structured Teaching Prompt Articulation for Generative-AI Role Embodiment with Augmented Mirror Video Displays Proceedings Article
In: S.N., Spencer (Ed.): Proc.: VRCAI - ACM SIGGRAPH Int. Conf. Virtual-Reality Contin. Appl. Ind., Association for Computing Machinery, Inc, 2025, ISBN: 979-840071348-4 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Augmented Reality, Computer interaction, Contrastive Learning, Cultural icon, Experiential learning, Generative adversarial networks, Generative AI, human-computer interaction, Immersive, Pedagogical practices, Role-based, Teachers', Teaching, Video display, Virtual environments, Virtual Reality
@inproceedings{casas_structured_2025,
  title     = {Structured Teaching Prompt Articulation for Generative-{AI} Role Embodiment with Augmented Mirror Video Displays},
  author    = {Casas, L. and Mitchell, K.},
  editor    = {Spencer, S. N.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85217997060&doi=10.1145%2f3703619.3706049&partnerID=40&md5=7141c5dac7882232c6ee8e0bef0ba84e},
  doi       = {10.1145/3703619.3706049},
  isbn      = {979-8-4007-1348-4},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Proc.: VRCAI - ACM SIGGRAPH Int. Conf. Virtual-Reality Contin. Appl. Ind.},
  publisher = {Association for Computing Machinery, Inc},
  abstract  = {We present a classroom enhanced with augmented reality video display in which students adopt snapshots of their corresponding virtual personas according to their teacher's live articulated spoken educational theme, linearly, such as historical figures, famous scientists, cultural icons, and laterally according to archetypal categories such as world dance styles. We define a structure of generative AI prompt guidance to assist teachers with focused specified visual role embodiment stylization. By leveraging role-based immersive embodiment, our proposed approach enriches pedagogical practices that prioritize experiential learning. © 2024 ACM.},
  keywords  = {Artificial intelligence, Augmented Reality, Computer interaction, Contrastive Learning, Cultural icon, Experiential learning, Generative adversarial networks, Generative AI, human-computer interaction, Immersive, Pedagogical practices, Role-based, Teachers', Teaching, Video display, Virtual environments, Virtual Reality},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Häfner, P.; Eisenlohr, F.; Karande, A.; Grethler, M.; Mukherjee, A.; Tran, N.
Leveraging Virtual Prototypes for Training Data Collection in LLM-Based Voice User Interface Development for Machines Proceedings Article
In: Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR, pp. 281–285, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 979-833152157-8 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Behavioral Research, Data collection, Language Model, Large language model, large language models, Model-based OPC, Training data, User interface development, Virtual environments, Virtual Prototype, Virtual Prototyping, Virtual Reality, Voice User Interface, Voice User Interfaces, Wizard of Oz, Wizard-of-Oz Method
@inproceedings{hafner_leveraging_2025,
  title     = {Leveraging Virtual Prototypes for Training Data Collection in {LLM}-Based Voice User Interface Development for Machines},
  author    = {H{\"a}fner, P. and Eisenlohr, F. and Karande, A. and Grethler, M. and Mukherjee, A. and Tran, N.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105000344182&doi=10.1109%2fAIxVR63409.2025.00054&partnerID=40&md5=05fe014eddba395881575bec5d96ce15},
  doi       = {10.1109/AIxVR63409.2025.00054},
  isbn      = {979-8-3315-2157-8},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR},
  pages     = {281--285},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {Voice User Interfaces (VUIs) are becoming increasingly valuable in industrial applications, offering hands-free control in complex environments. However, developing and validating VUIs for such applications faces challenges, including limited access to physical prototypes and high testing costs. This paper presents a methodology that utilizes virtual reality (VR) prototypes to collect training data for large language model (LLM)-based VUIs, allowing early-stage voice control development before physical prototypes are accessible. Through an immersive Wizard-of-Oz (WoZ) method, participants interact with a virtual reality representation of a machine, generating realistic, scenario-based conversational data. This combined WoZ and VR approach enables high-quality data collection and iterative model training, offering an effective solution that can be applied across various types of machine. Preliminary findings demonstrate the viability of VR in generating diverse and robust data sets that closely simulate real-world dialogs for voice interactions in industrial settings. © 2025 IEEE.},
  keywords  = {Artificial intelligence, Behavioral Research, Data collection, Language Model, Large language model, large language models, Model-based OPC, Training data, User interface development, Virtual environments, Virtual Prototype, Virtual Prototyping, Virtual Reality, Voice User Interface, Voice User Interfaces, Wizard of Oz, Wizard-of-Oz Method},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Stacchio, L.; Balloni, E.; Frontoni, E.; Paolanti, M.; Zingaretti, P.; Pierdicca, R.
MineVRA: Exploring the Role of Generative AI-Driven Content Development in XR Environments through a Context-Aware Approach Journal Article
In: IEEE Transactions on Visualization and Computer Graphics, vol. 31, no. 5, pp. 3602–3612, 2025, ISSN: 10772626 (ISSN).
Abstract | Links | BibTeX | Tags: adult, Article, Artificial intelligence, Computer graphics, Computer vision, Content Development, Contents development, Context-Aware, Context-aware approaches, Extended reality, female, Generative adversarial networks, Generative AI, generative artificial intelligence, human, Human-in-the-loop, Immersive, Immersive environment, male, Multi-modal, User need, Virtual environments, Virtual Reality
@article{stacchio_minevra_2025,
  title    = {{MineVRA}: Exploring the Role of Generative {AI}-Driven Content Development in {XR} Environments through a Context-Aware Approach},
  author   = {Stacchio, L. and Balloni, E. and Frontoni, E. and Paolanti, M. and Zingaretti, P. and Pierdicca, R.},
  url      = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105003746367&doi=10.1109%2fTVCG.2025.3549160&partnerID=40&md5=70b162b574eebbb0cb71db871aa787e1},
  doi      = {10.1109/TVCG.2025.3549160},
  issn     = {1077-2626},
  year     = {2025},
  date     = {2025-01-01},
  journal  = {IEEE Transactions on Visualization and Computer Graphics},
  volume   = {31},
  number   = {5},
  pages    = {3602--3612},
  abstract = {The convergence of Artificial Intelligence (AI), Computer Vision (CV), Computer Graphics (CG), and Extended Reality (XR) is driving innovation in immersive environments. A key challenge in these environments is the creation of personalized 3D assets, traditionally achieved through manual modeling, a time-consuming process that often fails to meet individual user needs. More recently, Generative AI (GenAI) has emerged as a promising solution for automated, context-aware content generation. In this paper, we present MineVRA (Multimodal generative artificial iNtelligence for contExt-aware Virtual Reality Assets), a novel Human-In-The-Loop (HITL) XR framework that integrates GenAI to facilitate coherent and adaptive 3D content generation in immersive scenarios. To evaluate the effectiveness of this approach, we conducted a comparative user study analyzing the performance and user satisfaction of GenAI-generated 3D objects compared to those generated by Sketchfab in different immersive contexts. The results suggest that GenAI can significantly complement traditional 3D asset libraries, with valuable design implications for the development of human-centered XR environments. © 1995-2012 IEEE.},
  keywords = {adult, Article, Artificial intelligence, Computer graphics, Computer vision, Content Development, Contents development, Context-Aware, Context-aware approaches, Extended reality, female, Generative adversarial networks, Generative AI, generative artificial intelligence, human, Human-in-the-loop, Immersive, Immersive environment, male, Multi-modal, User need, Virtual environments, Virtual Reality},
  pubstate  = {published},
  tppubtype = {article},
}
Shawash, J.; Thibault, M.; Hamari, J.
Who Killed Helene Pumpulivaara?: AI-Assisted Content Creation and XR Implementation for Interactive Built Heritage Storytelling Proceedings Article
In: IMX - Proc. ACM Int. Conf. Interact. Media Experiences, pp. 377–379, Association for Computing Machinery, Inc, 2025, ISBN: 979-840071391-0 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Augmented Reality, Built heritage, Content creation, Digital heritage, Digital Interpretation, Extended reality, Human computer interaction, Human engineering, Industrial Heritage, Interactive computer graphics, Interactive computer systems, Mobile photographies, Narrative Design, Narrative designs, Production pipelines, Uncanny valley, Virtual Reality
@inproceedings{shawash_who_2025,
  title     = {Who Killed Helene Pumpulivaara?: {AI}-Assisted Content Creation and {XR} Implementation for Interactive Built Heritage Storytelling},
  author    = {Shawash, J. and Thibault, M. and Hamari, J.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105008003446&doi=10.1145%2f3706370.3731703&partnerID=40&md5=bc8a8d221abcf6c560446979fbd06cbc},
  doi       = {10.1145/3706370.3731703},
  isbn      = {979-8-4007-1391-0},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {IMX - Proc. ACM Int. Conf. Interact. Media Experiences},
  pages     = {377--379},
  publisher = {Association for Computing Machinery, Inc},
  abstract  = {This demo presents "Who Killed Helene Pumpulivaara?", an innovative interactive heritage experience that combines crime mystery narrative with XR technology to address key challenges in digital heritage interpretation. Our work makes six significant contributions: (1) the discovery of a "Historical Uncanny Valley"effect where varying fidelity levels between AI-generated and authentic content serve as implicit markers distinguishing fact from interpretation; (2) an accessible production pipeline combining mobile photography with AI tools that democratizes XR heritage creation for resource-limited institutions; (3) a spatial storytelling approach that effectively counters decontextualization in digital heritage; (4) a multi-platform implementation strategy across web and VR environments; (5) a practical model for AI-assisted heritage content creation balancing authenticity with engagement; and (6) a pathway toward spatial augmented reality for future heritage interpretation. Using the historic Finlayson Factory in Tampere, Finland as a case study, our implementation demonstrates how emerging technologies can enrich the authenticity of heritage experiences, fostering deeper emotional connections between visitors and the histories embedded in place. © 2025 Copyright held by the owner/author(s).},
  keywords  = {Artificial intelligence, Augmented Reality, Built heritage, Content creation, Digital heritage, Digital Interpretation, Extended reality, Human computer interaction, Human engineering, Industrial Heritage, Interactive computer graphics, Interactive computer systems, Mobile photographies, Narrative Design, Narrative designs, Production pipelines, Uncanny valley, Virtual Reality},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Abdelmagid, A. S.; Jabli, N. M.; Al-Mohaya, A. Y.; Teleb, A. A.
Integrating Interactive Metaverse Environments and Generative Artificial Intelligence to Promote the Green Digital Economy and e-Entrepreneurship in Higher Education Journal Article
In: Sustainability (Switzerland), vol. 17, no. 12, 2025, ISSN: 20711050 (ISSN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, digitization, e-entrepreneurship, entrepreneur, generative artificial intelligence, green digital economy, green economy, higher education, Learning, Metaverse, Sustainable development
@article{abdelmagid_integrating_2025,
  title    = {Integrating Interactive Metaverse Environments and Generative Artificial Intelligence to Promote the Green Digital Economy and {e-Entrepreneurship} in Higher Education},
  author   = {Abdelmagid, A. S. and Jabli, N. M. and Al-Mohaya, A. Y. and Teleb, A. A.},
  url      = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105008981835&doi=10.3390%2fsu17125594&partnerID=40&md5=0eaea40f26536c05c29c7b3f0d42d37d},
  doi      = {10.3390/su17125594},
  issn     = {2071-1050},
  year     = {2025},
  date     = {2025-01-01},
  journal  = {Sustainability (Switzerland)},
  volume   = {17},
  number   = {12},
  abstract = {The rapid evolution of the Fourth Industrial Revolution has significantly transformed educational practices, necessitating the integration of advanced technologies into higher education to address contemporary sustainability challenges. This study explores the integration of interactive metaverse environments and generative artificial intelligence (GAI) in promoting the green digital economy and developing e-entrepreneurship skills among graduate students. Grounded in a quasi-experimental design, the research was conducted with a sample of 25 postgraduate students enrolled in the “Computers in Education” course at King Khalid University. A 3D immersive learning environment (FrameVR) was combined with GAI platforms (ChatGPT version 4.0, Elai.io version 2.5, Tome version 1.3) to create an innovative educational experience. Data were collected using validated instruments, including the Green Digital Economy Scale, the e-Entrepreneurship Scale, and a digital product evaluation rubric. The findings revealed statistically significant improvements in students’ awareness of green digital concepts, entrepreneurial competencies, and their ability to produce sustainable digital products. The study highlights the potential of immersive virtual learning environments and AI-driven content creation tools in enhancing digital literacy and sustainability-oriented innovation. It also underscores the urgent need to update educational strategies and curricula to prepare future professionals capable of navigating and shaping green digital economies. This research provides a practical and replicable model for universities seeking to embed sustainability through emerging technologies, supporting broader goals such as SDG 4 (Quality Education) and SDG 9 (Industry, Innovation, and Infrastructure). © 2025 by the authors.},
  keywords = {Artificial intelligence, digitization, e-entrepreneurship, entrepreneur, generative artificial intelligence, green digital economy, green economy, higher education, Learning, Metaverse, Sustainable development},
  pubstate  = {published},
  tppubtype = {article},
}
2024
Xi, M.; Perera, M.; Matthews, B.; Wang, R.; Weiley, V.; Somarathna, R.; Maqbool, H.; Chen, J.; Engelke, U.; Anderson, S.; Adcock, M.; Thomas, B. H.
Towards Immersive AI Proceedings Article
In: U., Eck; M., Sra; J., Stefanucci; M., Sugimoto; M., Tatzgern; I., Williams (Ed.): Proc. - IEEE Int. Symp. Mixed Augment. Real. Adjunct, ISMAR-Adjunct, pp. 260–264, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-833150691-9 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Augmented Reality, Data visualization, Decision making, Heterogenous data, Immersive, Immersive analytic, Immersive analytics, Industrial research, Mixed reality, Neuro-symbolic system, Real- time, Scientific paradigm, Situated imaging., Time-interleaved, Visual analytics, Work-flows
@inproceedings{xi_towards_2024,
  title     = {Towards Immersive {AI}},
  author    = {Xi, M. and Perera, M. and Matthews, B. and Wang, R. and Weiley, V. and Somarathna, R. and Maqbool, H. and Chen, J. and Engelke, U. and Anderson, S. and Adcock, M. and Thomas, B. H.},
  editor    = {Eck, U. and Sra, M. and Stefanucci, J. and Sugimoto, M. and Tatzgern, M. and Williams, I.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85214375967&doi=10.1109%2fISMAR-Adjunct64951.2024.00062&partnerID=40&md5=fd07c97119d71418bb4365582b1d188c},
  doi       = {10.1109/ISMAR-Adjunct64951.2024.00062},
  isbn      = {979-8-3315-0691-9},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {Proc. - IEEE Int. Symp. Mixed Augment. Real. Adjunct, ISMAR-Adjunct},
  pages     = {260--264},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {With every shift in scientific paradigms comes not only a new way of seeing the world, but as Kunh argues, new tools for seeing [13]. Today, generative AI and neuro-symbolic systems show signs of changing how science functions, making it possible to synthesise complex heterogenous data in real time, interleaved with complex and situated workflows. But the new tools are not yet fully formed. To realise the opportunities and meet the challenges posed by the growth of generative AI for science and other knowledge work requires us to look beyond improvements in algorithms. The decision-making landscape for information workers has drastically changed, and the pressing need for analysts and experts to collaborate with AI in complex, high-tempo data environments has never been more evident.To bring strategic focus to these challenges in ways that will enable social, environmental and economic benefits for all, CSIRO's Data61 (the data and digital specialist arm of the Commonwealth Scientific and Industrial Research Organisation - Australia's national science agency) has established the Immersive AI Research Cluster. The cluster allows more than 30 research scientists and engineers to focus on defining a broad range of scientific disciplines for people to work with and understand the information provided by AI, such as data visualisation, visual analytics, connecting remote people, through immersive technologies like virtual and augmented reality. This workshop paper presents the trending research directions and challenges that emerged from this research cluster, which are closely linked to the scientific domains and illustrated through use cases. © 2024 IEEE.},
  keywords  = {Artificial intelligence, Augmented Reality, Data visualization, Decision making, Heterogenous data, Immersive, Immersive analytic, Immersive analytics, Industrial research, Mixed reality, Neuro-symbolic system, Real- time, Scientific paradigm, Situated imaging., Time-interleaved, Visual analytics, Work-flows},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Domenichini, D.; Bucchiarone, A.; Chiarello, F.; Schiavo, G.; Fantoni, G.
An AI-Driven Approach for Enhancing Engagement and Conceptual Understanding in Physics Education Proceedings Article
In: IEEE Global Eng. Edu. Conf., EDUCON, IEEE Computer Society, 2024, ISBN: 21659559 (ISSN); 979-835039402-3 (ISBN).
Abstract | Links | BibTeX | Tags: Adaptive Learning, Artificial intelligence, Artificial intelligence in education, Artificial Intelligence in Education (AIED), Conceptual Understanding, Educational System, Educational systems, Gamification, Generative AI, generative artificial intelligence, Learning Activity, Learning systems, Physics Education, Teachers', Teaching, Virtual Reality
@inproceedings{domenichini_ai-driven_2024,
  title     = {An {AI}-Driven Approach for Enhancing Engagement and Conceptual Understanding in Physics Education},
  author    = {Domenichini, D. and Bucchiarone, A. and Chiarello, F. and Schiavo, G. and Fantoni, G.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85199035695&doi=10.1109%2fEDUCON60312.2024.10578670&partnerID=40&md5=4cf9f89e97664ae6d618a90f2dbc23e0},
  doi       = {10.1109/EDUCON60312.2024.10578670},
  isbn      = {979-8-3503-9402-3},
  issn      = {2165-9559},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {IEEE Global Eng. Edu. Conf., EDUCON},
  publisher = {IEEE Computer Society},
  abstract  = {This Work in Progress paper introduces the design of an innovative educational system that leverages Artificial Intelligence (AI) to address challenges in physics education. The primary objective is to create a system that dynamically adapts to the individual needs and preferences of students while maintaining user-friendliness for teachers, allowing them to tailor their teaching methods. The emphasis is on fostering motivation and engagement, achieved through the implementation of a gamified virtual environment and a strong focus on personalization. Our aim is to develop a system capable of autonomously generating learning activities and constructing effective learning paths, all under the supervision and interaction of teachers. The generation of learning activities is guided by educational taxonomies that delineate and categorize the cognitive processes involved in these activities. The proposed educational system seeks to address challenges identified by Physics Education Research (PER), which offers valuable insights into how individuals learn physics and provides strategies to enhance the overall quality of physics education. Our specific focus revolves around two crucial aspects: concentrating on the conceptual understanding of physics concepts and processes, and fostering knowledge integration and coherence across various physics topics. These aspects are deemed essential for cultivating enduring knowledge and facilitating practical applications in the field of physics. © 2024 IEEE.},
  keywords  = {Adaptive Learning, Artificial intelligence, Artificial intelligence in education, Artificial Intelligence in Education (AIED), Conceptual Understanding, Educational System, Educational systems, Gamification, Generative AI, generative artificial intelligence, Learning Activity, Learning systems, Physics Education, Teachers', Teaching, Virtual Reality},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Chaccour, C.; Saad, W.; Debbah, M.; Poor, H. V.
Joint Sensing, Communication, and AI: A Trifecta for Resilient THz User Experiences Journal Article
In: IEEE Transactions on Wireless Communications, vol. 23, no. 9, pp. 11444–11460, 2024, ISSN: 15361276 (ISSN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, artificial intelligence (AI), Behavioral Research, Channel state information, Computer hardware, Cramer-Rao bounds, Extended reality (XR), Hardware, Joint sensing and communication, Learning systems, machine learning, machine learning (ML), Machine-learning, Multi agent systems, reliability, Resilience, Sensor data fusion, Tera Hertz, Terahertz, terahertz (THz), Terahertz communication, Wireless communications, Wireless sensor networks, X reality
@article{chaccour_joint_2024,
  title    = {Joint Sensing, Communication, and {AI}: A Trifecta for Resilient {THz} User Experiences},
  author   = {Chaccour, C. and Saad, W. and Debbah, M. and Poor, H. V.},
  url      = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85190170739&doi=10.1109%2fTWC.2024.3382192&partnerID=40&md5=da12c6f31faacaa08118b26e4570843f},
  doi      = {10.1109/TWC.2024.3382192},
  issn     = {1536-1276},
  year     = {2024},
  date     = {2024-01-01},
  journal  = {IEEE Transactions on Wireless Communications},
  volume   = {23},
  number   = {9},
  pages    = {11444--11460},
  abstract = {In this paper a novel joint sensing, communication, and artificial intelligence (AI) framework is proposed so as to optimize extended reality (XR) experiences over terahertz (THz) wireless systems. Within this framework, active reconfigurable intelligent surfaces (RISs) are incorporated as pivotal elements, serving as enhanced base stations in the THz band to enhance Line-of-Sight (LoS) communication. The proposed framework consists of three main components. First, a tensor decomposition framework is proposed to extract unique sensing parameters for XR users and their environment by exploiting the THz channel sparsity. Essentially, the THz band's quasi-opticality is exploited and the sensing parameters are extracted from the uplink communication signal, thereby allowing for the use of the same waveform, spectrum, and hardware for both communication and sensing functionalities. Then, the Cramér-Rao lower bound is derived to assess the accuracy of the estimated sensing parameters. Second, a non-autoregressive multi-resolution generative AI framework integrated with an adversarial transformer is proposed to predict missing and future sensing information. The proposed framework offers robust and comprehensive historical sensing information and anticipatory forecasts of future environmental changes, which are generalizable to fluctuations in both known and unforeseen user behaviors and environmental conditions. Third, a multi-agent deep recurrent hysteretic Q-neural network is developed to control the handover policy of RIS subarrays, leveraging the informative nature of sensing information to minimize handover cost, maximize the individual quality of personal experiences (QoPEs), and improve the robustness and resilience of THz links. Simulation results show a high generalizability of the proposed unsupervised generative artificial intelligence (AI) framework to fluctuations in user behavior and velocity, leading to a 61% improvement in instantaneous reliability compared to schemes with known channel state information. © 2002-2012 IEEE.},
  keywords = {Artificial intelligence, artificial intelligence (AI), Behavioral Research, Channel state information, Computer hardware, Cramer-Rao bounds, Extended reality (XR), Hardware, Joint sensing and communication, Learning systems, machine learning, machine learning (ML), Machine-learning, Multi agent systems, reliability, Resilience, Sensor data fusion, Tera Hertz, Terahertz, terahertz (THz), Terahertz communication, Wireless communications, Wireless sensor networks, X reality},
  pubstate  = {published},
  tppubtype = {article},
}
Takata, T.; Yamada, R.; Oliveira Nzinga Rene, A.; Xu, K.; Fujimoto, M.
Development of a Virtual Patient Model for Kampo Medical Interview: New Approach for Enhancing Empathy and Understanding of Kampo Medicine Pathological Concepts Proceedings Article
In: Jt. Int. Conf. Soft Comput. Intell. Syst. Int. Symp. Adv. Intell. Syst., SCIS ISIS, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-835037333-2 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Clinical practices, Clinical training, Complementary and alternative medicines, Covid-19, Diagnosis, Educational approach, Empathy, Kampo medical interview, Medical education, Medical student, Medical students, New approaches, Virtual environments, Virtual patient, Virtual patient models, Virtual patients, Virtual Reality
@inproceedings{takata_development_2024,
title = {Development of a Virtual Patient Model for Kampo Medical Interview: New Approach for Enhancing Empathy and Understanding of Kampo Medicine Pathological Concepts},
author = {T. Takata and R. Yamada and A. Oliveira Nzinga Rene and K. Xu and M. Fujimoto},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85214666311&doi=10.1109%2fSCISISIS61014.2024.10759962&partnerID=40&md5=2e149e0fe211f586049914e571c6e2fa},
doi = {10.1109/SCISISIS61014.2024.10759962},
isbn = {979-835037333-2 (ISBN)},
year = {2024},
date = {2024-01-01},
booktitle = {Jt. Int. Conf. Soft Comput. Intell. Syst. Int. Symp. Adv. Intell. Syst., SCIS ISIS},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Global interest in complementary and alternative medicine has increased in recent years, with Kampo medicine in Japan gaining greater trust and use. Detailed patient interviews are essential in Kampo medicine, as the physician's empathy is critical to diagnostic precision. Typically, medical students develop empathy and deepen their understanding of Kampo's pathological concepts through clinical practice. However, the COVID-19 pandemic has imposed significant restrictions on clinical training. To address this challenge, we propose a novel educational approach to enhance empathy and understanding of Kampo medicine by developing a virtual patient application. This application leverages generative artificial intelligence to simulate realistic patient interactions, enabling students to practice Kampo medical interviews in a safe, controlled environment. The AI-generated conversations are designed to reflect the emotional nuances of real-life dialogue, with the virtual patients' facial expressions synchronized to these emotions, thus enhancing the realism of the training. The suggested method allows repeated practice at any time and fosters the development of essential diagnostic and empathetic skills. While promising, challenges remain in improving these simulations' accuracy, and further refinements are still under consideration. © 2024 IEEE.},
keywords = {Artificial intelligence, Clinical practices, Clinical training, Complementary and alternative medicines, Covid-19, Diagnosis, Educational approach, Empathy, Kampo medical interview, Medical education, Medical student, Medical students, New approaches, Virtual environments, Virtual patient, Virtual patient models, Virtual patients, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Samson, J.; Lameras, P.; Taylor, N.; Kneafsey, R.
Fostering a Co-creation Process for the Development of an Extended Reality Healthcare Education Resource Proceedings Article
In: M.E., Auer; T., Tsiatsos (Ed.): Lect. Notes Networks Syst., pp. 205–212, Springer Science and Business Media Deutschland GmbH, 2024, ISBN: 23673370 (ISSN); 978-303156074-3 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Co-creation, Creation process, Diagnosis, Education computing, Education resource, Extended reality, Health care education, Hospitals, Immersive, Inter professionals, Interprofessional Healthcare Education, Software products, Students, Virtual patients
@inproceedings{samson_fostering_2024,
title = {Fostering a Co-creation Process for the Development of an Extended Reality Healthcare Education Resource},
author = {J. Samson and P. Lameras and N. Taylor and R. Kneafsey},
editor = {Auer M.E. and Tsiatsos T.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85189759614&doi=10.1007%2f978-3-031-56075-0_20&partnerID=40&md5=6ae832882a2e224094c1beb81c925333},
doi = {10.1007/978-3-031-56075-0_20},
isbn = {23673370 (ISSN); 978-303156074-3 (ISBN)},
year = {2024},
date = {2024-01-01},
booktitle = {Lect. Notes Networks Syst.},
volume = {937 LNNS},
pages = {205–212},
publisher = {Springer Science and Business Media Deutschland GmbH},
abstract = {The aim of this research is to create an immersive healthcare education resource using an extended reality (XR) platform. This platform leverages an existing software product, incorporating virtual patients with conversational capabilities driven by artificial intelligence (AI). The initial stage produced an early prototype focused on assessing an elderly virtual patient experiencing frailty. This scenario encompasses the hospital admission to post-discharge care at home, involving various healthcare professionals such as paramedics, emergency clinicians, diagnostic radiographers, geriatricians, physiotherapists, occupational therapists, nurses, operating department practitioners, dietitians, and social workers. The plan moving forward is to refine and expand this prototype through a co-creation with diverse stakeholders. The refinement process will include the introduction of updated scripts into the standard AI model. Furthermore, these scripts will be tested against a new hybrid model that combines generative AI. Ultimately, this resource will be co-designed to create a learning activity tailored for occupational therapy and physiotherapy students. This activity will undergo testing with a cohort of students, and the outcomes of this research are expected to inform the future development of interprofessional virtual simulated placements (VSPs). These placements will complement traditional clinical learning experiences, offering students an immersive environment to enhance their skills and knowledge in the healthcare field. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2024.},
keywords = {Artificial intelligence, Co-creation, Creation process, Diagnosis, Education computing, Education resource, Extended reality, Health care education, Hospitals, Immersive, Inter professionals, Interprofessional Healthcare Education, Software products, Students, Virtual patients},
pubstate = {published},
tppubtype = {inproceedings}
}
Gkournelos, C.; Konstantinou, C.; Angelakis, P.; Michalos, G.; Makris, S.
Enabling Seamless Human-Robot Collaboration in Manufacturing Using LLMs Proceedings Article
In: A., Wagner; K., Alexopoulos; S., Makris (Ed.): Lect. Notes Mech. Eng., pp. 81–89, Springer Science and Business Media Deutschland GmbH, 2024, ISBN: 21954356 (ISSN); 978-303157495-5 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Augmented Reality, Collaboration capabilities, Computational Linguistics, Human operator, Human-Robot Collaboration, Industrial research, Industrial robots, Intelligent robots, Language Model, Large language model, large language models, Manufacturing environments, Programming robots, Reality interface, Research papers, Robot programming, User friendly
@inproceedings{gkournelos_enabling_2024,
title = {Enabling Seamless Human-Robot Collaboration in Manufacturing Using LLMs},
author = {C. Gkournelos and C. Konstantinou and P. Angelakis and G. Michalos and S. Makris},
editor = {Wagner A. and Alexopoulos K. and Makris S.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85199196139&doi=10.1007%2f978-3-031-57496-2_9&partnerID=40&md5=cd0b33b3c9e9f9e53f1e99882945e134},
doi = {10.1007/978-3-031-57496-2_9},
isbn = {21954356 (ISSN); 978-303157495-5 (ISBN)},
year = {2024},
date = {2024-01-01},
booktitle = {Lect. Notes Mech. Eng.},
pages = {81–89},
publisher = {Springer Science and Business Media Deutschland GmbH},
abstract = {In the era of Industry 5.0, there is a growing interest in harnessing the potential of human-robot collaboration (HRC) in manufacturing environments. This research paper focuses on the integration of Large Language Models (LLMs) to augment HRC capabilities, particularly in addressing configuration issues when programming robots to collaborate with human operators. By harnessing the capabilities of LLMs in combination with a user-friendly augmented reality (AR) interface, the proposed approach empowers human operators to seamlessly collaborate with robots, facilitating smooth and efficient assembly processes. This research introduces CollabAI, an AI assistant for task management and natural communication based on a fine-tuned GPT model focusing on collaborative manufacturing. Real-world experiments were conducted in two manufacturing settings coming from the automotive and machinery industries. The findings have implications for various industries seeking to increase productivity and foster a new era of efficient and effective collaboration in manufacturing environments. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2024.},
keywords = {Artificial intelligence, Augmented Reality, Collaboration capabilities, Computational Linguistics, Human operator, Human-Robot Collaboration, Industrial research, Industrial robots, Intelligent robots, Language Model, Large language model, large language models, Manufacturing environments, Programming robots, Reality interface, Research papers, Robot programming, User friendly},
pubstate = {published},
tppubtype = {inproceedings}
}
Pooryousef, V.; Cordeil, M.; Besançon, L.; Bassed, R.; Dwyer, T.
Collaborative Forensic Autopsy Documentation and Supervised Report Generation using a Hybrid Mixed-Reality Environment and Generative AI Journal Article
In: IEEE Transactions on Visualization and Computer Graphics, vol. 30, no. 11, pp. 7452–7462, 2024, ISSN: 10772626 (ISSN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Augmented Reality, Autopsy, Causes of death, Complex procedure, Computer graphics, computer interface, Data visualization, Digital forensics, Documentation, Forensic autopsy, Forensic engineering, Forensic investigation, forensic science, Forensic Sciences, Generative AI, human, Humans, Imaging, Information Management, Laws and legislation, Mixed reality, Mixed-reality environment, Post mortem imaging, procedures, Report generation, Three-Dimensional, three-dimensional imaging, User-Computer Interface, Visualization, Workflow
@article{pooryousef_collaborative_2024,
title = {Collaborative Forensic Autopsy Documentation and Supervised Report Generation using a Hybrid Mixed-Reality Environment and Generative AI},
author = {V. Pooryousef and M. Cordeil and L. Besançon and R. Bassed and T. Dwyer},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85204066202&doi=10.1109%2fTVCG.2024.3456212&partnerID=40&md5=d1abaf1aaf3b033df21067ea34b8b98a},
doi = {10.1109/TVCG.2024.3456212},
issn = {10772626 (ISSN)},
year = {2024},
date = {2024-01-01},
journal = {IEEE Transactions on Visualization and Computer Graphics},
volume = {30},
number = {11},
pages = {7452–7462},
abstract = {Forensic investigation is a complex procedure involving experts working together to establish cause of death and report findings to legal authorities. While new technologies are being developed to provide better post-mortem imaging capabilities—including mixed-reality (MR) tools to support 3D visualisation of such data—these tools do not integrate seamlessly into their existing collaborative workflow and report authoring process, requiring extra steps, e.g. to extract imagery from the MR tool and combine with physical autopsy findings for inclusion in the report. Therefore, in this work we design and evaluate a new forensic autopsy report generation workflow and present a novel documentation system using hybrid mixed-reality approaches to integrate visualisation, voice and hand interaction, as well as collaboration and procedure recording. Our preliminary findings indicate that this approach has the potential to improve data management, aid reviewability, and thus, achieve more robust standards. Further, it potentially streamlines report generation and minimises dependency on external tools and assistance, reducing autopsy time and related costs. This system also offers significant potential for education. A free copy of this paper and all supplemental materials are available at https://osf.io/ygfzx. © 2024 IEEE.},
keywords = {Artificial intelligence, Augmented Reality, Autopsy, Causes of death, Complex procedure, Computer graphics, computer interface, Data visualization, Digital forensics, Documentation, Forensic autopsy, Forensic engineering, Forensic investigation, forensic science, Forensic Sciences, Generative AI, human, Humans, Imaging, Information Management, Laws and legislation, Mixed reality, Mixed-reality environment, Post mortem imaging, procedures, Report generation, Three-Dimensional, three-dimensional imaging, User-Computer Interface, Visualization, Workflow},
pubstate = {published},
tppubtype = {article}
}
Clocchiatti, A.; Fumero, N.; Soccini, A. M.
Character Animation Pipeline based on Latent Diffusion and Large Language Models Proceedings Article
In: Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR, pp. 398–405, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-835037202-1 (ISBN).
Abstract | Links | BibTeX | Tags: Animation, Animation pipeline, Artificial intelligence, Augmented Reality, Character animation, Computational Linguistics, Computer animation, Deep learning, Diffusion, E-Learning, Extended reality, Film production, Generative art, Language Model, Learning systems, Learning techniques, Natural language processing systems, Pipelines, Production pipelines, Virtual Reality
@inproceedings{clocchiatti_character_2024,
title = {Character Animation Pipeline based on Latent Diffusion and Large Language Models},
author = {A. Clocchiatti and N. Fumero and A. M. Soccini},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85187217072&doi=10.1109%2fAIxVR59861.2024.00067&partnerID=40&md5=d88b9ba7c80d49b60fd0d7acd5e7c4f0},
doi = {10.1109/AIxVR59861.2024.00067},
isbn = {979-835037202-1 (ISBN)},
year = {2024},
date = {2024-01-01},
booktitle = {Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR},
pages = {398–405},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Artificial intelligence and deep learning techniques are revolutionizing the film production pipeline. The majority of the current screenplay-to-animation pipelines focus on understanding the screenplay through natural language processing techniques, and on the generation of the animation through custom engines, missing the possibility to customize the characters. To address these issues, we propose a high-level pipeline for generating 2D characters and animations starting from screenplays, through a combination of Latent Diffusion Models and Large Language Models. Our approach uses ChatGPT to generate character descriptions starting from the screenplay. Then, using that data, it generates images of custom characters with Stable Diffusion and animates them according to their actions in different scenes. The proposed approach avoids well-known problems in generative AI tools such as temporal inconsistency and lack of control on the outcome. The results suggest that the pipeline is consistent and reliable, benefiting industries ranging from film production to virtual, augmented and extended reality content creation. © 2024 IEEE.},
keywords = {Animation, Animation pipeline, Artificial intelligence, Augmented Reality, Character animation, Computational Linguistics, Computer animation, Deep learning, Diffusion, E-Learning, Extended reality, Film production, Generative art, Language Model, Learning systems, Learning techniques, Natural language processing systems, Pipelines, Production pipelines, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Haramina, E.; Paladin, M.; Petričušić, Z.; Posarić, F.; Drobnjak, A.; Botički, I.
Learning Algorithms Concepts in a Virtual Reality Escape Room Proceedings Article
In: S., Babic; Z., Car; M., Cicin-Sain; D., Cisic; P., Ergovic; T.G., Grbac; V., Gradisnik; S., Gros; A., Jokic; A., Jovic; D., Jurekovic; T., Katulic; M., Koricic; V., Mornar; J., Petrovic; K., Skala; D., Skvorc; V., Sruk; M., Svaco; E., Tijan; N., Vrcek; B., Vrdoljak (Ed.): ICT Electron. Conv., MIPRO - Proc., pp. 2057–2062, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-835038249-5 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Computational complexity, Computer generated three dimensional environment, E-Learning, Education, Escape room, Extended reality, generative artificial intelligence, Learn+, Learning, Learning algorithms, Learning systems, Puzzle, puzzles, user experience, User study, User testing, Users' experiences, Virtual Reality
@inproceedings{haramina_learning_2024,
title = {Learning Algorithms Concepts in a Virtual Reality Escape Room},
author = {E. Haramina and M. Paladin and Z. Petričušić and F. Posarić and A. Drobnjak and I. Botički},
editor = {Babic S. and Car Z. and Cicin-Sain M. and Cisic D. and Ergovic P. and Grbac T.G. and Gradisnik V. and Gros S. and Jokic A. and Jovic A. and Jurekovic D. and Katulic T. and Koricic M. and Mornar V. and Petrovic J. and Skala K. and Skvorc D. and Sruk V. and Svaco M. and Tijan E. and Vrcek N. and Vrdoljak B.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85198221737&doi=10.1109%2fMIPRO60963.2024.10569447&partnerID=40&md5=8a94d92d989d1f0feb84eba890945de8},
doi = {10.1109/MIPRO60963.2024.10569447},
isbn = {979-835038249-5 (ISBN)},
year = {2024},
date = {2024-01-01},
booktitle = {ICT Electron. Conv., MIPRO - Proc.},
pages = {2057–2062},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Although the standard way to learn algorithms is by coding, learning through games is another way to obtain knowledge while having fun. Virtual reality is a computer-generated three-dimensional environment in which the player is fully immersed by having external stimuli mostly blocked out. In the game presented in this paper, players are enhancing their algorithms skills by playing an escape room game. The goal is to complete the room within the designated time by solving puzzles. The puzzles change for every playthrough with the use of generative artificial intelligence to provide every player with a unique experience. There are multiple types of puzzles such as time complexity, sorting algorithms, searching algorithms, and code execution. The paper presents the results of a study indicating students' preference for learning through gaming as a method of acquiring algorithms knowledge. © 2024 IEEE.},
keywords = {Artificial intelligence, Computational complexity, Computer generated three dimensional environment, E-Learning, Education, Escape room, Extended reality, generative artificial intelligence, Learn+, Learning, Learning algorithms, Learning systems, Puzzle, puzzles, user experience, User study, User testing, Users' experiences, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Wu, J.; Gan, W.; Chao, H. -C.; Yu, P. S.
Geospatial Big Data: Survey and Challenges Journal Article
In: IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing, vol. 17, pp. 17007–17020, 2024, ISSN: 19391404 (ISSN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, artificial intelligence (AI), Behavioral Research, Big Data, Data challenges, Data Mining, Data surveys, Data visualization, Earth observation data, Environmental management, environmental protection, Geo-spatial, Geo-spatial analysis, Geo-spatial data, Geospatial big data, geospatial big data (GBD), geospatial data, GIS, Green products, Human behaviors, Knowledge graph, Knowledge graphs, satellite, sensor, spatial data, Sustainable development, urban planning
@article{wu_geospatial_2024,
title = {Geospatial Big Data: Survey and Challenges},
author = {J. Wu and W. Gan and H. -C. Chao and P. S. Yu},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85200804056&doi=10.1109%2fJSTARS.2024.3438376&partnerID=40&md5=53ee1c9695b3f2e78d6b565ed47f7585},
doi = {10.1109/JSTARS.2024.3438376},
issn = {19391404 (ISSN)},
year = {2024},
date = {2024-01-01},
journal = {IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing},
volume = {17},
pages = {17007–17020},
abstract = {In recent years, geospatial big data (GBD) has obtained attention across various disciplines, categorized into big Earth observation data and big human behavior data. Identifying geospatial patterns from GBD has been a vital research focus in the fields of urban management and environmental sustainability. This article reviews the evolution of GBD mining and its integration with advanced artificial intelligence techniques. GBD consists of data generated by satellites, sensors, mobile devices, and geographical information systems, and we categorize geospatial data based on different perspectives. We outline the process of GBD mining and demonstrate how it can be incorporated into a unified framework. In addition, we explore new technologies, such as large language models, the metaverse, and knowledge graphs, and how they could make GBD even more useful. We also share examples of GBD helping with city management and protecting the environment. Finally, we discuss the real challenges that come up when working with GBD, such as issues with data retrieval and security. Our goal is to give readers a clear view of where GBD mining stands today and where it might go next. © 2024 The Authors. This work is licensed under a Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 License.},
keywords = {Artificial intelligence, artificial intelligence (AI), Behavioral Research, Big Data, Data challenges, Data Mining, Data surveys, Data visualization, Earth observation data, Environmental management, environmental protection, Geo-spatial, Geo-spatial analysis, Geo-spatial data, Geospatial big data, geospatial big data (GBD), geospatial data, GIS, Green products, Human behaviors, Knowledge graph, Knowledge graphs, satellite, sensor, spatial data, Sustainable development, urban planning},
pubstate = {published},
tppubtype = {article}
}
Weerasinghe, K.; Janapati, S.; Ge, X.; Kim, S.; Iyer, S.; Stankovic, J. A.; Alemzadeh, H.
Real-Time Multimodal Cognitive Assistant for Emergency Medical Services Proceedings Article
In: Proc. - ACM/IEEE Conf. Internet-of-Things Des. Implement., IoTDI, pp. 85–96, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-835037025-6 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Augmented Reality, Cognitive Assistance, Computational Linguistics, Decision making, Domain knowledge, Edge computing, Emergency medical services, Forecasting, Graphic methods, Language Model, machine learning, Machine-learning, Multi-modal, Real- time, Service protocols, Smart Health, Speech recognition, State of the art
@inproceedings{weerasinghe_real-time_2024,
title = {Real-Time Multimodal Cognitive Assistant for Emergency Medical Services},
author = {K. Weerasinghe and S. Janapati and X. Ge and S. Kim and S. Iyer and J. A. Stankovic and H. Alemzadeh},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85197769304&doi=10.1109%2fIoTDI61053.2024.00012&partnerID=40&md5=a3b7cf14e46ecb2d4e49905fb845f2c9},
doi = {10.1109/IoTDI61053.2024.00012},
isbn = {979-835037025-6 (ISBN)},
year = {2024},
date = {2024-01-01},
booktitle = {Proc. - ACM/IEEE Conf. Internet-of-Things Des. Implement., IoTDI},
pages = {85–96},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Emergency Medical Services (EMS) responders often operate under time-sensitive conditions, facing cognitive overload and inherent risks, requiring essential skills in critical thinking and rapid decision-making. This paper presents CognitiveEMS, an end-to-end wearable cognitive assistant system that can act as a collaborative virtual partner engaging in the real-time acquisition and analysis of multimodal data from an emergency scene and interacting with EMS responders through Augmented Reality (AR) smart glasses. CognitiveEMS processes the continuous streams of data in real-time and leverages edge computing to provide assistance in EMS protocol selection and intervention recognition. We address key technical challenges in real-time cognitive assistance by introducing three novel components: (i) a Speech Recognition model that is fine-tuned for real-world medical emergency conversations using simulated EMS audio recordings, augmented with synthetic data generated by large language models (LLMs); (ii) an EMS Protocol Prediction model that combines state-of-the-art (SOTA) tiny language models with EMS domain knowledge using graph-based attention mechanisms; (iii) an EMS Action Recognition module which leverages multimodal audio and video data and protocol predictions to infer the intervention/treatment actions taken by the responders at the incident scene. Our results show that for speech recognition we achieve superior performance compared to SOTA (WER of 0.290 vs. 0.618) on conversational data. Our protocol prediction component also significantly outperforms SOTA (top-3 accuracy of 0.800 vs. 0.200) and the action recognition achieves an accuracy of 0.727, while maintaining an end-to-end latency of 3.78s for protocol prediction on the edge and 0.31s on the server. © 2024 IEEE.},
keywords = {Artificial intelligence, Augmented Reality, Cognitive Assistance, Computational Linguistics, Decision making, Domain knowledge, Edge computing, Emergency medical services, Forecasting, Graphic methods, Language Model, machine learning, Machine-learning, Multi-modal, Real- time, Service protocols, Smart Health, Speech recognition, State of the art},
pubstate = {published},
tppubtype = {inproceedings}
}
Do, M. D.; Dahlem, N.; Paulus, M.; Krick, M.; Steffny, L.; Werth, D.
“Furnish Your Reality” - Intelligent Mobile AR Application for Personalized Furniture Proceedings Article
In: J., Wei; G., Margetis (Ed.): Lect. Notes Comput. Sci., pp. 196–210, Springer Science and Business Media Deutschland GmbH, 2024, ISBN: 03029743 (ISSN); 978-303160457-7 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Augmented Reality, Augmented reality applications, Electronic commerce, Generative AI, generative artificial intelligence, Human computer interaction, Human computer interfaces, LiDAR, Mobile augmented reality, Mobile human computer interface, Mobile Human Computer Interfaces, Personalized product design, Personalized products, Phygital customer journey, Physical environments, Product design, Recommender system, Recommender systems, Sales, User centered design, User interfaces, User-centered design
@inproceedings{do_furnish_2024,
title = {“Furnish Your Reality” - Intelligent Mobile AR Application for Personalized Furniture},
author = {M. D. Do and N. Dahlem and M. Paulus and M. Krick and L. Steffny and D. Werth},
editor = {Wei J. and Margetis G.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85196202642&doi=10.1007%2f978-3-031-60458-4_14&partnerID=40&md5=017510be06c286789867235cfd98bb36},
doi = {10.1007/978-3-031-60458-4_14},
isbn = {03029743 (ISSN); 978-303160457-7 (ISBN)},
year = {2024},
date = {2024-01-01},
booktitle = {Lect. Notes Comput. Sci.},
volume = {14737 LNCS},
pages = {196–210},
publisher = {Springer Science and Business Media Deutschland GmbH},
abstract = {Today’s online retailers are faced with the challenge of providing a convenient solution for their customers to browse through a wide range of products. Simultaneously, they must meet individual customer needs by creating unique, personalized, one-of-a-kind items. Technological advances in areas such as Augmented Reality (AR), Artificial Intelligence (AI) or sensors (e.g. LiDAR), have the potential to address these challenges by enhancing the customer experience in new ways. One option is to implement “phygital” commerce solutions, which combines the benefits of physical and digital environments to improve the customer journey. This work presents a concept for a mobile AR application that integrates LiDAR and an AI-powered recommender system to create a unique phygital customer journey in the context of furniture shopping. The combination of AR, LiDAR and AI enables an accurate immersive experience along with personalized product designs. This concept aims to deliver benefits in terms of usability, convenience, time savings and user experience, while bridging the gap between mass-produced and personalized products. The new possibilities for merging virtual with physical environments hold immense potential, but this work also highlights challenges for customers as well as for online platform providers and future researchers. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2024.},
keywords = {Artificial intelligence, Augmented Reality, Augmented reality applications, Electronic commerce, Generative AI, generative artificial intelligence, Human computer interaction, Human computer interfaces, LiDAR, Mobile augmented reality, Mobile human computer interface, Mobile Human Computer Interfaces, Personalized product design, Personalized products, Phygital customer journey, Physical environments, Product design, Recommender system, Recommender systems, Sales, User centered design, User interfaces, User-centered design},
pubstate = {published},
tppubtype = {inproceedings}
}
Torre, F. De La; Fang, C. M.; Huang, H.; Banburski-Fahey, A.; Fernandez, J. A.; Lanier, J.
LLMR: Real-time Prompting of Interactive Worlds using Large Language Models Proceedings Article
In: Conf Hum Fact Comput Syst Proc, Association for Computing Machinery, 2024, ISBN: 979-840070330-0 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Computational Linguistics, Design goal, Interactive computer graphics, Interactive worlds, Internal dynamics, Language Model, Large language model, Mixed reality, Novel strategies, Real- time, Spatial Reasoning, Training data
@inproceedings{de_la_torre_llmr_2024,
title = {LLMR: Real-time Prompting of Interactive Worlds using Large Language Models},
author = {F. De La Torre and C. M. Fang and H. Huang and A. Banburski-Fahey and J. A. Fernandez and J. Lanier},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85194848276&doi=10.1145%2f3613904.3642579&partnerID=40&md5=14969e96507a1f0110262021e5b1172d},
doi = {10.1145/3613904.3642579},
isbn = {979-840070330-0 (ISBN)},
year = {2024},
date = {2024-01-01},
booktitle = {Conf Hum Fact Comput Syst Proc},
publisher = {Association for Computing Machinery},
abstract = {We present Large Language Model for Mixed Reality (LLMR), a framework for the real-time creation and modification of interactive Mixed Reality experiences using LLMs. LLMR leverages novel strategies to tackle difficult cases where ideal training data is scarce, or where the design goal requires the synthesis of internal dynamics, intuitive analysis, or advanced interactivity. Our framework relies on text interaction and the Unity game engine. By incorporating techniques for scene understanding, task planning, self-debugging, and memory management, LLMR outperforms the standard GPT-4 by 4x in average error rate. We demonstrate LLMR's cross-platform interoperability with several example worlds, and evaluate it on a variety of creation and modification tasks to show that it can produce and edit diverse objects, tools, and scenes. Finally, we conducted a usability study (N=11) with a diverse set that revealed participants had positive experiences with the system and would use it again. © 2024 Copyright held by the owner/author(s)},
keywords = {Artificial intelligence, Computational Linguistics, Design goal, Interactive computer graphics, Interactive worlds, Internal dynamics, Language Model, Large language model, Mixed reality, Novel strategies, Real- time, Spatial Reasoning, Training data},
pubstate = {published},
tppubtype = {inproceedings}
}
Truong, V. T.; Le, H. D.; Le, L. B.
Trust-Free Blockchain Framework for AI-Generated Content Trading and Management in Metaverse Journal Article
In: IEEE Access, vol. 12, pp. 41815–41828, 2024, ISSN: 21693536 (ISSN).
Abstract | Links | BibTeX | Tags: AI-generated content, AI-generated content (AIGC), Artificial intelligence, Asset management, Assets management, Block-chain, Blockchain, Commerce, Content distribution networks, Cyber-attacks, Decentralised, Decentralized application, Digital asset management, Digital system, Generative AI, Metaverse, Metaverses, Plagiarism, Security, Trustless service, Virtual Reality
@article{truong_trust-free_2024,
title = {Trust-Free Blockchain Framework for {AI}-Generated Content Trading and Management in {Metaverse}},
author = {V. T. Truong and H. D. Le and L. B. Le},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85188472793&doi=10.1109%2fACCESS.2024.3376509&partnerID=40&md5=301939c1faef0c5a7b56d9feadce27ee},
doi = {10.1109/ACCESS.2024.3376509},
issn = {2169-3536},
year = {2024},
date = {2024-01-01},
journal = {IEEE Access},
volume = {12},
pages = {41815--41828},
abstract = {The rapid development of the metaverse and generative Artificial Intelligence (GAI) has led to the emergence of AI-Generated Content (AIGC). Unlike real-world products, AIGCs are represented as digital files, thus vulnerable to plagiarism and leakage on the Internet. In addition, the trading of AIGCs in the virtual world is prone to various trust issues between the involved participants. For example, some customers may try to avoid the payment after receiving the desired AIGC products, or the content sellers refuse to grant the products after obtaining the license fee. Existing digital asset management (DAM) systems often rely on a trusted third-party authority to mitigate these issues. However, this might lead to centralization problems such as the single-point-of-failure (SPoF) when the third parties are under attacks or being malicious. In this paper, we propose MetaTrade, a blockchain-empowered DAM framework that is designed to tackle these urgent trust issues, offering secured AIGC trading and management in the trustless metaverse environment. MetaTrade eliminates the role of the trusted third party, without requiring trust assumptions among participants. Numerical results show that MetaTrade offers higher performance and lower trading cost compared to existing platforms, while security analysis reveals that the framework is resilient against plagiarism, SPoF, and trust-related attacks. To showcase the feasibility of the design, a decentralized application (DApp) has been built on top of MetaTrade as a marketplace for metaverse AIGCs. © 2013 IEEE.},
keywords = {AI-generated content, AI-generated content (AIGC), Artificial intelligence, Asset management, Assets management, Block-chain, Blockchain, Commerce, Content distribution networks, Cyber-attacks, Decentralised, Decentralized application, Digital asset management, Digital system, Generative AI, Metaverse, Metaverses, Plagiarism, Security, Trustless service, Virtual Reality},
pubstate = {published},
tppubtype = {article}
}
Omirgaliyev, R.; Kenzhe, D.; Mirambekov, S.
Simulating life: the application of generative agents in virtual environments Proceedings Article
In: IEEE AITU: Digit. Gener., Conf. Proc. - AITU, pp. 181–187, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-835036437-8.
Abstract | Links | BibTeX | Tags: Artificial intelligence, Artificial intelligence agent, Artificial Intelligence Agents, Autonomous agents, Behavioral Research, Behaviour models, Computational Linguistics, Decision making, Dynamics, Dynamics simulation, Economic and social effects, Game Development, Game environment, Language Model, Large language model, large language models, Modeling languages, Social dynamic simulation, Social dynamics, Social Dynamics Simulation, Software design, Virtual Reality, Virtual Societies
@inproceedings{omirgaliyev_simulating_2024,
title = {Simulating life: the application of generative agents in virtual environments},
author = {R. Omirgaliyev and D. Kenzhe and S. Mirambekov},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85199876250&doi=10.1109%2fIEEECONF61558.2024.10585387&partnerID=40&md5=70f8b598d10bec13c39d3506a15534a1},
doi = {10.1109/IEEECONF61558.2024.10585387},
isbn = {979-835036437-8},
year = {2024},
date = {2024-01-01},
booktitle = {IEEE AITU: Digit. Gener., Conf. Proc. - AITU},
pages = {181--187},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {This research explores the innovative integration of Large Language Models (LLMs) in game development, focusing on the autonomous creation, development, and governance of a virtual village by AI agents within a 2D game environment. The core of this study lies in observing and analyzing the interactions and societal development among AI agents, utilizing advanced algorithms for generative behavior modeling and dynamic skill tree learning. These AI agents are endowed with human-like decision-making capabilities, enabled by LLMs, allowing them to engage in complex social interactions and contribute to emergent societal structures within the game. The uniqueness of this project stems from its approach to simulating lifelike social dynamics in a virtual setting, thus addressing a gap in existing research and marking a significant contribution to the interdisciplinary fields of artificial intelligence and game development. By comparing AI-generated societal behaviors with human social interactions, the study delves into the potential of AI to mirror or enhance human social structures, offering a fresh perspective on the capabilities of AI in game development. This research not only aims to push the boundaries of AI applications in game development but also seeks to provide valuable insights into the potential for AI-driven simulations in studying complex social and behavioral dynamics. ©2024 IEEE.},
keywords = {Artificial intelligence, Artificial intelligence agent, Artificial Intelligence Agents, Autonomous agents, Behavioral Research, Behaviour models, Computational Linguistics, Decision making, Dynamics, Dynamics simulation, Economic and social effects, Game Development, Game environment, Language Model, Large language model, large language models, Modeling languages, Social dynamic simulation, Social dynamics, Social Dynamics Simulation, Software design, Virtual Reality, Virtual Societies},
pubstate = {published},
tppubtype = {inproceedings}
}
Ivanova, M.; Grosseck, G.; Holotescu, C.
Unveiling Insights: A Bibliometric Analysis of Artificial Intelligence in Teaching Journal Article
In: Informatics, vol. 11, no. 1, 2024, ISSN: 2227-9709.
Abstract | Links | BibTeX | Tags: Artificial intelligence, ChatGPT, Intelligent Environment, large language models, learning analytics, Teaching
@article{ivanova_unveiling_2024,
title = {Unveiling Insights: A Bibliometric Analysis of Artificial Intelligence in Teaching},
author = {M. Ivanova and G. Grosseck and C. Holotescu},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85188949348&doi=10.3390%2finformatics11010010&partnerID=40&md5=aaf44928fb594e2807234da0f3799437},
doi = {10.3390/informatics11010010},
issn = {2227-9709},
year = {2024},
date = {2024-01-01},
journal = {Informatics},
volume = {11},
number = {1},
abstract = {The penetration of intelligent applications in education is rapidly increasing, posing a number of questions of a different nature to the educational community. This paper is coming to analyze and outline the influence of artificial intelligence (AI) on teaching practice which is an essential problem considering its growing utilization and pervasion on a global scale. A bibliometric approach is applied to outdraw the “big picture” considering gathered bibliographic data from scientific databases Scopus and Web of Science. Data on relevant publications matching the query “artificial intelligence and teaching” over the past 5 years have been researched and processed through Biblioshiny in R environment in order to establish a descriptive structure of the scientific production, to determine the impact of scientific publications, to trace collaboration patterns and to identify key research areas and emerging trends. The results point out the growth in scientific production lately that is an indicator of increased interest in the investigated topic by researchers who mainly work in collaborative teams as some of them are from different countries and institutions. The identified key research areas include techniques used in educational applications, such as artificial intelligence, machine learning, and deep learning. Additionally, there is a focus on applicable technologies like ChatGPT, learning analytics, and virtual reality. The research also explores the context of application for these techniques and technologies in various educational settings, including teaching, higher education, active learning, e-learning, and online learning. Based on our findings, the trending research topics can be encapsulated by terms such as ChatGPT, chatbots, AI, generative AI, machine learning, emotion recognition, large language models, convolutional neural networks, and decision theory. These findings offer valuable insights into the current landscape of research interests in the field. © 2024 by the authors.},
keywords = {Artificial intelligence, ChatGPT, Intelligent Environment, large language models, learning analytics, Teaching},
pubstate = {published},
tppubtype = {article}
}
Jeong, E.; Kim, H.; Park, S.; Yoon, S.; Ahn, J.; Woo, W.
Function-Adaptive Affordance Extraction from 3D Objects Using LLM for Interaction Authoring with Augmented Artifacts Proceedings Article
In: U. Eck; M. Sra; J. Stefanucci; M. Sugimoto; M. Tatzgern; I. Williams (Ed.): Proc. - IEEE Int. Symp. Mixed Augment. Real. Adjunct, ISMAR-Adjunct, pp. 205–208, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-833150691-9.
Abstract | Links | BibTeX | Tags: 3D modeling, Applied computing, Art and humanity, Artificial intelligence, Arts and humanities, Augmented Reality, Computer interaction, Computer vision, Computing methodologies, computing methodology, Human computer interaction, Human computer interaction (HCI), Human-centered computing, Humanities computing, Interaction paradigm, Interaction paradigms, Language processing, Mixed / augmented reality, Mixed reality, Modeling languages, Natural Language Processing, Natural language processing systems, Natural languages, Three dimensional computer graphics
@inproceedings{jeong_function-adaptive_2024,
title = {Function-Adaptive Affordance Extraction from {3D} Objects Using {LLM} for Interaction Authoring with Augmented Artifacts},
author = {E. Jeong and H. Kim and S. Park and S. Yoon and J. Ahn and W. Woo},
editor = {Eck, U. and Sra, M. and Stefanucci, J. and Sugimoto, M. and Tatzgern, M. and Williams, I.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85214379963&doi=10.1109%2fISMAR-Adjunct64951.2024.00050&partnerID=40&md5=7222e0599a7e2aa0adaea38e4b9e13cc},
doi = {10.1109/ISMAR-Adjunct64951.2024.00050},
isbn = {979-833150691-9},
year = {2024},
date = {2024-01-01},
booktitle = {Proc. - IEEE Int. Symp. Mixed Augment. Real. Adjunct, ISMAR-Adjunct},
pages = {205--208},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {We propose an algorithm that extracts the most suitable affordances, interaction targets, and corresponding coordinates adaptively from 3D models of various artifacts based on their functional context for efficient authoring of XR content with artifacts. Traditionally, authoring AR scenes to convey artifact context required one-to-one manual work. Our approach leverages a Large Language Model (LLM) to extract interaction types, positions, and subjects based on the artifact's name and usage context. This enables templated XR experience creation, replacing repetitive manual labor. Consequently, our system streamlines the XR authoring process, making it more efficient and scalable. © 2024 IEEE.},
keywords = {3D modeling, Applied computing, Art and humanity, Artificial intelligence, Arts and humanities, Augmented Reality, Computer interaction, Computer vision, Computing methodologies, computing methodology, Human computer interaction, Human computer interaction (HCI), Human-centered computing, Humanities computing, Interaction paradigm, Interaction paradigms, Language processing, Mixed / augmented reality, Mixed reality, Modeling languages, Natural Language Processing, Natural language processing systems, Natural languages, Three dimensional computer graphics},
pubstate = {published},
tppubtype = {inproceedings}
}