AHCI RESEARCH GROUP
Publications
Papers published in international journals,
proceedings of conferences, workshops and books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTeX record for each paper.
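For example, to cite one of the papers below in a LaTeX document, copy its BibTeX record into a bibliography file and reference it by its cite key. A minimal sketch follows; the file name refs.bib and the plain bibliography style are illustrative assumptions, not part of the records themselves:

\documentclass{article}
\begin{document}
% The cite key casas_holojig_2025 is taken from the first record below.
HoloJig generates VR environments from spoken prompts \cite{casas_holojig_2025}.
\bibliographystyle{plain}
\bibliography{refs} % assumes the copied records are saved in refs.bib
\end{document}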
2025
Casas, L.; Hannah, S.; Mitchell, K.
HoloJig: Interactive Spoken Prompt Specified Generative AI Environments Journal Article
In: IEEE Computer Graphics and Applications, vol. 45, no. 2, pp. 69–77, 2025, ISSN: 0272-1716; 1558-1756, (Publisher: IEEE Computer Society).
@article{casas_holojig_2025,
title = {HoloJig: Interactive Spoken Prompt Specified Generative AI Environments},
author = {L. Casas and S. Hannah and K. Mitchell},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105001182100&doi=10.1109%2FMCG.2025.3553780&partnerID=40&md5=9fafa25e4b6ddc9d2fe32d813fbabb20},
doi = {10.1109/MCG.2025.3553780},
issn = {0272-1716; 1558-1756},
year = {2025},
date = {2025-01-01},
journal = {IEEE Computer Graphics and Applications},
volume = {45},
number = {2},
pages = {69–77},
abstract = {HoloJig offers an interactive speech-to-virtual-reality (VR) experience that generates diverse environments in real time based on live spoken descriptions. Unlike traditional VR systems that rely on prebuilt assets, HoloJig dynamically creates personalized and immersive virtual spaces with depth-based parallax 3-D rendering, allowing users to define the characteristics of their immersive environment through verbal prompts. This generative approach opens up new possibilities for interactive experiences, including simulations, training, collaborative workspaces, and entertainment. In addition to speech-to-VR environment generation, a key innovation of HoloJig is its progressive visual transition mechanism, which smoothly dissolves between previously generated and newly requested environments, mitigating the delay caused by neural computations. This feature ensures a seamless and continuous user experience, even as new scenes are being rendered on remote servers.},
note = {Publisher: IEEE Computer Society},
keywords = {3-D rendering, Article, Collaborative workspace, customer experience, Economic and social effects, generative artificial intelligence, human, Immersive, Immersive environment, parallax, Real-time, simulation, Simulation training, speech, Time based, Virtual environments, Virtual Reality, Virtual reality experiences, Virtual spaces, VR systems},
pubstate = {published},
tppubtype = {article}
}
Alibrahim, Y.; Ibrahim, M.; Gurdayal, D.; Munshi, M.
AI speechbots and 3D segmentations in virtual reality improve radiology on-call training in resource-limited settings Journal Article
In: Intelligence-Based Medicine, vol. 11, 2025, ISSN: 2666-5212, (Publisher: Elsevier B.V.).
@article{alibrahim_ai_2025,
title = {AI speechbots and 3D segmentations in virtual reality improve radiology on-call training in resource-limited settings},
author = {Y. Alibrahim and M. Ibrahim and D. Gurdayal and M. Munshi},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105001472313&doi=10.1016%2Fj.ibmed.2025.100245&partnerID=40&md5=981139e173e781b67dba5a46be64de31},
doi = {10.1016/j.ibmed.2025.100245},
issn = {2666-5212},
year = {2025},
date = {2025-01-01},
journal = {Intelligence-Based Medicine},
volume = {11},
abstract = {Objective: Evaluate the use of large-language model (LLM) speechbot tools and deep learning-assisted generation of 3D reconstructions when integrated in a virtual reality (VR) setting to teach radiology on-call topics to radiology residents. Methods: Three first-year radiology residents in Guyana were enrolled in an 8-week radiology course that focused on preparation for on-call duties. The course was delivered via VR headsets running custom software that integrated LLM-powered speechbots trained on imaging reports and 3D reconstructions segmented with the help of a deep learning model. Each session focused on a specific radiology area, employing a didactic and case-based learning approach enhanced with 3D reconstructions and an LLM-powered speechbot. Post-session, residents reassessed their knowledge and provided feedback on their VR and LLM-powered speechbot experiences. Results/discussion: Residents found the 3D reconstructions, segmented semi-automatically by deep learning algorithms, and the AI-driven self-learning via speechbot highly valuable. The 3D reconstructions, especially in the interventional radiology session, were helpful, and their benefit was amplified in VR, where navigating the models is seamless and the perception of depth is pronounced. Residents also found conversing with the AI speechbot seamless and valuable for their post-session self-learning. The major drawback of VR was motion sickness, which was mild and improved over time. Conclusion: AI-assisted VR radiology education could be used to develop new and accessible ways of teaching a variety of radiology topics in a seamless and cost-effective way. This could be especially useful in supporting radiology education remotely in regions that lack local radiology expertise.},
note = {Publisher: Elsevier B.V.},
keywords = {3D segmentation, AI speechbots, Article, artificial intelligence chatbot, ChatGPT, computer assisted tomography, Deep learning, headache, human, Image segmentation, interventional radiology, Large language model, Likert scale, nausea, Proof of concept, prospective study, radiology, radiology on call training, resource limited setting, Teaching, Training, ultrasound, Virtual Reality, voice recognition},
pubstate = {published},
tppubtype = {article}
}
Stacchio, L.; Balloni, E.; Frontoni, E.; Paolanti, M.; Zingaretti, P.; Pierdicca, R.
MineVRA: Exploring the Role of Generative AI-Driven Content Development in XR Environments through a Context-Aware Approach Journal Article
In: IEEE Transactions on Visualization and Computer Graphics, vol. 31, no. 5, pp. 3602–3612, 2025, ISSN: 1077-2626, (Publisher: IEEE Computer Society).
@article{stacchio_minevra_2025,
title = {MineVRA: Exploring the Role of Generative AI-Driven Content Development in XR Environments through a Context-Aware Approach},
author = {L. Stacchio and E. Balloni and E. Frontoni and M. Paolanti and P. Zingaretti and R. Pierdicca},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105003746367&doi=10.1109%2FTVCG.2025.3549160&partnerID=40&md5=3356eb968b3e6a0d3c9b75716b05fac4},
doi = {10.1109/TVCG.2025.3549160},
issn = {1077-2626},
year = {2025},
date = {2025-01-01},
journal = {IEEE Transactions on Visualization and Computer Graphics},
volume = {31},
number = {5},
pages = {3602–3612},
abstract = {The convergence of Artificial Intelligence (AI), Computer Vision (CV), Computer Graphics (CG), and Extended Reality (XR) is driving innovation in immersive environments. A key challenge in these environments is the creation of personalized 3D assets, traditionally achieved through manual modeling, a time-consuming process that often fails to meet individual user needs. More recently, Generative AI (GenAI) has emerged as a promising solution for automated, context-aware content generation. In this paper, we present MineVRA (Multimodal generative artificial iNtelligence for contExt-aware Virtual Reality Assets), a novel Human-In-The-Loop (HITL) XR framework that integrates GenAI to facilitate coherent and adaptive 3D content generation in immersive scenarios. To evaluate the effectiveness of this approach, we conducted a comparative user study analyzing the performance and user satisfaction of GenAI-generated 3D objects compared to those sourced from Sketchfab in different immersive contexts. The results suggest that GenAI can significantly complement traditional 3D asset libraries, with valuable design implications for the development of human-centered XR environments.},
note = {Publisher: IEEE Computer Society},
keywords = {adult, Article, Artificial intelligence, Computer graphics, Computer vision, Content Development, Contents development, Context-Aware, Context-aware approaches, Extended reality, female, Generative adversarial networks, Generative AI, generative artificial intelligence, human, Human-in-the-loop, Immersive, Immersive environment, male, Multi-modal, User need, Virtual environments, Virtual Reality},
pubstate = {published},
tppubtype = {article}
}
Kim, Y.; Aamir, Z.; Singh, M.; Boorboor, S.; Mueller, K.; Kaufman, A. E.
Explainable XR: Understanding User Behaviors of XR Environments Using LLM-Assisted Analytics Framework Journal Article
In: IEEE Transactions on Visualization and Computer Graphics, vol. 31, no. 5, pp. 2756–2766, 2025, ISSN: 1077-2626, (Publisher: IEEE Computer Society).
@article{kim_explainable_2025,
title = {Explainable XR: Understanding User Behaviors of XR Environments Using LLM-Assisted Analytics Framework},
author = {Y. Kim and Z. Aamir and M. Singh and S. Boorboor and K. Mueller and A. E. Kaufman},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105003815583&doi=10.1109%2FTVCG.2025.3549537&partnerID=40&md5=bc5ac38eb19faa224282cf385f43799f},
doi = {10.1109/TVCG.2025.3549537},
issn = {1077-2626},
year = {2025},
date = {2025-01-01},
journal = {IEEE Transactions on Visualization and Computer Graphics},
volume = {31},
number = {5},
pages = {2756–2766},
abstract = {We present Explainable XR, an end-to-end framework for analyzing user behavior in diverse eXtended Reality (XR) environments by leveraging Large Language Models (LLMs) for data interpretation assistance. Existing XR user analytics frameworks face challenges in handling cross-virtuality (AR, VR, MR) transitions, multi-user collaborative application scenarios, and the complexity of multimodal data. Explainable XR addresses these challenges by providing a virtuality-agnostic solution for the collection, analysis, and visualization of immersive sessions. We propose three main components in our framework: (1) a novel user data recording schema, called User Action Descriptor (UAD), that captures users' multimodal actions along with their intents and contexts; (2) a platform-agnostic XR session recorder; and (3) a visual analytics interface that offers LLM-assisted insights tailored to the analysts' perspectives, facilitating the exploration and analysis of the recorded XR session data. We demonstrate the versatility of Explainable XR through five use-case scenarios, in both individual and collaborative XR applications across virtualities. Our technical evaluation and user studies show that Explainable XR provides a highly usable analytics solution for understanding user actions and delivering multifaceted, actionable insights into user behaviors in immersive environments.},
note = {Publisher: IEEE Computer Society},
keywords = {adult, Agnostic, Article, Assistive, Cross Reality, Data Analytics, Data collection, data interpretation, Data recording, Data visualization, Extended reality, human, Language Model, Large language model, large language models, Multi-modal, Multimodal Data Collection, normal human, Personalized assistive technique, Personalized Assistive Techniques, recorder, Spatio-temporal data, therapy, user behavior, User behaviors, Virtual addresses, Virtual environments, Virtual Reality, Visual analytics, Visual languages},
pubstate = {published},
tppubtype = {article}
}
Afzal, M. Z.; Ali, S. K. A.; Stricker, D.; Eisert, P.; Hilsmann, A.; Pérez-Marcos, D.; Bianchi, M.; Crottaz-Herbette, S.; Ioris, R.; Mangina, E.; Sanguineti, M.; Salaberria, A.; de Lacalle, O. Lopez; García-Pablos, A.; Cuadros, M.
Next Generation XR Systems - Large Language Models Meet Augmented and Virtual Reality Journal Article
In: IEEE Computer Graphics and Applications, vol. 45, no. 1, pp. 43–55, 2025, ISSN: 0272-1716; 1558-1756, (Publisher: IEEE Computer Society).
@article{afzal_next_2025,
title = {Next Generation XR Systems - Large Language Models Meet Augmented and Virtual Reality},
author = {M. Z. Afzal and S. K. A. Ali and D. Stricker and P. Eisert and A. Hilsmann and D. Pérez-Marcos and M. Bianchi and S. Crottaz-Herbette and R. Ioris and E. Mangina and M. Sanguineti and A. Salaberria and O. Lopez de Lacalle and A. García-Pablos and M. Cuadros},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105003598602&doi=10.1109%2FMCG.2025.3548554&partnerID=40&md5=94e7efe987708afc9f066b906ce232b1},
doi = {10.1109/MCG.2025.3548554},
issn = {0272-1716; 1558-1756},
year = {2025},
date = {2025-01-01},
journal = {IEEE Computer Graphics and Applications},
volume = {45},
number = {1},
pages = {43–55},
abstract = {Extended reality (XR) is evolving rapidly, offering new paradigms for human-computer interaction. This position paper argues that integrating large language models (LLMs) with XR systems represents a fundamental shift toward more intelligent, context-aware, and adaptive mixed-reality experiences. We propose a structured framework built on three key pillars: (1) perception and situational awareness; (2) knowledge modeling and reasoning; and (3) visualization and interaction. We believe leveraging LLMs within XR environments enables enhanced situational awareness, real-time knowledge retrieval, and dynamic user interaction, surpassing traditional XR capabilities. We highlight the potential of this integration in neurorehabilitation, safety training, and architectural design while underscoring ethical considerations, such as privacy, transparency, and inclusivity. This vision aims to spark discussion and drive research toward more intelligent, human-centric XR systems.},
note = {Publisher: IEEE Computer Society},
keywords = {adult, Article, Augmented and virtual realities, Augmented Reality, Awareness, Context-Aware, human, Information Retrieval, Knowledge model, Knowledge reasoning, Knowledge retrieval, Language Model, Large language model, Mixed reality, neurorehabilitation, Position papers, privacy, Real-time, Reasoning, Situational awareness, Virtual environments, Virtual Reality},
pubstate = {published},
tppubtype = {article}
}
Salinas, C. S.; Magudia, K.; Sangal, A.; Ren, L.; Segars, W. P.
In-silico CT simulations of deep learning generated heterogeneous phantoms Journal Article
In: Biomedical Physics and Engineering Express, vol. 11, no. 4, 2025, ISSN: 2057-1976, (Publisher: Institute of Physics).
@article{salinas_-silico_2025,
title = {In-silico CT simulations of deep learning generated heterogeneous phantoms},
author = {C. S. Salinas and K. Magudia and A. Sangal and L. Ren and W. P. Segars},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105010297226&doi=10.1088%2F2057-1976%2Fade9c9&partnerID=40&md5=47f211fd93f80e407dcd7e4c490976c2},
doi = {10.1088/2057-1976/ade9c9},
issn = {2057-1976},
year = {2025},
date = {2025-01-01},
journal = {Biomedical Physics and Engineering Express},
volume = {11},
number = {4},
abstract = {Current virtual imaging phantoms primarily emphasize the geometric accuracy of anatomical structures. However, to enhance realism, it is also important to incorporate intra-organ detail. Because biological tissues are heterogeneous in composition, virtual phantoms should reflect this by including realistic intra-organ texture and material variation. We propose training two 3D Double U-Net conditional generative adversarial networks (3D DUC-GAN) to generate sixteen unique textures that encompass organs found within the torso. The model was trained on 378 CT image-segmentation pairs taken from a publicly available dataset, with 18 additional pairs reserved for testing. Textured phantoms were generated and imaged using DukeSim, a virtual CT simulation platform. Results showed that the deep learning model was able to synthesize realistic heterogeneous phantoms from a set of homogeneous phantoms. These phantoms were compared with original CT scans and had a mean absolute difference of 46.15 ± 1.06 HU. The structural similarity index (SSIM) and peak signal-to-noise ratio (PSNR) were 0.86 ± 0.004 and 28.62 ± 0.14, respectively. The maximum mean discrepancy between the generated and actual distributions was 0.0016. These metrics marked improvements of 27%, 5.9%, 6.2%, and 28%, respectively, compared to current homogeneous texture methods. The phantoms that underwent a virtual CT scan bore a closer visual resemblance to the true CT scans than those produced by the previous method. The resulting heterogeneous phantoms offer a significant step toward more realistic in silico trials, enabling enhanced simulation of imaging procedures with greater fidelity to true anatomical variation.},
note = {Publisher: Institute of Physics},
keywords = {adult, algorithm, Algorithms, anatomical concepts, anatomical location, anatomical variation, Article, Biological organs, bladder, Bone, bone marrow, CGAN, colon, comparative study, computer assisted tomography, Computer graphics, computer model, Computer Simulation, Computer-Assisted, Computerized tomography, CT organ texture, CT organ textures, CT scanners, CT synthesis, CT-scan, Deep learning, fluorodeoxyglucose f 18, Generative Adversarial Network, Generative AI, histogram, human, human tissue, Humans, III-V semiconductors, image analysis, Image processing, Image segmentation, Image texture, Imaging, imaging phantom, intra-abdominal fat, kidney blood vessel, Learning systems, liver, lung, major clinical study, male, mean absolute error, Medical Imaging, neoplasm, Phantoms, procedures, prostate muscle, radiological parameters, signal noise ratio, Signal to noise ratio, Signal-To-Noise Ratio, simulation, Simulation platform, small intestine, Statistical tests, stomach, structural similarity index, subcutaneous fat, Textures, three dimensional double u net conditional generative adversarial network, Three-Dimensional, three-dimensional imaging, Tomography, Virtual CT scanner, Virtual Reality, Virtual trial, virtual trials, whole body CT, X-Ray Computed, x-ray computed tomography},
pubstate = {published},
tppubtype = {article}
}
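For reference, the SSIM and PSNR values reported in the abstract above follow the standard definitions of these image-similarity metrics; the formulas below are the conventional ones, not notation taken from the paper:

\[
\mathrm{PSNR} = 10 \log_{10}\!\left(\frac{\mathrm{MAX}_I^{2}}{\mathrm{MSE}}\right),
\qquad
\mathrm{SSIM}(x, y) = \frac{(2\mu_x\mu_y + c_1)(2\sigma_{xy} + c_2)}{(\mu_x^2 + \mu_y^2 + c_1)(\sigma_x^2 + \sigma_y^2 + c_2)},
\]

where \(\mathrm{MAX}_I\) is the maximum possible pixel intensity, \(\mathrm{MSE}\) is the mean squared error between the two images, the \(\mu\) and \(\sigma\) terms are local means, variances, and covariance, and \(c_1, c_2\) are small constants that stabilize the division.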
Tian, Y.; Li, X.; Cheng, Z.; Huang, Y.; Yu, T.
Design of Realistic and Artistically Expressive 3D Facial Models for Film AIGC: A Cross-Modal Framework Integrating Audience Perception Evaluation Journal Article
In: Sensors, vol. 25, no. 15, 2025, ISSN: 1424-8220, (Publisher: Multidisciplinary Digital Publishing Institute (MDPI)).
@article{tian_design_2025,
title = {Design of Realistic and Artistically Expressive 3D Facial Models for Film AIGC: A Cross-Modal Framework Integrating Audience Perception Evaluation},
author = {Y. Tian and X. Li and Z. Cheng and Y. Huang and T. Yu},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105013137724&doi=10.3390%2Fs25154646&partnerID=40&md5=8508a27b693f0857ce7cb58e97a2705c},
doi = {10.3390/s25154646},
issn = {1424-8220},
year = {2025},
date = {2025-01-01},
journal = {Sensors},
volume = {25},
number = {15},
abstract = {The rise of virtual production has created an urgent need for efficient, high-fidelity 3D face generation schemes for cinema and immersive media, but existing methods are often limited by lighting–geometry coupling, multi-view dependency, and insufficient artistic quality. To address this, this study proposes a cross-modal 3D face generation framework based on single-view semantic masks. It utilizes a Swin Transformer for multi-level feature extraction, combined with NeRF for illumination-decoupled rendering. We utilize physical rendering equations to explicitly separate surface reflectance from ambient lighting, achieving robust adaptation to complex lighting variations. In addition, to address geometric errors across illumination scenes, we construct geometric prior constraint networks by mapping 2D facial features into a 3D parameter space as regularization terms with the help of semantic masks. On the CelebAMask-HQ dataset, this method achieves a leading score of SSIM = 0.892 (a 37.6% improvement over the baseline) with FID = 40.6. The generated faces excel in symmetry and detail fidelity, with realism and aesthetic scores of 8/10 and 7/10, respectively, in a perceptual evaluation with 1000 viewers. By combining physical-level illumination decoupling with semantic geometry priors, this paper establishes a quantifiable feedback mechanism between objective metrics and human aesthetic evaluation, providing a new paradigm for aesthetic quality assessment of AI-generated content.},
note = {Publisher: Multidisciplinary Digital Publishing Institute (MDPI)},
keywords = {3D faces, 3d facial model, 3D facial models, 3D modeling, adaptation, adult, Article, Audience perception evaluation, benchmarking, controlled study, Cross-modal, Face generation, Facial modeling, facies, Feature extraction, feedback, feedback system, female, Geometry, High-fidelity, human, illumination, Immersive media, Lighting, male, movie, Neural radiance field, Neural Radiance Fields, perception, Quality control, Rendering (computer graphics), Semantics, sensor, Three dimensional computer graphics, Virtual production, Virtual Reality},
pubstate = {published},
tppubtype = {article}
}
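The "physical rendering equations" referenced in the abstract above are reflectance-illumination models in the spirit of the classic rendering equation, shown here in its standard form (the paper's exact formulation may differ):

\[
L_o(\mathbf{x}, \omega_o) = L_e(\mathbf{x}, \omega_o) + \int_{\Omega} f_r(\mathbf{x}, \omega_i, \omega_o)\, L_i(\mathbf{x}, \omega_i)\, (\omega_i \cdot \mathbf{n})\, \mathrm{d}\omega_i,
\]

where the surface reflectance term \(f_r\) (the BRDF) is what such frameworks decouple from the incoming illumination \(L_i\).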
2024
Hubal, R.
Rethinking some Virtual Human Applications Journal Article
In: Annual Review of CyberTherapy and Telemedicine, vol. 22, pp. 28–33, 2024, ISSN: 1554-8716, (Publisher: Interactive Media Institute).
@article{hubal_rethinking_2024,
title = {Rethinking some Virtual Human Applications},
author = {R. Hubal},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85215435480&partnerID=40&md5=4526a7d54606ef0f1cc6234099eb4aae},
issn = {1554-8716},
year = {2024},
date = {2024-01-01},
journal = {Annual Review of CyberTherapy and Telemedicine},
volume = {22},
pages = {28–33},
abstract = {Increasingly realistic virtual environments incorporating virtual characters have been used to train or assess actual behavior, such as that of people at risk, and to identify reasons to remediate or intervene. Technology has improved so rapidly that today’s capabilities for creating situations on which to focus training and intervention outshine past efforts. To name just a few current examples, tools like Unreal’s MetaHuman Creator for creating characters, Midjourney for creating environments, OpenAI’s ChatGPT for scripting, and GIFT for tutoring have enormous potential, as these tools promise to reduce simulation costs and increase realism. This paper, in contrast, discusses some movement in the other direction: recent efforts suggest that increased realism may not always have a resulting cost-benefit for training and assessment. Lessons learned and recommendations are presented to guide future developers.},
note = {Publisher: Interactive Media Institute},
keywords = {Article, Artificial intelligence, character and application fidelity, ChatGPT, Consequential conversations, conversation, Engagement, human, Large language model, Learning, responsibility, responsive virtual humans, social competence, telehealth, Virtual Reality},
pubstate = {published},
tppubtype = {article}
}
Sheehy, L.; Bouchard, S.; Kakkar, A.; El-Hakim, R.; Lhoest, J.; Frank, A.
Development and Initial Testing of an Artificial Intelligence-Based Virtual Reality Companion for People Living with Dementia in Long-Term Care Journal Article
In: Journal of Clinical Medicine, vol. 13, no. 18, 2024, ISSN: 2077-0383, (Publisher: Multidisciplinary Digital Publishing Institute (MDPI)).
@article{sheehy_development_2024,
title = {Development and Initial Testing of an Artificial Intelligence-Based Virtual Reality Companion for People Living with Dementia in Long-Term Care},
author = {L. Sheehy and S. Bouchard and A. Kakkar and R. El-Hakim and J. Lhoest and A. Frank},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85205071099&doi=10.3390%2Fjcm13185574&partnerID=40&md5=7573674f31227aff8a6569b8748b5d08},
doi = {10.3390/jcm13185574},
issn = {2077-0383},
year = {2024},
date = {2024-01-01},
journal = {Journal of Clinical Medicine},
volume = {13},
number = {18},
abstract = {Background/Objectives: Feelings of loneliness are common in people living with dementia (PLWD) in long-term care (LTC). The goals of this study were to describe the development of a novel virtual companion for PLWD living in LTC and to assess its feasibility and acceptability. Methods: The computer-generated virtual companion, presented using a head-mounted virtual reality display, was developed in two stages. In Stage 1, the virtual companion asked questions designed to encourage conversation and reminiscence. In Stage 2, more powerful artificial intelligence tools allowed the virtual companion to engage users in nuanced discussions on any topic. PLWD in LTC tested the application at each stage to assess feasibility and acceptability. Results: Ten PLWD living in LTC participated in Stage 1 (4 men and 6 women; average age 82 years) and Stage 2 (2 men and 8 women; average age 87 years). Session lengths ranged from 0:00 to 5:30 min in Stage 1 and from 0:00 to 53:50 min in Stage 2. Speech recognition issues and a limited repertoire of questions limited acceptance in Stage 1. Enhanced conversational ability in Stage 2 led to intimate and meaningful conversations with many participants. Many users found the head-mounted display heavy. There were no complaints of simulator sickness. The virtual companion was best suited to PLWD who could engage in reciprocal conversation. After Stage 2, response latency was identified as an opportunity for improvement in future versions. Conclusions: Virtual reality and artificial intelligence can be used to create a virtual companion that is acceptable and enjoyable to some PLWD living in LTC. Ongoing innovations in hardware and software will allow future iterations to provide more natural conversational interaction and an enhanced social experience.},
note = {Publisher: Multidisciplinary Digital Publishing Institute (MDPI)},
keywords = {aged, Article, Artificial intelligence, cognitive decline, cognitive impairment, compassion, conversation, Dementia, Elderly, female, human, large language models, long term care, long-term care, major clinical study, male, program acceptability, program feasibility, reaction time, reminiscence, speech discrimination, very elderly, Virtual Reality},
pubstate = {published},
tppubtype = {article}
}
2023
Vlasov, A. V.
GALA Inspired by Klimt's Art: Text-to-image Processing with Implementation in Interaction and Perception Studies: Library and Case Examples Journal Article
In: Annual Review of CyberTherapy and Telemedicine, vol. 21, pp. 200–205, 2023, ISSN: 1554-8716, (Publisher: Interactive Media Institute).
@article{vlasov_gala_2023,
title = {GALA Inspired by Klimt's Art: Text-to-image Processing with Implementation in Interaction and Perception Studies: Library and Case Examples},
author = {A. V. Vlasov},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85182461798&partnerID=40&md5=0c3f5f4214a46db51f46f0092495eb2b},
issn = {1554-8716},
year = {2023},
date = {2023-01-01},
journal = {Annual Review of CyberTherapy and Telemedicine},
volume = {21},
pages = {200–205},
abstract = {Objectives: (a) to develop a library of AI-generated content (AIGC) based on a combinatorial scheme of prompting for interaction and perception research; (b) to show examples of AIGC implementation. The result is a public library for applied research in the cyber-psychological community (CYPSY). The Generative Art Library Abstractions (GALA) includes images (Figures 1-2) based on a text-to-image model and inspired by the artwork of Gustav Klimt. They can be used for comparative analysis (benchmarking), end-to-end evaluation, and advanced design. This allows experimentation with complex human-computer interaction (HCI) architectures and visual communication systems, and provides creative design support for experimenting. Examples include: interactive perception of positively colored generative images; HCI dialogues using visual language; generated moods in a VR environment; and a brain-computer interface for HCI. Taken together, these visualization resources are a valuable example of AIGC for next-generation R&D. Any suggestions from the CYPSY community are welcome.},
note = {Publisher: Interactive Media Institute},
keywords = {AIGC, applied research, art library, Article, Artificial intelligence, benchmarking, dataset, GALA, human, Human computer interaction, Image processing, Klimt, library, life satisfaction, neuropoem, Text-to-image, Virtual Reality, Wellbeing},
pubstate = {published},
tppubtype = {article}
}