AHCI RESEARCH GROUP
Publications
Papers published in international journals, proceedings of conferences, workshops and books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTeX record for each paper.
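To reuse a record in a LaTeX manuscript, you can copy its expanded BibTeX entry into a local bibliography file and cite it by its key. The sketch below is only an illustration: it assumes the entries have been saved to a file named ahci.bib (a hypothetical file name), and it cites the key of the first 2025 record on this page.
% Minimal sketch: citing one of the records listed below.
% Assumes the expanded BibTeX entries were copied into ahci.bib (example file name).
\documentclass{article}
\begin{document}
AI representation modes for VR co-creation are investigated in~\cite{rasch_creepycocreator_2025}.
\bibliographystyle{plain}
\bibliography{ahci} % compile with: pdflatex, bibtex, pdflatex, pdflatex
\end{document}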
2025
Rasch, J.; Töws, J.; Hirzle, T.; Müller, F.; Schmitz, M.
CreepyCoCreator? Investigating AI Representation Modes for 3D Object Co-Creation in Virtual Reality Proceedings Article
In: Conf Hum Fact Comput Syst Proc, Association for Computing Machinery, 2025, ISBN: 979-840071394-1 (ISBN).
Abstract | Links | BibTeX | Tags: 3D Creation, 3D modeling, 3D object, Building process, Co-creation, Co-creative system, Co-creative systems, Creative systems, Creatives, Generative AI, Three dimensional computer graphics, User expectations, User Studies, User study, Virtual Reality, Virtualization
@inproceedings{rasch_creepycocreator_2025,
title = {CreepyCoCreator? Investigating AI Representation Modes for 3D Object Co-Creation in Virtual Reality},
author = {J. Rasch and J. Töws and T. Hirzle and F. Müller and M. Schmitz},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005742763&doi=10.1145%2f3706598.3713720&partnerID=40&md5=e6cdcb6cc7249a8836ecc39ae103cd53},
doi = {10.1145/3706598.3713720},
isbn = {979-840071394-1 (ISBN)},
year = {2025},
date = {2025-01-01},
booktitle = {Conf Hum Fact Comput Syst Proc},
publisher = {Association for Computing Machinery},
abstract = {Generative AI in Virtual Reality offers the potential for collaborative object-building, yet challenges remain in aligning AI contributions with user expectations. In particular, users often struggle to understand and collaborate with AI when its actions are not transparently represented. This paper thus explores the co-creative object-building process through a Wizard-of-Oz study, focusing on how AI can effectively convey its intent to users during object customization in Virtual Reality. Inspired by human-to-human collaboration, we focus on three representation modes: the presence of an embodied avatar, whether the AI's contributions are visualized immediately or incrementally, and whether the areas modified are highlighted in advance. The findings provide insights into how these factors affect user perception and interaction with object-generating AI tools in Virtual Reality as well as satisfaction and ownership of the created objects. The results offer design implications for co-creative world-building systems, aiming to foster more effective and satisfying collaborations between humans and AI in Virtual Reality. © 2025 Copyright held by the owner/author(s).},
keywords = {3D Creation, 3D modeling, 3D object, Building process, Co-creation, Co-creative system, Co-creative systems, Creative systems, Creatives, Generative AI, Three dimensional computer graphics, User expectations, User Studies, User study, Virtual Reality, Virtualization},
pubstate = {published},
tppubtype = {inproceedings}
}
Gatti, E.; Giunchi, D.; Numan, N.; Steed, A.
Around the Virtual Campfire: Early UX Insights into AI-Generated Stories in VR Proceedings Article
In: Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR, pp. 136–141, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 979-833152157-8 (ISBN).
Abstract | Links | BibTeX | Tags: Generative AI, Images synthesis, Immersive, Interactive Environments, Language Model, Large language model, Storytelling, User input, User study, Users' experiences, Virtual environments, VR
@inproceedings{gatti_around_2025,
title = {Around the Virtual Campfire: Early UX Insights into AI-Generated Stories in VR},
author = {E. Gatti and D. Giunchi and N. Numan and A. Steed},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105000263662&doi=10.1109%2fAIxVR63409.2025.00027&partnerID=40&md5=cd804d892d45554e936d0221508b3447},
doi = {10.1109/AIxVR63409.2025.00027},
isbn = {979-833152157-8 (ISBN)},
year = {2025},
date = {2025-01-01},
booktitle = {Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR},
pages = {136–141},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Virtual Reality (VR) presents an immersive platform for storytelling, allowing narratives to unfold in highly engaging, interactive environments. Leveraging AI capabilities and image synthesis offers new possibilities for creating scalable, generative VR content. In this work, we use an LLM-driven VR storytelling platform to explore how AI-generated visuals and narrative elements impact the user experience in VR storytelling. Previously, we presented AIsop, a system to integrate LLM-generated text and images and TTS audio into a storytelling experience, where the narrative unfolds based on user input. In this paper, we present two user studies focusing on how AI-generated visuals influence narrative perception and the overall VR experience. Our findings highlight the positive impact of AI-generated pictorial content on the storytelling experience, highlighting areas for enhancement and further research in interactive narrative design. © 2025 IEEE.},
keywords = {Generative AI, Images synthesis, Immersive, Interactive Environments, Language Model, Large language model, Storytelling, User input, User study, Users' experiences, Virtual environments, VR},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhao, S.; Huang, Y.; He, X.; Tong, X.; Li, X.; Wu, D.
Reviving Mural Art through Generative AI: A Comparative Study of AI-Generated and Hand-Crafted Recreations Proceedings Article
In: Conf Hum Fact Comput Syst Proc, Association for Computing Machinery, 2025, ISBN: 979-840071394-1 (ISBN).
Abstract | Links | BibTeX | Tags: Biographies, Comparatives studies, Culture heritage, Dunhuang mural, Generative AI, Historic Preservation, Immersive, Interactive platform, Labour-intensive, Large-scales, Mural recreation, User study, Virtual Reality
@inproceedings{zhao_reviving_2025,
title = {Reviving Mural Art through Generative AI: A Comparative Study of AI-Generated and Hand-Crafted Recreations},
author = {S. Zhao and Y. Huang and X. He and X. Tong and X. Li and D. Wu},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105005707513&doi=10.1145%2f3706598.3714157&partnerID=40&md5=aab480eed29cdb31cc7720f915857b52},
doi = {10.1145/3706598.3714157},
isbn = {979-840071394-1 (ISBN)},
year = {2025},
date = {2025-01-01},
booktitle = {Conf Hum Fact Comput Syst Proc},
publisher = {Association for Computing Machinery},
abstract = {Virtual reality (VR) provides an immersive and interactive platform for presenting ancient murals, enhancing users' understanding and appreciation of these invaluable culture treasures. However, traditional hand-crafted methods for recreating murals in VR are labor-intensive, time-consuming, and require significant expertise, limiting their scalability for large-scale mural scenes. To address these challenges, we propose a comprehensive pipeline that leverages generative AI to automate the mural recreation process. This pipeline is validated by the reconstruction of Foguang Temple scene in Dunhuang Murals. A user study comparing the AI-generated scene with a hand-crafted one reveals no significant differences in presence, authenticity, engagement and enjoyment, and emotion. Additionally, our findings identify areas for improvement in AI-generated recreations, such as enhancing historical fidelity and offering customization. This work paves the way for more scalable, efficient, and accessible methods of revitalizing cultural heritage in VR, offering new opportunities for mural preservation, demonstration, and dissemination using VR. © 2025 Copyright held by the owner/author(s).},
keywords = {Biographies, Comparatives studies, Culture heritage, Dunhuang mural, Generative AI, Historic Preservation, Immersive, Interactive platform, Labour-intensive, Large-scales, Mural recreation, User study, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
2024
Melillo, Antonio; Rachedi, Sarah; Caggianese, Giuseppe; Gallo, Luigi; Maiorano, Patrizia; Gimigliano, Francesca; Lucidi, Fabio; De Pietro, Giuseppe; Guida, Maurizio; Giordano, Antonio; Chirico, Andrea
Synchronization of a Virtual Reality Scenario to Uterine Contractions for Labor Pain Management: Development Study and Randomized Controlled Trial Journal Article
In: Games for Health Journal, 2024, ISSN: 2161-783X.
Abstract | Links | BibTeX | Tags: Pain, User study, Virtual Reality
@article{melilloSynchronizationVirtualReality2024,
title = {Synchronization of a Virtual Reality Scenario to Uterine Contractions for Labor Pain Management: Development Study and Randomized Controlled Trial},
author = {Antonio Melillo and Sarah Rachedi and Giuseppe Caggianese and Luigi Gallo and Patrizia Maiorano and Francesca Gimigliano and Fabio Lucidi and Giuseppe De Pietro and Maurizio Guida and Antonio Giordano and Andrea Chirico},
url = {https://www.liebertpub.com/doi/full/10.1089/g4h.2023.0202},
doi = {10.1089/g4h.2023.0202},
issn = {2161-783X},
year = {2024},
date = {2024-06-11},
urldate = {2024-06-11},
journal = {Games for Health Journal},
publisher = {Mary Ann Liebert, Inc., publishers},
abstract = {Background: Labor is described as one of the most painful events women can experience through their lives, and labor pain shows unique features and rhythmic fluctuations. Purpose: The present study aims to evaluate virtual reality (VR) analgesic interventions for active labor with biofeedback-based VR technologies synchronized to uterine activity. Materials and Methods: We developed a VR system modeled on uterine contractions by connecting it to cardiotocographic equipment. We conducted a randomized controlled trial on a sample of 74 cases and 80 controls during active labor. Results: Results of the study showed a significant reduction of pain scores compared with both preintervention scores and to control group scores; a significant reduction of anxiety levels both compared with preintervention assessment and to control group and significant reduction in fear of labor experience compared with controls. Conclusion: VR may be considered as an effective nonpharmacological analgesic technique for the treatment of pain and anxiety and fear of childbirth experience during labor. The developed system could improve personalization of care, modulating the multisensory stimulation tailored to labor progression. Further studies are needed to compare the synchronized VR system to uterine activity and unsynchronized VR interventions.},
keywords = {Pain, User study, Virtual Reality},
pubstate = {published},
tppubtype = {article}
}
Weng, S. C. -C.
Studying How Prompt-Generated 3D Models Affect the Creation Process of Mixed Reality Applications Proceedings Article
In: Eck, U.; Sra, M.; Stefanucci, J.; Sugimoto, M.; Tatzgern, M.; Williams, I. (Ed.): Proc. - IEEE Int. Symp. Mixed Augment. Real. Adjunct, ISMAR-Adjunct, pp. 654–655, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-833150691-9 (ISBN).
Abstract | Links | BibTeX | Tags: 3D modeling, 3D models, 3d-modeling, Creation process, Generative AI, Mixed reality, Prompt-generated 3d model, Prompt-generated 3D models, Research prototype, Study plans, User study
@inproceedings{weng_studying_2024,
title = {Studying How Prompt-Generated 3D Models Affect the Creation Process of Mixed Reality Applications},
author = {S. C. -C. Weng},
editor = {Eck U. and Sra M. and Stefanucci J. and Sugimoto M. and Tatzgern M. and Williams I.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85214403987&doi=10.1109%2fISMAR-Adjunct64951.2024.00196&partnerID=40&md5=46d553927e96356d73ffc5996fbbdc71},
doi = {10.1109/ISMAR-Adjunct64951.2024.00196},
isbn = {979-833150691-9 (ISBN)},
year = {2024},
date = {2024-01-01},
booktitle = {Proc. - IEEE Int. Symp. Mixed Augment. Real. Adjunct, ISMAR-Adjunct},
pages = {654–655},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {In this doctoral consortium, we build upon our previous research prototype, Dream Mesh, a Mixed Reality application that generates models in MR based on user speech prompts. To evaluate the application and answer the questions derived from our pilot research, I propose a future user study plan. This plan aims to investigate how prompt-generated 3D models affect the creation process of Mixed Reality applications. © 2024 IEEE.},
keywords = {3D modeling, 3D models, 3d-modeling, Creation process, Generative AI, Mixed reality, Prompt-generated 3d model, Prompt-generated 3D models, Research prototype, Study plans, User study},
pubstate = {published},
tppubtype = {inproceedings}
}
Haramina, E.; Paladin, M.; Petričušić, Z.; Posarić, F.; Drobnjak, A.; Botički, I.
Learning Algorithms Concepts in a Virtual Reality Escape Room Proceedings Article
In: Babic, S.; Car, Z.; Cicin-Sain, M.; Cisic, D.; Ergovic, P.; Grbac, T.G.; Gradisnik, V.; Gros, S.; Jokic, A.; Jovic, A.; Jurekovic, D.; Katulic, T.; Koricic, M.; Mornar, V.; Petrovic, J.; Skala, K.; Skvorc, D.; Sruk, V.; Svaco, M.; Tijan, E.; Vrcek, N.; Vrdoljak, B. (Ed.): ICT Electron. Conv., MIPRO - Proc., pp. 2057–2062, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-835038249-5 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Computational complexity, Computer generated three dimensional environment, E-Learning, Education, Escape room, Extended reality, generative artificial intelligence, Learn+, Learning, Learning algorithms, Learning systems, Puzzle, puzzles, user experience, User study, User testing, Users' experiences, Virtual Reality
@inproceedings{haramina_learning_2024,
title = {Learning Algorithms Concepts in a Virtual Reality Escape Room},
author = {E. Haramina and M. Paladin and Z. Petričušić and F. Posarić and A. Drobnjak and I. Botički},
editor = {Babic S. and Car Z. and Cicin-Sain M. and Cisic D. and Ergovic P. and Grbac T.G. and Gradisnik V. and Gros S. and Jokic A. and Jovic A. and Jurekovic D. and Katulic T. and Koricic M. and Mornar V. and Petrovic J. and Skala K. and Skvorc D. and Sruk V. and Svaco M. and Tijan E. and Vrcek N. and Vrdoljak B.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85198221737&doi=10.1109%2fMIPRO60963.2024.10569447&partnerID=40&md5=8a94d92d989d1f0feb84eba890945de8},
doi = {10.1109/MIPRO60963.2024.10569447},
isbn = {979-835038249-5 (ISBN)},
year = {2024},
date = {2024-01-01},
booktitle = {ICT Electron. Conv., MIPRO - Proc.},
pages = {2057–2062},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Although the standard way to learn algorithms is by coding, learning through games is another way to obtain knowledge while having fun. Virtual reality is a computer-generated three-dimensional environment in which the player is fully immersed by having external stimuli mostly blocked out. In the game presented in this paper, players are enhancing their algorithms skills by playing an escape room game. The goal is to complete the room within the designated time by solving puzzles. The puzzles change for every playthrough with the use of generative artificial intelligence to provide every player with a unique experience. There are multiple types of puzzles such as time complexity, sorting algorithms, searching algorithms, and code execution. The paper presents the results of a study indicating students' preference for learning through gaming as a method of acquiring algorithms knowledge. © 2024 IEEE.},
keywords = {Artificial intelligence, Computational complexity, Computer generated three dimensional environment, E-Learning, Education, Escape room, Extended reality, generative artificial intelligence, Learn+, Learning, Learning algorithms, Learning systems, Puzzle, puzzles, user experience, User study, User testing, Users' experiences, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Liebers, C.; Pfützenreuter, N.; Auda, J.; Gruenefeld, U.; Schneegass, S.
"computer, Generate!" - Investigating User-Controlled Generation of Immersive Virtual Environments Proceedings Article
In: Lorig, F.; Tucker, J.; Lindstrom, A.D.; Dignum, F.; Murukannaiah, P.; Theodorou, A.; Yolum, P. (Ed.): Front. Artif. Intell. Appl., vol. 386, pp. 213–227, IOS Press BV, 2024, ISSN: 0922-6389; ISBN: 978-164368522-9 (ISBN).
Abstract | Links | BibTeX | Tags: All-at-once, Controllers, Generative AI, Human-controled scene generation, Human-Controlled Scene Generation, Immersive, Immersive Virtual Environments, In-control, Process control, Scene Generation, Three-level, User study, User-centred, Virtual Reality
@inproceedings{liebers_computer_2024,
title = {"computer, Generate!" - Investigating User-Controlled Generation of Immersive Virtual Environments},
author = {C. Liebers and N. Pfützenreuter and J. Auda and U. Gruenefeld and S. Schneegass},
editor = {Lorig F. and Tucker J. and Lindstrom A.D. and Dignum F. and Murukannaiah P. and Theodorou A. and Yolum P.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85198740032&doi=10.3233%2fFAIA240196&partnerID=40&md5=215c47e3c831cbb44e5dc10604cda8af},
doi = {10.3233/FAIA240196},
isbn = {09226389 (ISSN); 978-164368522-9 (ISBN)},
year = {2024},
date = {2024-01-01},
booktitle = {Front. Artif. Intell. Appl.},
volume = {386},
pages = {213–227},
publisher = {IOS Press BV},
abstract = {For immersive experiences such as virtual reality, explorable worlds are often fundamental. Generative artificial intelligence looks promising to accelerate the creation of such environments. However, it remains unclear how existing interaction modalities can support user-centered world generation and how users remain in control of the process. Thus, in this paper, we present a virtual reality application to generate virtual environments and compare three common interaction modalities (voice, controller, and hands) in a pre-study (N = 18), revealing a combination of initial voice input and continued controller manipulation as best suitable. We then investigate three levels of process control (all-at-once, creation-before-manipulation, and step-by-step) in a user study (N = 27). Our results show that although all-at-once reduced the number of object manipulations, participants felt more in control when using the step-by-step approach. © 2024 The Authors.},
keywords = {All-at-once, Controllers, Generative AI, Human-controled scene generation, Human-Controlled Scene Generation, Immersive, Immersive Virtual Environments, In-control, Process control, Scene Generation, Three-level, User study, User-centred, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Constantinides, N.; Constantinides, A.; Koukopoulos, D.; Fidas, C.; Belk, M.
CulturAI: Exploring Mixed Reality Art Exhibitions with Large Language Models for Personalized Immersive Experiences Proceedings Article
In: UMAP - Adjun. Proc. ACM Conf. User Model., Adapt. Personal., pp. 102–105, Association for Computing Machinery, Inc, 2024, ISBN: 979-840070466-6 (ISBN).
Abstract | Links | BibTeX | Tags: Computational Linguistics, Immersive, Language Model, Large language model, large language models, Mixed reality, Mixed reality art, Mixed reality technologies, Model-based OPC, User Experience Evaluation, User experience evaluations, User interfaces, User study, Users' experiences
@inproceedings{constantinides_culturai_2024,
title = {CulturAI: Exploring Mixed Reality Art Exhibitions with Large Language Models for Personalized Immersive Experiences},
author = {N. Constantinides and A. Constantinides and D. Koukopoulos and C. Fidas and M. Belk},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85198910809&doi=10.1145%2f3631700.3664874&partnerID=40&md5=952d82629a3fcdc6e2a960dd532b2c09},
doi = {10.1145/3631700.3664874},
isbn = {979-840070466-6 (ISBN)},
year = {2024},
date = {2024-01-01},
booktitle = {UMAP - Adjun. Proc. ACM Conf. User Model., Adapt. Personal.},
pages = {102–105},
publisher = {Association for Computing Machinery, Inc},
abstract = {Mixed Reality (MR) technologies have transformed the way in which we interact and engage with digital content, offering immersive experiences that blend the physical and virtual worlds. Over the past years, there has been increasing interest in employing Artificial Intelligence (AI) technologies to improve user experience and trustworthiness in cultural contexts. However, the integration of Large Language Models (LLMs) into MR applications within the Cultural Heritage (CH) domain is relatively underexplored. In this work, we present an investigation into the integration of LLMs within MR environments, focusing on the context of virtual art exhibitions. We implemented a HoloLens MR application, which enables users to explore artworks while interacting with an LLM through voice. To evaluate the user experience and perceived trustworthiness of individuals engaging with an LLM-based virtual art guide, we adopted a between-subject study design, in which participants were randomly assigned to either the LLM-based version or a control group using conventional interaction methods. The LLM-based version allows users to pose inquiries about the artwork displayed, ranging from details about the creator to information about the artwork's origin and historical significance. This paper presents the technical aspects of integrating LLMs within MR applications and evaluates the user experience and perceived trustworthiness of this approach in enhancing the exploration of virtual art exhibitions. Results of an initial evaluation provide evidence about the positive aspect of integrating LLMs in MR applications. Findings of this work contribute to the advancement of MR technologies for the development of future interactive personalized art experiences. © 2024 Owner/Author.},
keywords = {Computational Linguistics, Immersive, Language Model, Large language model, large language models, Mixed reality, Mixed reality art, Mixed reality technologies, Model-based OPC, User Experience Evaluation, User experience evaluations, User interfaces, User study, Users' experiences},
pubstate = {published},
tppubtype = {inproceedings}
}
2020
Caggianese, Giuseppe; De Pietro, Giuseppe; Esposito, Massimo; Gallo, Luigi; Minutolo, Aniello; Neroni, Pietro
Discovering Leonardo with Artificial Intelligence and Holograms: A User Study Journal Article
In: Pattern Recognition Letters, vol. 131, pp. 361–367, 2020, ISSN: 0167-8655.
Abstract | Links | BibTeX | Tags: Artificial intelligence, Conversational systems, Cultural heritage, Holograms, Touchless interfaces, User study
@article{caggianeseDiscoveringLeonardoArtificial2020,
title = {Discovering Leonardo with Artificial Intelligence and Holograms: A User Study},
author = { Giuseppe Caggianese and Giuseppe De Pietro and Massimo Esposito and Luigi Gallo and Aniello Minutolo and Pietro Neroni},
doi = {10.1016/j.patrec.2020.01.006},
issn = {0167-8655},
year = {2020},
date = {2020-01-01},
journal = {Pattern Recognition Letters},
volume = {131},
pages = {361--367},
abstract = {Cutting-edge visualization and interaction technologies are increasingly used in museum exhibitions, providing novel ways to engage visitors and enhance their cultural experience. Existing applications are commonly built upon a single technology, focusing on visualization, motion or verbal interaction (e.g., high-resolution projections, gesture interfaces, chatbots). This aspect limits their potential, since museums are highly heterogeneous in terms of visitors profiles and interests, requiring multi-channel, customizable interaction modalities. To this aim, this work describes and evaluates an artificial intelligence powered, interactive holographic stand aimed at describing Leonardo Da Vinci's art. This system provides the users with accurate 3D representations of Leonardo's machines, which can be interactively manipulated through a touchless user interface. It is also able to dialog with the users in natural language about Leonardo's art, while keeping the context of conversation and interactions. Furthermore, the results of a large user study, carried out during art and tech exhibitions, are presented and discussed. The goal was to assess how users of different ages and interests perceive, understand and explore cultural objects when holograms and artificial intelligence are used as instruments of knowledge and analysis.},
keywords = {Artificial intelligence, Conversational systems, Cultural heritage, Holograms, Touchless interfaces, User study},
pubstate = {published},
tppubtype = {article}
}
Caggianese, Giuseppe; Capece, Nicola; Erra, Ugo; Gallo, Luigi; Rinaldi, Michele
Freehand-Steering Locomotion Techniques for Immersive Virtual Environments: A Comparative Evaluation Journal Article
In: International Journal of Human–Computer Interaction, vol. 36, no. 18, pp. 1734–1755, 2020.
Abstract | Links | BibTeX | Tags: Locomotion techniques, Natural User Interfaces, User study, Virtual Reality
@article{caggianeseFreehandSteeringLocomotionTechniques2020,
title = {Freehand-Steering Locomotion Techniques for Immersive Virtual Environments: A Comparative Evaluation},
author = { Giuseppe Caggianese and Nicola Capece and Ugo Erra and Luigi Gallo and Michele Rinaldi},
doi = {10.1080/10447318.2020.1785151},
year = {2020},
date = {2020-01-01},
journal = {International Journal of Human–Computer Interaction},
volume = {36},
number = {18},
pages = {1734--1755},
abstract = {Virtual reality has achieved significant popularity in recent years, and allowing users to move freely within an immersive virtual world has become an important factor critical to realize. The user's interactions are generally designed to increase the perceived realism, but the locomotion techniques and how these affect the user's task performance still represent an open issue, much discussed in the literature. In this article, we evaluate the efficiency and effectiveness of, and user preferences relating to, freehand locomotion techniques designed for an immersive virtual environment performed through hand gestures tracked by a sensor placed in the egocentric position and experienced through a head-mounted display. Three freehand locomotion techniques have been implemented and compared with each other, and with a baseline technique based on a controller, through qualitative and quantitative measures. An extensive user study conducted with 60 subjects shows that the proposed methods have a performance comparable to the use of the controller, further revealing the users' preference for decoupling the locomotion in sub-tasks, even if this means renouncing precision and adapting the interaction to the possibilities of the tracker sensor.},
keywords = {Locomotion techniques, Natural User Interfaces, User study, Virtual Reality},
pubstate = {published},
tppubtype = {article}
}
2019
Caggianese, Giuseppe; Gallo, Luigi; Neroni, Pietro
The Vive Controllers vs. Leap Motion for Interactions in Virtual Environments: A Comparative Evaluation Proceedings Article
In: De Pietro, Giuseppe; Gallo, Luigi; Howlett, Robert J.; Jain, Lakhmi C.; Vlacic, Ljubo (Ed.): Intelligent Interactive Multimedia Systems and Services, pp. 24–33, Springer International Publishing, Cham, 2019, ISBN: 978-3-319-92231-7.
Abstract | Links | BibTeX | Tags: Head-mounted displays, Human computer interaction, Input devices, User study, Virtual Reality
@inproceedings{caggianeseViveControllersVs2019,
title = {The Vive Controllers vs. Leap Motion for Interactions in Virtual Environments: A Comparative Evaluation},
author = { Giuseppe Caggianese and Luigi Gallo and Pietro Neroni},
editor = { Giuseppe De Pietro and Luigi Gallo and Robert J. Howlett and Lakhmi C. Jain and Ljubo Vlacic},
doi = {10.1007/978-3-319-92231-7_3},
isbn = {978-3-319-92231-7},
year = {2019},
date = {2019-01-01},
booktitle = {Intelligent Interactive Multimedia Systems and Services},
pages = {24--33},
publisher = {Springer International Publishing},
address = {Cham},
series = {Smart Innovation, Systems and Technologies},
abstract = {In recent years, virtual reality technologies have been improving in terms of resolution, convenience and portability, fostering their adoption in real life applications. The Vive Controllers and Leap Motion are two of the most commonly used low-cost input devices for interactions in virtual environments. This paper discusses their differences in terms of interaction design, and presents the results of a user study focusing on manipulation tasks, namely Walking box and blocks, Block tower and Numbered cubes tasks, taking into account both quantitative and qualitative observations. The experimental findings show a general preference for the Vive Controllers, but also highlight that further work is needed to simplify complex tasks.},
keywords = {Head-mounted displays, Human computer interaction, Input devices, User study, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
2018
Caggianese, Giuseppe; Gallo, Luigi; Neroni, Pietro
Evaluation of Spatial Interaction Techniques for Virtual Heritage Applications: A Case Study of an Interactive Holographic Projection Journal Article
In: Future Generation Computer Systems, vol. 81, pp. 516–527, 2018, ISSN: 0167-739X.
Abstract | Links | BibTeX | Tags: Cultural heritage, Holograms, Interaction techniques, Museum, Touchless interaction, User study
@article{caggianeseEvaluationSpatialInteraction2018,
title = {Evaluation of Spatial Interaction Techniques for Virtual Heritage Applications: A Case Study of an Interactive Holographic Projection},
author = { Giuseppe Caggianese and Luigi Gallo and Pietro Neroni},
doi = {10.1016/j.future.2017.07.047},
issn = {0167-739X},
year = {2018},
date = {2018-01-01},
journal = {Future Generation Computer Systems},
volume = {81},
pages = {516--527},
abstract = {The increasing use of information and communication technologies (ICT) in museums is providing curators with new opportunities for the display of cultural heritage content, making it possible to merge real and digital works of art in a coherent exhibition space. However, humans learn and perceive by following an interactive process, a fact that is particularly true in relation to the understanding, analysis and interpretation of the cultural heritage. In order to allow visitors to fully exploit the potential of this new hybrid cultural communication, interactivity is essential. This paper analyzes interaction design focusing on a holographic projection system equipped with a gesture-based interface and discussing the results of both quantitative and qualitative user studies aimed at empirically investigating users' preferences in relation to interaction techniques when used in a museum context. The experimental findings suggest the adoption of task-specific patterns in the design of touchless user interfaces for the exploration of digital heritage content.},
keywords = {Cultural heritage, Holograms, Interaction techniques, Museum, Touchless interaction, User study},
pubstate = {published},
tppubtype = {article}
}
2017
Brancati, Nadia; Caggianese, Giuseppe; Frucci, Maria; Gallo, Luigi; Neroni, Pietro
Experiencing Touchless Interaction with Augmented Content on Wearable Head-Mounted Displays in Cultural Heritage Applications Journal Article
In: Personal and Ubiquitous Computing, vol. 21, no. 2, pp. 203–217, 2017, ISSN: 1617-4909, 1617-4917.
Abstract | Links | BibTeX | Tags: Augmented Reality, Point-and-click interface, RGB-D, Touchless interaction, User study
@article{brancatiExperiencingTouchlessInteraction2017,
title = {Experiencing Touchless Interaction with Augmented Content on Wearable Head-Mounted Displays in Cultural Heritage Applications},
author = { Nadia Brancati and Giuseppe Caggianese and Maria Frucci and Luigi Gallo and Pietro Neroni},
doi = {10.1007/s00779-016-0987-8},
issn = {1617-4909, 1617-4917},
year = {2017},
date = {2017-01-01},
urldate = {2016-12-06},
journal = {Personal and Ubiquitous Computing},
volume = {21},
number = {2},
pages = {203--217},
abstract = {The cultural heritage could benefit significantly from the integration of wearable augmented reality (AR). This technology has the potential to guide the user and provide her with both in-depth information, without distracting her from the context, and a natural interaction, which can further allow her to explore and navigate her way through a huge amount of cultural information. The integration of touchless interaction and augmented reality is particularly challenging. On the technical side, the human-machine interface has to be reliable so as to guide users across the real world, which is composed of cluttered backgrounds and severe changes in illumination conditions. On the user experience side, the interface has to provide precise interaction tools while minimizing the perceived task difficulty. In this study, an interactive wearable AR system to augment the environment with cultural information is described. To confer robustness to the interface, a strategy that takes advantage of both depth and color data to find the most reliable information on each single frame is introduced. Moreover, the results of an ISO 9241-9 user study performed in both indoor and outdoor conditions are presented and discussed. The experimental results show that, by using both depth and color data, the interface can behave consistently in different indoor and outdoor scenarios. Furthermore, the results show that the presence of a virtual pointer in the augmented visualization significantly reduces the users error rate in selection tasks.},
keywords = {Augmented Reality, Point-and-click interface, RGB-D, Touchless interaction, User study},
pubstate = {published},
tppubtype = {article}
}
2015
Brancati, Nadia; Caggianese, Giuseppe; De Pietro, Giuseppe; Frucci, Maria; Gallo, Luigi; Neroni, Pietro
Usability Evaluation of a Wearable Augmented Reality System for the Enjoyment of the Cultural Heritage Proceedings Article
In: 2015 The 11th International Conference on Signal-Image Technology and Internet-Based Systems (SITIS), pp. 768–774, IEEE, Bangkok, Thailand, 2015, ISBN: 978-1-4673-9721-6.
Abstract | Links | BibTeX | Tags: Augmented Reality, Cultural heritage, Touchless interaction, User study
@inproceedings{brancatiUsabilityEvaluationWearable2015,
title = {Usability Evaluation of a Wearable Augmented Reality System for the Enjoyment of the Cultural Heritage},
author = { Nadia Brancati and Giuseppe Caggianese and Giuseppe De Pietro and Maria Frucci and Luigi Gallo and Pietro Neroni},
doi = {10.1109/SITIS.2015.98},
isbn = {978-1-4673-9721-6},
year = {2015},
date = {2015-11-01},
urldate = {2016-12-06},
booktitle = {2015 The 11th International Conference on Signal-Image Technology and Internet-Based Systems (SITIS)},
pages = {768--774},
publisher = {IEEE},
address = {Bangkok, Thailand},
abstract = {The recent availability of low cost wearable augmented reality (WAR) technologies is leveraging the design of applications in the cultural heritage domain in order to support users in their emotional journey among the cultural artefacts and monuments of a city. In this paper, we describe a user study evaluating the usability of a wearable augmented reality touchless interface for the enjoyment of the cultural heritage in outdoor environments. The usability evaluation has been carried out in out-of-lab settings with inexperienced users, during a three day exhibition in the city of Naples. The presented results are related to the ease of use and learning of the system, and to the user's satisfaction in the enjoyment of the system.},
keywords = {Augmented Reality, Cultural heritage, Touchless interaction, User study},
pubstate = {published},
tppubtype = {inproceedings}
}
2013
Gallo, Luigi
A Study on the Degrees of Freedom in Touchless Interaction Proceedings Article
In: SA '13 SIGGRAPH Asia 2013 Technical Briefs, pp. 28, ACM, Hong Kong, Hong Kong, 2013, ISBN: 978-1-4503-2629-2.
Abstract | Links | BibTeX | Tags: 3D interaction, DOF, Touchless interaction, User study
@inproceedings{galloStudyDegreesFreedom2013,
title = {A Study on the Degrees of Freedom in Touchless Interaction},
author = { Luigi Gallo},
doi = {10.1145/2542355.2542390},
isbn = {978-1-4503-2629-2},
year = {2013},
date = {2013-11-01},
booktitle = {SA '13 SIGGRAPH Asia 2013 Technical Briefs},
pages = {28},
publisher = {ACM},
address = {Hong Kong, Hong Kong},
abstract = {During the last few years, we have been witnessing a widespread adoption of touchless technologies in the context of surgical procedures. Touchless interfaces are advantageous in that they can preserve sterility around the patient, allowing surgeons to visualize medical images without having to physically touch any control or to rely on a proxy. Such interfaces have been tailored to interact with 2D medical images but not with 3D reconstructions of anatomical data, since such an interaction requires at least three degrees of freedom. In this paper, we discuss the results of a user study in which a mouse-based interface has been compared with two Kinect-based touchless interfaces which allow users to interact with 3D data with up to nine degrees of freedom. The experimental results show that there is a significant relation between the number of degrees of freedom simultaneously controlled by the user and the number of degrees of freedom required to perform, in a touchless way, an accurate manipulation task.},
keywords = {3D interaction, DOF, Touchless interaction, User study},
pubstate = {published},
tppubtype = {inproceedings}
}
2012
Gallo, Luigi; Minutolo, Aniello
Design and Comparative Evaluation of Smoothed Pointing: A Velocity-oriented Remote Pointing Enhancement Technique Journal Article
In: International Journal of Human-Computer Studies, vol. 70, no. 4, pp. 287–300, 2012, ISSN: 1071-5819.
Abstract | Links | BibTeX | Tags: C-D ratio, Ray pointing, Smoothed Pointing, User study, Wiimote
@article{galloDesignComparativeEvaluation2012,
title = {Design and Comparative Evaluation of Smoothed Pointing: A Velocity-oriented Remote Pointing Enhancement Technique},
author = { Luigi Gallo and Aniello Minutolo},
doi = {10.1016/j.ijhcs.2011.12.001},
issn = {1071-5819},
year = {2012},
date = {2012-04-01},
journal = {International Journal of Human-Computer Studies},
volume = {70},
number = {4},
pages = {287--300},
abstract = {The increasing use of remote pointing devices in various application domains is fostering the adoption of pointing enhancement techniques which are aimed at counterbalancing the shortcomings of desk-free interaction. This paper describes the strengths and weaknesses of existing methods for ray pointing facilitation, and presents a refinement of Smoothed Pointing, an auto-calibrating velocity-oriented precision enhancing technique. Furthermore, the paper discusses the results of a user study aimed at empirically investigating how velocity-oriented approaches perform in target acquisition and in trajectory-based interaction tasks, considering both laser-style and image-plane pointing modalities. The experiments, carried out in a low precision scenario in which a Wiimote was used both as a wand and a tracking system, show that Smoothed Pointing allows a significant decrease in the error rate and achieves the highest values of throughput in trajectory-based tasks. The results also indicate that the effectiveness of precision enhancing techniques is significantly affected by the pointing modality and the type of pointing task.},
keywords = {C-D ratio, Ray pointing, Smoothed Pointing, User study, Wiimote},
pubstate = {published},
tppubtype = {article}
}