AHCI RESEARCH GROUP
Publications
Papers published in international journals,
proceedings of conferences, workshops and books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTeX record for each paper.
2025
Coronado, A.; Carvalho, S. T.; Berretta, L.
See Through My Eyes: Using Multimodal Large Language Model for Describing Rendered Environments to Blind People Proceedings Article
In: IMX - Proc. ACM Int. Conf. Interact. Media Experiences, pp. 451–457, Association for Computing Machinery, Inc, 2025, ISBN: 979-840071391-0 (ISBN).
Abstract | Links | BibTeX | Tags: Accessibility, Behavioral Research, Blind, Blind people, Helmet mounted displays, Human engineering, Human rehabilitation equipment, Interactive computer graphics, Interactive computer systems, Language Model, LLM, Multi-modal, Rendered environment, rendered environments, Spatial cognition, Virtual Reality, Vision aids, Visual impairment, Visual languages, Visually impaired people
@inproceedings{coronado_see_2025,
title = {See Through My Eyes: Using Multimodal Large Language Model for Describing Rendered Environments to Blind People},
author = {A. Coronado and S. T. Carvalho and L. Berretta},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105007991842&doi=10.1145%2f3706370.3731641&partnerID=40&md5=2f7cb1535d39d5e59b1f43f773de3272},
doi = {10.1145/3706370.3731641},
isbn = {979-840071391-0},
year = {2025},
date = {2025-01-01},
booktitle = {IMX - Proc. ACM Int. Conf. Interact. Media Experiences},
pages = {451--457},
publisher = {Association for Computing Machinery, Inc},
abstract = {Extended Reality (XR) is quickly expanding "as the next major technology wave in personal computing". Nevertheless, this expansion and adoption could also exclude certain disabled users, particularly people with visual impairment (VIP). According to the World Health Organization (WHO) in their 2019 publication, there were at least 2.2 billion people with visual impairment, a number that is also estimated to have increased in recent years. Therefore, it is important to include disabled users, especially visually impaired people, in the design of Head-Mounted Displays and Extended Reality environments. Indeed, this objective can be pursued by incorporating Multimodal Large Language Model (MLLM) technology, which can assist visually impaired people. As a case study, this study employs different prompts that result in environment descriptions from an MLLM integrated into a virtual reality (VR) escape room. Therefore, six potential prompts were engineered to generate valuable outputs for visually impaired users inside a VR environment. These outputs were evaluated using the G-Eval, and VIEScore metrics. Even though, the results show that the prompt patterns provided a description that aligns with the user's point of view, it is highly recommended to evaluate these outputs through "expected outputs"from Orientation and Mobility Specialists, and Sighted Guides. Furthermore, the subsequent step in the process is to evaluate these outputs by visually impaired people themselves to identify the most effective prompt pattern. © 2025 Copyright held by the owner/author(s).},
keywords = {Accessibility, Behavioral Research, Blind, Blind people, Helmet mounted displays, Human engineering, Human rehabilitation equipment, Interactive computer graphics, Interactive computer systems, Language Model, LLM, Multi-modal, Rendered environment, rendered environments, Spatial cognition, Virtual Reality, Vision aids, Visual impairment, Visual languages, Visually impaired people},
pubstate = {published},
tppubtype = {inproceedings}
}
Oliveira, E. A. Masasi De; Sousa, R. T.; Bastos, A. A.; Cintra, L. Martins De Freitas; Filho, A. R. G.
Immersive Virtual Museums with Spatially-Aware Retrieval-Augmented Generation Proceedings Article
In: IMX - Proc. ACM Int. Conf. Interact. Media Experiences, pp. 437–440, Association for Computing Machinery, Inc, 2025, ISBN: 979-840071391-0 (ISBN).
Abstract | Links | BibTeX | Tags: Association reactions, Behavioral Research, Generation systems, Geographics, Human computer interaction, Human engineering, Immersive, Information Retrieval, Interactive computer graphics, Language Model, Large language model, large language models, Museums, Retrieval-Augmented Generation, Search engines, Spatially aware, User interfaces, Virtual environments, Virtual museum, Virtual museum., Virtual Reality, Visual Attention, Visual languages
@inproceedings{masasi_de_oliveira_immersive_2025,
title = {Immersive Virtual Museums with Spatially-Aware Retrieval-Augmented Generation},
author = {E. A. Masasi De Oliveira and R. T. Sousa and A. A. Bastos and L. Martins De Freitas Cintra and A. R. G. Filho},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105007979183&doi=10.1145%2f3706370.3731643&partnerID=40&md5=db10b41217dd8a0b0705c3fb4a615666},
doi = {10.1145/3706370.3731643},
isbn = {979-840071391-0},
year = {2025},
date = {2025-01-01},
booktitle = {IMX - Proc. ACM Int. Conf. Interact. Media Experiences},
pages = {437--440},
publisher = {Association for Computing Machinery, Inc},
abstract = {Virtual Reality has significantly expanded possibilities for immersive museum experiences, overcoming traditional constraints such as space, preservation, and geographic limitations. However, existing virtual museum platforms typically lack dynamic, personalized, and contextually accurate interactions. To address this, we propose Spatially-Aware Retrieval-Augmented Generation (SA-RAG), an innovative framework integrating visual attention tracking with Retrieval-Augmented Generation systems and advanced Large Language Models. By capturing users' visual attention in real time, SA-RAG dynamically retrieves contextually relevant data, enhancing the accuracy, personalization, and depth of user interactions within immersive virtual environments. The system's effectiveness is initially demonstrated through our preliminary tests within a realistic VR museum implemented using Unreal Engine. Although promising, comprehensive human evaluations involving broader user groups are planned for future studies to rigorously validate SA-RAG's effectiveness, educational enrichment potential, and accessibility improvements in virtual museums. The framework also presents opportunities for broader applications in immersive educational and storytelling domains. © 2025 Copyright held by the owner/author(s).},
keywords = {Association reactions, Behavioral Research, Generation systems, Geographics, Human computer interaction, Human engineering, Immersive, Information Retrieval, Interactive computer graphics, Language Model, Large language model, large language models, Museums, Retrieval-Augmented Generation, Search engines, Spatially aware, User interfaces, Virtual environments, Virtual museum, Virtual museum., Virtual Reality, Visual Attention, Visual languages},
pubstate = {published},
tppubtype = {inproceedings}
}
Häfner, P.; Eisenlohr, F.; Karande, A.; Grethler, M.; Mukherjee, A.; Tran, N.
Leveraging Virtual Prototypes for Training Data Collection in LLM-Based Voice User Interface Development for Machines Proceedings Article
In: Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR, pp. 281–285, Institute of Electrical and Electronics Engineers Inc., 2025, ISBN: 979-833152157-8 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Behavioral Research, Data collection, Language Model, Large language model, large language models, Model-based OPC, Training data, User interface development, Virtual environments, Virtual Prototype, Virtual Prototyping, Virtual Reality, Voice User Interface, Voice User Interfaces, Wizard of Oz, Wizard-of-Oz Method
@inproceedings{hafner_leveraging_2025,
title = {Leveraging Virtual Prototypes for Training Data Collection in {LLM-Based} Voice User Interface Development for Machines},
author = {P. Häfner and F. Eisenlohr and A. Karande and M. Grethler and A. Mukherjee and N. Tran},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105000344182&doi=10.1109%2fAIxVR63409.2025.00054&partnerID=40&md5=05fe014eddba395881575bec5d96ce15},
doi = {10.1109/AIxVR63409.2025.00054},
isbn = {979-833152157-8},
year = {2025},
date = {2025-01-01},
booktitle = {Proc. - IEEE Int. Conf. Artif. Intell. Ext. Virtual Real., AIxVR},
pages = {281--285},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {Voice User Interfaces (VUIs) are becoming increasingly valuable in industrial applications, offering hands-free control in complex environments. However, developing and validating VUIs for such applications faces challenges, including limited access to physical prototypes and high testing costs. This paper presents a methodology that utilizes virtual reality (VR) prototypes to collect training data for large language model (LLM)-based VUIs, allowing early-stage voice control development before physical prototypes are accessible. Through an immersive Wizard-of-Oz (WoZ) method, participants interact with a virtual reality representation of a machine, generating realistic, scenario-based conversational data. This combined WoZ and VR approach enables high-quality data collection and iterative model training, offering an effective solution that can be applied across various types of machine. Preliminary findings demonstrate the viability of VR in generating diverse and robust data sets that closely simulate real-world dialogs for voice interactions in industrial settings. © 2025 IEEE.},
keywords = {Artificial intelligence, Behavioral Research, Data collection, Language Model, Large language model, large language models, Model-based OPC, Training data, User interface development, Virtual environments, Virtual Prototype, Virtual Prototyping, Virtual Reality, Voice User Interface, Voice User Interfaces, Wizard of Oz, Wizard-of-Oz Method},
pubstate = {published},
tppubtype = {inproceedings}
}
2024
Chaccour, C.; Saad, W.; Debbah, M.; Poor, H. V.
Joint Sensing, Communication, and AI: A Trifecta for Resilient THz User Experiences Journal Article
In: IEEE Transactions on Wireless Communications, vol. 23, no. 9, pp. 11444–11460, 2024, ISSN: 15361276 (ISSN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, artificial intelligence (AI), Behavioral Research, Channel state information, Computer hardware, Cramer-Rao bounds, Extended reality (XR), Hardware, Joint sensing and communication, Learning systems, machine learning, machine learning (ML), Machine-learning, Multi agent systems, reliability, Resilience, Sensor data fusion, Tera Hertz, Terahertz, terahertz (THz), Terahertz communication, Wireless communications, Wireless sensor networks, X reality
@article{chaccour_joint_2024,
title = {Joint Sensing, Communication, and {AI}: A Trifecta for Resilient {THz} User Experiences},
author = {C. Chaccour and W. Saad and M. Debbah and H. V. Poor},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85190170739&doi=10.1109%2fTWC.2024.3382192&partnerID=40&md5=da12c6f31faacaa08118b26e4570843f},
doi = {10.1109/TWC.2024.3382192},
issn = {1536-1276},
year = {2024},
date = {2024-01-01},
journal = {IEEE Transactions on Wireless Communications},
volume = {23},
number = {9},
pages = {11444--11460},
abstract = {In this paper a novel joint sensing, communication, and artificial intelligence (AI) framework is proposed so as to optimize extended reality (XR) experiences over terahertz (THz) wireless systems. Within this framework, active reconfigurable intelligent surfaces (RISs) are incorporated as pivotal elements, serving as enhanced base stations in the THz band to enhance Line-of-Sight (LoS) communication. The proposed framework consists of three main components. First, a tensor decomposition framework is proposed to extract unique sensing parameters for XR users and their environment by exploiting the THz channel sparsity. Essentially, the THz band's quasi-opticality is exploited and the sensing parameters are extracted from the uplink communication signal, thereby allowing for the use of the same waveform, spectrum, and hardware for both communication and sensing functionalities. Then, the Cramér-Rao lower bound is derived to assess the accuracy of the estimated sensing parameters. Second, a non-autoregressive multi-resolution generative AI framework integrated with an adversarial transformer is proposed to predict missing and future sensing information. The proposed framework offers robust and comprehensive historical sensing information and anticipatory forecasts of future environmental changes, which are generalizable to fluctuations in both known and unforeseen user behaviors and environmental conditions. Third, a multi-agent deep recurrent hysteretic Q-neural network is developed to control the handover policy of RIS subarrays, leveraging the informative nature of sensing information to minimize handover cost, maximize the individual quality of personal experiences (QoPEs), and improve the robustness and resilience of THz links. Simulation results show a high generalizability of the proposed unsupervised generative artificial intelligence (AI) framework to fluctuations in user behavior and velocity, leading to a 61% improvement in instantaneous reliability compared to schemes with known channel state information. © 2002-2012 IEEE.},
keywords = {Artificial intelligence, artificial intelligence (AI), Behavioral Research, Channel state information, Computer hardware, Cramer-Rao bounds, Extended reality (XR), Hardware, Joint sensing and communication, Learning systems, machine learning, machine learning (ML), Machine-learning, Multi agent systems, reliability, Resilience, Sensor data fusion, Tera Hertz, Terahertz, terahertz (THz), Terahertz communication, Wireless communications, Wireless sensor networks, X reality},
pubstate = {published},
tppubtype = {article}
}
Liu, Z.; Zhu, Z.; Zhu, L.; Jiang, E.; Hu, X.; Peppler, K.; Ramani, K.
ClassMeta: Designing Interactive Virtual Classmate to Promote VR Classroom Participation Proceedings Article
In: Conf Hum Fact Comput Syst Proc, Association for Computing Machinery, 2024, ISBN: 979-840070330-0 (ISBN).
Abstract | Links | BibTeX | Tags: 3D Avatars, Behavioral Research, Classroom learning, Collaborative learning, Computational Linguistics, Condition, E-Learning, Human behaviors, Language Model, Large language model, Learning experiences, Learning systems, pedagogical agent, Pedagogical agents, Students, Three dimensional computer graphics, Virtual Reality, VR classroom
@inproceedings{liu_classmeta_2024,
title = {{ClassMeta}: Designing Interactive Virtual Classmate to Promote {VR} Classroom Participation},
author = {Z. Liu and Z. Zhu and L. Zhu and E. Jiang and X. Hu and K. Peppler and K. Ramani},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85194868458&doi=10.1145%2f3613904.3642947&partnerID=40&md5=0592b2f977a2ad2e6366c6fa05808a6a},
doi = {10.1145/3613904.3642947},
isbn = {979-840070330-0},
year = {2024},
date = {2024-01-01},
booktitle = {Conf Hum Fact Comput Syst Proc},
publisher = {Association for Computing Machinery},
abstract = {Peer influence plays a crucial role in promoting classroom participation, where behaviors from active students can contribute to a collective classroom learning experience. However, the presence of these active students depends on several conditions and is not consistently available across all circumstances. Recently, Large Language Models (LLMs) such as GPT have demonstrated the ability to simulate diverse human behaviors convincingly due to their capacity to generate contextually coherent responses based on their role settings. Inspired by this advancement in technology, we designed ClassMeta, a GPT-4 powered agent to help promote classroom participation by playing the role of an active student. These agents, which are embodied as 3D avatars in virtual reality, interact with actual instructors and students with both spoken language and body gestures. We conducted a comparative study to investigate the potential of ClassMeta for improving the overall learning experience of the class. © 2024 Copyright held by the owner/author(s)},
keywords = {3D Avatars, Behavioral Research, Classroom learning, Collaborative learning, Computational Linguistics, Condition, E-Learning, Human behaviors, Language Model, Large language model, Learning experiences, Learning systems, pedagogical agent, Pedagogical agents, Students, Three dimensional computer graphics, Virtual Reality, VR classroom},
pubstate = {published},
tppubtype = {inproceedings}
}
Wu, J.; Gan, W.; Chao, H. -C.; Yu, P. S.
Geospatial Big Data: Survey and Challenges Journal Article
In: IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing, vol. 17, pp. 17007–17020, 2024, ISSN: 19391404 (ISSN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, artificial intelligence (AI), Behavioral Research, Big Data, Data challenges, Data Mining, Data surveys, Data visualization, Earth observation data, Environmental management, environmental protection, Geo-spatial, Geo-spatial analysis, Geo-spatial data, Geospatial big data, geospatial big data (GBD), geospatial data, GIS, Green products, Human behaviors, Knowledge graph, Knowledge graphs, satellite, sensor, spatial data, Sustainable development, urban planning
@article{wu_geospatial_2024,
title = {Geospatial Big Data: Survey and Challenges},
author = {J. Wu and W. Gan and H. -C. Chao and P. S. Yu},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85200804056&doi=10.1109%2fJSTARS.2024.3438376&partnerID=40&md5=53ee1c9695b3f2e78d6b565ed47f7585},
doi = {10.1109/JSTARS.2024.3438376},
issn = {1939-1404},
year = {2024},
date = {2024-01-01},
journal = {IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing},
volume = {17},
pages = {17007--17020},
abstract = {In recent years, geospatial big data (GBD) has obtained attention across various disciplines, categorized into big Earth observation data and big human behavior data. Identifying geospatial patterns from GBD has been a vital research focus in the fields of urban management and environmental sustainability. This article reviews the evolution of GBD mining and its integration with advanced artificial intelligence techniques. GBD consists of data generated by satellites, sensors, mobile devices, and geographical information systems, and we categorize geospatial data based on different perspectives. We outline the process of GBD mining and demonstrate how it can be incorporated into a unified framework. In addition, we explore new technologies, such as large language models, the metaverse, and knowledge graphs, and how they could make GBD even more useful. We also share examples of GBD helping with city management and protecting the environment. Finally, we discuss the real challenges that come up when working with GBD, such as issues with data retrieval and security. Our goal is to give readers a clear view of where GBD mining stands today and where it might go next. © 2024 The Authors. This work is licensed under a Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 License.},
keywords = {Artificial intelligence, artificial intelligence (AI), Behavioral Research, Big Data, Data challenges, Data Mining, Data surveys, Data visualization, Earth observation data, Environmental management, environmental protection, Geo-spatial, Geo-spatial analysis, Geo-spatial data, Geospatial big data, geospatial big data (GBD), geospatial data, GIS, Green products, Human behaviors, Knowledge graph, Knowledge graphs, satellite, sensor, spatial data, Sustainable development, urban planning},
pubstate = {published},
tppubtype = {article}
}
Omirgaliyev, R.; Kenzhe, D.; Mirambekov, S.
Simulating life: the application of generative agents in virtual environments Proceedings Article
In: IEEE AITU: Digit. Gener., Conf. Proc. - AITU, pp. 181–187, Institute of Electrical and Electronics Engineers Inc., 2024, ISBN: 979-835036437-8 (ISBN).
Abstract | Links | BibTeX | Tags: Artificial intelligence, Artificial intelligence agent, Artificial Intelligence Agents, Autonomous agents, Behavioral Research, Behaviour models, Computational Linguistics, Decision making, Dynamics, Dynamics simulation, Economic and social effects, Game Development, Game environment, Language Model, Large language model, large language models, Modeling languages, Social dynamic simulation, Social dynamics, Social Dynamics Simulation, Software design, Virtual Reality, Virtual Societies
@inproceedings{omirgaliyev_simulating_2024,
title = {Simulating life: the application of generative agents in virtual environments},
author = {R. Omirgaliyev and D. Kenzhe and S. Mirambekov},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85199876250&doi=10.1109%2fIEEECONF61558.2024.10585387&partnerID=40&md5=70f8b598d10bec13c39d3506a15534a1},
doi = {10.1109/IEEECONF61558.2024.10585387},
isbn = {979-835036437-8},
year = {2024},
date = {2024-01-01},
booktitle = {IEEE AITU: Digit. Gener., Conf. Proc. - AITU},
pages = {181--187},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {This research explores the innovative integration of Large Language Models (LLMs) in game development, focusing on the autonomous creation, development, and governance of a virtual village by AI agents within a 2D game environment. The core of this study lies in observing and analyzing the interactions and societal development among AI agents, utilizing advanced algorithms for generative behavior modeling and dynamic skill tree learning. These AI agents are endowed with human-like decision-making capabilities, enabled by LLMs, allowing them to engage in complex social interactions and contribute to emergent societal structures within the game. The uniqueness of this project stems from its approach to simulating lifelike social dynamics in a virtual setting, thus addressing a gap in existing research and marking a significant contribution to the interdisciplinary fields of artificial intelligence and game development. By comparing AI-generated societal behaviors with human social interactions, the study delves into the potential of AI to mirror or enhance human social structures, offering a fresh perspective on the capabilities of AI in game development. This research not only aims to push the boundaries of AI applications in game development but also seeks to provide valuable insights into the potential for AI-driven simulations in studying complex social and behavioral dynamics. ©2024 IEEE.},
keywords = {Artificial intelligence, Artificial intelligence agent, Artificial Intelligence Agents, Autonomous agents, Behavioral Research, Behaviour models, Computational Linguistics, Decision making, Dynamics, Dynamics simulation, Economic and social effects, Game Development, Game environment, Language Model, Large language model, large language models, Modeling languages, Social dynamic simulation, Social dynamics, Social Dynamics Simulation, Software design, Virtual Reality, Virtual Societies},
pubstate = {published},
tppubtype = {inproceedings}
}
2022
Wang, A.; Gao, Z.; Lee, L. H.; Braud, T.; Hui, P.
Decentralized, not Dehumanized in the Metaverse: Bringing Utility to NFTs through Multimodal Interaction Proceedings Article
In: ACM Int. Conf. Proc. Ser., pp. 662–667, Association for Computing Machinery, 2022, ISBN: 978-145039390-4 (ISBN).
Abstract | Links | BibTeX | Tags: AI-generated art, Arts computing, Behavioral Research, Computation theory, Continuum mechanics, Decentralised, Human behaviors, Interaction, Multi-modal, multimodal, Multimodal Interaction, NFTs, Non-fungible token, Text-to-image, The metaverse
@inproceedings{wang_decentralized_2022,
title = {Decentralized, not Dehumanized in the {Metaverse}: Bringing Utility to {NFTs} through Multimodal Interaction},
author = {A. Wang and Z. Gao and L. H. Lee and T. Braud and P. Hui},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85142799074&doi=10.1145%2f3536221.3558176&partnerID=40&md5=f9dee1e9e60afc71c4533cbdee0b98a7},
doi = {10.1145/3536221.3558176},
isbn = {978-145039390-4},
year = {2022},
date = {2022-01-01},
booktitle = {ACM Int. Conf. Proc. Ser.},
pages = {662--667},
publisher = {Association for Computing Machinery},
abstract = {User Interaction for NFTs (Non-fungible Tokens) is gaining increasing attention. Although NFTs have been traditionally single-use and monolithic, recent applications aim to connect multimodal interaction with human behavior. This paper reviews the related technological approaches and business practices in NFT art. We highlight that multimodal interaction is a currently under-studied issue in mainstream NFT art, and conjecture that multimodal interaction is a crucial enabler for decentralization in the NFT community. We present a continuum theory and propose a framework combining a bottom-up approach with AI multimodal process. Through this framework, we put forward integrating human behavior data into generative NFT units, as "multimodal interactive NFT."Our work displays the possibilities of NFTs in the art world, beyond the traditional 2D and 3D static content. © 2022 ACM.},
keywords = {AI-generated art, Arts computing, Behavioral Research, Computation theory, Continuum mechanics, Decentralised, Human behaviors, Interaction, Multi-modal, multimodal, Multimodal Interaction, NFTs, Non-fungible token, Text-to-image, The metaverse},
pubstate = {published},
tppubtype = {inproceedings}
}
2018
Dignum, Virginia; Dignum, Frank; Vazquez-Salceda, Javier; Clodic, A.; Gentile, Manuel; Mascarenhas, Samuel; Augello, Agnese
Design for Values for Social Robot Architectures Journal Article
In: Frontiers in Artificial Intelligence and Applications, vol. 311, pp. 43–52, 2018, ISSN: 09226389.
Abstract | Links | BibTeX | Tags: Behavioral Research, Ethics, Human Robot Interaction, Responsible AI, Robotics, Social Practices
@article{dignumDesignValuesSocial2018,
title = {Design for Values for Social Robot Architectures},
author = {Dignum, Virginia and Dignum, Frank and Vazquez-Salceda, Javier and Clodic, A. and Gentile, Manuel and Mascarenhas, Samuel and Augello, Agnese},
editor = {Loh, J. and Norskov, M. and Coeckelbergh, M. and Seibt, J. and Funk, M.},
doi = {10.3233/978-1-61499-931-7-43},
issn = {0922-6389},
year = {2018},
date = {2018-01-01},
journal = {Frontiers in Artificial Intelligence and Applications},
volume = {311},
pages = {43--52},
abstract = {The integration of social robots in human societies requires that they are capable to take decisions that may affect the lives of people around them. In order to ensure that these robots will behave according to shared ethical principles, an important shift in the design and development of social robots is needed, one where the main goal is improving ethical transparency rather than technical performance, and placing human values at the core of robot designs. In this abstract, we discuss the concept of ethical decision making and how to achieve trust according to the principles of Autonomy, Responsibility and Transparency (ART). © 2018 The authors and IOS Press. All rights reserved.},
keywords = {Behavioral Research, Ethics, Human Robot Interaction, Responsible AI, Robotics, Social Practices},
pubstate = {published},
tppubtype = {article},
internal-note = {Duplicate of entry dignum_design_2018 (same work, same DOI); consider merging.}
}
Dignum, Virginia; Dignum, Frank; Vazquez-Salceda, Javier; Clodic, A.; Gentile, Manuel; Mascarenhas, Samuel; Augello, Agnese
Design for values for social robot architectures Journal Article
In: Frontiers in Artificial Intelligence and Applications, vol. 311, pp. 43–52, 2018, ISSN: 09226389.
Abstract | Links | BibTeX | Tags: Behavioral Research, Ethics, Human Robot Interaction, Responsible AI, Robotics, Social Practices
@article{dignum_design_2018,
title = {Design for values for social robot architectures},
author = {Virginia Dignum and Frank Dignum and Javier Vazquez-Salceda and A. Clodic and Manuel Gentile and Samuel Mascarenhas and Agnese Augello},
editor = {Norskov, M. and Loh, J. and Funk, M.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85058222837&doi=10.3233%2f978-1-61499-931-7-43&partnerID=40&md5=dab1f635e2f9f043c7d1f81a8a2768d8},
doi = {10.3233/978-1-61499-931-7-43},
issn = {0922-6389},
year = {2018},
date = {2018-01-01},
journal = {Frontiers in Artificial Intelligence and Applications},
volume = {311},
pages = {43--52},
abstract = {The integration of social robots in human societies requires that they are capable to take decisions that may affect the lives of people around them. In order to ensure that these robots will behave according to shared ethical principles, an important shift in the design and development of social robots is needed, one where the main goal is improving ethical transparency rather than technical performance, and placing human values at the core of robot designs. In this abstract, we discuss the concept of ethical decision making and how to achieve trust according to the principles of Autonomy, Responsibility and Transparency (ART). © 2018 The authors and IOS Press. All rights reserved.},
keywords = {Behavioral Research, Ethics, Human Robot Interaction, Responsible AI, Robotics, Social Practices},
pubstate = {published},
tppubtype = {article}
}
2014
Augello, Agnese; Gaglio, Salvatore
Detection of User Activities in Intelligent Environments Journal Article
In: Advances in Intelligent Systems and Computing, vol. 260, pp. 19–32, 2014, ISSN: 21945357.
Abstract | Links | BibTeX | Tags: Ambient intelligence, Behavioral Research, Intelligent Environment, User Behavior Analysis
@article{augelloDetectionUserActivities2014,
title = {Detection of User Activities in Intelligent Environments},
author = {Augello, Agnese and Gaglio, Salvatore},
doi = {10.1007/978-3-319-03992-3_2},
issn = {2194-5357},
year = {2014},
date = {2014-01-01},
journal = {Advances in Intelligent Systems and Computing},
volume = {260},
pages = {19--32},
abstract = {Research on Ambient Intelligence (AmI) focuses on the development of smart environments adaptable to the needs and preferences of their inhabitants. For this reason it is important to understand and model user preferences. In this chapter we describe a system to detect user behavior patterns in an intelligent workplace. The system is designed for a workplace equipped in the context of Sensor9k, a project carried out at the Department of Computer Science at the University of Palermo (Italy). © Springer International Publishing Switzerland 2014.},
keywords = {Ambient intelligence, Behavioral Research, Intelligent Environment, User Behavior Analysis},
pubstate = {published},
tppubtype = {article},
internal-note = {Duplicate of entry augello_detection_2014 (same work, same DOI); consider merging.}
}
Augello, Agnese; Gaglio, Salvatore
Detection of user activities in intelligent environments Journal Article
In: Advances in Intelligent Systems and Computing, vol. 260, pp. 19–32, 2014, ISSN: 21945357.
Abstract | Links | BibTeX | Tags: Ambient intelligence, Behavioral Research, Intelligent Environment, User Behavior Analysis
@article{augello_detection_2014,
title = {Detection of user activities in intelligent environments},
author = {Agnese Augello and Salvatore Gaglio},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84903729976&doi=10.1007%2f978-3-319-03992-3_2&partnerID=40&md5=5280f33f184d7723e4506e1cb87438aa},
doi = {10.1007/978-3-319-03992-3_2},
issn = {2194-5357},
year = {2014},
date = {2014-01-01},
journal = {Advances in Intelligent Systems and Computing},
volume = {260},
pages = {19--32},
abstract = {Research on Ambient Intelligence (AmI) focuses on the development of smart environments adaptable to the needs and preferences of their inhabitants. For this reason it is important to understand and model user preferences. In this chapter we describe a system to detect user behavior patterns in an intelligent workplace. The system is designed for a workplace equipped in the context of Sensor9k, a project carried out at the Department of Computer Science at the University of Palermo (Italy). © Springer International Publishing Switzerland 2014.},
keywords = {Ambient intelligence, Behavioral Research, Intelligent Environment, User Behavior Analysis},
pubstate = {published},
tppubtype = {article}
}