AHCI RESEARCH GROUP
Publications
Papers published in international journals,
conference and workshop proceedings, and books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTeX record for each paper.
2022
Gaglio, Giuseppe Fulvio; Augello, Agnese; Caggianese, Giuseppe; Gallo, Luigi
Modellazione 3D di avatar per il Serious Game SMILER Technical Report
ICAR-CNR no. RT-ICAR-NA-2022-01, 2022.
Abstract | Links | BibTeX | Tags: Healthcare, Human computer interaction, Touchless interaction, Virtual Reality
@techreport{gaglio_modellazione_2022,
title = {Modellazione 3D di avatar per il Serious Game SMILER},
author = {Giuseppe Fulvio Gaglio and Agnese Augello and Giuseppe Caggianese and Luigi Gallo},
url = {https://intranet.icar.cnr.it/wp-content/uploads/2022/07/RT-ICAR-NA-2022-01.pdf},
year = {2022},
date = {2022-01-01},
number = {RT-ICAR-NA-2022-01},
institution = {ICAR-CNR},
abstract = {This document describes the design and development of an avatar for the serious game planned within the « SMILER » project - Serious gaMes as emerging e-health Interventions for young people with neurologicaL or rEspiratory disoRders. After a brief introduction to the project, the techniques and tools used for the 3D modelling of the avatar are described.},
keywords = {Healthcare, Human computer interaction, Touchless interaction, Virtual Reality},
pubstate = {published},
tppubtype = {techreport}
}
Augello, Agnese; Infantino, Ignazio; Pilato, Giovanni; Vitale, Gianpaolo
Extending affective capabilities for medical assistive robots Journal Article
In: Cognitive Systems Research, vol. 73, pp. 21–25, 2022, ISSN: 1389-0417.
Abstract | Links | BibTeX | Tags: Anthropomorphic Robots, Assistive Robots, Emotion Detection, Facial Expressions, Human computer interaction, Human Robot Interaction, Humanoid Robots, Natural Language Processing, Robotics, Wellbeing
@article{augello_extending_2022,
title = {Extending affective capabilities for medical assistive robots},
author = {Agnese Augello and Ignazio Infantino and Giovanni Pilato and Gianpaolo Vitale},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85123046436&doi=10.1016%2fj.cogsys.2021.12.004&partnerID=40&md5=6e76332f7f95333a9ae2e8f11c054622},
doi = {10.1016/j.cogsys.2021.12.004},
issn = {1389-0417},
year = {2022},
date = {2022-01-01},
journal = {Cognitive Systems Research},
volume = {73},
pages = {21–25},
abstract = {In this work, we discuss methodologies and implementation choices to enable a humanoid robot to estimate patients’ mood and emotions during postoperative home rehabilitation. The approach is modular and it has been implemented into a SoftBank Pepper robotic architecture; however, the approach is general and it can be easily adapted to other robotic platforms. A sample of an interactive session for the detection of the patient's affective state is also reported. © 2022 Elsevier B.V.},
keywords = {Anthropomorphic Robots, Assistive Robots, Emotion Detection, Facial Expressions, Human computer interaction, Human Robot Interaction, Humanoid Robots, Natural Language Processing, Robotics, Wellbeing},
pubstate = {published},
tppubtype = {article}
}
2020
Trifirò, Irene; Augello, Agnese; Maniscalco, Umberto; Pilato, Giovanni; Vella, Filippo; Meo, Rosa
How are you? How a robot can learn to express its own roboceptions Proceedings Article
In: Cristiani, Matteo; Toro, Carlos; Zanni-Merk, Cecilia; Howlett, Robert J.; Jain, Lakhmi C. (Ed.): Procedia Computer Science, pp. 480–489, Elsevier B.V., 2020.
Abstract | Links | BibTeX | Tags: Human computer interaction, Knowledge Representation, Latent Semantic Analysis, Natural Language Processing, Robotics, Semantic Computing, Social Robots
@inproceedings{trifiro_how_2020,
title = {How are you? How a robot can learn to express its own roboceptions},
author = {Irene Trifirò and Agnese Augello and Umberto Maniscalco and Giovanni Pilato and Filippo Vella and Rosa Meo},
editor = {Matteo Cristiani and Carlos Toro and Cecilia Zanni-Merk and Robert J. Howlett and Lakhmi C. Jain},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85093358258&doi=10.1016%2fj.procs.2020.08.050&partnerID=40&md5=d262d3c7852f492f6a871ed2c4b7e941},
doi = {10.1016/j.procs.2020.08.050},
year = {2020},
date = {2020-01-01},
booktitle = {Procedia Computer Science},
volume = {176},
pages = {480–489},
publisher = {Elsevier B.V.},
abstract = {This work investigates how a robot can learn associations between linguistic elements, such as words or sentences, and its bodily perceptions, which we have named “roboceptions”. We discuss the possibility of establishing such associations through interaction with human beings. By interacting with a user, the robot can learn to ascribe a meaning to its roboceptions and to express them in natural language. Such a process could then be used by the robot in a verbal interaction to detect words recalling previously experienced roboceptions. In this paper, we discuss a Dual-NMT approach to realize such an association; however, it requires an adequate training corpus. For this reason, we consider two different phases towards the realization of the system, and we show the results of the first phase, comparing two approaches: one based on the Latent Semantic Analysis paradigm and one based on the Random Indexing methodology.},
keywords = {Human computer interaction, Knowledge Representation, Latent Semantic Analysis, Natural Language Processing, Robotics, Semantic Computing, Social Robots},
pubstate = {published},
tppubtype = {inproceedings}
}
2019
Caggianese, Giuseppe; Colonnese, Valerio; Gallo, Luigi
Situated Visualization in Augmented Reality: Exploring Information Seeking Strategies Proceedings Article
In: 2019 15th International Conference on Signal-Image Technology Internet-Based Systems (SITIS), pp. 390–395, 2019.
Abstract | Links | BibTeX | Tags: Augmented Reality, Human computer interaction, Task analysis, Visualization
@inproceedings{caggianese_situated_2019,
title = {Situated Visualization in Augmented Reality: Exploring Information Seeking Strategies},
author = {Giuseppe Caggianese and Valerio Colonnese and Luigi Gallo},
doi = {10.1109/SITIS.2019.00069},
year = {2019},
date = {2019-11-01},
booktitle = {2019 15th International Conference on Signal-Image Technology Internet-Based Systems (SITIS)},
pages = {390–395},
abstract = {In recent years, augmented reality applications have increasingly demonstrated the need for interaction with information related to, and directly shown in, the surrounding environment. Situated information is visualized in its semantic and spatial context, building up an environment enhanced by an information level that dynamically adapts to the production of the information and to the actions of the user. The exploration and manipulation of this type of data through see-through augmented reality devices still represents a challenging task. The development of specific interaction strategies capable of mitigating the current limitations of augmented reality devices is essential. In this context, our contribution has been to design possible solutions to address some of these challenges, allowing a dynamic interaction with situated information. Following the visual "information-seeking mantra" proposed by Shneiderman, and introducing some "superpowers" for the users, in this work we present different strategies aimed at obtaining an overview, filtering, and acquiring details of a collection of situated data.},
keywords = {Augmented Reality, Human computer interaction, Task analysis, Visualization},
pubstate = {published},
tppubtype = {inproceedings}
}
Caggianese, Giuseppe; Gallo, Luigi; Neroni, Pietro
The Vive Controllers vs. Leap Motion for Interactions in Virtual Environments: A Comparative Evaluation Proceedings Article
In: Pietro, Giuseppe De; Gallo, Luigi; Howlett, Robert J.; Jain, Lakhmi C.; Vlacic, Ljubo (Ed.): Intelligent Interactive Multimedia Systems and Services, pp. 24–33, Springer International Publishing, Cham, 2019, ISBN: 978-3-319-92231-7.
Abstract | Links | BibTeX | Tags: Head-mounted displays, Human computer interaction, Input devices, User study, Virtual Reality
@inproceedings{caggianese_vive_2019,
title = {The Vive Controllers vs. Leap Motion for Interactions in Virtual Environments: A Comparative Evaluation},
author = {Giuseppe Caggianese and Luigi Gallo and Pietro Neroni},
editor = {Giuseppe De Pietro and Luigi Gallo and Robert J. Howlett and Lakhmi C. Jain and Ljubo Vlacic},
doi = {10.1007/978-3-319-92231-7_3},
isbn = {978-3-319-92231-7},
year = {2019},
date = {2019-01-01},
booktitle = {Intelligent Interactive Multimedia Systems and Services},
pages = {24–33},
publisher = {Springer International Publishing},
address = {Cham},
series = {Smart Innovation, Systems and Technologies},
abstract = {In recent years, virtual reality technologies have been improving in terms of resolution, convenience and portability, fostering their adoption in real-life applications. The Vive Controllers and Leap Motion are two of the most commonly used low-cost input devices for interactions in virtual environments. This paper discusses their differences in terms of interaction design, and presents the results of a user study focusing on manipulation tasks, namely the Walking box and blocks, Block tower and Numbered cubes tasks, taking into account both quantitative and qualitative observations. The experimental findings show a general preference for the Vive Controllers, but also highlight that further work is needed to simplify complex tasks.},
keywords = {Head-mounted displays, Human computer interaction, Input devices, User study, Virtual Reality},
pubstate = {published},
tppubtype = {inproceedings}
}
2018
Ciampi, Mario; Gallo, Luigi; Pietro, Giuseppe De
MITO: An advanced toolkit for medical imaging processing and visualization Technical Report
ICAR-CNR no. RT-ICAR-NA-2018-02, 2018.
Abstract | Links | BibTeX | Tags: Healthcare, Human computer interaction, Touchless interaction, Virtual Reality
@techreport{ciampi_mito_2018,
title = {MITO: An advanced toolkit for medical imaging processing and visualization},
author = {Mario Ciampi and Luigi Gallo and Giuseppe De Pietro},
url = {https://intranet.icar.cnr.it/wp-content/uploads/2018/07/RT-ICAR-NA-2018-02.pdf},
year = {2018},
date = {2018-01-01},
number = {RT-ICAR-NA-2018-02},
institution = {ICAR-CNR},
abstract = {This technical report presents MITO, an open software environment for medical image acquisition, processing, visualization, and navigation. The system is able to interact with PACS servers conforming to the DICOM 3.0 standard in order to retrieve and send radiological image data. Acquired 2D images can be manipulated with MITO for basic operations and advanced processing, such as image segmentation, region extraction, image fusion, and 3D reconstruction. Advanced 2D and 3D user interfaces allow users to interact with medical images and volumes through various input devices or in a completely touchless way. The high number of downloads of the software system, along with its widespread use in numerous experimental scenarios, shows the extensiveness and performance of the features developed.},
keywords = {Healthcare, Human computer interaction, Touchless interaction, Virtual Reality},
pubstate = {published},
tppubtype = {techreport}
}
Nuccio, Carlo; Augello, Agnese; Gaglio, Salvatore; Pilato, Giovanni
Interaction capabilities of a robotic receptionist Journal Article
In: Smart Innovation, Systems and Technologies, vol. 76, pp. 171–180, 2018, ISSN: 2190-3018.
Abstract | Links | BibTeX | Tags: Anthropomorphic Robots, Computational Creativity, Human computer interaction, Human Robot Interaction, Humanoid Robots, Ontologies, Robotics
@article{nuccio_interaction_2018,
title = {Interaction capabilities of a robotic receptionist},
author = {Carlo Nuccio and Agnese Augello and Salvatore Gaglio and Giovanni Pilato},
editor = {Giuseppe De Pietro and Luigi Gallo and Robert J. Howlett and Lakhmi C. Jain},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85020412736&doi=10.1007%2f978-3-319-59480-4_18&partnerID=40&md5=36826af104eeb18f5d3fe6ff7ef1c18f},
doi = {10.1007/978-3-319-59480-4_18},
issn = {2190-3018},
year = {2018},
date = {2018-01-01},
journal = {Smart Innovation, Systems and Technologies},
volume = {76},
pages = {171–180},
abstract = {A system aimed at facilitating the interaction between a human user and a humanoid robot is presented. The system is suited to answering questions about laboratory activities, the people involved, projects, research themes and collaborations among employees. The task is accomplished by the HermiT reasoner, invoked by a speech recognition module. The system is capable of navigating a specific ontology and making inferences on it. The presented system is part of a broader social robot framework whose goal is to give the user a fulfilling social interaction experience, driven by the perception of the robot's internal state and involving intuitive and computational creativity capabilities. © Springer International Publishing AG 2018.},
keywords = {Anthropomorphic Robots, Computational Creativity, Human computer interaction, Human Robot Interaction, Humanoid Robots, Ontologies, Robotics},
pubstate = {published},
tppubtype = {article}
}
Città, Giuseppe; Arnab, Sylvester; Augello, Agnese; Gentile, Manuel; Zielonka, Sebastian Idelsohn; Ifenthaler, Dirk; Infantino, Ignazio; Guardia, Dario La; Manfrè, Adriano; Allegra, Mario
Move your mind: Creative dancing humanoids as support to STEAM activities Journal Article
In: Smart Innovation, Systems and Technologies, vol. 76, pp. 190–199, 2018, ISSN: 2190-3018.
Abstract | Links | BibTeX | Tags: Anthropomorphic Robots, Cognitive Architectures, Cognitive Systems, Computational Creativity, Education, Embodied Cognition, Enactivism, Human computer interaction, STEAM
@article{citta_move_2018,
title = {Move your mind: Creative dancing humanoids as support to STEAM activities},
author = {Giuseppe Città and Sylvester Arnab and Agnese Augello and Manuel Gentile and Sebastian Idelsohn Zielonka and Dirk Ifenthaler and Ignazio Infantino and Dario La Guardia and Adriano Manfrè and Mario Allegra},
editor = {Robert J. Howlett and Luigi Gallo and Giuseppe De Pietro and Lakhmi C. Jain},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85020412823&doi=10.1007%2f978-3-319-59480-4_20&partnerID=40&md5=0dd3a32d07a98214b8c275b30f2ca49f},
doi = {10.1007/978-3-319-59480-4_20},
issn = {2190-3018},
year = {2018},
date = {2018-01-01},
journal = {Smart Innovation, Systems and Technologies},
volume = {76},
pages = {190–199},
abstract = {Educational activities based on dance can foster interest in, and comprehension of, concepts from maths, geometry, physics, biomechanics and computational thinking. In this work, we discuss a possible use of a dancing humanoid robot as an innovative technology to support and enhance STEAM learning activities. © Springer International Publishing AG 2018.},
keywords = {Anthropomorphic Robots, Cognitive Architectures, Cognitive Systems, Computational Creativity, Education, Embodied Cognition, Enactivism, Human computer interaction, STEAM},
pubstate = {published},
tppubtype = {article}
}
Messina, Antonio; Augello, Agnese; Pilato, Giovanni; Rizzo, Riccardo
BioGraphBot: A Conversational Assistant for Bioinformatics Graph Databases Proceedings Article
In: Barolli, L; Enokido, T (Ed.): INNOVATIVE MOBILE AND INTERNET SERVICES IN UBIQUITOUS COMPUTING, IMIS-2017, pp. 135–146, SPRINGER INTERNATIONAL PUBLISHING AG, GEWERBESTRASSE 11, CHAM, CH-6330, SWITZERLAND, 2018, ISBN: 978-3-319-61542-4 978-3-319-61541-7.
Abstract | Links | BibTeX | Tags: Biomedical, Chatbots, Gremlin, Human computer interaction
@inproceedings{messina_biographbot_2018,
title = {BioGraphBot: A Conversational Assistant for Bioinformatics Graph Databases},
author = {Antonio Messina and Agnese Augello and Giovanni Pilato and Riccardo Rizzo},
editor = {L Barolli and T Enokido},
doi = {10.1007/978-3-319-61542-4_12},
isbn = {978-3-319-61542-4 978-3-319-61541-7},
year = {2018},
date = {2018-01-01},
booktitle = {INNOVATIVE MOBILE AND INTERNET SERVICES IN UBIQUITOUS COMPUTING, IMIS-2017},
volume = {612},
pages = {135–146},
publisher = {SPRINGER INTERNATIONAL PUBLISHING AG},
address = {GEWERBESTRASSE 11, CHAM, CH-6330, SWITZERLAND},
series = {Advances in Intelligent Systems and Computing},
abstract = {Chatbot technology makes it easy to add a conversational interface to a large set of applications. In this paper, we present a conversational agent, based on the ALICE framework, that plays the role of an interface between human users and a bioinformatics graph database. The system has been embedded into the web frontend of BioGraphDB, a publicly available resource that uses Gremlin as its primary query language. To exploit the resource successfully, domain experts such as biologists and bioinformaticians must also be familiar with that query language. The use of a chatbot allows queries expressed in natural language to be translated into Gremlin queries, simplifying the interaction with BioGraphDB.},
keywords = {Biomedical, Chatbots, Gremlin, Human computer interaction},
pubstate = {published},
tppubtype = {inproceedings}
}
2017
Gallo, Luigi; Marra, Ivana
Rapporto tecnico contabile (01/06/2015–30/11/2016) Progetto SmartCARE Technical Report
ICAR-CNR no. RT-ICAR-NA-2017-03, 2017.
Abstract | Links | BibTeX | Tags: Healthcare, Human computer interaction, Touchless interaction
@techreport{gallo_rapporto_2017,
title = {Rapporto tecnico contabile (01/06/2015–30/11/2016) Progetto SmartCARE},
author = {Luigi Gallo and Ivana Marra},
url = {https://intranet.icar.cnr.it/wp-content/uploads/2017/10/RT-ICAR-NA-2017-03.pdf},
year = {2017},
date = {2017-01-01},
number = {RT-ICAR-NA-2017-03},
institution = {ICAR-CNR},
abstract = {The purpose of this work is to present a report on the activities carried out by the beneficiary Istituto di Calcolo e Reti ad Alte Prestazioni (ICAR) of the Consiglio Nazionale delle Ricerche (CNR) for technical and administrative reporting purposes, with a brief description of the research activities performed and a breakdown of the costs incurred in the period 01/06/2015-30/11/2016, within the SmartCARE project “Satellite enhanced Multi-channel ehealth Assistance for Remote Tele-rehabilitation and CAREgiving” - Proposal no. ITSLAB/ESA/ARTES/2014-1 - RFQ/ITT no. AO/1-5891/08/NL/US.},
keywords = {Healthcare, Human computer interaction, Touchless interaction},
pubstate = {published},
tppubtype = {techreport}
}
Gentile, Vito; Milazzo, Fabrizio; Sorce, Salvatore; Gentile, Antonio; Augello, Agnese; Pilato, Giovanni
Body Gestures and Spoken Sentences: A Novel Approach for Revealing User's Emotions Proceedings Article
In: Proceedings - IEEE 11th International Conference on Semantic Computing, ICSC 2017, pp. 69–72, Institute of Electrical and Electronics Engineers Inc., 2017, ISBN: 978-1-5090-4896-0.
Abstract | Links | BibTeX | Tags: Emotion Analysis, Emotion Recognition, Facial Expressions, Gestural user interfaces, Human computer interaction, Semantic Computing, Sentiment Analysis
@inproceedings{gentile_body_2017,
title = {Body Gestures and Spoken Sentences: A Novel Approach for Revealing User's Emotions},
author = {Vito Gentile and Fabrizio Milazzo and Salvatore Sorce and Antonio Gentile and Agnese Augello and Giovanni Pilato},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85018255013&doi=10.1109%2fICSC.2017.14&partnerID=40&md5=23d8bb016146afe5e384b12d84f3fb85},
doi = {10.1109/ICSC.2017.14},
isbn = {978-1-5090-4896-0},
year = {2017},
date = {2017-01-01},
booktitle = {Proceedings - IEEE 11th International Conference on Semantic Computing, ICSC 2017},
pages = {69–72},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {In the last decade, there has been a growing interest in emotion analysis research, which has been applied in several areas of computer science. Many authors have contributed to the development of emotion recognition algorithms, considering textual or non-verbal data as input, such as facial expressions, gestures or, in the case of multi-modal emotion recognition, a combination of them. In this paper, we describe a method to detect emotions from gestures using the skeletal data obtained from Kinect-like devices as input, as well as a textual description of their meaning. The experimental results show that the correlation existing between body movements and spoken user sentence(s) can be used to reveal the user's emotions from gestures. © 2017 IEEE.},
keywords = {Emotion Analysis, Emotion Recognition, Facial Expressions, Gestural user interfaces, Human computer interaction, Semantic Computing, Sentiment Analysis},
pubstate = {published},
tppubtype = {inproceedings}
}
Milazzo, Fabrizio; Augello, Agnese; Pilato, Giovanni; Gentile, Vito; Gentile, Antonio; Sorce, Salvatore
Exploiting correlation between body gestures and spoken sentences for real-time emotion recognition Proceedings Article
In: ACM International Conference Proceeding Series, Association for Computing Machinery, 2017, ISBN: 978-1-4503-5237-6.
Abstract | Links | BibTeX | Tags: Emotion Recognition, Gestural user interfaces, Human computer interaction, Sentiment Analysis
@inproceedings{milazzo_exploiting_2017,
title = {Exploiting correlation between body gestures and spoken sentences for real-time emotion recognition},
author = {Fabrizio Milazzo and Agnese Augello and Giovanni Pilato and Vito Gentile and Antonio Gentile and Salvatore Sorce},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85034630360&doi=10.1145%2f3125571.3125590&partnerID=40&md5=b3d73715f756aded80e6b0a330ace70a},
doi = {10.1145/3125571.3125590},
isbn = {978-1-4503-5237-6},
year = {2017},
date = {2017-01-01},
booktitle = {ACM International Conference Proceeding Series},
volume = {Part F131371},
publisher = {Association for Computing Machinery},
abstract = {Humans communicate their affective states through different media, both verbal and non-verbal, often used at the same time. Knowledge of the emotional state plays a key role in providing personalized and context-related information and services. This is the main reason why several algorithms have been proposed in the last few years for automatic emotion recognition. In this work we exploit the correlation between one's affective state and the simultaneous body expressions in terms of speech and gestures. Here we propose a system for real-time emotion recognition from gestures. In a first step, the system builds a trusted dataset of association pairs (motion data→emotion pattern), also based on textual information. Such a dataset is the ground truth for a further step, in which emotion patterns can be extracted from new unclassified gestures. Experimental results demonstrate the good recognition accuracy and real-time capabilities of the proposed system. © 2017 Copyright held by the owner/author(s).},
keywords = {Emotion Recognition, Gestural user interfaces, Human computer interaction, Sentiment Analysis},
pubstate = {published},
tppubtype = {inproceedings}
}
2016
Augello, Agnese; Infantino, Ignazio; Manfré, Adriano; Pilato, Giovanni; Vella, Filippo
Analyzing and Discussing Primary Creative Traits of a Robotic Artist Journal Article
In: Biologically Inspired Cognitive Architectures, vol. 17, pp. 22–31, 2016, ISSN: 2212-683X.
Abstract | Links | BibTeX | Tags: Anthropomorphic Robots, Artificial intelligence, Cognitive Architectures, Computational Creativity, Creative Agents, Creative Process, Human computer interaction, Human Robot Interaction, Humanoid Robots, Information Management, Social Robots
@article{augelloAnalyzingDiscussingPrimary2016,
title = {Analyzing and Discussing Primary Creative Traits of a Robotic Artist},
author = {Agnese Augello and Ignazio Infantino and Adriano Manfré and Giovanni Pilato and Filippo Vella},
doi = {10.1016/j.bica.2016.07.006},
issn = {2212-683X},
year = {2016},
date = {2016-01-01},
journal = {Biologically Inspired Cognitive Architectures},
volume = {17},
pages = {22--31},
abstract = {We present a robot aimed at producing a collage formed by a mix of photomontage and digital collage. The artwork is created after a visual and verbal interaction with a human user. The proposed system, through a cognitive architecture, allows the robot to manage the three different phases of the real-time artwork process: (i) taking inspiration from information captured during the postural and verbal interaction with the human user and from the analysis of his/her social web items; (ii) performing a creative process to obtain a model of the artwork; (iii) executing the creative collage composition and providing a significant title. The paper explains, primarily, how the creativity traits of the robot are implemented in the proposed architecture: how ideas are generated through an elaboration that is modulated by affective influences; how the personality and the artistic behavior are modeled by learning and guided by external evaluations; and how motivation and confidence evolve as a function of successes or failures. © 2016 Elsevier B.V. All rights reserved.},
keywords = {Anthropomorphic Robots, Artificial intelligence, Cognitive Architectures, Computational Creativity, Creative Agents, Creative Process, Human computer interaction, Human Robot Interaction, Humanoid Robots, Information Management, Social Robots},
pubstate = {published},
tppubtype = {article}
}