AHCI RESEARCH GROUP
Publications
Papers published in international journals,
proceedings of conferences, workshops and books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTeX record for each paper.
2022
Augello, Agnese; Infantino, Ignazio; Pilato, Giovanni; Vitale, Gianpaolo
Extending Affective Capabilities for Medical Assistive Robots Journal Article
In: Cognitive Systems Research, vol. 73, pp. 21–25, 2022, ISSN: 13890417.
Abstract | Links | BibTeX | Tags: Anthropomorphic Robots, Assistive Robots, Emotion Detection, Facial Expressions, Human computer interaction, Human Robot Interaction, Humanoid Robots, Natural Language Processing, Robotics, Wellbeing
@article{augelloExtendingAffectiveCapabilities2022,
  title     = {Extending Affective Capabilities for Medical Assistive Robots},
  author    = {Augello, Agnese and Infantino, Ignazio and Pilato, Giovanni and Vitale, Gianpaolo},
  doi       = {10.1016/j.cogsys.2021.12.004},
  issn      = {1389-0417},
  year      = {2022},
  date      = {2022-01-01},
  journal   = {Cognitive Systems Research},
  volume    = {73},
  pages     = {21--25},
  abstract  = {In this work, we discuss methodologies and implementation choices to enable a humanoid robot to estimate patients' mood and emotions during postoperative home rehabilitation. The approach is modular and it has been implemented into a SoftBank Pepper robotic architecture; however, the approach is general and it can be easily adapted to other robotic platforms. A sample of an interactive session for the detection of the patient's affective state is also reported. \textcopyright{} 2022 Elsevier B.V.},
  keywords  = {Anthropomorphic Robots, Assistive Robots, Emotion Detection, Facial Expressions, Human computer interaction, Human Robot Interaction, Humanoid Robots, Natural Language Processing, Robotics, Wellbeing},
  pubstate  = {published},
  tppubtype = {article}
}
Gaglio, Giuseppe Fulvio; Augello, Agnese; Caggianese, Giuseppe; Gallo, Luigi
Modellazione 3D di avatar per il Serious Game SMILER Technical Report
no. RT-ICAR-NA-2022-01, 2022.
Abstract | Links | BibTeX | Tags: Healthcare, Human computer interaction, Touchless interaction, Virtual Reality
@techreport{gaglioModellazione3DDi2022,
  title       = {Modellazione 3D di avatar per il Serious Game {SMILER}},
  author      = {Gaglio, Giuseppe Fulvio and Augello, Agnese and Caggianese, Giuseppe and Gallo, Luigi},
  url         = {https://intranet.icar.cnr.it/wp-content/uploads/2022/07/RT-ICAR-NA-2022-01.pdf},
  year        = {2022},
  date        = {2022-01-01},
  urldate     = {2022-01-01},
  institution = {Istituto di Calcolo e Reti ad Alte Prestazioni (ICAR), CNR},
  number      = {RT-ICAR-NA-2022-01},
  abstract    = {Il presente documento illustra la progettazione e la realizzazione di un avatar per il serious game previsto nell'ambito del progetto \guillemotleft{}SMILER\guillemotright{} - Serious gaMes as emerging e-health Interventions for young people with neurologicaL or rEspiratory disoRders. Dopo una breve introduzione del progetto, verranno descritte le tecniche e gli strumenti utilizzati per la modellazione 3D dell'avatar.},
  keywords    = {Healthcare, Human computer interaction, Touchless interaction, Virtual Reality},
  pubstate    = {published},
  tppubtype   = {techreport}
}
2020
Trifirò, Irene; Augello, Agnese; Maniscalco, Umberto; Pilato, Giovanni; Vella, Filippo; Meo, Rosa
How Are You? How a Robot Can Learn to Express Its Own Roboceptions Proceedings Article
In: Cristiani, Matteo; Toro, Carlos; Zanni-Merk, Cecilia; Howlett, Robert J.; Jain, Lakhmi C. (Ed.): Procedia Computer Science, pp. 480–489, Elsevier B.V., 2020.
Abstract | Links | BibTeX | Tags: Human computer interaction, Knowledge Representation, Latent Semantic Analysis, Natural Language Processing, Robotics, Semantic Computing, Social Robots
@inproceedings{trifiroHowAreYou2020,
  title     = {How Are You? How a Robot Can Learn to Express Its Own Roboceptions},
  author    = {Trifir{\`o}, Irene and Augello, Agnese and Maniscalco, Umberto and Pilato, Giovanni and Vella, Filippo and Meo, Rosa},
  editor    = {Cristiani, Matteo and Toro, Carlos and Zanni-Merk, Cecilia and Howlett, Robert J. and Jain, Lakhmi C.},
  doi       = {10.1016/j.procs.2020.08.050},
  year      = {2020},
  date      = {2020-01-01},
  booktitle = {Procedia Computer Science},
  volume    = {176},
  pages     = {480--489},
  publisher = {Elsevier B.V.},
  abstract  = {This work is framed on investigating how a robot can learn associations between linguistic elements, such as words or sentences, and its bodily perceptions, that we named ``roboceptions''. We discuss the possibility of defining such a process of an association through the interaction with human beings. By interacting with a user, the robot can learn to ascribe a meaning to its roboceptions to express them in natural language. Such a process could then be used by the robot in a verbal interaction to detect some words recalling the previously experimented roboceptions. In this paper, we discuss a Dual-NMT approach to realize such an association. However, it requires adequate training corpus. For this reason, we consider two different phases towards the realization of the system, and we show the results of the first phase, comparing two approaches: one based on the Latent Semantic Analysis paradigm and one based on the Random Indexing methodology.},
  keywords  = {Human computer interaction, Knowledge Representation, Latent Semantic Analysis, Natural Language Processing, Robotics, Semantic Computing, Social Robots},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2019
Caggianese, Giuseppe; Colonnese, Valerio; Gallo, Luigi
Situated Visualization in Augmented Reality: Exploring Information Seeking Strategies Proceedings Article
In: 2019 15th International Conference on Signal-Image Technology Internet-Based Systems (SITIS), pp. 390–395, 2019.
Abstract | Links | BibTeX | Tags: Augmented Reality, Human computer interaction, Task analysis, Visualization
@inproceedings{caggianeseSituatedVisualizationAugmented2019,
  title     = {Situated Visualization in Augmented Reality: Exploring Information Seeking Strategies},
  author    = {Caggianese, Giuseppe and Colonnese, Valerio and Gallo, Luigi},
  doi       = {10.1109/SITIS.2019.00069},
  year      = {2019},
  date      = {2019-11-01},
  booktitle = {2019 15th International Conference on Signal-Image Technology Internet-Based Systems (SITIS)},
  pages     = {390--395},
  abstract  = {In recent years augmented reality applications have been increasingly demonstrating the requirement for an interaction with information related to and directly shown in the surrounding environment. Situated information is visualized in its semantic and spatial context, building up an environment enhanced by an information level that dynamically adapts to the production of the information and to the actions of the user. The exploration and manipulation of this type of data through see-through augmented reality devices still represents a challenging task. The development of specific interaction strategies capable to mitigating the current limitations of augmented reality devices is essential. In this context, our contribution has been to design possible solutions to address some of these challenges allowing a dynamic interaction with situated information. Following the visual "information-seeking mantra" proposed by Shneiderman and introducing some "superpowers" for the users, in this work we present different strategies aimed at obtaining an overview and filtering, and acquiring details of a collection of situated data.},
  keywords  = {Augmented Reality, Human computer interaction, Task analysis, Visualization},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Caggianese, Giuseppe; Gallo, Luigi; Neroni, Pietro
The Vive Controllers vs. Leap Motion for Interactions in Virtual Environments: A Comparative Evaluation Proceedings Article
In: Pietro, Giuseppe De; Gallo, Luigi; Howlett, Robert J.; Jain, Lakhmi C.; Vlacic, Ljubo (Ed.): Intelligent Interactive Multimedia Systems and Services, pp. 24–33, Springer International Publishing, Cham, 2019, ISBN: 978-3-319-92231-7.
Abstract | Links | BibTeX | Tags: Head-mounted displays, Human computer interaction, Input devices, User study, Virtual Reality
@inproceedings{caggianeseViveControllersVs2019,
  title     = {The {Vive} Controllers vs. {Leap Motion} for Interactions in Virtual Environments: A Comparative Evaluation},
  author    = {Caggianese, Giuseppe and Gallo, Luigi and Neroni, Pietro},
  editor    = {De Pietro, Giuseppe and Gallo, Luigi and Howlett, Robert J. and Jain, Lakhmi C. and Vlacic, Ljubo},
  doi       = {10.1007/978-3-319-92231-7_3},
  isbn      = {978-3-319-92231-7},
  year      = {2019},
  date      = {2019-01-01},
  booktitle = {Intelligent Interactive Multimedia Systems and Services},
  pages     = {24--33},
  publisher = {Springer International Publishing},
  address   = {Cham},
  series    = {Smart Innovation, Systems and Technologies},
  abstract  = {In recent years, virtual reality technologies have been improving in terms of resolution, convenience and portability, fostering their adoption in real life applications. The Vive Controllers and Leap Motion are two of the most commonly used low-cost input devices for interactions in virtual environments. This paper discusses their differences in terms of interaction design, and presents the results of a user study focusing on manipulation tasks, namely Walking box and blocks, Block tower and Numbered cubes tasks, taking into account both quantitative and qualitative observations. The experimental findings show a general preference for the Vive Controllers, but also highlight that further work is needed to simplify complex tasks.},
  keywords  = {Head-mounted displays, Human computer interaction, Input devices, User study, Virtual Reality},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2018
Ciampi, Mario; Gallo, Luigi; Pietro, Giuseppe De
MITO: An Advanced Toolkit for Medical Imaging Processing and Visualization Technical Report
no. RT-ICAR-NA-2018-02, 2018.
Abstract | BibTeX | Tags: Healthcare, Human computer interaction, Touchless interaction, Virtual Reality
@techreport{ciampiMITOAdvancedToolkit2018,
  title       = {{MITO}: An Advanced Toolkit for Medical Imaging Processing and Visualization},
  author      = {Ciampi, Mario and Gallo, Luigi and De Pietro, Giuseppe},
  year        = {2018},
  date        = {2018-01-01},
  institution = {Istituto di Calcolo e Reti ad Alte Prestazioni (ICAR), CNR},
  number      = {RT-ICAR-NA-2018-02},
  abstract    = {This technical report presents MITO, an open software environment for medical image acquisition, processing, visualization, and navigation. The system is able to interact with PACS servers conforming to the DICOM 3.0 standard in order to retrieve and send radiological image data. Acquired 2D images can be manipulated with MITO for basic operations and advanced processing, like image segmentation, region extraction, image fusion, 3D reconstructions. Advanced 2D and 3D user interfaces allow users to interact with medical images and volumes through various input devices or in a completely touchless way. The high number of downloads of the software system, along with its widespread use in numerous experimental scenarios, show the high extensiveness and performance of the features developed.},
  keywords    = {Healthcare, Human computer interaction, Touchless interaction, Virtual Reality},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Città, Giuseppe; Arnab, Sylvester; Augello, Agnese; Gentile, Manuel; Zielonka, Sebastian Idelsohn; Ifenthaler, Dirk; Infantino, Ignazio; Guardia, Dario La; Manfrè, Adriano; Allegra, Mario
Move Your Mind: Creative Dancing Humanoids as Support to STEAM Activities Journal Article
In: Smart Innovation, Systems and Technologies, vol. 76, pp. 190–199, 2018, ISSN: 21903018.
Abstract | Links | BibTeX | Tags: Anthropomorphic Robots, Cognitive Architectures, Cognitive Systems, Computational Creativity, Education, Embodied Cognition, Enactivism, Human computer interaction, STEAM
@article{cittaMoveYourMind2018,
  title     = {Move Your Mind: Creative Dancing Humanoids as Support to {STEAM} Activities},
  author    = {Citt{\`a}, Giuseppe and Arnab, Sylvester and Augello, Agnese and Gentile, Manuel and Zielonka, Sebastian Idelsohn and Ifenthaler, Dirk and Infantino, Ignazio and La Guardia, Dario and Manfr{\`e}, Adriano and Allegra, Mario},
  editor    = {Howlett, Robert J. and Gallo, Luigi and De Pietro, Giuseppe and Jain, Lakhmi C.},
  doi       = {10.1007/978-3-319-59480-4_20},
  issn      = {2190-3018},
  year      = {2018},
  date      = {2018-01-01},
  journal   = {Smart Innovation, Systems and Technologies},
  volume    = {76},
  pages     = {190--199},
  abstract  = {Educational activities based on dance can support interest in comprehension of concepts from maths, geometry, physics, bio-mechanics and computational thinking. In this work, we discuss a possible use of a dancing humanoid robot as an innovative technology to support and enhance STEAM learning activities. \textcopyright{} Springer International Publishing AG 2018.},
  keywords  = {Anthropomorphic Robots, Cognitive Architectures, Cognitive Systems, Computational Creativity, Education, Embodied Cognition, Enactivism, Human computer interaction, STEAM},
  pubstate  = {published},
  tppubtype = {article}
}
Messina, Antonio; Augello, Agnese; Pilato, Giovanni; Rizzo, Riccardo
BioGraphBot: A Conversational Assistant for Bioinformatics Graph Databases Proceedings Article
In: Barolli, L; Enokido, T (Ed.): INNOVATIVE MOBILE AND INTERNET SERVICES IN UBIQUITOUS COMPUTING, IMIS-2017, pp. 135–146, SPRINGER INTERNATIONAL PUBLISHING AG, GEWERBESTRASSE 11, CHAM, CH-6330, SWITZERLAND, 2018, ISBN: 978-3-319-61542-4 978-3-319-61541-7.
Abstract | Links | BibTeX | Tags: Biomedical, Chatbots, Gremlin, Human computer interaction
@inproceedings{messinaBioGraphBotConversationalAssistant2018,
  title     = {{BioGraphBot}: A Conversational Assistant for Bioinformatics Graph Databases},
  author    = {Messina, Antonio and Augello, Agnese and Pilato, Giovanni and Rizzo, Riccardo},
  editor    = {Barolli, L. and Enokido, T.},
  doi       = {10.1007/978-3-319-61542-4_12},
  isbn      = {978-3-319-61542-4, 978-3-319-61541-7},
  year      = {2018},
  date      = {2018-01-01},
  booktitle = {Innovative Mobile and Internet Services in Ubiquitous Computing, {IMIS-2017}},
  volume    = {612},
  pages     = {135--146},
  publisher = {Springer International Publishing AG},
  address   = {Cham, Switzerland},
  series    = {Advances in Intelligent Systems and Computing},
  abstract  = {Chatbots technology allows to easily add a conversational interface to a large set of applications. In this paper, we show a conversational agent based on ALICE framework aimed at playing the role of interface between human users and a bioinformatics graph database. The system has been embedded into the web frontend of BioGraphDB, a publicly available resource that uses Gremlin as the primary query language. To be successfully exploited, domain experts, such as biologists and bioinformaticians, should also have familiarity with that query language. The use of a chatbot allows translating queries expressed in natural language to queries expressed in Gremlin, simplifying the interaction with BioGraphDB.},
  keywords  = {Biomedical, Chatbots, Gremlin, Human computer interaction},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Nuccio, Carlo; Augello, Agnese; Gaglio, Salvatore; Pilato, Giovanni
Interaction Capabilities of a Robotic Receptionist Journal Article
In: Smart Innovation, Systems and Technologies, vol. 76, pp. 171–180, 2018, ISSN: 21903018.
Abstract | Links | BibTeX | Tags: Anthropomorphic Robots, Computational Creativity, Human computer interaction, Human Robot Interaction, Humanoid Robots, Ontologies, Robotics
@article{nuccioInteractionCapabilitiesRobotic2018,
  title     = {Interaction Capabilities of a Robotic Receptionist},
  author    = {Nuccio, Carlo and Augello, Agnese and Gaglio, Salvatore and Pilato, Giovanni},
  editor    = {De Pietro, Giuseppe and Gallo, Luigi and Howlett, Robert J. and Jain, Lakhmi C.},
  doi       = {10.1007/978-3-319-59480-4_18},
  issn      = {2190-3018},
  year      = {2018},
  date      = {2018-01-01},
  journal   = {Smart Innovation, Systems and Technologies},
  volume    = {76},
  pages     = {171--180},
  abstract  = {A system aimed at facilitating the interaction between a human user and an humanoid robot is presented. The system is suited to answer questions about laboratories activities, people involved, projects, research themes and collaborations among employees. The task is accomplished by the HermiT reasoner invoked by a speech recognition module. The system is capable of navigating a specific ontology making inference on it. The presented system is part of a broader social robot framework whose goal is to give the user a fulfilling social interaction experience, driven by the perception of the robot internal state and involving intuitive and computational creativity capabilities. \textcopyright{} Springer International Publishing AG 2018.},
  keywords  = {Anthropomorphic Robots, Computational Creativity, Human computer interaction, Human Robot Interaction, Humanoid Robots, Ontologies, Robotics},
  pubstate  = {published},
  tppubtype = {article}
}
2017
Gallo, Luigi; Marra, Ivana
Rapporto tecnico contabile (01/06/2015–30/11/2016) Progetto SmartCARE Technical Report
no. RT-ICAR-NA-2017-03, 2017.
Abstract | BibTeX | Tags: Healthcare, Human computer interaction, Touchless interaction
@techreport{galloRapportoTecnicoContabile2017,
  title       = {Rapporto tecnico contabile (01/06/2015--30/11/2016) Progetto {SmartCARE}},
  author      = {Gallo, Luigi and Marra, Ivana},
  year        = {2017},
  date        = {2017-01-01},
  institution = {Istituto di Calcolo e Reti ad Alte Prestazioni (ICAR), CNR},
  number      = {RT-ICAR-NA-2017-03},
  abstract    = {Scopo di questo lavoro {\`e} quello di presentare un report sulle attivit{\`a} svolte dal soggetto beneficiario Istituto di Calcolo e Reti ad Alte Prestazioni (ICAR) del Consiglio Nazionale delle Ricerche (CNR) per la rendicontazione tecnico-amministrativa, con breve descrizione delle attivit{\`a} di ricerca svolte e dettaglio dei costi sostenuti nel periodo 01/06/2015- 30/11/2016, nell'ambito del progetto SmartCARE ``Satellite enhanced Multi-channel ehealth Assistance for Remote Tele-rehabilitation and CAREgiving''- Proposal no. ITSLAB/ESA/ARTES/2014-1 - RFQ/ITT no. AO/1-5891/08/NL/US.},
  keywords    = {Healthcare, Human computer interaction, Touchless interaction},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Gentile, Vito; Milazzo, Fabrizio; Sorce, Salvatore; Gentile, Antonio; Augello, Agnese; Pilato, Giovanni
Body Gestures and Spoken Sentences: A Novel Approach for Revealing User's Emotions Proceedings Article
In: Proceedings - IEEE 11th International Conference on Semantic Computing, ICSC 2017, pp. 69–72, Institute of Electrical and Electronics Engineers Inc., 2017, ISBN: 978-1-5090-4896-0.
Abstract | Links | BibTeX | Tags: Emotion Analysis, Emotion Recognition, Facial Expressions, Gestural user interfaces, Human computer interaction, Semantic Computing, Sentiment Analysis
@inproceedings{gentileBodyGesturesSpoken2017,
  title     = {Body Gestures and Spoken Sentences: A Novel Approach for Revealing User's Emotions},
  author    = {Gentile, Vito and Milazzo, Fabrizio and Sorce, Salvatore and Gentile, Antonio and Augello, Agnese and Pilato, Giovanni},
  doi       = {10.1109/ICSC.2017.14},
  isbn      = {978-1-5090-4896-0},
  year      = {2017},
  date      = {2017-01-01},
  booktitle = {Proceedings - IEEE 11th International Conference on Semantic Computing, ICSC 2017},
  pages     = {69--72},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {In the last decade, there has been a growing interest in emotion analysis research, which has been applied in several areas of computer science. Many authors have contributed to the development of emotion recognition algorithms, considering textual or non verbal data as input, such as facial expressions, gestures or, in the case of multi-modal emotion recognition, a combination of them. In this paper, we describe a method to detect emotions from gestures using the skeletal data obtained from Kinect-like devices as input, as well as a textual description of their meaning. The experimental results show that the correlation existing between body movements and spoken user sentence(s) can be used to reveal user's emotions from gestures. \textcopyright{} 2017 IEEE.},
  keywords  = {Emotion Analysis, Emotion Recognition, Facial Expressions, Gestural user interfaces, Human computer interaction, Semantic Computing, Sentiment Analysis},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Milazzo, Fabrizio; Augello, Agnese; Pilato, Giovanni; Gentile, Vito; Gentile, Antonio; Sorce, Salvatore
Exploiting Correlation between Body Gestures and Spoken Sentences for Real-Time Emotion Recognition Proceedings Article
In: ACM International Conference Proceeding Series, Association for Computing Machinery, 2017, ISBN: 978-1-4503-5237-6.
Abstract | Links | BibTeX | Tags: Emotion Recognition, Gestural user interfaces, Human computer interaction, Sentiment Analysis
@inproceedings{milazzoExploitingCorrelationBody2017,
  title     = {Exploiting Correlation between Body Gestures and Spoken Sentences for Real-Time Emotion Recognition},
  author    = {Milazzo, Fabrizio and Augello, Agnese and Pilato, Giovanni and Gentile, Vito and Gentile, Antonio and Sorce, Salvatore},
  doi       = {10.1145/3125571.3125590},
  isbn      = {978-1-4503-5237-6},
  year      = {2017},
  date      = {2017-01-01},
  booktitle = {ACM International Conference Proceeding Series},
  volume    = {Part F131371},
  publisher = {Association for Computing Machinery},
  abstract  = {Humans communicate their affective states through different media, both verbal and non-verbal, often used at the same time. The knowledge of the emotional state plays a key role to provide personalized and context-related information and services. This is the main reason why several algorithms have been proposed in the last few years for the automatic emotion recognition. In this work we exploit the correlation between one's affective state and the simultaneous body expressions in terms of speech and gestures. Here we propose a system for real-time emotion recognition from gestures. In a first step, the system builds a trusted dataset of association pairs (motion data \textrightarrow{} emotion pattern), also based on textual information. Such dataset is the ground truth for a further step, where emotion patterns can be extracted from new unclassified gestures. Experimental results demonstrate a good recognition accuracy and real-time capabilities of the proposed system. \textcopyright{} 2017 Copyright held by the owner/author(s).},
  keywords  = {Emotion Recognition, Gestural user interfaces, Human computer interaction, Sentiment Analysis},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2016
Augello, Agnese; Infantino, Ignazio; Manfré, Adriano; Pilato, Giovanni; Vella, Filippo
Analyzing and Discussing Primary Creative Traits of a Robotic Artist Journal Article
In: Biologically Inspired Cognitive Architectures, vol. 17, pp. 22–31, 2016, ISSN: 2212683X.
Abstract | Links | BibTeX | Tags: Anthropomorphic Robots, Artificial intelligence, Cognitive Architectures, Computational Creativity, Creative Agents, Creative Process, Human computer interaction, Human Robot Interaction, Humanoid Robots, Information Management, Social Robots
@article{augelloAnalyzingDiscussingPrimary2016,
  title     = {Analyzing and Discussing Primary Creative Traits of a Robotic Artist},
  author    = {Augello, Agnese and Infantino, Ignazio and Manfré, Adriano and Pilato, Giovanni and Vella, Filippo},
  doi       = {10.1016/j.bica.2016.07.006},
  issn      = {2212-683X},
  year      = {2016},
  date      = {2016-01-01},
  journal   = {Biologically Inspired Cognitive Architectures},
  volume    = {17},
  pages     = {22--31},
  abstract  = {We present a robot aimed at producing a collage formed by a mix of photomontage and digital collage. The artwork is created after a visual and verbal interaction with a human user. The proposed system, through a cognitive architecture, allows the robot to manage the three different phases of the real-time artwork process: (i) taking inspiration from information captured during the postural and verbal interaction with the human user and from the analysis of his/her social web items; (ii) performing a creative process to obtain a model of the artwork; (iii) executing the creative collage composition and providing a significant title. The paper explains, primarily, how the creativity traits of the robot are implemented in the proposed architecture: how ideas are generated through an elaboration that is modulated by affective influences; how the personality and the artistic behavior are modeled by learning and guided by external evaluations; the motivation and the confidence evolution as a function of successes or failures. \textcopyright{} 2016 Elsevier B.V. All rights reserved.},
  keywords  = {Anthropomorphic Robots, Artificial intelligence, Cognitive Architectures, Computational Creativity, Creative Agents, Creative Process, Human computer interaction, Human Robot Interaction, Humanoid Robots, Information Management, Social Robots},
  pubstate  = {published},
  tppubtype = {article}
}
Augello, Agnese; Maniscalco, Umberto; Pilato, Giovanni; Vella, Filippo
Disaster Prevention Virtual Advisors through Soft Sensor Paradigm Journal Article
In: Smart Innovation, Systems and Technologies, vol. 55, pp. 619–627, 2016, ISSN: 21903018.
Abstract | Links | BibTeX | Tags: Artificial intelligence, Conversational Agents, Decision Support Systems, Disaster Prevention, Human computer interaction, Natural Language Processing, Ontologies, Sensor systems
@article{augelloDisasterPreventionVirtual2016,
  title     = {Disaster Prevention Virtual Advisors through Soft Sensor Paradigm},
  author    = {Augello, Agnese and Maniscalco, Umberto and Pilato, Giovanni and Vella, Filippo},
  editor    = {De Pietro, Giuseppe and Gallo, Luigi and Howlett, Robert J. and Jain, Lakhmi C.},
  doi       = {10.1007/978-3-319-39345-2_55},
  issn      = {2190-3018},
  year      = {2016},
  date      = {2016-01-01},
  journal   = {Smart Innovation, Systems and Technologies},
  volume    = {55},
  pages     = {619--627},
  abstract  = {In this paper we illustrate the architecture of an intelligent advisor agent aimed at limiting, or as far as possible preventing, the damages caused by catastrophic events, such as floods and landslides. The agent models the domain and makes forecasting by exploiting both ontology models and belief network models. Furthermore, it uses a monitoring network to recommend preventive measures and giving alerts, if necessary, before that the event happens. The monitoring network can be implemented through both physical and soft sensors: this choice makes the measurements more adequate and available also in case of failure of some of the physical sensors. The front-end of the agent is made by a chat-bot, capable to interact with human users using natural language. \textcopyright{} Springer International Publishing Switzerland 2016.},
  keywords  = {Artificial intelligence, Conversational Agents, Decision Support Systems, Disaster Prevention, Human computer interaction, Natural Language Processing, Ontologies, Sensor systems},
  pubstate  = {published},
  tppubtype = {article}
}
}
Spiccia, Carmelo; Augello, Agnese; Pilato, Giovanni; Vassallo, Giorgio
Semantic Word Error Rate for Sentence Similarity Proceedings Article
In: Proceedings - 2016 IEEE 10th International Conference on Semantic Computing, ICSC 2016, pp. 266–269, Institute of Electrical and Electronics Engineers Inc., 2016, ISBN: 978-1-5090-0661-8.
Abstract | Links | BibTeX | Tags: Human computer interaction, Latent Semantic Analysis, Natural Language Processing, Semantic Computing
@inproceedings{spicciaSemanticWordError2016,
  title     = {Semantic Word Error Rate for Sentence Similarity},
  author    = {Spiccia, Carmelo and Augello, Agnese and Pilato, Giovanni and Vassallo, Giorgio},
  doi       = {10.1109/ICSC.2016.11},
  isbn      = {978-1-5090-0661-8},
  year      = {2016},
  date      = {2016-01-01},
  booktitle = {Proceedings - 2016 IEEE 10th International Conference on Semantic Computing, ICSC 2016},
  pages     = {266--269},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {Sentence similarity measures have applications in several tasks, including: Machine Translation, Paraphrase Identification, Speech Recognition, Question-answering and Text Summarization. However, measures designed for these tasks are aimed at assessing equivalence rather than resemblance, partly departing from human cognition of similarity. While this is reasonable for these activities, it hinders the applicability of sentence similarity measures to other tasks. We therefore propose a new sentence similarity measure specifically designed for resemblance evaluation, in order to cover these fields better. Experimental results are discussed. \textcopyright{} 2016 IEEE.},
  keywords  = {Human computer interaction, Latent Semantic Analysis, Natural Language Processing, Semantic Computing},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2012
Gallo, Luigi; Placitelli, Alessio Pierluigi
Studio tecnologico per il porting di MITO (Medical Imaging TOolkit) su dispositivi mobili Technical Report
no. RT-ICAR-NA-2012-8, 2012.
Abstract | BibTeX | Tags: Healthcare, Human computer interaction
@techreport{galloStudioTecnologicoPorting2012,
  title       = {Studio tecnologico per il porting di {MITO} ({Medical Imaging TOolkit}) su dispositivi mobili},
  author      = {Gallo, Luigi and Placitelli, Alessio Pierluigi},
  year        = {2012},
  date        = {2012-09-01},
  institution = {Istituto di Calcolo e Reti ad Alte Prestazioni (ICAR), CNR},
  number      = {RT-ICAR-NA-2012-8},
  abstract    = {Sono presentate tre strategie per il porting del software MITO su piattaforme mobile. La prima strategia, raccomandata, prevede l'uso della libreria VES (Kitware) per eseguire i render sul dispositivo, fornendo una comoda base di sviluppo limitandosi alle piattaforme supportate. La seconda strategia prevede l'uso delle tecnologie HTML5 e WebGL, eseguendo i render sul dispositivo, permettendo l'esecuzione del software su tutte le architetture che supportano tali standard. La terza ed ultima strategia, prevede lo sviluppo di un thin client per ciascuna piattaforma supportata che si appoggi ad una render farm remota per l'esecuzione dei calcoli grafici.},
  keywords    = {Healthcare, Human computer interaction},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Augello, Agnese; Pilato, Giovanni; Machi, Alberto; Gaglio, Salvatore
An Approach to Enhance Chatbot Semantic Power and Maintainability: Experiences within the FRASI Project Proceedings Article
In: Proceedings - IEEE 6th International Conference on Semantic Computing, ICSC 2012, pp. 186–193, 2012, ISBN: 978-0-7695-4859-3.
Abstract | Links | BibTeX | Tags: Artificial intelligence, Human computer interaction, Ontologies, Semantic Computing
@inproceedings{augelloApproachEnhanceChatbot2012,
  title     = {An Approach to Enhance Chatbot Semantic Power and Maintainability: Experiences within the {FRASI} Project},
  author    = {Augello, Agnese and Pilato, Giovanni and Machi, Alberto and Gaglio, Salvatore},
  doi       = {10.1109/ICSC.2012.26},
  isbn      = {978-0-7695-4859-3},
  year      = {2012},
  date      = {2012-01-01},
  booktitle = {Proceedings - IEEE 6th International Conference on Semantic Computing, ICSC 2012},
  pages     = {186--193},
  abstract  = {The paper illustrates the implementation and semantic enhancement of a domain-oriented Question-Answering system based on a pattern-matching chat bot technology, developed within an industrial project, named FRASI. The main difficulty in building a KB for a chat bot is to handwrite all possible question-answer pairs that constitute the KB. The proposed approach simplifies the chat bot realization thanks to two solutions. The first one uses an ontology, which is exploited in a twofold manner: to construct dynamic answers as a result of an inference process about the domain, and to automatically populate, off-line, the chat bot KB with sentences that can be derived from the ontology, describing properties and relations between concepts involved in the dialogue. The second one is to preprocess user sentences, and to reduce them to a simpler structure that can be referred to existing elements of the chat bot KB. The enhanced symbolic reduction of user questions and the automatic population of question templates in the chat bot KB from domain ontology have been implemented as two computational services (external modules). \textcopyright{} 2012 IEEE.},
  keywords  = {Artificial intelligence, Human computer interaction, Ontologies, Semantic Computing},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2011
Blandford, Ann; Pietro, Giuseppe De; Gallo, Luigi; Gimblett, Andy; Oladimeji, Patrick; Thimbleby, Harold
The Biggest Challenges Are the Social Ones: Workshop Report from EICS4Med 2011 Proceedings Article
In: Proceedings of the Workshop on Engineering Interactive Computer Systems for Medicine and Healthcare, M. Jeusfeld c/o Redaktion Sun SITE, Informatik V, RWTH Aachen. - Aachen, Pisa, Italy, 2011.
Abstract | BibTeX | Tags: Healthcare, Human computer interaction
@inproceedings{blandfordBiggestChallengesAre2011,
title = {The Biggest Challenges Are the Social Ones: Workshop Report from {EICS4Med} 2011},
author = {Blandford, Ann and De Pietro, Giuseppe and Gallo, Luigi and Gimblett, Andy and Oladimeji, Patrick and Thimbleby, Harold},
year = {2011},
date = {2011-06-01},
booktitle = {Proceedings of the Workshop on Engineering Interactive Computer Systems for Medicine and Healthcare},
volume = {727},
publisher = {M. Jeusfeld c/o Redaktion Sun SITE, Informatik V, RWTH Aachen. - Aachen},
address = {Pisa, Italy},
series = {CEUR Workshop Proceedings},
abstract = {EICS4Med was held in conjunction with EICS2011 in Pisa. Many challenges to designing innovative healthcare applications were identified, including the tendency to design conservatively to avoid patient harm and the difficulties of establishing rich communications between clinicians and engineers. In considering the timescales for developments, the group concluded that technical developments are more easily achieved than the equally essential cultural changes, such that which errors are accepted and regarded as learning opportunities, and investment is directed toward the design of safer, more usable systems.},
keywords = {Healthcare, Human computer interaction},
pubstate = {published},
tppubtype = {inproceedings}
}
Blandford, Ann; Pietro, Giuseppe De; Gallo, Luigi; Gimblett, Andy; Oladimeji, Patrick; Thimbleby, Harold
Engineering Interactive Computer Systems for Medicine and Healthcare (EICS4Med) Proceedings Article
In: EICS '11 Proceedings of the 3rd ACM SIGCHI Symposium on Engineering Interactive Computing Systems, pp. 341–342, ACM, Pisa, Italy, 2011, ISBN: 978-1-4503-0670-6.
Abstract | Links | BibTeX | Tags: Formal methods, Healthcare, Human computer interaction, Medical devices, Mobile computing, Modeling, Natural User Interfaces, Safety
@inproceedings{blandfordEngineeringInteractiveComputer2011,
title = {Engineering Interactive Computer Systems for Medicine and Healthcare ({EICS4Med})},
author = {Blandford, Ann and De Pietro, Giuseppe and Gallo, Luigi and Gimblett, Andy and Oladimeji, Patrick and Thimbleby, Harold},
doi = {10.1145/1996461.1996556},
isbn = {978-1-4503-0670-6},
year = {2011},
date = {2011-06-01},
booktitle = {{EICS} '11 Proceedings of the 3rd {ACM} {SIGCHI} Symposium on Engineering Interactive Computing Systems},
pages = {341--342},
publisher = {ACM},
address = {Pisa, Italy},
abstract = {This workshop brings together and develops the community of researchers and practitioners concerned with the design and evaluation of interactive medical devices (infusion pumps, etc) and systems (electronic patient records, etc), to deliver a roadmap for future research in this area. The workshop involves researchers and practitioners designing and evaluating dependable systems in a variety of contexts, and those developing innovative interactive computer systems for healthcare. These pose particular challenges because of the inherent variability - of patients, system configurations, and so on. Participants will represent a range of perspectives, including safety engineering and innovative design.},
keywords = {Formal methods, Healthcare, Human computer interaction, Medical devices, Mobile computing, Modeling, Natural User Interfaces, Safety},
pubstate = {published},
tppubtype = {inproceedings}
}
2009
Augello, Agnese; Pilato, Giovanni; Gaglio, Salvatore
A Conversational Agent to Support Decisions in SimCity like Games Proceedings Article
In: ICSC 2009 - 2009 IEEE International Conference on Semantic Computing, pp. 367–372, 2009, ISBN: 978-0-7695-3800-6.
Abstract | Links | BibTeX | Tags: Artificial intelligence, Conversational Agents, Decision Support Systems, Human computer interaction, Semantic Computing
@inproceedings{augelloConversationalAgentSupport2009,
title = {A Conversational Agent to Support Decisions in {SimCity} like Games},
author = {Augello, Agnese and Pilato, Giovanni and Gaglio, Salvatore},
doi = {10.1109/ICSC.2009.74},
isbn = {978-0-7695-3800-6},
year = {2009},
date = {2009-01-01},
booktitle = {{ICSC} 2009 - 2009 {IEEE} International Conference on Semantic Computing},
pages = {367--372},
abstract = {Computational intelligent techniques applied to economics have played an important role in the last years. In this paper we propose a framework based on an intelligent conversational agent embedded with a decision support system, aimed at suggesting the best managing strategies for a game-based model of a virtual town. The agent tries to prospect the future evolutions of particular choices taken by the user. Interaction is conducted through a natural language interface built as an Alice-based conversational agent. \textcopyright{} 2009 IEEE.},
keywords = {Artificial intelligence, Conversational Agents, Decision Support Systems, Human computer interaction, Semantic Computing},
pubstate = {published},
tppubtype = {inproceedings}
}
Gambino, Orazio; Augello, Agnese; Caronia, Alessandro; Pilato, Giovanni; Pirrone, Roberto; Gaglio, Salvatore
A Web-Oriented Java 3D Talking Head Journal Article
In: Advances in Intelligent and Soft Computing, vol. 60, pp. 295–311, 2009, ISSN: 18675662.
Abstract | Links | BibTeX | Tags: 3D Modelling, Animation, Human computer interaction, Natural Language Processing
@article{gambinoWeborientedJava3D2009,
title = {A Web-Oriented {Java 3D} Talking Head},
author = {Gambino, Orazio and Augello, Agnese and Caronia, Alessandro and Pilato, Giovanni and Pirrone, Roberto and Gaglio, Salvatore},
editor = {Hippe, Z. S. and Kulikowski, J. L.},
doi = {10.1007/978-3-642-03202-8_24},
issn = {18675662},
year = {2009},
date = {2009-01-01},
journal = {Advances in Intelligent and Soft Computing},
volume = {60},
pages = {295--311},
abstract = {Facial animation denotes all those systems performing speech synchronization with an animated face model. These kinds of systems are named Talking Heads or Talking Faces. At the same time simple dialogue systems called chatbots have been developed. Chatbots are software agents able to interact with users through pattern-matching based rules. In this paper a Talking Head oriented to the creation of a Chatbot is presented. An answer is generated in form of text triggered by an input query. The answer is converted into a facial animation using a 3D face model whose lips movements are synchronized with the sound produced by a speech synthesis module. Our Talking Head exploits the naturalness of the facial animation and provides a real-time interactive interface to the user. Besides, it is specifically suited for being used on the web. This leads to a set of requirements to be satisfied, like: simple installation, visual quality, fast download, and interactivity in real time. The web infrastructure has been realized using the Client-Server model. The Chatbot, the Natural Language Processing and the Digital Signal Processing services are delegated to the server. The client is involved in animation and synchronization. This way, the server can handle multiple requests from clients. The conversation module has been implemented using the A.L.I.C.E. (Artificial Linguistic Internet Computer Entity) technology. The output of the chatbot is given input to the Natural Language Processing (Comedia Speech), incorporating a text analyzer, a letter-to-sound module and a module for the generation of prosody. The client, through the synchronization module, computes the time of real duration of the animation and the duration of each phoneme and consequently of each viseme. The morphing module performs the animation of the facial model and the voice reproduction. As a result, the user will see the answer to question both in textual form and in the form of visual animation. \textcopyright{} Springer-Verlag Berlin Heidelberg 2009.},
keywords = {3D Modelling, Animation, Human computer interaction, Natural Language Processing},
pubstate = {published},
tppubtype = {article}
}
Sorce, Salvatore; Augello, Agnese; Santangelo, Antonella; Genco, Alessandro; Gentile, Antonio; Gaglio, Salvatore; Pilato, Giovanni
An RFID Framework for Multimodal Service Provision Proceedings Article
In: Proceedings of the International Conference on Complex, Intelligent and Software Intensive Systems, CISIS 2009, pp. 730–735, 2009, ISBN: 978-0-7695-3575-3.
Abstract | Links | BibTeX | Tags: Common Sense Reasoning, Context awareness, Conversational Agents, Human computer interaction, Multimodal Interaction, Ontologies, RFID Technology
@inproceedings{sorceRFIDFrameworkMultimodal2009,
title = {An {RFID} Framework for Multimodal Service Provision},
author = {Sorce, Salvatore and Augello, Agnese and Santangelo, Antonella and Genco, Alessandro and Gentile, Antonio and Gaglio, Salvatore and Pilato, Giovanni},
doi = {10.1109/CISIS.2009.168},
isbn = {978-0-7695-3575-3},
year = {2009},
date = {2009-01-01},
booktitle = {Proceedings of the International Conference on Complex, Intelligent and Software Intensive Systems, {CISIS} 2009},
pages = {730--735},
abstract = {In recent years there has been a growing interest toward the development of pervasive and context-aware services, and RFID technology played a relevant role in the context sensing task. We propose the use of RFID technology together with a conversational agent in order to implement a multimodal information retrieval service we call SensorMesh. The information acquired from RFID tags about the nearest point of interest is processed by the conversational agent that carries a more natural interaction with the user, also exploiting a common sense ontology. The service is accessible using a multimodal browser on Personal Digital Assistants (PDAs); the browser allows the user to interact with the conversational agent by means of spoken language instead of the traditional, keyboard- (or stylus-) based input systems. The resulting system offers a more natural interaction with respect to traditional prerecorded, audio-visual services, and it is particularly suitable for non technology-skilled users. \textcopyright{} 2009 IEEE.},
keywords = {Common Sense Reasoning, Context awareness, Conversational Agents, Human computer interaction, Multimodal Interaction, Ontologies, RFID Technology},
pubstate = {published},
tppubtype = {inproceedings}
}
2008
Gaglio, Salvatore; Pilato, Giovanni; Pirrone, Roberto; Gambino, Orazio; Augello, Agnese; Caronia, Alessandro
A Java3D Talking Head for a Chatbot Proceedings Article
In: Proceedings - CISIS 2008: 2nd International Conference on Complex, Intelligent and Software Intensive Systems, pp. 709–714, 2008, ISBN: 0-7695-3109-1 978-0-7695-3109-0.
Abstract | Links | BibTeX | Tags: 3D Modelling, Animation, Artificial intelligence, Human computer interaction, Talking Heads
@inproceedings{gaglioJava3DTalkingHead2008,
title = {A {Java3D} Talking Head for a Chatbot},
author = {Gaglio, Salvatore and Pilato, Giovanni and Pirrone, Roberto and Gambino, Orazio and Augello, Agnese and Caronia, Alessandro},
doi = {10.1109/CISIS.2008.57},
isbn = {978-0-7695-3109-0},
year = {2008},
date = {2008-01-01},
booktitle = {Proceedings - {CISIS} 2008: 2nd International Conference on Complex, Intelligent and Software Intensive Systems},
pages = {709--714},
abstract = {Facial animation is referred to all those systems performing the speech synchronization with an animated face model. This kind of systems are called "Talking Head" or "Talking Face". In this paper a Talking Head oriented to the creation of a Chatbot is presented. It requires an input query and an answer is generated in form of text. The answer is transduced into a facial animation using a 3D face model whose lips movements are synchronized with the sound produced by a speech synthesis module. Our "Talking Head" explores the naturalness of the facial animation and provides a real-time interactive interface to the user. The WEB infrastructure has been realized using the Client-Server model delegating the Chatbot, the Natural Language Processing and the Digital Signal Processing services to the server, while the client is involved in animation, synchronization; in this way, the server can handle multiple requests from clients. \textcopyright{} 2008 IEEE.},
keywords = {3D Modelling, Animation, Artificial intelligence, Human computer interaction, Talking Heads},
pubstate = {published},
tppubtype = {inproceedings}
}
Gambino, Orazio; Augello, Agnese; Caronia, Alessandro; Pilato, Giovanni; Pirrone, Roberto; Gaglio, Salvatore
Virtual Conversation with a Real Talking Head Proceedings Article
In: 2008 Conference on Human System Interaction, HSI 2008, pp. 263–268, 2008, ISBN: 1-4244-1543-8 978-1-4244-1543-4.
Abstract | Links | BibTeX | Tags: 3D Modelling, Animation, Artificial intelligence, Computational Linguistics, Conversational Agents, Human computer interaction, Natural Language Processing, Talking Heads
@inproceedings{gambinoVirtualConversationReal2008,
title = {Virtual Conversation with a Real Talking Head},
author = {Gambino, Orazio and Augello, Agnese and Caronia, Alessandro and Pilato, Giovanni and Pirrone, Roberto and Gaglio, Salvatore},
doi = {10.1109/HSI.2008.4581446},
isbn = {978-1-4244-1543-4},
year = {2008},
date = {2008-01-01},
booktitle = {2008 Conference on Human System Interaction, {HSI} 2008},
pages = {263--268},
abstract = {A talking head is a system performing an animated face model synchronized with a speech synthesis module. It is used as a presentation layer of a conversational agent which provides an answer when a query is written as an input by the user. The textual answer is converted into facial movements of a 3D face model whose lips and tongue movements are synchronized with the sound of the synthetic voice. The Client-Server paradigm has been used for the WEB infrastructure delegating the animation and synchronization to the client, so that the server can satisfy multiple requests from clients; while the Chatbot, the Digital Signal Processing and the Natural language Processing are provided by the server. \textcopyright{} 2008 IEEE.},
keywords = {3D Modelling, Animation, Artificial intelligence, Computational Linguistics, Conversational Agents, Human computer interaction, Natural Language Processing, Talking Heads},
pubstate = {published},
tppubtype = {inproceedings}
}
2007
Santangelo, Antonella; Augello, Agnese; Sorce, Salvatore; Pilato, Giovanni; Gentile, Antonio; Genco, Alessandro; Gaglio, Salvatore
A Virtual Shopper Customer Assistant in Pervasive Environments Journal Article
In: Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), vol. 4805 LNCS, no. PART 1, pp. 447–456, 2007, ISSN: 03029743.
Abstract | Links | BibTeX | Tags: Chatbots, Human computer interaction, Knowledge Representation, Multimodal Interaction, Pervasive Systems
@article{santangeloVirtualShopperCustomer2007,
title = {A Virtual Shopper Customer Assistant in Pervasive Environments},
author = {Santangelo, Antonella and Augello, Agnese and Sorce, Salvatore and Pilato, Giovanni and Gentile, Antonio and Genco, Alessandro and Gaglio, Salvatore},
doi = {10.1007/978-3-540-76888-3_67},
issn = {03029743},
year = {2007},
date = {2007-01-01},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
volume = {4805 LNCS},
number = {PART 1},
pages = {447--456},
abstract = {In this work we propose a smart, human-like PDA-based personal shopper assistant. The system is able to understand the user needs through a spoken natural language interaction and then stores the preferences of the potential customer. Subsequently the personal shopper suggests the most suitable items and shops that match the user profile. The interaction is given by automatic speech recognition and text-to-speech technologies; localization is allowed by the use of Wireless technologies, while the interaction is performed by an Alice-based chat-bot endowed with reasoning capabilities. Besides, being implemented on a PDA, the personal shopper satisfies the user needs of mobility and it is also usable on different mobile devices. \textcopyright{} Springer-Verlag Berlin Heidelberg 2007.},
keywords = {Chatbots, Human computer interaction, Knowledge Representation, Multimodal Interaction, Pervasive Systems},
pubstate = {published},
tppubtype = {article}
}