AHCI RESEARCH GROUP
Publications
Papers published in international journals, conference and workshop proceedings, and books.
OUR RESEARCH
Scientific Publications
2020
Augello, Agnese; Ciulla, Angelo; Cuzzocrea, Alfredo; Gaglio, Salvatore; Pilato, Giovanni; Vella, Filippo
A Kinect-Based Gesture Acquisition and Reproduction System for Humanoid Robots Proceedings Article
In: Gervasi, O; Murgante, B; Misra, S; Garau, C; Blecic, I; Taniar, D; Apduhan, BO; Rocha, AMAC; Tarantino, E; Torre, CM; Karaca, Y (Ed.): COMPUTATIONAL SCIENCE AND ITS APPLICATIONS - ICCSA 2020, PT I, pp. 967–977, SPRINGER INTERNATIONAL PUBLISHING AG, GEWERBESTRASSE 11, CHAM, CH-6330, SWITZERLAND, 2020, ISBN: 978-3-030-58799-4 978-3-030-58798-7.
Tags: Gestural Interaction, Humanoid Robots, RGB-D, Social Robots
@inproceedings{augelloKinectBasedGestureAcquisition2020,
title = {A Kinect-Based Gesture Acquisition and Reproduction System for Humanoid Robots},
author = {Agnese Augello and Angelo Ciulla and Alfredo Cuzzocrea and Salvatore Gaglio and Giovanni Pilato and Filippo Vella},
editor = {O Gervasi and B Murgante and S Misra and C Garau and I Blecic and D Taniar and BO Apduhan and AMAC Rocha and E Tarantino and CM Torre and Y Karaca},
doi = {10.1007/978-3-030-58799-4_69},
isbn = {978-3-030-58799-4 978-3-030-58798-7},
year = {2020},
date = {2020-01-01},
booktitle = {COMPUTATIONAL SCIENCE AND ITS APPLICATIONS - ICCSA 2020, PT I},
volume = {12249},
pages = {967--977},
publisher = {SPRINGER INTERNATIONAL PUBLISHING AG},
address = {GEWERBESTRASSE 11, CHAM, CH-6330, SWITZERLAND},
series = {Lecture Notes in Computer Science},
abstract = {The paper illustrates a system that endows a humanoid robot with the capability to mimic the motion of a human user in real time, serving as a basis for further gesture-based human-robot interactions. The described approach uses the Microsoft Kinect as a low-cost alternative to expensive motion capture devices.},
keywords = {Gestural Interaction, Humanoid Robots, RGB-D, Social Robots},
pubstate = {published},
tppubtype = {inproceedings}
}
2017
Brancati, Nadia; Caggianese, Giuseppe; Frucci, Maria; Gallo, Luigi; Neroni, Pietro
Experiencing Touchless Interaction with Augmented Content on Wearable Head-Mounted Displays in Cultural Heritage Applications Journal Article
In: Personal and Ubiquitous Computing, vol. 21, no. 2, pp. 203–217, 2017, ISSN: 1617-4909, 1617-4917.
Tags: Augmented Reality, Point-and-click interface, RGB-D, Touchless interaction, User study
@article{brancatiExperiencingTouchlessInteraction2017,
title = {Experiencing Touchless Interaction with Augmented Content on Wearable Head-Mounted Displays in Cultural Heritage Applications},
author = {Nadia Brancati and Giuseppe Caggianese and Maria Frucci and Luigi Gallo and Pietro Neroni},
url = {http://link.springer.com/10.1007/s00779-016-0987-8},
doi = {10.1007/s00779-016-0987-8},
issn = {1617-4909, 1617-4917},
year = {2017},
date = {2017-01-01},
urldate = {2016-12-06},
journal = {Personal and Ubiquitous Computing},
volume = {21},
number = {2},
pages = {203--217},
abstract = {The cultural heritage could benefit significantly from the integration of wearable augmented reality (AR). This technology has the potential to guide the user and provide her with both in-depth information, without distracting her from the context, and a natural interaction, which can further allow her to explore and navigate her way through a huge amount of cultural information. The integration of touchless interaction and augmented reality is particularly challenging. On the technical side, the human-machine interface has to be reliable so as to guide users across the real world, which is composed of cluttered backgrounds and severe changes in illumination conditions. On the user experience side, the interface has to provide precise interaction tools while minimizing the perceived task difficulty. In this study, an interactive wearable AR system to augment the environment with cultural information is described. To confer robustness to the interface, a strategy that takes advantage of both depth and color data to find the most reliable information on each single frame is introduced. Moreover, the results of an ISO 9241-9 user study performed in both indoor and outdoor conditions are presented and discussed. The experimental results show that, by using both depth and color data, the interface can behave consistently in different indoor and outdoor scenarios. Furthermore, the results show that the presence of a virtual pointer in the augmented visualization significantly reduces the users' error rate in selection tasks.},
keywords = {Augmented Reality, Point-and-click interface, RGB-D, Touchless interaction, User study},
pubstate = {published},
tppubtype = {article}
}
Vella, Filippo; Augello, Agnese; Maniscalco, Umberto; Bentivenga, Vincenzo; Gaglio, Salvatore
Classification of Indoor Actions through Deep Neural Networks Proceedings Article
In: Dipanda, A; Chbeir, R; Gallo, L; Yetongnon, K; De Pietro, G (Ed.): Proceedings - 12th International Conference on Signal Image Technology and Internet-Based Systems, SITIS 2016, pp. 82–87, Institute of Electrical and Electronics Engineers Inc., 2017, ISBN: 978-1-5090-5698-9.
Tags: Action Recognition, Convolutional Neural Networks, Deep learning, RGB-D
@inproceedings{vellaClassificationIndoorActions2017,
title = {Classification of Indoor Actions through Deep Neural Networks},
author = {Filippo Vella and Agnese Augello and Umberto Maniscalco and Vincenzo Bentivenga and Salvatore Gaglio},
editor = {A Dipanda and R Chbeir and L Gallo and K Yetongnon and G De Pietro},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85019213644&doi=10.1109%2fSITIS.2016.22&partnerID=40&md5=329d35941a322add5df73469e33e0f07},
doi = {10.1109/SITIS.2016.22},
isbn = {978-1-5090-5698-9},
year = {2017},
date = {2017-01-01},
booktitle = {Proceedings - 12th International Conference on Signal Image Technology and Internet-Based Systems, SITIS 2016},
pages = {82--87},
publisher = {Institute of Electrical and Electronics Engineers Inc.},
abstract = {The rising number of elderly people urges research into systems able to monitor and support people inside their domestic environment. An automatic system capturing data about the position of a person in the house, through accelerometers and RGB-D cameras, can monitor the person's activities and produce outputs associating the movements with given tasks or predicting the set of activities that will be executed. For the task of classifying the activities, we considered a Deep Convolutional Neural Network. We compared two different deep networks and analyzed their outputs. © 2016 IEEE.},
keywords = {Action Recognition, Convolutional Neural Networks, Deep learning, RGB-D},
pubstate = {published},
tppubtype = {inproceedings}
}
2015
Brancati, Nadia; Caggianese, Giuseppe; Frucci, Maria; Gallo, Luigi; Neroni, Pietro
Robust Fingertip Detection in Egocentric Vision under Varying Illumination Conditions Proceedings Article
In: IEEE International Conference on Multimedia & Expo Workshops (ICMEW), pp. 1–6, IEEE, Torino, Italy, 2015, ISBN: 978-1-4799-7079-7.
Tags: Ego-Vision, Fingertip detection, Point-and-click interface, RGB-D, Touchless interaction
@inproceedings{brancatiRobustFingertipDetection2015,
title = {Robust Fingertip Detection in Egocentric Vision under Varying Illumination Conditions},
author = {Nadia Brancati and Giuseppe Caggianese and Maria Frucci and Luigi Gallo and Pietro Neroni},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=7169798},
doi = {10.1109/ICMEW.2015.7169798},
isbn = {978-1-4799-7079-7},
year = {2015},
date = {2015-07-01},
urldate = {2016-12-06},
booktitle = {IEEE International Conference on Multimedia & Expo Workshops (ICMEW)},
pages = {1--6},
publisher = {IEEE},
address = {Torino, Italy},
abstract = {Wearable augmented reality (AR) systems have the potential to significantly lower the barriers to accessing information, while leaving the focus of the user's attention on the real world. To reveal their true potential, the human-machine interface is crucial. A touchless point-and-click interface for wearable AR systems may be suitable for use in many real-world applications, but it demands fingertip detection techniques robust enough to cope with cluttered backgrounds and varying illumination conditions. In this paper we propose an approach that, by automatically choosing between color and depth features, detects the hand and then the user's fingertip both in indoor and outdoor scenarios, with or without adequate illumination.},
keywords = {Ego-Vision, Fingertip detection, Point-and-click interface, RGB-D, Touchless interaction},
pubstate = {published},
tppubtype = {inproceedings}
}