AHCI RESEARCH GROUP
Publications
Papers published in international journals, conference and workshop proceedings, and books.
OUR RESEARCH
Scientific Publications
How to
You can use the tag cloud to select only the papers dealing with specific research topics.
You can expand the Abstract, Links and BibTeX record for each paper.
2019
Essmaeel, Kyis; Migniot, Cyrille; Dipanda, Albert; Gallo, Luigi; Damiani, Ernesto; De Pietro, Giuseppe
A New 3D Descriptor for Human Classification: Application for Human Detection in a Multi-Kinect System Journal Article
In: Multimedia Tools and Applications, vol. 78, no. 16, pp. 22479–22508, 2019, ISSN: 1573-7721.
Abstract | Links | BibTeX | Tags: 3D descriptor, Classification, Human detection, Kinect
@article{essmaeelNew3DDescriptor2019,
title = {A New 3D Descriptor for Human Classification: Application for Human Detection in a Multi-Kinect System},
author = {Kyis Essmaeel and Cyrille Migniot and Albert Dipanda and Luigi Gallo and Ernesto Damiani and Giuseppe De Pietro},
url = {https://doi.org/10.1007/s11042-019-7568-6},
doi = {10.1007/s11042-019-7568-6},
issn = {1573-7721},
year = {2019},
date = {2019-08-01},
journal = {Multimedia Tools and Applications},
volume = {78},
number = {16},
pages = {22479--22508},
abstract = {In this paper we present a new 3D descriptor for human classification and a human detection method based on this descriptor. The proposed 3D descriptor allows classification of an object represented by a point cloud, as human or non-human. It is derived from the well-known Histogram of Oriented Gradient by employing surface normals instead of gradients. The process consists in an appropriate subdivision of the object point cloud into blocks. These blocks provide the spatial distribution modeling of the surface normal orientation into the different parts of the object. This distribution modelling is expressed as a histogram. In addition we have set up a multi-kinect acquisition system that provides us with Complete Point Clouds (CPC) (i.e. 360° view). Such CPCs enable a suitable processing, particularly in case of occlusions. Moreover they allow for the determination of the human frontal orientation. Based on the proposed 3D descriptor, we have developed a human detection method that is applied on CPCs. First, we evaluated the 3D descriptor over a set of CPC candidates by using the Support Vector Machine (SVM) classifier. The learning process was conducted with the original CPC database that we have built. The results are very promising. The descriptor can discriminate human from non-human candidates and provides the frontal direction of humans with high precision. In addition we demonstrated that using the CPCs improves significantly the classification results in comparison with Single Point Clouds (i.e. point clouds acquired with only one kinect). Second, we compared our detection method with two others, namely the HOG detector on RGB images and a 3D HOG-based detection method that is applied on RGB-depth data. The obtained results on different situations show that the proposed human detection method provides excellent performances that outperform the other two detection methods.},
keywords = {3D descriptor, Classification, Human detection, Kinect},
pubstate = {published},
tppubtype = {article}
}
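The abstract above outlines the descriptor pipeline: subdivide the point cloud into blocks, histogram the surface normal orientations per block, and classify the concatenated histogram with an SVM. The Python sketch below illustrates that general idea under stated assumptions only; the grid size, bin counts and normalization are illustrative choices rather than the authors' parameters, and the point normals are assumed to be precomputed.

# Hypothetical sketch of a HOG-like descriptor over surface normals, as the
# abstract describes: subdivide the point cloud into blocks and histogram the
# normal orientations per block. Grid size and bin counts are assumptions.
import numpy as np
from sklearn.svm import SVC  # an SVM classifier, as used in the paper's evaluation

def normal_orientation_descriptor(points, normals, grid=(2, 4, 2), az_bins=8, el_bins=4):
    """points, normals: (N, 3) arrays; normals assumed to be unit length."""
    # Assign each point to a block of a regular grid over the bounding box.
    mins, maxs = points.min(axis=0), points.max(axis=0)
    spans = np.maximum(maxs - mins, 1e-9)
    idx = np.minimum(((points - mins) / spans * grid).astype(int), np.array(grid) - 1)
    block_id = np.ravel_multi_index(idx.T, grid)

    # Orientation of each normal: azimuth in [0, 2*pi), elevation in [-pi/2, pi/2].
    az = np.arctan2(normals[:, 1], normals[:, 0]) % (2 * np.pi)
    el = np.arcsin(np.clip(normals[:, 2], -1.0, 1.0))
    az_idx = np.minimum((az / (2 * np.pi) * az_bins).astype(int), az_bins - 1)
    el_idx = np.minimum(((el + np.pi / 2) / np.pi * el_bins).astype(int), el_bins - 1)

    # Per-block orientation histograms, concatenated and L2-normalized.
    descriptor = np.zeros((int(np.prod(grid)), az_bins, el_bins))
    np.add.at(descriptor, (block_id, az_idx, el_idx), 1.0)
    descriptor = descriptor.reshape(-1)
    return descriptor / (np.linalg.norm(descriptor) + 1e-9)

# Training on labelled complete point clouds (human / non-human), as in the paper:
# X = np.stack([normal_orientation_descriptor(p, n) for p, n in training_clouds])
# clf = SVC(kernel='rbf').fit(X, y)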
2017
Brancati, Nadia; De Pietro, Giuseppe; Frucci, Maria; Gallo, Luigi
Human Skin Detection through Correlation Rules between the YCb and YCr Subspaces Based on Dynamic Color Clustering Journal Article
In: Computer Vision and Image Understanding, vol. 155, pp. 33–42, 2017, ISSN: 1077-3142.
Abstract | Links | BibTeX | Tags: Classification, Clustering, Correlation rules, Skin detection, YCbCr color space
@article{brancatiHumanSkinDetection2017,
title = {Human Skin Detection through Correlation Rules between the YCb and YCr Subspaces Based on Dynamic Color Clustering},
author = {Nadia Brancati and Giuseppe De Pietro and Maria Frucci and Luigi Gallo},
url = {https://www.sciencedirect.com/science/article/pii/S1077314216301989},
doi = {10.1016/j.cviu.2016.12.001},
issn = {1077-3142},
year = {2017},
date = {2017-02-01},
urldate = {2017-02-05},
journal = {Computer Vision and Image Understanding},
volume = {155},
pages = {33--42},
abstract = {This paper presents a novel rule-based skin detection method that works in the YCbCr color space. The method is based on correlation rules that evaluate the combinations of chrominance values to identify the skin pixels in the YCb and YCr subspaces. The correlation rules depend on the shape and size of dynamically generated skin color clusters, which are computed on a statistical basis in the YCb and YCr subspaces for each single image, and represent the areas that include most of the candidate skin pixels. Comparisons with six well-known rule-based methods in literature carried out on four publicly available databases show that the proposed method outperforms the others in terms of quantitative performance evaluation parameters. Moreover, the qualitative analysis shows that the method achieves satisfactory results also in critical scenarios, including severe variations in illumination conditions.},
keywords = {Classification, Clustering, Correlation rules, Skin detection, YCbCr color space},
pubstate = {published},
tppubtype = {article}
}
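As a rough illustration of the kind of chrominance-based rules this work builds on, the sketch below converts an RGB image to YCbCr (ITU-R BT.601) and applies a fixed Cb/Cr box that is often quoted in the skin-detection literature. It does not reproduce the paper's actual contribution, the image-adaptive correlation rules between the YCb and YCr subspaces; the thresholds are generic placeholder values.

# Illustrative sketch only: RGB to YCbCr (ITU-R BT.601) plus a fixed chrominance
# box as a placeholder. The paper's method derives image-adaptive correlation
# rules instead; the bounds below are commonly cited defaults, not the authors'.
import numpy as np

def rgb_to_ycbcr(rgb):
    """rgb: (H, W, 3) uint8 array -> Y, Cb, Cr float arrays."""
    r, g, b = [rgb[..., i].astype(np.float64) for i in range(3)]
    y  = 0.299 * r + 0.587 * g + 0.114 * b
    cb = 128.0 - 0.168736 * r - 0.331264 * g + 0.5 * b
    cr = 128.0 + 0.5 * r - 0.418688 * g - 0.081312 * b
    return y, cb, cr

def naive_skin_mask(rgb, cb_range=(77, 127), cr_range=(133, 173)):
    """Binary mask marking candidate skin pixels under fixed Cb/Cr bounds."""
    _, cb, cr = rgb_to_ycbcr(rgb)
    return ((cb >= cb_range[0]) & (cb <= cb_range[1]) &
            (cr >= cr_range[0]) & (cr <= cr_range[1]))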
2014
Gallo, Luigi
Hand Shape Classification Using Depth Data for Unconstrained 3D Interaction Journal Article
In: Journal of Ambient Intelligence and Smart Environments, vol. 6, no. 1, pp. 93–105, 2014, ISSN: 1876-1364.
Abstract | Links | BibTeX | Tags: 3D interaction, Classification, Kinect, Static hand pose recognition, Touchless interaction, Visualization
@article{galloHandShapeClassification2014,
title = {Hand Shape Classification Using Depth Data for Unconstrained 3D Interaction},
author = {Luigi Gallo},
doi = {10.3233/AIS-130239},
issn = {1876-1364},
year = {2014},
date = {2014-01-01},
journal = {Journal of Ambient Intelligence and Smart Environments},
volume = {6},
number = {1},
pages = {93--105},
abstract = {In this paper, we introduce a novel method for view-independent hand pose recognition from depth data. The proposed approach, which does not rely on color information, provides an estimation of the shape and orientation of the user's hand without constraining him/her to maintain a fixed position in the 3D space. We use principal component analysis to estimate the hand orientation in space, Flusser moment invariants as image features and two SVM-RBF classifiers for visual recognition. Moreover, we describe a novel weighting method that takes advantage of the orientation and velocity of the user's hand to assign a score to each hand shape hypothesis. The complete processing chain is described and evaluated in terms of real-time performance and classification accuracy. As a case study, it has also been integrated into a touchless interface for 3D medical visualization, which allows users to manipulate 3D anatomical parts with up to six degrees of freedom. Furthermore, the paper discusses the results of a user study aimed at assessing if using hand velocity as an indicator of the user's intentionality in changing hand posture results in an overall gain in the classification accuracy. The experimental results show that, especially in the presence of out-of-plane rotations of the hand, the introduction of the velocity-based weighting method produces a significant increase in the pose recognition accuracy.},
keywords = {3D interaction, Classification, Kinect, Static hand pose recognition, Touchless interaction, Visualization},
pubstate = {published},
tppubtype = {article}
}
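The processing chain described in the abstract (PCA-based orientation estimation, moment-based shape features, SVM-RBF classification) can be sketched roughly as follows. This is a simplified illustration under assumptions: normalized central moments stand in for the Flusser invariants used in the paper, and the velocity-based weighting step is omitted.

# Rough sketch of the chain outlined in the abstract: PCA on the hand's 3D
# points for orientation, simple silhouette moments as stand-in shape features,
# and an RBF-kernel SVM. Not the paper's exact features or parameters.
import numpy as np
from sklearn.svm import SVC

def hand_orientation(points):
    """points: (N, 3) hand points from the depth map. Returns the principal axis."""
    centered = points - points.mean(axis=0)
    _, eigvecs = np.linalg.eigh(np.cov(centered.T))
    return eigvecs[:, -1]            # eigenvector of the largest eigenvalue

def silhouette_moments(mask):
    """mask: (H, W) non-empty binary hand silhouette -> normalized central moments."""
    ys, xs = np.nonzero(mask)
    xs, ys = xs - xs.mean(), ys - ys.mean()
    def mu(p, q):
        # Normalized central moment eta_pq, with mu_00 equal to the pixel count.
        return np.sum((xs ** p) * (ys ** q)) / (len(xs) ** ((p + q) / 2 + 1))
    return np.array([mu(2, 0), mu(0, 2), mu(1, 1), mu(3, 0), mu(0, 3)])

# clf = SVC(kernel='rbf')            # the paper uses two SVM-RBF classifiers
# clf.fit(training_features, labels)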