Gesture Spotting and Recognition Using Salience Detection and Concatenated Hidden Markov Models. Yin, Y. & Davis, R. In Proceedings of the 15th ACM on International Conference on Multimodal Interaction, ICMI '13, pages 489--494, New York, NY, USA, 2013. ACM.
Paper · doi · abstract · bibtex
We developed a gesture salience based hand tracking method, and a gesture spotting and recognition method based on concatenated hidden Markov models. A 3-fold cross validation using the ChAirGest development data set with 10 users gives an F1 score of 0.907 and an accurate temporal segmentation rate (ATSR) of 0.923. The average final score is 0.9116. Compared with using the hand joint position from the Kinect SDK, using our hand tracking method gives a 3.7% absolute increase in the recognition F1 score.
@inproceedings{yin_gesture_2013,
address = {New York, NY, USA},
series = {{ICMI} '13},
title = {Gesture {Spotting} and {Recognition} {Using} {Salience} {Detection} and {Concatenated} {Hidden} {Markov} {Models}},
isbn = {978-1-4503-2129-7},
url = {http://doi.acm.org/10.1145/2522848.2532588},
doi = {10.1145/2522848.2532588},
abstract = {We developed a gesture salience based hand tracking method, and a gesture spotting and recognition method based on concatenated hidden Markov models. A 3-fold cross validation using the ChAirGest development data set with 10 users gives an F1 score of 0.907 and an accurate temporal segmentation rate (ATSR) of 0.923. The average final score is 0.9116. Compared with using the hand joint position from the Kinect SDK, using our hand tracking method gives a 3.7\% absolute increase in the recognition F1 score.},
urldate = {2014-06-05},
booktitle = {Proceedings of the 15th {ACM} on {International} {Conference} on {Multimodal} {Interaction}},
publisher = {ACM},
author = {Yin, Ying and Davis, Randall},
year = {2013},
pages = {489--494}
}
Downloads: 0
{"_id":"d3Q4gGaF2ck78Fs68","bibbaseid":"yin-davis-gesturespottingandrecognitionusingsaliencedetectionandconcatenatedhiddenmarkovmodels-2013","downloads":0,"creationDate":"2016-10-05T13:48:42.957Z","title":"Gesture Spotting and Recognition Using Salience Detection and Concatenated Hidden Markov Models","author_short":["Yin, Y.","Davis, R."],"year":2013,"bibtype":"inproceedings","biburl":"http://bibbase.org/zotero/alanlivio","bibdata":{"bibtype":"inproceedings","type":"inproceedings","address":"New York, NY, USA","series":"ICMI '13","title":"Gesture Spotting and Recognition Using Salience Detection and Concatenated Hidden Markov Models","isbn":"978-1-4503-2129-7","url":"http://doi.acm.org/10.1145/2522848.2532588","doi":"10.1145/2522848.2532588","abstract":"We developed a gesture salience based hand tracking method, and a gesture spotting and recognition method based on concatenated hidden Markov models. A 3-fold cross validation using the ChAirGest development data set with 10 users gives an F1 score of 0.907 and an accurate temporal segmentation rate (ATSR) of 0.923. The average final score is 0.9116. \nCompared with using the hand joint position from the Kinect SDK, using our hand tracking method gives a 3.7% absolute increase in the recognition F1 score.","urldate":"2014-06-05TZ","booktitle":"Proceedings of the 15th ACM on International Conference on Multimodal Interaction","publisher":"ACM","author":[{"propositions":[],"lastnames":["Yin"],"firstnames":["Ying"],"suffixes":[]},{"propositions":[],"lastnames":["Davis"],"firstnames":["Randall"],"suffixes":[]}],"year":"2013","pages":"489--494","bibtex":"@inproceedings{yin_gesture_2013,\n\taddress = {New York, NY, USA},\n\tseries = {{ICMI} '13},\n\ttitle = {Gesture {Spotting} and {Recognition} {Using} {Salience} {Detection} and {Concatenated} {Hidden} {Markov} {Models}},\n\tisbn = {978-1-4503-2129-7},\n\turl = {http://doi.acm.org/10.1145/2522848.2532588},\n\tdoi = {10.1145/2522848.2532588},\n\tabstract = {We developed a gesture salience based hand tracking method, and a gesture spotting and recognition method based on concatenated hidden Markov models. A 3-fold cross validation using the ChAirGest development data set with 10 users gives an F1 score of 0.907 and an accurate temporal segmentation rate (ATSR) of 0.923. The average final score is 0.9116. \nCompared with using the hand joint position from the Kinect SDK, using our hand tracking method gives a 3.7\\% absolute increase in the recognition F1 score.},\n\turldate = {2014-06-05TZ},\n\tbooktitle = {Proceedings of the 15th {ACM} on {International} {Conference} on {Multimodal} {Interaction}},\n\tpublisher = {ACM},\n\tauthor = {Yin, Ying and Davis, Randall},\n\tyear = {2013},\n\tpages = {489--494}\n}\n\n","author_short":["Yin, Y.","Davis, R."],"key":"yin_gesture_2013","id":"yin_gesture_2013","bibbaseid":"yin-davis-gesturespottingandrecognitionusingsaliencedetectionandconcatenatedhiddenmarkovmodels-2013","role":"author","urls":{"Paper":"http://doi.acm.org/10.1145/2522848.2532588"},"downloads":0},"search_terms":["gesture","spotting","recognition","using","salience","detection","concatenated","hidden","markov","models","yin","davis"],"keywords":[],"authorIDs":[],"dataSources":["tudya6YojbqEiF783"]}