A gesture-driven computer interface using Kinect. Lai, K., Konrad, J., & Ishwar, P. 2012 IEEE Southwest Symposium on Image Analysis and Interpretation, pp. 185-188, 2012.
Paper: http://bibbase.org/service/mendeley/bca0fddf-79ea-3c29-93ed-6177ce521efd/file/5ce3d633-33e0-69ab-5190-b58a0fab4874/2012-A_gesture-driven_computer_interface_using_Kinect.pdf.pdf
Website: http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6202484

Abstract: Automatic recognition of human actions from video has been studied for many years. Although still very difficult in uncontrolled scenarios, it has been successful in more restricted settings (e.g., fixed viewpoint, no occlusions) with recognition rates approaching 100%. However, the best-performing methods are complex and computationally demanding and thus not well-suited for real-time deployments. This paper proposes to leverage the Kinect camera for close-range gesture recognition using two methods. Both methods use feature vectors that are derived from the skeleton model provided by the Kinect SDK in real time. Although both methods perform nearest-neighbor classification, one method does this in the space of features using the Euclidean distance metric, while the other method does this in the space of feature covariances using a log-Euclidean metric. Both methods recognize 8 hand gestures in real time, achieving correct-classification rates of over 99% on a dataset of 20 subjects, but the method based on Euclidean distance requires feature-vector collections to be of the same size, is sensitive to temporal misalignment, and has higher computation and storage requirements.
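To make the covariance-based approach concrete, below is a minimal, illustrative sketch (not the authors' implementation) of nearest-neighbor classification under a log-Euclidean metric: each recorded gesture is summarized by the covariance matrix of its per-frame feature vectors, and a query is assigned the label of the closest gallery covariance. The feature dimension, gesture labels, helper names, and random data are assumptions standing in for features derived from the Kinect SDK skeleton.

# Illustrative sketch only (not the paper's code): nearest-neighbor gesture
# classification in the space of feature covariances with a log-Euclidean metric.
# Feature dimension (6), sequence lengths, and the synthetic data are assumptions.
import numpy as np
from scipy.linalg import logm


def covariance_descriptor(frames: np.ndarray) -> np.ndarray:
    """Covariance of per-frame feature vectors; frames has shape (T, d).

    The descriptor is d x d regardless of T, so sequences of different
    lengths remain directly comparable.
    """
    return np.cov(frames, rowvar=False)


def log_euclidean_distance(c1: np.ndarray, c2: np.ndarray) -> float:
    """Frobenius distance between the matrix logarithms of two SPD matrices."""
    return float(np.linalg.norm(logm(c1) - logm(c2), "fro"))


def classify_nn(query_frames: np.ndarray, gallery: list) -> int:
    """Return the label of the gallery covariance closest to the query."""
    cq = covariance_descriptor(query_frames)
    label, _ = min(gallery, key=lambda entry: log_euclidean_distance(cq, entry[1]))
    return label


if __name__ == "__main__":
    rng = np.random.default_rng(0)
    # Toy gallery: 8 gesture classes (as in the paper), each represented by the
    # covariance of one synthetic 40-frame sequence of 6-D feature vectors.
    gallery = [(g, covariance_descriptor((1.0 + g) * rng.normal(size=(40, 6))))
               for g in range(8)]
    # Query sequences may have a different number of frames.
    query = 3.5 * rng.normal(size=(35, 6))
    print("predicted gesture:", classify_nn(query, gallery))

Because the covariance descriptor has a fixed size regardless of sequence length, this variant avoids the equal-length and temporal-alignment requirements the abstract notes for the Euclidean feature-vector method.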
@inproceedings{lai2012gesture,
title = {A gesture-driven computer interface using Kinect},
year = {2012},
keywords = {Cameras,Covariance matrix,Euclidean distance metric,Gesture recognition,Human action recognition,Human-computer interaction,Humans,Joints,Kinect SDK,Kinect camera,Real time systems,Vectors,automatic human action recognition,close-range gesture recognition,covariance analysis,feature covariances,feature vectors,gesture-driven computer interface,hand gesture recognition,human computer interaction,infrared imaging,log-Euclidean metric,nearest-neighbor classification,pattern classification,skeleton model,storage temporal,temporal misalignment,video signal processing},
pages = {185-188},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6202484},
abstract = {Automatic recognition of human actions from video has been studied for many years. Although still very difficult in uncontrolled scenarios, it has been successful in more restricted settings (e.g., fixed viewpoint, no occlusions) with recognition rates approaching 100%. However, the best-performing methods are complex and computationally-demanding and thus not well-suited for real-time deployments. This paper proposes to leverage the Kinect camera for close-range gesture recognition using two methods. Both methods use feature vectors that are derived from the skeleton model provided by the Kinect SDK in real-time. Although both methods perform nearest-neighbor classification, one method does this in the space of features using the Euclidean distance metric, while the other method does this in the space of feature covariances using a log-Euclidean metric. Both methods recognize 8 hand gestures in real time achieving correct-classification rates of over 99% on a dataset of 20 subjects but the method based on Euclidean distance requires feature-vector collections to be of the same size, is sensitive to temporal misalignment, and has higher computation and storage requirements.},
author = {Lai, Kam and Konrad, Janusz and Ishwar, Prakash},
booktitle = {2012 IEEE Southwest Symposium on Image Analysis and Interpretation}
}
{"_id":"9r5JqZeQLwFbxbefw","bibbaseid":"lai-konrad-ishwar-agesturedrivencomputerinterfaceusingkinect-2012","downloads":0,"creationDate":"2017-04-28T05:53:44.423Z","title":"A gesture-driven computer interface using Kinect","author_short":["Lai, K.","Konrad, J.","Ishwar, P."],"year":2012,"bibtype":"article","biburl":null,"bibdata":{"title":"A gesture-driven computer interface using Kinect","type":"article","year":"2012","identifiers":"[object Object]","keywords":"Cameras,Covariance matrix,Euclidean distance metric,Gesture recognition,Human action recognition,Human-computer interaction,Humans,Joints,Kinect SDK,Kinect camera,Real time systems,Vectors,automatic human action recognition,close-range gesture recognition,covariance analysis,feature covariances,feature vectors,gesture-driven computer interface,hand gesture recognition,human computer interaction,infrared imaging,log-Euclidean metric,nearest-neighbor classification,pattern classification,skeleton model,storage temporal,temporal misalignment,video signal processing","pages":"185-188","websites":"http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6202484","id":"7cf2292a-0e8f-3b19-b128-fad72264a7be","created":"2017-01-13T10:27:35.000Z","file_attached":"true","profile_id":"5d07ba72-227e-314d-9d79-37271da759ee","group_id":"e79131d5-b618-3b3c-ae97-e4263040fd28","last_modified":"2017-03-14T16:56:30.626Z","read":false,"starred":false,"authored":false,"confirmed":"true","hidden":false,"abstract":"Automatic recognition of human actions from video has been studied for many years. Although still very difficult in uncontrolled scenarios, it has been successful in more restricted settings (e.g., fixed viewpoint, no occlusions) with recognition rates approaching 100%. However, the best-performing methods are complex and computationally-demanding and thus not well-suited for real-time deployments. This paper proposes to leverage the Kinect camera for close-range gesture recognition using two methods. Both methods use feature vectors that are derived from the skeleton model provided by the Kinect SDK in real-time. Although both methods perform nearest-neighbor classification, one method does this in the space of features using the Euclidean distance metric, while the other method does this in the space of feature covariances using a log-Euclidean metric. 
Both methods recognize 8 hand gestures in real time achieving correct-classification rates of over 99% on a dataset of 20 subjects but the method based on Euclidean distance requires feature-vector collections to be of the same size, is sensitive to temporal misalignment, and has higher computation and storage requirements.","bibtype":"article","author":"Lai, Kam and Konrad, Janusz and Ishwar, Prakash","journal":"2012 IEEE Southwest Symposium on Image Analysis and Interpretation","bibtex":"@article{\n title = {A gesture-driven computer interface using Kinect},\n type = {article},\n year = {2012},\n identifiers = {[object Object]},\n keywords = {Cameras,Covariance matrix,Euclidean distance metric,Gesture recognition,Human action recognition,Human-computer interaction,Humans,Joints,Kinect SDK,Kinect camera,Real time systems,Vectors,automatic human action recognition,close-range gesture recognition,covariance analysis,feature covariances,feature vectors,gesture-driven computer interface,hand gesture recognition,human computer interaction,infrared imaging,log-Euclidean metric,nearest-neighbor classification,pattern classification,skeleton model,storage temporal,temporal misalignment,video signal processing},\n pages = {185-188},\n websites = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6202484},\n id = {7cf2292a-0e8f-3b19-b128-fad72264a7be},\n created = {2017-01-13T10:27:35.000Z},\n file_attached = {true},\n profile_id = {5d07ba72-227e-314d-9d79-37271da759ee},\n group_id = {e79131d5-b618-3b3c-ae97-e4263040fd28},\n last_modified = {2017-03-14T16:56:30.626Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {true},\n hidden = {false},\n abstract = {Automatic recognition of human actions from video has been studied for many years. Although still very difficult in uncontrolled scenarios, it has been successful in more restricted settings (e.g., fixed viewpoint, no occlusions) with recognition rates approaching 100%. However, the best-performing methods are complex and computationally-demanding and thus not well-suited for real-time deployments. This paper proposes to leverage the Kinect camera for close-range gesture recognition using two methods. Both methods use feature vectors that are derived from the skeleton model provided by the Kinect SDK in real-time. Although both methods perform nearest-neighbor classification, one method does this in the space of features using the Euclidean distance metric, while the other method does this in the space of feature covariances using a log-Euclidean metric. 
Both methods recognize 8 hand gestures in real time achieving correct-classification rates of over 99% on a dataset of 20 subjects but the method based on Euclidean distance requires feature-vector collections to be of the same size, is sensitive to temporal misalignment, and has higher computation and storage requirements.},\n bibtype = {article},\n author = {Lai, Kam and Konrad, Janusz and Ishwar, Prakash},\n journal = {2012 IEEE Southwest Symposium on Image Analysis and Interpretation}\n}","author_short":["Lai, K.","Konrad, J.","Ishwar, P."],"urls":{"Paper":"http://bibbase.org/service/mendeley/bca0fddf-79ea-3c29-93ed-6177ce521efd/file/5ce3d633-33e0-69ab-5190-b58a0fab4874/2012-A_gesture-driven_computer_interface_using_Kinect.pdf.pdf","Website":"http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6202484"},"bibbaseid":"lai-konrad-ishwar-agesturedrivencomputerinterfaceusingkinect-2012","role":"author","keyword":["Cameras","Covariance matrix","Euclidean distance metric","Gesture recognition","Human action recognition","Human-computer interaction","Humans","Joints","Kinect SDK","Kinect camera","Real time systems","Vectors","automatic human action recognition","close-range gesture recognition","covariance analysis","feature covariances","feature vectors","gesture-driven computer interface","hand gesture recognition","human computer interaction","infrared imaging","log-Euclidean metric","nearest-neighbor classification","pattern classification","skeleton model","storage temporal","temporal misalignment","video signal processing"],"downloads":0},"search_terms":["gesture","driven","computer","interface","using","kinect","lai","konrad","ishwar"],"keywords":["cameras","covariance matrix","euclidean distance metric","gesture recognition","human action recognition","human-computer interaction","humans","joints","kinect sdk","kinect camera","real time systems","vectors","automatic human action recognition","close-range gesture recognition","covariance analysis","feature covariances","feature vectors","gesture-driven computer interface","hand gesture recognition","human computer interaction","infrared imaging","log-euclidean metric","nearest-neighbor classification","pattern classification","skeleton model","storage temporal","temporal misalignment","video signal processing"],"authorIDs":[]}