Recognizing Actions of Humans in Motion for Smart Environments. Starostenko, O., Rosas-Romero, R., Martinez-Carballido, J., Aquino, V. A., & Sanchez, J. A. In Workshop Proceedings of the 10th International Conference on Intelligent Environments, pages 4-15, 2014.
The development of high-precision systems that recognize human actions directly from video recordings remains an open problem. Recently, the recognition of dynamic actions of humans in motion has received particular interest in smart environments. We propose two approaches to human action recognition. In the first approach, an envelope of 30x30 pixels is applied to enclose the human silhouette, separated from the background, making the representation invariant to figure dimensions. Once the area containing the figure is defined, the image sequence is used as input to a convolutional neural network that extracts global figure features without prior image processing. The second proposed approach is based on natural knowledge of the human figure, such as body proportions and feet positions. Together with processing global features, we extract six local features, in this way combining holistic and cluster-based approaches to human-figure representation. The input sub-sequence of previously aligned binary silhouettes from video frames is processed to concatenate local and global features into a single feature vector that feeds a hierarchical system of three linear support vector machines for human action classification. To evaluate the proposed approaches, two frameworks for recognizing human actions such as walk, jump, run, side, and skip have been designed and tested on the standard Weizmann dataset and a purpose-developed dataset, achieving correct classification rates of 97-100%.
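For readers who want a concrete picture of the silhouette-to-features-to-SVM pipeline described above, the following is a minimal sketch under stated assumptions: a binary silhouette is cropped to its bounding box and resampled into a 30x30 envelope, flattened global features are concatenated with six local features, and a linear SVM classifies the result. The helper names (normalize_silhouette, feature_vector), the randomly generated "local features," and the single flat LinearSVC (standing in for the paper's hierarchy of three SVMs) are illustrative assumptions, not the authors' implementation.

import numpy as np
from sklearn.svm import LinearSVC

def normalize_silhouette(mask, size=30):
    """Crop a binary silhouette to its bounding box and resample it into a
    size x size envelope, making it invariant to figure dimensions."""
    ys, xs = np.nonzero(mask)
    crop = mask[ys.min():ys.max() + 1, xs.min():xs.max() + 1]
    # Nearest-neighbour resampling, avoiding external image libraries.
    rows = np.linspace(0, crop.shape[0] - 1, size).astype(int)
    cols = np.linspace(0, crop.shape[1] - 1, size).astype(int)
    return crop[np.ix_(rows, cols)].astype(np.float32)

def feature_vector(silhouettes, local_features):
    """Concatenate global features (flattened envelopes averaged over the
    sub-sequence of frames) with the six per-sequence local features."""
    global_part = np.mean([normalize_silhouette(s).ravel() for s in silhouettes], axis=0)
    return np.concatenate([global_part, local_features])

# Hypothetical data: each sample is a short sub-sequence of binary frames
# plus six local features; labels index walk, jump, run, side, skip.
rng = np.random.default_rng(0)
X = np.stack([feature_vector([rng.integers(0, 2, (120, 80)) for _ in range(5)],
                             rng.random(6)) for _ in range(20)])
y = rng.integers(0, 5, 20)

clf = LinearSVC()  # one stage of what the paper builds as a three-SVM hierarchy
clf.fit(X, y)
print(clf.predict(X[:3]))

In the paper's hierarchical scheme, each of the three linear SVMs would make a successive binary split over the action classes; the flat multi-class classifier above is used here only to keep the sketch short.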
@inproceedings{starostenko2014recognizing,
 title = {Recognizing Actions of Humans in Motion for Smart Environments},
 year = {2014},
 pages = {4-15},
 url = {https://ebooks.iospress.nl/ISBN/978-1-61499-410-7},
 abstract = {The development of high-precision systems that recognize human actions directly from video recordings remains an open problem. Recently, the recognition of dynamic actions of humans in motion has received particular interest in smart environments. We propose two approaches to human action recognition. In the first approach, an envelope of 30x30 pixels is applied to enclose the human silhouette, separated from the background, making the representation invariant to figure dimensions. Once the area containing the figure is defined, the image sequence is used as input to a convolutional neural network that extracts global figure features without prior image processing. The second proposed approach is based on natural knowledge of the human figure, such as body proportions and feet positions. Together with processing global features, we extract six local features, in this way combining holistic and cluster-based approaches to human-figure representation. The input sub-sequence of previously aligned binary silhouettes from video frames is processed to concatenate local and global features into a single feature vector that feeds a hierarchical system of three linear support vector machines for human action classification. To evaluate the proposed approaches, two frameworks for recognizing human actions such as walk, jump, run, side, and skip have been designed and tested on the standard Weizmann dataset and a purpose-developed dataset, achieving correct classification rates of 97-100%.},
 author = {Starostenko, Oleg and Rosas-Romero, Roberto and Martinez-Carballido, Jorge and Aquino, Vicente Alarcon and Sanchez, J Alfredo},
 doi = {10.3233/978-1-61499-411-4-4},
 booktitle = {Workshop Proceedings of the 10th International Conference on Intelligent Environments}
}
