Audio-driven human body motion analysis and synthesis. Ofli, F., Canton-Ferrer, C., Tilmanne, J., Demir, Y., Bozkurt, E., Yemez, Y., Erzin, E., & Tekalp, A. M. In 2008 IEEE INTERNATIONAL CONFERENCE ON ACOUSTICS, SPEECH AND SIGNAL PROCESSING, VOLS 1-12, of International Conference on Acoustics Speech and Signal Processing (ICASSP), pages 2233-2236, 2008. 33rd IEEE International Conference on Acoustics, Speech and Signal Processing, Las Vegas, NV, MAR 30-APR 04, 2008. doi abstract bibtex. This paper presents a framework for audio-driven human body motion analysis and synthesis. We address the problem in the context of a dance performance, where gestures and movements of the dancer are mainly driven by a musical piece and characterized by the repetition of a set of dance figures. The system is trained in a supervised manner using the multiview video recordings of the dancer. The human body posture is extracted from multiview video information without any human intervention using a novel marker-based algorithm based on annealing particle filtering. Audio is analyzed to extract beat and tempo information. The joint analysis of audio and motion features provides a correlation model that is then used to animate a dancing avatar when driven with any musical piece of the same genre. Results are provided showing the effectiveness of the proposed algorithm.
@inproceedings{ ISI:000257456701220,
Author = {Ofli, F. and Canton-Ferrer, C. and Tilmanne, J. and Demir, Y. and
Bozkurt, E. and Yemez, Y. and Erzin, E. and Tekalp, A. M.},
Book-Group-Author = {{IEEE}},
Title = {{Audio-driven human body motion analysis and synthesis}},
Booktitle = {{2008 IEEE INTERNATIONAL CONFERENCE ON ACOUSTICS, SPEECH AND SIGNAL
PROCESSING, VOLS 1-12}},
Series = {{International Conference on Acoustics Speech and Signal Processing
(ICASSP)}},
Year = {{2008}},
Pages = {{2233-2236}},
Note = {{33rd IEEE International Conference on Acoustics, Speech and Signal
Processing, Las Vegas, NV, MAR 30-APR 04, 2008}},
Abstract = {{This paper presents a framework for audio-driven human body motion
analysis and synthesis. We address the problem in the context of a dance
performance, where gestures and movements of the dancer are mainly
driven by a musical piece and characterized by the repetition of a set
of dance figures. The system is trained in a supervised manner using the
multiview video recordings of the dancer. The human body posture is
extracted from multiview video information without any human
intervention using a novel marker-based algorithm based on annealing
particle filtering. Audio is analyzed to extract beat and tempo
information. The joint analysis of audio and motion features provides a
correlation model that is then used to animate a dancing avatar when
driven with any musical piece of the same genre. Results are provided
showing the effectiveness of the proposed algorithm.}},
DOI = {{10.1109/ICASSP.2008.4518089}},
ISSN = {{1520-6149}},
ISBN = {{978-1-4244-1483-3}},
ResearcherID-Numbers = {{Erzin, Engin/H-1716-2011}},
ORCID-Numbers = {{Erzin, Engin/0000-0002-2715-2368}},
Unique-ID = {{ISI:000257456701220}},
}
Downloads: 0
{"_id":"WJBNkRG5fYxRPCPXe","bibbaseid":"ofli-cantonferrer-tilmanne-demir-bozkurt-yemez-erzin-tekalp-audiodrivenhumanbodymotionanalysisandsynthesis-2008","downloads":0,"creationDate":"2015-12-09T21:23:15.294Z","title":"Audio-driven human body motion analysis and synthesis","author_short":["Ofli, F.","Canton-Ferrer, C.","Tilmanne, J.","Demir, Y.","Bozkurt, E.","Yemez, Y.","Erzin, E.","Tekalp, A. M."],"year":2008,"bibtype":"inproceedings","biburl":"http://home.ku.edu.tr/~eerzin/pubs/mvgl.bib","bibdata":{"bibtype":"inproceedings","type":"inproceedings","author":[{"propositions":[],"lastnames":["Ofli"],"firstnames":["F."],"suffixes":[]},{"propositions":[],"lastnames":["Canton-Ferrer"],"firstnames":["C."],"suffixes":[]},{"propositions":[],"lastnames":["Tilmanne"],"firstnames":["J."],"suffixes":[]},{"propositions":[],"lastnames":["Demir"],"firstnames":["Y."],"suffixes":[]},{"propositions":[],"lastnames":["Bozkurt"],"firstnames":["E."],"suffixes":[]},{"propositions":[],"lastnames":["Yemez"],"firstnames":["Y."],"suffixes":[]},{"propositions":[],"lastnames":["Erzin"],"firstnames":["E."],"suffixes":[]},{"propositions":[],"lastnames":["Tekalp"],"firstnames":["A.","M."],"suffixes":[]}],"book-group-author":"IEEE","title":"Audio-driven human body motion analysis and synthesis","booktitle":"2008 IEEE INTERNATIONAL CONFERENCE ON ACOUSTICS, SPEECH AND SIGNAL PROCESSING, VOLS 1-12","series":"International Conference on Acoustics Speech and Signal Processing (ICASSP)","year":"2008","pages":"2233-2236","note":"33rd IEEE International Conference on Acoustics, Speech and Signal Processing, Las Vegas, NV, MAR 30-APR 04, 2008","abstract":"This paper presents a framework for audio-driven human body motion analysis and synthesis. We address the problem in the context of a dance performance, where gestures and movements of the dancer are mainly driven by a musical piece and characterized by the repetition of a set of dance figures. The system is trained in a supervised manner using the multiview video recordings of the dancer. The human body posture is extracted from multiview video information without any human intervention using a novel marker-based algorithm based on annealing particle filtering. Audio is analyzed to extract beat and tempo information. The joint analysis of audio and motion features provides a correlation model that is then used to animate a dancing avatar when driven with any musical piece of the same genre. Results are provided showing the effectiveness of the proposed algorithm.","doi":"10.1109/ICASSP.2008.4518089","issn":"1520-6149","isbn":"978-1-4244-1483-3","researcherid-numbers":"Erzin, Engin/H-1716-2011","orcid-numbers":"Erzin, Engin/0000-0002-2715-2368","unique-id":"ISI:000257456701220","bibtex":"@inproceedings{ ISI:000257456701220,\nAuthor = {Ofli, F. and Canton-Ferrer, C. and Tilmanne, J. and Demir, Y. and\n Bozkurt, E. and Yemez, Y. and Erzin, E. and Tekalp, A. M.},\nBook-Group-Author = {{IEEE}},\nTitle = {{Audio-driven human body motion analysis and synthesis}},\nBooktitle = {{2008 IEEE INTERNATIONAL CONFERENCE ON ACOUSTICS, SPEECH AND SIGNAL\n PROCESSING, VOLS 1-12}},\nSeries = {{International Conference on Acoustics Speech and Signal Processing\n (ICASSP)}},\nYear = {{2008}},\nPages = {{2233-2236}},\nNote = {{33rd IEEE International Conference on Acoustics, Speech and Signal\n Processing, Las Vegas, NV, MAR 30-APR 04, 2008}},\nAbstract = {{This paper presents a framework for audio-driven human body motion\n analysis and synthesis. We address the problem in the context of a dance\n performance, where gestures and movements of the dancer are mainly\n driven by a musical piece and characterized by the repetition of a set\n of dance figures. The system is trained in a supervised manner using the\n multiview video recordings of the dancer. The human body posture is\n extracted from multiview video information without any human\n intervention using a novel marker-based algorithm based on annealing\n particle filtering. Audio is analyzed to extract beat and tempo\n information. The joint analysis of audio and motion features provides a\n correlation model that is then used to animate a dancing avatar when\n driven with any musical piece of the same genre. Results are provided\n showing the effectiveness of the proposed algorithm.}},\nDOI = {{10.1109/ICASSP.2008.4518089}},\nISSN = {{1520-6149}},\nISBN = {{978-1-4244-1483-3}},\nResearcherID-Numbers = {{Erzin, Engin/H-1716-2011}},\nORCID-Numbers = {{Erzin, Engin/0000-0002-2715-2368}},\nUnique-ID = {{ISI:000257456701220}},\n}\n\n","author_short":["Ofli, F.","Canton-Ferrer, C.","Tilmanne, J.","Demir, Y.","Bozkurt, E.","Yemez, Y.","Erzin, E.","Tekalp, A. M."],"key":"ISI:000257456701220","id":"ISI:000257456701220","bibbaseid":"ofli-cantonferrer-tilmanne-demir-bozkurt-yemez-erzin-tekalp-audiodrivenhumanbodymotionanalysisandsynthesis-2008","role":"author","urls":{},"metadata":{"authorlinks":{"erzin, e":"http://home.ku.edu.tr/~eerzin/pubs/index6.html"}},"downloads":0,"html":""},"search_terms":["audio","driven","human","body","motion","analysis","synthesis","ofli","canton-ferrer","tilmanne","demir","bozkurt","yemez","erzin","tekalp"],"keywords":[],"authorIDs":["56689bc2b3110c264a000354","566927fe71adeb5a05000063","s4rze5RZET4EY5wXY"],"dataSources":["P7SB4qiBxZPhjXYRW","eoMYcQtZLjtLCGT3K"]}