The USC CreativeIT database of multimodal dyadic interactions: from speech and full body motion capture to continuous emotional annotations. Metallinou, A., Yang, Z., Lee, C., Busso, C., Carnicke, S., & Narayanan, S. S. Language Resources and Evaluation, 50(3):497–521, 2016. Paper doi bibtex @article{DBLP:journals/lre/MetallinouYLBCN16,
author = {Angeliki Metallinou and
Zhaojun Yang and
Chi{-}Chun Lee and
Carlos Busso and
Sharon Carnicke and
Shrikanth S. Narayanan},
title = {The {USC} CreativeIT database of multimodal dyadic interactions: from
speech and full body motion capture to continuous emotional annotations},
journal = {Language Resources and Evaluation},
volume = {50},
number = {3},
pages = {497--521},
year = {2016},
url = {https://doi.org/10.1007/s10579-015-9300-0},
doi = {10.1007/s10579-015-9300-0},
timestamp = {Tue, 03 Dec 2019 00:00:00 +0100},
biburl = {https://dblp.org/rec/bib/journals/lre/MetallinouYLBCN16},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
Downloads: 0
{"_id":"B3SpXffhTS64xdftT","bibbaseid":"metallinou-yang-lee-busso-carnicke-narayanan-theusccreativeitdatabaseofmultimodaldyadicinteractionsfromspeechandfullbodymotioncapturetocontinuousemotionalannotations-2016","authorIDs":["5e18f5f88fcbc2df01000180"],"author_short":["Metallinou, A.","Yang, Z.","Lee, C.","Busso, C.","Carnicke, S.","Narayanan, S. S."],"bibdata":{"bibtype":"article","type":"article","author":[{"firstnames":["Angeliki"],"propositions":[],"lastnames":["Metallinou"],"suffixes":[]},{"firstnames":["Zhaojun"],"propositions":[],"lastnames":["Yang"],"suffixes":[]},{"firstnames":["Chi-Chun"],"propositions":[],"lastnames":["Lee"],"suffixes":[]},{"firstnames":["Carlos"],"propositions":[],"lastnames":["Busso"],"suffixes":[]},{"firstnames":["Sharon"],"propositions":[],"lastnames":["Carnicke"],"suffixes":[]},{"firstnames":["Shrikanth","S."],"propositions":[],"lastnames":["Narayanan"],"suffixes":[]}],"title":"The USC CreativeIT database of multimodal dyadic interactions: from speech and full body motion capture to continuous emotional annotations","journal":"Language Resources and Evaluation","volume":"50","number":"3","pages":"497–521","year":"2016","url":"https://doi.org/10.1007/s10579-015-9300-0","doi":"10.1007/s10579-015-9300-0","timestamp":"Tue, 03 Dec 2019 00:00:00 +0100","biburl":"https://dblp.org/rec/bib/journals/lre/MetallinouYLBCN16","bibsource":"dblp computer science bibliography, https://dblp.org","bibtex":"@article{DBLP:journals/lre/MetallinouYLBCN16,\n author = {Angeliki Metallinou and\n Zhaojun Yang and\n Chi{-}Chun Lee and\n Carlos Busso and\n Sharon Carnicke and\n Shrikanth S. Narayanan},\n title = {The {USC} CreativeIT database of multimodal dyadic interactions: from\n speech and full body motion capture to continuous emotional annotations},\n journal = {Language Resources and Evaluation},\n volume = {50},\n number = {3},\n pages = {497--521},\n year = {2016},\n url = {https://doi.org/10.1007/s10579-015-9300-0},\n doi = {10.1007/s10579-015-9300-0},\n timestamp = {Tue, 03 Dec 2019 00:00:00 +0100},\n biburl = {https://dblp.org/rec/bib/journals/lre/MetallinouYLBCN16},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n}\n\n","author_short":["Metallinou, A.","Yang, Z.","Lee, C.","Busso, C.","Carnicke, S.","Narayanan, S. S."],"key":"DBLP:journals/lre/MetallinouYLBCN16","id":"DBLP:journals/lre/MetallinouYLBCN16","bibbaseid":"metallinou-yang-lee-busso-carnicke-narayanan-theusccreativeitdatabaseofmultimodaldyadicinteractionsfromspeechandfullbodymotioncapturetocontinuousemotionalannotations-2016","role":"author","urls":{"Paper":"https://doi.org/10.1007/s10579-015-9300-0"},"downloads":0},"bibtype":"article","biburl":"https://dblp.org/pid/19/3899.bib","creationDate":"2020-01-10T22:08:56.843Z","downloads":0,"keywords":[],"search_terms":["usc","creativeit","database","multimodal","dyadic","interactions","speech","full","body","motion","capture","continuous","emotional","annotations","metallinou","yang","lee","busso","carnicke","narayanan"],"title":"The USC CreativeIT database of multimodal dyadic interactions: from speech and full body motion capture to continuous emotional annotations","year":2016,"dataSources":["z5xLfHHoMYwhn8vbA"]}