RAVEL: an annotated corpus for training robots with audiovisual abilities. Alameda-Pineda, X., Sanchez-Riera, J., Wienke, J., Franc, V., Čech, J., Kulkarni, K., Deleforge, A., & Horaud, R. Journal on Multimodal User Interfaces, 7(1-2):79--91, March, 2013. Paper doi abstract bibtex We introduce Ravel (Robots with Audiovisual Abilities), a publicly available data set which covers examples of Human Robot Interaction (HRI) scenarios. These scenarios are recorded using the audio-visual robot head POPEYE, equipped with two cameras and four microphones, two of which being plugged into the ears of a dummy head. All the recordings were performed in a standard room with no special equipment, thus providing a challenging indoor scenario. This data set provides a basis to test and benchmark methods and algorithms for audio-visual scene analysis with the ultimate goal of enabling robots to interact with people in the most natural way. The data acquisition setup, sensor calibration, data annotation and data content are fully detailed. Moreover, three examples of using the recorded data are provided, illustrating its appropriateness for carrying out a large variety of HRI experiments. The Ravel data are publicly available at: http://ravel.humavips.eu/.
@article{alameda-pineda_ravel:_2013,
  title      = {{RAVEL}: an annotated corpus for training robots with audiovisual abilities},
  shorttitle = {{RAVEL}},
  author     = {Alameda-Pineda, Xavier and Sanchez-Riera, Jordi and Wienke, Johannes and Franc, Vojtěch and Čech, Jan and Kulkarni, Kaustubh and Deleforge, Antoine and Horaud, Radu},
  journal    = {Journal on Multimodal User Interfaces},
  volume     = {7},
  number     = {1-2},
  pages      = {79--91},
  month      = mar,
  year       = {2013},
  issn       = {1783-7677, 1783-8738},
  doi        = {10.1007/s12193-012-0111-y},
  url        = {http://link.springer.com/article/10.1007/s12193-012-0111-y},
  urldate    = {2014-05-19},
  language   = {en},
  abstract   = {We introduce Ravel (Robots with Audiovisual Abilities), a publicly available data set which covers examples of Human Robot Interaction (HRI) scenarios. These scenarios are recorded using the audio-visual robot head POPEYE, equipped with two cameras and four microphones, two of which being plugged into the ears of a dummy head. All the recordings were performed in a standard room with no special equipment, thus providing a challenging indoor scenario. This data set provides a basis to test and benchmark methods and algorithms for audio-visual scene analysis with the ultimate goal of enabling robots to interact with people in the most natural way. The data acquisition setup, sensor calibration, data annotation and data content are fully detailed. Moreover, three examples of using the recorded data are provided, illustrating its appropriateness for carrying out a large variety of HRI experiments. The Ravel data are publicly available at: http://ravel.humavips.eu/.},
}
Downloads: 0
{"_id":"TqFeBbo7wWz8uLjjr","bibbaseid":"alamedapineda-sanchezriera-wienke-franc-ech-kulkarni-deleforge-horaud-ravelanannotatedcorpusfortrainingrobotswithaudiovisualabilities-2013","downloads":0,"creationDate":"2016-10-05T13:48:43.728Z","title":"RAVEL: an annotated corpus for training robots with audiovisual abilities","author_short":["Alameda-Pineda, X.","Sanchez-Riera, J.","Wienke, J.","Franc, V.","Čech, J.","Kulkarni, K.","Deleforge, A.","Horaud, R."],"year":2013,"bibtype":"article","biburl":"http://bibbase.org/zotero/alanlivio","bibdata":{"bibtype":"article","type":"article","title":"RAVEL: an annotated corpus for training robots with audiovisual abilities","volume":"7","issn":"1783-7677, 1783-8738","shorttitle":"RAVEL","url":"http://link.springer.com/article/10.1007/s12193-012-0111-y","doi":"10.1007/s12193-012-0111-y","abstract":"We introduce Ravel (Robots with Audiovisual Abilities), a publicly available data set which covers examples of Human Robot Interaction (HRI) scenarios. These scenarios are recorded using the audio-visual robot head POPEYE, equipped with two cameras and four microphones, two of which being plugged into the ears of a dummy head. All the recordings were performed in a standard room with no special equipment, thus providing a challenging indoor scenario. This data set provides a basis to test and benchmark methods and algorithms for audio-visual scene analysis with the ultimate goal of enabling robots to interact with people in the most natural way. The data acquisition setup, sensor calibration, data annotation and data content are fully detailed. Moreover, three examples of using the recorded data are provided, illustrating its appropriateness for carrying out a large variety of HRI experiments. 
The Ravel data are publicly available at: http://ravel.humavips.eu/.","language":"en","number":"1-2","urldate":"2014-05-19TZ","journal":"Journal on Multimodal User Interfaces","author":[{"propositions":[],"lastnames":["Alameda-Pineda"],"firstnames":["Xavier"],"suffixes":[]},{"propositions":[],"lastnames":["Sanchez-Riera"],"firstnames":["Jordi"],"suffixes":[]},{"propositions":[],"lastnames":["Wienke"],"firstnames":["Johannes"],"suffixes":[]},{"propositions":[],"lastnames":["Franc"],"firstnames":["Vojtěch"],"suffixes":[]},{"propositions":[],"lastnames":["Čech"],"firstnames":["Jan"],"suffixes":[]},{"propositions":[],"lastnames":["Kulkarni"],"firstnames":["Kaustubh"],"suffixes":[]},{"propositions":[],"lastnames":["Deleforge"],"firstnames":["Antoine"],"suffixes":[]},{"propositions":[],"lastnames":["Horaud"],"firstnames":["Radu"],"suffixes":[]}],"month":"March","year":"2013","pages":"79--91","bibtex":"@article{alameda-pineda_ravel:_2013,\n\ttitle = {{RAVEL}: an annotated corpus for training robots with audiovisual abilities},\n\tvolume = {7},\n\tissn = {1783-7677, 1783-8738},\n\tshorttitle = {{RAVEL}},\n\turl = {http://link.springer.com/article/10.1007/s12193-012-0111-y},\n\tdoi = {10.1007/s12193-012-0111-y},\n\tabstract = {We introduce Ravel (Robots with Audiovisual Abilities), a publicly available data set which covers examples of Human Robot Interaction (HRI) scenarios. These scenarios are recorded using the audio-visual robot head POPEYE, equipped with two cameras and four microphones, two of which being plugged into the ears of a dummy head. All the recordings were performed in a standard room with no special equipment, thus providing a challenging indoor scenario. This data set provides a basis to test and benchmark methods and algorithms for audio-visual scene analysis with the ultimate goal of enabling robots to interact with people in the most natural way. The data acquisition setup, sensor calibration, data annotation and data content are fully detailed. 
Moreover, three examples of using the recorded data are provided, illustrating its appropriateness for carrying out a large variety of HRI experiments. The Ravel data are publicly available at: http://ravel.humavips.eu/.},\n\tlanguage = {en},\n\tnumber = {1-2},\n\turldate = {2014-05-19TZ},\n\tjournal = {Journal on Multimodal User Interfaces},\n\tauthor = {Alameda-Pineda, Xavier and Sanchez-Riera, Jordi and Wienke, Johannes and Franc, Vojtěch and Čech, Jan and Kulkarni, Kaustubh and Deleforge, Antoine and Horaud, Radu},\n\tmonth = mar,\n\tyear = {2013},\n\tpages = {79--91}\n}\n\n","author_short":["Alameda-Pineda, X.","Sanchez-Riera, J.","Wienke, J.","Franc, V.","Čech, J.","Kulkarni, K.","Deleforge, A.","Horaud, R."],"key":"alameda-pineda_ravel:_2013","id":"alameda-pineda_ravel:_2013","bibbaseid":"alamedapineda-sanchezriera-wienke-franc-ech-kulkarni-deleforge-horaud-ravelanannotatedcorpusfortrainingrobotswithaudiovisualabilities-2013","role":"author","urls":{"Paper":"http://link.springer.com/article/10.1007/s12193-012-0111-y"},"downloads":0},"search_terms":["ravel","annotated","corpus","training","robots","audiovisual","abilities","alameda-pineda","sanchez-riera","wienke","franc","čech","kulkarni","deleforge","horaud"],"keywords":[],"authorIDs":[],"dataSources":["tudya6YojbqEiF783"]}