The JESTKOD database: an affective multimodal database of dyadic interactions. Bozkurt, E., Khaki, H., Kececi, S., Turker, B. B., Yemez, Y., & Erzin, E. Language Resources and Evaluation, 51(3):857-872, September 2017. doi: 10.1007/s10579-016-9377-0

Abstract: In human-to-human communication, gesture and speech co-exist in time with tight synchrony, and gestures are often used to complement or to emphasize speech. In human-computer interaction systems, the natural, affective, and believable use of gestures would be a key component in adopting and emphasizing human-centered aspects. However, natural and affective multimodal data for studying computational models of gesture and speech are limited. In this study, we introduce the JESTKOD database, which consists of speech and full-body motion-capture recordings of dyadic interactions under agreement and disagreement scenarios. The participants of the dyadic interactions are native Turkish speakers, and the recordings of each participant are rated in a dimensional affect space. We present our multimodal data collection and annotation process, as well as our preliminary experimental studies on agreement/disagreement classification of dyadic interactions using body gesture and speech data. The JESTKOD database provides a valuable asset for investigating gesture and speech toward designing more natural and affective human-computer interaction systems.
@article{ISI:000407360600011,
  author   = {Bozkurt, Elif and Khaki, Hossein and Kececi, Sinan and Turker, B. Berker and Yemez, Yucel and Erzin, Engin},
  title    = {The {JESTKOD} database: an affective multimodal database of dyadic interactions},
  journal  = {Language Resources and Evaluation},
  year     = {2017},
  volume   = {51},
  number   = {3},
  pages    = {857--872},
  month    = sep,
  abstract = {In human-to-human communication, gesture and speech co-exist in time with tight synchrony, and gestures are often used to complement or to emphasize speech. In human-computer interaction systems, the natural, affective, and believable use of gestures would be a key component in adopting and emphasizing human-centered aspects. However, natural and affective multimodal data for studying computational models of gesture and speech are limited. In this study, we introduce the JESTKOD database, which consists of speech and full-body motion-capture recordings of dyadic interactions under agreement and disagreement scenarios. The participants of the dyadic interactions are native Turkish speakers, and the recordings of each participant are rated in a dimensional affect space. We present our multimodal data collection and annotation process, as well as our preliminary experimental studies on agreement/disagreement classification of dyadic interactions using body gesture and speech data. The JESTKOD database provides a valuable asset for investigating gesture and speech toward designing more natural and affective human-computer interaction systems.},
  doi       = {10.1007/s10579-016-9377-0},
  issn      = {1574-020X},
  eissn     = {1574-0218},
  unique-id = {ISI:000407360600011},
}
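
To use this record, save it to a bibliography file and cite it by its key. A minimal LaTeX sketch, assuming the entry above is stored as refs.bib (the file name is illustrative; the key ISI:000407360600011 comes from the entry itself):

  \documentclass{article}
  \begin{document}
  % Cite the JESTKOD paper by the key of the BibTeX entry above.
  The JESTKOD database~\cite{ISI:000407360600011} pairs speech with
  full-body motion-capture recordings of dyadic interactions.
  \bibliographystyle{plain}
  \bibliography{refs}  % assumes the entry is saved as refs.bib
  \end{document}

Running latex, then bibtex, then latex twice resolves the citation; the key can of course be renamed to something more readable as long as the \cite argument matches.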