Leveraging Speech for Gesture Detection in Multimodal Communication. Ghaleb, E., Burenko, I., Rasenberg, M., Pouw, W., Toni, I., Uhrig, P., Wilson, A., Holler, J., Özyürek, A., & Fernández, R. arXiv preprint arXiv:2404.14952, 2024.
Gestures are inherent to human interaction and often complement speech in face-to-face communication, forming a multimodal communication system. An important task in gesture analysis is detecting a gesture's beginning and end. Research on automatic gesture detection has primarily focused on visual and kinematic information to detect a limited set of isolated or silent gestures with low variability, neglecting the integration of speech and vision signals to detect gestures that co-occur with speech. This work addresses this gap by focusing on co-speech gesture detection, emphasising the synchrony between speech and co-speech hand gestures. We address three main challenges: the variability of gesture forms, the temporal misalignment between gesture and speech onsets, and differences in sampling rate between modalities. We investigate extended speech time windows and employ separate backbone models for each modality to address the temporal misalignment and sampling rate differences. We utilize Transformer encoders in cross-modal and early fusion techniques to effectively align and integrate speech and skeletal sequences. The study results show that combining visual and speech information significantly enhances gesture detection performance. Our findings indicate that expanding the speech buffer beyond visual time segments improves performance and that multimodal integration using cross-modal and early fusion techniques outperforms baseline methods using unimodal and late fusion methods. Additionally, we find a correlation between the models' gesture prediction confidence and low-level speech frequency features potentially associated with gestures. Overall, the study provides a better understanding and detection methods for co-speech gestures, facilitating the analysis of multimodal communication.
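To make the fusion idea in the abstract concrete, below is a minimal, hypothetical sketch (not the authors' code) of early fusion with a Transformer encoder: each modality is assumed to have been encoded by its own backbone into per-frame features, the two token sequences are projected to a shared dimension and concatenated along time, and a classifier predicts gesture vs. non-gesture. All module names, dimensions, and sequence lengths are illustrative assumptions.

```python
# Hypothetical sketch of early fusion for co-speech gesture detection.
# Assumes speech and skeletal frames were already produced by separate backbones.
import torch
import torch.nn as nn

class EarlyFusionGestureDetector(nn.Module):
    def __init__(self, speech_dim=768, skeleton_dim=256, d_model=256,
                 n_heads=4, n_layers=2, n_classes=2):
        super().__init__()
        # Project each modality into a shared embedding space before fusion.
        self.speech_proj = nn.Linear(speech_dim, d_model)
        self.skeleton_proj = nn.Linear(skeleton_dim, d_model)
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=d_model, nhead=n_heads, batch_first=True)
        self.encoder = nn.TransformerEncoder(encoder_layer, num_layers=n_layers)
        self.classifier = nn.Linear(d_model, n_classes)

    def forward(self, speech_feats, skeleton_feats):
        # speech_feats:   (batch, T_speech, speech_dim)  -- speech-backbone frames
        # skeleton_feats: (batch, T_video, skeleton_dim) -- pose-backbone frames
        # Early fusion: concatenate both token sequences along the time axis so
        # self-attention can relate them despite different sampling rates.
        tokens = torch.cat(
            [self.speech_proj(speech_feats), self.skeleton_proj(skeleton_feats)],
            dim=1)
        fused = self.encoder(tokens)
        # Pool over time and classify gesture vs. non-gesture for the segment.
        return self.classifier(fused.mean(dim=1))

# Example usage with dummy tensors; the speech buffer is deliberately longer
# than the visual window, mirroring the extended-speech-window idea.
model = EarlyFusionGestureDetector()
speech = torch.randn(2, 50, 768)
skeleton = torch.randn(2, 30, 256)
logits = model(speech, skeleton)  # shape: (2, n_classes)
```

A cross-modal variant would instead keep the two sequences separate and let one modality attend to the other (e.g. via cross-attention) before classification; the sketch above only illustrates the early-fusion case.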
@article{ghaleb-etal-2024-arxiv,
title={Leveraging Speech for Gesture Detection in Multimodal Communication},
author={Esam Ghaleb and Ilya Burenko and Marlou Rasenberg and Wim Pouw and Ivan Toni and Peter Uhrig and Anna Wilson and Judith Holler and Asl\i \"Ozy\"urek and Raquel Fern\'andez},
journal={arXiv preprint arXiv:2404.14952},
year={2024},
url={https://arxiv.org/pdf/2404.14952},
abstract={Gestures are inherent to human interaction and often complement speech in face-to-face communication, forming a multimodal
communication system. An important task in gesture analysis is detecting a gesture's beginning and end. Research on automatic gesture
detection has primarily focused on visual and kinematic information to detect a limited set of isolated or silent gestures with low
variability, neglecting the integration of speech and vision signals to detect gestures that co-occur with speech.
This work addresses this gap by focusing on co-speech gesture detection, emphasising the synchrony between speech and co-speech hand gestures.
We address three main challenges: the variability of gesture forms, the temporal misalignment between gesture and speech onsets,
and differences in sampling rate between modalities. We investigate extended speech time windows and employ separate backbone models
for each modality to address the temporal misalignment and sampling rate differences. We utilize Transformer encoders in cross-modal
and early fusion techniques to effectively align and integrate speech and skeletal sequences. The study results show that combining
visual and speech information significantly enhances gesture detection performance. Our findings indicate that expanding the speech
buffer beyond visual time segments improves performance and that multimodal integration using cross-modal and early fusion techniques
outperforms baseline methods using unimodal and late fusion methods. Additionally, we find a correlation between the models' gesture
prediction confidence and low-level speech frequency features potentially associated with gestures. Overall, the study provides a better
understanding and detection methods for co-speech gestures, facilitating the analysis of multimodal communication.}
}
{"_id":"EKytqDxXxroahAaNR","bibbaseid":"ghaleb-burenko-rasenberg-pouw-toni-uhrig-wilson-holler-etal-leveragingspeechforgesturedetectioninmultimodalcommunication-preprints","author_short":["Ghaleb, E.","Burenko, I.","Rasenberg, M.","Pouw, W.","Toni, I.","Uhrig, P.","Wilson, A.","Holler, J.","Özyürek, A.","Fernández, R."],"bibdata":{"bibtype":"article","type":"article","title":"Leveraging Speech for Gesture Detection in Multimodal Communication","author":[{"firstnames":["Esam"],"propositions":[],"lastnames":["Ghaleb"],"suffixes":[]},{"firstnames":["Ilya"],"propositions":[],"lastnames":["Burenko"],"suffixes":[]},{"firstnames":["Marlou"],"propositions":[],"lastnames":["Rasenberg"],"suffixes":[]},{"firstnames":["Wim"],"propositions":[],"lastnames":["Pouw"],"suffixes":[]},{"firstnames":["Ivan"],"propositions":[],"lastnames":["Toni"],"suffixes":[]},{"firstnames":["Peter"],"propositions":[],"lastnames":["Uhrig"],"suffixes":[]},{"firstnames":["Anna"],"propositions":[],"lastnames":["Wilson"],"suffixes":[]},{"firstnames":["Judith"],"propositions":[],"lastnames":["Holler"],"suffixes":[]},{"firstnames":["Aslı"],"propositions":[],"lastnames":["Özyürek"],"suffixes":[]},{"firstnames":["Raquel"],"propositions":[],"lastnames":["Fernández"],"suffixes":[]}],"journal":"ArXiv","year":"Preprints","url":"https://arxiv.org/pdf/2404.14952","note":"2024.","abstract":"Gestures are inherent to human interaction and often complement speech in face-to-face communication, forming a multimodal communication system. An important task in gesture analysis is detecting a gesture's beginning and end. Research on automatic gesture detection has primarily focused on visual and kinematic information to detect a limited set of isolated or silent gestures with low variability, neglecting the integration of speech and vision signals to detect gestures that co-occur with speech. This work addresses this gap by focusing on co-speech gesture detection, emphasising the synchrony between speech and co-speech hand gestures. We address three main challenges: the variability of gesture forms, the temporal misalignment between gesture and speech onsets, and differences in sampling rate between modalities. We investigate extended speech time windows and employ separate backbone models for each modality to address the temporal misalignment and sampling rate differences. We utilize Transformer encoders in cross-modal and early fusion techniques to effectively align and integrate speech and skeletal sequences. The study results show that combining visual and speech information significantly enhances gesture detection performance. Our findings indicate that expanding the speech buffer beyond visual time segments improves performance and that multimodal integration using cross-modal and early fusion techniques outperforms baseline methods using unimodal and late fusion methods. Additionally, we find a correlation between the models' gesture prediction confidence and low-level speech frequency features potentially associated with gestures. 
Overall, the study provides a better understanding and detection methods for co-speech gestures, facilitating the analysis of multimodal communication.","bibtex":"@article{ghaleb-etal-2024-arxiv,\ntitle={Leveraging Speech for Gesture Detection in Multimodal Communication}, \nauthor={Esam Ghaleb and Ilya Burenko and Marlou Rasenberg and Wim Pouw and Ivan Toni and Peter Uhrig and Anna Wilson and Judith Holler and Asl\\i \\\"Ozy\\\"urek and Raquel Fern\\'andez},\njournal = {ArXiv},\nyear={Preprints},\nurl={https://arxiv.org/pdf/2404.14952},\nnote={2024.},\nabstract={Gestures are inherent to human interaction and often complement speech in face-to-face communication, forming a multimodal \ncommunication system. An important task in gesture analysis is detecting a gesture's beginning and end. Research on automatic gesture \ndetection has primarily focused on visual and kinematic information to detect a limited set of isolated or silent gestures with low \nvariability, neglecting the integration of speech and vision signals to detect gestures that co-occur with speech. \nThis work addresses this gap by focusing on co-speech gesture detection, emphasising the synchrony between speech and co-speech hand gestures. \nWe address three main challenges: the variability of gesture forms, the temporal misalignment between gesture and speech onsets, \nand differences in sampling rate between modalities. We investigate extended speech time windows and employ separate backbone models \nfor each modality to address the temporal misalignment and sampling rate differences. We utilize Transformer encoders in cross-modal \nand early fusion techniques to effectively align and integrate speech and skeletal sequences. The study results show that combining \nvisual and speech information significantly enhances gesture detection performance. Our findings indicate that expanding the speech \nbuffer beyond visual time segments improves performance and that multimodal integration using cross-modal and early fusion techniques \noutperforms baseline methods using unimodal and late fusion methods. Additionally, we find a correlation between the models' gesture \nprediction confidence and low-level speech frequency features potentially associated with gestures. Overall, the study provides a better \nunderstanding and detection methods for co-speech gestures, facilitating the analysis of multimodal communication.}\n}\n\n","author_short":["Ghaleb, E.","Burenko, I.","Rasenberg, M.","Pouw, W.","Toni, I.","Uhrig, P.","Wilson, A.","Holler, J.","Özyürek, A.","Fernández, R."],"key":"ghaleb-etal-2024-arxiv","id":"ghaleb-etal-2024-arxiv","bibbaseid":"ghaleb-burenko-rasenberg-pouw-toni-uhrig-wilson-holler-etal-leveragingspeechforgesturedetectioninmultimodalcommunication-preprints","role":"author","urls":{"Paper":"https://arxiv.org/pdf/2404.14952"},"metadata":{"authorlinks":{}},"downloads":3},"bibtype":"article","biburl":"https://raw.githubusercontent.com/dmg-illc/dmg/master/bibbase/dmg-preprints.bib","dataSources":["BBpukpTdstoNb9nak"],"keywords":[],"search_terms":["leveraging","speech","gesture","detection","multimodal","communication","ghaleb","burenko","rasenberg","pouw","toni","uhrig","wilson","holler","özyürek","fernández"],"title":"Leveraging Speech for Gesture Detection in Multimodal Communication","year":null,"downloads":3}