Multi-microphone fusion for detection of speech and acoustic events in smart spaces. Giannoulis, P., Potamianos, G., Katsamanis, A., & Maragos, P. In 2014 22nd European Signal Processing Conference (EUSIPCO), pages 2375-2379, Sep., 2014. Paper | abstract | bibtex In this paper, we examine the challenging problem of detecting acoustic events and voice activity in smart indoors environments, equipped with multiple microphones. In particular, we focus on channel combination strategies, aiming to take advantage of the multiple microphones installed in the smart space, capturing the potentially noisy acoustic scene from the far-field. We propose various such approaches that can be formulated as fusion at the signal, feature, or at the decision level, as well as combinations of the above, also including multi-channel training. We apply our methods on two multi-microphone databases: (a) one recorded inside a small meeting room, containing twelve classes of isolated acoustic events; and (b) a speech corpus containing interfering noise sources, simulated inside a smart home with multiple rooms. Our multi-channel approaches demonstrate significant improvements, reaching relative error reductions over a single-channel baseline of 9.3% and 44.8% in the two datasets, respectively.
@InProceedings{6952875,
author = {P. Giannoulis and G. Potamianos and A. Katsamanis and P. Maragos},
booktitle = {2014 22nd European Signal Processing Conference (EUSIPCO)},
title = {Multi-microphone fusion for detection of speech and acoustic events in smart spaces},
year = {2014},
pages = {2375-2379},
abstract = {In this paper, we examine the challenging problem of detecting acoustic events and voice activity in smart indoors environments, equipped with multiple microphones. In particular, we focus on channel combination strategies, aiming to take advantage of the multiple microphones installed in the smart space, capturing the potentially noisy acoustic scene from the far-field. We propose various such approaches that can be formulated as fusion at the signal, feature, or at the decision level, as well as combinations of the above, also including multi-channel training. We apply our methods on two multi-microphone databases: (a) one recorded inside a small meeting room, containing twelve classes of isolated acoustic events; and (b) a speech corpus containing interfering noise sources, simulated inside a smart home with multiple rooms. Our multi-channel approaches demonstrate significant improvements, reaching relative error reductions over a single-channel baseline of 9.3% and 44.8% in the two datasets, respectively.},
keywords = {acoustic signal detection;home computing;microphones;speech processing;relative error reductions;smart home;multimicrophone databases;noisy acoustic scene;smart indoors environments;acoustic event detection;speech detection;multimicrophone fusion;Hidden Markov models;Acoustics;Channel estimation;Speech;Microphones;Training;Event detection;acoustic event detection and classification;voice activity detection;multi-channel fusion},
issn = {2076-1465},
month = {Sep.},
url = {https://www.eurasip.org/proceedings/eusipco/eusipco2014/html/papers/1569925285.pdf},
}
Downloads: 0
{"_id":"6oEurvnsg5D5Cv6GJ","bibbaseid":"giannoulis-potamianos-katsamanis-maragos-multimicrophonefusionfordetectionofspeechandacousticeventsinsmartspaces-2014","authorIDs":[],"author_short":["Giannoulis, P.","Potamianos, G.","Katsamanis, A.","Maragos, P."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","author":[{"firstnames":["P."],"propositions":[],"lastnames":["Giannoulis"],"suffixes":[]},{"firstnames":["G."],"propositions":[],"lastnames":["Potamianos"],"suffixes":[]},{"firstnames":["A."],"propositions":[],"lastnames":["Katsamanis"],"suffixes":[]},{"firstnames":["P."],"propositions":[],"lastnames":["Maragos"],"suffixes":[]}],"booktitle":"2014 22nd European Signal Processing Conference (EUSIPCO)","title":"Multi-microphone fusion for detection of speech and acoustic events in smart spaces","year":"2014","pages":"2375-2379","abstract":"In this paper, we examine the challenging problem of detecting acoustic events and voice activity in smart indoors environments, equipped with multiple microphones. In particular, we focus on channel combination strategies, aiming to take advantage of the multiple microphones installed in the smart space, capturing the potentially noisy acoustic scene from the far-field. We propose various such approaches that can be formulated as fusion at the signal, feature, or at the decision level, as well as combinations of the above, also including multi-channel training. We apply our methods on two multi-microphone databases: (a) one recorded inside a small meeting room, containing twelve classes of isolated acoustic events; and (b) a speech corpus containing interfering noise sources, simulated inside a smart home with multiple rooms. Our multi-channel approaches demonstrate significant improvements, reaching relative error reductions over a single-channel baseline of 9.3% and 44.8% in the two datasets, respectively.","keywords":"acoustic signal detection;home computing;microphones;speech processing;relative error reductions;smart home;multimicrophone databases;noisy acoustic scene;smart indoors environments;acoustic event detection;speech detection;multimicrophone fusion;Hidden Markov models;Acoustics;Channel estimation;Speech;Microphones;Training;Event detection;acoustic event detection and classification;voice activity detection;multi-channel fusion","issn":"2076-1465","month":"Sep.","url":"https://www.eurasip.org/proceedings/eusipco/eusipco2014/html/papers/1569925285.pdf","bibtex":"@InProceedings{6952875,\n author = {P. Giannoulis and G. Potamianos and A. Katsamanis and P. Maragos},\n booktitle = {2014 22nd European Signal Processing Conference (EUSIPCO)},\n title = {Multi-microphone fusion for detection of speech and acoustic events in smart spaces},\n year = {2014},\n pages = {2375-2379},\n abstract = {In this paper, we examine the challenging problem of detecting acoustic events and voice activity in smart indoors environments, equipped with multiple microphones. In particular, we focus on channel combination strategies, aiming to take advantage of the multiple microphones installed in the smart space, capturing the potentially noisy acoustic scene from the far-field. We propose various such approaches that can be formulated as fusion at the signal, feature, or at the decision level, as well as combinations of the above, also including multi-channel training. We apply our methods on two multi-microphone databases: (a) one recorded inside a small meeting room, containing twelve classes of isolated acoustic events; and (b) a speech corpus containing interfering noise sources, simulated inside a smart home with multiple rooms. Our multi-channel approaches demonstrate significant improvements, reaching relative error reductions over a single-channel baseline of 9.3% and 44.8% in the two datasets, respectively.},\n keywords = {acoustic signal detection;home computing;microphones;speech processing;relative error reductions;smart home;multimicrophone databases;noisy acoustic scene;smart indoors environments;acoustic event detection;speech detection;multimicrophone fusion;Hidden Markov models;Acoustics;Channel estimation;Speech;Microphones;Training;Event detection;acoustic event detection and classification;voice activity detection;multi-channel fusion},\n issn = {2076-1465},\n month = {Sep.},\n url = {https://www.eurasip.org/proceedings/eusipco/eusipco2014/html/papers/1569925285.pdf},\n}\n\n","author_short":["Giannoulis, P.","Potamianos, G.","Katsamanis, A.","Maragos, P."],"key":"6952875","id":"6952875","bibbaseid":"giannoulis-potamianos-katsamanis-maragos-multimicrophonefusionfordetectionofspeechandacousticeventsinsmartspaces-2014","role":"author","urls":{"Paper":"https://www.eurasip.org/proceedings/eusipco/eusipco2014/html/papers/1569925285.pdf"},"keyword":["acoustic signal detection;home computing;microphones;speech processing;relative error reductions;smart home;multimicrophone databases;noisy acoustic scene;smart indoors environments;acoustic event detection;speech detection;multimicrophone fusion;Hidden Markov models;Acoustics;Channel estimation;Speech;Microphones;Training;Event detection;acoustic event detection and classification;voice activity detection;multi-channel fusion"],"metadata":{"authorlinks":{}},"downloads":0},"bibtype":"inproceedings","biburl":"https://raw.githubusercontent.com/Roznn/EUSIPCO/main/eusipco2014url.bib","creationDate":"2021-02-13T17:43:41.787Z","downloads":0,"keywords":["acoustic signal detection;home computing;microphones;speech processing;relative error reductions;smart home;multimicrophone databases;noisy acoustic scene;smart indoors environments;acoustic event detection;speech detection;multimicrophone fusion;hidden markov models;acoustics;channel estimation;speech;microphones;training;event detection;acoustic event detection and classification;voice activity detection;multi-channel fusion"],"search_terms":["multi","microphone","fusion","detection","speech","acoustic","events","smart","spaces","giannoulis","potamianos","katsamanis","maragos"],"title":"Multi-microphone fusion for detection of speech and acoustic events in smart spaces","year":2014,"dataSources":["A2ezyFL6GG6na7bbs","oZFG3eQZPXnykPgnE"]}