You can create a new website from this publication list, or embed it in an existing web page by copying and pasting any of the following snippets.

JavaScript (easiest):

<script src="https://bibbase.org/service/mendeley/ffa9027c-806a-3827-93a1-02c42eb146a1?jsonp=1&authorFirst=1"></script>

PHP:

<?php
  $contents = file_get_contents("https://bibbase.org/service/mendeley/ffa9027c-806a-3827-93a1-02c42eb146a1?jsonp=1&authorFirst=1");
  print_r($contents);
?>

iFrame (not recommended):

<iframe src="https://bibbase.org/service/mendeley/ffa9027c-806a-3827-93a1-02c42eb146a1?jsonp=1&authorFirst=1"></iframe>

For more details, see the documentation.
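
As an alternative to the PHP snippet above, the same rendered list can be fetched server-side in other languages. Below is a minimal Python sketch (assuming the third-party requests package is available) that retrieves the markup and saves it for inclusion in a page:

# Minimal sketch: fetch the BibBase-rendered publication list server-side
# (the same idea as the PHP snippet above) and save it for later inclusion.
# Assumes the third-party `requests` package is installed.
import requests

URL = ("https://bibbase.org/service/mendeley/"
       "ffa9027c-806a-3827-93a1-02c42eb146a1?jsonp=1&authorFirst=1")

response = requests.get(URL, timeout=30)
response.raise_for_status()  # fail loudly on HTTP errors

with open("publications.html", "w", encoding="utf-8") as fh:
    fh.write(response.text)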


2023 (2)

Lamont, A.; Bannister, S.; and Coutinho, E. ‘Talking’ about Music. In YouTube and Music, pages 230–254. Rogers, H.; Freitas, J.; and Porfírio, J. F., editors. Bloomsbury Academic, 2023.

@inbook{Lamont2023,
  author    = {Lamont, Alexandra and Bannister, Scott and Coutinho, Eduardo},
  editor    = {Rogers, Holly and Freitas, Joana and Porfírio, João Francisco},
  chapter   = {‘Talking’ about Music},
  title     = {YouTube and Music},
  publisher = {Bloomsbury Academic},
  address   = {London},
  year      = {2023},
  pages     = {230--254},
  doi       = {10.5040/9781501387302.0023},
  url       = {http://www.bloomsburycollections.com/book/youtube-and-music-online-culture-and-everyday-life/ch11-talking-about-music}
}

Mira, R.; Coutinho, E.; Parada-Cabaleiro, E.; and Schuller, B. Automated composition of Galician Xota - tuning RNN-based composers for specific musical styles using Deep Q-Learning. PeerJ Computer Science, 9: e1356. May 2023.

@article{Mira2023,
  title    = {Automated composition of Galician Xota - tuning RNN-based composers for specific musical styles using Deep Q-Learning},
  author   = {Mira, R. and Coutinho, Eduardo and Parada-Cabaleiro, E. and Schuller, Björn},
  journal  = {PeerJ Computer Science},
  year     = {2023},
  month    = {5},
  volume   = {9},
  pages    = {e1356},
  doi      = {10.7717/peerj-cs.1356},
  url      = {https://peerj.com/articles/cs-1356},
  keywords = {automated music composition, deep q-learning, galician xota, magenta, rl-tuner}
}

Abstract: Music composition is a complex field that is difficult to automate because the computational definition of what is good or aesthetically pleasing is vague and subjective. Many neural network-based methods have been applied in the past, but they lack consistency and in most cases, their outputs fail to impress. The most common issues include excessive repetition and a lack of style and structure, which are hallmarks of artificial compositions. In this project, we build on a model created by Magenta—the RL Tuner—extending it to emulate a specific musical genre—the Galician Xota. To do this, we design a new rule-set containing rules that the composition should follow to adhere to this style. We then implement them using reward functions, which are used to train the Deep Q Network that will be used to generate the pieces. After extensive experimentation, we achieve an implementation of our rule-set that effectively enforces each rule on the generated compositions, and outline a solid research methodology for future researchers looking to use this architecture. Finally, we propose some promising future work regarding further applications for this model and improvements to the experimental procedure.
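
The abstract above describes encoding stylistic rules as reward functions for tuning a Deep Q-Network-based composer. The paper's actual rule-set is not reproduced here; the sketch below only illustrates, with a hypothetical rule (penalise immediate repetition, reward stepwise motion), how a melodic constraint can be expressed as a reward term combined with a pretrained note model's score:

# Illustrative sketch only: a hand-crafted melodic rule expressed as a reward
# term, in the spirit of reward-based tuning of a melody generator. The rule
# and weights below are hypothetical, not those defined in the paper.

def rule_reward(prev_pitch: int, new_pitch: int) -> float:
    """Reward stepwise motion; penalise repetition and very large leaps."""
    interval = abs(new_pitch - prev_pitch)
    if interval == 0:
        return -1.0   # immediate repetition discouraged
    if interval <= 2:
        return 0.5    # stepwise motion (semitone or whole tone) encouraged
    if interval > 12:
        return -0.5   # leaps larger than an octave discouraged
    return 0.0


def total_reward(note_model_log_prob: float, prev_pitch: int,
                 new_pitch: int, rule_weight: float = 0.5) -> float:
    """Combine the pretrained note model's log-probability with the rule term."""
    return note_model_log_prob + rule_weight * rule_reward(prev_pitch, new_pitch)


# Example: score a candidate transition from MIDI pitch 60 (C4) to 62 (D4).
print(total_reward(note_model_log_prob=-1.2, prev_pitch=60, new_pitch=62))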

2022 (4)

De Berardinis, J.; Cangelosi, A.; and Coutinho, E. Measuring the Structural Complexity of Music: From Structural Segmentations to the Automatic Evaluation of Models for Music Generation. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 30: 1963–1976. 2022.

@article{DeBerardinis2022,
  title    = {Measuring the Structural Complexity of Music: From Structural Segmentations to the Automatic Evaluation of Models for Music Generation},
  author   = {De Berardinis, Jacopo and Cangelosi, Angelo and Coutinho, Eduardo},
  journal  = {IEEE/ACM Transactions on Audio, Speech, and Language Processing},
  year     = {2022},
  volume   = {30},
  pages    = {1963--1976},
  doi      = {10.1109/TASLP.2022.3178203},
  url      = {https://ieeexplore.ieee.org/document/9787343/},
  keywords = {Music structure analysis, evaluation measures}
}

Abstract: Composing musical ideas longer than motifs or figures is still rare in music generated by machine learning methods, a problem that is commonly referred to as the lack of long-term structure in the generated sequences. In addition, the evaluation of the structural complexity of artificial compositions is still a manual task, requiring expert knowledge, time and involving subjectivity which is inherent in the perception of musical structure. Based on recent advancements in music structure analysis, we automate the evaluation process by introducing a collection of metrics that can objectively describe structural properties of the music signal. This is done by segmenting music hierarchically, and computing our metrics on the resulting hierarchies to characterise the decomposition process of music into its structural components. We tested our method on a dataset collecting music with different degrees of structural complexity, from random and computer-generated pieces to real compositions of different genres and formats. Results indicate that our method can discriminate between these classes of complexity and identify further non-trivial subdivisions according to their structural properties. Our work contributes a simple yet effective framework for the evaluation of music generation models in regard to their ability to create structurally meaningful compositions.
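
The paper's metrics summarise how a piece decomposes across the levels of a hierarchical segmentation. As a loose illustration only (not the metrics defined in the paper), the sketch below computes two simple properties of a toy hierarchy: the number of segments per level and the average growth between levels.

# Loose illustration (not the paper's metrics): summarise how a hierarchical
# segmentation decomposes a piece, given boundary times (seconds) per level.
hierarchy = [
    [0.0, 90.0, 180.0],                                          # coarse: 2 sections
    [0.0, 45.0, 90.0, 135.0, 180.0],                             # mid: 4 sections
    [0.0, 20.0, 45.0, 70.0, 90.0, 110.0, 135.0, 160.0, 180.0],   # fine: 8 sections
]

def segments_per_level(levels):
    return [len(bounds) - 1 for bounds in levels]

def mean_branching_factor(levels):
    """Average ratio of segment counts between consecutive levels."""
    counts = segments_per_level(levels)
    ratios = [fine / coarse for coarse, fine in zip(counts, counts[1:])]
    return sum(ratios) / len(ratios)

print(segments_per_level(hierarchy))     # [2, 4, 8]
print(mean_branching_factor(hierarchy))  # 2.0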

Coutinho, E.; Van Criekinge, T.; Hanford, G.; Nathan, R.; Maden, M.; and Hill, R. Music therapy interventions for eating disorders: Lack of robust evidence and recommendations for future research. British Journal of Music Therapy, 36(2): 84–93. November 2022.

@article{Coutinho2022,
  title   = {Music therapy interventions for eating disorders: Lack of robust evidence and recommendations for future research},
  author  = {Coutinho, Eduardo and Van Criekinge, Tamaya and Hanford, Greg and Nathan, Rajan and Maden, Michelle and Hill, Ruaraidh},
  journal = {British Journal of Music Therapy},
  year    = {2022},
  month   = {11},
  volume  = {36},
  number  = {2},
  pages   = {84--93},
  doi     = {10.1177/13594575221110193},
  url     = {http://journals.sagepub.com/doi/10.1177/13594575221110193}
}

Abstract: Music therapy (MT) has been used to support people with a variety of eating disorders (EDs), but it is unclear whether there is sufficient and robust evidence from controlled experimental studies. In this article, we report the results of a systematic review that summarises the evidence from published controlled studies where MT has been used to treat people diagnosed with any type of ED. Our results demonstrate that robust evidence concerning the effectiveness of MT for the treatment of EDs is severely lacking. Nonetheless, the evidence described in this paper warrants further investigation especially given that new treatment strategies for EDs are urgently needed. To this end, we offer a set of recommendations for future high-quality experimental studies that can inform the development of effective MT interventions and support for people with EDs.

Cheah, Y.; Wong, H. K.; Spitzer, M.; and Coutinho, E. Background Music and Cognitive Task Performance: A Systematic Review of Task, Music, and Population Impact. Music and Science, 5: 205920432211343. January 2022.

@article{Cheah2022,
  title    = {Background Music and Cognitive Task Performance: A Systematic Review of Task, Music, and Population Impact},
  author   = {Cheah, Yiting and Wong, Hoo Keat and Spitzer, Michael and Coutinho, Eduardo},
  journal  = {Music and Science},
  year     = {2022},
  month    = {1},
  volume   = {5},
  pages    = {205920432211343},
  doi      = {10.1177/20592043221134392},
  url      = {http://journals.sagepub.com/doi/10.1177/20592043221134392},
  keywords = {Background music, cognitive task performance, effects of music, individual differences, systematic review}
}

Abstract: Research on the effect of background music (BgM) on cognitive task performance is marked by inconsistent methods and inconclusive findings. In order to provide clarity to this area, we performed a systematic review on the impact of BgM on performances in a variety of tasks whilst considering the contributions of various task, music, and population characteristics. Following the PRISMA and SWiM protocols, we identified 95 articles (154 experiments) that comprise cognitive tasks across six different cognitive domains—memory; language; thinking, reasoning, and problem-solving; inhibition; attention and processing speed. Extracted data were synthesized using vote counting based (solely) on the direction of effects and analyzed using a sign test analysis. Overall, our results demonstrate a general detrimental effect of BgM on memory and language-related tasks, and a tendency for BgM with lyrics to be more detrimental than instrumental BgM. Only one positive effect (of instrumental BgM) was found; and in most cases, we did not find any effect of BgM on task performance. We also identified a general detrimental impact of BgM towards difficult (but not easy) tasks; and towards introverts (but not extraverts). Taken together, our results show that task, music, and population-specific analyses are all necessary when studying the effects of BgM on cognitive task performance. They also call attention to the necessity to control for task difficulty as well as individual differences (especially level of extraversion) in empirical studies. Finally, our results also demonstrate that many areas remain understudied and therefore a lot more work still needs to be done to gain a comprehensive understanding of how BgM impacts cognitive task performance.
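
The synthesis is based on vote counting over the direction of effects and a sign test. Below is a minimal sketch of that kind of analysis (the counts are made up, not the review's data), assuming scipy is available:

# Minimal sketch of a sign test on vote counts (direction of effects only).
# The counts are made up for illustration; they are not the review's data.
from scipy.stats import binomtest

harmful = 18   # studies where background music impaired performance
helpful = 6    # studies where it improved performance (null results excluded)

# Under the null hypothesis, either direction is equally likely (p = 0.5).
result = binomtest(harmful, n=harmful + helpful, p=0.5)
print(f"sign test p-value: {result.pvalue:.4f}")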

Cheah, Y.; Wong, H. K.; Spitzer, M.; and Coutinho, E. Background music and cognitive task performance: systematic review dataset. Zenodo, 2022.

@misc{Cheah2022dataset,
  title     = {Background music and cognitive task performance: systematic review dataset},
  author    = {Cheah, Yiting and Wong, Hoo Keat and Spitzer, Michael and Coutinho, Eduardo},
  publisher = {Zenodo},
  year      = {2022},
  doi       = {10.5281/zenodo.6301061},
  url       = {https://doi.org/10.5281/zenodo.6301061}
}

Abstract: This repository contains the raw data used for a systematic review on the impact of background music on cognitive task performance (Cheah et al., 2022). Our intention is to facilitate future updates to this work. Cheah, Y., Wong, H. K., Spitzer, M., & Coutinho, E. (2022). Background music and cognitive task performance: A systematic review of task, music and population impact. Music & Science, 5(1), 1-38. https://doi.org/10.1177/20592043221134392

2021 (5)

Coutinho, E.; and Dowrick, C. POLYHYMNIA Mood – A pilot evaluation of a new app to empower people to cope with low mood and depression through music listening. In Proceedings of the 16th International Conference on Music Perception and Cognition, Sheffield, UK, 2021.

@inproceedings{Coutinho2021,
  title     = {POLYHYMNIA Mood – A pilot evaluation of a new app to empower people to cope with low mood and depression through music listening},
  author    = {Coutinho, Eduardo and Dowrick, Christopher},
  booktitle = {Proceedings of the 16th International Conference on Music Perception and Cognition},
  address   = {Sheffield, UK},
  year      = {2021},
  keywords  = {conference, paper}
}

Van Criekinge, T.; D’Août, K.; O’Brien, J.; and Coutinho, E. Music and Hypertonia: Can Music Listening Help Reduce Muscle Tension and Improve Movement Quality? Music and Science, 4: 1–12. January 2021.

@article{VanCriekinge2021,
  title    = {Music and Hypertonia: Can Music Listening Help Reduce Muscle Tension and Improve Movement Quality?},
  author   = {Van Criekinge, T. and D’Août, K. and O’Brien, J. and Coutinho, E.},
  journal  = {Music and Science},
  year     = {2021},
  month    = {1},
  volume   = {4},
  pages    = {1--12},
  doi      = {10.1177/20592043211015353},
  url      = {http://journals.sagepub.com/doi/10.1177/20592043211015353},
  keywords = {journal}
}

Abstract: Although there is a strong consensus that music listening is a common and effective means to induce states of relaxation, little attention has been given to the physical effects of such states and the potential health-related applications. In this article, we investigated whether music listening could induce affective states of relaxation and accelerate the recovery of fatigued muscles, through the analysis of quality of movement. Twenty healthy participants were asked to perform a fatigue induction protocol of the non-dominant arm followed by a resting period and the execution of a drinking task. During recovery periods, all participants were exposed to three experimental conditions: listening to relaxing music; arousing music; and no music. 3D motion capture and surface electromyography were used to record upper limb movements and muscle activity when performing the drinking task before and after the recovery periods. Movement quality was assessed by means of movement smoothness (jerk index) and muscle recovery (motor unit recruitment). Results showed that the recovery of movement smoothness was significantly greater in the relaxing music condition (-35%) than with arousing music (-25%) or silence (-16%), which demonstrates that listening to relaxing music speeds up the recovery process of (fatigued) muscles. We discuss our findings in the context of potential applications of music listening for reducing muscle tension in people suffering from hypertonia.
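
Movement smoothness is assessed with a jerk index derived from motion-capture data. Below is a generic sketch of a dimensionless jerk-based smoothness measure (the exact normalisation used in the study may differ), assuming numpy is available:

# Generic sketch: dimensionless jerk computed from 3D position samples
# (larger values = less smooth). The study's exact normalisation may differ.
import numpy as np

def jerk_index(positions: np.ndarray, fs: float) -> float:
    """positions: (n_samples, 3) array in metres; fs: sampling rate in Hz."""
    dt = 1.0 / fs
    velocity = np.gradient(positions, dt, axis=0)
    acceleration = np.gradient(velocity, dt, axis=0)
    jerk = np.gradient(acceleration, dt, axis=0)

    duration = positions.shape[0] * dt
    peak_speed = np.linalg.norm(velocity, axis=1).max()
    integrated_sq_jerk = np.sum(np.sum(jerk ** 2, axis=1)) * dt

    # Normalised so the index is dimensionless.
    return integrated_sq_jerk * duration ** 3 / peak_speed ** 2

# Example: a synthetic 2-second reaching movement sampled at 100 Hz.
t = np.linspace(0.0, 2.0, 200)[:, None]
trajectory = np.hstack([0.3 * (1 - np.cos(np.pi * t / 2.0)) / 2.0, 0 * t, 0 * t])
print(jerk_index(trajectory, fs=100.0))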

Cheah, Y.; Spitzer, M.; and Coutinho, E. Background Music and Performance on Memory-related Tasks: Preliminary Findings from a Systematic Review. In Proceedings of the 16th International Conference on Music Perception and Cognition, Sheffield, UK, 2021.

@inproceedings{Cheah2021,
  title     = {Background Music and Performance on Memory-related Tasks: Preliminary Findings from a Systematic Review},
  author    = {Cheah, Yi-Ting and Spitzer, Michael and Coutinho, Eduardo},
  booktitle = {Proceedings of the 16th International Conference on Music Perception and Cognition},
  address   = {Sheffield, UK},
  year      = {2021},
  keywords  = {abstract, conference}
}

Coutinho, E.; Alshukri, A.; De Berardinis, J.; and Dowrick, C. POLYHYMNIA Mood - Empowering people to cope with depression through music listening. In UbiComp/ISWC 2021 - Adjunct Proceedings of the 2021 ACM International Joint Conference on Pervasive and Ubiquitous Computing and Proceedings of the 2021 ACM International Symposium on Wearable Computers, pages 188–193, 2021. ACM.

@inproceedings{CoutinhoPolyhymnia2021,
  title     = {POLYHYMNIA Mood - Empowering people to cope with depression through music listening},
  author    = {Coutinho, Eduardo and Alshukri, Ayesh and De Berardinis, Jacopo and Dowrick, Chris},
  booktitle = {UbiComp/ISWC 2021 - Adjunct Proceedings of the 2021 ACM International Joint Conference on Pervasive and Ubiquitous Computing and Proceedings of the 2021 ACM International Symposium on Wearable Computers},
  publisher = {ACM},
  address   = {Virtual, USA},
  year      = {2021},
  pages     = {188--193},
  doi       = {10.1145/3460418.3479334},
  keywords  = {Depression, Health Intervention, Machine Learning, Mood Regulation, Music Listening, Web App}
}

Abstract: Depression is one of the largest sources of burden of disease worldwide, and the development of flexible, timely and easily accessible interventions is considered to be a critical direction for the future. Mood Regulation (MR) via music listening may be a viable tool to support these aims if people have adequate support to make music selections that underpin healthy MR strategies. We developed a new app (POLYHYMNIA Mood) that automatically generates personalised music playlists for mood elevation and the reduction of depression symptoms, and here we provide an overview of POLYHYMNIA Mood and report the results of a preliminary evaluation of its effectiveness and acceptability. Results show that listening to POLYHYMNIA Mood playlists over a period of 4 weeks led to a large reduction in negative affect and a clinically significant reduction in depression symptoms. Whereas these results should be interpreted cautiously due to the small sample size and the lack of control conditions, they provide strong support to our approach.
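
The playlist-generation method itself is not detailed in the abstract. Purely as an illustration of mood-aware playlist ordering, and not the app's actual algorithm, the sketch below orders tracks so that their (hypothetical) valence scores move gradually from a listener's current mood towards a target mood:

# Purely illustrative: order tracks so that predicted valence moves gradually
# from the listener's current mood towards a target mood. This is NOT the
# POLYHYMNIA Mood algorithm; track names and valence values are made up.
tracks = {
    "track_a": 0.15, "track_b": 0.35, "track_c": 0.55,
    "track_d": 0.70, "track_e": 0.90,   # valence scores in [0, 1]
}

def mood_trajectory_playlist(tracks, current=0.2, target=0.8, length=4):
    """Pick one track per step, each closest to a linearly interpolated goal."""
    remaining = dict(tracks)
    playlist = []
    for step in range(length):
        goal = current + (target - current) * (step + 1) / length
        name = min(remaining, key=lambda k: abs(remaining[k] - goal))
        playlist.append(name)
        del remaining[name]
    return playlist

print(mood_trajectory_playlist(tracks))  # ['track_b', 'track_c', 'track_d', 'track_e']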

Coutinho, E.; and Cheah, Y. Audição musical e performance cognitiva [Music listening and cognitive performance]. In Musica Humana, Martingo, Â., editor. Húmus, 1st edition, 2021.

@inbook{Coutinho2021a,
  author    = {Coutinho, Eduardo and Cheah, Yi-Ting},
  editor    = {Martingo, Ângelo},
  chapter   = {Audição musical e performance cognitiva [Music listening and cognitive performance]},
  title     = {Musica Humana},
  publisher = {Húmus},
  address   = {V. N. Famalicão, Portugal},
  edition   = {1},
  year      = {2021},
  url       = {https://edicoeshumus.pt//index.php?route=product/product&product_id=1300&search=musica+humana&description=true}
}

Abstract: In this chapter, we provide a brief overview of the available evidence concerning the impact of music listening on cognitive performance, i.e., how listening to music can affect mental abilities (e.g., memory, attention) and our performance in tasks that require them (e.g., writing, reading). The chapter is divided into two main sections which reflect two main research trends: cognitive performance after listening to music (a.k.a. Mozart effect) and cognitive performance whilst listening to music.

2020 (6)

De Berardinis, J.; Vamvakaris, M.; Cangelosi, A.; and Coutinho, E. Unveiling the Hierarchical Structure of Music by Multi-Resolution Community Detection. Transactions of the International Society for Music Information Retrieval, 3(1): 82–97. June 2020.

@article{deberardinismscomdetection,
  title    = {Unveiling the Hierarchical Structure of Music by Multi-Resolution Community Detection},
  author   = {De Berardinis, Jacopo and Vamvakaris, Michail and Cangelosi, Angelo and Coutinho, Eduardo},
  journal  = {Transactions of the International Society for Music Information Retrieval},
  year     = {2020},
  month    = {6},
  volume   = {3},
  number   = {1},
  pages    = {82--97},
  doi      = {10.5334/tismir.41},
  url      = {http://transactions.ismir.net/articles/10.5334/tismir.41/},
  keywords = {article, journal}
}

Abstract: Human perception of musical structure is supposed to depend on the generation of hierarchies, which is inherently related to the actual organisation of sounds in music. Musical structures are indeed best retained by listeners when they form hierarchical patterns, with consequent implications on the appreciation of music and its performance. The automatic detection of musical structure in audio recordings is one of the most challenging problems in the field of music information retrieval, since even human experts tend to disagree on the structural decomposition of a piece of music. However, most of the current music segmentation algorithms in literature can only produce flat segmentations, meaning that they cannot segment music at different levels in order to reveal its hierarchical structure. We propose a novel methodology for the hierarchical analysis of music structure that is based on graph theory and multi-resolution community detection. This unsupervised method can perform both the tasks of boundary detection and structural grouping, without the need of particular constraints that would limit the resulting segmentation. To evaluate our approach, we designed an experiment that allowed us to compare its segmentation performance with that of the current state of the art algorithms for hierarchical segmentation. Our results indicate that the proposed methodology can achieve state of the art performance on a well-known benchmark dataset, thus providing a deeper analysis of musical structure.
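
The method represents music as a graph and applies community detection at multiple resolutions. The sketch below illustrates the general idea only (not the paper's exact graph construction or algorithm): build a similarity graph over time frames and detect communities at several resolutions, assuming numpy and networkx are available:

# Rough sketch of the general idea (not the paper's pipeline): build a frame
# similarity graph and detect communities at several resolutions, giving
# coarse-to-fine groupings of frames.
import numpy as np
import networkx as nx
from networkx.algorithms.community import greedy_modularity_communities

rng = np.random.default_rng(0)
# Toy "features": 60 frames drawn from noisy prototypes in an A-B-A layout.
prototypes = rng.normal(size=(2, 12))
labels = [0] * 20 + [1] * 20 + [0] * 20
features = np.array([prototypes[k] + 0.3 * rng.normal(size=12) for k in labels])

# Connect frames whose cosine similarity exceeds a threshold.
normed = features / np.linalg.norm(features, axis=1, keepdims=True)
similarity = normed @ normed.T
graph = nx.Graph()
graph.add_nodes_from(range(len(features)))
for i in range(len(features)):
    for j in range(i + 1, len(features)):
        if similarity[i, j] > 0.6:
            graph.add_edge(i, j, weight=float(similarity[i, j]))

# Higher resolution values favour smaller, finer communities.
for resolution in (0.5, 1.0, 2.0):
    communities = greedy_modularity_communities(graph, weight="weight",
                                                resolution=resolution)
    print(f"resolution {resolution}: {len(communities)} communities")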

Cheah, Y.; Spitzer, M.; and Coutinho, E. The impact of background music on cognitive task performance: a systematic review protocol. PROSPERO - International prospective register of systematic reviews, 2020.

@misc{Cheah2020,
  title       = {The impact of background music on cognitive task performance: a systematic review protocol},
  author      = {Cheah, Yiting and Spitzer, Michael and Coutinho, Eduardo},
  publisher   = {PROSPERO - International prospective register of systematic reviews},
  institution = {University of Liverpool},
  year        = {2020},
  url         = {https://www.crd.york.ac.uk/prospero/display_record.php?ID=CRD42020207193}
}

de Berardinis, J.; Cangelosi, A.; and Coutinho, E. The multiple voices of musical emotions: source separation for improving music emotion recognition models and their interpretability. In Cumming, J.; Lee, J. H.; McFee, B.; Schedl, M.; Devaney, J.; McKay, C.; Zangerle, E.; and de Reuse, T., editors, Proceedings of the 21st International Society for Music Information Retrieval Conference, pages 310–317, 2020. International Society for Music Information Retrieval.

@inproceedings{DeBerardinis2020a,
  title     = {The multiple voices of musical emotions: source separation for improving music emotion recognition models and their interpretability},
  author    = {de Berardinis, J. and Cangelosi, A. and Coutinho, E.},
  editor    = {Cumming, Julie and Lee, Jin Ha and McFee, Brian and Schedl, Markus and Devaney, Johanna and McKay, Cory and Zangerle, Eva and de Reuse, Timothy},
  booktitle = {Proceedings of the 21st International Society for Music Information Retrieval Conference},
  publisher = {International Society for Music Information Retrieval},
  address   = {Montréal, Québec, Canada},
  year      = {2020},
  pages     = {310--317},
  url       = {https://www.ismir2020.net/assets/img/proceedings/2020_ISMIR_Proceedings.pdf}
}

Abstract: Despite the manifold developments in music emotion recognition and related areas, estimating the emotional impact of music still poses many challenges. These are often associated to the complexity of the acoustic codes to emotion and the lack of large amounts of data with robust golden standards. In this paper, we propose a new computational model (EmoMucs) that considers the role of different musical voices in the prediction of the emotions induced by music. We combine source separation algorithms for breaking up music signals into independent song elements (vocals, bass, drums, other) and end-to-end state-of-the-art machine learning techniques for feature extraction and emotion modelling (valence and arousal regression). Through a series of computational experiments on a benchmark dataset using source-specialised models trained independently and different fusion strategies, we demonstrate that EmoMucs outperforms state-of-the-art approaches with the advantage of providing insights into the relative contribution of different musical elements to the emotions perceived by listeners.
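
EmoMucs combines predictions from source-specialised models. The sketch below shows only one simple late-fusion strategy with made-up numbers and weights; the paper evaluates several fusion strategies, and this is not its reference implementation. Assumes numpy is available:

# Minimal sketch of weighted late fusion of per-source (valence, arousal)
# predictions. Values and weights are made up for illustration.
import numpy as np

predictions = {                       # hypothetical per-source predictions
    "vocals": np.array([0.30, 0.10]),
    "bass":   np.array([0.05, 0.20]),
    "drums":  np.array([0.00, 0.60]),
    "other":  np.array([0.40, 0.25]),
}
weights = {"vocals": 0.4, "bass": 0.1, "drums": 0.2, "other": 0.3}

fused = sum(weights[src] * pred for src, pred in predictions.items())
valence, arousal = fused
print(f"fused valence = {valence:.2f}, arousal = {arousal:.2f}")

# Per-source weighted contributions hint at which musical elements drive the
# predicted emotion for this clip (one route to interpretability).
for src, pred in predictions.items():
    print(src, np.round(weights[src] * pred, 2))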

Lamont, A.; Bannister, S.; Coutinho, E.; and Egermann, H. ‘Talking’ about music - The emotional content of comments on YouTube videos. In Music and cyberculture before and after the new decade, 2020.

@inproceedings{Lamont2020,
  title     = {‘Talking’ about music - The emotional content of comments on YouTube videos},
  author    = {Lamont, A. and Bannister, S. and Coutinho, E. and Egermann, H.},
  booktitle = {Music and cyberculture before and after the new decade},
  year      = {2020},
  url       = {https://youtcc2020.weebly.com/abstract-book.html}
}

de Berardinis, J.; Barrett, S.; Cangelosi, A.; and Coutinho, E. Modelling long- and short-term structure in symbolic music with attention and recurrence. In CSMC + MuMe 2020: 2020 Joint Conference on AI Music Creativity, Stockholm, Sweden, 2020.

@inproceedings{DeBerardinis2020,
  title     = {Modelling long- and short-term structure in symbolic music with attention and recurrence},
  author    = {de Berardinis, J. and Barrett, S. and Cangelosi, A. and Coutinho, E.},
  booktitle = {CSMC + MuMe 2020: 2020 Joint Conference on AI Music Creativity},
  address   = {Stockholm, Sweden},
  year      = {2020},
  url       = {https://boblsturm.github.io/aimusic2020/}
}

Abstract: The automatic composition of music with long-term structure is a central problem in music generation. Neural network-based models have been shown to perform relatively well in melody generation, but generating music with long-term structure is still a major challenge. This paper introduces a new approach for music modelling that combines recent advancements of transformer models with recurrent networks, the long-short term universal transformer (LSTUT), and compares its ability to predict music against current state-of-the-art music models. Our experiments are designed to push the boundaries of music models on considerably long music sequences, a crucial requirement for learning long-term structure effectively. Results show that the LSTUT outperforms all the other models and can potentially learn features related to music structure at different time scales. Overall, we show the importance of integrating both recurrence and attention in the architecture of music models, and their potential use in future automatic composition systems.
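
The LSTUT combines attention and recurrence in one architecture. The sketch below is a generic PyTorch illustration of stacking self-attention and an LSTM over a token sequence; it is not the LSTUT itself, and the causal masking needed for generation is omitted. Assumes torch is installed:

# Generic sketch: self-attention (long-range dependencies) followed by an LSTM
# (local, order-sensitive structure) over a symbolic-music token sequence.
# This is NOT the LSTUT architecture from the paper; causal masking is omitted.
import torch
import torch.nn as nn

class AttentionRecurrenceBlock(nn.Module):
    def __init__(self, vocab_size: int, d_model: int = 128, n_heads: int = 4):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, d_model)
        self.attn = nn.MultiheadAttention(d_model, n_heads, batch_first=True)
        self.lstm = nn.LSTM(d_model, d_model, batch_first=True)
        self.out = nn.Linear(d_model, vocab_size)   # next-token logits

    def forward(self, tokens: torch.Tensor) -> torch.Tensor:
        x = self.embed(tokens)            # (batch, seq, d_model)
        attended, _ = self.attn(x, x, x)  # global self-attention
        x = x + attended                  # residual connection
        recurrent, _ = self.lstm(x)       # local recurrent modelling
        return self.out(recurrent)        # (batch, seq, vocab_size)

model = AttentionRecurrenceBlock(vocab_size=512)
dummy = torch.randint(0, 512, (2, 64))    # batch of 2 sequences of 64 tokens
print(model(dummy).shape)                 # torch.Size([2, 64, 512])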

Coutinho, E.; Van Criekinge, T.; Hanford, G.; Nathan, R.; Maden, M.; and Hill, R. The effectiveness of music therapy interventions for people with eating disorders: a systematic review protocol. PROSPERO, 2020.

@misc{Coutinho2020,
  title     = {The effectiveness of music therapy interventions for people with eating disorders: a systematic review protocol},
  author    = {Coutinho, Eduardo and Van Criekinge, Tamaya and Hanford, Greg and Nathan, Rajan and Maden, Michelle and Hill, Ruaraidh},
  publisher = {PROSPERO},
  year      = {2020},
  url       = {https://www.crd.york.ac.uk/prospero/display_record.php?ID=CRD42020169901},
  keywords  = {Systematic review, prospero, protocol}
}

Abstract (PROSPERO record):
Review question: Is music therapy an effective treatment for the reduction in eating disorder psychopathology and/or symptoms?
Searches: MEDLINE (OVID), Cochrane Library, CINAHL, Embase, RILM, BASE, PsycINFO, Scopus. No restrictions were applied during the searches.
Types of study to be included: Any study with an experimental design.
Condition or domain being studied: Eating disorders are severe disturbances in behaviors and related thoughts and emotions. People with eating disorders typically become pre-occupied with food and their body weight. A doctor or mental health professional will make a diagnosis based on physical examination (to rule out medical causes), psychological evaluation (questions about thoughts, feelings and eating habits), or other additional tests based on signs, symptoms and eating habits.
Participants/population: People diagnosed with any type of eating disorder, of any age.
Intervention(s), exposure(s): Music therapy delivered by qualified music therapists (formal, manualised and facilitated music therapies, such as Nordoff-Robbins, and/or recognised by professional bodies such as the British Association for Music Therapy).
Comparator(s)/control: Any other intervention, a non-exposed control group, or treatment as usual.
Main outcome(s): Reduction in eating disorder psychopathology and/or symptoms. No direction of the impact can be formulated, as outcomes can move in different directions. Measure of effect: mean change.
Additional outcome(s): These will not be the main objective of this systematic review; however, if reported, we will include economic evidence (cost-effectiveness) and quality of life beyond the disorder, and formulate a hierarchy of outcome measures. Measure of effect: mean change.
Data extraction (selection and coding): The following information was collected: first author, year of publication, number and characteristics of the participants, outcome measures, interventions, results and conclusions. Two independent reviewers will extract the data and a third reviewer will resolve discrepancies when they occur.
Risk of bias (quality) assessment: Methodological scoring will be assessed by two independent reviewers. In case of uncertainty at any point during the screening or scoring process, consensus will be sought during a meeting. Risk of bias scales suggested by Cochrane will be used depending on the design of the study.
Strategy for data synthesis: A synthesis without meta-analysis (SWiM) will be provided to discuss the observed effects, as preliminary searches reveal a lack of homogeneous and high-quality studies needed to perform statistical analysis of standard effect sizes. The population will be grouped based on eating disorder condition (other subgroups, as mentioned below, are also possible). Justification of the results will require the support of at least two studies with sufficient methodological quality (moderate to low risk of bias). The level of evidence will be determined for each study to support effect claims. The outcomes used will depend on the available literature, but the objective is to examine change scores before and after music therapy. Data presentation will consist of tables and graphs to act as supporting material to visualize the data.
Analysis of subgroups or subsets: Analysis of different conditions of eating disorders or other patient characteristics such as age, clinical/community/home setting, type of music therapy, etc.
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2019\n \n \n (13)\n \n \n
\n
\n \n \n
\n \n\n \n \n van Criekinge, T.; D'Août, K.; O'Brien, J.; and Coutinho, E.\n\n\n \n \n \n \n \n Effect of music listening on hypertonia in neurologically impaired patients-systematic review.\n \n \n \n \n\n\n \n\n\n\n PeerJ, 7: e8228. 12 2019.\n \n\n\n\n
\n\n\n\n \n \n \"EffectWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 11 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Effect of music listening on hypertonia in neurologically impaired patients-systematic review},\n type = {article},\n year = {2019},\n keywords = {Electromyography,Hypertonia,Music,Neurology,Relaxation,Spasticity},\n pages = {e8228},\n volume = {7},\n websites = {https://peerj.com/articles/8228},\n month = {12},\n day = {19},\n id = {4529f512-222b-3f8a-b347-e6dc2a467232},\n created = {2020-05-27T15:18:58.472Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2023-05-15T08:14:21.869Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {VanCriekinge2019},\n source_type = {JOUR},\n folder_uuids = {16d8408d-4737-41ee-91fc-ea9af111c0fe},\n private_publication = {false},\n abstract = {Background. As music listening is able to induce self-perceived and physiological signs of relaxation, it might be an interesting tool to induce muscle relaxation in patients with hypertonia. To this date effective non-pharmacological rehabilitation strategies to treat hypertonia in neurologically impaired patients are lacking. Therefore the aim is to investigate the effectiveness of music listening on muscle activity and relaxation. Methodology. The search strategy was performed by the PRISMA guidelines and registered in the PROSPERO database (no. 42019128511). Seven databases were systematically searched until March 2019. Six of the 1,684 studies met the eligibility criteria and were included in this review. Risk of bias was assessed by the PEDro scale. In total 171 patients with a variety of neurological conditions were included assessing hypertonia with both clinicall and biomechanical measures. Results. The analysis showed that there was a large treatment effect of music listening on muscle performance (SMD 0.96, 95% CI [0.29-1.63], I2 = 10%, Z = 2.82, p = 0.005). Music can be used as either background music during rehabilitation (dual-task) or during rest (single-task) and musical preferences seem to play a major role in the observed treatment effect. Conclusions. Although music listening is able to induce muscle relaxation, several gaps in the available literature were acknowledged. Future research is in need of an accurate and objective assessment of hypertonia.},\n bibtype = {article},\n author = {van Criekinge, Tamaya and D'Août, Kristiaan and O'Brien, Jonathon and Coutinho, Eduardo},\n doi = {10.7717/PEERJ.8228},\n journal = {PeerJ}\n}
\n
\n\n\n
\n Background. As music listening is able to induce self-perceived and physiological signs of relaxation, it might be an interesting tool to induce muscle relaxation in patients with hypertonia. To date, effective non-pharmacological rehabilitation strategies to treat hypertonia in neurologically impaired patients are lacking. Therefore, the aim is to investigate the effectiveness of music listening on muscle activity and relaxation. Methodology. The search strategy followed the PRISMA guidelines and was registered in the PROSPERO database (no. 42019128511). Seven databases were systematically searched until March 2019. Six of the 1,684 studies met the eligibility criteria and were included in this review. Risk of bias was assessed with the PEDro scale. In total, 171 patients with a variety of neurological conditions were included, with hypertonia assessed using both clinical and biomechanical measures. Results. The analysis showed a large treatment effect of music listening on muscle performance (SMD 0.96, 95% CI [0.29-1.63], I2 = 10%, Z = 2.82, p = 0.005). Music can be used either as background music during rehabilitation (dual-task) or during rest (single-task), and musical preferences seem to play a major role in the observed treatment effect. Conclusions. Although music listening is able to induce muscle relaxation, several gaps in the available literature were acknowledged. Future research requires an accurate and objective assessment of hypertonia.\n
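To illustrate the kind of pooled statistic quoted above (an SMD with a 95% CI, I2, Z and p), here is a minimal Python sketch of generic inverse-variance pooling of standardised mean differences. The per-study SMDs and standard errors are hypothetical placeholders, and this is not the analysis code used in the review.

import numpy as np
from scipy import stats

# Hypothetical per-study standardised mean differences and standard errors.
smd = np.array([0.80, 1.10, 0.50, 1.30])
se  = np.array([0.35, 0.40, 0.30, 0.45])

w = 1.0 / se**2                                   # inverse-variance weights
pooled = np.sum(w * smd) / np.sum(w)              # pooled SMD (fixed-effect)
se_pooled = np.sqrt(1.0 / np.sum(w))
ci_low, ci_high = pooled - 1.96 * se_pooled, pooled + 1.96 * se_pooled
z = pooled / se_pooled
p = 2.0 * (1.0 - stats.norm.cdf(abs(z)))          # two-sided test of the overall effect

Q = np.sum(w * (smd - pooled)**2)                 # Cochran's Q
I2 = 100.0 * max(0.0, (Q - (len(smd) - 1)) / Q)   # I^2 heterogeneity (%)
print(f"SMD {pooled:.2f}, 95% CI [{ci_low:.2f}-{ci_high:.2f}], I2 = {I2:.0f}%, Z = {z:.2f}, p = {p:.3f}")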
\n\n\n
\n\n\n
\n \n\n \n \n Amiriparian, S.; Gerczuk, M.; Coutinho, E.; Baird, A.; Ottl, S.; Milling, M.; and Schuller, B.\n\n\n \n \n \n \n Emotion and themes recognition in music utilising convolutional and recurrent neural networks.\n \n \n \n\n\n \n\n\n\n In CEUR Workshop Proceedings, volume 2670, pages 26-28, 10 2019. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Emotion and themes recognition in music utilising convolutional and recurrent neural networks},\n type = {inproceedings},\n year = {2019},\n pages = {26-28},\n volume = {2670},\n month = {10},\n day = {27},\n city = {Sophia Antipolis, France},\n id = {6f372c47-d0e7-31aa-aec8-0901922dbda2},\n created = {2020-05-29T10:17:30.748Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2023-05-15T08:14:21.180Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Amiriparian2019},\n source_type = {CONF},\n private_publication = {false},\n abstract = {Emotion is an inherent aspect of music, and associations to music can be made via both life experience and specific musical techniques applied by the composer. Computational approaches for music recognition have been well-established in the research community; however, deep approaches have been limited and not yet comparable to conventional approaches. In this study, we present our fusion system of end-to-end convolutional recurrent neural networks (CRNN) and pre-trained convolutional feature extractors for music emotion and theme recognition1. We train 9 models and conduct various late fusion experiments. Our best performing model (team name: AugLi) achieves 74.2 % ROC-AUC on the test partition which is 1.6 percentage points over the baseline system of the MediaEval 2019 Emotion & Themes in Music task.},\n bibtype = {inproceedings},\n author = {Amiriparian, Shahin and Gerczuk, Maurice and Coutinho, Eduardo and Baird, Alice and Ottl, Sandra and Milling, Manuel and Schuller, Björn},\n booktitle = {CEUR Workshop Proceedings}\n}
\n
\n\n\n
\n Emotion is an inherent aspect of music, and associations to music can be made via both life experience and specific musical techniques applied by the composer. Computational approaches for music recognition have been well-established in the research community; however, deep approaches have been limited and not yet comparable to conventional approaches. In this study, we present our fusion system of end-to-end convolutional recurrent neural networks (CRNN) and pre-trained convolutional feature extractors for music emotion and theme recognition. We train 9 models and conduct various late fusion experiments. Our best-performing model (team name: AugLi) achieves 74.2% ROC-AUC on the test partition, which is 1.6 percentage points over the baseline system of the MediaEval 2019 Emotion & Themes in Music task.\n
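As a rough, hypothetical sketch of the late-fusion evaluation described above (not the authors' system), the snippet below averages tag probabilities from several models and scores the fused predictions with macro ROC-AUC using scikit-learn; the labels and probabilities are random stand-ins, and the array sizes are arbitrary rather than those of the MediaEval data.

import numpy as np
from sklearn.metrics import roc_auc_score

rng = np.random.default_rng(0)
n_clips, n_tags, n_models = 200, 56, 3                      # placeholder sizes

y_true = rng.integers(0, 2, size=(n_clips, n_tags))         # multi-label ground truth
model_probs = rng.random(size=(n_models, n_clips, n_tags))  # per-model tag probabilities

fused = model_probs.mean(axis=0)                            # simple late fusion: average the scores
print("macro ROC-AUC:", roc_auc_score(y_true, fused, average="macro"))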
\n\n\n
\n\n\n
\n \n\n \n \n Hood, S.\n\n\n \n \n \n \n \n Appraisal.\n \n \n \n \n\n\n \n\n\n\n The Cambridge Handbook of Systemic Functional Linguistics, pages 382-409. Thompson, W., F., editor(s). SAGE Publications, Inc., 2019.\n \n\n\n\n
\n\n\n\n \n \n \"TheWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@inbook{\n type = {inbook},\n year = {2019},\n keywords = {book,chapter},\n pages = {382-409},\n websites = {http://dx.doi.org/10.4135/9781452283012.n23 http://sk.sagepub.com/reference/music-in-the-social-and-behavioral-sciences/n25.xml,http://sk.sagepub.com/reference/music-in-the-social-and-behavioral-sciences/n25.xml},\n publisher = {SAGE Publications, Inc.},\n city = {2455 Teller Road, Thousand Oaks California 91320 United States},\n id = {80f3e7ed-7f30-3702-bacc-15eddfd686fb},\n created = {2020-05-29T10:17:30.824Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2023-05-15T08:14:21.026Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Coutinho2014},\n source_type = {CHAP},\n private_publication = {false},\n bibtype = {inbook},\n author = {Hood, Susan},\n editor = {Thompson, W F},\n doi = {10.5422/fordham/9780823252008.003.0017},\n chapter = {Appraisal},\n title = {The Cambridge Handbook of Systemic Functional Linguistics}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n Hou, X.; Brooks, H.; Donnellan, W.; and Coutinho, E.\n\n\n \n \n \n \n The impact of music listening on the quality of life of people with dementia and their caregivers.\n \n \n \n\n\n \n\n\n\n In Proceedings of the British Society of Gerontology 48th Annual Conference, 2019. British Society of Gerontology\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{\n title = {The impact of music listening on the quality of life of people with dementia and their caregivers},\n type = {inproceedings},\n year = {2019},\n keywords = {abstract,conference},\n publisher = {British Society of Gerontology},\n institution = {Durham},\n id = {c24e9d8d-9b5f-3327-9bec-39ff8d12a990},\n created = {2020-05-29T11:51:36.880Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2020-06-08T17:31:31.678Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {houthecaregivers},\n source_type = {inproceedings},\n folder_uuids = {a2a583e8-b0a3-48f9-900f-27e15c9a7f71},\n private_publication = {false},\n abstract = {Dementia is an incurable and fatal brain-damaging disease associated with psychological and behavioural symptoms that may decrease people’s Quality of Life (QoL) (Ravi, 2011). Current pharmacological interventions are costly, entail serious side effects and, most importantly, have limited effects on the QoL of people with dementia (PWD) and their caregivers. In this context, non-pharmacological interventions are being explored as alternative (or supplementary) strategies (Overshott & Burns, 2005), and meaningful music listening is a very promising one. Although academic and clinical evidence shows that exposure to music and musical activities has positive impacts in PWD at a variety of levels (e.g., Clark, Lipe, & Bilbrey, 1998; Gerdner, 2000), less research has been directed towards the QoL outcomes of music listening for both PWD and their caregivers. The current study addresses this void by systematically reviewing primary research studies that assess the impact of music listening on a variety of outcome measures related to PWD (QoL, mood, cognitive function and behavioural symptoms), patient-caregiver relationships, and caregivers (QoL, mood, and burden). The results of this systematic review will provide a clearer picture on the effectiveness of music listening intervention in improving the QoL of PWD and their caregivers, an evaluation of the interventions methodologies, and inform the design of a new empirical study aimed at devising a systematic methodology for the application of meaningful music listening to improve the QoL of PWD and their caregivers.},\n bibtype = {inproceedings},\n author = {Hou, Xiaoxiao and Brooks, H and Donnellan, W and Coutinho, E},\n booktitle = {Proceedings of the British Society of Gerontology 48th Annual Conference}\n}
\n
\n\n\n
\n Dementia is an incurable and fatal brain-damaging disease associated with psychological and behavioural symptoms that may decrease people’s Quality of Life (QoL) (Ravi, 2011). Current pharmacological interventions are costly, entail serious side effects and, most importantly, have limited effects on the QoL of people with dementia (PWD) and their caregivers. In this context, non-pharmacological interventions are being explored as alternative (or supplementary) strategies (Overshott & Burns, 2005), and meaningful music listening is a very promising one. Although academic and clinical evidence shows that exposure to music and musical activities has positive impacts in PWD at a variety of levels (e.g., Clark, Lipe, & Bilbrey, 1998; Gerdner, 2000), less research has been directed towards the QoL outcomes of music listening for both PWD and their caregivers. The current study addresses this void by systematically reviewing primary research studies that assess the impact of music listening on a variety of outcome measures related to PWD (QoL, mood, cognitive function and behavioural symptoms), patient-caregiver relationships, and caregivers (QoL, mood, and burden). The results of this systematic review will provide a clearer picture of the effectiveness of music listening interventions in improving the QoL of PWD and their caregivers, offer an evaluation of the intervention methodologies, and inform the design of a new empirical study aimed at devising a systematic methodology for the application of meaningful music listening to improve the QoL of PWD and their caregivers.\n
\n\n\n
\n\n\n
\n \n\n \n \n Hou, X.; Brooks, H.; Donnellan, W.; and Coutinho, E.\n\n\n \n \n \n \n The role of music listening in eliciting autobiographical memories and improving the quality of life of people with dementia and their caregivers.\n \n \n \n\n\n \n\n\n\n In Proceedings of the Music & lifetime memories: An interdisciplinary conference, 11 2019. University of Durham\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{\n title = {The role of music listening in eliciting autobiographical memories and improving the quality of life of people with dementia and their caregivers},\n type = {inproceedings},\n year = {2019},\n keywords = {abstract,conference},\n month = {11},\n publisher = {University of Durham},\n day = {1},\n city = {Durham},\n institution = {Liverpool},\n id = {02fb37c9-20ca-359f-89ff-f12d20867b90},\n created = {2020-05-29T11:51:37.063Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2020-05-30T17:17:27.187Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {xiaoxiaothecaregivers},\n source_type = {inproceedings},\n folder_uuids = {a2a583e8-b0a3-48f9-900f-27e15c9a7f71},\n private_publication = {false},\n abstract = {Dementia is an incurable and fatal brain-damaging disease associated with psychological and behavioural symptoms that may decrease people’s Quality of Life (QoL) (Ravi, 2011). Current pharmacological interventions are costly, entail serious side effects and, most importantly, have limited effects on the QoL of people with dementia (PWD) and their caregivers. In this context, non-pharmacological interventions are being explored as alternative (or supplementary) strategies (Overshott & Burns, 2005), and meaningful music listening is a very promising one. Although academic and clinical evidence shows that exposure to music and musical activities has positive impacts in PWD at a variety of levels (e.g., Clark, Lipe, & Bilbrey, 1998; Gerdner, 2000), less research has been directed towards the QoL outcomes of music listening for both PWD and their caregivers. The current study addresses this void by systematically reviewing primary research studies that assess the impact of music listening on a variety of outcome measures related to PWD (QoL, mood, cognitive function and behavioural symptoms), patient-caregiver relationships, and caregivers (QoL, mood, and burden). The results of this systematic review will provide a clearer picture on the effectiveness of music listening intervention in improving the QoL of PWD and their caregivers, an evaluation of the interventions methodologies, and inform the design of a new empirical study aimed at devising a systematic methodology for the application of meaningful music listening to improve the QoL of PWD and their caregivers.},\n bibtype = {inproceedings},\n author = {Hou, Xiaoxiao and Brooks, H and Donnellan, W and Coutinho, E},\n booktitle = {Proceedings of the Music & lifetime memories: An interdisciplinary conference}\n}
\n
\n\n\n
\n Dementia is an incurable and fatal brain-damaging disease associated with psychological and behavioural symptoms that may decrease people’s Quality of Life (QoL) (Ravi, 2011). Current pharmacological interventions are costly, entail serious side effects and, most importantly, have limited effects on the QoL of people with dementia (PWD) and their caregivers. In this context, non-pharmacological interventions are being explored as alternative (or supplementary) strategies (Overshott & Burns, 2005), and meaningful music listening is a very promising one. Although academic and clinical evidence shows that exposure to music and musical activities has positive impacts in PWD at a variety of levels (e.g., Clark, Lipe, & Bilbrey, 1998; Gerdner, 2000), less research has been directed towards the QoL outcomes of music listening for both PWD and their caregivers. The current study addresses this void by systematically reviewing primary research studies that assess the impact of music listening on a variety of outcome measures related to PWD (QoL, mood, cognitive function and behavioural symptoms), patient-caregiver relationships, and caregivers (QoL, mood, and burden). The results of this systematic review will provide a clearer picture of the effectiveness of music listening interventions in improving the QoL of PWD and their caregivers, offer an evaluation of the intervention methodologies, and inform the design of a new empirical study aimed at devising a systematic methodology for the application of meaningful music listening to improve the QoL of PWD and their caregivers.\n
\n\n\n
\n\n\n
\n \n\n \n \n Scherer, K., R.; Trznadel, S.; Fantini, B.; and Coutinho, E.\n\n\n \n \n \n \n \n Comments on comments by Cupchik (2019) and Jacobsen (2019).\n \n \n \n \n\n\n \n\n\n\n Psychology of Aesthetics, Creativity, and the Arts, 13(3): 264-265. 8 2019.\n \n\n\n\n
\n\n\n\n \n \n \"CommentsWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Comments on comments by Cupchik (2019) and Jacobsen (2019)},\n type = {article},\n year = {2019},\n keywords = {Comment,journal},\n pages = {264-265},\n volume = {13},\n websites = {https://psycnet.apa.org/record/2019-45121-004},\n month = {8},\n id = {6f6c7ba3-bcba-37fa-8c6a-eba7eb6bd00e},\n created = {2020-05-29T11:51:37.116Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2020-06-08T17:31:31.791Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {scherer2019comments2019},\n source_type = {article},\n folder_uuids = {5bf27d41-c6e6-415f-a135-2eb1d8164244},\n private_publication = {false},\n abstract = {In this response to comments by Cupchik (2019) and Jacobsen (2019), we address the points made and extend the discussion to raise a number of issues to consider in the quest for ecologically valid research on aesthetic emotions generated by music performances.},\n bibtype = {article},\n author = {Scherer, Klaus R. and Trznadel, Stéphanie and Fantini, Bernardino and Coutinho, Eduardo},\n doi = {10.1037/aca0000246},\n journal = {Psychology of Aesthetics, Creativity, and the Arts},\n number = {3}\n}
\n
\n\n\n
\n In this response to comments by Cupchik (2019) and Jacobsen (2019), we address the points made and extend the discussion to raise a number of issues to consider in the quest for ecologically valid research on aesthetic emotions generated by music performances.\n
\n\n\n
\n\n\n
\n \n\n \n \n Scherer, K., R.; Trznadel, S.; Fantini, B.; and Coutinho, E.\n\n\n \n \n \n \n \n Assessing emotional experiences of opera spectators in situ.\n \n \n \n \n\n\n \n\n\n\n Psychology of Aesthetics, Creativity, and the Arts, 13(3): 244-258. 8 2019.\n \n\n\n\n
\n\n\n\n \n \n \"AssessingWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Assessing emotional experiences of opera spectators in situ},\n type = {article},\n year = {2019},\n keywords = {article,journal},\n pages = {244-258},\n volume = {13},\n websites = {http://doi.apa.org/getdoi.cfm?doi=10.1037/aca0000163},\n month = {8},\n publisher = {APA},\n day = {16},\n id = {015fa105-7208-3c6a-b818-def98574428d},\n created = {2020-05-29T11:51:37.137Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2023-05-15T08:14:21.718Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {schererassessingsitu},\n source_type = {article},\n folder_uuids = {116db2f1-e6ac-4780-bccf-a977325250cd,99880aa7-55df-4b45-bfce-0ffc00b23ced},\n private_publication = {false},\n abstract = {Opera performances elicit strong emotional reactions in listeners. Yet, empirical demonstrations of these effects in situ are rare. Here we report a series of studies examining the emotional reactions of participants invited to the dress rehearsal of three different operas at the Geneva opera house before large audiences. Using a new affect checklist developed specifically for in situ studies of music performances, we asked participants to record (a) the intensity of 12 different types of affective reactions they experienced during selected scenes or (b) the frequency with which they experienced these emotions during specific acts or the opera as a whole. Results showed a high degree of specificity regarding the emotional impact of the operas as a whole and of individual scenes/acts. For one opera, Verdi's Macbeth, we also asked participants to rate both the emotions they actually felt during the opera scenes and the emotions expressed by the music or the singers' interpretation. Results confirm that spectators are clearly able to separate their own affective responses from what they perceive to be the emotions portrayed by the orchestra or on the stage. In addition, we evaluated the effect of different types of preperformance information sessions (on plot or music), as well as of participant personality and prior mood. Overall, the results demonstrate the feasibility of measuring highly differentiated emotional audience reactions to an opera performance with a brief validated checklist during actual performances in the opera house and the validity of self-reported emotions.},\n bibtype = {article},\n author = {Scherer, Klaus R. and Trznadel, Stéphanie and Fantini, Bernardino and Coutinho, Eduardo},\n doi = {10.1037/aca0000163},\n journal = {Psychology of Aesthetics, Creativity, and the Arts},\n number = {3}\n}
\n
\n\n\n
\n Opera performances elicit strong emotional reactions in listeners. Yet, empirical demonstrations of these effects in situ are rare. Here we report a series of studies examining the emotional reactions of participants invited to the dress rehearsal of three different operas at the Geneva opera house before large audiences. Using a new affect checklist developed specifically for in situ studies of music performances, we asked participants to record (a) the intensity of 12 different types of affective reactions they experienced during selected scenes or (b) the frequency with which they experienced these emotions during specific acts or the opera as a whole. Results showed a high degree of specificity regarding the emotional impact of the operas as a whole and of individual scenes/acts. For one opera, Verdi's Macbeth, we also asked participants to rate both the emotions they actually felt during the opera scenes and the emotions expressed by the music or the singers' interpretation. Results confirm that spectators are clearly able to separate their own affective responses from what they perceive to be the emotions portrayed by the orchestra or on the stage. In addition, we evaluated the effect of different types of preperformance information sessions (on plot or music), as well as of participant personality and prior mood. Overall, the results demonstrate the feasibility of measuring highly differentiated emotional audience reactions to an opera performance with a brief validated checklist during actual performances in the opera house and the validity of self-reported emotions.\n
\n\n\n
\n\n\n
\n \n\n \n \n Zhang, Z.; Han, J.; Coutinho, E.; and Schuller, B.\n\n\n \n \n \n \n \n Dynamic difficulty awareness training for continuous emotion prediction.\n \n \n \n \n\n\n \n\n\n\n IEEE Transactions on Multimedia, 21(5): 1289-1301. 5 2019.\n \n\n\n\n
\n\n\n\n \n \n \"DynamicWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Dynamic difficulty awareness training for continuous emotion prediction},\n type = {article},\n year = {2019},\n keywords = {article,journal},\n pages = {1289-1301},\n volume = {21},\n websites = {https://ieeexplore.ieee.org/document/8471224/,http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2%5C&SrcApp=PARTNER_APP%5C&SrcAuth=LinksAMR%5C&KeyUT=WOS:000466223600017%5C&DestLinkType=FullRecord%5C&DestApp=ALL_WOS%5C&UsrCustomerID=f3ec48df2},\n month = {5},\n id = {27bf2db0-8b71-337c-a1f6-79e25600ed88},\n created = {2020-05-29T11:51:37.187Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2023-05-15T08:14:21.719Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Zhang2019},\n source_type = {JOUR},\n folder_uuids = {116db2f1-e6ac-4780-bccf-a977325250cd,99880aa7-55df-4b45-bfce-0ffc00b23ced},\n private_publication = {false},\n abstract = {Time-continuous emotion prediction has become an increasingly compelling task in machine learning. Considerable efforts have been made to advance the performance of these systems. Nonetheless, the main focus has been the development of more sophisticated models and the incorporation of different expressive modalities (e.g., speech, face, and physiology). In this paper, motivated by the benefit of difficulty awareness in a human learning procedure, we propose a novel machine learning framework, namely, dynamic difficulty awareness training (DDAT), which sheds fresh light on the research - directly exploiting the difficulties in learning to boost the machine learning process. The DDAT framework consists of two stages: information retrieval and information exploitation. In the first stage, we make use of the reconstruction error of input features or the annotation uncertainty to estimate the difficulty of learning specific information. The obtained difficulty level is then used in tandem with original features to update the model input in a second learning stage with the expectation that the model can learn to focus on high difficulty regions of the learning process. We perform extensive experiments on a benchmark database REmote COLlaborative and affective to evaluate the effectiveness of the proposed framework. The experimental results show that our approach outperforms related baselines as well as other well-established time-continuous emotion prediction systems, which suggests that dynamically integrating the difficulty information for neural networks can help enhance the learning process.},\n bibtype = {article},\n author = {Zhang, Zixing and Han, Jing and Coutinho, Eduardo and Schuller, Bjorn},\n doi = {10.1109/TMM.2018.2871949},\n journal = {IEEE Transactions on Multimedia},\n number = {5}\n}
\n
\n\n\n
\n Time-continuous emotion prediction has become an increasingly compelling task in machine learning. Considerable efforts have been made to advance the performance of these systems. Nonetheless, the main focus has been the development of more sophisticated models and the incorporation of different expressive modalities (e.g., speech, face, and physiology). In this paper, motivated by the benefit of difficulty awareness in human learning, we propose a novel machine learning framework, namely, dynamic difficulty awareness training (DDAT), which sheds fresh light on the research: directly exploiting the difficulties in learning to boost the machine learning process. The DDAT framework consists of two stages: information retrieval and information exploitation. In the first stage, we make use of the reconstruction error of input features or the annotation uncertainty to estimate the difficulty of learning specific information. The obtained difficulty level is then used in tandem with original features to update the model input in a second learning stage, with the expectation that the model can learn to focus on high-difficulty regions of the learning process. We perform extensive experiments on the REmote COLlaborative and Affective interactions (RECOLA) benchmark database to evaluate the effectiveness of the proposed framework. The experimental results show that our approach outperforms related baselines as well as other well-established time-continuous emotion prediction systems, which suggests that dynamically integrating the difficulty information for neural networks can help enhance the learning process.\n
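A loose illustration of the two-stage idea described above (not the authors' implementation): estimate a per-frame difficulty cue as the reconstruction error of the input features, then append that cue to the features for a second-stage predictor. Generic scikit-learn models and random data stand in for the original networks and corpus.

import numpy as np
from sklearn.neural_network import MLPRegressor
from sklearn.linear_model import Ridge

rng = np.random.default_rng(0)
X = rng.normal(size=(500, 20))     # placeholder acoustic feature frames
y = rng.normal(size=500)           # placeholder continuous emotion target (e.g., arousal)

# Stage 1 (information retrieval): an autoencoder-like model reconstructs X;
# a large reconstruction error flags "difficult" frames.
ae = MLPRegressor(hidden_layer_sizes=(8,), max_iter=2000, random_state=0).fit(X, X)
difficulty = np.mean((ae.predict(X) - X) ** 2, axis=1, keepdims=True)

# Stage 2 (information exploitation): original features plus the difficulty cue.
X_aug = np.hstack([X, difficulty])
predictor = Ridge().fit(X_aug, y)
print("augmented input shape:", X_aug.shape)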
\n\n\n
\n\n\n
\n \n\n \n \n Baird, A.; Coutinho, E.; Hirschberg, J.; and Schuller, B.\n\n\n \n \n \n \n \n Sincerity in acted speech: Presenting the sincere apology corpus and results.\n \n \n \n \n\n\n \n\n\n\n In Proceedings of the Annual Conference of the International Speech Communication Association, INTERSPEECH, volume 2019-Septe, pages 539-543, 9 2019. ISCA\n \n\n\n\n
\n\n\n\n \n \n \"SincerityWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{\n title = {Sincerity in acted speech: Presenting the sincere apology corpus and results},\n type = {inproceedings},\n year = {2019},\n keywords = {article,conference},\n pages = {539-543},\n volume = {2019-Septe},\n websites = {http://www.isca-speech.org/archive/Interspeech_2019/abstracts/1349.html},\n month = {9},\n publisher = {ISCA},\n day = {15},\n city = {ISCA},\n institution = {Graz, Austria},\n id = {96556506-9cf4-38f9-8280-e97e4f1b3396},\n created = {2020-05-29T11:51:37.296Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2023-07-03T09:46:08.633Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {bairdsincerityresults},\n source_type = {inproceedings},\n folder_uuids = {aac08d0d-38e7-4f4e-a381-5271c5c099ce},\n private_publication = {false},\n abstract = {The ability to discern an individual's level of sincerity varies from person to person and across cultures. Sincerity is typically a key indication of personality traits such as trustworthiness, and portraying sincerity can be integral to an abundance of scenarios, e. g., when apologising. Speech signals are one important factor when discerning sincerity and, with more modern interactions occurring remotely, automatic approaches for the recognition of sincerity from speech are beneficial during both interpersonal and professional scenarios. In this study we present details of the Sincere Apology Corpus (SINA-C). Annotated by 22 individuals for their perception of sincerity, SINA-C is an English acted-speech corpus of 32 speakers, apologising in multiple ways. To provide an updated baseline for the corpus, various machine learning experiments are conducted. Finding that extracting deep data-representations (utilising the DEEP SPECTRUM toolkit) from the speech signals is best suited. Classification results on the binary (sincere / not sincere) task are at best 79.2 % Unweighted Average Recall and for regression, in regards to the degree of sincerity, a Root Mean Square Error of 0.395 from the standardised range [-1.51; 1.72] is obtained.},\n bibtype = {inproceedings},\n author = {Baird, Alice and Coutinho, Eduardo and Hirschberg, Julia and Schuller, Björn},\n doi = {10.21437/Interspeech.2019-1349},\n booktitle = {Proceedings of the Annual Conference of the International Speech Communication Association, INTERSPEECH}\n}
\n
\n\n\n
\n The ability to discern an individual's level of sincerity varies from person to person and across cultures. Sincerity is typically a key indication of personality traits such as trustworthiness, and portraying sincerity can be integral to an abundance of scenarios, e.g., when apologising. Speech signals are one important factor when discerning sincerity and, with more modern interactions occurring remotely, automatic approaches for the recognition of sincerity from speech are beneficial during both interpersonal and professional scenarios. In this study we present details of the Sincere Apology Corpus (SINA-C). Annotated by 22 individuals for their perception of sincerity, SINA-C is an English acted-speech corpus of 32 speakers, apologising in multiple ways. To provide an updated baseline for the corpus, various machine learning experiments are conducted, finding that extracting deep data-representations (utilising the DEEP SPECTRUM toolkit) from the speech signals is best suited to the task. Classification results on the binary (sincere / not sincere) task reach at best 79.2% Unweighted Average Recall, and for regression of the degree of sincerity a Root Mean Square Error of 0.395 is obtained on the standardised range [-1.51; 1.72].\n
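For readers unfamiliar with the two metrics quoted above, the snippet below shows one common way to compute Unweighted Average Recall (the mean of per-class recalls) and Root Mean Square Error with scikit-learn; the labels and predictions are made-up placeholders, not SinA-C results.

import numpy as np
from sklearn.metrics import recall_score, mean_squared_error

# Binary sincere (1) / not sincere (0) task -- placeholder labels and predictions.
y_true = [1, 0, 1, 1, 0, 0, 1, 0]
y_pred = [1, 0, 0, 1, 0, 1, 1, 0]
uar = recall_score(y_true, y_pred, average="macro")    # unweighted average recall

# Regression of the degree of sincerity -- placeholder values on a standardised scale.
true_deg = np.array([0.8, -0.3, 1.2, -1.0])
pred_deg = np.array([0.5, -0.1, 0.9, -1.3])
rmse = np.sqrt(mean_squared_error(true_deg, pred_deg))

print(f"UAR = {uar:.3f}, RMSE = {rmse:.3f}")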
\n\n\n
\n\n\n
\n \n\n \n \n Coutinho, E.; Scherer, K., R.; and Dibben, N.\n\n\n \n \n \n \n \n Singing and Emotion.\n \n \n \n \n\n\n \n\n\n\n Volume 1 of Scholarly Research Reviews. The Oxford Handbook of Singing, pages 296-314. Welch, G., F.; Howard, D., M.; and Nix, J., editor(s). Oxford University Press, 4 2019.\n \n\n\n\n
\n\n\n\n \n \n \"TheWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@inbook{\n type = {inbook},\n year = {2019},\n keywords = {book,chapter},\n pages = {296-314},\n volume = {1},\n websites = {http://oxfordhandbooks.com/view/10.1093/oxfordhb/9780199660773.001.0001/oxfordhb-9780199660773-e-006},\n month = {4},\n publisher = {Oxford University Press},\n day = {11},\n series = {Scholarly Research Reviews},\n id = {13fce87d-e90d-34f0-ba3b-04c4740962dc},\n created = {2020-05-29T11:51:39.216Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2020-06-08T17:31:32.249Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {coutinho2014singingemotion},\n source_type = {CHAP},\n folder_uuids = {5db95977-632e-457e-b1b5-8ad03c3d17c4},\n private_publication = {false},\n abstract = {In this chapter the authors discuss the emotional power of the singing voice. The chapter begins by providing an overview of the process of externalization of emotions by the human voice. Then, the authors discuss some fundamental determinants of emotional expression in singing, namely the ‘emotional script’, the artistic interpretation, and the singer’s affective state. Next, they describe the manner in which expressed emotions are encoded in the voice by singers and recognized by listeners, and compare it with vocal expression in everyday life. Finally, they identify various methodologies that can enhance understanding of the physiology of vocal production and the acoustic cues fundamental to perception and production of expressive sung performance. The authors propose that the knowledge gained from application of these methodologies can inform singing practice, and that interdisciplinary approaches and cooperation are central aspects of a fruitful and sustainable study of the expressive powers of the singing voice.},\n bibtype = {inbook},\n author = {Coutinho, Eduardo and Scherer, Klaus R. and Dibben, Nicola},\n editor = {Welch, Graham F. and Howard, David M and Nix, John},\n doi = {10.1093/oxfordhb/9780199660773.013.006},\n chapter = {Singing and Emotion},\n title = {The Oxford Handbook of Singing}\n}
\n
\n\n\n
\n In this chapter the authors discuss the emotional power of the singing voice. The chapter begins by providing an overview of the process of externalization of emotions by the human voice. Then, the authors discuss some fundamental determinants of emotional expression in singing, namely the ‘emotional script’, the artistic interpretation, and the singer’s affective state. Next, they describe the manner in which expressed emotions are encoded in the voice by singers and recognized by listeners, and compare it with vocal expression in everyday life. Finally, they identify various methodologies that can enhance understanding of the physiology of vocal production and the acoustic cues fundamental to perception and production of expressive sung performance. The authors propose that the knowledge gained from application of these methodologies can inform singing practice, and that interdisciplinary approaches and cooperation are central aspects of a fruitful and sustainable study of the expressive powers of the singing voice.\n
\n\n\n
\n\n\n
\n \n\n \n \n Baird, A.; and Coutinho, E.\n\n\n \n \n \n \n \n The Sincere Apology Corpus (SinA-C).\n \n \n \n \n\n\n \n\n\n\n 8 2019.\n \n\n\n\n
\n\n\n\n \n \n \"TheWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@misc{\n title = {The Sincere Apology Corpus (SinA-C)},\n type = {misc},\n year = {2019},\n keywords = {Dataset},\n websites = {http://doi.org/10.5281/zenodo.3241253},\n month = {8},\n id = {7cd66e98-384c-31ba-b26f-2aeffd173f5e},\n created = {2020-05-29T11:51:39.340Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2020-06-08T17:31:32.297Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {baird2019thesinac},\n source_type = {DATASET},\n folder_uuids = {d67f8010-76e6-4bba-9ed2-6cfd0416d87b,116db2f1-e6ac-4780-bccf-a977325250cd},\n private_publication = {false},\n abstract = {This repository contains the Sincere Apology Corpus (SinA-C). SinA-C is an English speech corpus of acted apologies in various prosodic styles created with the purpose of investigating the attributes of the human voice which convey sincerity.},\n bibtype = {misc},\n author = {Baird, A and Coutinho, E},\n doi = {10.5281/zenodo.3241253}\n}
\n
\n\n\n
\n This repository contains the Sincere Apology Corpus (SinA-C). SinA-C is an English speech corpus of acted apologies in various prosodic styles created with the purpose of investigating the attributes of the human voice which convey sincerity.\n
\n\n\n
\n\n\n
\n \n\n \n \n Van Criekinge, T.; D'Août, K.; O'Brien, J.; and Coutinho, E.\n\n\n \n \n \n \n \n The Influence of Sound-Based Interventions on Motor Behavior After Stroke: A Systematic Review.\n \n \n \n \n\n\n \n\n\n\n Frontiers in Neurology, 10(1141). 11 2019.\n \n\n\n\n
\n\n\n\n \n \n \"TheWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {The Influence of Sound-Based Interventions on Motor Behavior After Stroke: A Systematic Review},\n type = {article},\n year = {2019},\n keywords = {biomechanics,music,sound,sound-based interventions,stroke rehabilitation,stroke—diagnosis,therapy},\n volume = {10},\n websites = {https://www.frontiersin.org/articles/10.3389/fneur.2019.01141/full,https://www.frontiersin.org/article/10.3389/fneur.2019.01141/full},\n month = {11},\n publisher = {Frontiers Media},\n day = {1},\n id = {b7604290-cd17-3b4b-9e51-a193c186fc7d},\n created = {2020-05-30T14:51:24.355Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2023-05-15T08:14:21.765Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {VanCriekinge2019a},\n source_type = {JOUR},\n folder_uuids = {16d8408d-4737-41ee-91fc-ea9af111c0fe},\n private_publication = {false},\n abstract = {Objective: To investigate the effects of sound-based interventions (SBIs) on biomechanical parameters in stroke patients. Methods: PubMed/Medline, Web of Science, the Physiotherapy Evidence Database (PEDro), and the Cochrane Library were searched until September 2019. Studies examining the effect of SBIs on kinematic, kinetic, and electromyographic outcome measures were included. Two independent reviewers performed the screening, and data extraction and risk-of-bias assessment were conducted with the PEDro and Newcastle–Ottawa scale. Disagreements were resolved by a third independent reviewer. Results: Of the 858 studies obtained from all databases, 12 studies and 240 participants met the inclusion and exclusion criteria. Six studies investigated the effect of SBI on upper limb motor tasks, while six examined walking. Concerning quality assessment (Newcastle–Ottawa Quality Assessment Scale and PEDro), the nine cross-sectional studies had a median score of seven, while the randomized controlled trials had a median score of five (fair to good quality). In relation to upper limb motor tasks, only one study found improvements in cortical reorganization and increased central excitability and motor control during reaching after SBI (results of the other five studies were too diverse and lacked quality to substantiate their findings). In relation to walking, results were clearer: SBI led to improvements in knee flexion and gastrocnemius muscle activity. Conclusion: Despite of the heterogeneity of the included studies, evidence was found demonstrating that SBI can induce biomechanical changes in motor behavior during walking in stroke patients. No conclusions could be formulated regarding reaching tasks. Additionally, directions for future research for understanding the underlying mechanism of the clinical improvements after SBI are: (1) using actual music pieces instead of rhythmic sound sequences and (2) examining sub-acute stroke rather than chronic stroke patients.},\n bibtype = {article},\n author = {Van Criekinge, Tamaya and D'Août, Kristiaan and O'Brien, Jonathon and Coutinho, Eduardo},\n doi = {10.3389/fneur.2019.01141},\n journal = {Frontiers in Neurology},\n number = {1141}\n}
\n
\n\n\n
\n Objective: To investigate the effects of sound-based interventions (SBIs) on biomechanical parameters in stroke patients. Methods: PubMed/Medline, Web of Science, the Physiotherapy Evidence Database (PEDro), and the Cochrane Library were searched until September 2019. Studies examining the effect of SBIs on kinematic, kinetic, and electromyographic outcome measures were included. Two independent reviewers performed the screening, and data extraction and risk-of-bias assessment were conducted with the PEDro and Newcastle–Ottawa scale. Disagreements were resolved by a third independent reviewer. Results: Of the 858 studies obtained from all databases, 12 studies and 240 participants met the inclusion and exclusion criteria. Six studies investigated the effect of SBI on upper limb motor tasks, while six examined walking. Concerning quality assessment (Newcastle–Ottawa Quality Assessment Scale and PEDro), the nine cross-sectional studies had a median score of seven, while the randomized controlled trials had a median score of five (fair to good quality). In relation to upper limb motor tasks, only one study found improvements in cortical reorganization and increased central excitability and motor control during reaching after SBI (results of the other five studies were too diverse and lacked quality to substantiate their findings). In relation to walking, results were clearer: SBI led to improvements in knee flexion and gastrocnemius muscle activity. Conclusion: Despite the heterogeneity of the included studies, evidence was found demonstrating that SBI can induce biomechanical changes in motor behavior during walking in stroke patients. No conclusions could be formulated regarding reaching tasks. Additionally, directions for future research for understanding the underlying mechanism of the clinical improvements after SBI are: (1) using actual music pieces instead of rhythmic sound sequences and (2) examining sub-acute stroke rather than chronic stroke patients.\n
\n\n\n
\n\n\n
\n \n\n \n \n Xu, X.; Deng, J.; Coutinho, E.; Wu, C.; Zhao, L.; and Schuller, B., W.\n\n\n \n \n \n \n \n Connecting subspace learning and extreme learning machine in speech emotion recognition.\n \n \n \n \n\n\n \n\n\n\n IEEE Transactions on Multimedia, 21(3): 795-808. 3 2019.\n \n\n\n\n
\n\n\n\n \n \n \"ConnectingWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Connecting subspace learning and extreme learning machine in speech emotion recognition},\n type = {article},\n year = {2019},\n keywords = {article,journal},\n pages = {795-808},\n volume = {21},\n websites = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2%5C&SrcApp=PARTNER_APP%5C&SrcAuth=LinksAMR%5C&KeyUT=WOS:000460333800022%5C&DestLinkType=FullRecord%5C&DestApp=ALL_WOS%5C&UsrCustomerID=f3ec48df247ee1138ccd8d3ba59bacc2,https://ieeexplore.iee},\n month = {3},\n id = {882b8384-fc52-3a64-a198-4c8a4c774dbd},\n created = {2020-05-30T17:34:32.954Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2020-06-08T17:31:31.543Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {xu2019connectingrecognition},\n source_type = {article},\n folder_uuids = {116db2f1-e6ac-4780-bccf-a977325250cd,99880aa7-55df-4b45-bfce-0ffc00b23ced},\n private_publication = {false},\n abstract = {Speech emotion recognition (SER) is a powerful tool for endowing computers with the capacity to process information about the affective states of users in human–machine interactions. Recent research has shown the effectiveness of graph embedding-based subspace learning and extreme learning machine applied to SER, but there are still various drawbacks in these two techniques that limit their application. Regarding subspace learning, the change from linearity to nonlinearity is usually achieved through kernelization, whereas extreme learning machines only take label information into consideration at the output layer. In order to overcome these drawbacks, this paper leverages extreme learning machines for dimensionality reduction and proposes a novel framework to combine spectral regression-based subspace learning and extreme learning machines. The proposed framework contains three stages—data mapping, graph decomposition, and regression. At the data mapping stage, various mapping strategies provide different views of the samples. At the graph decomposition stage, specifically designed embedding graphs provide a possibility to better represent the structure of data through generating virtual coordinates. Finally, at the regression stage, dimension-reduced mappings are achieved by connecting the virtual coordinates and data mapping. Using this framework, we propose several novel dimensionality reduction algorithms, apply them to SER tasks, and compare their performance to relevant state-of-the-art methods. Our results on several paralinguistic corpora show that our proposed techniques lead to significant improvements.},\n bibtype = {article},\n author = {Xu, Xinzhou and Deng, Jun and Coutinho, Eduardo and Wu, Chen and Zhao, Li and Schuller, Björn W.},\n doi = {10.1109/TMM.2018.2865834},\n journal = {IEEE Transactions on Multimedia},\n number = {3}\n}
\n
\n\n\n
\n Speech emotion recognition (SER) is a powerful tool for endowing computers with the capacity to process information about the affective states of users in human–machine interactions. Recent research has shown the effectiveness of graph embedding-based subspace learning and extreme learning machine applied to SER, but there are still various drawbacks in these two techniques that limit their application. Regarding subspace learning, the change from linearity to nonlinearity is usually achieved through kernelization, whereas extreme learning machines only take label information into consideration at the output layer. In order to overcome these drawbacks, this paper leverages extreme learning machines for dimensionality reduction and proposes a novel framework to combine spectral regression-based subspace learning and extreme learning machines. The proposed framework contains three stages—data mapping, graph decomposition, and regression. At the data mapping stage, various mapping strategies provide different views of the samples. At the graph decomposition stage, specifically designed embedding graphs provide a possibility to better represent the structure of data through generating virtual coordinates. Finally, at the regression stage, dimension-reduced mappings are achieved by connecting the virtual coordinates and data mapping. Using this framework, we propose several novel dimensionality reduction algorithms, apply them to SER tasks, and compare their performance to relevant state-of-the-art methods. Our results on several paralinguistic corpora show that our proposed techniques lead to significant improvements.\n
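As a minimal sketch of the extreme learning machine building block referred to above (the subspace-learning coupling proposed in the paper is not reproduced), the code below uses a fixed random hidden layer with a closed-form ridge readout on random placeholder data.

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(300, 40))          # placeholder paralinguistic features
y = rng.integers(0, 4, size=300)        # placeholder labels for four emotion classes
Y = np.eye(4)[y]                        # one-hot targets

W = rng.normal(size=(40, 200))          # random, untrained input weights
b = rng.normal(size=200)                # random biases
H = np.tanh(X @ W + b)                  # hidden-layer representation

lam = 1e-2                              # ridge regularisation
beta = np.linalg.solve(H.T @ H + lam * np.eye(200), H.T @ Y)   # closed-form readout
pred = np.argmax(H @ beta, axis=1)
print("training accuracy:", (pred == y).mean())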
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2018\n \n \n (3)\n \n \n
\n
\n \n \n
\n \n\n \n \n Chin, T., C.; Coutinho, E.; Scherer, K., R.; and Rickard, N., S.\n\n\n \n \n \n \n \n MUSEBAQ: A modular tool for music research to assess musicianship, musical capacity, music preferences, and motivations for music use.\n \n \n \n \n\n\n \n\n\n\n Music Perception, 35(3): 376-399. 2 2018.\n \n\n\n\n
\n\n\n\n \n \n \"MUSEBAQ:Website\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {MUSEBAQ: A modular tool for music research to assess musicianship, musical capacity, music preferences, and motivations for music use},\n type = {article},\n year = {2018},\n keywords = {article,journal},\n pages = {376-399},\n volume = {35},\n websites = {http://mp.ucpress.edu/lookup/doi/10.1525/mp.2018.35.3.376},\n month = {2},\n publisher = {University of California Press Journals},\n day = {12},\n id = {1777eeae-cddb-3be2-8dc4-12fb3090c33f},\n created = {2018-03-29T13:11:33.343Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2023-05-15T08:14:21.037Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {chin2018musebaquse},\n source_type = {article},\n folder_uuids = {116db2f1-e6ac-4780-bccf-a977325250cd,99880aa7-55df-4b45-bfce-0ffc00b23ced},\n private_publication = {false},\n abstract = {MUSIC ENGAGEMENT IS COMPLEX AND IS INFLUENCED by music training, capacity, preferences, and motivations. A multi-modular self-report instrument (the Music Use and Background Questionnaire, or MUSEBAQ) was developed to measure a diverse set of music engagement constructs. Based on earlier work, a hybrid approach of exploratory and confirmatory analyses was conducted across a series of three independent studies to establish reliability and validity of the modular tool. Module 1 (Musicianship) provides a brief assessment of formal and informalmusic knowledge and practice.Module 2 (Musical capacity) measures emotional sensitivity to music, listening sophistication, music memory and imagery, and personal commitment to music.Module 3 (Music preferences) captures preferences from six broad genres and utilizes adaptive reasoning to selectively expand subgenres when administered online. Module 4 (Motivations for music use) assesses musical transcendence, emotion regulation, social, and musical identity and expression. The MUSEBAQoffers researchers and practitioners a comprehensive, modular instrument that can be used inwhole, or by module as required to capture an individual's level of engagement with music and to serve as a background questionnaire to measure and interpret the effects of dispositional differences in emotional reactions to music.},\n bibtype = {article},\n author = {Chin, Tan Chyuan and Coutinho, Eduardo and Scherer, Klaus R. and Rickard, Nikki S.},\n doi = {10.1525/MP.2018.35.3.376},\n journal = {Music Perception},\n number = {3}\n}
\n
\n\n\n
\n Music engagement is complex and is influenced by music training, capacity, preferences, and motivations. A multi-modular self-report instrument (the Music Use and Background Questionnaire, or MUSEBAQ) was developed to measure a diverse set of music engagement constructs. Based on earlier work, a hybrid approach of exploratory and confirmatory analyses was conducted across a series of three independent studies to establish reliability and validity of the modular tool. Module 1 (Musicianship) provides a brief assessment of formal and informal music knowledge and practice. Module 2 (Musical capacity) measures emotional sensitivity to music, listening sophistication, music memory and imagery, and personal commitment to music. Module 3 (Music preferences) captures preferences from six broad genres and utilizes adaptive reasoning to selectively expand subgenres when administered online. Module 4 (Motivations for music use) assesses musical transcendence, emotion regulation, social, and musical identity and expression. The MUSEBAQ offers researchers and practitioners a comprehensive, modular instrument that can be used in whole, or by module as required, to capture an individual's level of engagement with music and to serve as a background questionnaire to measure and interpret the effects of dispositional differences in emotional reactions to music.\n
\n\n\n
\n\n\n
\n \n\n \n \n Dibben, N.; Coutinho, E.; Vilar, J., A.; and Estévez-Pérez, G.\n\n\n \n \n \n \n \n Do individual differences influence moment-by-moment reports of emotion perceived in music and speech prosody?.\n \n \n \n \n\n\n \n\n\n\n Frontiers in Behavioral Neuroscience, 12: 184. 8 2018.\n \n\n\n\n
\n\n\n\n \n \n \"DoWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Do individual differences influence moment-by-moment reports of emotion perceived in music and speech prosody?},\n type = {article},\n year = {2018},\n keywords = {article,journal},\n pages = {184},\n volume = {12},\n websites = {http://www.ncbi.nlm.nih.gov/pubmed/30210316,http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=PMC6119718,http://www.ncbi.nlm.nih.gov/pubmed/30210316 http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=PMC6119718,https://www.frontiersin.org/a},\n month = {8},\n day = {27},\n id = {31aec067-4e19-3835-8a72-9d8f6e5d84dc},\n created = {2020-05-29T11:51:38.782Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2023-05-15T08:14:21.900Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Dibben2018},\n source_type = {JOUR},\n folder_uuids = {116db2f1-e6ac-4780-bccf-a977325250cd,99880aa7-55df-4b45-bfce-0ffc00b23ced},\n private_publication = {false},\n abstract = {Comparison of emotion perception in music and prosody has the potential to contribute to an understanding of their speculated shared evolutionary origin. Previous research suggests shared sensitivity to and processing of music and speech, but less is known about how emotion perception in the auditory domain might be influenced by individual differences. Personality, emotional intelligence, gender, musical training and age exert some influence on discrete, summative judgments of perceived emotion in music and speech stimuli. However, music and speech are temporal phenomena, and little is known about whether individual differences influence moment-by-moment perception of emotion in these domains. A behavioral study collected two main types of data: continuous ratings of perceived emotion while listening to extracts of music and speech, using a computer interface which modeled emotion on two dimensions (arousal and valence), and demographic information including measures of personality (TIPI) and emotional intelligence (TEIQue-SF). Functional analysis of variance on the time series data revealed a small number of statistically significant differences associated with Emotional Stability, Agreeableness, musical training and age. The results indicate that individual differences exert limited influence on continuous judgments of dynamic, naturalistic expressions. We suggest that this reflects a reliance on acoustic cues to emotion in moment-by-moment judgments of perceived emotions and is further evidence of the shared sensitivity to and processing of music and speech.},\n bibtype = {article},\n author = {Dibben, Nicola and Coutinho, Eduardo and Vilar, José A. and Estévez-Pérez, Graciela},\n doi = {10.3389/fnbeh.2018.00184},\n journal = {Frontiers in Behavioral Neuroscience}\n}
\n
\n\n\n
\n Comparison of emotion perception in music and prosody has the potential to contribute to an understanding of their speculated shared evolutionary origin. Previous research suggests shared sensitivity to and processing of music and speech, but less is known about how emotion perception in the auditory domain might be influenced by individual differences. Personality, emotional intelligence, gender, musical training and age exert some influence on discrete, summative judgments of perceived emotion in music and speech stimuli. However, music and speech are temporal phenomena, and little is known about whether individual differences influence moment-by-moment perception of emotion in these domains. A behavioral study collected two main types of data: continuous ratings of perceived emotion while listening to extracts of music and speech, using a computer interface which modeled emotion on two dimensions (arousal and valence), and demographic information including measures of personality (TIPI) and emotional intelligence (TEIQue-SF). Functional analysis of variance on the time series data revealed a small number of statistically significant differences associated with Emotional Stability, Agreeableness, musical training and age. The results indicate that individual differences exert limited influence on continuous judgments of dynamic, naturalistic expressions. We suggest that this reflects a reliance on acoustic cues to emotion in moment-by-moment judgments of perceived emotions and is further evidence of the shared sensitivity to and processing of music and speech.\n
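The study applied functional analysis of variance to continuous rating curves; as a much simpler stand-in, the sketch below runs a pointwise one-way F-test across time on synthetic arousal trajectories for three hypothetical listener groups. A real analysis would also require smoothing and multiple-comparison control.

import numpy as np
from scipy.stats import f_oneway

rng = np.random.default_rng(1)
n_time = 120   # e.g., a two-minute excerpt rated at 1 Hz

# Hypothetical arousal trajectories for three listener groups (20 each),
# differing only by a small constant offset.
groups = [rng.normal(loc=shift, scale=1.0, size=(20, n_time))
          for shift in (0.0, 0.1, 0.3)]

# Pointwise one-way F-test at each time point (no smoothing, no correction).
f_vals, p_vals = zip(*(f_oneway(*(g[:, t] for g in groups)) for t in range(n_time)))
p_vals = np.asarray(p_vals)
print("time points with p < .05:", int((p_vals < 0.05).sum()), "of", n_time)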
\n\n\n
\n\n\n
\n \n\n \n \n Coutinho, E.; Gentsch, K.; Van Peer, J.; Scherer, K., R.; and Schuller, B., W.\n\n\n \n \n \n \n \n Evidence of emotion-antecedent appraisal checks in electroencephalography and facial electromyography.\n \n \n \n \n\n\n \n\n\n\n PLoS ONE, 13(1): e0189367. 1 2018.\n \n\n\n\n
\n\n\n\n \n \n \"EvidenceWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Evidence of emotion-antecedent appraisal checks in electroencephalography and facial electromyography},\n type = {article},\n year = {2018},\n keywords = {article,journal},\n pages = {e0189367},\n volume = {13},\n websites = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2%5C&SrcApp=PARTNER_APP%5C&SrcAuth=LinksAMR%5C&KeyUT=WOS:000419101600016%5C&DestLinkType=FullRecord%5C&DestApp=ALL_WOS%5C&UsrCustomerID=f3ec48df247ee1138ccd8d3ba59bacc2 https://dx.plos.org/10},\n month = {1},\n day = {2},\n id = {9336c657-e759-31c9-86c7-fd4d16d0d18c},\n created = {2020-05-29T11:51:38.987Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2023-05-15T08:14:21.853Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {coutinho2018evidenceelectromyography},\n source_type = {JOUR},\n folder_uuids = {116db2f1-e6ac-4780-bccf-a977325250cd,99880aa7-55df-4b45-bfce-0ffc00b23ced},\n private_publication = {false},\n abstract = {In the present study, we applied Machine Learning (ML) methods to identify psychobiological markers of cognitive processes involved in the process of emotion elicitation as postulated by the Component Process Model (CPM). In particular, we focused on the automatic detection of five appraisal checks—novelty, intrinsic pleasantness, goal conduciveness, control, and power—in electroencephalography (EEG) and facial electromyography (EMG) signals. We also evaluated the effects on classification accuracy of averaging the raw physiological signals over different numbers of trials, and whether the use of minimal sets of EEG channels localized over specific scalp regions of interest are sufficient to discriminate between appraisal checks. We demonstrated the effectiveness of our approach on two data sets obtained from previous studies. Our results show that novelty and power appraisal checks can be consistently detected in EEG signals above chance level (binary tasks). For novelty, the best classification performance in terms of accuracy was achieved using features extracted from the whole scalp, and by averaging across 20 individual trials in the same experimental condition (UAR = 83.5 ± 4.2; N = 25). For power, the best performance was obtained by using the signals from four pre-selected EEG channels averaged across all trials available for each participant (UAR = 70.6 ± 5.3; N = 24). Together, our results indicate that accurate classification can be achieved with a relatively small number of trials and channels, but that averaging across a larger number of individual trials is beneficial for the classification for both appraisal checks. We were not able to detect any evidence of the appraisal checks under study in the EMG data. The proposed methodology is a promising tool for the study of the psychophysiological mechanisms underlying emotional episodes, and their application to the development of computerized tools (e.g., Brain-Computer Interface) for the study of cognitive processes involved in emotions.},\n bibtype = {article},\n author = {Coutinho, Eduardo and Gentsch, Kornelia and Van Peer, Jacobien and Scherer, Klaus R. and Schuller, Björn W.},\n editor = {Valenza, Gaetano},\n doi = {10.1371/journal.pone.0189367},\n journal = {PLoS ONE},\n number = {1}\n}
\n
\n\n\n
\n In the present study, we applied Machine Learning (ML) methods to identify psychobiological markers of cognitive processes involved in the process of emotion elicitation as postulated by the Component Process Model (CPM). In particular, we focused on the automatic detection of five appraisal checks—novelty, intrinsic pleasantness, goal conduciveness, control, and power—in electroencephalography (EEG) and facial electromyography (EMG) signals. We also evaluated the effects on classification accuracy of averaging the raw physiological signals over different numbers of trials, and whether the use of minimal sets of EEG channels localized over specific scalp regions of interest are sufficient to discriminate between appraisal checks. We demonstrated the effectiveness of our approach on two data sets obtained from previous studies. Our results show that novelty and power appraisal checks can be consistently detected in EEG signals above chance level (binary tasks). For novelty, the best classification performance in terms of accuracy was achieved using features extracted from the whole scalp, and by averaging across 20 individual trials in the same experimental condition (UAR = 83.5 ± 4.2; N = 25). For power, the best performance was obtained by using the signals from four pre-selected EEG channels averaged across all trials available for each participant (UAR = 70.6 ± 5.3; N = 24). Together, our results indicate that accurate classification can be achieved with a relatively small number of trials and channels, but that averaging across a larger number of individual trials is beneficial for the classification for both appraisal checks. We were not able to detect any evidence of the appraisal checks under study in the EMG data. The proposed methodology is a promising tool for the study of the psychophysiological mechanisms underlying emotional episodes, and their application to the development of computerized tools (e.g., Brain-Computer Interface) for the study of cognitive processes involved in emotions.\n
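The classification scores quoted above are unweighted average recall (UAR); the sketch below shows how such a figure can be computed for a binary classifier with scikit-learn, using synthetic stand-ins for trial-averaged EEG feature vectors.

import numpy as np
from sklearn.model_selection import cross_val_predict
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC
from sklearn.metrics import recall_score

rng = np.random.default_rng(2)
X = rng.normal(size=(48, 64))          # 48 trial-averaged epochs x 64 EEG features
y = np.repeat([0, 1], 24)              # e.g., one binary appraisal-check contrast
X[y == 1] += 0.4                       # inject a weak class difference

clf = make_pipeline(StandardScaler(), LinearSVC(C=0.1))
pred = cross_val_predict(clf, X, y, cv=6)

# UAR is the mean of the per-class recalls (chance level = 0.5 for two classes).
uar = recall_score(y, pred, average="macro")
print(f"UAR = {uar:.3f}")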
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2017\n \n \n (7)\n \n \n
\n
\n \n \n
\n \n\n \n \n Sabathe, R.; Coutinho, E.; and Schuller, B.\n\n\n \n \n \n \n \n Deep recurrent music writer: Memory-enhanced variational autoencoder-based musical score composition and an objective measure.\n \n \n \n \n\n\n \n\n\n\n In Proceedings of the International Joint Conference on Neural Networks, volume 2017-May, pages 3467-3474, 5 2017. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"DeepWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Deep recurrent music writer: Memory-enhanced variational autoencoder-based musical score composition and an objective measure},\n type = {inproceedings},\n year = {2017},\n pages = {3467-3474},\n volume = {2017-May},\n websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85031004981&doi=10.1109%2FIJCNN.2017.7966292&partnerID=40&md5=773c59463fe8c1985666a5d8ee739954,http://ieeexplore.ieee.org/document/7966292/},\n month = {5},\n publisher = {IEEE},\n id = {159c3cf8-fc75-3ad3-9351-d5d1c4bd98b6},\n created = {2020-05-27T15:19:59.533Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2023-05-15T08:14:21.519Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Sabathe2017},\n source_type = {CONF},\n notes = {cited By 4},\n private_publication = {false},\n abstract = {In recent years, there has been an increasing interest in music generation using machine learning techniques typically used for classification or regression tasks. This is a field still in its infancy, and most attempts are still characterized by the imposition of many restrictions to the music composition process in order to favor the creation of 'interesting' outputs. Furthermore, and most importantly, none of the past attempts has focused on developing objective measures to evaluate the music composed, which would allow to evaluate the pieces composed against a predetermined standard as well as permitting to fine-tune models for better 'performance' and music composition goals. In this work, we intend to advance state-of-the-art in this area by introducing and evaluating a new metric for an objective assessment of the quality of the generated pieces. We will use this measure to evaluate the outputs of a truly generative model based on Variational Autoencoders that we apply here to automated music composition. Using our metric, we demonstrate that our model can generate music pieces that follow general stylistic characteristics of a given composer or musical genre. Additionally, we use this measure to investigate the impact of various parameters and model architectures on the compositional process and output.},\n bibtype = {inproceedings},\n author = {Sabathe, Romain and Coutinho, Eduardo and Schuller, Bjorn},\n doi = {10.1109/IJCNN.2017.7966292},\n booktitle = {Proceedings of the International Joint Conference on Neural Networks}\n}
\n
\n\n\n
\n In recent years, there has been increasing interest in music generation using machine learning techniques typically used for classification or regression tasks. This is a field still in its infancy, and most attempts are still characterized by the imposition of many restrictions on the music composition process in order to favor the creation of 'interesting' outputs. Furthermore, and most importantly, none of the past attempts has focused on developing objective measures to evaluate the music composed, which would allow the generated pieces to be evaluated against a predetermined standard and would permit fine-tuning models for better 'performance' and specific composition goals. In this work, we intend to advance the state of the art in this area by introducing and evaluating a new metric for an objective assessment of the quality of the generated pieces. We use this measure to evaluate the outputs of a truly generative model based on Variational Autoencoders that we apply here to automated music composition. Using our metric, we demonstrate that our model can generate music pieces that follow general stylistic characteristics of a given composer or musical genre. Additionally, we use this measure to investigate the impact of various parameters and model architectures on the compositional process and output.\n
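The paper defines its own objective metric, which is not reproduced here; purely to illustrate the general idea of scoring generated output against a reference corpus, the sketch below compares hypothetical pitch-class profiles with cosine similarity.

import numpy as np

def pitch_class_histogram(midi_pitches):
    """Normalised histogram of pitch classes (0-11) for a sequence of MIDI notes."""
    counts = np.bincount(np.asarray(midi_pitches) % 12, minlength=12)
    return counts / max(counts.sum(), 1)

def style_similarity(generated_pieces, reference_pieces):
    """Cosine similarity between the mean pitch-class profiles of two corpora."""
    gen = np.mean([pitch_class_histogram(p) for p in generated_pieces], axis=0)
    ref = np.mean([pitch_class_histogram(p) for p in reference_pieces], axis=0)
    return float(gen @ ref / (np.linalg.norm(gen) * np.linalg.norm(ref) + 1e-12))

# Toy usage: a C-major-ish reference corpus vs. a chromatic "generated" piece.
reference = [[60, 62, 64, 65, 67, 69, 71, 72], [60, 64, 67, 72]]
generated = [[60, 61, 62, 63, 64, 65, 66, 67]]
print(f"style similarity = {style_similarity(generated, reference):.3f}")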
\n\n\n
\n\n\n
\n \n\n \n \n Coutinho, E.\n\n\n \n \n \n \n \n Shared Acoustic Codes Underlie Emotional Communication in Music and Speech - Evidence from Deep Transfer Learning (Datasets).\n \n \n \n \n\n\n \n\n\n\n 3 2017.\n \n\n\n\n
\n\n\n\n \n \n \"SharedWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@misc{\n title = {Shared Acoustic Codes Underlie Emotional Communication in Music and Speech - Evidence from Deep Transfer Learning (Datasets)},\n type = {misc},\n year = {2017},\n source = {Zenodo},\n keywords = {arousal,dataset,emotion,music,time-continuous,valence},\n issue = {1},\n websites = {https://zenodo.org/record/345944#.WTZdgl2qNFQ},\n month = {3},\n publisher = {Zenodo},\n day = {6},\n id = {a036a5c3-6770-37fc-8e0b-9b8ab8fca30e},\n created = {2020-05-29T10:17:30.772Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2021-05-14T09:05:35.698Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Coutinho2017},\n source_type = {DATASET},\n private_publication = {false},\n abstract = {This repository contains the datasets used in the article "Shared Acoustic Codes Underlie Emotional Communication in Music and Speech - Evidence from Deep Transfer Learning" (Coutinho & Schuller, 2017). In that article four different data sets were used: SEMAINE, RECOLA, ME14 and MP (acronyms and datasets described below). The SEMAINE (speech) and ME14 (music) corpora were used for the unsupervised training of the Denoising Auto-encoders (domain adaptation stage) - only the audio features extracted from the audio files in these corpora were used and are provided in this repository. The RECOLA (speech) and MP (music) corpora were used for the supervised training phase - both the audio features extracted from the audio files and the Arousal and Valence annotations were used. In this repository, we provide the audio features extracted from the audio files for both corpora, and Arousal and Valence annotations for some of the music datasets (those that the author of this repository is the data curator).},\n bibtype = {misc},\n author = {Coutinho, E},\n doi = {10.5281/zenodo.600657}\n}
\n
\n\n\n
\n This repository contains the datasets used in the article \"Shared Acoustic Codes Underlie Emotional Communication in Music and Speech - Evidence from Deep Transfer Learning\" (Coutinho & Schuller, 2017). In that article four different data sets were used: SEMAINE, RECOLA, ME14 and MP (acronyms and datasets described below). The SEMAINE (speech) and ME14 (music) corpora were used for the unsupervised training of the Denoising Auto-encoders (domain adaptation stage) - only the audio features extracted from the audio files in these corpora were used and are provided in this repository. The RECOLA (speech) and MP (music) corpora were used for the supervised training phase - both the audio features extracted from the audio files and the Arousal and Valence annotations were used. In this repository, we provide the audio features extracted from the audio files for both corpora, and Arousal and Valence annotations for some of the music datasets (those that the author of this repository is the data curator).\n
\n\n\n
\n\n\n
\n \n\n \n \n van Peer, J.; Coutinho, E.; Grandjean, D.; and Scherer, K., R.\n\n\n \n \n \n \n \n Emotion-Antecedent Appraisal Checks: EEG and EMG datasets for Novelty and Pleasantness.\n \n \n \n \n\n\n \n\n\n\n 12 2017.\n \n\n\n\n
\n\n\n\n \n \n \"Emotion-AntecedentWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@misc{\n title = {Emotion-Antecedent Appraisal Checks: EEG and EMG datasets for Novelty and Pleasantness},\n type = {misc},\n year = {2017},\n keywords = {dataset},\n pages = {1-4},\n websites = {https://doi.org/10.5281/zenodo.197404},\n month = {12},\n publisher = {Zenodo},\n id = {c992033c-a9f0-37e8-b6d9-52ec697f3543},\n created = {2020-05-29T11:51:37.170Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2020-06-08T17:31:31.815Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {coutinho2017emotionantecedentset},\n source_type = {DATASET},\n folder_uuids = {d67f8010-76e6-4bba-9ed2-6cfd0416d87b,116db2f1-e6ac-4780-bccf-a977325250cd},\n private_publication = {false},\n abstract = {This document describes the full details of the first data set (Study 1) used in Coutinho et al., to appear. The Electroencephalography (EEG) and facial Electromyography (EMG) signals included in this dataset, and now made public, were collected in the context of a previous study by Peer, Grandjean, and Scherer, 2014 that addressed three fundamental questions regarding the mechanisms underlying the appraisal process: Whether appraisal criteria are processed (a) in a fixed sequence, (b) independent of each other, and (c) by different neural structures or circuits. In that study, an oddball paradigm with affective pictures was used to experimentally manipulate novelty and intrinsic pleasantness appraisals. EEG was recorded during task performance, together with facial EMG, to measure, respectively, cognitive processing and efferent responses stemming from the appraisal manipulations.},\n bibtype = {misc},\n author = {van Peer, J. and Coutinho, E. and Grandjean, D. and Scherer, K. R.},\n doi = {10.5281/zenodo.197404}\n}
\n
\n\n\n
\n This document describes the full details of the first data set (Study 1) used in Coutinho et al., to appear. The Electroencephalography (EEG) and facial Electromyography (EMG) signals included in this dataset, and now made public, were collected in the context of a previous study by Peer, Grandjean, and Scherer, 2014 that addressed three fundamental questions regarding the mechanisms underlying the appraisal process: Whether appraisal criteria are processed (a) in a fixed sequence, (b) independent of each other, and (c) by different neural structures or circuits. In that study, an oddball paradigm with affective pictures was used to experimentally manipulate novelty and intrinsic pleasantness appraisals. EEG was recorded during task performance, together with facial EMG, to measure, respectively, cognitive processing and efferent responses stemming from the appraisal manipulations.\n
\n\n\n
\n\n\n
\n \n\n \n \n Coutinho, E.; and Scherer, K., R.\n\n\n \n \n \n \n \n The effect of context and audio-visual modality on emotions elicited by a musical performance.\n \n \n \n \n\n\n \n\n\n\n Psychology of Music, 45(4): 550-569. 7 2017.\n \n\n\n\n
\n\n\n\n \n \n \"TheWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {The effect of context and audio-visual modality on emotions elicited by a musical performance},\n type = {article},\n year = {2017},\n keywords = {article,journal},\n pages = {550-569},\n volume = {45},\n websites = {http://dx.doi.org/10.1177/0305735616670496 http://journals.sagepub.com/doi/10.1177/0305735616670496,http://dx.doi.org/10.1177/0305735616670496,http://journals.sagepub.com/doi/10.1177/0305735616670496},\n month = {7},\n day = {26},\n id = {afc391cf-b15a-3422-b5d4-fa0c6522d298},\n created = {2020-05-29T11:51:37.246Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2023-05-15T08:14:21.220Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {doi:10.1177/0305735616670496},\n source_type = {JOUR},\n folder_uuids = {116db2f1-e6ac-4780-bccf-a977325250cd,99880aa7-55df-4b45-bfce-0ffc00b23ced},\n private_publication = {false},\n abstract = {In this work, we compared emotions induced by the same performance of Schubert Lieder during a live concert and in a laboratory viewing/listening setting to determine the extent to which laboratory research on affective reactions to music approximates real listening conditions in dedicated performances. We measured emotions experienced by volunteer members of an audience that attended a Lieder recital in a church (Context 1) and emotional reactions to an audio-video-recording of the same performance in a university lecture hall (Context 2). Three groups of participants were exposed to three presentation versions in Context 2: (1) an audio-visual recording, (2) an audio-only recording, and (3) a video-only recording. Participants achieved statistically higher levels of emotional convergence in the live performance than in the laboratory context, and the experience of particular emotions was determined by complex interactions between auditory and visual cues in the performance. This study demonstrates the contribution of the performance setting and the performers' appearance and nonverbal expression to emotion induction by music, encouraging further systematic research into the factors involved.},\n bibtype = {article},\n author = {Coutinho, Eduardo and Scherer, Klaus R.},\n doi = {10.1177/0305735616670496},\n journal = {Psychology of Music},\n number = {4}\n}
\n
\n\n\n
\n In this work, we compared emotions induced by the same performance of Schubert Lieder during a live concert and in a laboratory viewing/listening setting to determine the extent to which laboratory research on affective reactions to music approximates real listening conditions in dedicated performances. We measured emotions experienced by volunteer members of an audience that attended a Lieder recital in a church (Context 1) and emotional reactions to an audio-video-recording of the same performance in a university lecture hall (Context 2). Three groups of participants were exposed to three presentation versions in Context 2: (1) an audio-visual recording, (2) an audio-only recording, and (3) a video-only recording. Participants achieved statistically higher levels of emotional convergence in the live performance than in the laboratory context, and the experience of particular emotions was determined by complex interactions between auditory and visual cues in the performance. This study demonstrates the contribution of the performance setting and the performers' appearance and nonverbal expression to emotion induction by music, encouraging further systematic research into the factors involved.\n
\n\n\n
\n\n\n
\n \n\n \n \n Gentsch, K.; Coutinho, E.; Grandjean, D.; and Scherer, K., R.\n\n\n \n \n \n \n \n Emotion-Antecedent Appraisal Checks: EEG and EMG datasets for Goal Conduciveness, Control and Power.\n \n \n \n \n\n\n \n\n\n\n 12 2017.\n \n\n\n\n
\n\n\n\n \n \n \"Emotion-AntecedentWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@misc{\n title = {Emotion-Antecedent Appraisal Checks: EEG and EMG datasets for Goal Conduciveness, Control and Power},\n type = {misc},\n year = {2017},\n keywords = {dataset},\n pages = {1-4},\n websites = {http://doi.org/10.5281/zenodo.222615},\n month = {12},\n publisher = {Zenodo},\n id = {921f0eb1-b115-3b70-8f13-16b31a419a9b},\n created = {2020-05-29T11:51:37.289Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2020-06-08T17:31:32.436Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {coutinho2017emotionantecedentpower},\n source_type = {DATASET},\n folder_uuids = {d67f8010-76e6-4bba-9ed2-6cfd0416d87b,116db2f1-e6ac-4780-bccf-a977325250cd},\n private_publication = {false},\n abstract = {This document describes the full details of the second data set (Study 2) used in Coutinho et al., to appear. The Electroencephalography (EEG) and facial Electromyography (EMG) signals included in this data set, and now made public, were collected in the context of a previous study by Gentsch, Grandjean, and Scherer, 2013 that addressed three fundamental questions regarding the mechanisms underlying the appraisal process: Whether appraisal criteria are processed (1) in a fixed sequence, (2) independent of each other, and (3) by different neural structures or circuits. In this study, a gambling task was applied in which feedback stimuli manipulated simultaneously the information about goal conduciveness, control, and power appraisals. EEG was recorded during task performance, together with facial EMG, to measure, respectively, cognitive processing and efferent responses stemming from the appraisal manipulations.},\n bibtype = {misc},\n author = {Gentsch, K and Coutinho, E and Grandjean, D and Scherer, K R and Gentsch, K and Grandjean, D and Scherer, K R},\n doi = {10.5281/zenodo.222615}\n}
\n
\n\n\n
\n This document describes the full details of the second data set (Study 2) used in Coutinho et al., to appear. The Electroencephalography (EEG) and facial Electromyography (EMG) signals included in this data set, and now made public, were collected in the context of a previous study by Gentsch, Grandjean, and Scherer, 2013 that addressed three fundamental questions regarding the mechanisms underlying the appraisal process: Whether appraisal criteria are processed (1) in a fixed sequence, (2) independent of each other, and (3) by different neural structures or circuits. In this study, a gambling task was applied in which feedback stimuli manipulated simultaneously the information about goal conduciveness, control, and power appraisals. EEG was recorded during task performance, together with facial EMG, to measure, respectively, cognitive processing and efferent responses stemming from the appraisal manipulations.\n
\n\n\n
\n\n\n
\n \n\n \n \n Coutinho, E.; and Schuller, B.\n\n\n \n \n \n \n \n Shared acoustic codes underlie emotional communication in music and speech—evidence from deep transfer learning.\n \n \n \n \n\n\n \n\n\n\n PLoS ONE, 12(6): e0179289. 6 2017.\n \n\n\n\n
\n\n\n\n \n \n \"SharedWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Shared acoustic codes underlie emotional communication in music and speech—evidence from deep transfer learning},\n type = {article},\n year = {2017},\n keywords = {article,journal},\n pages = {e0179289},\n volume = {12},\n websites = {http://dx.plos.org/10.1371/journal.pone.0179289,http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2%5C&SrcApp=PARTNER_APP%5C&SrcAuth=LinksAMR%5C&KeyUT=WOS:000404607900019%5C&DestLinkType=FullRecord%5C&DestApp=ALL_WOS%5C&UsrCustomerID=f3ec48d},\n month = {6},\n publisher = {Public Library of Science (PLoS)},\n day = {28},\n id = {8b0218eb-eaaa-36aa-9a6d-870b794ede57},\n created = {2020-05-29T11:51:38.591Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2020-06-08T17:31:31.970Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {coutinho2017sharedlearning},\n source_type = {JOUR},\n folder_uuids = {116db2f1-e6ac-4780-bccf-a977325250cd,99880aa7-55df-4b45-bfce-0ffc00b23ced},\n private_publication = {false},\n abstract = {Music and speech exhibit striking similarities in the communication of emotions in the acoustic domain, in such a way that the communication of specific emotions is achieved, at least to a certain extent, by means of shared acoustic patterns. From an Affective Sciences points of view, determining the degree of overlap between both domains is fundamental to understand the shared mechanisms underlying such phenomenon. From a Machine learning perspective, the overlap between acoustic codes for emotional expression in music and speech opens new possibilities to enlarge the amount of data available to develop music and speech emotion recognition systems. In this article, we investigate time-continuous predictions of emotion (Arousal and Valence) in music and speech, and the Transfer Learning between these domains. We establish a comparative framework including intra- (i.e., models trained and tested on the same modality, either music or speech) and cross-domain experiments (i.e., models trained in one modality and tested on the other). In the cross-domain context, we evaluated two strategies—the direct transfer between domains, and the contribution of Transfer Learning techniques (feature-representation-transfer based on Denoising Auto Encoders) for reducing the gap in the feature space distributions. Our results demonstrate an excellent cross-domain generalisation performance with and without feature representation transfer in both directions. In the case of music, cross-domain approaches outperformed intra-domain models for Valence estimation, whereas for Speech intra-domain models achieve the best performance. This is the first demonstration of shared acoustic codes for emotional expression in music and speech in the time-continuous domain.},\n bibtype = {article},\n author = {Coutinho, Eduardo and Schuller, Björn},\n editor = {Zhang, Yudong},\n doi = {10.1371/journal.pone.0179289},\n journal = {PLoS ONE},\n number = {6}\n}
\n
\n\n\n
\n Music and speech exhibit striking similarities in the communication of emotions in the acoustic domain, in such a way that the communication of specific emotions is achieved, at least to a certain extent, by means of shared acoustic patterns. From an Affective Sciences point of view, determining the degree of overlap between both domains is fundamental to understanding the shared mechanisms underlying this phenomenon. From a machine learning perspective, the overlap between acoustic codes for emotional expression in music and speech opens new possibilities to enlarge the amount of data available for developing music and speech emotion recognition systems. In this article, we investigate time-continuous predictions of emotion (Arousal and Valence) in music and speech, and Transfer Learning between these domains. We establish a comparative framework including intra-domain (i.e., models trained and tested on the same modality, either music or speech) and cross-domain experiments (i.e., models trained in one modality and tested on the other). In the cross-domain context, we evaluated two strategies: the direct transfer between domains, and the contribution of Transfer Learning techniques (feature-representation transfer based on Denoising Auto Encoders) for reducing the gap in the feature space distributions. Our results demonstrate excellent cross-domain generalisation performance with and without feature representation transfer in both directions. In the case of music, cross-domain approaches outperformed intra-domain models for Valence estimation, whereas for speech, intra-domain models achieved the best performance. This is the first demonstration of shared acoustic codes for emotional expression in music and speech in the time-continuous domain.\n
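A minimal sketch of the feature-representation-transfer idea described above: a denoising autoencoder is trained on pooled, unlabelled acoustic features from both domains, and its encoder then maps music and speech features into a shared space before supervised training. It assumes PyTorch; dimensions, noise level and training schedule are placeholders rather than the configuration used in the paper.

import torch
import torch.nn as nn

class DenoisingAutoencoder(nn.Module):
    def __init__(self, n_features=260, n_hidden=128):
        super().__init__()
        self.encoder = nn.Sequential(nn.Linear(n_features, n_hidden), nn.Tanh())
        self.decoder = nn.Linear(n_hidden, n_features)

    def forward(self, x):
        return self.decoder(self.encoder(x))

def train_dae(features, noise_std=0.1, epochs=50, lr=1e-3):
    """features: (n_frames, n_features) tensor pooled from music + speech corpora."""
    model = DenoisingAutoencoder(n_features=features.shape[1])
    opt = torch.optim.Adam(model.parameters(), lr=lr)
    loss_fn = nn.MSELoss()
    for _ in range(epochs):
        noisy = features + noise_std * torch.randn_like(features)
        opt.zero_grad()
        loss = loss_fn(model(noisy), features)   # reconstruct the clean input
        loss.backward()
        opt.step()
    return model

# Usage sketch: encode both domains with the shared encoder before training the
# supervised arousal/valence regressors on the transformed features.
pooled = torch.randn(1000, 260)                  # placeholder unlabelled features
dae = train_dae(pooled)
music_feats, speech_feats = torch.randn(200, 260), torch.randn(200, 260)
music_repr = dae.encoder(music_feats).detach()
speech_repr = dae.encoder(speech_feats).detach()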
\n\n\n
\n\n\n
\n \n\n \n \n Coutinho, E.; and Scherer, K., R.\n\n\n \n \n \n \n \n Introducing the Geneva music-induced affect checklist (GEMIAC): A brief instrument for the rapid assessment of musically induced emotions.\n \n \n \n \n\n\n \n\n\n\n Music Perception, 34(4): 371-386. 4 2017.\n \n\n\n\n
\n\n\n\n \n \n \"IntroducingWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Introducing the Geneva music-induced affect checklist (GEMIAC): A brief instrument for the rapid assessment of musically induced emotions},\n type = {article},\n year = {2017},\n keywords = {Checklist,Emotion,Feeling,Measurement,Music},\n pages = {371-386},\n volume = {34},\n websites = {https://online.ucpress.edu/mp/article/34/4/371/62796/Introducing-the-GEneva-MusicInduced-Affect},\n month = {4},\n day = {1},\n id = {b67e6804-7446-3dca-938f-507d3cf631d8},\n created = {2021-09-17T10:31:01.529Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2023-05-15T08:14:21.680Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {THE SYSTEMATIC STUDY OF MUSIC-INDUCED emotions requires standardized measurement instruments to reliably assess the nature of affective reactions to music, which tend to go beyond garden-variety basic emotions.We describe the development and conceptual validation of a checklist for rapid assessment of musicinduced affect, designed to extend and complement the Geneva Emotional Music Scale. The checklist contains a selection of affect and emotion categories that are frequently used in the literature to refer to emotional reactions to music. The development of the checklist focused on an empirical investigation of the semantic structure of the relevant terms, combined with fuzzy classes based on a series of hierarchical cluster analyses. Two versions of the checklist for assessing the intensity and frequency of affective responses to music are proposed.},\n bibtype = {article},\n author = {Coutinho, Eduardo and Scherer, Klaus R.},\n doi = {10.1525/MP.2017.34.4.371},\n journal = {Music Perception},\n number = {4}\n}
\n
\n\n\n
\n The systematic study of music-induced emotions requires standardized measurement instruments to reliably assess the nature of affective reactions to music, which tend to go beyond garden-variety basic emotions. We describe the development and conceptual validation of a checklist for the rapid assessment of music-induced affect, designed to extend and complement the Geneva Emotional Music Scale. The checklist contains a selection of affect and emotion categories that are frequently used in the literature to refer to emotional reactions to music. The development of the checklist focused on an empirical investigation of the semantic structure of the relevant terms, combined with fuzzy classes based on a series of hierarchical cluster analyses. Two versions of the checklist for assessing the intensity and frequency of affective responses to music are proposed.\n
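The hierarchical clustering step mentioned above can be illustrated roughly as follows; the affect terms and the rating matrix are invented for the example, and the distance and linkage choices are not necessarily those used in the study.

import numpy as np
from scipy.cluster.hierarchy import linkage, fcluster
from scipy.spatial.distance import pdist

terms = ["moved", "nostalgic", "tender", "energised", "joyful", "tense"]
rng = np.random.default_rng(3)
ratings = rng.normal(size=(len(terms), 40))   # term x listener rating profiles (invented)

# Correlation distance between term profiles, then average-linkage clustering.
dist = pdist(ratings, metric="correlation")
tree = linkage(dist, method="average")
clusters = fcluster(tree, t=3, criterion="maxclust")
for term, c in zip(terms, clusters):
    print(f"{term}: cluster {c}")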
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2016\n \n \n (6)\n \n \n
\n
\n \n \n
\n \n\n \n \n Schuller, B.; Steidl, S.; Batliner, A.; Hirschberg, J.; Burgoon, J., K.; Baird, A.; Elkins, A.; Zhang, Y.; Coutinho, E.; and Evanini, K.\n\n\n \n \n \n \n \n The INTERSPEECH 2016 computational paralinguistics challenge: Deception, sincerity & native language.\n \n \n \n \n\n\n \n\n\n\n In Proceedings of the Annual Conference of the International Speech Communication Association, INTERSPEECH, volume 08-12-Sept, pages 2001-2005, 9 2016. \n \n\n\n\n
\n\n\n\n \n \n \"TheWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{\n title = {The INTERSPEECH 2016 computational paralinguistics challenge: Deception, sincerity & native language},\n type = {inproceedings},\n year = {2016},\n keywords = {article,conference},\n pages = {2001-2005},\n volume = {08-12-Sept},\n websites = {http://www.isca-speech.org/archive/Interspeech_2016/abstracts/0129.html http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2%255C&SrcApp=PARTNER_APP%255C&SrcAuth=LinksAMR%255C&KeyUT=WOS:000409394401102%255C&DestLinkType=FullRecord%255C&DestAp},\n month = {9},\n day = {8},\n id = {68bb5f48-7268-3ea7-b41f-deb9e24ed29a},\n created = {2020-05-29T11:51:36.895Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2023-07-03T09:46:08.675Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {schuller2016thelanguage},\n source_type = {CONF},\n folder_uuids = {aac08d0d-38e7-4f4e-a381-5271c5c099ce},\n private_publication = {false},\n abstract = {The INTERSPEECH 2016 Computational Paralinguistics Challenge addresses three different problems for the first time in research competition under well-defined conditions: classification of deceptive vs. non-deceptive speech, the estimation of the degree of sincerity, and the identification of the native language out of eleven L1 classes of English L2 speakers. In this paper, we describe these sub-challenges, their conditions, the baseline feature extraction and classifiers, and the resulting baselines, as provided to the participants.},\n bibtype = {inproceedings},\n author = {Schuller, Björn and Steidl, Stefan and Batliner, Anton and Hirschberg, Julia and Burgoon, Judee K. and Baird, Alice and Elkins, Aaron and Zhang, Yue and Coutinho, Eduardo and Evanini, Keelan},\n doi = {10.21437/Interspeech.2016-129},\n booktitle = {Proceedings of the Annual Conference of the International Speech Communication Association, INTERSPEECH}\n}
\n
\n\n\n
\n The INTERSPEECH 2016 Computational Paralinguistics Challenge addresses three different problems for the first time in a research competition under well-defined conditions: the classification of deceptive vs. non-deceptive speech, the estimation of the degree of sincerity, and the identification of the native language out of eleven L1 classes of English L2 speakers. In this paper, we describe these sub-challenges, their conditions, the baseline feature extraction and classifiers, and the resulting baselines, as provided to the participants.\n
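A rough sketch of a ComParE-style baseline, assuming the opensmile Python package and scikit-learn are available: extract the ComParE 2016 acoustic functionals per file and train a linear SVM. File names and labels are hypothetical; this is not the official challenge baseline code.

import numpy as np
import opensmile
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC

# One functionals vector per audio file (ComParE 2016 feature set).
smile = opensmile.Smile(
    feature_set=opensmile.FeatureSet.ComParE_2016,
    feature_level=opensmile.FeatureLevel.Functionals,
)

train_files = ["train_0001.wav", "train_0002.wav"]      # hypothetical paths
train_labels = ["deceptive", "non-deceptive"]           # hypothetical labels

X_train = np.vstack([smile.process_file(f).to_numpy() for f in train_files])
clf = make_pipeline(StandardScaler(), LinearSVC(C=1e-3))
clf.fit(X_train, train_labels)

X_test = smile.process_file("test_0001.wav").to_numpy()
print(clf.predict(X_test))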
\n\n\n
\n\n\n
\n \n\n \n \n Rickard, N., S.; Chin, T.; Coutinho, E.; and Scherer, K., R.\n\n\n \n \n \n \n MUSEBAQ: A psychometrically robust questionnaire for capturing the many voices of music engagement.\n \n \n \n\n\n \n\n\n\n In Proceedings of the 14th International Conference on Music Perception and Cognition, 7 2016. San Francisco, CA\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{\n title = {MUSEBAQ: A psychometrically robust questionnaire for capturing the many voices of music engagement},\n type = {inproceedings},\n year = {2016},\n keywords = {Conference,abstract},\n month = {7},\n publisher = {San Francisco, CA},\n institution = {San Francisco, CA},\n id = {cd02abac-1938-3fc5-a0d5-5631c96f5ca5},\n created = {2020-05-29T11:51:37.064Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2020-05-30T17:17:27.721Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {rickard2016musebaqengagement},\n source_type = {inproceedings},\n folder_uuids = {a2a583e8-b0a3-48f9-900f-27e15c9a7f71},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Rickard, Nikki Sue and Chin, T and Coutinho, E and Scherer, K R},\n booktitle = {Proceedings of the 14th International Conference on Music Perception and Cognition}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n Valstar, M.; Ghitulescu, A.; Baur, T.; Potard, B.; Cafaro, A.; Wagner, J.; André, E.; Durieu, L.; Aylett, M.; Dermouche, S.; Pelachaud, C.; Coutinho, E.; Schuller, B.; Zhang, Y.; Heylen, D.; Theune, M.; and van Waterschoot, J.\n\n\n \n \n \n \n \n Ask alice: An artificial retrieval of information agent.\n \n \n \n \n\n\n \n\n\n\n In ICMI 2016 - Proceedings of the 18th ACM International Conference on Multimodal Interaction, pages 419-420, 11 2016. ACM Press\n \n\n\n\n
\n\n\n\n \n \n \"AskWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{\n title = {Ask alice: An artificial retrieval of information agent},\n type = {inproceedings},\n year = {2016},\n keywords = {article,conference},\n pages = {419-420},\n websites = {http://dl.acm.org/citation.cfm?doid=2993148.2998535},\n month = {11},\n publisher = {ACM Press},\n city = {New York, New York, USA},\n institution = {Tokyo, Japan},\n id = {dce1e39d-d5f4-3e49-96f8-c4bad9f59b0e},\n created = {2020-05-29T11:51:38.700Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2023-05-15T08:14:21.989Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {valstar2016askagent},\n source_type = {inproceedings},\n folder_uuids = {aac08d0d-38e7-4f4e-a381-5271c5c099ce},\n private_publication = {false},\n abstract = {We present a demonstration of the ARIA framework, a modular approach for rapid development of virtual humans for information retrieval that have linguistic, emotional, and social skills and a strong personality. We demonstrate the framework's capabilities in a scenario where Alice in Wonderland', a popular English literature book, is embodied by a virtual human representing Alice. The user can engage in an information exchange dialogue, where Alice acts as the expert on the book, and the user as an interested novice. Besides speech recognition, sophisticated audio-visual behaviour analysis is used to inform the core agent dialogue module about the user's state and intentions, so that it can go beyond simple chat-bot dialogue. The behaviour generation module features a unique new capability of being able to deal gracefully with interruptions of the agent.},\n bibtype = {inproceedings},\n author = {Valstar, Michel and Ghitulescu, Alexandru and Baur, Tobias and Potard, Blaise and Cafaro, Angelo and Wagner, Johannes and André, Elisabeth and Durieu, Laurent and Aylett, Matthew and Dermouche, Soumia and Pelachaud, Catherine and Coutinho, Eduardo and Schuller, Björn and Zhang, Yue and Heylen, Dirk and Theune, Mariët and van Waterschoot, Jelte},\n doi = {10.1145/2993148.2998535},\n booktitle = {ICMI 2016 - Proceedings of the 18th ACM International Conference on Multimodal Interaction}\n}
\n
\n\n\n
\n We present a demonstration of the ARIA framework, a modular approach for the rapid development of virtual humans for information retrieval that have linguistic, emotional, and social skills and a strong personality. We demonstrate the framework's capabilities in a scenario where 'Alice in Wonderland', a popular English literature book, is embodied by a virtual human representing Alice. The user can engage in an information exchange dialogue, where Alice acts as the expert on the book, and the user as an interested novice. Besides speech recognition, sophisticated audio-visual behaviour analysis is used to inform the core agent dialogue module about the user's state and intentions, so that it can go beyond simple chat-bot dialogue. The behaviour generation module features a unique new capability of being able to deal gracefully with interruptions of the agent.\n
\n\n\n
\n\n\n
\n \n\n \n \n Zhang, Z.; Ringeval, F.; Dong, B.; Coutinho, E.; Marchi, E.; and Schuller, B.\n\n\n \n \n \n \n \n Enhanced semi-supervised learning for multimodal emotion recognition.\n \n \n \n \n\n\n \n\n\n\n In ICASSP, IEEE International Conference on Acoustics, Speech and Signal Processing - Proceedings, volume 2016-May, pages 5185-5189, 3 2016. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"EnhancedWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{\n title = {Enhanced semi-supervised learning for multimodal emotion recognition},\n type = {inproceedings},\n year = {2016},\n keywords = {article,conference},\n pages = {5185-5189},\n volume = {2016-May},\n websites = {http://ieeexplore.ieee.org/document/7472666/},\n month = {3},\n publisher = {IEEE},\n id = {8f1f3221-0c83-30dd-84a1-4992535dbafb},\n created = {2020-05-29T11:51:38.718Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2023-05-15T08:14:21.986Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {zhang2016enhancedrecognition},\n source_type = {inproceedings},\n folder_uuids = {aac08d0d-38e7-4f4e-a381-5271c5c099ce},\n private_publication = {false},\n abstract = {Semi-Supervised Learning (SSL) techniques have found many applications where labeled data is scarce and/or expensive to obtain. However, SSL suffers from various inherent limitations that limit its performance in practical applications. A central problem is that the low performance that a classifier can deliver on challenging recognition tasks reduces the trustability of the automatically labeled data. Another related issue is the noise accumulation problem - instances that are misclassified by the system are still used to train it in future iterations. In this paper, we propose to address both issues in the context of emotion recognition. Initially, we exploit the complementarity between audio-visual features to improve the performance of the classifier during the supervised phase. Then, we iteratively re-evaluate the automatically labeled instances to correct possibly mislabeled data and this enhances the overall confidence of the system's predictions. Experimental results performed on the RECOLA database demonstrate that our methodology delivers a strong performance in the classification of high/low emotional arousal (UAR = 76.5%), and significantly outperforms traditional SSL methods by at least 5.0% (absolute gain).},\n bibtype = {inproceedings},\n author = {Zhang, Zixing and Ringeval, Fabien and Dong, Bin and Coutinho, Eduardo and Marchi, Erik and Schuller, Bjorn},\n doi = {10.1109/ICASSP.2016.7472666},\n booktitle = {ICASSP, IEEE International Conference on Acoustics, Speech and Signal Processing - Proceedings}\n}
\n
\n\n\n
\n Semi-Supervised Learning (SSL) techniques have found many applications where labeled data is scarce and/or expensive to obtain. However, SSL suffers from various inherent limitations that constrain its performance in practical applications. A central problem is that the low performance a classifier can deliver on challenging recognition tasks reduces the trustworthiness of the automatically labeled data. Another related issue is the noise accumulation problem: instances that are misclassified by the system are still used to train it in future iterations. In this paper, we propose to address both issues in the context of emotion recognition. Initially, we exploit the complementarity between audio-visual features to improve the performance of the classifier during the supervised phase. Then, we iteratively re-evaluate the automatically labeled instances to correct possibly mislabeled data, which enhances the overall confidence of the system's predictions. Experimental results on the RECOLA database demonstrate that our methodology delivers a strong performance in the classification of high/low emotional arousal (UAR = 76.5%), and significantly outperforms traditional SSL methods by at least 5.0% (absolute gain).\n
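A simplified, single-modality sketch of the self-training loop with re-evaluation described above; the paper's system is audio-visual and more elaborate, and the data, classifier and confidence threshold here are placeholders.

import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(4)

# Small labeled seed set and a larger unlabeled pool with the same structure.
X_lab = rng.normal(size=(40, 20))
y_lab = np.repeat([0, 1], 20)
X_lab[y_lab == 1] += 1.0
X_unlab = rng.normal(size=(400, 20))
X_unlab[200:] += 1.0                      # ground truth unknown to the learner

threshold = 0.9
auto_labels = np.full(len(X_unlab), -1)   # -1 = not (or no longer) auto-labeled

for _ in range(5):
    mask = auto_labels >= 0
    X_train = np.vstack([X_lab, X_unlab[mask]])
    y_train = np.concatenate([y_lab, auto_labels[mask]])
    clf = LogisticRegression(max_iter=1000).fit(X_train, y_train)

    # Re-evaluate *all* unlabeled instances, including previously auto-labeled
    # ones, so that low-confidence or revised predictions are dropped/corrected.
    proba = clf.predict_proba(X_unlab)
    confident = proba.max(axis=1) >= threshold
    auto_labels = np.where(confident, proba.argmax(axis=1), -1)

print("auto-labeled instances after 5 iterations:", int((auto_labels >= 0).sum()))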
\n\n\n
\n\n\n
\n \n\n \n \n Coutinho, E.; Hönig, F.; Zhang, Y.; Hantke, S.; Batliner, A.; Nöth, E.; and Schuller, B.\n\n\n \n \n \n \n Assessing the prosody of non-native speakers of English: Measures and feature sets.\n \n \n \n\n\n \n\n\n\n In Calzolari, N.; Choukri, K.; Declerck, T.; Goggi, S.; Grobelnik, M.; Maegaard, B.; Mariani, J.; Mazo, H.; Moreno, A.; Odijk, J.; and Piperidis, S., editor(s), Proceedings of the 10th International Conference on Language Resources and Evaluation, LREC 2016, volume 645378, pages 1328-1332, 1 2016. European Language Resources Association (ELRA)\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{\n title = {Assessing the prosody of non-native speakers of English: Measures and feature sets},\n type = {inproceedings},\n year = {2016},\n keywords = {article,conference},\n pages = {1328-1332},\n volume = {645378},\n issue = {645378},\n month = {1},\n publisher = {European Language Resources Association (ELRA)},\n institution = {Paris, France},\n id = {2fec392e-1f05-3f48-b89b-b26c1456a278},\n created = {2020-05-29T11:51:39.102Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2020-05-30T17:16:39.657Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {coutinho2016assessingsets},\n source_type = {CONF},\n folder_uuids = {aac08d0d-38e7-4f4e-a381-5271c5c099ce},\n private_publication = {false},\n abstract = {In this paper, we describe a new database with audio recordings of non-native (L2) speakers of English, and the perceptual evaluation experiment conducted with native English speakers for assessing the prosody of each recording. These annotations are then used to compute the gold standard using different methods, and a series of regression experiments is conducted to evaluate their impact on the performance of a regression model predicting the degree of naturalness of L2 speech. Further, we compare the relevance of different feature groups modelling prosody in general (without speech tempo), speech rate and pauses modelling speech tempo (fluency), voice quality, and a variety of spectral features. We also discuss the impact of various fusion strategies on performance. Overall, our results demonstrate that the prosody of non-native speakers of English as L2 can be reliably assessed using supra-segmental audio features; prosodic features seem to be the most important ones.},\n bibtype = {inproceedings},\n author = {Coutinho, Eduardo and Hönig, Florian and Zhang, Yue and Hantke, Simone and Batliner, Anton and Nöth, Elmar and Schuller, Björn},\n editor = {Calzolari, N and Choukri, K and Declerck, T and Goggi, S and Grobelnik, M and Maegaard, B and Mariani, J and Mazo, H and Moreno, A and Odijk, J and Piperidis, S},\n booktitle = {Proceedings of the 10th International Conference on Language Resources and Evaluation, LREC 2016}\n}
\n
\n\n\n
\n In this paper, we describe a new database with audio recordings of non-native (L2) speakers of English, and the perceptual evaluation experiment conducted with native English speakers for assessing the prosody of each recording. These annotations are then used to compute the gold standard using different methods, and a series of regression experiments is conducted to evaluate their impact on the performance of a regression model predicting the degree of naturalness of L2 speech. Further, we compare the relevance of different feature groups modelling prosody in general (without speech tempo), speech rate and pauses modelling speech tempo (fluency), voice quality, and a variety of spectral features. We also discuss the impact of various fusion strategies on performance. Overall, our results demonstrate that the prosody of non-native speakers of English as L2 can be reliably assessed using supra-segmental audio features; prosodic features seem to be the most important ones.\n
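The sketch below illustrates one way of turning several raters' scores into a gold standard (a plain mean and a simple agreement-weighted mean) and fitting a regressor on acoustic features. The paper compares several gold-standard computations and feature sets; the weighting shown here is only one common option, not necessarily the one used in the study, and all data are synthetic.

import numpy as np
from sklearn.svm import SVR

rng = np.random.default_rng(5)
n_clips, n_raters = 100, 5

# Hypothetical per-clip "true" naturalness plus noisy ratings from five raters.
true_score = rng.normal(size=n_clips)
ratings = true_score[:, None] + 0.5 * rng.normal(size=(n_clips, n_raters))

# Supra-segmental feature placeholders, weakly related to the latent score.
features = rng.normal(size=(n_clips, 30))
features[:, 0] += true_score

gold_mean = ratings.mean(axis=1)

# Agreement-weighted mean: weight each rater by correlation with the others.
weights = np.array([
    np.corrcoef(ratings[:, r], np.delete(ratings, r, axis=1).mean(axis=1))[0, 1]
    for r in range(n_raters)
])
weights = np.clip(weights, 0.0, None)
gold_weighted = ratings @ weights / weights.sum()

reg = SVR(C=1.0).fit(features, gold_weighted)
pred = reg.predict(features)
print("correlation with gold standard:",
      round(float(np.corrcoef(pred, gold_weighted)[0, 1]), 3))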
\n\n\n
\n\n\n
\n \n\n \n \n Han, W.; Coutinho, E.; Ruan, H.; Li, H.; Schuller, B.; Yu, X.; and Zhu, X.\n\n\n \n \n \n \n \n Semi-supervised active learning for sound classification in hybrid learning environments.\n \n \n \n \n\n\n \n\n\n\n PLoS ONE, 11(9): e0162075. 9 2016.\n \n\n\n\n
\n\n\n\n \n \n \"Semi-supervisedWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Semi-supervised active learning for sound classification in hybrid learning environments},\n type = {article},\n year = {2016},\n keywords = {article,journal},\n pages = {e0162075},\n volume = {11},\n websites = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000383680600017&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=f3ec48df247ee1138ccd8d3ba59bacc2 http://dx.plos.org/10.1371/journal.pone.},\n month = {9},\n day = {14},\n id = {2fb19fb0-efc7-3ae7-bd2c-40d1948d933b},\n created = {2020-05-29T11:51:39.251Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2023-05-15T08:14:21.423Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {han2016semisupervisedenvironments},\n source_type = {article},\n folder_uuids = {99880aa7-55df-4b45-bfce-0ffc00b23ced},\n private_publication = {false},\n abstract = {Coping with scarcity of labeled data is a common problem in sound classification tasks. Approaches for classifying sounds are commonly based on supervised learning algorithms, which require labeled data which is often scarce and leads to models that do not generalize well. In this paper, we make an efficient combination of confidence-based Active Learning and Self-Training with the aim of minimizing the need for human annotation for sound classification model training. The proposed method pre-processes the instances that are ready for labeling by calculating their classifier confidence scores, and then delivers the candidates with lower scores to human annotators, and those with high scores are automatically labeled by the machine. We demonstrate the feasibility and efficacy of this method in two practical scenarios: pool-based and stream-based processing. Extensive experimental results indicate that our approach requires significantly less labeled instances to reach the same performance in both scenarios compared to Passive Learning, Active Learning and Self-Training. A reduction of 52.2% in human labeled instances is achieved in both of the pool-based and stream-based scenarios on a sound classification task considering 16,930 sound instances.},\n bibtype = {article},\n author = {Han, Wenjing and Coutinho, Eduardo and Ruan, Huabin and Li, Haifeng and Schuller, Björn and Yu, Xiaojie and Zhu, Xuan},\n editor = {Schwenker, Friedhelm},\n doi = {10.1371/journal.pone.0162075},\n journal = {PLoS ONE},\n number = {9}\n}
\n
\n\n\n
\n Coping with the scarcity of labeled data is a common problem in sound classification tasks. Approaches for classifying sounds are commonly based on supervised learning algorithms, which require labeled data that is often scarce and leads to models that do not generalize well. In this paper, we efficiently combine confidence-based Active Learning and Self-Training with the aim of minimizing the need for human annotation when training sound classification models. The proposed method pre-processes the instances that are ready for labeling by calculating their classifier confidence scores, and then delivers the candidates with lower scores to human annotators, while those with high scores are automatically labeled by the machine. We demonstrate the feasibility and efficacy of this method in two practical scenarios: pool-based and stream-based processing. Extensive experimental results indicate that our approach requires significantly fewer labeled instances to reach the same performance in both scenarios compared to Passive Learning, Active Learning and Self-Training. A reduction of 52.2% in human-labeled instances is achieved in both the pool-based and stream-based scenarios on a sound classification task considering 16,930 sound instances.\n
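A toy, pool-based sketch of the split described above: instances the current classifier is unsure about are sent to a (here simulated) human annotator, while high-confidence instances are labelled automatically. Thresholds, classifier and data are placeholders, not the paper's setup.

import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(6)

def make_data(n):
    X = rng.normal(size=(n, 16))
    y = (X[:, 0] + 0.5 * rng.normal(size=n) > 0).astype(int)
    return X, y

X_lab, y_lab = make_data(30)                      # small seed set of human labels
X_pool, y_pool_oracle = make_data(500)            # unlabeled pool (oracle hidden)

low, high = 0.65, 0.95                            # confidence thresholds (placeholders)
clf = LogisticRegression(max_iter=1000).fit(X_lab, y_lab)
conf = clf.predict_proba(X_pool).max(axis=1)

ask_human = conf < low                            # active learning: query the annotator
auto_label = conf >= high                         # self-training: trust the machine

X_new = np.vstack([X_pool[ask_human], X_pool[auto_label]])
y_new = np.concatenate([y_pool_oracle[ask_human],             # human (oracle) labels
                        clf.predict(X_pool[auto_label])])     # machine labels
clf = LogisticRegression(max_iter=1000).fit(np.vstack([X_lab, X_new]),
                                            np.concatenate([y_lab, y_new]))
print(f"queried human for {ask_human.sum()} of {len(X_pool)} pool instances")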
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2015\n \n \n (16)\n \n \n
\n
\n \n \n
Coutinho, E.; and Schuller, B. W. Automatic Estimation of Biosignals From the Human Voice. Science, 350(6256): 114:48–50, October 2015.

@article{coutinho2015automaticvoice,
  author  = {Coutinho, Eduardo and Schuller, Björn W.},
  title   = {Automatic Estimation of Biosignals From the Human Voice},
  journal = {Science},
  volume  = {350},
  number  = {6256},
  pages   = {114:48--50},
  month   = {10},
  year    = {2015},
  url     = {http://science.sciencemag.org/content/350/6256/114.3},
  note    = {Invited contribution}
}

Computational paralinguistics (CP) is a relatively new area of research that provides new methods, tools, and techniques to automatically recognize the states, traits, and qualities embedded in the nonsemantic aspects of human speech (1). In recent years, CP has reached a level of maturity that has permitted the development of a myriad of applications in everyday life, such as the automatic estimation of a speaker's age, gender, height, emotional state, cognitive load, personality traits, likability, intelligibility, and medical condition (2). Here, we provide an overview of one particular application of CP that offers new solutions for health care: the recognition of physiological parameters (biosignals) from the voice alone.
Payan, A.; Sun, T.; Vidal, G.; Zhang, T.; Coutinho, E.; and Eyben, F. Does my Speech Rock? Automatic Assessment of Public Speaking Skills. In Proceedings of the Annual Conference of the International Speech Communication Association (INTERSPEECH), volume 1, pages 2519–2523, Dresden, Germany, 2015. ISCA.

@inproceedings{azais2015doesskills,
  author    = {Payan, Adrien and Sun, Tianjiao and Vidal, Guillaume and Zhang, Tina and Coutinho, Eduardo and Eyben, Florian},
  title     = {Does my Speech Rock? Automatic Assessment of Public Speaking Skills},
  booktitle = {Proceedings of the Annual Conference of the International Speech Communication Association (INTERSPEECH)},
  volume    = {1},
  pages     = {2519--2523},
  publisher = {ISCA},
  address   = {Dresden, Germany},
  year      = {2015},
  url       = {http://www.isca-speech.org/archive/interspeech_2015/papers/i15_2519.pdf}
}

In this paper, we introduce results for the task of Automatic Public Speech Assessment (APSA). Given the comparably sparse work carried out on this task up to this point, a novel database was required for training and evaluation of machine learning models. As a basis, the freely available oral presentations of the ICASSP conference in 2011 were selected due to their transcription including non-verbal vocalisations. The data was specifically labelled in terms of the perceived oratory ability of the speakers by five raters according to a 5-point Public Speaking Skill Rating Likert scale. We investigate the feasibility of speaker-independent APSA using different standardised acoustic feature sets computed per fixed chunk of an oral presentation in a series of ternary classification and continuous regression experiments. Further, we compare the relevance of different feature groups related to fluency (speech/hesitation rate), prosody, voice quality and a variety of spectral features. Our results demonstrate that oratory speaking skills can be reliably assessed using supra-segmental audio features, with prosodic ones being particularly suited.
Rickard, N. S.; Chin, T.; Coutinho, E.; and Scherer, K. R. The MUSEBAQ: a Comprehensive and Modular Instrument for Assessing Musical Engagement. In Proceedings of the 4th International Conference on Music and Emotion (ICME’4), page 1, Geneva, Switzerland, 2015.

@inproceedings{rickard2015theengagement,
  author    = {Rickard, Nikki Sue and Chin, T-C and Coutinho, E and Scherer, K R},
  title     = {The MUSEBAQ: a Comprehensive and Modular Instrument for Assessing Musical Engagement},
  booktitle = {Proceedings of the 4th International Conference on Music and Emotion (ICME’4)},
  pages     = {1},
  address   = {Geneva, Switzerland},
  year      = {2015}
}
Zhang, Y.; Coutinho, E.; Schuller, B.; Zhang, Z.; and Adam, M. On rater reliability and agreement based dynamic active learning. In 2015 International Conference on Affective Computing and Intelligent Interaction (ACII 2015), pages 70–76, September 2015. IEEE.

@inproceedings{zhang2015onlearning,
  author    = {Zhang, Yue and Coutinho, Eduardo and Schuller, Björn and Zhang, Zixing and Adam, Michael},
  title     = {On rater reliability and agreement based dynamic active learning},
  booktitle = {2015 International Conference on Affective Computing and Intelligent Interaction (ACII 2015)},
  pages     = {70--76},
  month     = {9},
  publisher = {IEEE},
  year      = {2015},
  doi       = {10.1109/ACII.2015.7344553}
}

In this paper, we propose two novel Dynamic Active Learning (DAL) methods with the aim of ultimately reducing the costly human labelling work for subjective tasks such as speech emotion recognition. Compared to conventional Active Learning (AL) algorithms, the proposed DAL approaches employ a highly efficient adaptive query strategy that minimises the number of annotations through three advancements. First, we shift from the standard majority voting procedure, in which unlabelled instances are annotated by a fixed number of raters, to an agreement-based annotation technique that dynamically determines how many human annotators are required to label a selected instance. Second, we introduce the concept of the order-based DAL algorithm by considering rater reliability and inter-rater agreement. Third, a highly dynamic development trend is successfully implemented by upgrading the agreement levels depending on the prediction uncertainty. In extensive experiments on standardised test-beds, we show that the new dynamic methods significantly improve the efficiency of the existing AL algorithms by reducing human labelling effort by up to 85.41%, while achieving the same classification accuracy. Thus, the enhanced DAL derivations open up high-potential research directions for the utmost exploitation of unlabelled data.
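As a rough illustration of the agreement-based annotation idea in the abstract above, the sketch below queries raters one at a time and stops as soon as a target agreement level is reached, instead of always collecting a fixed number of votes. The stopping rule, the agreement level and the rater interface are assumptions for illustration, not the authors' exact procedure.

from collections import Counter

def dynamic_label(instance, raters, required_agreement=2, max_raters=5):
    """Query raters one by one; stop once `required_agreement` raters gave the same label."""
    votes = []
    for rater in raters[:max_raters]:
        votes.append(rater(instance))                  # one more human annotation
        label, count = Counter(votes).most_common(1)[0]
        if count >= required_agreement:                # early stop: agreement reached
            return label, len(votes)
    # fall back to the majority vote among the raters queried so far
    return Counter(votes).most_common(1)[0][0], len(votes)

# In the paper's spirit, `required_agreement` could itself be raised for instances the
# current model is uncertain about; here it is simply a fixed parameter.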
Zhang, Y.; Coutinho, E.; Zhang, Z.; Quan, C.; and Schuller, B. Dynamic Active Learning based on agreement and applied to emotion recognition in spoken interactions. In ICMI 2015 – Proceedings of the 2015 ACM International Conference on Multimodal Interaction, pages 275–278, 2015. ACM Press.

@inproceedings{zhang2015dynamicinteractions,
  author    = {Zhang, Yue and Coutinho, Eduardo and Zhang, Zixing and Quan, Caijiao and Schuller, Björn},
  title     = {Dynamic Active Learning based on agreement and applied to emotion recognition in spoken interactions},
  booktitle = {ICMI 2015 - Proceedings of the 2015 ACM International Conference on Multimodal Interaction},
  pages     = {275--278},
  publisher = {ACM Press},
  address   = {New York, NY, USA},
  year      = {2015},
  doi       = {10.1145/2818346.2820774}
}

In this contribution, we propose a novel method for Active Learning (AL), Dynamic Active Learning (DAL), which targets the reduction of the costly human labelling work necessary for modelling subjective tasks such as emotion recognition in spoken interactions. The method implements an adaptive query strategy that minimises the amount of human labelling work by deciding for each instance whether it should automatically be labelled by the machine or manually by a human, as well as how many human annotators are required. Extensive experiments on standardised test-beds show that DAL significantly improves the efficiency of conventional AL. In particular, DAL achieves the same classification accuracy obtained with AL with up to 79.17% less human annotation effort.
Zhang, Z.; Coutinho, E.; Deng, J.; and Schuller, B. Cooperative learning and its application to emotion recognition from speech. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 23(1): 115–126, January 2015.

@article{zhang2015cooperativespeech,
  author  = {Zhang, Zixing and Coutinho, Eduardo and Deng, Jun and Schuller, Björn},
  title   = {Cooperative learning and its application to emotion recognition from speech},
  journal = {IEEE/ACM Transactions on Audio, Speech, and Language Processing},
  volume  = {23},
  number  = {1},
  pages   = {115--126},
  month   = {1},
  year    = {2015},
  doi     = {10.1109/TASLP.2014.2375558}
}

In this paper, we propose a novel method for highly efficient exploitation of unlabeled data: Cooperative Learning. Our approach consists of combining Active Learning and Semi-Supervised Learning techniques, with the aim of reducing the costly effects of human annotation. The core underlying idea of Cooperative Learning is to share the labeling work between human and machine efficiently, in such a way that instances predicted with insufficient confidence are subject to human labeling, and those with high confidence values are machine-labeled. We conducted various test runs on two emotion recognition tasks with a variable number of initial supervised training instances and two different feature sets. The results show that Cooperative Learning consistently outperforms individual Active and Semi-Supervised Learning techniques in all test cases. In particular, we show that our method based on the combination of Active Learning and Co-Training leads to the same performance of a model trained on the whole training set, but using 75% fewer labeled instances. Therefore, our method efficiently and robustly reduces the need for human annotations.
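The division of labour described above (human labels for low-confidence instances, machine labels for high-confidence ones, followed by retraining) can be sketched as a single cooperative round. The confidence threshold, the oracle interface and the choice of classifier below are illustrative assumptions, not the configuration used in the paper.

import numpy as np
from sklearn.linear_model import LogisticRegression

def cooperative_round(model, X_lab, y_lab, X_pool, oracle, threshold=0.9):
    """One round: split the pool by confidence, label it, and retrain on the enlarged set."""
    proba = model.predict_proba(X_pool)
    conf = proba.max(axis=1)
    low, high = conf < threshold, conf >= threshold
    y_human = np.array([oracle(x) for x in X_pool[low]])    # active learning part
    y_machine = proba[high].argmax(axis=1)                  # semi-supervised part
    X_new = np.vstack([X_lab, X_pool[low], X_pool[high]])
    y_new = np.concatenate([y_lab, y_human, y_machine])
    return LogisticRegression(max_iter=1000).fit(X_new, y_new), X_new, y_new

# Repeating such rounds while the pool shrinks mimics the iterative scheme the
# abstract describes; `oracle` stands in for the human annotator.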
Eyben, F.; Coutinho, E.; and Schuller, B. W. Automatic recognition of emotional dimensions in singing. In Proceedings of the 4th International Conference on Music and Emotion (ICME’4), page 1, Geneva, Switzerland, 2015. University of Geneva.

@inproceedings{eyben2015automaticsinging,
  author    = {Eyben, F and Coutinho, E and Schuller, Björn W.},
  title     = {Automatic recognition of emotional dimensions in singing},
  booktitle = {Proceedings of the 4th International Conference on Music and Emotion (ICME’4)},
  pages     = {1},
  publisher = {University of Geneva},
  address   = {Geneva, Switzerland},
  year      = {2015}
}
Zhang, Y.; Coutinho, E.; Zhang, Z.; Quan, C.; and Schuller, B. Agreement-Based Dynamic Active Learning with Least and Medium Certainty Query Strategy. In Krishnamurthy, A.; Ramdas, A.; Balcan, N.; and Singh, A., editors, Proceedings of the 32nd International Conference on Machine Learning (ICML 2015), pages 1–5, Lille, France, 2015. International Machine Learning Society (IMLS).

@inproceedings{zhang2015agreementbasedstrategy,
  author    = {Zhang, Yue and Coutinho, Eduardo and Zhang, Zixing and Quan, Caijiao and Schuller, Björn},
  editor    = {Krishnamurthy, A and Ramdas, A and Balcan, N and Singh, A},
  title     = {Agreement-Based Dynamic Active Learning with Least and Medium Certainty Query Strategy},
  booktitle = {Proceedings of the 32nd International Conference on Machine Learning (ICML 2015)},
  pages     = {1--5},
  publisher = {International Machine Learning Society (IMLS)},
  address   = {Lille, France},
  year      = {2015}
}

In this contribution, we propose a novel method for active learning termed ‘dynamic active learning’ or DAL for short, with the aim of ultimately reducing the costly human labelling work for subjective tasks such as speech emotion recognition. Through an adaptive query strategy, the amount of manual labelling work is minimised by deciding for each instance not only whether or not it should be annotated, but also dynamically how many human annotators’ opinions are needed. Through extensive experiments on standardised test-beds, we show that DAL achieves the same classification accuracy of ‘traditional’ AL with a cost reduction of up to 79.17%. Thus, the DAL method significantly improves the efficiency of existing algorithms, setting a new benchmark for the utmost exploitation of unlabelled data.
Gentsch, K.; Coutinho, E.; Eyben, F.; Schuller, B. W.; and Scherer, K. R. Classifying Emotion-Antecedent Appraisal in Brain Activity Using Machine Learning Methods. In Proceedings of the International Society for Research on Emotions Conference (ISRE 2015), page 1, Geneva, Switzerland, 2015.

@inproceedings{gentsch2015classifyingmethods,
  author    = {Gentsch, Kornelia and Coutinho, Eduardo and Eyben, Florian and Schuller, Björn W. and Scherer, Klaus R.},
  title     = {Classifying Emotion-Antecedent Appraisal in Brain Activity Using Machine Learning Methods},
  booktitle = {Proceedings of the International Society for Research on Emotions Conference (ISRE 2015)},
  pages     = {1},
  address   = {Geneva, Switzerland},
  year      = {2015}
}
Sagha, H.; Coutinho, E.; and Schuller, B. Exploring the importance of individual differences to the automatic estimation of emotions induced by music. In AVEC 2015 – Proceedings of the 5th International Workshop on Audio/Visual Emotion Challenge, co-located with MM 2015, pages 57–63, October 2015. ACM Press.

@inproceedings{sagha2015exploringmusic,
  author    = {Sagha, Hesam and Coutinho, Eduardo and Schuller, Björn},
  title     = {Exploring the importance of individual differences to the automatic estimation of emotions induced by music},
  booktitle = {AVEC 2015 - Proceedings of the 5th International Workshop on Audio/Visual Emotion Challenge, co-located with MM 2015},
  pages     = {57--63},
  month     = {10},
  publisher = {ACM Press},
  year      = {2015},
  doi       = {10.1145/2808196.2811643}
}

The goal of this study was to evaluate the impact of the inclusion of listener-related factors (individual differences) on the prediction of music-induced affect. A group of 24 subjects listened to a set of music excerpts previously demonstrated to express specific emotional characteristics (in terms of Arousal and Valence), and we collected information related to listeners' stable (personality, emotional intelligence, attentiveness, music preferences) and transient (mood and physiological activity) states. Through a series of regression analyses, we identified those factors which have a significant explanatory power over the affective states induced in the listeners. Our results show that incorporating information related to individual differences permits us to identify more accurately the affective states induced in the listeners, which differ from those expressed by the music.
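A small sketch of the kind of comparison the abstract suggests: predicting induced Arousal or Valence from acoustic features alone versus acoustic features augmented with listener descriptors (personality, mood, and so on). The Ridge model, 5-fold cross-validation and R² scoring are assumptions made for illustration; the study itself used a series of regression analyses whose exact form is not reproduced here.

import numpy as np
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score

def compare_explained_variance(X_acoustic, X_listener, y):
    """Return mean cross-validated R² with and without listener-related features."""
    X_full = np.hstack([X_acoustic, X_listener])
    r2_acoustic = cross_val_score(Ridge(), X_acoustic, y, cv=5, scoring="r2").mean()
    r2_full = cross_val_score(Ridge(), X_full, y, cv=5, scoring="r2").mean()
    return r2_acoustic, r2_full   # the gain hints at the explanatory power of individual differences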
Coutinho, E.; and Lisser, A. A Comparison of Undergraduates Music Listening Habits in Everyday Life and While Studying. In Proceedings of the 4th International Conference on Music and Emotion (ICME’4), page 1, Geneva, Switzerland, 2015. University of Geneva.

@inproceedings{coutinho2015astudying,
  author    = {Coutinho, E and Lisser, A},
  title     = {A Comparison of Undergraduates Music Listening Habits in Everyday Life and While Studying},
  booktitle = {Proceedings of the 4th International Conference on Music and Emotion (ICME’4)},
  pages     = {1},
  publisher = {University of Geneva},
  address   = {Geneva, Switzerland},
  year      = {2015}
}
Perlovsky, L.; Coutinho, E.; and Wilkins, R. W., editors. Music Cognition [Research Topic]. Frontiers in Psychology, volume 7. Frontiers Media, 2015.

@inbook{perlovsky2016frontierspsychology,
  editor    = {Perlovsky, L and Coutinho, E and Wilkins, R W},
  title     = {Frontiers in Psychology},
  chapter   = {Music Cognition [Research Topic]},
  volume    = {7},
  publisher = {Frontiers Media},
  year      = {2015},
  url       = {http://journal.frontiersin.org/researchtopic/3083/music-cognition}
}
Coutinho, E. Predicting Musical Emotions From Low-level Acoustics and Physiological Measurements: Music and Speech. In Proceedings of the 4th International Conference on Music and Emotion (ICME’4), page 1, Geneva, Switzerland, 2015.

@inproceedings{coutinho2015predictingspeech,
  author    = {Coutinho, E},
  title     = {Predicting Musical Emotions From Low-level Acoustics and Physiological Measurements: Music and Speech},
  booktitle = {Proceedings of the 4th International Conference on Music and Emotion (ICME’4)},
  pages     = {1},
  address   = {Geneva, Switzerland},
  year      = {2015}
}
Trigeorgis, G.; Coutinho, E.; Ringeval, F.; Marchi, E.; Zafeiriou, S.; and Schuller, B. The ICL-TUM-PASSAU approach for the MediaEval 2015 "affective impact of movies" task. In CEUR Workshop Proceedings, volume 1436, 2015. CEUR.

@inproceedings{trigeorgis2015,
  author    = {Trigeorgis, George and Coutinho, Eduardo and Ringeval, Fabien and Marchi, Erik and Zafeiriou, Stefanos and Schuller, Björn},
  editor    = {Larson, Martha and Ionescu, Bogdan and Sjöberg, Mats and Anguera, Xavier and Poignant, Johann and Riegler, Michael and Eskevich, Maria and Hauff, Claudia and Sutcliffe, Richard and Jones, Gareth J.F. and Yang, Yi-Hsuan and Soleymani, Mohammad and Papadopoulos, Symeon},
  title     = {The ICL-TUM-PASSAU approach for the MediaEval 2015 "affective impact of movies" task},
  booktitle = {CEUR Workshop Proceedings},
  volume    = {1436},
  publisher = {CEUR},
  year      = {2015}
}

In this paper we describe the Imperial College London, Technische Universitat München and University of Passau (ICL+TUM+PASSAU) team approach to the MediaEval "Affective Impact of Movies" challenge, which consists in the automatic detection of affective (arousal and valence) and violent content in movie excerpts. In addition to the baseline features, we computed spectral and energy related acoustic features, and the probability of various objects being present in the video. Random Forests, AdaBoost and Support Vector Machines were used as classification methods. Best results show that the dataset is highly challenging for both affect and violence detection tasks, mainly because of issues in inter-rater agreement and data scarcity.
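The classification set-up mentioned in the abstract (Random Forests, AdaBoost and Support Vector Machines over per-excerpt feature vectors) can be sketched as follows; the hyper-parameters and the cross-validation protocol are placeholders, not the values used in the paper.

from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score

classifiers = {
    "random_forest": RandomForestClassifier(n_estimators=300),
    "adaboost": AdaBoostClassifier(n_estimators=200),
    "svm": SVC(kernel="rbf", C=1.0),
}

def evaluate(X, y):
    # X: per-excerpt feature vectors (e.g., acoustic functionals plus object probabilities),
    # y: affect or violence labels; returns mean cross-validated accuracy per classifier.
    return {name: cross_val_score(clf, X, y, cv=5).mean() for name, clf in classifiers.items()}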
Coutinho, E.; Trigeorgis, G.; Zafeiriou, S.; and Schuller, B. Automatically estimating emotion in music with deep long-short term memory recurrent neural networks. In CEUR Workshop Proceedings, volume 1436, pages 1–3, September 2015. CEUR.

@inproceedings{coutinho2015automaticallynetworks,
  author    = {Coutinho, Eduardo and Trigeorgis, George and Zafeiriou, Stefanos and Schuller, Björn},
  editor    = {Larson, Martha and Ionescu, Bogdan and Sjöberg, Mats and Anguera, Xavier and Poignant, Johann and Riegler, Michael and Eskevich, Maria and Hauff, Claudia and Sutcliffe, Richard and Jones, Gareth J.F. and Yang, Yi-Hsuan and Soleymani, Mohammad and Papadopoulos, Symeon},
  title     = {Automatically estimating emotion in music with deep long-short term memory recurrent neural networks},
  booktitle = {CEUR Workshop Proceedings},
  volume    = {1436},
  pages     = {1--3},
  month     = {9},
  publisher = {CEUR},
  year      = {2015},
  url       = {http://ceur-ws.org/Vol-1436/Paper64.pdf}
}

In this paper we describe our approach for the MediaEval "Emotion in Music" task. Our method consists of deep Long Short-Term Memory Recurrent Neural Networks (LSTM-RNN) for dynamic Arousal and Valence regression, using acoustic and psychoacoustic features extracted from the songs that have previously been proven effective for emotion prediction in music. Results on the challenge test set demonstrate an excellent performance for Arousal estimation (r = 0.613 ± 0.278), but not for Valence (r = 0.026 ± 0.500). Issues regarding the reliability and distributions of the test set annotations are indicated as plausible justifications for these results. By using a subset of the development set that was left out for performance estimation, we could determine that the performance of our approach may be underestimated for Valence (Arousal: r = 0.596 ± 0.386; Valence: r = 0.458 ± 0.551).
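A minimal sketch of the kind of model the abstract describes: a stacked LSTM that maps a sequence of acoustic feature frames to time-continuous Arousal and Valence values. The layer sizes, the two-layer depth and the two-dimensional output head are assumptions for illustration, not the authors' configuration.

import torch
import torch.nn as nn

class EmotionLSTM(nn.Module):
    def __init__(self, n_features, hidden=64, layers=2):
        super().__init__()
        self.lstm = nn.LSTM(n_features, hidden, num_layers=layers, batch_first=True)
        self.head = nn.Linear(hidden, 2)      # [arousal, valence] per frame

    def forward(self, x):                     # x: (batch, time, n_features)
        out, _ = self.lstm(x)
        return self.head(out)                 # (batch, time, 2): one prediction per frame

# Usage sketch with dummy data: 8 songs, 120 frames each, 65 features per frame.
# model = EmotionLSTM(n_features=65)
# preds = model(torch.randn(8, 120, 65))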
2014 (6)
Coutinho, E.; Weninger, F.; Schuller, B.; and Scherer, K. R. The Munich LSTM-RNN approach to the MediaEval 2014 "Emotion in Music" Task. In CEUR Workshop Proceedings, volume 1263, 2014.

@inproceedings{Coutinho2014a,
  author    = {Coutinho, Eduardo and Weninger, Felix and Schuller, Björn and Scherer, Klaus R.},
  title     = {The Munich LSTM-RNN approach to the MediaEval 2014 "Emotion in Music" Task},
  booktitle = {CEUR Workshop Proceedings},
  volume    = {1263},
  year      = {2014},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84909953530&partnerID=40&md5=96b8657503c52119dd83867ccbdc3264}
}

In this paper we describe TUM's approach for the MediaEval "Emotion in Music" task. The goal of this task is to automatically estimate the emotions expressed by music (in terms of Arousal and Valence) in a time-continuous fashion. Our system consists of Long Short-Term Memory Recurrent Neural Networks (LSTM-RNN) for dynamic Arousal and Valence regression. We used two different sets of acoustic and psychoacoustic features that have been previously proven as effective for emotion prediction in music and speech. The best model yielded an average Pearson's correlation coefficient of 0.354 (Arousal) and 0.198 (Valence), and an average Root Mean Squared Error of 0.102 (Arousal) and 0.079 (Valence).
Spitzer, M.; and Coutinho, E. The effects of expert musical training on the perception of emotions in Bach's Sonata for Unaccompanied Violin No. 1 in G Minor (BWV 1001). Psychomusicology: Music, Mind, and Brain, 24(1): 35–57, 2014.

@article{spitzer2014the1001,
  author  = {Spitzer, Michael and Coutinho, Eduardo},
  title   = {The effects of expert musical training on the perception of emotions in Bach's Sonata for Unaccompanied Violin No. 1 in G Minor (BWV 1001)},
  journal = {Psychomusicology: Music, Mind, and Brain},
  volume  = {24},
  number  = {1},
  pages   = {35--57},
  year    = {2014},
  doi     = {10.1037/pmu0000036},
  url     = {http://doi.apa.org/getdoi.cfm?doi=10.1037/pmu0000036}
}

The focus of this article is the relationship between musical emotion, as expressed by the composer and perceived by the listener, and the structural features of a work of art-music. First, we analyzed a work by J. S. Bach for solo violin (Sonata for Unaccompanied Violin, No. 1 in G Minor, BWV 1001), from the standpoint of how its structural features were associated with the expression of different emotional categories from the perspective of the composer and through the eyes and ears of the music analyst. We then constructed 2 empirical experiments to test whether contemporary listeners could identify the same emotions identified by the analysis, targeted at 2 groups of subjects: relatively inexperienced popular music students; and musicians, composers, and music academics (including some of the world's leading Bach scholars). Our results suggest that emotional attributions by low-level experts are led by surface acoustic features; those by high-expert listeners are led by both acoustic and formal features; that this applied much more to the emotions of Sadness and Tenderness than to Anger or Fear; and that despite the common confusion between Anger and Fear in real life, listeners were capable of differentiating these emotions in the music, supporting analytical findings in the score. (C) 2014 by the American Psychological Association.
\n \n\n \n \n Coutinho, E.; and Timmers, R.,\n editors.\n \n\n\n \n \n \n \n Special Issue on Interactions Between Emotion and Cognition in Music.\n \n \n \n\n\n \n\n\n\n Psychomusicology: a journal of research in music cognition, 24(1): 1-115. 3 2014.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Special Issue on Interactions Between Emotion and Cognition in Music},\n type = {article},\n year = {2014},\n keywords = {edition,journal},\n pages = {1-115},\n volume = {24},\n month = {3},\n publisher = {American Psychological Association},\n id = {f0842552-3d88-343e-b5f0-2d58bf18663a},\n created = {2020-05-29T11:51:38.797Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2020-05-30T17:17:11.103Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {coutinho2014psychomusicologycognition},\n source_type = {CHAP},\n folder_uuids = {22cecb78-7b06-4839-9375-3735f65ff563},\n private_publication = {false},\n abstract = {Welcome to this special issue on interactions between emotion and cognition in music. The combined investigation of emotion and cognition and their interactions has been prominent in general psychology for some years. Nonetheless, we felt that within music research, this area of research has received less systematic attention. With a call for papers related to this theme, our intention was to stimulate more research activity in this area and to develop further our understanding of the ways in which emotions influence music cognition, as well as the ways in which our cognitive appraisal of events, persons and/or objects influence music related emotions. The very good response to our call from authors from a range of countries demonstrated that the theme taps into current developments in music research. A collection of submissions can be found in this issue, while publication of several others can be expected in upcoming issues. From the submissions to this special issue, it became clear that the topic has many ramifications and can be addressed from various perspectives. Together the submissions deepen and broaden our understanding of ways in which cognition and emotion are intertwined in the context of music. The first two articles investigate emotional associations of tonal modulation (Korsakova-Kreyn and Dowling) and emotional associations of musical sequences presented in different tonal modes (Straehley and Loebach), exploring the influence of our tonal knowledge on emotion perception in music. The influence of musical knowledge is also central to the paper by Spitzer and Coutinho who combine music analysis and psychological methods to compare the perception of emotion in Bach’s violin solo sonatas in highly trained experts and regular music connoisseurs. These laboratory based studies on perception of emotion are complemented by an exploration of listeners’ emotional experiences in ecological contexts: Balteş and Miu provide evidence on the importance of empathy and imagery mechanisms for the experience of musical emotions. Using a large-scale online questionnaire, Perdomo-Guevera investigates emotions experienced during music performance and highlights different profiles in performers depending on the context of emotional peak experiences which occur during practice, performance, and/or daily life. Taking an exploratory neuroscientific approach, Leslei, Ojeda and Makeig examine the behavior and brain dynamics related to musical engagement, and demonstrate that musical feelings can be effectively communicated through rhythmic gestures. 
From a theoretical perspective, Habibi and Damasio explore the link between music and feelings, and suggest the existence of a close tie between music and basic processes of life regulations, which have an impact at the individual level but are also significant promoters of socio-cultural organization. They consider that such a link is responsible for the pervasiveness of music-related experiences and activities. In a short report, Dean and Bailes use time series analysis to show that music-analytical large-scale segmentation can be discriminated in non-musicians' continuous perception of change in music, and suggest ways in which musical structure and agency (such as soloist vs. orchestra, singer vs. accompaniment) may influence those perceived changes. The special issue is complemented by a short report on an international summer school held in Sheffield during 2013, communicating new developments and ongoing activities in this psychology of music hub in the UK. Finally, we are pleased to highlight recently completed doctoral theses that directly or indirectly are related to the topic of the special issue, and which further illustrate the volume and breadth of current research efforts in this area and suggest that the future of the field is in good hands. We hope that the issue will foster further interest and research in this area and show that there is considerable scope for follow up issues on related topics. In particular, influences of emotional responses on the perception and cognition of music is an issue still mostly unexplored. Moreover, current models of emotion and cognition in music are still too often segregated. Renee Timmers & Eduardo Coutinho},\n bibtype = {article},\n author = {},\n editor = {Coutinho, E and Timmers, R},\n journal = {Psychomusicology: a journal of research in music cognition},\n number = {1}\n}
\n
\n\n\n
\n Welcome to this special issue on interactions between emotion and cognition in music. The combined investigation of emotion and cognition and their interactions has been prominent in general psychology for some years. Nonetheless, we felt that within music research, this area of research has received less systematic attention. With a call for papers related to this theme, our intention was to stimulate more research activity in this area and to develop further our understanding of the ways in which emotions influence music cognition, as well as the ways in which our cognitive appraisal of events, persons and/or objects influence music related emotions. The very good response to our call from authors from a range of countries demonstrated that the theme taps into current developments in music research. A collection of submissions can be found in this issue, while publication of several others can be expected in upcoming issues. From the submissions to this special issue, it became clear that the topic has many ramifications and can be addressed from various perspectives. Together the submissions deepen and broaden our understanding of ways in which cognition and emotion are intertwined in the context of music. The first two articles investigate emotional associations of tonal modulation (Korsakova-Kreyn and Dowling) and emotional associations of musical sequences presented in different tonal modes (Straehley and Loebach), exploring the influence of our tonal knowledge on emotion perception in music. The influence of musical knowledge is also central to the paper by Spitzer and Coutinho who combine music analysis and psychological methods to compare the perception of emotion in Bach’s violin solo sonatas in highly trained experts and regular music connoisseurs. These laboratory based studies on perception of emotion are complemented by an exploration of listeners’ emotional experiences in ecological contexts: Balteş and Miu provide evidence on the importance of empathy and imagery mechanisms for the experience of musical emotions. Using a large-scale online questionnaire, Perdomo-Guevera investigates emotions experienced during music performance and highlights different profiles in performers depending on the context of emotional peak experiences which occur during practice, performance, and/or daily life. Taking an exploratory neuroscientific approach, Leslei, Ojeda and Makeig examine the behavior and brain dynamics related to musical engagement, and demonstrate that musical feelings can be effectively communicated through rhythmic gestures. From a theoretical perspective, Habibi and Damasio explore the link between music and feelings, and suggest the existence of a close tie between music and basic processes of life regulations, which have an impact at the individual level but are also significant promoters of socio-cultural organization. They consider that such a link is responsible for the pervasiveness of music-related experiences and activities. In a short report, Dean and Bailes use time series analysis to show that music-analytical large-scale segmentation can be discriminated in non-musicians' continuous perception of change in music, and suggest ways in which musical structure and agency (such as soloist vs. orchestra, singer vs. accompaniment) may influence those perceived changes. 
The special issue is complemented by a short report on an international summer school held in Sheffield in 2013, communicating new developments and ongoing activities in this psychology of music hub in the UK. Finally, we are pleased to highlight recently completed doctoral theses that are directly or indirectly related to the topic of the special issue, and which further illustrate the volume and breadth of current research efforts in this area and suggest that the future of the field is in good hands. We hope that the issue will foster further interest and research in this area and show that there is considerable scope for follow-up issues on related topics. In particular, the influence of emotional responses on the perception and cognition of music remains largely unexplored. Moreover, current models of emotion and cognition in music are still too often segregated. Renee Timmers & Eduardo Coutinho\n
\n\n\n
\n\n\n
\n \n\n \n \n Coutinho, E.; Deng, J.; and Schuller, B.\n\n\n \n \n \n \n \n Transfer learning emotion manifestation across music and speech.\n \n \n \n \n\n\n \n\n\n\n In Proceedings of the International Joint Conference on Neural Networks, pages 3592-3598, 7 2014. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"TransferWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{\n title = {Transfer learning emotion manifestation across music and speech},\n type = {inproceedings},\n year = {2014},\n keywords = {article,conference},\n pages = {3592-3598},\n websites = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6889814},\n month = {7},\n publisher = {IEEE},\n id = {dec34897-c5c4-365c-97fd-741efea93123},\n created = {2020-05-29T11:51:38.964Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2023-05-15T08:14:21.832Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {coutinho2014transferspeech},\n source_type = {inproceedings},\n folder_uuids = {aac08d0d-38e7-4f4e-a381-5271c5c099ce},\n private_publication = {false},\n abstract = {In this article, we focus on time-continuous predictions of emotion in music and speech, and the transfer of learning from one domain to the other. First, we compare the use of Recurrent Neural Networks (RNN) with standard hidden units (Simple Recurrent Network SRN) and Long-Short Term Memory (LSTM) blocks for intra-domain acoustic emotion recognition. We show that LSTM networks outperform SRN, and we explain, in average, 74%/59% (music) and 42%/29% (speech) of the variance in Arousal/Valence. Next, we evaluate whether cross-domain predictions of emotion are a viable option for acoustic emotion recognition, and we test the use of Transfer Learning (TL) for feature space adaptation. In average, our models are able to explain 70%/43% (music) and 28%/ll% (speech) of the variance in Arousal/Valence. Overall, results indicate a good cross-domain generalization performance, particularly for the model trained on speech and tested on music without pre-encoding of the input features. To our best knowledge, this is the first demonstration of cross-modal time-continuous predictions of emotion in the acoustic domain.},\n bibtype = {inproceedings},\n author = {Coutinho, Eduardo and Deng, Jun and Schuller, Bjorn},\n doi = {10.1109/IJCNN.2014.6889814},\n booktitle = {Proceedings of the International Joint Conference on Neural Networks}\n}
\n
\n\n\n
\n In this article, we focus on time-continuous predictions of emotion in music and speech, and the transfer of learning from one domain to the other. First, we compare the use of Recurrent Neural Networks (RNN) with standard hidden units (Simple Recurrent Network, SRN) and Long Short-Term Memory (LSTM) blocks for intra-domain acoustic emotion recognition. We show that LSTM networks outperform SRN, and we explain, on average, 74%/59% (music) and 42%/29% (speech) of the variance in Arousal/Valence. Next, we evaluate whether cross-domain predictions of emotion are a viable option for acoustic emotion recognition, and we test the use of Transfer Learning (TL) for feature space adaptation. On average, our models are able to explain 70%/43% (music) and 28%/11% (speech) of the variance in Arousal/Valence. Overall, results indicate a good cross-domain generalization performance, particularly for the model trained on speech and tested on music without pre-encoding of the input features. To the best of our knowledge, this is the first demonstration of cross-modal time-continuous predictions of emotion in the acoustic domain.\n
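\n For readers who want a concrete starting point, the following is a minimal Python/PyTorch sketch of the kind of recurrent regressor compared in this paper: an LSTM that maps a sequence of acoustic feature frames to per-frame arousal/valence values. It is not the authors' implementation; the feature dimensionality, network size, and random training data are assumptions made purely for illustration.\n

import torch
import torch.nn as nn

class SeqEmotionRegressor(nn.Module):
    """Map a sequence of acoustic feature frames to per-frame arousal/valence."""
    def __init__(self, n_features=40, hidden=64):
        super().__init__()
        self.rnn = nn.LSTM(n_features, hidden, batch_first=True)
        self.head = nn.Linear(hidden, 2)   # 2 outputs per frame: arousal, valence

    def forward(self, x):                  # x: (batch, time, n_features)
        h, _ = self.rnn(x)
        return self.head(h)                # (batch, time, 2)

# Toy training loop on random data (a stand-in for music or speech features).
model = SeqEmotionRegressor()
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
loss_fn = nn.MSELoss()
x = torch.randn(8, 100, 40)                # 8 clips, 100 frames, 40 assumed features
y = torch.randn(8, 100, 2)                 # continuous arousal/valence targets
for _ in range(5):
    opt.zero_grad()
    loss = loss_fn(model(x), y)
    loss.backward()
    opt.step()
# Cross-domain transfer, as studied in the paper, would then amount to training
# such a model on one domain (e.g. speech) and evaluating it on the other (music).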
\n\n\n
\n\n\n
\n \n\n \n \n Zhang, Z.; Coutinho, E.; Deng, J.; and Schuller, B.\n\n\n \n \n \n \n \n Distributing recognition in computational paralinguistics.\n \n \n \n \n\n\n \n\n\n\n IEEE Transactions on Affective Computing, 5(4): 406-417. 10 2014.\n \n\n\n\n
\n\n\n\n \n \n \"DistributingWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Distributing recognition in computational paralinguistics},\n type = {article},\n year = {2014},\n keywords = {article,journal},\n pages = {406-417},\n volume = {5},\n websites = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000346043900005&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=f3ec48df247ee1138ccd8d3ba59bacc2,http://ieeexplore.ieee.org/document/6906},\n month = {10},\n day = {1},\n id = {debe3fe6-0ca9-3a53-8af5-1c235385b724},\n created = {2020-05-29T11:51:39.067Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2020-05-30T17:16:40.774Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {zhang2014distributingparalinguistics},\n source_type = {article},\n folder_uuids = {116db2f1-e6ac-4780-bccf-a977325250cd,99880aa7-55df-4b45-bfce-0ffc00b23ced},\n private_publication = {false},\n abstract = {In this paper, we propose and evaluate a distributed system for multiple Computational Paralinguistics tasks in a client-server architecture. The client side deals with feature extraction, compression, and bit-stream formatting, while the server side performs the reverse process, plus model training, and classification. The proposed architecture favors large-scale data collection and continuous model updating, personal information protection, and transmission bandwidth optimization. In order to preliminarily investigate the feasibility and reliability of the proposed system, we focus on the trade-off between transmission bandwidth and recognition accuracy. We conduct large-scale evaluations of some key functions, namely, feature compression/decompression, model training and classification, on five common paralinguistic tasks related to emotion, intoxication, pathology, age and gender. We show that, for most tasks, with compression ratios up to 40 (bandwidth savings up to 97.5 percent), the recognition accuracies are very close to the baselines. Our results encourage future exploitation of the system proposed in this paper, and demonstrate that we are not far from the creation of robust distributed multi-task paralinguistic recognition systems which can be applied to a myriad of everyday life scenarios.},\n bibtype = {article},\n author = {Zhang, Zixing and Coutinho, Eduardo and Deng, Jun and Schuller, Björn},\n doi = {10.1109/TAFFC.2014.2359655},\n journal = {IEEE Transactions on Affective Computing},\n number = {4}\n}
\n
\n\n\n
\n In this paper, we propose and evaluate a distributed system for multiple Computational Paralinguistics tasks in a client-server architecture. The client side deals with feature extraction, compression, and bit-stream formatting, while the server side performs the reverse process, plus model training, and classification. The proposed architecture favors large-scale data collection and continuous model updating, personal information protection, and transmission bandwidth optimization. In order to preliminarily investigate the feasibility and reliability of the proposed system, we focus on the trade-off between transmission bandwidth and recognition accuracy. We conduct large-scale evaluations of some key functions, namely, feature compression/decompression, model training and classification, on five common paralinguistic tasks related to emotion, intoxication, pathology, age and gender. We show that, for most tasks, with compression ratios up to 40 (bandwidth savings up to 97.5 percent), the recognition accuracies are very close to the baselines. Our results encourage future exploitation of the system proposed in this paper, and demonstrate that we are not far from the creation of robust distributed multi-task paralinguistic recognition systems which can be applied to a myriad of everyday life scenarios.\n
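\n As a rough illustration of the client/server split described above, here is a small numpy sketch in which the client uniformly quantises a feature matrix to 8-bit codes before transmission and the server reconstructs approximate values. The 8-bit uniform quantiser, feature dimensions, and random data are illustrative assumptions, not the compression scheme evaluated in the paper.\n

import numpy as np

def client_compress(features, bits=8):
    """Uniformly quantise float features to unsigned integer codes for transmission."""
    lo, hi = features.min(), features.max()
    scale = (2**bits - 1) / (hi - lo + 1e-12)
    codes = np.round((features - lo) * scale).astype(np.uint8)
    return codes, (lo, scale)

def server_decompress(codes, params):
    """Reconstruct approximate feature values from the received codes."""
    lo, scale = params
    return codes.astype(np.float64) / scale + lo

feats = np.random.randn(1000, 384)          # e.g. one utterance's feature matrix (assumed size)
codes, params = client_compress(feats)
recon = server_decompress(codes, params)
ratio = feats.nbytes / codes.nbytes         # float64 -> uint8 gives a ratio of 8
err = np.abs(feats - recon).max()
print(f"compression ratio ~{ratio:.0f}x, max reconstruction error {err:.4f}")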
\n\n\n
\n\n\n
\n \n\n \n \n Coutinho, E.; and Scherer, K., R.\n\n\n \n \n \n \n Geneva Music Background Questionnaire (GEMUBAQ).\n \n \n \n\n\n \n\n\n\n Technical Report Swiss Center for Affective Sciences, 5 2014.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@techreport{\n title = {Geneva Music Background Questionnaire (GEMUBAQ)},\n type = {techreport},\n year = {2014},\n keywords = {report},\n month = {5},\n publisher = {Swiss Center for Affective Sciences},\n city = {Geneva, Switzerland},\n institution = {Swiss Center for Affective Sciences},\n id = {3f508823-4546-3f27-853e-5bf36958112f},\n created = {2020-05-29T11:51:39.233Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2020-05-30T17:16:32.320Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {coutinho2014genevagemubaq},\n source_type = {RPRT},\n folder_uuids = {116db2f1-e6ac-4780-bccf-a977325250cd,8c7cc370-0269-4fc3-b738-15ea2990ae1e},\n private_publication = {false},\n abstract = {This report describes work carried out by the authors in preparation for the development of a Music Background Questionnaire intended to serve as a standard instrument to measure important background variables such as music activities, music abilities and attitudes, and frequent music listening experiences. In the course of development we decided to join forces with Tan-Chyuan Chin and Nikki Rickard who had been working on individual differences in music engagements. The result of the joint work is published as The MUSEBAQ: A Modular Tool for Measuring Multiple Dimensions of Music Engagement. The purpose of this brief report is to document the work performed in the context of the Music and Emotion project of the Swiss Center of Affective Sciences in 2014.},\n bibtype = {techreport},\n author = {Coutinho, E and Scherer, K R}\n}
\n
\n\n\n
\n This report describes work carried out by the authors in preparation for the development of a Music Background Questionnaire intended to serve as a standard instrument to measure important background variables such as music activities, music abilities and attitudes, and frequent music listening experiences. In the course of development we decided to join forces with Tan-Chyuan Chin and Nikki Rickard who had been working on individual differences in music engagements. The result of the joint work is published as The MUSEBAQ: A Modular Tool for Measuring Multiple Dimensions of Music Engagement. The purpose of this brief report is to document the work performed in the context of the Music and Emotion project of the Swiss Center of Affective Sciences in 2014.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2013\n \n \n (7)\n \n \n
\n
\n \n \n
\n \n\n \n \n Coutinho, E.; Fantini, B.; and Scherer, K., R.\n\n\n \n \n \n \n \n Music, Voice and Emotion (Special Issue).\n \n \n \n \n\n\n \n\n\n\n International Journal of Interdisciplinary Music Studies, 7(1&2). 2013.\n \n\n\n\n
\n\n\n\n \n \n \"Music,Website\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Music, Voice and Emotion (Special Issue)},\n type = {article},\n year = {2013},\n volume = {7},\n websites = {http://musicstudies.org/all-issues/volume-7-2013/},\n id = {e29bacd0-33ff-38ab-a3cd-edff9b1bc43f},\n created = {2018-12-10T11:57:57.002Z},\n file_attached = {false},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2021-09-17T10:31:01.908Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Coutinho2013},\n private_publication = {false},\n bibtype = {article},\n author = {Coutinho, Eduardo and Fantini, Bernardino and Scherer, Klaus R.},\n journal = {International Journal of Interdisciplinary Music Studies},\n number = {1&2}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n Dibben, N., J.; and Coutinho, E.\n\n\n \n \n \n \n The influence of individual differences on emotion perception in music and speech prosody.\n \n \n \n\n\n \n\n\n\n In Proceedings of the 3rd International Conference on Music and Emotion (ICME’3), Jyväskylä, Finland, June 11-15, pages 1, 2013. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{\n title = {The influence of individual differences on emotion perception in music and speech prosody},\n type = {inproceedings},\n year = {2013},\n pages = {1},\n id = {b3384201-e12a-3046-835d-e48fb480ff8e},\n created = {2019-09-18T08:06:23.629Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2020-05-29T12:00:31.731Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {dibben2013individualprosody},\n source_type = {inproceedings},\n folder_uuids = {a2a583e8-b0a3-48f9-900f-27e15c9a7f71},\n private_publication = {false},\n abstract = {The study reported here investigates the influence of individual differences on perception of emotion in music and speech: specifically we tested the mediating influence of personality, emotional intelligence, gender and musical training. A behavioural study collected two main types of data: continuous ratings of emotion perceived while listening to extracts of music and speech, using a computer interface which modelled emotion on two dimensions (arousal and valence), and demographic information including measures of personality (TIPI) and emotional intelligence (TEIQue-SF). We employed the novel statistical method of functional analysis of variance on the time series data which revealed a small number of statistically significant differences according to gender, emotional intelligence, emotional stability and musical training. This is the first time to our knowledge that effects of individual differences on continuous measures of emotion have been tested. \\n},\n bibtype = {inproceedings},\n author = {Dibben, N J and Coutinho, E},\n booktitle = {Proceedings of the 3rd International Conference on Music and Emotion (ICME’3), Jyväskylä, Finland, June 11-15},\n keywords = {abstract,conference}\n}
\n
\n\n\n
\n The study reported here investigates the influence of individual differences on perception of emotion in music and speech: specifically we tested the mediating influence of personality, emotional intelligence, gender and musical training. A behavioural study collected two main types of data: continuous ratings of emotion perceived while listening to extracts of music and speech, using a computer interface which modelled emotion on two dimensions (arousal and valence), and demographic information including measures of personality (TIPI) and emotional intelligence (TEIQue-SF). We employed the novel statistical method of functional analysis of variance on the time series data which revealed a small number of statistically significant differences according to gender, emotional intelligence, emotional stability and musical training. This is the first time to our knowledge that effects of individual differences on continuous measures of emotion have been tested.\n
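\n Functional analysis of variance itself requires specialised tooling; as a much cruder stand-in, the Python sketch below runs a pointwise one-way ANOVA over synthetic continuous-rating curves from two hypothetical listener groups, just to show the shape of a time-resolved group comparison. The group labels, data, and effect size are invented for the example.\n

import numpy as np
from scipy.stats import f_oneway

rng = np.random.default_rng(0)
t = 200                                          # time points in the continuous ratings
group_a = rng.normal(0.0, 1.0, size=(20, t))     # e.g. musically trained listeners (assumed)
group_b = rng.normal(0.2, 1.0, size=(20, t))     # e.g. untrained listeners (assumed)

# Pointwise F-test at every time point: a rough stand-in for functional ANOVA.
# A real analysis would smooth the curves and correct for multiple comparisons.
f_vals, p_vals = zip(*(f_oneway(group_a[:, i], group_b[:, i]) for i in range(t)))
significant = np.array(p_vals) < 0.05
print(f"{significant.sum()} of {t} time points differ at p < .05 (uncorrected)")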
\n\n\n
\n\n\n
\n \n\n \n \n Coutinho, E.; and Dibben, N.\n\n\n \n \n \n \n \n Psychoacoustic cues to emotion in speech prosody and music.\n \n \n \n \n\n\n \n\n\n\n Cognition and Emotion, 27(4): 658-684. 6 2013.\n \n\n\n\n
\n\n\n\n \n \n \"PsychoacousticWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Psychoacoustic cues to emotion in speech prosody and music},\n type = {article},\n year = {2013},\n keywords = {article,journal},\n pages = {658-684},\n volume = {27},\n websites = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2%5C&SrcApp=PARTNER_APP%5C&SrcAuth=LinksAMR%5C&KeyUT=WOS:000319106500006%5C&DestLinkType=FullRecord%5C&DestApp=ALL_WOS%5C&UsrCustomerID=f3ec48df247ee1138ccd8d3ba59bacc2},\n month = {6},\n id = {2a5a8467-79ff-3480-9ca8-9521b2d9c322},\n created = {2020-05-29T11:51:38.754Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2023-05-15T08:14:21.967Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {coutinho2013psychoacousticmusic},\n source_type = {article},\n folder_uuids = {99880aa7-55df-4b45-bfce-0ffc00b23ced},\n private_publication = {false},\n abstract = {There is strong evidence of shared acoustic profiles common to the expression of emotions in music and speech, yet relatively limited understanding of the specific psychoacoustic features involved. This study combined a controlled experiment and computational modelling to investigate the perceptual codes associated with the expression of emotion in the acoustic domain. The empirical stage of the study provided continuous human ratings of emotions perceived in excerpts of film music and natural speech samples. The computational stage created a computer model that retrieves the relevant information from the acoustic stimuli and makes predictions about the emotional expressiveness of speech and music close to the responses of human subjects. We show that a significant part of the listeners' second-by-second reported emotions to music and speech prosody can be predicted from a set of seven psychoacoustic features: loudness, tempo/speech rate, melody/prosody contour, spectral centroid, spectral flux, sharpness, and roughness. The implications of these results are discussed in the context of cross-modal similarities in the communication of emotion in the acoustic domain. © 2013 Copyright Taylor and Francis Group, LLC.},\n bibtype = {article},\n author = {Coutinho, Eduardo and Dibben, Nicola},\n doi = {10.1080/02699931.2012.732559},\n journal = {Cognition and Emotion},\n number = {4}\n}
\n
\n\n\n
\n There is strong evidence of shared acoustic profiles common to the expression of emotions in music and speech, yet relatively limited understanding of the specific psychoacoustic features involved. This study combined a controlled experiment and computational modelling to investigate the perceptual codes associated with the expression of emotion in the acoustic domain. The empirical stage of the study provided continuous human ratings of emotions perceived in excerpts of film music and natural speech samples. The computational stage created a computer model that retrieves the relevant information from the acoustic stimuli and makes predictions about the emotional expressiveness of speech and music close to the responses of human subjects. We show that a significant part of the listeners' second-by-second reported emotions to music and speech prosody can be predicted from a set of seven psychoacoustic features: loudness, tempo/speech rate, melody/prosody contour, spectral centroid, spectral flux, sharpness, and roughness. The implications of these results are discussed in the context of cross-modal similarities in the communication of emotion in the acoustic domain. © 2013 Copyright Taylor and Francis Group, LLC.\n
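\n To make the feature set more tangible, here is a short Python sketch (assuming the librosa and scikit-learn libraries and a placeholder audio file) that computes three of the seven cues mentioned above (a loudness proxy via RMS energy, spectral centroid, and spectral flux) and regresses stand-in arousal/valence curves on them. It is only an illustrative approximation, not the paper's feature pipeline or model.\n

import numpy as np
import librosa
from sklearn.linear_model import LinearRegression

y, sr = librosa.load("excerpt.wav", sr=22050)        # "excerpt.wav" is a placeholder path
S = np.abs(librosa.stft(y, n_fft=2048, hop_length=512))

rms = librosa.feature.rms(S=S)[0]                     # crude per-frame loudness proxy
centroid = librosa.feature.spectral_centroid(S=S, sr=sr)[0]
flux = np.concatenate([[0.0], np.sqrt((np.diff(S, axis=1) ** 2).sum(axis=0))])

X = np.column_stack([rms, centroid, flux])            # 3 of the 7 cues, one row per frame
ratings = np.random.randn(X.shape[0], 2)              # stand-in arousal/valence curves

model = LinearRegression().fit(X, ratings)
print("variance explained (R^2):", model.score(X, ratings))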
\n\n\n
\n\n\n
\n \n\n \n \n Coutinho, E.; Fantini, B.; and Scherer, K., R.\n\n\n \n \n \n \n \n Special Issue on Music, Voice and Emotion.\n \n \n \n \n\n\n \n\n\n\n Volume 7 . International Journal of Interdisciplinary Music Studies. Coutinho, E.; Fantini, B.; and Scherer, K., R., editor(s). 2013.\n \n\n\n\n
\n\n\n\n \n \n \"InternationalWebsite\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@inbook{\n type = {inbook},\n year = {2013},\n keywords = {edition,journal},\n volume = {7},\n issue = {1&2},\n websites = {http://musicstudies.org/all-issues/volume-7-2013/},\n id = {f5220d83-9f39-3dfa-9ff1-501dc0bcae1c},\n created = {2020-05-29T11:51:38.857Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2023-07-03T09:46:08.601Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {coutinho2013internationalstudies},\n source_type = {CHAP},\n folder_uuids = {22cecb78-7b06-4839-9375-3735f65ff563},\n private_publication = {false},\n bibtype = {inbook},\n author = {Coutinho, Eduardo and Fantini, Bernardino and Scherer, Klaus R.},\n editor = {Coutinho, E and Fantini, B and Scherer, K R},\n chapter = {Special Issue on Music, Voice and Emotion},\n title = {International Journal of Interdisciplinary Music Studies}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n Coutinho, E.; and Scherer, K., R.\n\n\n \n \n \n \n Emotions Induced by Music: the Role of the Listening Context and Modality of Presentation.\n \n \n \n\n\n \n\n\n\n In Luck, G.; and Brabant, O., editor(s), Proceedings of the 3rd International Conference on Music and Emotion (ICME’3), pages 1, 2013. University of Jyväskylä, Department of Music.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{\n title = {Emotions Induced by Music: the Role of the Listening Context and Modality of Presentation},\n type = {inproceedings},\n year = {2013},\n keywords = {abstract,conference},\n pages = {1},\n publisher = {University of Jyväskylä, Department of Music.},\n city = {Jyväskylä, Finland},\n id = {18d5e4d4-ba40-3886-90a2-b27308da6bea},\n created = {2020-05-29T11:51:39.028Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2020-05-30T17:17:02.217Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {coutinho2013emotionspresentation},\n source_type = {inproceedings},\n folder_uuids = {a2a583e8-b0a3-48f9-900f-27e15c9a7f71},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Coutinho, E and Scherer, K R},\n editor = {Luck, Geoff and Brabant, Olivier},\n booktitle = {Proceedings of the 3rd International Conference on Music and Emotion (ICME’3)}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n Coutinho, E.; and Dibben, N.\n\n\n \n \n \n \n Emotions perceived in music and speech: relationships between psychoacoustic features, second-by-second subjective feelings of emotion and physiological responses.\n \n \n \n\n\n \n\n\n\n In Luck, G.; and Brabant, O., editor(s), 3rd International Conference on Music & Emotion, pages 1, 2013. University of Jyväskylä, Department of Music.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{\n title = {Emotions perceived in music and speech: relationships between psychoacoustic features, second-by-second subjective feelings of emotion and physiological responses},\n type = {inproceedings},\n year = {2013},\n keywords = {Conference,abstract},\n pages = {1},\n publisher = {University of Jyväskylä, Department of Music.},\n city = {Jyväskylä, Finland},\n id = {cfd4d04b-de5e-3c66-9849-40abaf5e3378},\n created = {2020-05-29T11:51:39.123Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2020-05-30T17:16:42.124Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {coutinho2013emotionsresponses},\n source_type = {inproceedings},\n folder_uuids = {a2a583e8-b0a3-48f9-900f-27e15c9a7f71},\n private_publication = {false},\n abstract = {There is strong evidence that the structure of affective responses to auditory stimuli is largely dependent on dynamic temporal patterns in low-level music structural parameters. Previous studies have shown that spatiotemporal dynamics in a small set of psychoacoustic features can predict two fundamental psychological dimensions of emotion: valence and arousal. The study reported here aims to determine the extent to which specific physiological responses can be used in tandem with psychoacoustic cues to predict emotional responses to music as well as to speech. In a behavioural study collected two main types of data: continuous ratings of emotion perceived while listening to extracts of music and speech, using a computer interface which modelled emotion on two dimensions (arousal and valence), and physiological measures (respiration, heart rate, skin conductance, skin temperature, and blood pressure) taken while listening to each stimulus. Then we analysed the existence of linear and non-linear correlations and associations between psychoacoutic features extracted from music and speech, physiological activity, and self-reported arousal and valence. For both domains, we found very strong correlations showing that physiological measures and psychoacoutic cues account for a large proportion of the variance in the reported arousal and valence. Strong correlations also emerged between psychocoutic cues and physiological responses, suggesting a possible route for the elicitation of subjective feelings. When comparing music and speech directly we found that while the significant changes to physiological measures for speech stimuli are confined to a small number of features, the physiological responses for music are much more diffuse across the various measures.},\n bibtype = {inproceedings},\n author = {Coutinho, Eduardo and Dibben, Nicola},\n editor = {Luck, Geoff and Brabant, Olivier},\n booktitle = {3rd International Conference on Music & Emotion}\n}
\n
\n\n\n
\n There is strong evidence that the structure of affective responses to auditory stimuli is largely dependent on dynamic temporal patterns in low-level music structural parameters. Previous studies have shown that spatiotemporal dynamics in a small set of psychoacoustic features can predict two fundamental psychological dimensions of emotion: valence and arousal. The study reported here aims to determine the extent to which specific physiological responses can be used in tandem with psychoacoustic cues to predict emotional responses to music as well as to speech. In a behavioural study we collected two main types of data: continuous ratings of emotion perceived while listening to extracts of music and speech, using a computer interface which modelled emotion on two dimensions (arousal and valence), and physiological measures (respiration, heart rate, skin conductance, skin temperature, and blood pressure) taken while listening to each stimulus. We then analysed the existence of linear and non-linear correlations and associations between psychoacoustic features extracted from music and speech, physiological activity, and self-reported arousal and valence. For both domains, we found very strong correlations showing that physiological measures and psychoacoustic cues account for a large proportion of the variance in the reported arousal and valence. Strong correlations also emerged between psychoacoustic cues and physiological responses, suggesting a possible route for the elicitation of subjective feelings. When comparing music and speech directly, we found that while the significant changes to physiological measures for speech stimuli are confined to a small number of features, the physiological responses for music are much more diffuse across the various measures.\n
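\n The correlational analysis can be illustrated with a few lines of Python; the sketch below computes Pearson and Spearman associations between synthetic physiological channels and a synthetic arousal curve. The channel names mirror those listed above, but the data and effect sizes are invented.\n

import numpy as np
from scipy.stats import pearsonr, spearmanr

rng = np.random.default_rng(1)
n = 300                                               # second-by-second samples (assumed length)
arousal = rng.normal(size=n)
physio = {
    "heart_rate":       arousal * 0.6 + rng.normal(scale=0.8, size=n),
    "skin_conductance": arousal * 0.4 + rng.normal(scale=0.9, size=n),
    "respiration":      rng.normal(size=n),           # an unrelated channel, for contrast
}

# Linear (Pearson) and rank-based (Spearman) associations per channel,
# loosely mirroring the correlational analysis described in the abstract.
for name, signal in physio.items():
    r, p = pearsonr(signal, arousal)
    rho, _ = spearmanr(signal, arousal)
    print(f"{name:16s} r={r:+.2f} (p={p:.3f})  rho={rho:+.2f}")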
\n\n\n
\n\n\n
\n \n\n \n \n Scherer, K., R.; and Coutinho, E.\n\n\n \n \n \n \n \n How music creates emotion.\n \n \n \n \n\n\n \n\n\n\n of Series in affective science. The Emotional Power of Music, pages 121-145. Cochrane, T.; Fantini, B.; and Scherer, K., R., editor(s). Oxford University Press, 2013.\n \n\n\n\n
\n\n\n\n \n \n \"TheWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@inbook{\n type = {inbook},\n year = {2013},\n pages = {121-145},\n issue = {10},\n websites = {http://www.oxfordscholarship.com/view/10.1093/acprof:oso/9780199654888.001.0001/acprof-9780199654888-chapter-10,https://liverpool.idm.oclc.org/login?url=https://search.ebscohost.com/login.aspx?direct=true&db=psyh&AN=2013-32821-010&site=ehost-live&scope=si},\n publisher = {Oxford University Press},\n series = {Series in affective science},\n id = {659327c3-1d17-3477-a52f-777bd3c7ddeb},\n created = {2020-05-30T17:34:32.953Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2022-02-02T13:58:44.809Z},\n read = {true},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {scherer2013howapproach},\n source_type = {CHAP},\n folder_uuids = {5db95977-632e-457e-b1b5-8ad03c3d17c4},\n private_publication = {false},\n abstract = {- In this chapter we propose an integrated framework that links the perception and cognition of music to the production of emotion by means of psychobiological pathways recruiting various subsystems of the central and autonomous nervous systems. These pathways (which we call routes) are Appraisal, Memory, Entrainment, Empathy, and Contagion, and they permit to describe the nature and substrate of a wide variety of emotional responses to music. We focus on music characteristics, more precisely the musical structure and performance variables, as the determinant factors of emotional indication, while considering a variety of possible modulatory effects related to listener characteristics and states, the performer, and the listening context.},\n bibtype = {inbook},\n author = {Scherer, Klaus R. and Coutinho, Eduardo},\n editor = {Cochrane, T and Fantini, B and Scherer, K R},\n doi = {10.1093/acprof:oso/9780199654888.003.0010},\n chapter = {How music creates emotion},\n title = {The Emotional Power of Music},\n keywords = {book,chapter}\n}
\n
\n\n\n
\n In this chapter we propose an integrated framework that links the perception and cognition of music to the production of emotion by means of psychobiological pathways recruiting various subsystems of the central and autonomic nervous systems. These pathways (which we call routes) are Appraisal, Memory, Entrainment, Empathy, and Contagion, and they make it possible to describe the nature and substrate of a wide variety of emotional responses to music. We focus on music characteristics, more precisely the musical structure and performance variables, as the determinant factors of emotional indication, while considering a variety of possible modulatory effects related to listener characteristics and states, the performer, and the listening context.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2012\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n Coutinho, E.; and Scherer, K., R.\n\n\n \n \n \n \n Towards a Brief Domain-specific Self-report Scale for the Rapid Assessment of Musically Induced Emotions.\n \n \n \n\n\n \n\n\n\n In Cambouropoulos, E.; Tsougras, C.; Mavromatis, P.; and Pastiadis, C., editor(s), Proceedings of the 12th International Conference of Music Perception and Cognition (ICMPC12), pages 229, 2012. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{\n title = {Towards a Brief Domain-specific Self-report Scale for the Rapid Assessment of Musically Induced Emotions},\n type = {inproceedings},\n year = {2012},\n keywords = {Conference,abstract},\n pages = {229},\n id = {e7500ca5-1c4e-3c2a-a137-4377ea35f715},\n created = {2020-05-29T11:51:38.897Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2020-05-30T17:16:55.584Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {coutinho2012towardsemotions},\n source_type = {inproceedings},\n folder_uuids = {a2a583e8-b0a3-48f9-900f-27e15c9a7f71},\n private_publication = {false},\n abstract = {The Geneva Emotional Music Scale (GEMS; Zentner, Grandjean, & Scherer, 2008) is the first domain-specific model of emotion specifically developed to measure musically evoked subjective feelings of emotion (particularly in live performances). The scale consists of a list of 45 emotion terms pertaining to nine emotion factors or categories, describing a pervasive universe of feelings of emotion frequently experienced while listening to music (particularly in live performances). In this paper, we highlight and address two potential limitations of the GEMS. The first one is related to the fact that the GEMS comprises a high number of elements to be rated, an aspect that creates many difficulties in fieldwork studies where a rapid assessment is often necessary. The second, is the extent to which the GEMS may be consistently used to discern the emotions experienced while listening to music genres differing significantly from those that led to its development (especially due to an overrepresentation of classical music performances).},\n bibtype = {inproceedings},\n author = {Coutinho, E and Scherer, K R},\n editor = {Cambouropoulos, E and Tsougras, C and Mavromatis, P and Pastiadis, C},\n booktitle = {Proceedings of the 12th International Conference of Music Perception and Cognition (ICMPC12)}\n}
\n
\n\n\n
\n The Geneva Emotional Music Scale (GEMS; Zentner, Grandjean, & Scherer, 2008) is the first domain-specific model of emotion specifically developed to measure musically evoked subjective feelings of emotion (particularly in live performances). The scale consists of a list of 45 emotion terms pertaining to nine emotion factors or categories, describing a pervasive universe of feelings of emotion frequently experienced while listening to music. In this paper, we highlight and address two potential limitations of the GEMS. The first is that the GEMS comprises a large number of elements to be rated, which creates difficulties in fieldwork studies where a rapid assessment is often necessary. The second is the extent to which the GEMS may be consistently used to discern the emotions experienced while listening to music genres differing significantly from those that led to its development (especially due to an overrepresentation of classical music performances).\n
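\n For context on how such a scale is used in practice, the following tiny Python sketch aggregates item ratings into factor scores; the item names and the item-to-factor mapping are hypothetical placeholders, not the actual GEMS items or the brief scale proposed in this paper.\n

from statistics import mean

# Hypothetical item -> factor mapping; the real GEMS has 45 terms in 9 factors.
FACTOR_ITEMS = {
    "wonder":       ["moved", "filled_with_wonder"],
    "nostalgia":    ["nostalgic", "dreamy"],
    "peacefulness": ["calm", "relaxed"],
}

def score_gems(ratings):
    """Average the item ratings belonging to each factor (ratings on a 1-5 scale)."""
    return {factor: mean(ratings[item] for item in items)
            for factor, items in FACTOR_ITEMS.items()}

responses = {"moved": 4, "filled_with_wonder": 5, "nostalgic": 2,
             "dreamy": 3, "calm": 5, "relaxed": 4}
print(score_gems(responses))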
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2011\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n Coutinho, E.; and Cangelosi, A.\n\n\n \n \n \n \n \n Musical Emotions: Predicting Second-by-Second Subjective Feelings of Emotion From Low-Level Psychoacoustic Features and Physiological Measurements.\n \n \n \n \n\n\n \n\n\n\n Emotion, 11(4): 921-937. 8 2011.\n \n\n\n\n
\n\n\n\n \n \n \"MusicalWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Musical Emotions: Predicting Second-by-Second Subjective Feelings of Emotion From Low-Level Psychoacoustic Features and Physiological Measurements},\n type = {article},\n year = {2011},\n keywords = {article,journal},\n pages = {921-937},\n volume = {11},\n websites = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000294594400021&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=f3ec48df247ee1138ccd8d3ba59bacc2,http://doi.apa.org/getdoi.cfm?doi=10.103},\n month = {8},\n id = {b6765f38-d386-3dd2-900c-061d0d93556e},\n created = {2020-05-29T11:51:38.748Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2020-05-30T17:17:09.709Z},\n read = {true},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Coutinho2011a},\n source_type = {article},\n folder_uuids = {3f6fce84-0b1d-41cd-9582-3954eada97d3,031e3fe6-33ba-42b4-a8d9-8583f8e1fd89,116db2f1-e6ac-4780-bccf-a977325250cd,99880aa7-55df-4b45-bfce-0ffc00b23ced,d3bdd308-08b1-4b78-a4c1-464ca9763e8d},\n private_publication = {false},\n abstract = {We sustain that the structure of affect elicited by music is largely dependent on dynamic temporal patterns in low-level music structural parameters. In support of this claim, we have previously provided evidence that spatiotemporal dynamics in psychoacoustic features resonate with two psychological dimensions of affect underlying judgments of subjective feelings: arousal and valence. In this article we extend our previous investigations in two aspects. First, we focus on the emotions experienced rather than perceived while listening to music. Second, we evaluate the extent to which peripheral feedback in music can account for the predicted emotional responses, that is, the role of physiological arousal in determining the intensity and valence of musical emotions. Akin to our previous findings, we will show that a significant part of the listeners' reported emotions can be predicted from a set of six psychoacoustic features-loudness, pitch level, pitch contour, tempo, texture, and sharpness. Furthermore, the accuracy of those predictions is improved with the inclusion of physiological cues-skin conductance and heart rate. The interdisciplinary work presented here provides a new methodology to the field of music and emotion research based on the combination of computational and experimental work, which aid the analysis of the emotional responses to music, while offering a platform for the abstract representation of those complex relationships. Future developments may aid specific areas, such as, psychology and music therapy, by providing coherent descriptions of the emotional effects of specific music stimuli. © 2011 American Psychological Association.},\n bibtype = {article},\n author = {Coutinho, Eduardo and Cangelosi, Angelo},\n doi = {10.1037/a0024700},\n journal = {Emotion},\n number = {4}\n}
\n
\n\n\n
\n We sustain that the structure of affect elicited by music is largely dependent on dynamic temporal patterns in low-level music structural parameters. In support of this claim, we have previously provided evidence that spatiotemporal dynamics in psychoacoustic features resonate with two psychological dimensions of affect underlying judgments of subjective feelings: arousal and valence. In this article we extend our previous investigations in two aspects. First, we focus on the emotions experienced rather than perceived while listening to music. Second, we evaluate the extent to which peripheral feedback in music can account for the predicted emotional responses, that is, the role of physiological arousal in determining the intensity and valence of musical emotions. Akin to our previous findings, we will show that a significant part of the listeners' reported emotions can be predicted from a set of six psychoacoustic features (loudness, pitch level, pitch contour, tempo, texture, and sharpness). Furthermore, the accuracy of those predictions is improved with the inclusion of physiological cues (skin conductance and heart rate). The interdisciplinary work presented here provides a new methodology to the field of music and emotion research based on the combination of computational and experimental work, which aids the analysis of the emotional responses to music, while offering a platform for the abstract representation of those complex relationships. Future developments may aid specific areas, such as psychology and music therapy, by providing coherent descriptions of the emotional effects of specific music stimuli. © 2011 American Psychological Association.\n
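\n The claim that physiological cues add predictive power can be illustrated schematically: the Python sketch below compares cross-validated R^2 for a ridge regression using six (synthetic) acoustic features alone versus the same features plus two (synthetic) physiological channels. The feature names, the ridge model, and the simulated effect are assumptions for the example only.\n

import numpy as np
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score

rng = np.random.default_rng(2)
n = 500
acoustic = rng.normal(size=(n, 6))   # stand-ins for loudness, pitch level, contour, tempo, texture, sharpness
physio = rng.normal(size=(n, 2))     # stand-ins for skin conductance and heart rate
arousal = (acoustic @ rng.normal(size=6)
           + 0.5 * (physio @ rng.normal(size=2))
           + rng.normal(scale=0.5, size=n))

r2_acoustic = cross_val_score(Ridge(), acoustic, arousal, cv=5, scoring="r2").mean()
r2_combined = cross_val_score(Ridge(), np.hstack([acoustic, physio]), arousal, cv=5, scoring="r2").mean()
print(f"acoustic only: R^2={r2_acoustic:.2f}; acoustic + physiology: R^2={r2_combined:.2f}")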
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2010\n \n \n (4)\n \n \n
\n
\n \n \n
\n \n\n \n \n Coutinho, E.; and Dibben, N.\n\n\n \n \n \n \n Music, Speech and Emotion: psycho-physiological and computational investigations.\n \n \n \n\n\n \n\n\n\n In Timmers, R.; and Dibben, N., editor(s), Proceedings of the International Conference on Interdisciplinary Musicology: Nature versus Culture (CIM'10), pages 47-48, 2010. University of Sheffield\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{\n title = {Music, Speech and Emotion: psycho-physiological and computational investigations},\n type = {inproceedings},\n year = {2010},\n keywords = {Conference,abstract},\n pages = {47-48},\n publisher = {University of Sheffield},\n city = {Sheffield},\n id = {02c4ccfa-1c7e-372d-89b8-654d934c9a6f},\n created = {2020-05-29T11:51:38.645Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2020-05-30T17:17:25.713Z},\n read = {true},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Coutinho2010},\n source_type = {inproceedings},\n notes = {<b>From Duplicate 1 (<i>Music, Speech and Emotion: psycho-physiological and computational investigations</i> - Coutinho, Eduardo; Dibben, Nicola)<br/></b><br/>From Duplicate 1 (Music, Speech and Emotion: psycho-physiological and computational investigations - Coutinho, Eduardo; Dibben, Nicola) From Duplicate 3 ( Music, Speech and Emotion: psycho-physiological and computational investigations - Coutinho, Eduardo; Dibben, Nicola ) From Duplicate 2 (Music, Speech and Emotion: psycho-physiological and computational investigations - Coutinho, Eduardo; Dibben, Nicola) Proceedings of the International Conference on Interdisciplinary Musicology (CIM10): &quot;Nature versus Culture&quot;, 23-24 July 2010, Sheffield, UK<br/><br/><b>From Duplicate 2 (<i>Music, Speech and Emotion: psycho-physiological and computational investigations</i> - Coutinho, Eduardo; Dibben, Nicola)<br/></b><br/><b>From Duplicate 1 (<i>Music, Speech and Emotion: psycho-physiological and computational investigations</i> - Coutinho, Eduardo; Dibben, Nicola)<br/></b><br/><b>From Duplicate 3 ( <i>Music, Speech and Emotion: psycho-physiological and computational investigations</i> - Coutinho, Eduardo; Dibben, Nicola )<br/></b><br/><br/><b>From Duplicate 2 (<i>Music, Speech and Emotion: psycho-physiological and computational investigations</i> - Coutinho, Eduardo; Dibben, Nicola)<br/></b><br/>Proceedings of the International Conference on Interdisciplinary Musicology (CIM10): &quot;Nature versus Culture&quot;, 23-24 July 2010, Sheffield, UK},\n folder_uuids = {a2a583e8-b0a3-48f9-900f-27e15c9a7f71,3f6fce84-0b1d-41cd-9582-3954eada97d3,031e3fe6-33ba-42b4-a8d9-8583f8e1fd89,b6ff42e6-ef84-4735-ac6e-3ae5287032d9},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Coutinho, Eduardo and Dibben, Nicola},\n editor = {Timmers, Renee and Dibben, Nicola},\n booktitle = {Proceedings of the International Conference on Interdisciplinary Musicology: Nature versus Culture (CIM'10)}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n Coutinho, E.; and Cangelosi, A.\n\n\n \n \n \n \n \n A neural network model for the prediction of musical emotions.\n \n \n \n \n\n\n \n\n\n\n Advances in Cognitive Systems, pages 333-370. Nefti-Meziani, S.; and Gray, J., editor(s). IET, 1 2010.\n \n\n\n\n
\n\n\n\n \n \n \"AdvancesWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@inbook{\n type = {inbook},\n year = {2010},\n keywords = {book,chapter},\n pages = {333-370},\n issue = {12},\n websites = {https://digital-library.theiet.org/content/books/10.1049/pbce071e_ch12},\n month = {1},\n publisher = {IET},\n city = {The Institution of Engineering and Technology, Michael Faraday House, Six Hills Way, Stevenage SG1 2AY, UK},\n id = {94f0b320-f5cd-367e-bf2c-83ed792c106e},\n created = {2020-05-29T11:51:39.178Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2020-05-30T17:16:43.662Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {coutinho2010aemotions},\n source_type = {incollection},\n folder_uuids = {5db95977-632e-457e-b1b5-8ad03c3d17c4},\n private_publication = {false},\n abstract = {This chapter presents a novel methodology to analyse the dynamics of emotional responses to music in terms of computational representations of perceptual processes (psychoacoustic features) and self-perception of physiological activation (peripheral feedback). The approach consists of a computational investigation of musical emotions based on spatio-temporal neural networks sensitive to structural aspects of music. We present two computational studies based on connectionist network models that predict human subjective feelings of emotion. The first study uses six basic psychoacoustic dimensions extracted from the music pieces as predictors of the emotional response. The second computational study evaluates the additional contribution of physiological arousal to the subjective feeling of emotion. Both studies are backed up by experimental data. A detailed analysis of the simulation models’ results demonstrates that a significant part of the listener’s affective response can be predicted from a set of psychoacoustic features of sound tempo, loudness, multiplicity (texture), power spectrum centroid (mean pitch), sharpness (timbre) and mean STFT flux (pitch variation) and one physiological cue, heart rate. This work provides a new methodology to the field of music and emotion research based on combinations of computational and experimental work, which aid the analysis of emotional responses to music, while offering a platform for the abstract representation of those complex relationships.},\n bibtype = {inbook},\n author = {Coutinho, Eduardo and Cangelosi, Angelo},\n editor = {Nefti-Meziani, S and Gray, J},\n doi = {10.1049/PBCE071E_ch12},\n chapter = {A neural network model for the prediction of musical emotions},\n title = {Advances in Cognitive Systems}\n}
\n
\n\n\n
\n This chapter presents a novel methodology to analyse the dynamics of emotional responses to music in terms of computational representations of perceptual processes (psychoacoustic features) and self-perception of physiological activation (peripheral feedback). The approach consists of a computational investigation of musical emotions based on spatio-temporal neural networks sensitive to structural aspects of music. We present two computational studies based on connectionist network models that predict human subjective feelings of emotion. The first study uses six basic psychoacoustic dimensions extracted from the music pieces as predictors of the emotional response. The second computational study evaluates the additional contribution of physiological arousal to the subjective feeling of emotion. Both studies are backed up by experimental data. A detailed analysis of the simulation models’ results demonstrates that a significant part of the listener’s affective response can be predicted from a set of psychoacoustic features of sound: tempo, loudness, multiplicity (texture), power spectrum centroid (mean pitch), sharpness (timbre), and mean STFT flux (pitch variation), plus one physiological cue, heart rate. This work provides a new methodology to the field of music and emotion research based on combinations of computational and experimental work, which aid the analysis of emotional responses to music, while offering a platform for the abstract representation of those complex relationships.\n
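\n As a concrete picture of what a spatio-temporal (Elman-style) network does with such inputs, here is a minimal numpy sketch of the forward pass only: six feature values per time step are folded into a recurrent hidden state that emits an arousal/valence pair per frame. The weights are random and untrained; this illustrates the architecture family, not the model reported in the chapter.\n

import numpy as np

rng = np.random.default_rng(3)
n_in, n_hid, n_out = 6, 16, 2              # 6 psychoacoustic inputs -> arousal/valence

# Randomly initialised Elman-style network (forward pass only; training omitted).
W_in  = rng.normal(scale=0.3, size=(n_hid, n_in))
W_rec = rng.normal(scale=0.3, size=(n_hid, n_hid))
W_out = rng.normal(scale=0.3, size=(n_out, n_hid))

def run_srn(feature_frames):
    """Process a (time, 6) feature sequence; the hidden state carries temporal context."""
    h = np.zeros(n_hid)
    outputs = []
    for x in feature_frames:
        h = np.tanh(W_in @ x + W_rec @ h)
        outputs.append(W_out @ h)
    return np.array(outputs)               # (time, 2): arousal/valence trajectory

frames = rng.normal(size=(120, n_in))      # stand-in psychoacoustic time series
print(run_srn(frames).shape)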
\n\n\n
\n\n\n
\n \n\n \n \n Coutinho, E.\n\n\n \n \n \n \n Modeling Psycho-physiological Measurements of Emotional Responses to Multiple Music Genres.\n \n \n \n\n\n \n\n\n\n In S M Demorest, S., J., M.; and Campbell, P., S., editor(s), Proceedings of 11th International Conference of Music Perception and Cognition (ICMPC11), pages 53, 2010. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{\n title = {Modeling Psycho-physiological Measurements of Emotional Responses to Multiple Music Genres},\n type = {inproceedings},\n year = {2010},\n keywords = {Conference,abstract},\n pages = {53},\n id = {290458b4-4778-3b12-b0e4-eac187fb4f8b},\n created = {2020-05-29T11:51:39.400Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2020-05-30T17:16:36.235Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {coutinho2010modelinggenres},\n source_type = {inproceedings},\n folder_uuids = {a2a583e8-b0a3-48f9-900f-27e15c9a7f71},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Coutinho, E},\n editor = {S M Demorest, S J M and Campbell, P S},\n booktitle = {Proceedings of 11th International Conference of Music Perception and Cognition (ICMPC11)}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n Coutinho, E.\n\n\n \n \n \n \n \n Cognitive dissonance, knowledge instinct and musical emotions.\n \n \n \n \n\n\n \n\n\n\n Physics of Life Reviews, 7(1): 30-32. 3 2010.\n \n\n\n\n
\n\n\n\n \n \n \"CognitiveWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Cognitive dissonance, knowledge instinct and musical emotions},\n type = {article},\n year = {2010},\n keywords = {article,journal},\n pages = {30-32},\n volume = {7},\n websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-77349096866&doi=10.1016%2Fj.plrev.2009.12.005&partnerID=40&md5=6ebe1ce514cf4fed7188ffff8fde7478,http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT},\n month = {3},\n publisher = {Elsevier B.V.},\n id = {2725a2b3-e3f7-340d-84f0-74544dbc06e6},\n created = {2020-05-30T17:34:32.815Z},\n accessed = {2012-03-15},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2021-06-17T15:16:16.758Z},\n read = {true},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Coutinho2010a},\n source_type = {article},\n notes = {<b>From Duplicate 1 (<i>Cognitive dissonance, knowledge instinct and musical emotions</i> - Coutinho, Eduardo)<br/></b><br/>cited By 2<br/><br/><b>From Duplicate 2 (<i>Cognitive dissonance, knowledge instinct and musical emotions</i> - Coutinho, Eduardo)<br/></b><br/><b>From Duplicate 2 (<i>Cognitive dissonance, knowledge instinct and musical emotions</i> - Coutinho, Eduardo)<br/></b><br/>cited By 2},\n folder_uuids = {116db2f1-e6ac-4780-bccf-a977325250cd,99880aa7-55df-4b45-bfce-0ffc00b23ced,813ed798-f952-404b-9427-9475822f127a,db78b332-cabb-4a66-a735-a51b0746de51},\n private_publication = {false},\n bibtype = {article},\n author = {Coutinho, Eduardo},\n doi = {10.1016/j.plrev.2009.12.005},\n journal = {Physics of Life Reviews},\n number = {1}\n}
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2009\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n Coutinho, E.; and Cangelosi, A.\n\n\n \n \n \n \n \n The use of spatio-temporal connectionist models in psychological studies of musical emotions.\n \n \n \n \n\n\n \n\n\n\n Music Perception, 27(1): 1-15. 9 2009.\n \n\n\n\n
\n\n\n\n \n \n \"TheWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {The use of spatio-temporal connectionist models in psychological studies of musical emotions},\n type = {article},\n year = {2009},\n keywords = {article,journal,thesis},\n pages = {1-15},\n volume = {27},\n websites = {http://caliber.ucpress.net/doi/abs/10.1525/mp.2009.27.1.1,http://www.jstor.org/stable/40286139,http://mp.ucpress.edu/cgi/doi/10.1525/mp.2009.27.1.1},\n month = {9},\n city = {Plymouth, UK},\n institution = {University of Plymouth},\n department = {School of Computing and Mathematics},\n id = {6d87fd6f-7923-3030-87d8-b46565c14aeb},\n created = {2020-05-30T17:34:32.696Z},\n accessed = {2012-03-15},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2023-05-15T08:14:21.322Z},\n read = {true},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Coutinho2009},\n source_type = {phdthesis},\n folder_uuids = {031e3fe6-33ba-42b4-a8d9-8583f8e1fd89,58f2988f-d929-444e-8c41-5db314c765aa,116db2f1-e6ac-4780-bccf-a977325250cd,99880aa7-55df-4b45-bfce-0ffc00b23ced},\n private_publication = {false},\n abstract = {This article presents a novel methodology to analyze the dynamics of emotional responses to music. It consists of a computational investigation based on spatiotemporal neural networks, which "mimic" human affective responses to music and predict the responses to novel music sequences. The results provide evidence suggesting that spatiotemporal patterns of sound resonate with affective features underlying judgments of subjective feelings (arousal and valence). A significant part of the listener's affective response is predicted from a set of six psychoacoustic features of sound-loudness, tempo, texture, mean pitch, pitch variation, and sharpness. A detailed analysis of the network parameters and dynamics also allows us to identify the role of specific psychoacoustic variables (e.g., tempo and loudness) in music emotional appraisal. This work contributes new evidence and insights to the study of musical emotions, with particular relevance to the music perception and cognition research community. © 2009 By the Regents of the University of California.},\n bibtype = {article},\n author = {Coutinho, Eduardo and Cangelosi, Angelo},\n doi = {10.1525/mp.2009.27.1.1},\n journal = {Music Perception},\n number = {1}\n}
\n
\n\n\n
\n This article presents a novel methodology to analyze the dynamics of emotional responses to music. It consists of a computational investigation based on spatiotemporal neural networks, which "mimic" human affective responses to music and predict the responses to novel music sequences. The results provide evidence suggesting that spatiotemporal patterns of sound resonate with affective features underlying judgments of subjective feelings (arousal and valence). A significant part of the listener's affective response is predicted from a set of six psychoacoustic features of sound: loudness, tempo, texture, mean pitch, pitch variation, and sharpness. A detailed analysis of the network parameters and dynamics also allows us to identify the role of specific psychoacoustic variables (e.g., tempo and loudness) in music emotional appraisal. This work contributes new evidence and insights to the study of musical emotions, with particular relevance to the music perception and cognition research community. © 2009 By the Regents of the University of California.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2008\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n Coutinho, E.; and Cangelosi, A.\n\n\n \n \n \n \n Psycho-physiological Patterns of Musical Emotions and Their Relation with Music Structure.\n \n \n \n\n\n \n\n\n\n In Miyazaki, K.; Hiraga, Y.; Adachi, M.; Nakajima, Y.; and Tsuzaki, M., editor(s), Proceedings of the 10th International Conference on Music Perception and Cognition (ICMPC10), pages 94, 2008. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{\n title = {Psycho-physiological Patterns of Musical Emotions and Their Relation with Music Structure},\n type = {inproceedings},\n year = {2008},\n pages = {94},\n id = {3d4b087b-3d1d-3f02-8667-2432bbd5a3be},\n created = {2018-03-29T13:11:33.525Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2021-05-14T08:53:58.900Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {coutinho2008psychophysiologicalstructure},\n source_type = {inproceedings},\n notes = {date-added: 2010-03-11 09:46:03 +0000<br/>date-modified: 2010-03-11 09:54:36 +0000},\n folder_uuids = {a2a583e8-b0a3-48f9-900f-27e15c9a7f71},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Coutinho, E and Cangelosi, A},\n editor = {Miyazaki, K and Hiraga, Y and Adachi, M and Nakajima, Y and Tsuzaki, M},\n booktitle = {Proceedings of the 10th International Conference on Music Perception and Cognition (ICMPC10)},\n keywords = {Conference,abstract}\n}
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2007\n \n \n (3)\n \n \n
\n
\n \n \n
\n \n\n \n \n Coutinho, E.; Miranda, E., R.; and Cangelosi, A.\n\n\n \n \n \n \n \n Towards a Model for Embodied Emotions.\n \n \n \n \n\n\n \n\n\n\n In 2005 Portuguese Conference on Artificial Intelligence, pages 54-63, 12 2007. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"TowardsWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{\n title = {Towards a Model for Embodied Emotions},\n type = {inproceedings},\n year = {2007},\n keywords = {article,conference},\n pages = {54-63},\n websites = {http://ieeexplore.ieee.org/document/4145923/},\n month = {12},\n publisher = {IEEE},\n id = {fcefc664-af4b-3332-95c2-9a1e95c186dc},\n created = {2020-05-29T11:51:39.182Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2020-05-30T17:16:53.929Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {coutinho2005towardsemotions},\n source_type = {inproceedings},\n folder_uuids = {aac08d0d-38e7-4f4e-a381-5271c5c099ce},\n private_publication = {false},\n abstract = {We are interested in developing A-Life-like models to study the evolution of emotional systems in artificial worlds inhabited by autonomous agents. This paper focuses on the emotional component of an agent at its very basic physical level. We adopt an evolutionary perspective by modelling the agent based on biologically plausible principles, whereby emotions emerge from homeostatic mechanisms. We suggest that the agent should be embodied so as to allow its behaviour to be affected by low-level physical tasks. By embodiment we mean that the agent has a virtual physical body whose states can be sensed by the agent itself. The simulations show the emergence of a stable emotional system with emotional contexts resulting from dynamical categorization of objects in the world. This proved to be effective and versatile enough to allow the agent to adapt itself to unknown world configurations. The results are coherent with Antonio Damasio's theory of background emotional system (2000). We demonstrate that body/world categorizations and body maps can evolve from a simple rule: self-survival},\n bibtype = {inproceedings},\n author = {Coutinho, Eduardo and Miranda, Eduardo R. and Cangelosi, Angelo},\n doi = {10.1109/epia.2005.341264},\n booktitle = {2005 Portuguese Conference on Artificial Intelligence}\n}
\n
\n\n\n
\n We are interested in developing A-Life-like models to study the evolution of emotional systems in artificial worlds inhabited by autonomous agents. This paper focuses on the emotional component of an agent at its very basic physical level. We adopt an evolutionary perspective by modelling the agent based on biologically plausible principles, whereby emotions emerge from homeostatic mechanisms. We suggest that the agent should be embodied so as to allow its behaviour to be affected by low-level physical tasks. By embodiment we mean that the agent has a virtual physical body whose states can be sensed by the agent itself. The simulations show the emergence of a stable emotional system with emotional contexts resulting from dynamical categorization of objects in the world. This proved to be effective and versatile enough to allow the agent to adapt itself to unknown world configurations. The results are coherent with Antonio Damasio's theory of background emotional system (2000). We demonstrate that body/world categorizations and body maps can evolve from a simple rule: self-survival\n
\n\n\n
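As a rough illustration of the homeostatic mechanism described above (emotional context emerging from the self-regulation of internal bodily variables as the agent meets objects in its world), here is a small sketch; the variable names, set points and object effects are invented for illustration and are not taken from the paper.

# Hypothetical sketch of a homeostatic agent: internal variables drift, objects perturb
# them, and an "affective" signal is derived from the distance to the set points.
import random

SET_POINTS = {"energy": 1.0, "integrity": 1.0}           # assumed ideal internal state
OBJECT_EFFECTS = {                                         # assumed object -> effect map
    "food":   {"energy": +0.3, "integrity":  0.0},
    "hazard": {"energy": -0.1, "integrity": -0.4},
}

def step(state, encountered):
    """One interaction step: metabolic decay plus the effect of the encountered object."""
    new_state = {k: v - 0.05 for k, v in state.items()}   # slow decay toward depletion
    for k, delta in OBJECT_EFFECTS.get(encountered, {}).items():
        new_state[k] = max(0.0, min(1.5, new_state[k] + delta))
    return new_state

def affect(state):
    """Negative valence grows with the deviation from the homeostatic set points."""
    error = sum(abs(state[k] - SET_POINTS[k]) for k in SET_POINTS)
    return -error

state = dict(SET_POINTS)
for t in range(10):
    obj = random.choice(["food", "hazard", "nothing"])
    state = step(state, obj)
    print(t, obj, {k: round(v, 2) for k, v in state.items()}, round(affect(state), 2))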
\n\n\n
\n \n\n \n \n Coutinho, E.; and Cangelosi, A.\n\n\n \n \n \n \n \n Emotion and embodiment in cognitive Agents: From instincts to music.\n \n \n \n \n\n\n \n\n\n\n In 2007 International Conference on Integration of Knowledge Intensive Multi-Agent Systems, KIMAS 2007, pages 133-138, 4 2007. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"EmotionWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Emotion and embodiment in cognitive Agents: From instincts to music},\n type = {inproceedings},\n year = {2007},\n pages = {133-138},\n websites = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-34548721619&doi=10.1109%2FKIMAS.2007.369798&partnerID=40&md5=f8f98e2614bad5c5d0c6ede9aa095954,http://ieeexplore.ieee.org/document/4227537/},\n month = {4},\n publisher = {IEEE},\n id = {54950cb4-26b9-3782-b3e3-509a6065443d},\n created = {2020-05-30T17:34:32.700Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2023-05-15T08:14:21.365Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {Coutinho2007},\n source_type = {CONF},\n notes = {<b>From Duplicate 1 (<i>Emotion and embodiment in cognitive Agents: From instincts to music</i> - Coutinho, Eduardo; Cangelosi, Angelo)<br/></b><br/><b>From Duplicate 1 (<i>Emotion and embodiment in cognitive Agents: From instincts to music</i> - Coutinho, Eduardo; Cangelosi, Angelo)<br/></b><br/>cited By 5<br/><br/><b>From Duplicate 2 (<i>Emotion and embodiment in cognitive Agents: From instincts to music</i> - Coutinho, Eduardo; Cangelosi, Angelo)<br/></b><br/>International Conference on Integration of Knowledge Intensive Multi-Agent Systems, Waltham, MA, APR 30-MAY 03, 2007<br/><br/><b>From Duplicate 2 (<i>Emotion and embodiment in cognitive Agents: From instincts to music</i> - Coutinho, Eduardo; Cangelosi, Angelo)<br/></b><br/>cited By 5<br/><br/><b>From Duplicate 3 (<i>Emotion and embodiment in cognitive Agents: From instincts to music</i> - Coutinho, Eduardo; Cangelosi, Angelo)<br/></b><br/>International Conference on Integration of Knowledge Intensive Multi-Agent Systems, Waltham, MA, APR 30-MAY 03, 2007},\n private_publication = {false},\n abstract = {This paper suggests the use of modeling techniques to tack into the emotion'cognition paradigm. We presented two possible frameworks focusing on the embodiment basis of emotions. The first one explores the emergence of emotion mechanisms, by establishing the primary conditions of survival and exploring the basic roots of emotional systems. These simulations show the emergence of a stable motivational system with emotional contexts resulting from dynamical categorization of objects in the environment, in answer to survival pressures and homeostatic processes. The second framework uses music as a source of information about the mechanism of emotion and we propose a model based on recurrent connectionist architectures for the prediction of emotional states in response to music experience. Results demonstrate that there are strong relationships between arousal reports and music psychoacoustics, such as tempo and dynamics. Finally we discuss future directions of research on emotions based on cognitive agents and mathematical models. © 2007 IEEE.},\n bibtype = {inproceedings},\n author = {Coutinho, Eduardo and Cangelosi, Angelo},\n doi = {10.1109/KIMAS.2007.369798},\n booktitle = {2007 International Conference on Integration of Knowledge Intensive Multi-Agent Systems, KIMAS 2007}\n}
\n
\n\n\n
\n This paper suggests the use of modeling techniques to tap into the emotion-cognition paradigm. We presented two possible frameworks focusing on the embodiment basis of emotions. The first one explores the emergence of emotion mechanisms, by establishing the primary conditions of survival and exploring the basic roots of emotional systems. These simulations show the emergence of a stable motivational system with emotional contexts resulting from dynamical categorization of objects in the environment, in answer to survival pressures and homeostatic processes. The second framework uses music as a source of information about the mechanism of emotion and we propose a model based on recurrent connectionist architectures for the prediction of emotional states in response to music experience. Results demonstrate that there are strong relationships between arousal reports and music psychoacoustics, such as tempo and dynamics. Finally we discuss future directions of research on emotions based on cognitive agents and mathematical models. © 2007 IEEE.\n
\n\n\n
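The second framework in the abstract above reports strong relationships between continuous arousal reports and psychoacoustic variables such as tempo and dynamics. One simple way to probe such a relationship, shown here on synthetic stand-in data rather than with the authors' method, is a lagged Pearson correlation between the two time series.

# Illustrative only: Pearson correlation between a loudness curve and arousal ratings
# at several time lags, computed on synthetic data standing in for real measurements.
import numpy as np

rng = np.random.default_rng(0)
t = np.arange(300)                          # e.g. 300 one-second frames
loudness = np.sin(t / 20.0) + 0.1 * rng.standard_normal(t.size)
arousal = np.roll(loudness, 3) + 0.2 * rng.standard_normal(t.size)   # lagged, noisy copy

def lagged_corr(x, y, lag):
    """Correlation of x[t] with y[t + lag] over the overlapping region."""
    if lag > 0:
        x, y = x[:-lag], y[lag:]
    elif lag < 0:
        x, y = x[-lag:], y[:lag]
    return float(np.corrcoef(x, y)[0, 1])

for lag in range(0, 6):
    print(lag, round(lagged_corr(loudness, arousal, lag), 3))

With these synthetic series the correlation peaks at the built-in lag of three frames, which is the kind of lead/lag structure such an analysis is meant to expose.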
\n\n\n
\n \n\n \n \n Coutinho, E.; Gimenes, M.; Martins, J., M.; and Miranda, E., R.\n\n\n \n \n \n \n \n Computational Musicology: An Artificial Life Approach.\n \n \n \n \n\n\n \n\n\n\n In Bento, C.; Cardoso, A.; and Dias, G., editor(s), 2005 Portuguese Conference on Artificial Intelligence, pages 85-93, 12 2007. APPIA; DIUBI; CISUC; Microsoft Res; FCT; Camara Municipal Coviha; Parkurbis; IMB-Hoteis; Caixa Geral Depositos; AUTO JARDIM Automoveis S A; TAP Air Portugal; SEMMAIS Programac Design Interact; OmniSys Tecnol Informacao Lda; Regisfund Maquinas Escritor Lda\n \n\n\n\n
\n\n\n\n \n \n \"ComputationalWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{\n title = {Computational Musicology: An Artificial Life Approach},\n type = {inproceedings},\n year = {2007},\n pages = {85-93},\n websites = {http://ieeexplore.ieee.org/document/4145929/},\n month = {12},\n publisher = {APPIA; DIUBI; CISUC; Microsoft Res; FCT; Camara Municipal Coviha; Parkurbis; IMB-Hoteis; Caixa Geral Depositos; AUTO JARDIM Automoveis S A; TAP Air Portugal; SEMMAIS Programac Design Interact; OmniSys Tecnol Informacao Lda; Regisfund Maquinas Escritor Lda},\n id = {5f4215c0-dcb3-3296-b43e-b065239b721e},\n created = {2020-05-30T17:34:32.958Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2021-06-17T15:16:17.710Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {coutinho2005computationalapproach},\n source_type = {CONF},\n notes = {<b>From Duplicate 1 (<i>Computational Musicology: An Artificial Life Approach</i> - Coutinho, Eduardo; Gimenes, Marcelo; Martins, Joao M.; Miranda, Eduardo R.)<br/></b><br/>Portuguese Conference on Artificial Intelligence, Univ Beira Interior, Covilha, PORTUGAL, 2005<br/><br/><b>From Duplicate 2 (<i>Computational Musicology: An Artificial Life Approach</i> - Coutinho, Eduardo; Gimenes, Marcelo; Martins, Joao M.; Miranda, Eduardo R.)<br/></b><br/><b>From Duplicate 2 (<i>Computational Musicology: An Artificial Life Approach</i> - Coutinho, Eduardo; Gimenes, Marcelo; Martins, Joao M.; Miranda, Eduardo R.)<br/></b><br/>Portuguese Conference on Artificial Intelligence, Univ Beira Interior, Covilha, PORTUGAL, 2005},\n folder_uuids = {aac08d0d-38e7-4f4e-a381-5271c5c099ce},\n private_publication = {false},\n abstract = {Artificial Life (A-Life) and Evolutionary Algorithms (EA) provide a variety of new techniques for making and studying music. EA have been used in different musical applications, ranging from new systems for composition and performance, to models for studying musical evolution in artificial societies. This paper starts with a brief introduction to three main fields of application of EA in Music, namely sound design, creativity and computational musicology. Then it presents our work in the field of computational musicology. Computational musicology is broadly defined as the study of Music with computational modelling and simulation. We are interested in developing ALifebased models to study the evolution of musical cognition in an artificial society of agents. In this paper we present the main components of a model that we are developing to study the evolution of musical ontogenies, focusing on the evolution of rhythms and emotional systems. The paper concludes by suggesting that A-Life and EA provide a powerful paradigm for computational musicology.},\n bibtype = {inproceedings},\n author = {Coutinho, Eduardo and Gimenes, Marcelo and Martins, Joao M. and Miranda, Eduardo R.},\n editor = {Bento  A and Dias, G, C and Cardoso},\n doi = {10.1109/epia.2005.341270},\n booktitle = {2005 Portuguese Conference on Artificial Intelligence},\n keywords = {article,conference}\n}
\n
\n\n\n
\n Artificial Life (A-Life) and Evolutionary Algorithms (EA) provide a variety of new techniques for making and studying music. EA have been used in different musical applications, ranging from new systems for composition and performance, to models for studying musical evolution in artificial societies. This paper starts with a brief introduction to three main fields of application of EA in Music, namely sound design, creativity and computational musicology. Then it presents our work in the field of computational musicology. Computational musicology is broadly defined as the study of Music with computational modelling and simulation. We are interested in developing A-Life-based models to study the evolution of musical cognition in an artificial society of agents. In this paper we present the main components of a model that we are developing to study the evolution of musical ontogenies, focusing on the evolution of rhythms and emotional systems. The paper concludes by suggesting that A-Life and EA provide a powerful paradigm for computational musicology.\n
\n\n\n
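The abstract above is largely a survey, but it does introduce a model for studying the evolution of rhythms in an artificial society. As a generic illustration of the evolutionary-algorithm machinery it refers to, and not of the authors' model, a toy genetic algorithm over binary rhythm patterns could look like this; the fitness criterion, population size and mutation rate are all invented.

# Toy example: evolve 16-step binary rhythm patterns toward an arbitrary fitness
# criterion (here, a target onset density) using mutation and tournament selection.
import random

STEPS, POP, GENS, TARGET_ONSETS = 16, 30, 40, 6

def fitness(pattern):
    return -abs(sum(pattern) - TARGET_ONSETS)           # closer to the target density is better

def mutate(pattern, rate=0.1):
    return [1 - b if random.random() < rate else b for b in pattern]

def tournament(pop):
    a, b = random.sample(pop, 2)
    return a if fitness(a) >= fitness(b) else b

population = [[random.randint(0, 1) for _ in range(STEPS)] for _ in range(POP)]
for _ in range(GENS):
    population = [mutate(tournament(population)) for _ in range(POP)]

best = max(population, key=fitness)
print("".join("x" if b else "." for b in best), fitness(best))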
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2006\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n Coutinho, E.; and Cangelosi, A.\n\n\n \n \n \n \n The dynamics of music perception and emotional experience: a connectionist model.\n \n \n \n\n\n \n\n\n\n In Baroni, M.; Addessi, A., R.; Caterina, R.; and Costa, M., editor(s), 9th International Conference on Music Perception and Cognition, pages 1096-1104, 2006. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{\n title = {The dynamics of music perception and emotional experience: a connectionist model},\n type = {inproceedings},\n year = {2006},\n keywords = {Conference,abstract},\n pages = {1096-1104},\n id = {f4e1a72d-0af3-38a0-8298-97ef5d2ea3a0},\n created = {2018-03-29T13:11:33.082Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2021-05-14T08:53:57.309Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {coutinho2006themodel},\n source_type = {inproceedings},\n folder_uuids = {a2a583e8-b0a3-48f9-900f-27e15c9a7f71},\n private_publication = {false},\n abstract = {In this paper we present a methodological framework for the study of musical emotions, incorporating psycho- physiological experiments and modelling techniques for data analysis. Our focus is restricted to the body implica- tions as a possible source of information about the emo- tional experience, and responsible to certain levels of emo- tional engagement in music. We present and apply the use of spatiotemporal connectionist models, as a modelling technique. Simulation results using a simple recurrent net- work, demonstrate that our connectionist approach leads to a better fit of the simulated process, compared with pre- vious models. We demonstrate that a spatiotemporal con- nectionist model trained on music and emotional rating data is capable of generalizing the level of arousal in re- sponse to novel music input. The model is also capable of identifying the main variables responsible for such an emo- tional rating behaviour.},\n bibtype = {inproceedings},\n author = {Coutinho, Eduardo and Cangelosi, Angelo},\n editor = {Baroni, M and Addessi, A R and Caterina, R and Costa, M},\n booktitle = {9th International Conference on Music Perception and Cognition}\n}
\n
\n\n\n
\n In this paper we present a methodological framework for the study of musical emotions, incorporating psycho-physiological experiments and modelling techniques for data analysis. Our focus is restricted to the body implications as a possible source of information about the emotional experience, and responsible for certain levels of emotional engagement in music. We present and apply the use of spatiotemporal connectionist models as a modelling technique. Simulation results using a simple recurrent network demonstrate that our connectionist approach leads to a better fit of the simulated process, compared with previous models. We demonstrate that a spatiotemporal connectionist model trained on music and emotional rating data is capable of generalizing the level of arousal in response to novel music input. The model is also capable of identifying the main variables responsible for such an emotional rating behaviour.\n
\n\n\n
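The abstract above refers to a simple recurrent (Elman-type) network, in which a copy of the previous hidden state is fed back alongside the current input so the model can track how the music unfolds in time. A bare-bones forward pass, with invented dimensions and random weights standing in for trained ones, looks roughly like this.

# Skeleton of an Elman simple recurrent network: the hidden state from the previous
# time step ("context units") is combined with the current input features.
import numpy as np

rng = np.random.default_rng(1)
n_in, n_hidden = 6, 12                       # assumed sizes: 6 acoustic features
W_in = rng.standard_normal((n_hidden, n_in)) * 0.1
W_rec = rng.standard_normal((n_hidden, n_hidden)) * 0.1
w_out = rng.standard_normal(n_hidden) * 0.1

def run(sequence):
    """Return one arousal estimate per time step for a (time, n_in) feature sequence."""
    h = np.zeros(n_hidden)
    outputs = []
    for x in sequence:
        h = np.tanh(W_in @ x + W_rec @ h)    # context units carry the previous state
        outputs.append(float(w_out @ h))
    return outputs

features = rng.standard_normal((100, n_in))  # stand-in for a 100-step music excerpt
print(run(features)[:5])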
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2005\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n Coutinho, E.; Miranda, E., R.; and Da Silva, P.\n\n\n \n \n \n \n \n Evolving emotional behaviour for expressive performance of music.\n \n \n \n \n\n\n \n\n\n\n Volume 3661 LNAI . Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pages 497. Panayiotopoulos, T.; Gratch, J.; Aylett, R.; Ballin, D.; Olivier, P.; and Rist, T., editor(s). 2005.\n \n\n\n\n
\n\n\n\n \n \n \"LectureWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inbook{\n type = {inbook},\n year = {2005},\n pages = {497},\n volume = {3661 LNAI},\n websites = {http://link.springer.com/10.1007/11550617_48},\n id = {108d0ade-9c4d-3c62-b7b9-3215a63e04d9},\n created = {2018-03-29T13:11:33.519Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2023-05-15T08:14:21.150Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {coutinho2005evolvingmusic},\n source_type = {inproceedings},\n private_publication = {false},\n abstract = {Today computers can be programmed to compose music automatically, using techniques ranging from rule-based to evolutionary computation (e.g., genetic algorithms and cellular automata). However, we lack good techniques for programming the computer to play or interpret music with expression. Expression in music is largely associated with emotions. Therefore we are looking into the possibility of programming computer music systems with emotions. We are addressing this problem from an A-Life perspective combined with recent discoveries in the neurosciences with respect to emotion. Antonio Damasio refers to the importance of emotions to assist an individual to maintain survival, as they seem to be an important mechanism for adaptation and decision-making. Specifically, environmental events of value should be susceptible to preferential perceptual processing, regarding their pleasant or unpleasant. This approach assumes the existence of neural pathways that facilitate survival. Stable emotional systems should then emerge from self-regulatory homeostatic processes. We implemented a system consisting of an agent that inhabits an environment containing with a number of different objects. These objects cause different physiological reactions to the agent. The internal body state of the agent is defined by a set of internal drives and a set of physiological variables that vary as the agent interacts with the objects it encounters in the environment. The agent is controlled by a feed-forward neural network that integrates visual input with information about its internal states. The network learns through a reinforcement-learning algorithm, derivate from different body states, due to pleasant or unpleasant stimuli. The playback of musical recordings in MIDI format is steered by the physiological variables of the agent in different phases of the adaptation process. The behaviour of the system is coherent with Damasio's theory of background emotional system. It demonstrates that specific phenomena, such as body/world categorization and existence of a body map, can evolve from a simple rule: self-survival in the environment. Currently, we are in the process of defining a system of higher-level emotional states (or foreground system) that will operate in social contexts; i.e., with several agents in the environment reacting to objects and interacting with each other. © Springer-Verlag Berlin Heidelberg 2005.},\n bibtype = {inbook},\n author = {Coutinho, Eduardo and Miranda, Eduardo Reck and Da Silva, Patricio},\n editor = {Panayiotopoulos, T and Gratch, J and Aylett, R and Ballin, D and Olivier, P and Rist, T},\n doi = {10.1007/11550617_48},\n chapter = {Evolving emotional behaviour for expressive performance of music},\n title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}\n}
\n
\n\n\n
\n Today computers can be programmed to compose music automatically, using techniques ranging from rule-based to evolutionary computation (e.g., genetic algorithms and cellular automata). However, we lack good techniques for programming the computer to play or interpret music with expression. Expression in music is largely associated with emotions. Therefore we are looking into the possibility of programming computer music systems with emotions. We are addressing this problem from an A-Life perspective combined with recent discoveries in the neurosciences with respect to emotion. Antonio Damasio refers to the importance of emotions to assist an individual to maintain survival, as they seem to be an important mechanism for adaptation and decision-making. Specifically, environmental events of value should be susceptible to preferential perceptual processing, regarding their pleasant or unpleasant nature. This approach assumes the existence of neural pathways that facilitate survival. Stable emotional systems should then emerge from self-regulatory homeostatic processes. We implemented a system consisting of an agent that inhabits an environment containing a number of different objects. These objects cause different physiological reactions in the agent. The internal body state of the agent is defined by a set of internal drives and a set of physiological variables that vary as the agent interacts with the objects it encounters in the environment. The agent is controlled by a feed-forward neural network that integrates visual input with information about its internal states. The network learns through a reinforcement-learning algorithm, with the reinforcement derived from different body states due to pleasant or unpleasant stimuli. The playback of musical recordings in MIDI format is steered by the physiological variables of the agent in different phases of the adaptation process. The behaviour of the system is coherent with Damasio's theory of the background emotional system. It demonstrates that specific phenomena, such as body/world categorization and the existence of a body map, can evolve from a simple rule: self-survival in the environment. Currently, we are in the process of defining a system of higher-level emotional states (or foreground system) that will operate in social contexts; i.e., with several agents in the environment reacting to objects and interacting with each other. © Springer-Verlag Berlin Heidelberg 2005.\n
\n\n\n
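The system summarised above steers the playback of MIDI recordings with the agent's physiological variables. No implementation details are given, so the mapping below is purely hypothetical: it only illustrates the general idea of turning an arousal-like internal variable into tempo and dynamics scaling for a MIDI-style note list.

# Hypothetical mapping from an agent's internal state to expressive playback parameters.
# Notes are (onset_seconds, pitch, velocity); no real MIDI I/O is performed here.

def expressive_render(notes, arousal):
    """Scale tempo and dynamics with an arousal-like variable in [0, 1]."""
    tempo_scale = 1.25 - 0.5 * arousal        # higher arousal -> shorter gaps (faster)
    velocity_gain = 0.7 + 0.6 * arousal       # higher arousal -> louder notes
    rendered = []
    for onset, pitch, velocity in notes:
        rendered.append((onset * tempo_scale,
                         pitch,
                         max(1, min(127, int(velocity * velocity_gain)))))
    return rendered

melody = [(0.0, 60, 80), (0.5, 62, 78), (1.0, 64, 85), (1.5, 65, 83)]
print(expressive_render(melody, arousal=0.2))
print(expressive_render(melody, arousal=0.9))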
\n\n\n
\n \n\n \n \n Coutinho, E.; Miranda, E., R.; and Cangelosi, A.\n\n\n \n \n \n \n Artificial Emotion - Simulating Affective Behaviour.\n \n \n \n\n\n \n\n\n\n In Proceedings of the Post-cognitivist Psychology Conference, pages 7, 2005. Glasgow, Scotland\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{\n title = {Artificial Emotion - Simulating Affective Behaviour},\n type = {inproceedings},\n year = {2005},\n keywords = {Conference,abstract},\n pages = {7},\n publisher = {Glasgow, Scotland},\n institution = {Glasgow, Scotland},\n id = {17be4876-5dd7-3c79-a145-610a02526735},\n created = {2020-05-29T11:51:39.418Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n last_modified = {2020-05-30T17:16:35.716Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {coutinho2005artificialbehaviour},\n source_type = {inproceedings},\n folder_uuids = {a2a583e8-b0a3-48f9-900f-27e15c9a7f71},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Coutinho, E and Miranda, E R and Cangelosi, A},\n booktitle = {Proceedings of the Post-cognitivist Psychology Conference}\n}
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n\n\n\n
\n\n\n \n\n \n \n \n \n\n
\n"}; document.write(bibbase_data.data);