Emotion Identification System for Musical Tunes based on Characteristics of Acoustic Signal Data. Endrjukaite, T. & Kiyoki, Y. Volume 272. Emotion Identification System for Musical Tunes based on Characteristics of Acoustic Signal Data, pages 88–107. IOS Press, 2014. abstract bibtex We design and implement a music-tune analysis system to realize automatic emotion identification and prediction based on acoustic signal data. To compute physical elements of music pieces we define three significant tunes parameters. These are: repeated parts or repetitions inside a tune, thumbnail of a music piece, and homogeneity pattern of a tune. They are significant, because they are related to how people perceive music pieces. By means of these three parameters we can express the essential features of emotional-aspects of each piece. Our system consists of music-tune features database and computational mechanism for comparison between different tunes. Based on Hevner’s emotions adjectives groups we created a new way of emotion presentation on emotion’s plane with two axes: activity and happiness. That makes it possible to determine perceived emotions of listening to a tune and calculate adjacent emotions on a plane. Finally, we performed a set of experiments on western classical and popular music pieces, which presented that our proposed approach reached 72% precision ratio and shows a positive trend of system’s efficiency when database size is increasing.
@incollection{Endrjukaite2014a,
  author    = {Endrjukaite, Tatiana and Kiyoki, Yasushi},
  title     = {Emotion Identification System for Musical Tunes Based on Characteristics of Acoustic Signal Data},
  booktitle = {Information Modelling and Knowledge Bases XXVI},
  publisher = {IOS Press},
  year      = {2014},
  volume    = {272},
  pages     = {88--107},
  url       = {http://www.iospress.nl/book/information-modelling-and-knowledge-bases-xxvi/},
  keywords  = {emotions, music, repetitions, tune's internal homogeneity, tune's thumbnail},
  abstract  = {We design and implement a music-tune analysis system to realize automatic emotion identification and prediction based on acoustic signal data. To compute physical elements of music pieces we define three significant tunes parameters. These are: repeated parts or repetitions inside a tune, thumbnail of a music piece, and homogeneity pattern of a tune. They are significant, because they are related to how people perceive music pieces. By means of these three parameters we can express the essential features of emotional-aspects of each piece. Our system consists of music-tune features database and computational mechanism for comparison between different tunes. Based on Hevner's emotions adjectives groups we created a new way of emotion presentation on emotion's plane with two axes: activity and happiness. That makes it possible to determine perceived emotions of listening to a tune and calculate adjacent emotions on a plane. Finally, we performed a set of experiments on western classical and popular music pieces, which presented that our proposed approach reached 72\% precision ratio and shows a positive trend of system's efficiency when database size is increasing.},
}
Downloads: 0
{"_id":"XwWkS6fDSCeWqEeHf","bibbaseid":"endrjukaite-yasushikiyoki-emotionidentificationsystemformusicaltunesbasedoncharacteristicsofacousticsignaldata-2014","downloads":0,"creationDate":"2014-12-15T00:59:26.575Z","title":"Emotion Identification System for Musical Tunes based on Characteristics of Acoustic Signal Data","author_short":["Endrjukaite, T.","Yasushi Kiyoki"],"year":2014,"bibtype":"inBook","biburl":null,"bibdata":{"id":"9b9eaca9-ae81-342e-a52d-68f0dad20e3b","title":"Emotion Identification System for Musical Tunes based on Characteristics of Acoustic Signal Data","type":"inBook","year":"2014","keywords":"emotions,music,repetitions,tune’s internal homogeneity,tune’s thumbnail","created":"2014-12-10T23:37:56.000Z","pages":"88 - 107","volume":"272","websites":"http://www.iospress.nl/book/information-modelling-and-knowledge-bases-xxvi/","publisher":"IOS Press","file_attached":false,"profile_id":"1a39a66b-a9cd-3b44-8498-85d3ad34e830","group_id":"424eb3e1-9b50-35ed-91d3-65c78c3164d7","last_modified":"2014-12-11T05:27:35.000Z","read":false,"starred":false,"authored":false,"confirmed":"true","hidden":false,"citation_key":"Endrjukaite2014a","abstract":"We design and implement a music-tune analysis system to realize automatic emotion identification and prediction based on acoustic signal data. To compute physical elements of music pieces we define three significant tunes parameters. These are: repeated parts or repetitions inside a tune, thumbnail of a music piece, and homogeneity pattern of a tune. They are significant, because they are related to how people perceive music pieces. By means of these three parameters we can express the essential features of emotional-aspects of each piece. Our system consists of music-tune features database and computational mechanism for comparison between different tunes. Based on Hevner’s emotions adjectives groups we created a new way of emotion presentation on emotion’s plane with two axes: activity and happiness. 
That makes it possible to determine perceived emotions of listening to a tune and calculate adjacent emotions on a plane. Finally, we performed a set of experiments on western classical and popular music pieces, which presented that our proposed approach reached 72% precision ratio and shows a positive trend of system’s efficiency when database size is increasing.","bibtype":"inBook","author":"Endrjukaite, Tatiana and Yasushi Kiyoki, undefined","book":"Information Modelling and Knowledge Bases XXVI","bibtex":"@inBook{\n id = {9b9eaca9-ae81-342e-a52d-68f0dad20e3b},\n title = {Emotion Identification System for Musical Tunes based on Characteristics of Acoustic Signal Data},\n type = {inBook},\n year = {2014},\n keywords = {emotions,music,repetitions,tune’s internal homogeneity,tune’s thumbnail},\n created = {2014-12-10T23:37:56.000Z},\n pages = {88 - 107},\n volume = {272},\n websites = {http://www.iospress.nl/book/information-modelling-and-knowledge-bases-xxvi/},\n publisher = {IOS Press},\n file_attached = {false},\n profile_id = {1a39a66b-a9cd-3b44-8498-85d3ad34e830},\n group_id = {424eb3e1-9b50-35ed-91d3-65c78c3164d7},\n last_modified = {2014-12-11T05:27:35.000Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {true},\n hidden = {false},\n citation_key = {Endrjukaite2014a},\n abstract = {We design and implement a music-tune analysis system to realize automatic emotion identification and prediction based on acoustic signal data. To compute physical elements of music pieces we define three significant tunes parameters. These are: repeated parts or repetitions inside a tune, thumbnail of a music piece, and homogeneity pattern of a tune. They are significant, because they are related to how people perceive music pieces. By means of these three parameters we can express the essential features of emotional-aspects of each piece. 
Our system consists of music-tune features database and computational mechanism for comparison between different tunes. Based on Hevner’s emotions adjectives groups we created a new way of emotion presentation on emotion’s plane with two axes: activity and happiness. That makes it possible to determine perceived emotions of listening to a tune and calculate adjacent emotions on a plane. Finally, we performed a set of experiments on western classical and popular music pieces, which presented that our proposed approach reached 72% precision ratio and shows a positive trend of system’s efficiency when database size is increasing.},\n bibtype = {inBook},\n author = {Endrjukaite, Tatiana and Yasushi Kiyoki, undefined},\n book = {Information Modelling and Knowledge Bases XXVI}\n}","author_short":["Endrjukaite, T.","Yasushi Kiyoki"],"bibbaseid":"endrjukaite-yasushikiyoki-emotionidentificationsystemformusicaltunesbasedoncharacteristicsofacousticsignaldata-2014","role":"author","urls":{},"keyword":["emotions","music","repetitions","tune’s internal homogeneity","tune’s thumbnail"],"downloads":0},"search_terms":["emotion","identification","system","musical","tunes","based","characteristics","acoustic","signal","data","endrjukaite","yasushi kiyoki"],"keywords":["emotions","music","repetitions","tune’s internal homogeneity","tune’s thumbnail"],"authorIDs":[]}