Modelling long- and short-term structure in symbolic music with attention and recurrence. de Berardinis, J., Barrett, S., Cangelosi, A., & Coutinho, E. In CSMC + MuMe 2020: 2020 Joint Conference on AI Music Creativity, 2020. Paper Website abstract bibtex 2 downloads The automatic composition of music with long-term structure is a central problem in music generation. Neural network-based models have been shown to perform relatively well in melody generation, but generating music with long-term structure is still a major challenge. This paper introduces a new approach for music modelling that combines recent advancements of transformer models with recurrent networks — the long-short term universal transformer (LSTUT) — and compares its ability to predict music against current state-of-the-art music models. Our experiments are designed to push the boundaries of music models on considerably long music sequences — a crucial requirement for learning long-term structure effectively. Results show that the LSTUT outperforms all the other models and can potentially learn features related to music structure at different time scales. Overall, we show the importance of integrating both recurrence and attention in the architecture of music models, and their potential use in future automatic composition systems.
@inproceedings{DeBerardinis2020,
  title               = {Modelling long- and short-term structure in symbolic music with attention and recurrence},
  type                = {inproceedings},
  year                = {2020},
  url                 = {https://boblsturm.github.io/aimusic2020/},
  city                = {Stockholm, Sweden},
  id                  = {076d9860-a16d-36b1-907e-620448436fd3},
  created             = {2024-08-09T12:19:59.324Z},
  file_attached       = {true},
  profile_id          = {ffa9027c-806a-3827-93a1-02c42eb146a1},
  group_id            = {da2a8249-fdf4-3036-ba56-7358198a1600},
  last_modified       = {2024-08-09T12:20:20.786Z},
  read                = {false},
  starred             = {false},
  authored            = {false},
  confirmed           = {true},
  hidden              = {false},
  citation_key        = {DeBerardinis2020},
  private_publication = {false},
  abstract            = {The automatic composition of music with long-term structure is a central problem in music generation. Neural network-based models have been shown to perform relatively well in melody generation, but generating music with long-term structure is still a major challenge. This paper introduces a new approach for music modelling that combines recent advancements of transformer models with recurrent networks-the long-short term universal transformer (LSTUT), and compare its ability to predict music against current state-of-the-art music models. Our experiments are designed to push the boundaries of music models on considerably long music sequences-a crucial requirement for learning long-term structure effectively. Results show that the LSTUT outperforms all the other models and can potentially learn features related to music structure at different time scales. Overall, we show the importance of integrating both recurrence and attention in the architecture of music models, and their potential use in future automatic composition systems.},
  bibtype             = {inproceedings},
  author              = {de Berardinis, J. and Barrett, S. and Cangelosi, A. and Coutinho, E.},
  booktitle           = {CSMC + MuMe 2020: 2020 Joint Conference on AI Music Creativity},
}
Downloads: 2
{"_id":"tEeb9EkbJAx3RRymo","bibbaseid":"deberardinis-barrett-cangelosi-coutinho-modellinglongandshorttermstructureinsymbolicmusicwithattentionandrecurrence-2020","author_short":["de Berardinis, J.","Barrett, S.","Cangelosi, A.","Coutinho, E."],"bibdata":{"title":"Modelling long- and short-term structure in symbolic music with attention and recurrence","type":"inproceedings","year":"2020","websites":"https://boblsturm.github.io/aimusic2020/","city":"Stockholm, Sweden","id":"076d9860-a16d-36b1-907e-620448436fd3","created":"2024-08-09T12:19:59.324Z","file_attached":"true","profile_id":"ffa9027c-806a-3827-93a1-02c42eb146a1","group_id":"da2a8249-fdf4-3036-ba56-7358198a1600","last_modified":"2024-08-09T12:20:20.786Z","read":false,"starred":false,"authored":false,"confirmed":"true","hidden":false,"citation_key":"DeBerardinis2020","private_publication":false,"abstract":"The automatic composition of music with long-term structure is a central problem in music generation. Neural network-based models have been shown to perform relatively well in melody generation, but generating music with long-term structure is still a major challenge. This paper introduces a new approach for music modelling that combines recent advancements of transformer models with recurrent networks-the long-short term universal transformer (LSTUT), and compare its ability to predict music against current state-of-the-art music models. Our experiments are designed to push the boundaries of music models on considerably long music sequences-a crucial requirement for learning long-term structure effectively. Results show that the LSTUT outper-forms all the other models and can potentially learn features related to music structure at different time scales. Overall, we show the importance of integrating both recurrence and attention in the architecture of music models, and their potential use in future automatic composition systems.","bibtype":"inproceedings","author":"de Berardinis, J. and Barrett, S. 
and Cangelosi, A. and Coutinho, E.","booktitle":"CSMC + MuMe 2020: 2020 Joint Conference on AI Music Creativity","bibtex":"@inproceedings{\n title = {Modelling long- and short-term structure in symbolic music with attention and recurrence},\n type = {inproceedings},\n year = {2020},\n websites = {https://boblsturm.github.io/aimusic2020/},\n city = {Stockholm, Sweden},\n id = {076d9860-a16d-36b1-907e-620448436fd3},\n created = {2024-08-09T12:19:59.324Z},\n file_attached = {true},\n profile_id = {ffa9027c-806a-3827-93a1-02c42eb146a1},\n group_id = {da2a8249-fdf4-3036-ba56-7358198a1600},\n last_modified = {2024-08-09T12:20:20.786Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {true},\n hidden = {false},\n citation_key = {DeBerardinis2020},\n private_publication = {false},\n abstract = {The automatic composition of music with long-term structure is a central problem in music generation. Neural network-based models have been shown to perform relatively well in melody generation, but generating music with long-term structure is still a major challenge. This paper introduces a new approach for music modelling that combines recent advancements of transformer models with recurrent networks-the long-short term universal transformer (LSTUT), and compare its ability to predict music against current state-of-the-art music models. Our experiments are designed to push the boundaries of music models on considerably long music sequences-a crucial requirement for learning long-term structure effectively. Results show that the LSTUT outper-forms all the other models and can potentially learn features related to music structure at different time scales. Overall, we show the importance of integrating both recurrence and attention in the architecture of music models, and their potential use in future automatic composition systems.},\n bibtype = {inproceedings},\n author = {de Berardinis, J. and Barrett, S. and Cangelosi, A. 
and Coutinho, E.},\n booktitle = {CSMC + MuMe 2020: 2020 Joint Conference on AI Music Creativity}\n}","author_short":["de Berardinis, J.","Barrett, S.","Cangelosi, A.","Coutinho, E."],"urls":{"Paper":"https://bibbase.org/service/mendeley/ffa9027c-806a-3827-93a1-02c42eb146a1/file/2571b158-0789-18bf-0a99-495f265db8d7/2020___de_Berardinis_et_al___Modelling_long__and_short_term_structure_in_symbolic_music_with_attention_and_recurrence.pdf.pdf","Website":"https://boblsturm.github.io/aimusic2020/"},"biburl":"https://bibbase.org/service/mendeley/ffa9027c-806a-3827-93a1-02c42eb146a1","bibbaseid":"deberardinis-barrett-cangelosi-coutinho-modellinglongandshorttermstructureinsymbolicmusicwithattentionandrecurrence-2020","role":"author","metadata":{"authorlinks":{}},"downloads":2},"bibtype":"inproceedings","biburl":"https://bibbase.org/service/mendeley/ffa9027c-806a-3827-93a1-02c42eb146a1","dataSources":["YqW8pMoihb7JazZcx","ya2CyA73rpZseyrZ8","2252seNhipfTmjEBQ"],"keywords":[],"search_terms":["modelling","long","short","term","structure","symbolic","music","attention","recurrence","de berardinis","barrett","cangelosi","coutinho"],"title":"Modelling long- and short-term structure in symbolic music with attention and recurrence","year":2020,"downloads":2}