Conference (2)

Zimei, F., & Simonetta, F. Presentazione del Progetto presso la Biblioteca Angelica – Codex 4D. Conference presentation, Biblioteca Angelica, Rome, November 2023.

@misc{zimei_presentazione_2023,
    address = {Biblioteca Angelica, Rome},
    type = {Conference},
    title = {Presentazione del {Progetto} presso la {Biblioteca} {Angelica} – {Codex} {4D}},
    copyright = {Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (CC-BY-NC-SA)},
    url = {https://codex4d.it/presentazione-del-progetto-presso-la-biblioteca-angelica/},
    language = {it-IT},
    urldate = {2023-11-24},
    author = {Zimei, Francesco and Simonetta, Federico},
    month = nov,
    year = {2023},
}

Simonetta, F. Perspectives on the integration of machine learning techniques into musicological research. Conference presentation, online, October 2023.

@misc{simonetta_perspectives_2023,
    address = {Online},
    type = {Conference},
    title = {Perspectives on the integration of machine learning techniques into musicological research},
    copyright = {Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (CC-BY-NC-SA)},
    url = {https://medievalcontrafacta.wordpress.com/contrafact-in-the-middle-ages-conference/},
    language = {en-GB},
    urldate = {2023-11-24},
    author = {Simonetta, Federico},
    month = oct,
    year = {2023},
}

article (1)

Simonetta, F., Avanzini, F., & Ntalampiras, S. A Perceptual Measure for Evaluating the Resynthesis of Automatic Music Transcriptions. Multimedia Tools and Applications, 2022.

@article{simonetta_perceptual_2022,
    title = {A {Perceptual} {Measure} for {Evaluating} the {Resynthesis} of {Automatic} {Music} {Transcriptions}},
    copyright = {All rights reserved},
    url = {https://hal.archives-ouvertes.fr/hal-03208235/},
    doi = {10.1007/s11042-022-12476-0},
    abstract = {This study focuses on the perception of music performances when contextual factors, such as room acoustics and instrument, change. We propose to distinguish the concept of "performance" from the one of "interpretation", which expresses the "artistic intention". Towards assessing this distinction, we carried out an experimental evaluation where subjects were invited to listen to various audio recordings created by resynthesizing MIDI data obtained through Automatic Music Transcription (AMT) systems and a sensorized acoustic piano. During the resynthesis, we simulated different contexts and asked listeners to evaluate how much the interpretation changes when the context changes. Results show that: (1) MIDI format alone is not able to completely grasp the artistic intention of a music performance; (2) usual objective evaluation measures based on MIDI data present low correlations with the average subjective evaluation. To bridge this gap, we propose a novel measure which is meaningfully correlated with the outcome of the tests. In addition, we investigate multimodal machine learning by providing a new score-informed AMT method and propose an approximation algorithm for the p-dispersion problem.},
    journal = {Multimedia Tools and Applications},
    author = {Simonetta, Federico and Avanzini, Federico and Ntalampiras, Stavros},
    year = {2022},
}

inproceedings (12)

Llorens, A., Simonetta, F., Serrano, M., & Torrente, Á. musif: a Python package for symbolic music feature extraction. In Proceedings of the Sound and Music Computing Conference, Stockholm, Sweden, 2023.

@inproceedings{llorens_musif:_2023,
    address = {Stockholm, Sweden},
    title = {musif: a {Python} package for symbolic music feature extraction},
    copyright = {All rights reserved},
    url = {https://arxiv.org/abs/2307.01120},
    abstract = {In this work, we introduce musif, a Python package that facilitates the automatic extraction of features from symbolic music scores. The package includes the implementation of a large number of features, which have been developed by a team of experts in musicology, music theory, statistics, and computer science. Additionally, the package allows for the easy creation of custom features using commonly available Python libraries. musif is primarily geared towards processing high-quality musicological data encoded in MusicXML format, but also supports other formats commonly used in music information retrieval tasks, including MIDI, MEI, Kern, and others. We provide comprehensive documentation and tutorials to aid in the extension of the framework and to facilitate the introduction of new and inexperienced users to its usage.},
    booktitle = {Proceedings of the {Sound} and {Music} {Computing} {Conference}},
    author = {Llorens, Ana and Simonetta, Federico and Serrano, Martín and Torrente, Álvaro},
    year = {2023},
}

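As a rough, self-contained illustration of what a "symbolic music feature" can look like in practice (a toy sketch in plain Python over a list of MIDI pitches; the function names and example melody are made up for illustration and are not musif's actual API):

from collections import Counter

def pitch_class_histogram(midi_pitches):
    """Relative frequency of each pitch class (0 = C ... 11 = B)."""
    counts = Counter(p % 12 for p in midi_pitches)
    total = len(midi_pitches)
    return {pc: counts.get(pc, 0) / total for pc in range(12)}

def interval_histogram(midi_pitches):
    """Relative frequency of melodic intervals between consecutive notes."""
    intervals = [b - a for a, b in zip(midi_pitches, midi_pitches[1:])]
    counts = Counter(intervals)
    total = len(intervals)
    return {iv: c / total for iv, c in sorted(counts.items())}

# Toy melody: C4 D4 E4 C4 (MIDI note numbers)
melody = [60, 62, 64, 60]
print(pitch_class_histogram(melody))
print(interval_histogram(melody))

Tools such as musif compute many descriptors of this kind directly from parsed MusicXML, MIDI, MEI, or Kern scores rather than from raw pitch lists.
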
Simonetta, F., Llorens, A., Serrano, M., García-Portugués, E., & Torrente, Á. Optimizing Feature Extraction for Symbolic Music. In Proceedings of the 24th International Society for Music Information Retrieval Conference, Milan, November 2023.

@inproceedings{simonetta_optimizing_2023,
    address = {Milan},
    title = {Optimizing {Feature} {Extraction} for {Symbolic} {Music}},
    copyright = {All rights reserved},
    url = {https://arxiv.org/abs/2307.05107},
    abstract = {This paper presents a comprehensive investigation of existing feature extraction tools for symbolic music and contrasts their performance to determine the feature set that best characterizes the musical style of a given music score. In this regard, we propose a novel feature extraction tool, named musif, and evaluate its efficacy on various repertoires and file formats, including MIDI, MusicXML, and **kern. Musif approximates existing tools such as jSymbolic and music21 in terms of computational efficiency while attempting to enhance the usability for custom feature development. The proposed tool also enhances classification accuracy when combined with other feature sets. We demonstrate the contribution of each feature set and the computational resources they require. Our findings indicate that the optimal tool for feature extraction is a combination of the best features from each tool rather than a single one. To facilitate future research in music information retrieval, we release the source code of the tool and benchmarks.},
    booktitle = {Proceedings of the 24th {International} {Society} for {Music} {Information} {Retrieval} {Conference}},
    author = {Simonetta, Federico and Llorens, Ana and Serrano, Martín and García-Portugués, Eduardo and Torrente, Álvaro},
    month = nov,
    year = {2023},
}

Nicolini, M., Simonetta, F., & Ntalampiras, S. Lightweight Audio-Based Human Activity Classification Using Transfer Learning. Pages 783–789, March 2023.

@inproceedings{nicolini_lightweight_2023,
    title = {Lightweight {Audio}-{Based} {Human} {Activity} {Classification} {Using} {Transfer} {Learning}},
    copyright = {All rights reserved},
    isbn = {978-989-758-626-2},
    url = {https://www.scitepress.org/Papers/2023/116479/116479.pdf},
    doi = {10.5220/0011647900003411},
    abstract = {This paper employs the acoustic modality to address the human activity recognition (HAR) problem. The cornerstone of the proposed solution is the YAMNet deep neural network, the embeddings of which comprise the input to a fully-connected linear layer trained for HAR. Importantly, the dataset is publicly available and includes the following human activities: preparing coffee, frying egg, no activity, showering, using microwave, washing dishes, washing hands, and washing teeth. The specific set of activities is representative of a standard home environment facilitating a wide range of applications. The performance offered by the proposed transfer learning-based framework surpasses the state of the art, while being able to be executed on mobile devices, such as smartphones, tablets, etc. In fact, the obtained model has been exported and thoroughly tested for real-time HAR on a smartphone device with the input being the audio captured from its microphone.},
    urldate = {2023-03-06},
    author = {Nicolini, Marco and Simonetta, Federico and Ntalampiras, Stavros},
    month = mar,
    year = {2023},
    pages = {783--789},
}

Cozzatti, M., Simonetta, F., & Ntalampiras, S. Variational Autoencoders for Anomaly Detection in Respiratory Sounds. In Artificial Neural Networks and Machine Learning – ICANN 2022, Cham, 2022. Springer Nature Switzerland.

@inproceedings{cozzatti_variational_2022,
    address = {Cham},
    title = {Variational {Autoencoders} for {Anomaly} {Detection} in {Respiratory} {Sounds}},
    copyright = {All rights reserved},
    url = {https://arxiv.org/abs/2208.03326},
    doi = {10.1007/978-3-031-15937-4_28},
    language = {en},
    urldate = {2022-11-05},
    booktitle = {Artificial {Neural} {Networks} and {Machine} {Learning} – {ICANN} 2022},
    publisher = {Springer Nature Switzerland},
    author = {Cozzatti, Michele and Simonetta, Federico and Ntalampiras, Stavros},
    year = {2022},
}

Poirè, A. M., Simonetta, F., & Ntalampiras, S. Deep Feature Learning for Medical Acoustics. In Artificial Neural Networks and Machine Learning – ICANN 2022, Cham, 2022. Springer Nature Switzerland.

@inproceedings{poire_deep_2022,
    address = {Cham},
    title = {Deep {Feature} {Learning} for {Medical} {Acoustics}},
    copyright = {All rights reserved},
    url = {https://arxiv.org/abs/2208.03084},
    doi = {10.1007/978-3-031-15937-4_4},
    language = {en},
    urldate = {2022-11-05},
    booktitle = {Artificial {Neural} {Networks} and {Machine} {Learning} – {ICANN} 2022},
    publisher = {Springer Nature Switzerland},
    author = {Poirè, Alessandro Maria and Simonetta, Federico and Ntalampiras, Stavros},
    year = {2022},
}

Simonetta, F., Ntalampiras, S., & Avanzini, F. Acoustics-specific Piano Velocity Estimation. In Proceedings of the IEEE MMSP 2022, 2022.

@inproceedings{simonetta_acoustics-specific_2022,
    title = {Acoustics-specific {Piano} {Velocity} {Estimation}},
    copyright = {All rights reserved},
    url = {http://arxiv.org/abs/2203.16294},
    doi = {10.1109/mmsp55362.2022.9948719},
    abstract = {Motivated by the state-of-art psychological research, we note that a piano performance transcribed with existing Automatic Music Transcription (AMT) methods cannot be successfully resynthesized without affecting the artistic content of the performance. This is due to 1) the different mappings between MIDI parameters used by different instruments, and 2) the fact that musicians adapt their way of playing to the surrounding acoustic environment. To face this issue, we propose a methodology to build acoustics-specific AMT systems that are able to model the adaptations that musicians apply to convey their interpretation. Specifically, we train models tailored for virtual instruments in a modular architecture that takes as input an audio recording and the relative aligned music score, and outputs the acoustics-specific velocities of each note. We test different model shapes and show that the proposed methodology generally outperforms the usual AMT pipeline which does not consider specificities of the instrument and of the acoustic environment. Interestingly, such a methodology is extensible in a straightforward way since only slight efforts are required to train models for the inference of other piano parameters, such as pedaling.},
    urldate = {2022-04-06},
    booktitle = {Proceedings of the {IEEE} {MMSP} 2022},
    author = {Simonetta, Federico and Ntalampiras, Stavros and Avanzini, Federico},
    year = {2022},
}

Simonetta, F., Ntalampiras, S., & Avanzini, F. Audio-to-Score Alignment Using Deep Automatic Music Transcription. In Proceedings of the IEEE MMSP 2021, 2021.

@inproceedings{simonetta_audio--score_2021,
    title = {Audio-to-{Score} {Alignment} {Using} {Deep} {Automatic} {Music} {Transcription}},
    copyright = {Creative Commons Attribution 4.0 International},
    url = {https://arxiv.org/abs/2107.12854},
    doi = {10.1109/mmsp53017.2021.9733531},
    abstract = {Audio-to-score alignment (A2SA) is a multimodal task consisting in the alignment of audio signals to music scores. Recent literature confirms the benefits of Automatic Music Transcription (AMT) for A2SA at the frame-level. In this work, we aim to elaborate on the exploitation of AMT Deep Learning (DL) models for achieving alignment at the note-level. We propose a method which benefits from HMM-based score-to-score alignment and AMT, showing a remarkable advancement beyond the state-of-the-art. We design a systematic procedure to take advantage of large datasets which do not offer an aligned score. Finally, we perform a thorough comparison and extensive tests on multiple datasets.},
    booktitle = {Proceedings of the {IEEE} {MMSP} 2021},
    author = {Simonetta, Federico and Ntalampiras, Stavros and Avanzini, Federico},
    year = {2021},
}

Simonetta, F., Ntalampiras, S., & Avanzini, F. ASMD: an automatic framework for compiling multimodal datasets with audio and scores. In Proceedings of the 17th Sound and Music Computing Conference, Torino, 2020.

@inproceedings{simonetta_asmd:_2020,
    address = {Torino},
    title = {{ASMD}: an automatic framework for compiling multimodal datasets with audio and scores},
    copyright = {All rights reserved},
    url = {https://air.unimi.it/handle/2434/748917},
    doi = {10.5281/zenodo.3898666},
    abstract = {This paper describes an open-source Python framework for handling datasets for music processing tasks, built with the aim of improving the reproducibility of research projects in music computing and assessing the generalization abilities of machine learning models. The framework enables the automatic download and installation of several commonly used datasets for multimodal music processing. Specifically, we provide a Python API to access the datasets through Boolean set operations based on particular attributes, such as intersections and unions of composers, instruments, and so on. The framework is designed to ease the inclusion of new datasets and the respective ground-truth annotations so that one can build, convert, and extend one's own collection as well as distribute it by means of a compliant format to take advantage of the API. All code and ground-truth are released under suitable open licenses.},
    booktitle = {Proceedings of the 17th {Sound} and {Music} {Computing} {Conference}},
    author = {Simonetta, Federico and Ntalampiras, Stavros and Avanzini, Federico},
    year = {2020},
}

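To make the "Boolean set operations based on particular attributes" mentioned in the ASMD abstract concrete, here is a minimal self-contained sketch of the idea in plain Python (the metadata records and the select helper are hypothetical illustrations, not ASMD's actual API):

# Hypothetical metadata records standing in for multimodal music datasets.
records = [
    {"id": "d1", "composer": "Mozart", "instruments": {"piano"}},
    {"id": "d2", "composer": "Beethoven", "instruments": {"piano", "violin"}},
    {"id": "d3", "composer": "Mozart", "instruments": {"violin"}},
]

def select(records, **criteria):
    """Return the set of record ids matching all attribute criteria."""
    out = set()
    for r in records:
        ok = True
        for attr, wanted in criteria.items():
            value = r[attr]
            if isinstance(value, set):
                ok = ok and wanted in value
            else:
                ok = ok and value == wanted
        if ok:
            out.add(r["id"])
    return out

mozart = select(records, composer="Mozart")
piano = select(records, instruments="piano")
print(mozart & piano)   # intersection: Mozart pieces for piano -> {'d1'}
print(mozart | piano)   # union: Mozart pieces or piano pieces -> {'d1', 'd2', 'd3'}
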
Ludovico, L. A., Baratè, A., Simonetta, F., & Mauro, D. A. On the Adoption of Standard Encoding Formats to Ensure Interoperability of Music Digital Archives: The IEEE 1599 Format. In 6th International Conference on Digital Libraries for Musicology, pages 20–24, November 2019. ACM.

@inproceedings{ludovico_adoption_2019,
    title = {On the {Adoption} of {Standard} {Encoding} {Formats} to {Ensure} {Interoperability} of {Music} {Digital} {Archives}: {The} {IEEE} 1599 {Format}},
    copyright = {All rights reserved},
    url = {https://air.unimi.it/handle/2434/687286},
    doi = {10.1145/3358664.3358665},
    abstract = {With this paper, we want to stimulate the discussion about technologies for inter-operation between various music datasets and collections. Among the many standards for music representation, IEEE 1599 is the only one which was born with the exact purpose of representing the heterogeneous structures of music documents, granting full synchronization of all the different aspects of music (audio recordings, sheet music images, symbolic representations, musicological analysis, etc). We propose the adoption of IEEE 1599 as an interoperability framework between different collections for advanced music experience, musicological applications, and Music Information Retrieval (MIR). In the years to come, the format will undergo a review process aimed at providing an updated/improved version. It is now the perfect time, for all the stakeholders, to come together and discuss how the format can evolve to better support their requirements, enhancing its descriptive strength and available tools. Moreover, this standard can be profitably applied to any field that requires multi-layer and synchronized descriptions.},
    booktitle = {6th {International} {Conference} on {Digital} {Libraries} for {Musicology}},
    publisher = {ACM},
    author = {Ludovico, Luca Andrea and Baratè, Adriano and Simonetta, Federico and Mauro, Davide Andrea},
    month = nov,
    year = {2019},
    pages = {20--24},
}

Simonetta, F., Ntalampiras, S., & Avanzini, F. Multimodal Music Information Processing and Retrieval: Survey and Future Challenges. In Proceedings of 2019 International Workshop on Multilayer Music Representation and Processing, pages 10–18, Milan, Italy, 2019. IEEE Conference Publishing Services.

@inproceedings{simonetta_multimodal_2019,
    address = {Milan, Italy},
    title = {Multimodal {Music} {Information} {Processing} and {Retrieval}: {Survey} and {Future} {Challenges}},
    copyright = {All rights reserved},
    url = {https://air.unimi.it/handle/2434/633063},
    doi = {10.1109/mmrp.2019.00012},
    abstract = {Towards improving the performance in various music information processing tasks, recent studies exploit different modalities able to capture diverse aspects of music. Such modalities include audio recordings, symbolic music scores, mid-level representations, motion and gestural data, video recordings, editorial or cultural tags, lyrics and album cover arts. This paper critically reviews the various approaches adopted in Music Information Processing and Retrieval, and highlights how multimodal algorithms can help Music Computing applications. First, we categorize the related literature based on the application they address. Subsequently, we analyze existing information fusion approaches, and we conclude with the set of challenges that Music Information Retrieval and Sound and Music Computing research communities should focus in the next years.},
    booktitle = {Proceedings of 2019 {International} {Workshop} on {Multilayer} {Music} {Representation} and {Processing}},
    publisher = {IEEE Conference Publishing Services},
    author = {Simonetta, Federico and Ntalampiras, Stavros and Avanzini, Federico},
    year = {2019},
    pages = {10--18},
}

Simonetta, F., Cancino-Chacón, C. E., Ntalampiras, S., & Widmer, G. A convolutional approach to melody line identification in symbolic scores. In Proceedings of the 20th International Society for Music Information Retrieval Conference, pages 924–931, Delft, The Netherlands, November 2019. ISMIR.

@inproceedings{simonetta_convolutional_2019,
    address = {Delft, The Netherlands},
    title = {A convolutional approach to melody line identification in symbolic scores},
    copyright = {All rights reserved},
    url = {https://doi.org/10.5281/zenodo.3527966},
    doi = {10.5281/zenodo.3527966},
    booktitle = {Proceedings of the 20th international society for music information retrieval conference},
    publisher = {ISMIR},
    author = {Simonetta, Federico and Cancino-Chacón, Carlos Eduardo and Ntalampiras, Stavros and Widmer, Gerhard},
    month = nov,
    year = {2019},
    note = {tex.venue: Delft, The Netherlands},
    pages = {924--931},
}

Simonetta, F., Carnovalini, F., Orio, N., & Rodà, A. Symbolic Music Similarity through a Graph-based Representation. In Proceedings of the Audio Mostly 2018 on Sound in Immersion and Emotion - AM'18, 2018. ACM Press.

@inproceedings{simonetta_symbolic_2018,
    title = {Symbolic {Music} {Similarity} through a {Graph}-based {Representation}},
    copyright = {All rights reserved},
    doi = {10.1145/3243274.3243301},
    abstract = {In this work, a novel representation system for symbolic music is described. The proposed representation system is graph-based and could theoretically represent music both from a horizontal (contrapuntal) and from a vertical (harmonic) point of view, by keeping into account contextual and harmonic information. It could also include relationships between internal variations of motifs and themes. This is achieved by gradually simplifying the melodies and generating layers of reductions that include only the most important notes from a structural and harmonic viewpoint. This representation system has been tested in a music information retrieval task, namely melodic similarity, and compared to another system that performs the same task but does not consider any contextual or harmonic information, showing how the structural information is needed in order to find certain relations between musical pieces. Moreover, a new dataset consisting of more than 5000 leadsheets is presented, with additional meta-musical information taken from different web databases, including author, year of first performance, lyrics, genre and stylistic tags.},
    booktitle = {Proceedings of the {Audio} {Mostly} 2018 on {Sound} in {Immersion} and {Emotion} - {AM}'18},
    publisher = {ACM Press},
    author = {Simonetta, Federico and Carnovalini, Filippo and Orio, Nicola and Rodà, Antonio},
    year = {2018},
}

misc (5)

Simonetta, F. Prospettive sull'integrazione di musicologia, filologia e tecnologia. APRE magazine, November 2023.

@misc{simonetta_prospettive_2023,
    title = {Prospettive sull'integrazione di musicologia, filologia e tecnologia},
    copyright = {All rights reserved},
    url = {https://apre.it/wp-content/uploads/2023/11/APREmagazine_N23_rev.pdf},
    journal = {APRE magazine},
    author = {Simonetta, Federico},
    month = nov,
    year = {2023},
}

Simonetta, F. 7 motivi per i quali il live coding crescerà. Tech Economy 2030, February 2019. English version: https://federicosimonetta.eu.org/post/live_coding/

@misc{simonetta_7_2019,
    title = {7 motivi per i quali il live coding crescerà},
    copyright = {All rights reserved},
    url = {https://www.techeconomy2030.it/2019/02/18/7-motivi-per-cui-live-coding-crescera/},
    abstract = {L'esperienza del live coding per scrivere e far ascoltare musica dal vivo},
    language = {it-IT},
    urldate = {2023-11-24},
    journal = {Tech Economy 2030},
    author = {Simonetta, Federico},
    month = feb,
    year = {2019},
    note = {English version: https://federicosimonetta.eu.org/post/live\_coding/},
}

Abstract (translated from Italian): The experience of live coding for writing music and performing it live.

Simonetta, F. Quando gli ingegneri combattevano il nazifascismo. Tech Economy 2030, April 2019. Section: Visions.

@misc{simonetta_quando_2019,
    title = {Quando gli ingegneri combattevano il nazifascismo},
    copyright = {All rights reserved},
    url = {https://www.techeconomy2030.it/2019/04/25/quando-gli-ingegneri-combattevano-contro-il-nazifascismo/},
    abstract = {Quale il ruolo degli ingegneri nella lotta per la Liberazione dal nazifascismo in Italia?},
    language = {it-IT},
    urldate = {2023-11-24},
    journal = {Tech Economy 2030},
    author = {Simonetta, Federico},
    month = apr,
    year = {2019},
    note = {Section: Visions},
}

Abstract (translated from Italian): What was the role of engineers in the struggle for Liberation from Nazi-Fascism in Italy?

Simonetta, F. Le emozioni dei computer: a che punto siamo e dove andiamo? Tech Economy 2030, October 2018. Section: Society.

@misc{simonetta_emozioni_2018,
    title = {Le emozioni dei computer: a che punto siamo e dove andiamo?},
    copyright = {All rights reserved},
    shorttitle = {Le emozioni dei computer},
    url = {https://www.techeconomy2030.it/2018/10/02/emozioni-computer-a-che-punto-siamo-e-dove-andiamo/},
    abstract = {Sono molti gli scrittori, i registi e i “fantasticatori” in genere che hanno immaginato macchine capaci di provare emozioni, eppure, nessuno è mai riuscito ad oggi a crearne uno. Ci riusciremo? Io credo di sì, e nei prossimi paragrafi proverò a dare un'idea di quanto bene possano simulare le emozioni i computer di oggi. Ogni …},
    language = {it-IT},
    urldate = {2023-11-24},
    journal = {Tech Economy 2030},
    author = {Simonetta, Federico},
    month = oct,
    year = {2018},
    note = {Section: Society},
}

Abstract (translated from Italian): Many writers, film directors, and dreamers of all kinds have imagined machines capable of feeling emotions, and yet nobody has so far managed to create one. Will we succeed? I believe so, and in the following paragraphs I will try to give an idea of how well today's computers can simulate emotions. Every …

Simonetta, F. Enhanced Wikifonia Leadsheet Dataset. Zenodo, November 2018. Dataset.

@misc{simonetta_enhanced_2018,
    title = {Enhanced {Wikifonia} {Leadsheet} {Dataset}},
    copyright = {Restricted Access},
    url = {https://doi.org/10.5281/zenodo.1476555},
    doi = {10.5281/zenodo.1476555},
    abstract = {EWLD (Enhanced Wikifonia Leadsheet Dataset) is a music leadsheet dataset with more than 5.000 scores that comes with a lot of metadata about composers, works, lyrics and features. It is designed for musicological and research purposes. A Public Domain version, named OpenEWLD, is available at https://framagit.org/sapo/OpenEWLD. You can find an in-deep discussion in my Master Thesis. Request the access through Zenodo (click on "request access" button below). --- Please, use the following paper as reference: Simonetta, Federico, Carnovalini, Filippo, Orio, Nicola, & Rodà, Antonio. (2018). Symbolic Music Similarity Through a Graph-Based Representation. In Proceedings of the Audio Mostly 2018 on Sound in Immersion and Emotion (pp. 26:1–26:7). New York, NY, USA: ACM. http://doi.org/10.1145/3243274.3243301 Zenodo link: https://zenodo.org/record/2537059 --- My Master Thesis: F. Simonetta, “Graph based representation of the music symbolic level. A music information retrieval application”, Università di Padova, 2018. Zenodo link: https://zenodo.org/record/1476564},
    urldate = {2022-03-08},
    publisher = {Zenodo},
    author = {Simonetta, Federico},
    month = nov,
    year = {2018},
    note = {Dataset},
}

thesis (3)

Simonetta, F. Music Interpretation Analysis. A Multimodal Approach to Score-Informed Resynthesis of Piano Recordings. Ph.D. thesis, Università di Milano, 2022.

@phdthesis{federico_simonetta_music_2022,
    type = {thesis},
    title = {Music {Interpretation} {Analysis}. {A} {Multimodal} {Approach} to {Score}-{Informed} {Resynthesis} of {Piano} {Recordings}},
    copyright = {All rights reserved},
    url = {http://hdl.handle.net/2434/918909},
    abstract = {This Thesis discusses the development of technologies for the automatic resynthesis of music recordings using digital synthesizers. First, the main issue is identified in the understanding of how Music Information Processing (MIP) methods can take into consideration the influence of the acoustic context on the music performance. For this, a novel conceptual and mathematical framework named “Music Interpretation Analysis” (MIA) is presented. In the proposed framework, a distinction is made between the “performance” – the physical action of playing – and the “interpretation” – the action that the performer wishes to achieve. Second, the Thesis describes further works aiming at the democratization of music production tools via automatic resynthesis: 1) it elaborates software and file formats for musicological archives and multimodal machine-learning datasets; 2) it explores and extends MIP technologies; 3) it presents the mathematical foundations of the MIA framework and shows preliminary evaluations to demonstrate the effectiveness of the proposed approach.},
    school = {Università di Milano},
    author = {Federico Simonetta},
    year = {2022},
}

Simonetta, F. Graph based representation of the music symbolic level. A music information retrieval application. Master's thesis, Università di Padova, April 2018.

@phdthesis{simonetta_graph_2018,
    type = {thesis},
    title = {Graph based representation of the music symbolic level. {A} music information retrieval application},
    copyright = {All rights reserved},
    url = {http://tesi.cab.unipd.it/59666/},
    abstract = {In this work, a new music symbolic level representation system is described. It has been tested in two information retrieval tasks concerning similarity between segments of music and genre detection of a given segment. It could include both harmonic and contrapuntal information. Moreover, a new large dataset consisting of more than 5000 leadsheets is presented, with meta information taken from different web databases, including author information, year of first performance, lyrics, genre, etc.},
    school = {Università di Padova},
    author = {Simonetta, Federico},
    month = apr,
    year = {2018},
    note = {Master Thesis},
}

Simonetta, F. Modellizzazione Musicale Tramite Catene Di Markov. Bachelor's thesis, Università di Pavia, 2014.

@phdthesis{simonetta_modellizzazione_2014,
    type = {thesis},
    title = {Modellizzazione {Musicale} {Tramite} {Catene} {Di} {Markov}},
    copyright = {All rights reserved},
    url = {https://zenodo.org/record/1476574},
    abstract = {Questo lavoro ha avuto come obiettivo principale l'esplorazione del mondo della composizione algoritmica, in particolar modo di quel settore che opera con le catene di Markov, e lo sviluppo di un software che produca musica tramite catene di Markov allenate su brani preesistenti. Il primo capitolo delinea brevemente le linee generali di una possibile storia del pensiero logico-razionale in musica. Non vuole essere una trattazione esaustiva, ma, riguardando argomenti già ampiamente conosciuti dagli studiosi, si pone come una proposta di percorso tematico. Il secondo capitolo tratta, dopo una breve introduzione teorica, alcuni lavori scientifici cui si è venuti a conoscenza. Si è individuata nella letteratura qui trattata due grandi linee di lavoro: una che riguarda l'analisi musicale assistita dal calcolatore e una che tratta nuovi metodi per le pratiche musicali, siano esse improvvisate o meno. Molti dei lavori citati sono di recente data, a conferma che si tratta di un settore in via di evoluzione. L'ultimo capitolo, infine, tratta del laboratorio annesso alla presente trattazione, durante il quale si è sviluppato un piccolo software in Java capace di elaborare brani tramite catene di Markov; alcuni risultati potrebbero essere utilizzati anche al fine dell'analisi musicale.},
    school = {Università di Pavia},
    author = {Simonetta, Federico},
    year = {2014},
    note = {Bachelor Thesis},
}

Abstract (translated from Italian): The main goal of this work was to explore the field of algorithmic composition, in particular the area that works with Markov chains, and to develop software that produces music through Markov chains trained on pre-existing pieces. The first chapter briefly outlines a possible history of logical-rational thought in music; it is not meant as an exhaustive treatment but, covering topics already well known to scholars, is offered as a thematic itinerary. The second chapter discusses, after a short theoretical introduction, a number of scientific works we became aware of. Two broad lines of work are identified in the literature covered here: one concerning computer-assisted music analysis and one dealing with new methods for musical practice, whether improvised or not. Many of the cited works are recent, confirming that this is an evolving field. Finally, the last chapter describes the laboratory work accompanying this dissertation, during which a small piece of software was developed in Java capable of processing pieces through Markov chains; some of the results could also be used for music analysis.

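As a minimal illustration of the general technique the thesis describes, i.e. training a Markov chain on existing pieces and sampling new material from it (a toy first-order chain over MIDI pitches in Python; the thesis software itself was written in Java and is not reproduced here):

import random
from collections import defaultdict

def train_markov(sequences):
    """Collect first-order transitions between consecutive notes."""
    transitions = defaultdict(list)
    for seq in sequences:
        for a, b in zip(seq, seq[1:]):
            transitions[a].append(b)
    return transitions

def generate(transitions, start, length, seed=0):
    """Random walk over the learned transitions."""
    rng = random.Random(seed)
    out = [start]
    for _ in range(length - 1):
        choices = transitions.get(out[-1])
        if not choices:
            break
        out.append(rng.choice(choices))
    return out

# Training melodies as MIDI pitch sequences.
corpus = [[60, 62, 64, 65, 64, 62, 60], [60, 64, 67, 64, 60]]
model = train_markov(corpus)
print(generate(model, start=60, length=8))
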