Comparing Deep Models and Evaluation Strategies for Multi-Pitch Estimation in Music Recordings. Weiß, C. & Peeters, G. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 30:2814–2827, 2022. Conference Name: IEEE/ACM Transactions on Audio, Speech, and Language Processing. [doi] [abstract] [bibtex] Extracting pitch information from music recordings is a challenging but important problem in music signal processing. Frame-wise transcription or multi-pitch estimation aims for detecting the simultaneous activity of pitches in polyphonic music recordings and has recently seen major improvements thanks to deep-learning techniques, with a variety of proposed model architectures. In this paper, we compare different architectures based on convolutional neural networks, the U-net structure, and self-attention components. We propose several modifications to these architectures including self-attention modules for skip connections, recurrent layers to replace the self-attention, and a multi-task strategy with simultaneous prediction of the degree of polyphony. We compare variants of these architectures in different sizes for multi-pitch estimation, focusing on Western classical music beyond the piano-solo scenario using the MusicNet and Schubert Winterreise datasets. Our experiments indicate that most architectures yield competitive results and that larger model variants seem to be beneficial. However, we find that these results substantially depend on randomization effects and the particular choice of the training–test split, which questions the claim of superiority for particular architectures given only small improvements. We therefore investigate the influence of dataset splits in the presence of several movements of a work cycle (cross-version evaluation) and propose a best-practice evaluation strategy for MusicNet, which weakens the influence of individual test tracks and suppresses overfitting to specific works and recording conditions.
A final cross-dataset evaluation suggests that improvements on one specific dataset do not necessarily generalize to other scenarios, thus emphasizing the need for further high-quality multi-pitch datasets in order to reliably measure progress in music transcription tasks.
@article{weis_comparing_2022,
  title    = {Comparing Deep Models and Evaluation Strategies for Multi-Pitch Estimation in Music Recordings},
  author   = {Weiß, Christof and Peeters, Geoffroy},
  journal  = {IEEE/ACM Transactions on Audio, Speech, and Language Processing},
  year     = {2022},
  volume   = {30},
  pages    = {2814--2827},
  issn     = {2329-9304},
  doi      = {10.1109/TASLP.2022.3200547},
  abstract = {Extracting pitch information from music recordings is a challenging but important problem in music signal processing. Frame-wise transcription or multi-pitch estimation aims for detecting the simultaneous activity of pitches in polyphonic music recordings and has recently seen major improvements thanks to deep-learning techniques, with a variety of proposed model architectures. In this paper, we compare different architectures based on convolutional neural networks, the U-net structure, and self-attention components. We propose several modifications to these architectures including self-attention modules for skip connections, recurrent layers to replace the self-attention, and a multi-task strategy with simultaneous prediction of the degree of polyphony. We compare variants of these architectures in different sizes for multi-pitch estimation, focusing on Western classical music beyond the piano-solo scenario using the MusicNet and Schubert Winterreise datasets. Our experiments indicate that most architectures yield competitive results and that larger model variants seem to be beneficial. However, we find that these results substantially depend on randomization effects and the particular choice of the training–test split, which questions the claim of superiority for particular architectures given only small improvements. We therefore investigate the influence of dataset splits in the presence of several movements of a work cycle (cross-version evaluation) and propose a best-practice evaluation strategy for MusicNet, which weakens the influence of individual test tracks and suppresses overfitting to specific works and recording conditions. A final cross-dataset evaluation suggests that improvements on one specific dataset do not necessarily generalize to other scenarios, thus emphasizing the need for further high-quality multi-pitch datasets in order to reliably measure progress in music transcription tasks.},
  keywords = {Annotations, Estimation, Instruments, Multiple signal classification, Music information retrieval, ReadList, Speech processing, Task analysis, Training, U-net, cross-version evaluation, generalization, music transcription},
}
Downloads: 0
{"_id":"HQ7Dd2KzDrrFg53fF","bibbaseid":"wei-peeters-comparingdeepmodelsandevaluationstrategiesformultipitchestimationinmusicrecordings-2022","author_short":["Weiß, C.","Peeters, G."],"bibdata":{"bibtype":"article","type":"article","title":"Comparing Deep Models and Evaluation Strategies for Multi-Pitch Estimation in Music Recordings","volume":"30","issn":"2329-9304","doi":"10.1109/TASLP.2022.3200547","abstract":"Extracting pitch information from music recordings is a challenging but important problem in music signal processing. Frame-wise transcription or multi-pitch estimation aims for detecting the simultaneous activity of pitches in polyphonic music recordings and has recently seen major improvements thanks to deep-learning techniques, with a variety of proposed model architectures. In this paper, we compare different architectures based on convolutional neural networks, the U-net structure, and self-attention components. We propose several modifications to these architectures including self-attention modules for skip connections, recurrent layers to replace the self-attention, and a multi-task strategy with simultaneous prediction of the degree of polyphony. We compare variants of these architectures in different sizes for multi-pitch estimation, focusing on Western classical music beyond the piano-solo scenario using the MusicNet and Schubert Winterreise datasets. Our experiments indicate that most architectures yield competitive results and that larger model variants seem to be beneficial. However, we find that these results substantially depend on randomization effects and the particular choice of the training–test split, which questions the claim of superiority for particular architectures given only small improvements. 
We therefore investigate the influence of dataset splits in the presence of several movements of a work cycle (cross-version evaluation) and propose a best-practice evaluation strategy for MusicNet, which weakens the influence of individual test tracks and suppresses overfitting to specific works and recording conditions. A final cross-dataset evaluation suggests that improvements on one specific dataset do not necessarily generalize to other scenarios, thus emphasizing the need for further high-quality multi-pitch datasets in order to reliably measure progress in music transcription tasks.","journal":"IEEE/ACM Transactions on Audio, Speech, and Language Processing","author":[{"propositions":[],"lastnames":["Weiß"],"firstnames":["Christof"],"suffixes":[]},{"propositions":[],"lastnames":["Peeters"],"firstnames":["Geoffroy"],"suffixes":[]}],"year":"2022","note":"Conference Name: IEEE/ACM Transactions on Audio, Speech, and Language Processing","keywords":"Annotations, Estimation, Instruments, Multiple signal classification, Music information retrieval, ReadList, Speech processing, Task analysis, Training, U-net, cross-version evaluation, generalization, music transcription","pages":"2814–2827","bibtex":"@article{weis_comparing_2022,\n\ttitle = {Comparing {Deep} {Models} and {Evaluation} {Strategies} for {Multi}-{Pitch} {Estimation} in {Music} {Recordings}},\n\tvolume = {30},\n\tissn = {2329-9304},\n\tdoi = {10.1109/TASLP.2022.3200547},\n\tabstract = {Extracting pitch information from music recordings is a challenging but important problem in music signal processing. Frame-wise transcription or multi-pitch estimation aims for detecting the simultaneous activity of pitches in polyphonic music recordings and has recently seen major improvements thanks to deep-learning techniques, with a variety of proposed model architectures. In this paper, we compare different architectures based on convolutional neural networks, the U-net structure, and self-attention components. 
We propose several modifications to these architectures including self-attention modules for skip connections, recurrent layers to replace the self-attention, and a multi-task strategy with simultaneous prediction of the degree of polyphony. We compare variants of these architectures in different sizes for multi-pitch estimation, focusing on Western classical music beyond the piano-solo scenario using the MusicNet and Schubert Winterreise datasets. Our experiments indicate that most architectures yield competitive results and that larger model variants seem to be beneficial. However, we find that these results substantially depend on randomization effects and the particular choice of the training–test split, which questions the claim of superiority for particular architectures given only small improvements. We therefore investigate the influence of dataset splits in the presence of several movements of a work cycle (cross-version evaluation) and propose a best-practice evaluation strategy for MusicNet, which weakens the influence of individual test tracks and suppresses overfitting to specific works and recording conditions. 
A final cross-dataset evaluation suggests that improvements on one specific dataset do not necessarily generalize to other scenarios, thus emphasizing the need for further high-quality multi-pitch datasets in order to reliably measure progress in music transcription tasks.},\n\tjournal = {IEEE/ACM Transactions on Audio, Speech, and Language Processing},\n\tauthor = {Weiß, Christof and Peeters, Geoffroy},\n\tyear = {2022},\n\tnote = {Conference Name: IEEE/ACM Transactions on Audio, Speech, and Language Processing},\n\tkeywords = {Annotations, Estimation, Instruments, Multiple signal classification, Music information retrieval, ReadList, Speech processing, Task analysis, Training, U-net, cross-version evaluation, generalization, music transcription},\n\tpages = {2814--2827},\n}\n\n\n\n","author_short":["Weiß, C.","Peeters, G."],"key":"weis_comparing_2022","id":"weis_comparing_2022","bibbaseid":"wei-peeters-comparingdeepmodelsandevaluationstrategiesformultipitchestimationinmusicrecordings-2022","role":"author","urls":{},"keyword":["Annotations","Estimation","Instruments","Multiple signal classification","Music information retrieval","ReadList","Speech processing","Task analysis","Training","U-net","cross-version evaluation","generalization","music transcription"],"metadata":{"authorlinks":{}},"html":""},"bibtype":"article","biburl":"https://bibbase.org/zotero/fsimonetta","dataSources":["pzyFFGWvxG2bs63zP"],"keywords":["annotations","estimation","instruments","multiple signal classification","music information retrieval","readlist","speech processing","task analysis","training","u-net","cross-version evaluation","generalization","music transcription"],"search_terms":["comparing","deep","models","evaluation","strategies","multi","pitch","estimation","music","recordings","weiß","peeters"],"title":"Comparing Deep Models and Evaluation Strategies for Multi-Pitch Estimation in Music Recordings","year":2022}