var bibbase_data = {"data":"\"Loading..\"\n\n
\n\n \n\n \n\n \n \n\n \n\n \n \n\n \n\n \n
\n generated by\n \n \"bibbase.org\"\n\n \n
\n \n\n
\n\n \n\n\n
\n\n Excellent! Next you can\n create a new website with this list, or\n embed it in an existing web page by copying & pasting\n any of the following snippets.\n\n
\n JavaScript\n (easiest)\n
\n \n <script src=\"https://bibbase.org/show?bib=https%3A%2F%2Fraw.githubusercontent.com%2Fmusic-encoding%2Fmusic-encoding.github.io%2Fmaster%2Fresources%2Fmei_bibliography.bib&jsonp=1&theme=dividers&nocache=1&authorFirst=1&jsonp=1\"></script>\n \n
\n\n PHP\n
\n \n <?php\n $contents = file_get_contents(\"https://bibbase.org/show?bib=https%3A%2F%2Fraw.githubusercontent.com%2Fmusic-encoding%2Fmusic-encoding.github.io%2Fmaster%2Fresources%2Fmei_bibliography.bib&jsonp=1&theme=dividers&nocache=1&authorFirst=1\");\n print_r($contents);\n ?>\n \n
\n\n iFrame\n (not recommended)\n
\n \n <iframe src=\"https://bibbase.org/show?bib=https%3A%2F%2Fraw.githubusercontent.com%2Fmusic-encoding%2Fmusic-encoding.github.io%2Fmaster%2Fresources%2Fmei_bibliography.bib&jsonp=1&theme=dividers&nocache=1&authorFirst=1\"></iframe>\n \n
\n\n

\n For more details see the documention.\n

\n
\n
\n\n
\n\n This is a preview! To use this list on your own web site\n or create a new web site from it,\n create a free account. The file will be added\n and you will be able to edit it in the File Manager.\n We will show you instructions once you've created your account.\n
\n\n
\n\n

To the site owner:

\n\n

Action required! Mendeley is changing its\n API. In order to keep using Mendeley with BibBase past April\n 14th, you need to:\n

    \n
  1. renew the authorization for BibBase on Mendeley, and
  2. \n
  3. update the BibBase URL\n in your page the same way you did when you initially set up\n this page.\n
  4. \n
\n

\n\n

\n \n \n Fix it now\n

\n
\n\n
\n\n\n
\n \n \n
\n
\n  \n 2023\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n Kijas, A.; Calico, J.; Schaub, J.; Plaksin, A.; Grimmer, J.; Merchán Sánchez-Jara, J. F.; and González Gutiérrez, S.\n\n\n \n \n \n \n \n Roundtable: Pedagogical Approaches to Music Encoding.\n \n \n \n \n\n\n \n\n\n\n Journal of Musicological Research. 2023.\n \n\n\n\n
\n\n\n\n \n \n \"Roundtable:Paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 8 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Kijas_2023,\n abstract = {In this roundtable article, we present five essays from emerging scholars, librarians, and music faculty who are using music encoding as a pedagogical tool with undergraduate and graduate students in academic library and university settings. In these case studies and applications, the contributors are all using the Music Encoding Initiative (MEI), one of several music encoding standards. MEI is used to represent musical documents through the application of an MEI schema, or a set of rules for encoding physical and intellectual characteristics of musical sources, which are then expressed in XML. The MEI standard for encoding music is used increasingly to create both machine readable scores and to develop new digital musicology projects that can serve as information resources for scholars and students alike.},\n author = {Kijas, Anna and Calico, Joy and Schaub, Jake and Plaksin, Anna and Grimmer, Jessica and Merch\\'{a}n S\\'{a}nchez-Jara, Javier F. and Gonz\\'{a}lez Guti\\'{e}rrez, Sara},\n year = {2023},\n title = {Roundtable: Pedagogical Approaches to Music Encoding},\n url = {https://www.tandfonline.com/doi/full/10.1080/01411896.2023.2231837},\n journal = {Journal of Musicological Research},\n doi = {10.1080/01411896.2023.2231837}\n}\n\n\n
\n
\n\n\n
\n In this roundtable article, we present five essays from emerging scholars, librarians, and music faculty who are using music encoding as a pedagogical tool with undergraduate and graduate students in academic library and university settings. In these case studies and applications, the contributors are all using the Music Encoding Initiative (MEI), one of several music encoding standards. MEI is used to represent musical documents through the application of an MEI schema, or a set of rules for encoding physical and intellectual characteristics of musical sources, which are then expressed in XML. The MEI standard for encoding music is used increasingly to create both machine readable scores and to develop new digital musicology projects that can serve as information resources for scholars and students alike.\n
\n\n\n
\n\n\n
\n \n\n \n \n Münnich, S.\n\n\n \n \n \n \n Handwritten – Encoded – Semantically Operationalised: Musical Writing Scenes in the Digital Realm.\n \n \n \n\n\n \n\n\n\n In Celestini, F.; and Lutz, S., editor(s), Musikalische Schreibszenen / Scenes of Musical Writing, volume 4, of Theorie der musikalischen Schrift, pages 371–391. Brill | Fink, Paderborn, 2023.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{Muennich_2023,\n abstract = {Looking at the first three volumes of the published output from the ‘On the Genealogy of Writing’ project, there seems to be a categorical difference between writing scenes in the age of manuscripts, typescripts, or in the ‘digital age’, as it is somewhat emphatically called there. I would like to take up this differentiation and, linked to reflections on writing theory, trace the categorial differences between various types of musical writing scenes. In doing so, I will limit myself to four different types: one handwritten, one printed, one in the form of music encoding formats and their rendering (MEI and Verovio), and one that I address here as ‘semantically operationalised’. In doing so, I first elaborate observations pertaining to writing theory and findings in relation to the historically transmitted material in order to then draw conclusions about the underlying writing scene and the transductive processes negotiated in it.},\n author = {Münnich, Stefan},\n title = {Handwritten -- Encoded -- Semantically Operationalised: Musical Writing Scenes in the Digital Realm},\n pages = {371--391},\n publisher = {{Brill | Fink}},\n isbn = {9783770567140},\n series = {Theorie der musikalischen Schrift},\n volume = {4},\n editor = {Celestini, Federico and Lutz, Sarah},\n booktitle = {Musikalische Schreibszenen / Scenes of Musical Writing},\n year = {2023},\n address = {Paderborn},\n doi = {10.30965/9783846767146_016}\n}\n\n\n
\n
\n\n\n
\n Looking at the first three volumes of the published output from the ‘On the Genealogy of Writing’ project, there seems to be a categorical difference between writing scenes in the age of manuscripts, typescripts, or in the ‘digital age’, as it is somewhat emphatically called there. I would like to take up this differentiation and, linked to reflections on writing theory, trace the categorial differences between various types of musical writing scenes. In doing so, I will limit myself to four different types: one handwritten, one printed, one in the form of music encoding formats and their rendering (MEI and Verovio), and one that I address here as ‘semantically operationalised’. In doing so, I first elaborate observations pertaining to writing theory and findings in relation to the historically transmitted material in order to then draw conclusions about the underlying writing scene and the transductive processes negotiated in it.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2021\n \n \n (4)\n \n \n
\n
\n \n \n
\n \n\n \n \n Bue, M. S.; and Rockenberger, A.,\n editors.\n \n\n\n \n \n \n \n \n Notated Music in the Digital Sphere. Possibilities and Limitations.\n \n \n \n \n\n\n \n\n\n\n Volume 15 of Nota bene – Studies from the National Library of NorwayNational Library of Norway, 2021.\n \n\n\n\n
\n\n\n\n \n \n \"Notated volume\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 17 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@book{Bue_2021,\n abstract = {Digital musicology is a diverse area of research. For many, the under-standing of “music” within Digital Humanities concerns sound, recordings, or music notation software. The articles in this collection, however, discuss different aspects of digital musicology: primarily how pre-existing notated music can be represented digitally. The articles deal with subjects such as digital reconstruction, editing, encoding, databases, digital preservation and computational analysis of notated music.},\n title = {{Notated Music in the Digital Sphere. Possibilities and Limitations}},\n editor = {Bue, Margrethe Støkken and Rockenberger, Annika},\n publisher = {National Library of Norway},\n location = {Oslo},\n year = {2021},\n url_Volume = {https://issuu.com/nasjonalbiblioteket/docs/nota_bene_15_layout_issuu},\n urldate = {2021-03-02},\n series = {Nota bene -- Studies from the National Library of Norway},\n volume = {15}\n}\n\n\n
\n
\n\n\n
\n Digital musicology is a diverse area of research. For many, the under-standing of “music” within Digital Humanities concerns sound, recordings, or music notation software. The articles in this collection, however, discuss different aspects of digital musicology: primarily how pre-existing notated music can be represented digitally. The articles deal with subjects such as digital reconstruction, editing, encoding, databases, digital preservation and computational analysis of notated music.\n
\n\n\n
\n\n\n
\n \n\n \n \n Moe, B.\n\n\n \n \n \n \n \n The Editor's Choice. From Sixteenth-Century Sources to Digital Editions Using MEI.\n \n \n \n \n\n\n \n\n\n\n In Bue, M. S.; and Rockenberger, A., editor(s), Notated Music in the Digital Sphere. Possibilities and Limitations, volume 15, of Nota bene – Studies from the National Library of Norway, pages 57–75. National Library of Norway, Oslo, 2021.\n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 7 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{Moe_2021,\n abstract = {The article is concerned with discussing the role of the encoder of musical notation and with exploring how to make choices when encoding. A central point is that decision-making is to a large extent dependent on the purpose of the encoding. Consequently, the editor needs to take into consideration who is going to use the encoding and how. Taking its point of departure from an ongoing project on sixteenth-century monophonic songs, the article discusses the relationship between the visual appearance of notation and the meaning of it. Furthermore, the article explores how an editor is able to address problems during the interpretation and presentation of the source by employing Music Encoding Initiative (MEI).},\n author = {Moe, Bjarke},\n title = {{The Editor's Choice. From Sixteenth-Century Sources to Digital Editions Using MEI}},\n editor = {Bue, Margrethe Støkken and Rockenberger, Annika},\n booktitle = {{Notated Music in the Digital Sphere. Possibilities and Limitations}},\n address = {Oslo},\n publisher = {National Library of Norway},\n year = {2021},\n pages = {57–75},\n series = {Nota bene -- Studies from the National Library of Norway},\n volume = {15},\n url = {https://issuu.com/nasjonalbiblioteket/docs/nota_bene_15_layout_issuu/57},\n}\n\n\n
\n
\n\n\n
\n The article is concerned with discussing the role of the encoder of musical notation and with exploring how to make choices when encoding. A central point is that decision-making is to a large extent dependent on the purpose of the encoding. Consequently, the editor needs to take into consideration who is going to use the encoding and how. Taking its point of departure from an ongoing project on sixteenth-century monophonic songs, the article discusses the relationship between the visual appearance of notation and the meaning of it. Furthermore, the article explores how an editor is able to address problems during the interpretation and presentation of the source by employing Music Encoding Initiative (MEI).\n
\n\n\n
\n\n\n
\n \n\n \n \n Plaksin, A. V. K.\n\n\n \n \n \n \n \n Modelle zur computergestützten Analyse von Überlieferungen der Mensuralmusik : Empirische Textforschung im Kontext phylogenetischer Verfahren.\n \n \n \n \n\n\n \n\n\n\n Ph.D. Thesis, Technische Universität Darmstadt, Dortmund, 2021.\n \n\n\n\n
\n\n\n\n \n \n \"Modelle urn\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@phdthesis{Plaksin_2021,\n abstract = {Since the analysis of transmission aims for the reconstruction of relations between sources, it focuses on the differences of rather similar items. Therefore, it is necessary to find substitution models which are optimized for distinguishing fine levels of differences and to deal with the structural ambiguities and visual variance of mensural notation. This book reports on the task of developing concepts for a computational analysis of the transmission of mensural music based on concepts of phylogenetic analysis. Part one lays the theoretical foundations, wrapping the key assumptions of stemmatics for 15th and 16th century mensural music and the computational methods of phylogenetic analysis. Part two covers in a case study the methodological and domain-specific requirements of mensural music, follows main questions of encoding and sequence-building up to an approach utilizing surrogate data analysis to determine the most suitable substitution model for a comparison of sources.},\n author = {Anna Viktoria Katrin Plaksin},\n title = {Modelle zur computergest{\\"u}tzten Analyse von {\\"U}berlieferungen der Mensuralmusik : Empirische Textforschung im Kontext phylogenetischer Verfahren},\n url_URN = {urn:nbn:de:tuda-tuprints-172112},\n series = {Wissenschaftliche Schriften der WWU M{\\"u}nster, Reihe XXVI: Schriften zur Musikwissenschaft aus M{\\"u}nster},\n volume = {27},\n address = {Dortmund},\n school = {Technische Universit{\\"a}t Darmstadt},\n publisher = {Readbox Unipress},\n year = {2021},\n doi = {10.26083/tuprints-00017211}\n}\n\n\n
\n
\n\n\n
\n Since the analysis of transmission aims for the reconstruction of relations between sources, it focuses on the differences of rather similar items. Therefore, it is necessary to find substitution models which are optimized for distinguishing fine levels of differences and to deal with the structural ambiguities and visual variance of mensural notation. This book reports on the task of developing concepts for a computational analysis of the transmission of mensural music based on concepts of phylogenetic analysis. Part one lays the theoretical foundations, wrapping the key assumptions of stemmatics for 15th and 16th century mensural music and the computational methods of phylogenetic analysis. Part two covers in a case study the methodological and domain-specific requirements of mensural music, follows main questions of encoding and sequence-building up to an approach utilizing surrogate data analysis to determine the most suitable substitution model for a comparison of sources.\n
\n\n\n
\n\n\n
\n \n\n \n \n Teich Geertinger, A.\n\n\n \n \n \n \n \n Digital Encoding of Music Notation with MEI.\n \n \n \n \n\n\n \n\n\n\n In Bue, M. S.; and Rockenberger, A., editor(s), Notated Music in the Digital Sphere. Possibilities and Limitations, volume 15, of Nota bene – Studies from the National Library of Norway, pages 35–56. National Library of Norway, Oslo, 2021.\n \n\n\n\n
\n\n\n\n \n \n \"DigitalPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 14 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{TeichGeertinger_2021,\n abstract = {There are profound differences between the notation of text and music in terms of their purpose, use of symbols and how they translate into a machine-readable form. This paper identifies the difficulties of encoding music notation as compared to text and how these difficulties may be handled. Despite the challenges, encoding formats for notated music which are as sophisticated as comparable textual formats are now available, such as the XML schema defined by the Music Encoding Initiative (MEI). But are there any limits to what is possible to encode? The author argues that today the primary limiting factor is not the available encoding systems but rather the ambiguity and complexity of music notation itself.},\n author = {{Teich Geertinger}, Axel},\n title = {{Digital Encoding of Music Notation with MEI}},\n editor = {Bue, Margrethe Støkken and Rockenberger, Annika},\n booktitle = {{Notated Music in the Digital Sphere. Possibilities and Limitations}},\n address = {Oslo},\n publisher = {National Library of Norway},\n year = {2021},\n pages = {35–56},\n series = {Nota bene -- Studies from the National Library of Norway},\n volume = {15},\n url = {https://issuu.com/nasjonalbiblioteket/docs/nota_bene_15_layout_issuu/35},\n}\n\n\n
\n
\n\n\n
\n There are profound differences between the notation of text and music in terms of their purpose, use of symbols and how they translate into a machine-readable form. This paper identifies the difficulties of encoding music notation as compared to text and how these difficulties may be handled. Despite the challenges, encoding formats for notated music which are as sophisticated as comparable textual formats are now available, such as the XML schema defined by the Music Encoding Initiative (MEI). But are there any limits to what is possible to encode? The author argues that today the primary limiting factor is not the available encoding systems but rather the ambiguity and complexity of music notation itself.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2020\n \n \n (5)\n \n \n
\n
\n \n \n
\n \n\n \n \n Cox, S.; and Sänger, R.\n\n\n \n \n \n \n \n Digitale Fassungsvergleiche am Beispiel von Beethovens Eigenbearbeitungen.\n \n \n \n \n\n\n \n\n\n\n In Acquavella-Rauch, S.; Münzmay, A.; and Veit, J., editor(s), Brückenschläge zwischen Musikwissenschaft und Informatik. Theoretische und praktische Aspekte der Kooperation, volume 3, of Musikwissenschaft: Aktuelle Perspektiven, pages 97–104, 2020. \n \n\n\n\n
\n\n\n\n \n \n \"Digitale urn\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Cox_2020,\n abstract = {In its second module, "Beethovens Werkstatt" deals with five of Beethoven's compositions which exist both in their original versions and as authentic arrangements (Piano Sonata op. 14/1 arranged for string quartet, Septett op. 20 and Trio op. 38, Opferlied op. 121b and Bundeslied op. 122 as piano reductions, Gro{\\ss}e Fuge op. 133 as arrangement for piano for four hands op. 134). To demonstrate Beethoven's arrangement practices, the original version of each work is synoptically linked with its arrangement in a digital edition called "VideApp Arr". Through digital tools for comparison the relationships between the two versions can be investigated from different perspectives. It becomes visible how the versions are related to each other both by "invariance" (text elements with the same structure), by "variance" (text elements with a similar structure) and, in special cases, also by "difference" (text elements without corresponding parameters). Each view within the "VideApp Arr" is generated from the underlying MEI data.},\n author = {Cox, Susanne and S{\\"a}nger, Richard},\n title = {{Digitale Fassungsvergleiche am Beispiel von Beethovens Eigenbearbeitungen}},\n url_URN = {https://nbn-resolving.org/urn:nbn:de:bsz:14-qucosa2-727483},\n pages = {97--104},\n series = {{Musikwissenschaft: Aktuelle Perspektiven}},\n volume = {3},\n editor = {Acquavella-Rauch, Stefanie and M{\\"u}nzmay, Andreas and Veit, Joachim},\n booktitle = {{Br{\\"u}ckenschl{\\"a}ge zwischen Musikwissenschaft und Informatik. Theoretische und praktische Aspekte der Kooperation}},\n year = {2020},\n doi = {10.25366/2020.99}\n}\n\n\n
\n
\n\n\n
\n In its second module, \"Beethovens Werkstatt\" deals with five of Beethoven's compositions which exist both in their original versions and as authentic arrangements (Piano Sonata op. 14/1 arranged for string quartet, Septett op. 20 and Trio op. 38, Opferlied op. 121b and Bundeslied op. 122 as piano reductions, Große Fuge op. 133 as arrangement for piano for four hands op. 134). To demonstrate Beethoven's arrangement practices, the original version of each work is synoptically linked with its arrangement in a digital edition called \"VideApp Arr\". Through digital tools for comparison the relationships between the two versions can be investigated from different perspectives. It becomes visible how the versions are related to each other both by \"invariance\" (text elements with the same structure), by \"variance\" (text elements with a similar structure) and, in special cases, also by \"difference\" (text elements without corresponding parameters). Each view within the \"VideApp Arr\" is generated from the underlying MEI data.\n
\n\n\n
\n\n\n
\n \n\n \n \n De Luca, E.; and Flanders, J.,\n editors.\n \n\n\n \n \n \n \n Music Encoding Conference Proceedings 2020.\n \n \n \n\n\n \n\n\n\n Humanities Commons. 2020.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@proceedings{DeLuca_2020,\n abstract = {Conference proceedings of the Music Encoding Conference 2020 with Foreword by Richard Freedman and Anna J. Kijas},\n year = {2020},\n title = {{Music Encoding Conference Proceedings 2020}},\n publisher = {{Humanities Commons}},\n editor = {{De Luca}, Elsa and Flanders, Julia},\n doi = {10.17613/mvxw-x477}\n}\n\n\n
\n
\n\n\n
\n Conference proceedings of the Music Encoding Conference 2020 with Foreword by Richard Freedman and Anna J. Kijas\n
\n\n\n
\n\n\n
\n \n\n \n \n Sapov, O.\n\n\n \n \n \n \n \n Algorithmische Automatisierung komplexer Notationsregeln in MEI-XML am Beispiel von Versetzungszeichen.\n \n \n \n \n\n\n \n\n\n\n In Acquavella-Rauch, S.; Münzmay, A.; and Veit, J., editor(s), Brückenschläge zwischen Musikwissenschaft und Informatik. Theoretische und praktische Aspekte der Kooperation, volume 3, of Musikwissenschaft: Aktuelle Perspektiven, pages 91–96, 2020. \n \n\n\n\n
\n\n\n\n \n \n \"Algorithmische urn\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{Sapov_2020,\n abstract = {Algorithmic automation of the complex music notation rules in MEI-XML through the example of the accidentals: The article demonstrates how music notation rules can be formalized into a computer algorithm. In particular, it handles the rule, whether the accidentals should be rendered or not when they repeat on the notes of the same pitch in the same measure. The decision process is described step-by-step while referring to the music notation rules. The algorithm was implemented in the XSLT-language for the needs of the Digital Interactive Mozart Edition and can be applied to MEI data. The tool can be downloaded from https://github.com/ismdme/DIME-tools.},\n author = {Sapov, Oleksii},\n title = {{Algorithmische Automatisierung komplexer Notationsregeln in MEI-XML am Beispiel von Versetzungszeichen}},\n url_URN = {https://nbn-resolving.org/urn:nbn:de:bsz:14-qucosa2-727474},\n keywords = {MEI, XML, Musiknotation, Versetzungszeichen},\n pages = {91--96},\n series = {{Musikwissenschaft: Aktuelle Perspektiven}},\n volume = {3},\n editor = {Acquavella-Rauch, Stefanie and M{\\"u}nzmay, Andreas and Veit, Joachim},\n booktitle = {{Br{\\"u}ckenschl{\\"a}ge zwischen Musikwissenschaft und Informatik. Theoretische und praktische Aspekte der Kooperation}},\n year = {2020},\n doi = {10.25366/2020.98}\n}\n\n\n
\n
\n\n\n
\n Algorithmic automation of the complex music notation rules in MEI-XML through the example of the accidentals: The article demonstrates how music notation rules can be formalized into a computer algorithm. In particular, it handles the rule, whether the accidentals should be rendered or not when they repeat on the notes of the same pitch in the same measure. The decision process is described step-by-step while referring to the music notation rules. The algorithm was implemented in the XSLT-language for the needs of the Digital Interactive Mozart Edition and can be applied to MEI data. The tool can be downloaded from https://github.com/ismdme/DIME-tools.\n
\n\n\n
\n\n\n
\n \n\n \n \n Seipelt, A.\n\n\n \n \n \n \n \n Digitale Edition und Harmonische Analyse mit MEI von Anton Bruckners Studienbuch.\n \n \n \n \n\n\n \n\n\n\n In Acquavella-Rauch, S.; Münzmay, A.; and Veit, J., editor(s), Brückenschläge zwischen Musikwissenschaft und Informatik. Theoretische und praktische Aspekte der Kooperation, volume 3, of Musikwissenschaft: Aktuelle Perspektiven, pages 105–113, 2020. \n \n\n\n\n
\n\n\n\n \n \n \"Digitale urn\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 4 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{Seipelt_2020,\n abstract = {The poster shows the results obtained in the project "Digital Music Analysis with the Techniques of the Music Encoding Initiative (MEI) using Anton Bruckner's compositional studies as an example" (2017 to 2019). On the one hand, the project had the goal of presenting a digital edition of Anton Bruckner's study book, which he produced during his lessons with Otto Kitzler from 1861 to 1863. An edition of the music in the textbook encoded with MEI and displayed using Verovio and the facsimile can be displayed simultaneously. On the other hand, an automated harmonic analysis of this music was to be designed. For this purpose, keys are recognized using the Krumhansl-Schmuckler algorithm that is based on a resource of pitch classes which are compared with reference values and thus their similarity is calculated. Based on this, chord recognitions are carried out, which are then linked to the keys in the last step and converted to a roman numeral analysis.},\n author = {Seipelt, Agnes},\n title = {{Digitale Edition und Harmonische Analyse mit MEI von Anton Bruckners Studienbuch}},\n url_URN = {https://nbn-resolving.org/urn:nbn:de:bsz:14-qucosa2-727492},\n keywords = {Anton Bruckner, digitale Edition, MEI, Analyse},\n pages = {105--113},\n series = {{Musikwissenschaft: Aktuelle Perspektiven}},\n volume = {3},\n editor = {Acquavella-Rauch, Stefanie and M{\\"u}nzmay, Andreas and Veit, Joachim},\n booktitle = {{Br{\\"u}ckenschl{\\"a}ge zwischen Musikwissenschaft und Informatik. Theoretische und praktische Aspekte der Kooperation}},\n year = {2020},\n doi = {10.25366/2020.100}\n}\n\n\n
\n
\n\n\n
\n The poster shows the results obtained in the project \"Digital Music Analysis with the Techniques of the Music Encoding Initiative (MEI) using Anton Bruckner's compositional studies as an example\" (2017 to 2019). On the one hand, the project had the goal of presenting a digital edition of Anton Bruckner's study book, which he produced during his lessons with Otto Kitzler from 1861 to 1863. An edition of the music in the textbook encoded with MEI and displayed using Verovio and the facsimile can be displayed simultaneously. On the other hand, an automated harmonic analysis of this music was to be designed. For this purpose, keys are recognized using the Krumhansl-Schmuckler algorithm that is based on a resource of pitch classes which are compared with reference values and thus their similarity is calculated. Based on this, chord recognitions are carried out, which are then linked to the keys in the last step and converted to a roman numeral analysis.\n
\n\n\n
\n\n\n
\n \n\n \n \n Wilson, E. A.\n\n\n \n \n \n \n Text Encoding with the Text Encoding Initiative (TEI) and the Music Encoding Initiative (MEI).\n \n \n \n\n\n \n\n\n\n In Wilson, E. A., editor(s), Digital Humanities for Librarians, pages 87–106. Rowman & Littlefield, Lanham; Boulder; New York; London, 2020.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{Wilson_2020,\n abstract = {Some librarians are born to digital humanities; some aspire to digital humanities; and some have digital humanities thrust upon them. Digital Humanities For Librarians is a one-stop resource for librarians and LIS students working in this growing new area of academic librarianship. The book begins by introducing digital humanities, addressing key questions such as, "What is it?", "Who does it?", "How do they do it?", "Why do they do it?", and "How can I do it?". This broad overview is followed by a series of practical chapters answering those questions with step-by-step approaches to both the digital and the human elements of digital humanities librarianship.},\n title = {{Text Encoding with the Text Encoding Initiative (TEI) and the Music Encoding Initiative (MEI)}},\n author = {Wilson, Emma Annette},\n editor = {Wilson, Emma Annette},\n booktitle = {Digital Humanities for Librarians},\n pages = {87--106},\n publisher = {Rowman & Littlefield},\n address = {Lanham; Boulder; New York; London},\n year = {2020},\n isbn = {9781538116456}\n}\n\n\n
\n
\n\n\n
\n Some librarians are born to digital humanities; some aspire to digital humanities; and some have digital humanities thrust upon them. Digital Humanities For Librarians is a one-stop resource for librarians and LIS students working in this growing new area of academic librarianship. The book begins by introducing digital humanities, addressing key questions such as, \"What is it?\", \"Who does it?\", \"How do they do it?\", \"Why do they do it?\", and \"How can I do it?\". This broad overview is followed by a series of practical chapters answering those questions with step-by-step approaches to both the digital and the human elements of digital humanities librarianship.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2019\n \n \n (5)\n \n \n
\n
\n \n \n
\n \n\n \n \n De Luca, E.; Bain, J.; Behrendt, I.; Fujinaga, I.; Helsen, K.; Ignesti, A.; Lacoste, D.; and Long, S.\n\n\n \n \n \n \n Capturing Early Notations in MEI: The Case of Old Hispanic Neumes.\n \n \n \n\n\n \n\n\n\n Musiktheorie. Zeitschrift für Musikwissenschaft, 34(3): 229–249. 2019.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{DeLuca_2019,\n abstract = {This paper describes the challenges involved in designing a computer encoding system for early plainchant notation (both with and without staff lines) and, in particular, Old Hispanic neume notation. We have been working on a new version of the Music Encoding Initiative (MEI) schema for neume notations since 2012. Our goal and that of MEI is to digitally represent as accurately as possible the notation in musical sources, especially its semantics wherever possible, so that the encoded files can be used for various musical research purposes, such as creating critical editions and data mining. Of the early plainchant notations, Old Hispanic neume notation is the least understood. We will present some of our solutions to resolve the specific issues surrounding Old Hispanic notation while maintaining compatibility with the encoding of other styles of plainchant notation.},\n author = {De Luca, Elsa and Bain, Jennifer and Behrendt, Inga and Fujinaga, Ichiro and Helsen, Katherine and Ignesti, Alessandra and Lacoste, Debra and Long, Sarah},\n year = {2019},\n title = {Capturing Early Notations in MEI: The Case of Old Hispanic Neumes},\n pages = {229--249},\n volume = {34},\n number = {3},\n journal = {Musiktheorie. Zeitschrift für Musikwissenschaft}\n}\n\n\n
\n
\n\n\n
\n This paper describes the challenges involved in designing a computer encoding system for early plainchant notation (both with and without staff lines) and, in particular, Old Hispanic neume notation. We have been working on a new version of the Music Encoding Initiative (MEI) schema for neume notations since 2012. Our goal and that of MEI is to digitally represent as accurately as possible the notation in musical sources, especially its semantics wherever possible, so that the encoded files can be used for various musical research purposes, such as creating critical editions and data mining. Of the early plainchant notations, Old Hispanic neume notation is the least understood. We will present some of our solutions to resolve the specific issues surrounding Old Hispanic notation while maintaining compatibility with the encoding of other styles of plainchant notation.\n
\n\n\n
\n\n\n
\n \n\n \n \n Devaney, J.; and Léveillé Gauvin, H.\n\n\n \n \n \n \n \n Encoding music performance data in Humdrum and MEI.\n \n \n \n \n\n\n \n\n\n\n International Journal on Digital Libraries, 20(1): 81–91. 2019.\n \n\n\n\n
\n\n\n\n \n \n \"EncodingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Devaney_2019,\n abstract = {This paper proposes extensions to two existing music encoding formats, Humdrum and Music Encoding Initiative (MEI), in order to facilitate linking music performance data with corresponding score information. We began by surveying music scholars about their needs for encoding timing, loudness, pitch, and timbral performance data. We used the results of this survey to design and implement new spines in Humdrum syntax to encode summary descriptors at note, beat, and measure levels and new attributes in the MEI format to encode both note-wise summaries and continuous data. These extensions allow for multiple performances of the same piece to be directly compared with one another, facilitating both humanistic and computational study of recorded musical performances.},\n author = {Devaney, Johanna and {L{\\'e}veill{\\'e} Gauvin}, Hubert},\n title = {Encoding music performance data in Humdrum and MEI},\n pages = {81–91},\n volume = {20},\n number = {1},\n journal = {International Journal on Digital Libraries},\n year = {2019},\n doi = {10.1007/s00799-017-0229-3},\n url = {https://link.springer.com/content/pdf/10.1007%2Fs00799-017-0229-3.pdf}\n}\n\n\n
\n
\n\n\n
\n This paper proposes extensions to two existing music encoding formats, Humdrum and Music Encoding Initiative (MEI), in order to facilitate linking music performance data with corresponding score information. We began by surveying music scholars about their needs for encoding timing, loudness, pitch, and timbral performance data. We used the results of this survey to design and implement new spines in Humdrum syntax to encode summary descriptors at note, beat, and measure levels and new attributes in the MEI format to encode both note-wise summaries and continuous data. These extensions allow for multiple performances of the same piece to be directly compared with one another, facilitating both humanistic and computational study of recorded musical performances.\n
\n\n\n
\n\n\n
\n \n\n \n \n Di Bacco, G.; Kepper, J.; and Roland, P.,\n editors.\n \n\n\n \n \n \n \n Music Encoding Conference Proceedings 2015, 2016 and 2017.\n \n \n \n\n\n \n\n\n\n Bavarian State Library (BSB). 2019.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 14 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@proceedings{DiBacco_2019,\n abstract = {Conference proceedings of the Music Encoding Conferences 2015, 2016 and 2017 with Introduction by Giuliano Di Bacco},\n year = {2019},\n title = {{Music Encoding Conference Proceedings 2015, 2016 and 2017}},\n publisher = {{Bavarian State Library (BSB)}},\n editor = {{Di Bacco}, Giuliano and Kepper, Johannes and Roland, Perry},\n doi = {10.15463/music-1}\n}\n\n\n
\n
\n\n\n
\n Conference proceedings of the Music Encoding Conferences 2015, 2016 and 2017 with Introduction by Giuliano Di Bacco\n
\n\n\n
\n\n\n
\n \n\n \n \n Kijas, A.; and Viglianti, R.\n\n\n \n \n \n \n Introduction to the Music Encoding Initiative.\n \n \n \n\n\n \n\n\n\n In Rodrigues, L.; Pappas, E.; Rowell, C.; and Shorish, Y., editor(s), #DLFTeach Toolkit: Lesson Plans for Digital Library Instruction. PubPub, 2019.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 51 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@incollection{Kijas_2019,\n abstract = {In this tutorial, we provide an introduction to encoding music documents according to the Music Encoding Initiative (MEI) for people engaged in digital library work. The MEI is a community-driven effort to create an open source representation of music notation in a machine readable structure. While there are a number of different formats in use for marking up or encoding music, including MusicXML, Humdrum, and Plaine & Easie, the focus of this tutorial is on applying the MEI guidelines (version 3.0.0). These guidelines present a core set of rules for the representation of symbolic, physical, and intellectual aspects of music notation expressed using an XML schema.},\n author = {Kijas, Anna and Viglianti, Raff},\n title = {Introduction to the Music Encoding Initiative},\n publisher = {PubPub},\n editor = {Rodrigues, Liz and Pappas, Erin and Rowell, Chelcie and Shorish, Yasmeen},\n booktitle = {{\\#}DLFTeach Toolkit: Lesson Plans for Digital Library Instruction},\n year = {2019},\n doi = {10.21428/65a6243c.9fa9b4f7},\n keywords = {tutorial}\n}\n\n
\n
\n\n\n
\n In this tutorial, we provide an introduction to encoding music documents according to the Music Encoding Initiative (MEI) for people engaged in digital library work. The MEI is a community-driven effort to create an open source representation of music notation in a machine readable structure. While there are a number of different formats in use for marking up or encoding music, including MusicXML, Humdrum, and Plaine & Easie, the focus of this tutorial is on applying the MEI guidelines (version 3.0.0). These guidelines present a core set of rules for the representation of symbolic, physical, and intellectual aspects of music notation expressed using an XML schema.\n
\n\n\n
\n\n\n
\n \n\n \n \n Rizo, D.; and Marsden, A.\n\n\n \n \n \n \n \n An MEI-based standard encoding for hierarchical music analyses.\n \n \n \n \n\n\n \n\n\n\n International Journal on Digital Libraries, 20(1): 93–105. 2019.\n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Rizo_2019,\nabstract = {We propose a standard representation for hierarchical musical analyses as an extension to the Music Encoding Initiative (MEI) representation for music. Analyses of music need to be represented in digital form for the same reasons as music: preservation, sharing of data, data linking, and digital processing. Systems exist for representing sequential information, but many music analyses are hierarchical, whether represented explicitly in trees or graphs or not. Features of MEI allow the representation of an analysis to be directly associated with the elements of the music analyzed. MEI’s basis in TEI (Text Encoding Initiative), allows us to design a scheme which reuses some of the elements of TEI for the representation of trees and graphs. In order to capture both the information specific to a type of music analysis and the underlying form of an analysis as a tree or graph, we propose related “semantic” encodings, which capture the detailed information, and generic “non-semantic” encodings which expose the tree or graph structure. We illustrate this with examples of representations of a range of different kinds of analysis.},\nauthor = {Rizo, David and Marsden, Alan},\ntitle = {An MEI-based standard encoding for hierarchical music analyses},\npages = {93–105},\nvolume = {20},\nnumber = {1},\njournal = {International Journal on Digital Libraries},\nyear = {2019},\ndoi = {10.1007/s00799-018-0262-x},\nurl = {https://link.springer.com/content/pdf/10.1007%2Fs00799-018-0262-x.pdf}\n}\n\n\n
\n
\n\n\n
\n We propose a standard representation for hierarchical musical analyses as an extension to the Music Encoding Initiative (MEI) representation for music. Analyses of music need to be represented in digital form for the same reasons as music: preservation, sharing of data, data linking, and digital processing. Systems exist for representing sequential information, but many music analyses are hierarchical, whether represented explicitly in trees or graphs or not. Features of MEI allow the representation of an analysis to be directly associated with the elements of the music analyzed. MEI’s basis in TEI (Text Encoding Initiative), allows us to design a scheme which reuses some of the elements of TEI for the representation of trees and graphs. In order to capture both the information specific to a type of music analysis and the underlying form of an analysis as a tree or graph, we propose related “semantic” encodings, which capture the detailed information, and generic “non-semantic” encodings which expose the tree or graph structure. We illustrate this with examples of representations of a range of different kinds of analysis.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2018\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n Seipelt, A.; Gulewycz, P.; and Klugseder, R.\n\n\n \n \n \n \n Digitale Musikanalyse mit den Techniken der Music Encoding Initiative (MEI) am Beispiel von Kompositionsstudien Anton Bruckners.\n \n \n \n\n\n \n\n\n\n Die Musikforschung, 71(4): 366–378. 2018.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Seipelt_2018,\nabstract = {Studying the harmonic structures of a musical work and exploring its origins is one of the main tasks of traditional musicology. Since the advent of computer technologies, new tools for musical analysis emerged to gain new perspectives on well-known compositions. In the field of digital musical editions, the markup language MEI (Music Encoding Initiative) plays a prominent role for encoding musical notation with a musicological demand. This paper presents the current state of the project “Digital Music Analysis with MEI using the Example of Anton Bruckner's Compositional Studies”. Its aim is to encode the “Kitzler Study book” written by Bruckner and to present it in a digital Edition. Also, the project explores the capability of MEI for an automatic or half-automatic harmonic analysis.},\nauthor = {Seipelt, Agnes and Gulewycz, Paul and Klugseder, Robert},\nyear = {2018},\ntitle = {Digitale Musikanalyse mit den Techniken der Music Encoding Initiative (MEI) am Beispiel von Kompositionsstudien Anton Bruckners},\npages = {366–378},\nvolume = {71},\nnumber = {4},\njournal = {Die Musikforschung}\n}\n\n\n
\n
\n\n\n
\n Studying the harmonic structures of a musical work and exploring its origins is one of the main tasks of traditional musicology. Since the advent of computer technologies, new tools for musical analysis emerged to gain new perspectives on well-known compositions. In the field of digital musical editions, the markup language MEI (Music Encoding Initiative) plays a prominent role for encoding musical notation with a musicological demand. This paper presents the current state of the project “Digital Music Analysis with MEI using the Example of Anton Bruckner's Compositional Studies”. Its aim is to encode the “Kitzler Study book” written by Bruckner and to present it in a digital Edition. Also, the project explores the capability of MEI for an automatic or half-automatic harmonic analysis.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2017\n \n \n (6)\n \n \n
\n
\n \n \n
\n \n\n \n \n Behrendt, I.; Bain, J.; and Helsen, K.\n\n\n \n \n \n \n \n MEI Kodierung der frühesten Notation in linienlosen Neumen.\n \n \n \n \n\n\n \n\n\n\n In Busch, H.; Fischer, F.; and Sahle, P., editor(s), Kodikologie und Paläographie im Digitalen Zeitalter 4 – Codicology and Palaeography in the Digital Age 4, volume 11, pages 275–291. Books on Demand, Norderstedt, July 2017.\n \n\n\n\n
\n\n\n\n \n \n \"MEIPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{behrendt_bain_helsen_2017,\n publisher = {Books on Demand},\n volume = {11},\n author = {Inga Behrendt and Jennifer Bain and Kate Helsen},\n address = {Norderstedt},\n month = {July},\n editor = {Hannah Busch and Franz Fischer and Patrick Sahle},\n pages = {275--291},\n booktitle = {Kodikologie und Pal{\\"a}ographie im Digitalen Zeitalter 4 -- Codicology and Palaeography in the Digital Age 4},\n title = {MEI Kodierung der fr{\\"u}hesten Notation in linienlosen Neumen},\n year = {2017},\n url = {https://kups.ub.uni-koeln.de/7789/},\n abstract = {Das Optical Neume Recognition Project (ONRP) hat die digitale Kodierung von musikalischen Notationszeichen aus dem Jahr um 1000 zum Ziel – ein ambitioniertes Vorhaben, das die Projektmitglieder veranlasste, verschiedenste methodische Ans{\\"a}tze zu evaluieren. Die Optical Music Recognition-Software soll eine linienlose Notation aus einem der {\\"a}ltesten erhaltenen Quellen mit Notationszeichen, dem Antiphonar Hartker aus der Benediktinerabtei St. Gallen (Schweiz), welches heute in zwei B{\\"a}nden in der Stiftsbibliothek in St. Gallen aufbewahrt wird, erfassen. Aufgrund der handgeschriebenen, linienlosen Notation stellt dieser Gregorianische Gesang den Forscher vor viele Herausforderungen. Das Werk umfasst {\\"u}ber 300 verschiedene Neumenzeichen und ihre Notation, die mit Hilfe der Music Encoding Initiative (MEI) erfasst und beschrieben werden sollen. Der folgende Artikel beschreibt den Prozess der Adaptierung, um die MEI auf die Notation von Neumen ohne Notenlinien anzuwenden. Beschrieben werden Eigenschaften der Neumennotation, um zu verdeutlichen, wo die Herausforderungen dieser Arbeit liegen sowie die Funktionsweise des Classifiers, einer Art digitalen Neumenw{\\"o}rterbuchs.}\n}\n\n\n
\n
\n\n\n
\n Das Optical Neume Recognition Project (ONRP) hat die digitale Kodierung von musikalischen Notationszeichen aus dem Jahr um 1000 zum Ziel – ein ambitioniertes Vorhaben, das die Projektmitglieder veranlasste, verschiedenste methodische Ansätze zu evaluieren. Die Optical Music Recognition-Software soll eine linienlose Notation aus einem der ältesten erhaltenen Quellen mit Notationszeichen, dem Antiphonar Hartker aus der Benediktinerabtei St. Gallen (Schweiz), welches heute in zwei Bänden in der Stiftsbibliothek in St. Gallen aufbewahrt wird, erfassen. Aufgrund der handgeschriebenen, linienlosen Notation stellt dieser Gregorianische Gesang den Forscher vor viele Herausforderungen. Das Werk umfasst über 300 verschiedene Neumenzeichen und ihre Notation, die mit Hilfe der Music Encoding Initiative (MEI) erfasst und beschrieben werden sollen. Der folgende Artikel beschreibt den Prozess der Adaptierung, um die MEI auf die Notation von Neumen ohne Notenlinien anzuwenden. Beschrieben werden Eigenschaften der Neumennotation, um zu verdeutlichen, wo die Herausforderungen dieser Arbeit liegen sowie die Funktionsweise des Classifiers, einer Art digitalen Neumenwörterbuchs.\n
\n\n\n
\n\n\n
\n \n\n \n \n Kallionpää, M.; Greenhalgh, C.; Hazzard, A.; Weigl, D. M.; Page, K. R.; and Benford, S.\n\n\n \n \n \n \n \n Composing and Realising a Game-Like Performance for Disklavier and Electronics.\n \n \n \n \n\n\n \n\n\n\n In Erkut, C., editor(s), NIME 2017: New Interfaces for Musical Expression, Copenhagen, 15–18 May 2017: Proceedings, pages 464–469, May 2017. \n \n\n\n\n
\n\n\n\n \n \n \"ComposingPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Kallionpaa_et_al_2017,\n booktitle = {NIME 2017: New Interfaces for Musical Expression, Copenhagen, 15--18 May 2017: Proceedings},\n editor = {Erkut, Cumhur},\n month = {May},\n title = {Composing and Realising a Game-Like Performance for Disklavier and Electronics},\n author = {Maria Kallionp{\\"a}{\\"a} and Chris Greenhalgh and Adrian Hazzard and David M. Weigl and Kevin R. Page and Steve Benford},\n year = {2017},\n pages = {464--469},\n url = {http://eprints.nottingham.ac.uk/44529/},\n abstract = {"Climb!" is a musical composition that combines the ideas of a classical virtuoso piece and a computer game. We present a case study of the composition process and realization of "Climb!", written for Disklavier and a digital interactive engine, which was co-developed together with the musical score. Specifically, the engine combines a system for recognising and responding to musical trigger phrases along with a dynamic digital score renderer. This tool chain allows for the composer's original scoring to include notational elements such as trigger phrases to be automatically extracted to auto-configure the engine for live performance. We reflect holistically on the development process to date and highlight the emerging challenges and opportunities. For example, this includes the potential for further developing the workflow around the scoring process and the ways in which support for musical triggers has shaped the compositional approach.}\n}\n\n\n
\n
\n\n\n
\n \"Climb!\" is a musical composition that combines the ideas of a classical virtuoso piece and a computer game. We present a case study of the composition process and realization of \"Climb!\", written for Disklavier and a digital interactive engine, which was co-developed together with the musical score. Specifically, the engine combines a system for recognising and responding to musical trigger phrases along with a dynamic digital score renderer. This tool chain allows for the composer's original scoring to include notational elements such as trigger phrases to be automatically extracted to auto-configure the engine for live performance. We reflect holistically on the development process to date and highlight the emerging challenges and opportunities. For example, this includes the potential for further developing the workflow around the scoring process and the ways in which support for musical triggers has shaped the compositional approach.\n
\n\n\n
\n\n\n
\n \n\n \n \n Leon, M.\n\n\n \n \n \n \n \n Encoding Non-Standard Forms of Music Notation Using MEI (May 2016).\n \n \n \n \n\n\n \n\n\n\n 2017.\n Music & Computer Science, University of Virginia, Class of 2017. Project for Student Digital Humanities Fellowship.\n\n\n\n
\n\n\n\n \n \n \"EncodingPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{Leon_2017,\n abstract = {Since the 1950’s, computers have been utilized heavily to further the study of music. Now, music is conceived, recorded, produced, stored, and consumed almost entirely with the assistance of computers. Though music and computers are deeply intertwined in the modern age, there is still a disconnect between music notation and computer representation. As an avid lover of both music and computer science, I became intrigued with this void in computer music. What is the proper way to store and symbolize musical scores? Is there a way to encode scores that do not follow standardized rules of notation? Inspired by both the old and new methods of computer music composition and technology, this project seeks to overcome the boundaries between music notation and digital encoding. […] Using a combination of Python, MEI, and JavaScript, I detail below a method for the encoding and representation of graphic musical notation as well as the creation of an interactive score utilizing this encoding.},\n author = {Leon, Matthew},\n year = {2017},\n title = {Encoding Non-Standard Forms of Music Notation Using MEI (May 2016)},\n url = {http://www4.iath.virginia.edu/mei/ML/encoding.pdf},\n note = {Music & Computer Science, University of Virginia, Class of 2017. Project for Student Digital Humanities Fellowship.}\n}\n\n\n
\n
\n\n\n
\n Since the 1950’s, computers have been utilized heavily to further the study of music. Now, music is conceived, recorded, produced, stored, and consumed almost entirely with the assistance of computers. Though music and computers are deeply intertwined in the modern age, there is still a disconnect between music notation and computer representation. As an avid lover of both music and computer science, I became intrigued with this void in computer music. What is the proper way to store and symbolize musical scores? Is there a way to encode scores that do not follow standardized rules of notation? Inspired by both the old and new methods of computer music composition and technology, this project seeks to overcome the boundaries between music notation and digital encoding. […] Using a combination of Python, MEI, and JavaScript, I detail below a method for the encoding and representation of graphic musical notation as well as the creation of an interactive score utilizing this encoding.\n
\n\n\n
\n\n\n
\n \n\n \n \n Merchán-Sánchez-Jara, J.; García, J. A. C.; and Díaz, R. G.\n\n\n \n \n \n \n Towards a Hypermedia Model for Digital Scholarly Edition of Musical Texts Based on MEI (Music Encoding Initiative) Standard: Integration of Hidden Traditions Within Social Editing Paradigm.\n \n \n \n\n\n \n\n\n\n In Proceedings of the 5th International Conference on Technological Ecosystems for Enhancing Multiculturality, TEEM 2017, Cádiz, Spain, October 18–20, 2017, of ACM International Conference Proceeding Series, pages 99:1–99:8, New York, NY, 2017. Association for Computing Machinery\n Article No. 99\n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Merchan-Sanchez-Jara_2017,\n abstract = {Digital scholarly editions are substantially modifying the way musical editions have been thought and conceptualized over long periods of time. Its hypermedia capabilities, and its multi-layered structure makes it possible to put in context different sources and testimonies in a virtual space where all objects are semantically related, as well as to take into account explicit distinctions about original sources, related historical information, or editorial interventions. Music digital scholarly editions embodied an interactive nature allowing users to choose from different outputs or reading paths on the basis of different purposes; namely musicological study, learning/teaching activities or performing.\n\nIn this context, this Ph.D. dissertation aims to develop a theoretical model for the integration of performing variants (technical and/or expressive) that are transmitted orally or through informal channels (marks, notes or text annotations), and usually from teacher to student, within a particular stylistic or interpretive school. The new standards for encoding musical documents like MEI, allow incorporating this information as superposed layers, explicitly differentiated, to the original sources and testimonies. The proposed model is developed within the so-called social editing paradigm, which postulates the integration of some of the 2.0 Web characteristics as the collaborative production of knowledge within the academic editing processes. 
These new editing practices allow the integration of work's related knowledge, that circulates outside the formal editing and publication circuits, within the scholarly edition.\n},\n author = {Merch\\'{a}n-S\\'{a}nchez-Jara, Javier and Garc\\'{\\i}a, Jos{\\'e} Antonio Cord\\'{o}n and D\\'{\\i}az, Raquel G\\'{o}mez},\n title = {Towards a Hypermedia Model for Digital Scholarly Edition of Musical Texts Based on MEI (Music Encoding Initiative) Standard: Integration of Hidden Traditions Within Social Editing Paradigm},\n booktitle = {Proceedings of the 5th International Conference on Technological Ecosystems for Enhancing Multiculturality, TEEM 2017, Cádiz, Spain, October 18–20, 2017},\n series = {ACM International Conference Proceeding Series},\n year = {2017},\n isbn = {978-1-4503-5386-1},\n location = {C\\ádiz, Spain},\n pages = {99:1--99:8},\n note = {Article No. 99},\n numpages = {8},\n doi = {10.1145/3144826.3145446},\n acmid = {3145446},\n publisher = {{Association for Computing Machinery}},\n address = {New York, NY}\n}\n\n\n
\n
\n\n\n
\n Digital scholarly editions are substantially modifying the way musical editions have been thought and conceptualized over long periods of time. Its hypermedia capabilities, and its multi-layered structure makes it possible to put in context different sources and testimonies in a virtual space where all objects are semantically related, as well as to take into account explicit distinctions about original sources, related historical information, or editorial interventions. Music digital scholarly editions embodied an interactive nature allowing users to choose from different outputs or reading paths on the basis of different purposes; namely musicological study, learning/teaching activities or performing. In this context, this Ph.D. dissertation aims to develop a theoretical model for the integration of performing variants (technical and/or expressive) that are transmitted orally or through informal channels (marks, notes or text annotations), and usually from teacher to student, within a particular stylistic or interpretive school. The new standards for encoding musical documents like MEI, allow incorporating this information as superposed layers, explicitly differentiated, to the original sources and testimonies. The proposed model is developed within the so-called social editing paradigm, which postulates the integration of some of the 2.0 Web characteristics as the collaborative production of knowledge within the academic editing processes. These new editing practices allow the integration of work's related knowledge, that circulates outside the formal editing and publication circuits, within the scholarly edition. \n
\n\n\n
\n\n\n
\n \n\n \n \n Weigl, D. M.; and Page, K. R.\n\n\n \n \n \n \n \n Dynamic Semantic Music Notation.\n \n \n \n \n\n\n \n\n\n\n In Blomqvist, E.; Hose, K.; Paulheim, H.; Lawrynowicz, A.; Ciravegna, F.; and Hartig, O., editor(s), The Semantic Web: ESWC 2017 Satellite Events, pages 31–34, Cham, 2017. Springer International Publishing\n \n\n\n\n
\n\n\n\n \n \n \"DynamicPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 5 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{weigl_page_2017a,\n author = {Weigl, David M. and Page, Kevin R.},\n editor = {Blomqvist, Eva and Hose, Katja and Paulheim, Heiko and Lawrynowicz, Agnieszka and Ciravegna, Fabio and Hartig, Olaf},\n title = {Dynamic Semantic Music Notation},\n booktitle = {The Semantic Web: ESWC 2017 Satellite Events},\n year = {2017},\n publisher = {Springer International Publishing},\n address = {Cham},\n pages = {31--34},\n abstract = {The Music Encoding Initiative (MEI) XML schema expresses musical structure addressing score elements at musically meaningful levels of granularity (e.g., individual systems, measures, or notes). While this provides a comprehensive representation of music content, only concepts and relationships provided by the MEI schema can be encoded. Here, we present our Music Encoding and Linked Data (MELD) framework which applies RDF Web Annotations to targetted portions of the MEI structure. Concepts and relationships from the Semantic Web can be included alongside MEI in an expanded musical knowledge graph. We have implemented a music performance scenario which collects, distributes, and displays semantic annotations, enhancing a digital musical score used by performers in a live music jam session.},\n isbn = {978-3-319-70407-4},\n url = {https://link.springer.com/content/pdf/10.1007%2F978-3-319-70407-4_7.pdf},\n doi = {10.1007/978-3-319-70407-4_7}\n}\n\n
\n
\n\n\n
\n The Music Encoding Initiative (MEI) XML schema expresses musical structure addressing score elements at musically meaningful levels of granularity (e.g., individual systems, measures, or notes). While this provides a comprehensive representation of music content, only concepts and relationships provided by the MEI schema can be encoded. Here, we present our Music Encoding and Linked Data (MELD) framework which applies RDF Web Annotations to targetted portions of the MEI structure. Concepts and relationships from the Semantic Web can be included alongside MEI in an expanded musical knowledge graph. We have implemented a music performance scenario which collects, distributes, and displays semantic annotations, enhancing a digital musical score used by performers in a live music jam session.\n
\n\n\n
\n\n\n
\n \n\n \n \n Weigl, D. M.; and Page, K.\n\n\n \n \n \n \n \n A Framework for Distributed Semantic Annotation of Musical Score: \"Take it to the Bridge!\".\n \n \n \n \n\n\n \n\n\n\n In Proceedings of the 18th International Society for Music Information Retrieval Conference, ISMIR 2017, Suzhou, China, October 23–27, 2017, 2017. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{weigl_page_2017b,\n abstract = {Music notation expresses performance instructions in a way commonly understood by musicians, but printed paper parts are limited to encodings of static, a priori knowledge. In this paper we present a platform for multi-way communication between collaborating musicians through the dynamic modification of digital parts: the Music Encoding and Linked Data (MELD) framework for distributed real-time annotation of digital music scores. MELD users and software agents create semantic annotations of music concepts and relationships, which are associated with musical structure specified by the Music Encoding Initiative schema (MEI). Annotations are expressed in RDF, allowing alternative music vocabularies (e.g., popular vs. classical music structures) to be applied. The same underlying framework retrieves, distributes, and processes information that addresses semantically distinguishable music elements. Further knowledge is incorporated from external sources through the use of Linked Data. The RDF is also used to match annotation types and contexts to rendering actions which display the annotations upon the digital score. Here, we present a MELD implementation and deployment which augments the digital music scores used by musicians in a group performance, collaboratively changing the sequence within and between pieces in a set list.},\n year = {2017},\n url = {https://ismir2017.smcnus.org/wp-content/uploads/2017/10/190_Paper.pdf},\n title = {A Framework for Distributed Semantic Annotation of Musical Score: "Take it to the Bridge!"},\n author = {Weigl, David M. and Page, Kevin},\n booktitle = {Proceedings of the 18th International Society for Music Information Retrieval Conference, ISMIR 2017, Suzhou, China, October 23--27, 2017},\n isbn = {978-981-11-5179-8}\n}\n\n\n
\n
\n\n\n
\n Music notation expresses performance instructions in a way commonly understood by musicians, but printed paper parts are limited to encodings of static, a priori knowledge. In this paper we present a platform for multi-way communication between collaborating musicians through the dynamic modification of digital parts: the Music Encoding and Linked Data (MELD) framework for distributed real-time annotation of digital music scores. MELD users and software agents create semantic annotations of music concepts and relationships, which are associated with musical structure specified by the Music Encoding Initiative schema (MEI). Annotations are expressed in RDF, allowing alternative music vocabularies (e.g., popular vs. classical music structures) to be applied. The same underlying framework retrieves, distributes, and processes information that addresses semantically distinguishable music elements. Further knowledge is incorporated from external sources through the use of Linked Data. The RDF is also used to match annotation types and contexts to rendering actions which display the annotations upon the digital score. Here, we present a MELD implementation and deployment which augments the digital music scores used by musicians in a group performance, collaboratively changing the sequence within and between pieces in a set list.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2016\n \n \n (16)\n \n \n
\n
\n \n \n
\n \n\n \n \n Bell, E.; and Pugin, L.\n\n\n \n \n \n \n Approaches to Handwritten Conductor Annotation Extraction in Musical Scores.\n \n \n \n\n\n \n\n\n\n In Fields, B.; and Page, K., editor(s), DLfM 2016. Proceedings of the 3rd International Workshop on Digital Libraries for Musicology, of ACM International Conference Proceeding Series, pages 33–36, New York, NY, 2016. Association for Computing Machinery\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Bell_2016,\n abstract = {Conductor copies of musical scores are typically rich in handwritten annotations. Ongoing archival efforts to digitize orchestral conductors' scores have made scanned copies of hundreds of these annotated scores available in digital formats.\n\nThe extraction of handwritten annotations from digitized printed documents is a difficult task for computer vision, with most approaches focusing on the extraction of handwritten text. However, conductors' annotation practices provide us with at least two affordances, which make the task more tractable in the musical domain.\n\nFirst, many conductors opt to mark their scores using colored pencils, which contrast with the black and white print of sheet music. Consequently, we show promising results when using color separation techniques alone to recover handwritten annotations from conductors' scores.\n\nWe also compare annotated scores to unannotated copies and use a printed sheet music comparison tool to recover handwritten annotations as additions to the clean copy. We then investigate the use of both of these techniques in a combined method, which improves the results of the color separation technique.\n\nThese techniques are demonstrated using a sample of orchestral scores annotated by professional conductors of the New York Philharmonic. Handwritten annotation extraction in musical scores has applications to the systematic investigation of score annotation practices by performers, annotator attribution, and to the interactive presentation of annotated scores, which we briefly discuss.},\n author = {Bell, Eamonn and Pugin, Laurent},\n title = {Approaches to Handwritten Conductor Annotation Extraction in Musical Scores},\n pages = {33–36},\n publisher = {{Association for Computing Machinery}},\n isbn = {978-1-4503-4751-8},\n series = {ACM International Conference Proceeding Series},\n editor = {Fields, Ben and Page, Kevin},\n booktitle = {DLfM 2016. 
Proceedings of the 3rd International Workshop on Digital Libraries for Musicology},\n year = {2016},\n address = {New York, NY},\n doi = {10.1145/2970044.2970053}\n}\n\n\n
\n
\n\n\n
\n Conductor copies of musical scores are typically rich in handwritten annotations. Ongoing archival efforts to digitize orchestral conductors' scores have made scanned copies of hundreds of these annotated scores available in digital formats. The extraction of handwritten annotations from digitized printed documents is a difficult task for computer vision, with most approaches focusing on the extraction of handwritten text. However, conductors' annotation practices provide us with at least two affordances, which make the task more tractable in the musical domain. First, many conductors opt to mark their scores using colored pencils, which contrast with the black and white print of sheet music. Consequently, we show promising results when using color separation techniques alone to recover handwritten annotations from conductors' scores. We also compare annotated scores to unannotated copies and use a printed sheet music comparison tool to recover handwritten annotations as additions to the clean copy. We then investigate the use of both of these techniques in a combined method, which improves the results of the color separation technique. These techniques are demonstrated using a sample of orchestral scores annotated by professional conductors of the New York Philharmonic. Handwritten annotation extraction in musical scores has applications to the systematic investigation of score annotation practices by performers, annotator attribution, and to the interactive presentation of annotated scores, which we briefly discuss.\n
\n\n\n
\n\n\n
\n \n\n \n \n Byrd, D. A.; and Isaacson, E.\n\n\n \n \n \n \n \n A Music Representation Requirement Specification for Academia.\n \n \n \n \n\n\n \n\n\n\n 2016.\n Revised version of the 2003 paper in Computer Music Journal\n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{Byrd_2016,\n abstract = {},\n author = {Byrd, Donald A. and Isaacson, Eric},\n year = {2016},\n title = {A Music Representation Requirement Specification for Academia},\n url = {http://homes.soic.indiana.edu/donbyrd/Papers/MusicRepReqForAcad.doc},\n originalyear = {2003},\n note = {Revised version of the 2003 paper in Computer Music Journal}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n Crawford, T.; and Lewis, R.\n\n\n \n \n \n \n \n Review: Music Encoding Initiative.\n \n \n \n \n\n\n \n\n\n\n Journal of the American Musicological Society, 69(1): 273–285. 2016.\n \n\n\n\n
\n\n\n\n \n \n \"Review:Paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Crawford_2016,\n abstract = {It will not have escaped the notice of many readers of this Journal that a number of ambitious projects in historical musicology with a major IT component have received generous grant funding in recent years. Underpinning each of these projects is the music-encoding standard known as the Music Encoding Initiative (MEI). […] Clearly MEI is here to stay. In this report we aim to give a sketch of its main features, which potentially enable new modes of music research, and a hint of its impact on the discipline of musicology.},\n author = {Crawford, Tim and Lewis, Richard},\n year = {2016},\n title = {Review: Music Encoding Initiative},\n url = {https://jams.ucpress.edu/content/69/1/273.full.pdf},\n pages = {273–285},\n volume = {69},\n number = {1},\n journal = {Journal of the American Musicological Society},\n doi = {10.1525/jams.2016.69.1.273}\n}\n\n\n
\n
\n\n\n
\n It will not have escaped the notice of many readers of this Journal that a number of ambitious projects in historical musicology with a major IT component have received generous grant funding in recent years. Underpinning each of these projects is the music-encoding standard known as the Music Encoding Initiative (MEI). […] Clearly MEI is here to stay. In this report we aim to give a sketch of its main features, which potentially enable new modes of music research, and a hint of its impact on the discipline of musicology.\n
\n\n\n
\n\n\n
\n \n\n \n \n Destandau, M.\n\n\n \n \n \n \n \n La MEI dans tous ses états. La Music Encoding Initiative, de l'encodage aux usages.\n \n \n \n \n\n\n \n\n\n\n Master's thesis, Université de Lille 3, Lille, France, 2016.\n \n\n\n\n
\n\n\n\n \n \n \"LaPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@mastersthesis{Destandau_2016,\n abstract = {A l’heure du numérique, les pratiques musicales se transforment et les objets qui les véhiculent aussi. Dans ce contexte, ce mémoire étudie la façon dont la Music Encoding Initiative, un format d’encodage pour la musique notée, interagit avec les usages. Il montre que la définition du modèle de description suppose une bonne connaissance du domaine qu’il représente, et un positionnement clair ; que les évolutions du modèle pour s’adapter à de nouvelles pratiques questionnent sa cohérence ; mais que cette flexibilité est pourtant indispensable car c’est elle qui rend le modèle vivant et permet de fédérer autour de lui une communauté, qui invente à son tour de nouvelles applications},\n author = {Destandau, Marie},\n year = {2016},\n title = {La MEI dans tous ses {\\'e}tats. La Music Encoding Initiative, de l'encodage aux usages},\n url = {http://www.pas-sages.org/_preview/master/memoireMEI-2016-09-15-5.pdf},\n address = {Lille, France},\n school = {{Universit{\\'e} de Lille 3}},\n type = {Master's thesis}\n}\n\n\n
\n
\n\n\n
\n A l’heure du numérique, les pratiques musicales se transforment et les objets qui les véhiculent aussi. Dans ce contexte, ce mémoire étudie la façon dont la Music Encoding Initiative, un format d’encodage pour la musique notée, interagit avec les usages. Il montre que la définition du modèle de description suppose une bonne connaissance du domaine qu’il représente, et un positionnement clair ; que les évolutions du modèle pour s’adapter à de nouvelles pratiques questionnent sa cohérence ; mais que cette flexibilité est pourtant indispensable car c’est elle qui rend le modèle vivant et permet de fédérer autour de lui une communauté, qui invente à son tour de nouvelles applications\n
\n\n\n
\n\n\n
\n \n\n \n \n Devaney, J.; and Léveillé Gauvin, H.\n\n\n \n \n \n \n Representing and Linking Music Performance Data with Score Information.\n \n \n \n\n\n \n\n\n\n In Fields, B.; and Page, K., editor(s), DLfM 2016. Proceedings of the 3rd International Workshop on Digital Libraries for Musicology, of ACM International Conference Proceeding Series, pages 1–8, New York, NY, 2016. Association for Computing Machinery\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Devaney_2016,\n abstract = {This paper argues for the need to develop a representation for music performance data that is linked with corresponding score information at the note, beat, and measure levels. Building on the results of a survey of music scholars about their music performance data encoding needs, we propose best-practices for encoding perceptually relevant descriptors of the timing, pitch, loudness, and timbral aspects of performance. We are specifically interested in using descriptors that are sufficiently generalized that multiple performances of the same piece can be directly compared with one another. This paper also proposes a specific representation for encoding performance data and presents prototypes of this representation in both Humdrum and Music Encoding Initiative (MEI) formats.},\n author = {Devaney, Johanna and {L{\\'e}veill{\\'e} Gauvin}, Hubert},\n title = {Representing and Linking Music Performance Data with Score Information},\n pages = {1–8},\n publisher = {{Association for Computing Machinery}},\n isbn = {978-1-4503-4751-8},\n series = {ACM International Conference Proceeding Series},\n editor = {Fields, Ben and Page, Kevin},\n booktitle = {DLfM 2016. Proceedings of the 3rd International Workshop on Digital Libraries for Musicology},\n year = {2016},\n address = {New York, NY},\n doi = {10.1145/2970044.2970052}\n}\n\n
\n
\n\n\n
\n This paper argues for the need to develop a representation for music performance data that is linked with corresponding score information at the note, beat, and measure levels. Building on the results of a survey of music scholars about their music performance data encoding needs, we propose best-practices for encoding perceptually relevant descriptors of the timing, pitch, loudness, and timbral aspects of performance. We are specifically interested in using descriptors that are sufficiently generalized that multiple performances of the same piece can be directly compared with one another. This paper also proposes a specific representation for encoding performance data and presents prototypes of this representation in both Humdrum and Music Encoding Initiative (MEI) formats.\n
\n\n\n
\n\n\n
\n \n\n \n \n Duguid, T.\n\n\n \n \n \n \n \n MuSO. Aggregation and Peer Review in Music. NEH White Paper.\n \n \n \n \n\n\n \n\n\n\n Technical Report Texas A&M University, College Station, TX, 2016.\n \n\n\n\n
\n\n\n\n \n \n \"MuSO.Paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@techreport{Duguid_2016,\n abstract = {},\n author = {Duguid, Timothy},\n date = {31.08.2016},\n year = {2016},\n title = {MuSO. Aggregation and Peer Review in Music. NEH White Paper},\n url = {http://oaktrust.library.tamu.edu/bitstream/handle/1969.1/157548/NEH-White-Paper.pdf},\n address = {College Station, TX},\n institution = {{Texas A{\\&}M University}}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n Laplante, A.; and Fujinaga, I.\n\n\n \n \n \n \n Digitizing Musical Scores: Challenges and Opportunities for Libraries.\n \n \n \n\n\n \n\n\n\n In Fields, B.; and Page, K., editor(s), DLfM 2016. Proceedings of the 3rd International Workshop on Digital Libraries for Musicology, of ACM International Conference Proceeding Series, pages 45–48, New York, NY, 2016. Association for Computing Machinery\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Laplante_2016,\n abstract = {Musical scores and manuscripts are essential resources for music theory research. Although many libraries are such documents from their collections, these online resources are dispersed and the functionalities for exploiting their content remain limited. In this paper, we present a qualitative study based on interviews with librarians on the challenges libraries of all types face when they wish to digitize musical scores. In the light of a literature review on the role libraries can play in supporting digital humanities research, we conclude by briefly discussing the opportunities new technologies for optical music recognition and computer-aided music analysis could create for libraries.},\n author = {Laplante, Audrey and Fujinaga, Ichiro},\n title = {Digitizing Musical Scores: Challenges and Opportunities for Libraries},\n pages = {45–48},\n publisher = {{Association for Computing Machinery}},\n isbn = {978-1-4503-4751-8},\n series = {ACM International Conference Proceeding Series},\n editor = {Fields, Ben and Page, Kevin},\n booktitle = {DLfM 2016. Proceedings of the 3rd International Workshop on Digital Libraries for Musicology},\n year = {2016},\n address = {New York, NY},\n doi = {10.1145/2970044.2970055}\n}\n\n\n
\n
\n\n\n
\n Musical scores and manuscripts are essential resources for music theory research. Although many libraries are digitizing such documents from their collections, these online resources are dispersed and the functionalities for exploiting their content remain limited. In this paper, we present a qualitative study based on interviews with librarians on the challenges libraries of all types face when they wish to digitize musical scores. In the light of a literature review on the role libraries can play in supporting digital humanities research, we conclude by briefly discussing the opportunities new technologies for optical music recognition and computer-aided music analysis could create for libraries.\n
\n\n\n
\n\n\n
\n \n\n \n \n Leblond Martin, S.\n\n\n \n \n \n \n Musiques orales, leur notation musicale et l'encodage numérique MEI – Music Encoding Initiative – de cette notation.\n \n \n \n\n\n \n\n\n\n In Leblond Martin, S., editor(s), Musiques orales, notations musicales et encodages numériques, pages 220–243. Les Éditions de l'Immatériel, Paris, 2016.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{LeblondMartin_2016b,\n abstract = {},\n author = {{Leblond Martin}, Sylvaine},\n title = {Musiques orales, leur notation musicale et l'encodage num{\\'e}rique MEI – Music Encoding Initiative – de cette notation},\n pages = {220–243},\n publisher = {{Les {\\'E}ditions de l'Immat{\\'e}riel}},\n isbn = {979-1091636049},\n editor = {{Leblond Martin}, Sylvaine},\n booktitle = {Musiques orales, notations musicales et encodages num{\\'e}riques},\n year = {2016},\n address = {Paris}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n McAulay, K.\n\n\n \n \n \n \n Show Me a Strathspey. Taking Steps to Digitize Tune Collections.\n \n \n \n\n\n \n\n\n\n Reference Reviews, 30(7): 1–6. 2016.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{McAulay_2016,\n abstract = {\\textit{Purpose} The present paper describes an Arts and Humanities Research Council (AHRC) research project into Scottish fiddle music and the important considerations of music digitization, access and discovery in designing the website that will be one of the project's enduring outcomes.\n\n\\textit{Design/methodology/approach} The paper is a general review of existing online indices to music repertoires and some of the general problems associated with selecting metadata and indexing such material and is a survey of the various recent and contemporary projects into the digital encoding of musical notation for online use.\n\n\\textit{Findings} The questions addressed during the design of the Bass Culture project database serve to highlight the importance of cooperation between musicologists, information specialists and computer scientists, and the benefits of having researchers with strengths in more than one of these disciplines. The Music Encoding Initiative proves an effective means of providing digital access to the Scottish fiddle tune repertoire.\n\n\\textit{Originality/value} The digital encoding of music notation is still comparatively cutting-edge; the Bass Culture project is thus a useful exemplar for interdisciplinary collaboration between musicologists, information specialists and computer scientists, and it addresses issues which are likely to be applicable to future projects of this nature.},\n author = {McAulay, Karen},\n year = {2016},\n title = {Show Me a Strathspey. Taking Steps to Digitize Tune Collections},\n pages = {1–6},\n volume = {30},\n number = {7},\n issn = {0950-4125},\n journal = {Reference Reviews},\n doi = {10.1108/RR-03-2015-0073}\n}\n\n\n
\n
\n\n\n
\n Purpose The present paper describes an Arts and Humanities Research Council (AHRC) research project into Scottish fiddle music and the important considerations of music digitization, access and discovery in designing the website that will be one of the project's enduring outcomes. Design/methodology/approach The paper is a general review of existing online indices to music repertoires and some of the general problems associated with selecting metadata and indexing such material and is a survey of the various recent and contemporary projects into the digital encoding of musical notation for online use. Findings The questions addressed during the design of the Bass Culture project database serve to highlight the importance of cooperation between musicologists, information specialists and computer scientists, and the benefits of having researchers with strengths in more than one of these disciplines. The Music Encoding Initiative proves an effective means of providing digital access to the Scottish fiddle tune repertoire. Originality/value The digital encoding of music notation is still comparatively cutting-edge; the Bass Culture project is thus a useful exemplar for interdisciplinary collaboration between musicologists, information specialists and computer scientists, and it addresses issues which are likely to be applicable to future projects of this nature.\n
\n\n\n
\n\n\n
\n \n\n \n \n McKay, C.; Tenaglia, T.; and Fujinaga, I.\n\n\n \n \n \n \n \n JSymbolic2. Extracting Features from Symbolic Music Representations.\n \n \n \n \n\n\n \n\n\n\n In Mandel, M. I.; Devaney, J.; Turnbull, D.; and Tzanetakis, G., editor(s), Proceedings of the 17th International Society for Music Information Retrieval Conference, ISMIR 2016, New York City, United States, August 7-11, 2016, volume Late-Breaking Session, 2016. \n \n\n\n\n
\n\n\n\n \n \n \"JSymbolic2.Paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{McKay_2016,\n abstract = {This demo presents the jSymbolic2 software for extracting features from symbolic music representations. jSymbolic2 is a tool for assisting musicologists and music theorists in large-scale empirical research projects, and for directly performing the kinds of machine learning based classification and similarity research well-known to the MIR community.},\n author = {McKay, Cory and Tenaglia, Tristano and Fujinaga, Ichiro},\n title = {JSymbolic2. Extracting Features from Symbolic Music Representations},\n url = {https://wp.nyu.edu/ismir2016/wp-content/uploads/sites/2294/2016/08/mckay-jsymbolic2.pdf},\n volume = {Late-Breaking Session},\n isbn = {978-0-692-75506-8},\n editor = {Mandel, Michael I. and Devaney, Johanna and Turnbull, Douglas and Tzanetakis, George},\n booktitle = {Proceedings of the 17th International Society for Music Information Retrieval Conference, ISMIR 2016, New York City, United States, August 7-11, 2016},\n year = {2016}\n}\n\n\n
\n
\n\n\n
\n This demo presents the jSymbolic2 software for extracting features from symbolic music representations. jSymbolic2 is a tool for assisting musicologists and music theorists in large-scale empirical research projects, and for directly performing the kinds of machine learning based classification and similarity research well-known to the MIR community.\n
\n\n\n
\n\n\n
\n \n\n \n \n Pugin, L.\n\n\n \n \n \n \n Encodage de documents musicaux avec la MEI.\n \n \n \n\n\n \n\n\n\n In Leblond Martin, S., editor(s), Musiques orales, notations musicales et encodages numériques, pages 162–175. Les Éditions de l'Immatériel, Paris, 2016.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{Pugin_2016,\n abstract = {},\n author = {Pugin, Laurent},\n title = {Encodage de documents musicaux avec la MEI},\n pages = {162–175},\n publisher = {{Les {\\'E}ditions de l'Immat{\\'e}riel}},\n isbn = {979-1091636049},\n editor = {{Leblond Martin}, Sylvaine},\n booktitle = {Musiques orales, notations musicales et encodages num{\\'e}riques},\n year = {2016},\n address = {Paris}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n Rizo, D.; and Marsden, A.\n\n\n \n \n \n \n A Standard Format Proposal for Hierarchical Analyses and Representations.\n \n \n \n\n\n \n\n\n\n In Fields, B.; and Page, K., editor(s), DLfM 2016. Proceedings of the 3rd International Workshop on Digital Libraries for Musicology, of ACM International Conference Proceeding Series, pages 25–32, New York, NY, 2016. Association for Computing Machinery\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Rizo_2016,\n abstract = {In the realm of digital musicology, standardizations efforts to date have mostly concentrated on the representation of music. Analyses of music are increasingly being generated or communicated by digital means. We demonstrate that the same arguments for the desirability of standardization in the representation of music apply also to the representation of analyses of music: proper preservation, sharing of data, and facilitation of digital processing. We concentrate here on analyses which can be described as hierarchical and show that this covers a broad range of existing analytical formats. We propose an extension of MEI (Music Encoding Initiative) to allow the encoding of analyses unambiguously associated with and aligned to a representation of the music analysed, making use of existing mechanisms within MEI's parent TEI (Text Encoding Initiative) for the representation of trees and graphs.},\n author = {Rizo, David and Marsden, Alan},\n title = {A Standard Format Proposal for Hierarchical Analyses and Representations},\n pages = {25–32},\n publisher = {{Association for Computing Machinery}},\n isbn = {978-1-4503-4751-8},\n series = {ACM International Conference Proceeding Series},\n editor = {Fields, Ben and Page, Kevin},\n booktitle = {DLfM 2016. Proceedings of the 3rd International Workshop on Digital Libraries for Musicology},\n year = {2016},\n address = {New York, NY},\n doi = {10.1145/2970044.2970046}\n}\n\n\n
\n
\n\n\n
\n In the realm of digital musicology, standardizations efforts to date have mostly concentrated on the representation of music. Analyses of music are increasingly being generated or communicated by digital means. We demonstrate that the same arguments for the desirability of standardization in the representation of music apply also to the representation of analyses of music: proper preservation, sharing of data, and facilitation of digital processing. We concentrate here on analyses which can be described as hierarchical and show that this covers a broad range of existing analytical formats. We propose an extension of MEI (Music Encoding Initiative) to allow the encoding of analyses unambiguously associated with and aligned to a representation of the music analysed, making use of existing mechanisms within MEI's parent TEI (Text Encoding Initiative) for the representation of trees and graphs.\n
\n\n\n
\n\n\n
\n \n\n \n \n Roland, P.; and Kepper, J.,\n editors.\n \n\n\n \n \n \n \n \n Music Encoding Conference Proceedings 2013 and 2014.\n \n \n \n \n\n\n \n\n\n\n Bavarian State Library (BSB). 2016.\n \n\n\n\n
\n\n\n\n \n \n \"Music urn\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 4 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@proceedings{Roland_2016,\n abstract = {Conference proceedings of the Music Encoding Conferences 2013 and 2014 with Foreword by Perry D. Roland and Johannes Kepper},\n year = {2016},\n title = {{Music Encoding Conference Proceedings 2013 and 2014}},\n url_URN = {http://nbn-resolving.de/urn:nbn:de:bvb:12-babs2-0000007812},\n publisher = {{Bavarian State Library (BSB)}},\n editor = {Roland, Perry and Kepper, Johannes}\n}\n\n\n
\n
\n\n\n
\n Conference proceedings of the Music Encoding Conferences 2013 and 2014 with Foreword by Perry D. Roland and Johannes Kepper\n
\n\n\n
\n\n\n
\n \n\n \n \n Viglianti, R.\n\n\n \n \n \n \n The Music Addressability API.\n \n \n \n\n\n \n\n\n\n In Fields, B.; and Page, K., editor(s), DLfM 2016. Proceedings of the 3rd International Workshop on Digital Libraries for Musicology, of ACM International Conference Proceeding Series, pages 57–60, New York, NY, 2016. Association for Computing Machinery\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Viglianti_2016,\n abstract = {This paper describes an Application Programming Interface (API) for addressing music notation on the web regardless of the format in which it is stored. This API was created as a method for addressing and extracting specific portions of music notation published in machine-readable formats on the web. Music notation, like text, can be ``addressed'' in new ways in a digital environment, allowing scholars to identify and name structures of various kinds, thus raising such questions as how can one virtually ``circle'' some music notation? How can a machine interpret this ``circling'' to select and retrieve the relevant music notation?\n\nThe API was evaluated by: 1) creating an implementation of the API for documents in the Music Encoding Initiative (MEI) format; and by 2) remodelling a dataset ofmusic analysis statements from the Du Chemin: Lost Voices project (Haverford College) by using the API to connect the analytical statements with the portion of notaiton they refer to. Building this corpus has demonstrated that the Music Addressability API is capable of modelling complex analytical statements containing references to music notation.},\n author = {Viglianti, Raffaele},\n title = {The Music Addressability API},\n pages = {57–60},\n publisher = {{Association for Computing Machinery}},\n isbn = {978-1-4503-4751-8},\n series = {ACM International Conference Proceeding Series},\n editor = {Fields, Ben and Page, Kevin},\n booktitle = {DLfM 2016. Proceedings of the 3rd International Workshop on Digital Libraries for Musicology},\n year = {2016},\n address = {New York, NY},\n doi = {10.1145/2970044.2970056}\n}\n\n\n
\n
\n\n\n
\n This paper describes an Application Programming Interface (API) for addressing music notation on the web regardless of the format in which it is stored. This API was created as a method for addressing and extracting specific portions of music notation published in machine-readable formats on the web. Music notation, like text, can be ``addressed'' in new ways in a digital environment, allowing scholars to identify and name structures of various kinds, thus raising such questions as how can one virtually ``circle'' some music notation? How can a machine interpret this ``circling'' to select and retrieve the relevant music notation? The API was evaluated by: 1) creating an implementation of the API for documents in the Music Encoding Initiative (MEI) format; and by 2) remodelling a dataset of music analysis statements from the Du Chemin: Lost Voices project (Haverford College) by using the API to connect the analytical statements with the portion of notation they refer to. Building this corpus has demonstrated that the Music Addressability API is capable of modelling complex analytical statements containing references to music notation.\n
\n\n\n
\n\n\n
\n \n\n \n \n Weigl, D. M.; and Page, K.\n\n\n \n \n \n \n \n Dynamic Semantic Notation. Jamming Together Music Encoding and Linked Data.\n \n \n \n \n\n\n \n\n\n\n In Mandel, M. I.; Devaney, J.; Turnbull, D.; and Tzanetakis, G., editor(s), Proceedings of the 17th International Society for Music Information Retrieval Conference, ISMIR 2016, New York City, United States, August 7-11, 2016, 2016. \n Late-Breaking Session\n\n\n\n
\n\n\n\n \n \n \"DynamicPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Weigl_2016,\n abstract = {The Music Encoding Initiative (MEI) provides a framework for expressing musical notation that enables the identification (via XML identifiers), and thus addressing, of score elements at various levels of granularity (e.g. individual systems, measures, or notes). Verovio, an open-source MEI renderer that produces beautiful SVG renditions of the score, retains the MEI identifiers and element hierarchy in the produced output, enabling dynamic interactivity with score elements through a web browser. We present a demonstrator that combines these capabilities with semantic technologies including RDF, JSON-LD, SPARQL, and the Open Annotation data model, anchoring into the musical notation by using the MEI XML IDs as fragment identifiers to enable the fine-grained incorporation of musical notation within a web of Linked Data. This fusing of music and semantics affords the creation of rich Digital Music Objects supporting contemporary music consumption and performance.},\n author = {Weigl, David M. and Page, Kevin},\n title = {Dynamic Semantic Notation. Jamming Together Music Encoding and Linked Data},\n url = {https://wp.nyu.edu/ismir2016/wp-content/uploads/sites/2294/2016/08/weigl-dynamic.pdf},\n isbn = {978-0-692-75506-8},\n editor = {Mandel, Michael I. and Devaney, Johanna and Turnbull, Douglas and Tzanetakis, George},\n booktitle = {Proceedings of the 17th International Society for Music Information Retrieval Conference, ISMIR 2016, New York City, United States, August 7-11, 2016},\n year = {2016},\n note = {Late-Breaking Session}\n}\n\n\n
\n
\n\n\n
\n The Music Encoding Initiative (MEI) provides a framework for expressing musical notation that enables the identification (via XML identifiers), and thus addressing, of score elements at various levels of granularity (e.g. individual systems, measures, or notes). Verovio, an open-source MEI renderer that produces beautiful SVG renditions of the score, retains the MEI identifiers and element hierarchy in the produced output, enabling dynamic interactivity with score elements through a web browser. We present a demonstrator that combines these capabilities with semantic technologies including RDF, JSON-LD, SPARQL, and the Open Annotation data model, anchoring into the musical notation by using the MEI XML IDs as fragment identifiers to enable the fine-grained incorporation of musical notation within a web of Linked Data. This fusing of music and semantics affords the creation of rich Digital Music Objects supporting contemporary music consumption and performance.\n
\n\n\n
\n\n\n
\n \n\n \n \n Zitellini, R.; and Pugin, L.\n\n\n \n \n \n \n \n Representing Atypical Music Notation Practices. An Example with Late 17th Century Music.\n \n \n \n \n\n\n \n\n\n\n In Hoadley, R.; Fober, D.; and Nash, C., editor(s), Proceedings of the Second International Conference on Technologies for Music Notation and Representation, TENOR 2016, Cambridge, UK, May 27–29, 2016, pages 71–76, Cambridge, UK, 2016. Anglia Ruskin University\n \n\n\n\n
\n\n\n\n \n \n \"RepresentingPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Zitellini_2016,\n abstract = {From the 17th century to the first decades of the 18th century music notation slowly loses all its mensural influences, becoming virtually identical to what we would consider common modern notation. During these five decades of transformation composers did not just suddenly abandon older notation styles, but they used them alongside ones that would eventually become the standard. Void notation, black notation and uncommon tempi were all mixed together. The scholar preparing modern editions of this music is normally forced to normalise all these atypical notations as many software applications do not support them natively. This paper demonstrates the flexibility of the coding scheme proposed by the Music Encoding Initiative (MEI), and of Verovio, a visualisation library designed for it. The modular approach of these tools means that particular notation systems can be added easily while maintaining compatibility with other encoded notations.},\n author = {Zitellini, Rodolfo and Pugin, Laurent},\n title = {Representing Atypical Music Notation Practices. An Example with Late 17th Century Music},\n url = {http://tenor2016.tenor-conference.org/papers/10_Zitellini_tenor2016.pdf},\n pages = {71–76},\n publisher = {{Anglia Ruskin University}},\n isbn = {978-0-9931461-1-4},\n editor = {Hoadley, Richard and Fober, Dominique and Nash, Chris},\n booktitle = {Proceedings of the Second International Conference on Technologies for Music Notation and Representation, TENOR 2016, Cambridge, UK, May 27–29, 2016},\n year = {2016},\n address = {Cambridge, UK}\n}\n
\n
\n\n\n
\n From the 17th century to the first decades of the 18th century music notation slowly loses all its mensural influences, becoming virtually identical to what we would consider common modern notation. During these five decades of transformation composers did not just suddenly abandon older notation styles, but they used them alongside ones that would eventually become the standard. Void notation, black notation and uncommon tempi were all mixed together. The scholar preparing modern editions of this music is normally forced to normalise all these atypical notations as many software applications do not support them natively. This paper demonstrates the flexibility of the coding scheme proposed by the Music Encoding Initiative (MEI), and of Verovio, a visualisation library designed for it. The modular approach of these tools means that particular notation systems can be added easily while maintaining compatibility with other encoded notations.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2015\n \n \n (5)\n \n \n
\n
\n \n \n
\n \n\n \n \n Duval, E.; van Berchum , M.; Jentzsch, A.; Parra Chicho, G. A.; and Drakos, A.\n\n\n \n \n \n \n \n Musicology of Early Music with Europeana Tools and Services.\n \n \n \n \n\n\n \n\n\n\n In Müller, M.; and Wiering, F., editor(s), Proceedings of the 16th International Society for Music Information Retrieval Conference, ISMIR 2015, Málaga, Spain, October 26-30, 2015, pages 632–638, 2015. \n \n\n\n\n
\n\n\n\n \n \n \"MusicologyPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Duval_2015,\n abstract = {The Europeana repository hosts large collections of digitized music manuscripts and prints. This paper investigates how tools and services for this repository can enable Early Music musicologists to carry out their research in a more effective or efficient way, or to carry out research that is impossible to do without such tools or services. We report on the methodology, user-centered development of a suite of tools that we have integrated loosely, in order to experiment with this specific target audience and an evaluation of the impact that such tools may have on how these musicologists carry out their research. Positive feedback relates to the automation of data sharing between the loosely coupled tools and support for an integrated workflow. Participants in this study wanted to have the ability to work not only with individual items, but also with collections of such items. The use of search facets to filter, and visualization around time and place were positively evaluated, as was the use of Optical Music Recognition and computer-supported analysis of music scores. The musicologists were not convinced of the value of activity streams. They also wanted a less strictly linear organization of their workflow and the ability to not only consume items from the repository, but to also push their research results back into the Europeana repository.},\n author = {Duval, Erik and {van Berchum}, Marnix and Jentzsch, Anja and {Parra Chicho}, Gonzalo Alberto and Drakos, Andreas},\n title = {Musicology of Early Music with Europeana Tools and Services},\n url = {http://ismir2015.uma.es/articles/232_Paper.pdf},\n pages = {632–638},\n isbn = {978-84-606-8853-2},\n editor = {M{\\"u}ller, Meinard and Wiering, Frans},\n booktitle = {Proceedings of the 16th International Society for Music Information Retrieval Conference, ISMIR 2015, M{\\'a}laga, Spain, October 26-30, 2015},\n year = {2015}\n}\n\n\n
\n
\n\n\n
\n The Europeana repository hosts large collections of digitized music manuscripts and prints. This paper investigates how tools and services for this repository can enable Early Music musicologists to carry out their research in a more effective or efficient way, or to carry out research that is impossible to do without such tools or services. We report on the methodology, user-centered development of a suite of tools that we have integrated loosely, in order to experiment with this specific target audience and an evaluation of the impact that such tools may have on how these musicologists carry out their research. Positive feedback relates to the automation of data sharing between the loosely coupled tools and support for an integrated workflow. Participants in this study wanted to have the ability to work not only with individual items, but also with collections of such items. The use of search facets to filter, and visualization around time and place were positively evaluated, as was the use of Optical Music Recognition and computer-supported analysis of music scores. The musicologists were not convinced of the value of activity streams. They also wanted a less strictly linear organization of their workflow and the ability to not only consume items from the repository, but to also push their research results back into the Europeana repository.\n
\n\n\n
\n\n\n
\n \n\n \n \n Lewis, R. J.; Crawford, T.; and Lewis, D.\n\n\n \n \n \n \n \n Exploring Information Retrieval, Semantic Technologies and Workflows for Music Scholarship. The Transforming Musicology Project.\n \n \n \n \n\n\n \n\n\n\n Early Music, 43(4): 635–647. 2015.\n \n\n\n\n
\n\n\n\n \n \n \"ExploringPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Lewis_2015,\n abstract = {Transforming Musicology is a three-year project undertaking musicological research exploring state-of-the-art computational methods in the areas of early modern vocal and instrumental music (mostly for lute), Wagner’s use of leitmotifs, and music as represented in the social media. An essential component of the work involves devising a semantic infrastructure which allows research data, results and methods to be published in a form that enables others to incorporate the research into their own discourse. This includes ways of capturing the processes of musicology in the form of ‘workflows’; in principle, these allow the processes to be repeated systematically using improved data, or on newly discovered sources as they emerge. A large part of the effort of Transforming Musicology (as with any digital research) is concerned with data preparation, which in the early music case described here means dealing with the outputs of optical music recognition software, which inevitably contain errors. This report describes in outline the process of correction and some of the web-based software which has been designed to make this as easy as possible for the musicologist.},\n author = {Lewis, Richard J. and Crawford, Tim and Lewis, David},\n year = {2015},\n title = {Exploring Information Retrieval, Semantic Technologies and Workflows for Music Scholarship. The Transforming Musicology Project},\n url = {http://em.oxfordjournals.org/content/43/4/635.full.pdf+html},\n pages = {635–647},\n volume = {43},\n number = {4},\n journal = {Early Music},\n doi = {10.1093/em/cav073}\n}\n\n\n
\n
\n\n\n
\n Transforming Musicology is a three-year project undertaking musicological research exploring state-of-the-art computational methods in the areas of early modern vocal and instrumental music (mostly for lute), Wagner’s use of leitmotifs, and music as represented in the social media. An essential component of the work involves devising a semantic infrastructure which allows research data, results and methods to be published in a form that enables others to incorporate the research into their own discourse. This includes ways of capturing the processes of musicology in the form of ‘workflows’; in principle, these allow the processes to be repeated systematically using improved data, or on newly discovered sources as they emerge. A large part of the effort of Transforming Musicology (as with any digital research) is concerned with data preparation, which in the early music case described here means dealing with the outputs of optical music recognition software, which inevitably contain errors. This report describes in outline the process of correction and some of the web-based software which has been designed to make this as easy as possible for the musicologist.\n
\n\n\n
\n\n\n
\n \n\n \n \n Pugin, L.\n\n\n \n \n \n \n \n The Challenge of Data in Digital Musicology.\n \n \n \n \n\n\n \n\n\n\n Frontiers in Digital Humanities, 2(4). 2015.\n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Pugin_2015,\n abstract = {},\n author = {Pugin, Laurent},\n year = {2015},\n title = {The Challenge of Data in Digital Musicology},\n url = {https://www.frontiersin.org/articles/10.3389/fdigh.2015.00004/full},\n volume = {2},\n number = {4},\n journal = {Frontiers in Digital Humanities},\n doi = {10.3389/fdigh.2015.00004}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n Sapp, C.\n\n\n \n \n \n \n \n Graphic to Symbolic Representations of Musical Notation.\n \n \n \n \n\n\n \n\n\n\n In Battier, M.; Bresson, J.; Couprie, P.; Davy-Rigaux, C.; Fober, D.; Geslin, Y.; Genevois, H.; Picard, F.; and Tacaille, A., editor(s), Proceedings of the First International Conference on Technologies for Music Notation and Representation, TENOR 2015, Paris, France, May 28-30, 2015, pages 124–132, Paris, 2015. Institut de Recherche en Musicologie\n \n\n\n\n
\n\n\n\n \n \n \"GraphicPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Sapp_2015,\n abstract = {This paper discusses the SCORE data format, a graphically oriented music representation developed in the early 1970's, and how such a representation can be converted into sequential descriptions of music notation. The graphical representation system for the SCORE editor is presented along with case studies for parsing and converting the data into other symbolic music formats such as Dox, Humdrum, MusicXML, MuseData, MEI, and MIDI using scorelib, an open-source code library for parsing SCORE data. Knowledge and understanding of the SCORE format is also useful for OMR (Optical Music Recognition) projects, as it can be used as an intermediate layer between raw image scans and higher-level digital music representation systems.},\n author = {Sapp, Craig},\n title = {Graphic to Symbolic Representations of Musical Notation},\n url = {http://tenor2015.tenor-conference.org/papers/20-Sapp-GraphicToSymbolic.pdf},\n pages = {124–132},\n publisher = {{Institut de Recherche en Musicologie}},\n isbn = {978-2-9552905-0-7},\n editor = {Battier, Marc and Bresson, Jean and Couprie, Pierre and Davy-Rigaux, C{\\'e}cile and Fober, Dominique and Geslin, Yann and Genevois, Hugues and Picard, Fran{\\c{c}}ois and Tacaille, Alice},\n booktitle = {Proceedings of the First International Conference on Technologies for Music Notation and Representation, TENOR 2015, Paris, France, May 28-30, 2015},\n year = {2015},\n address = {Paris},\n doi = {10.5281/zenodo.923829}\n}\n\n\n
\n
\n\n\n
\n This paper discusses the SCORE data format, a graphically oriented music representation developed in the early 1970's, and how such a representation can be converted into sequential descriptions of music notation. The graphical representation system for the SCORE editor is presented along with case studies for parsing and converting the data into other symbolic music formats such as Dox, Humdrum, MusicXML, MuseData, MEI, and MIDI using scorelib, an open-source code library for parsing SCORE data. Knowledge and understanding of the SCORE format is also useful for OMR (Optical Music Recognition) projects, as it can be used as an intermediate layer between raw image scans and higher-level digital music representation systems.\n
\n\n\n
\n\n\n
\n \n\n \n \n Selfridge-Field, E.\n\n\n \n \n \n \n \n Hybrid Critical Editions of Opera. Motives, Milestones, and Quandaries.\n \n \n \n \n\n\n \n\n\n\n Notes, 72(1): 9–22. 2015.\n \n\n\n\n
\n\n\n\n \n \n \"HybridPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{SelfridgeField_2015,\n abstract = {The publication term “hybrid digital edition” came into existence at the end of 2013, with the publication of Salieri’s theatrical divertimento Prima la musica, e poi la parola. It is a hybrid in that its theatritical apparatus is a digital constellation of auxiliary materials consulted in the course of constructing a new edition, while the score is a conventional one. A user can in principle consult the two bilaterally, but the confinement of the critical report to a physical device unsuited to network use has crippled it in some libraries. Since Prima la musica is both the first of a series of other operas presented in the same way, and of a more opened-ended effort to edit works in other genres similarly, it is important to understand how this combination came to be and what its potential advantages are. Since the new model was perceived by many librarians in the U.S. as excessively expensive, a table of prices of recently published opera editions is provided. These editions prove not to be moderately priced, but the costs of opera scores produced in Europe is, on average, much higher than those produced in the U.S.},\n author = {Selfridge-Field, Eleanor},\n year = {2015},\n title = {Hybrid Critical Editions of Opera. Motives, Milestones, and Quandaries},\n url = {https://muse.jhu.edu/journals/notes/v072/72.1.selfridge-field.pdf},\n pages = {9–22},\n volume = {72},\n number = {1},\n journal = {Notes},\n doi = {10.1353/not.2015.0100}\n}\n\n\n
\n
\n\n\n
\n The publication term “hybrid digital edition” came into existence at the end of 2013, with the publication of Salieri’s theatrical divertimento Prima la musica, e poi la parola. It is a hybrid in that its theatrical apparatus is a digital constellation of auxiliary materials consulted in the course of constructing a new edition, while the score is a conventional one. A user can in principle consult the two bilaterally, but the confinement of the critical report to a physical device unsuited to network use has crippled it in some libraries. Since Prima la musica is both the first of a series of other operas presented in the same way, and of a more open-ended effort to edit works in other genres similarly, it is important to understand how this combination came to be and what its potential advantages are. Since the new model was perceived by many librarians in the U.S. as excessively expensive, a table of prices of recently published opera editions is provided. These editions prove not to be moderately priced, but the costs of opera scores produced in Europe is, on average, much higher than those produced in the U.S.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2014\n \n \n (5)\n \n \n
\n
\n \n \n
\n \n\n \n \n Hankinson, A.\n\n\n \n \n \n \n \n Optical Music Recognition Infrastructure for Large-Scale Music Document Analysis.\n \n \n \n \n\n\n \n\n\n\n Ph.D. Thesis, Schulich School of Music, McGill University, Montreal, 2014.\n \n\n\n\n
\n\n\n\n \n \n \"OpticalPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@phdthesis{Hankinson_2014,\n abstract = {},\n author = {Hankinson, Andrew},\n year = {2014},\n title = {Optical Music Recognition Infrastructure for Large-Scale Music Document Analysis},\n url = {http://digitool.library.mcgill.ca/R/-?func=dbin-jump-full&object_id=130291&silo_library=GEN01},\n address = {Montreal},\n school = {{Schulich School of Music, McGill University}},\n type = {PhD thesis}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n Pugin, L.; Zitellini, R.; and Roland, P.\n\n\n \n \n \n \n \n Verovio. A Library for Engraving MEI Music Notation into SVG.\n \n \n \n \n\n\n \n\n\n\n In Wang, H.; Yang, Y.; and Lee, J. H., editor(s), Proceedings of the 15th International Society for Music Information Retrieval Conference, ISMIR 2014, Taipei, Taiwan, October 27-31, 2014, pages 107–112, 2014. International Society for Music Information Retrieval\n \n\n\n\n
\n\n\n\n \n \n \"Verovio.Paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Pugin_2014,\n abstract = {Rendering symbolic music notation is a common component of many MIR applications, and many tools are available for this task. There is, however, a need for a tool that can natively render the Music Encoding Initiative (MEI) notation encodings that are increasingly used in music research projects. In this paper, we present Verovio, a library and toolkit for rendering MEI. A significant advantage of Verovio is that it implements MEI's structure internally, making it the best suited solution for rendering features that make MEI unique. Verovio is designed as a fast, portable, lightweight tool written in pure standard C++ with no dependencies on third-party frameworks or libraries. It can be used as a command-line rendering tool, as a library, or it can be compiled to JavaScript using the Emscripten LLVM-to-JavaScript compiler. This last option is particularly interesting because it provides a complete in-browser music MEI typesetter. The SVG output from Verovio is organized in such a way that the MEI structure is preserved as much as possible. Since every graphic in SVG is an XML element that is easily addressable, Verovio is particularly well-suited for interactive applications, especially in web browsers. Verovio is available under the GPL open-source license.},\n author = {Pugin, Laurent and Zitellini, Rodolfo and Roland, Perry},\n title = {Verovio. A Library for Engraving MEI Music Notation into SVG},\n url = {http://www.terasoft.com.tw/conf/ismir2014/proceedings/T020_221_Paper.pdf},\n pages = {107–112},\n publisher = {{International Society for Music Information Retrieval}},\n editor = {Wang, Hsin-Min and Yang, Yi-Hsuan and Lee, Jin Ha},\n booktitle = {Proceedings of the 15th International Society for Music Information Retrieval Conference, ISMIR 2014, Taipei, Taiwan, October 27-31, 2014},\n year = {2014}\n}\n\n\n
\n
\n\n\n
\n Rendering symbolic music notation is a common component of many MIR applications, and many tools are available for this task. There is, however, a need for a tool that can natively render the Music Encoding Initiative (MEI) notation encodings that are increasingly used in music research projects. In this paper, we present Verovio, a library and toolkit for rendering MEI. A significant advantage of Verovio is that it implements MEI's structure internally, making it the best suited solution for rendering features that make MEI unique. Verovio is designed as a fast, portable, lightweight tool written in pure standard C++ with no dependencies on third-party frameworks or libraries. It can be used as a command-line rendering tool, as a library, or it can be compiled to JavaScript using the Emscripten LLVM-to-JavaScript compiler. This last option is particularly interesting because it provides a complete in-browser music MEI typesetter. The SVG output from Verovio is organized in such a way that the MEI structure is preserved as much as possible. Since every graphic in SVG is an XML element that is easily addressable, Verovio is particularly well-suited for interactive applications, especially in web browsers. Verovio is available under the GPL open-source license.\n
\n\n\n
\n\n\n
\n \n\n \n \n Roland, P.; Hankinson, A.; and Pugin, L.\n\n\n \n \n \n \n Early Music and the Music Encoding Initiative.\n \n \n \n\n\n \n\n\n\n Early Music, 42(4): 605–611. 2014.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Roland_2014,\n abstract = {The Music Encoding Initiative (MEI) is a collaborative, open-source project focused on building a comprehensive framework for the creation of electronic formats that support encoding of symbolic music notation and other associated data and metadata. The MEI community strives to create scholarly standards for digital musical analysis, criticism and editorial work similar to those available for textual material. The community includes practitioners from a diverse range of related disciplines including musicology, music theory, music librarianship and music technology, each contributing to ongoing discussions and tools for building digital critical music editions. This article provides an introduction to music encoding and MEI in the context of early music.},\n author = {Roland, Perry and Hankinson, Andrew and Pugin, Laurent},\n year = {2014},\n title = {Early Music and the Music Encoding Initiative},\n pages = {605–611},\n volume = {42},\n number = {4},\n journal = {Early Music},\n doi = {10.1093/em/cau098}\n}\n\n\n
\n
\n\n\n
\n The Music Encoding Initiative (MEI) is a collaborative, open-source project focused on building a comprehensive framework for the creation of electronic formats that support encoding of symbolic music notation and other associated data and metadata. The MEI community strives to create scholarly standards for digital musical analysis, criticism and editorial work similar to those available for textual material. The community includes practitioners from a diverse range of related disciplines including musicology, music theory, music librarianship and music technology, each contributing to ongoing discussions and tools for building digital critical music editions. This article provides an introduction to music encoding and MEI in the context of early music.\n
\n\n\n
\n\n\n
\n \n\n \n \n Stinson, J.; and Stoessel, J.\n\n\n \n \n \n \n Encoding Medieval Music Notation for Research.\n \n \n \n\n\n \n\n\n\n Early Music, 42(4): 613–617. 2014.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Stinson_2014,\n abstract = {Encoding massive amounts of medieval music notation provides the raw data needed for gaining a systematic understanding of differences and similarities in musical writing, and for researching notation's role in compositional process and musical transmission in medieval societies. The ability to represent all forms of medieval music notation electronically is also of utmost importance for current and future projects which are developing Optical Music Recognition systems for medieval notation and creating searchable datasets of nowplentiful online images of digitized medieval music manuscripts.1 In this report the authors outline their efforts to date in encoding medieval music notation using the pioneering \\textit{Scribe }software and their current collaboration on transforming \\textit{Scribe }data into a valuable pool of open access research data for distribution on the internet, for both musicologists and generalists interested in the study and performance of medieval music from original notation.},\n author = {Stinson, John and Stoessel, Jason},\n year = {2014},\n title = {Encoding Medieval Music Notation for Research},\n pages = {613–617},\n volume = {42},\n number = {4},\n journal = {Early Music},\n doi = {10.1093/em/cau093}\n}\n\n\n
\n
\n\n\n
\n Encoding massive amounts of medieval music notation provides the raw data needed for gaining a systematic understanding of differences and similarities in musical writing, and for researching notation's role in compositional process and musical transmission in medieval societies. The ability to represent all forms of medieval music notation electronically is also of utmost importance for current and future projects which are developing Optical Music Recognition systems for medieval notation and creating searchable datasets of nowplentiful online images of digitized medieval music manuscripts.1 In this report the authors outline their efforts to date in encoding medieval music notation using the pioneering Scribe software and their current collaboration on transforming Scribe data into a valuable pool of open access research data for distribution on the internet, for both musicologists and generalists interested in the study and performance of medieval music from original notation.\n
\n\n\n
\n\n\n
\n \n\n \n \n Teich Geertinger, A.\n\n\n \n \n \n \n \n Turning Music Catalogues into Archives of Musical Scores–or Vice Versa. Music Archives and Catalogues Based on MEI XML.\n \n \n \n \n\n\n \n\n\n\n Fontes Artis Musicae, 61(1): 61–66. 2014.\n \n\n\n\n
\n\n\n\n \n \n \"Turning jstor\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{TeichGeertinger_2014,\n abstract = {Traditionally, digital collections of musical metadata, i.e., information about musical works – such as library catalogues or thematic catalogues are based on relational databases. Digital archives of musical scores, on the other hand, usually consist of collections of files, each containing one work in some presentation format (primarily PDF). Both types of collections are technically easy to build, but they have a number of limitations in terms of long-term preservation, data exchange and data re-use, and flexibility. A text-based data structure sophisticated enough to contain both detailed metadata and fully-featured scores may be a way of overcoming some of these limitations and at the same time include catalogue data in the score and vice versa. The Music Encoding Initiative (MEI) offers a framework for such an approach based on XML files. The article discusses pros and cons and illustrates some of the possible use cases.},\n author = {{Teich Geertinger}, Axel},\n year = {2014},\n title = {Turning Music Catalogues into Archives of Musical Scores–or Vice Versa. Music Archives and Catalogues Based on MEI XML},\n url_JSTOR = {http://www.jstor.org/stable/24330408},\n pages = {61–66},\n volume = {61},\n number = {1},\n journal = {Fontes Artis Musicae}\n}\n\n\n
\n
\n\n\n
\n Traditionally, digital collections of musical metadata, i.e., information about musical works – such as library catalogues or thematic catalogues are based on relational databases. Digital archives of musical scores, on the other hand, usually consist of collections of files, each containing one work in some presentation format (primarily PDF). Both types of collections are technically easy to build, but they have a number of limitations in terms of long-term preservation, data exchange and data re-use, and flexibility. A text-based data structure sophisticated enough to contain both detailed metadata and fully-featured scores may be a way of overcoming some of these limitations and at the same time include catalogue data in the score and vice versa. The Music Encoding Initiative (MEI) offers a framework for such an approach based on XML files. The article discusses pros and cons and illustrates some of the possible use cases.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2013\n \n \n (6)\n \n \n
\n
\n \n \n
\n \n\n \n \n Burlet, G.; and Fujinaga, I.\n\n\n \n \n \n \n \n Robotaba Guitar Tablature Transcription Framework.\n \n \n \n \n\n\n \n\n\n\n In Souza Britto Jr., Alceu de; Gouyon, F.; and Dixon, S., editor(s), Proceedings of the 14th International Society for Music Information Retrieval Conference, ISMIR 2013, Curitiba, Brazil, November 4-8, 2013, pages 517–522, 2013. \n \n\n\n\n
\n\n\n\n \n \n \"RobotabaPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Burlet_2013,\n abstract = {This paper presents Robotaba, a web-based guitar tablature transcription framework. The framework facilitates the creation of web applications in which polyphonic transcription and guitar tablature arrangement algorithms can be embedded. Such a web application is implemented, and consists of an existing polyphonic transcription algorithm and a new guitar tablature arrangement algorithm. The result is a unified system that is capable of transcribing guitar tablature from a digital audio recording and displaying the resulting tablature in the web browser. Additionally, two ground-truth datasets for polyphonic transcription and guitar tablature arrangement are compiled from manual transcriptions gathered from the tablature website ultimate-guitar.com. The implemented transcription web application is evaluated on the compiled ground-truth datasets using several metrics},\n author = {Burlet, Gregory and Fujinaga, Ichiro},\n title = {Robotaba Guitar Tablature Transcription Framework},\n url = {http://ismir2013.ismir.net/wp-content/uploads/2013/09/217_Paper.pdf},\n pages = {517–522},\n editor = {{Souza Britto Jr., Alceu de} and Gouyon, Fabien and Dixon, Simon},\n booktitle = {Proceedings of the 14th International Society for Music Information Retrieval Conference, ISMIR 2013, Curitiba, Brazil, November 4-8, 2013},\n year = {2013}\n}\n\n\n
\n
\n\n\n
\n This paper presents Robotaba, a web-based guitar tablature transcription framework. The framework facilitates the creation of web applications in which polyphonic transcription and guitar tablature arrangement algorithms can be embedded. Such a web application is implemented, and consists of an existing polyphonic transcription algorithm and a new guitar tablature arrangement algorithm. The result is a unified system that is capable of transcribing guitar tablature from a digital audio recording and displaying the resulting tablature in the web browser. Additionally, two ground-truth datasets for polyphonic transcription and guitar tablature arrangement are compiled from manual transcriptions gathered from the tablature website ultimate-guitar.com. The implemented transcription web application is evaluated on the compiled ground-truth datasets using several metrics\n
\n\n\n
\n\n\n
\n \n\n \n \n Martin de Guise, S.\n\n\n \n \n \n \n \n La MEI (Music Encoding Initiative). Un Standard Au Service De La Musique Kabyle.\n \n \n \n \n\n\n \n\n\n\n Iles d'Imesli, 5: 245–277. 2013.\n \n\n\n\n
\n\n\n\n \n \n \"La link\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{MartindeGuise_2013,\n abstract = {La MEI – Music Encoding Initiative – a {\\'e}t{\\'e} d{\\'e}velopp{\\'e}e selon le m{\\^e}me principe que la TEI – Text Encoding Initiative – c'est-{\\`a}-dire en poursuivant la volont{\\'e} d'offrir un standard qui permette de rendre et d'{\\'e}mettre des textes, ici musicaux, qui soient lisibles sur toutes les machines (ordinateurs) et qui puissent comporter une vari{\\'e}t{\\'e} d'information particuli{\\`e}rement exhaustive.\n\nPar exemple, la MEI autorise d{\\'e}sormais, non seulement de r{\\'e}aliser en langage {\\`a} balises le texte des partitions musicales, mais aussi de commenter ces partitions, de fond en comble, d'en pr{\\'e}senter des analyses fines et vari{\\'e}es qui soient incluses dans le document MEI lui-m{\\^e}me et de proposer en m{\\^e}me temps les cl{\\'e}s de ces analyses.\n\nSi la MEI s'est d'abord appuy{\\'e}e sur les caract{\\'e}ristiques de la musique occidentale, typiquement "{\\'e}crites", pour distinguer des cat{\\'e}gories musicales, elle a d{\\'e}velopp{\\'e} par la suite autant de moyens pour {\\'e}tudier les musiques "orales" et aborder leurs caract{\\'e}ristiques fondamentales, par exemple les micro-intervalles.\n\nC'est {\\`a} ce titre que la MEI montre une capacit{\\'e} et une souplesse remarquable {\\`a} s'adapter aux musiques de toute origine et de tout mode d'expression, dans le cas pr{\\'e}sent les musiques amazighes et arabes d'Alg{\\'e}rie. Ce standard devient, par cons{\\'e}quent, un outil incontournable au XXIe si{\\`e}cle pour faciliter la compr{\\'e}hension et la comparaison des musiques du monde.},\n author = {{Martin de Guise}, Sylvaine},\n year = {2013},\n title = {La MEI (Music Encoding Initiative). Un Standard Au Service De La Musique Kabyle},\n url_Link = {http://revue.ummto.dz/index.php/idi/article/view/292/0},\n pages = {245–277},\n volume = {5},\n journal = {Iles d'Imesli}\n}\n\n\n
\n
\n\n\n
\n La MEI – Music Encoding Initiative – a été développée selon le même principe que la TEI – Text Encoding Initiative – c'est-à-dire en poursuivant la volonté d'offrir un standard qui permette de rendre et d'émettre des textes, ici musicaux, qui soient lisibles sur toutes les machines (ordinateurs) et qui puissent comporter une variété d'information particulièrement exhaustive. Par exemple, la MEI autorise désormais, non seulement de réaliser en langage à balises le texte des partitions musicales, mais aussi de commenter ces partitions, de fond en comble, d'en présenter des analyses fines et variées qui soient incluses dans le document MEI lui-même et de proposer en même temps les clés de ces analyses. Si la MEI s'est d'abord appuyée sur les caractéristiques de la musique occidentale, typiquement \"écrites\", pour distinguer des catégories musicales, elle a développé par la suite autant de moyens pour étudier les musiques \"orales\" et aborder leurs caractéristiques fondamentales, par exemple les micro-intervalles. C'est à ce titre que la MEI montre une capacité et une souplesse remarquable à s'adapter aux musiques de toute origine et de tout mode d'expression, dans le cas présent les musiques amazighes et arabes d'Algérie. Ce standard devient, par conséquent, un outil incontournable au XXIe siècle pour faciliter la compréhension et la comparaison des musiques du monde.\n
\n\n\n
\n\n\n
\n \n\n \n \n Richts, K.\n\n\n \n \n \n \n \n Die FRBR Customization im Datenformat der Music Encoding Initiative (MEI).\n \n \n \n \n\n\n \n\n\n\n Master's thesis, Cologne University of Applied Sciences, Köln, Germany, 2013.\n \n\n\n\n
\n\n\n\n \n \n \"Die urn\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@mastersthesis{Richts_2013,\n abstract = {Following the digital turn and the increasing availability and usefulness of virtual research environments, a stronger collaboration between libraries and research institutions will become an essential prerequisite for future projects. The increasing internationalization in the field of data curation and management leads to higher requirements for libraries and research institutions. This study deals with the implementation of the model of the Functional Requirements for Bibliographic Records(FRBR)in the data framework of the Music Encoding Initiative (MEI), which received much recognition as a standard for encoding music notation in recent years. MEI offers manifold possibilities to store detailed metadata and aims at providing maximum compatibility between the data generated by musicological research projects and best-practice cataloging principles in libraries. Given the upcoming transition to RDA, this combination seems timely and highly promising.},\n author = {Richts, Kristina},\n year = {2013},\n title = {Die FRBR Customization im Datenformat der Music Encoding Initiative (MEI)},\n url_URN = {http://nbn-resolving.de/urn:nbn:de:hbz:79pbc-2013103042},\n address = {K{\\"o}ln, Germany},\n school = {{Cologne University of Applied Sciences}},\n type = {Master's thesis}\n}\n\n\n
\n
\n\n\n
\n Following the digital turn and the increasing availability and usefulness of virtual research environments, a stronger collaboration between libraries and research institutions will become an essential prerequisite for future projects. The increasing internationalization in the field of data curation and management leads to higher requirements for libraries and research institutions. This study deals with the implementation of the model of the Functional Requirements for Bibliographic Records(FRBR)in the data framework of the Music Encoding Initiative (MEI), which received much recognition as a standard for encoding music notation in recent years. MEI offers manifold possibilities to store detailed metadata and aims at providing maximum compatibility between the data generated by musicological research projects and best-practice cataloging principles in libraries. Given the upcoming transition to RDA, this combination seems timely and highly promising.\n
\n\n\n
\n\n\n
\n \n\n \n \n Richts, K.\n\n\n \n \n \n \n \n Entwicklung von Schulungsmaterialien für Einsatzmöglichkeiten von MEI im bibliothekarischen Bereich.\n \n \n \n \n\n\n \n\n\n\n In Oßwald, A.; Tapenbeck, I.; Meinhardt, H.; and Rösch, H., editor(s), MALIS Praxisprojekte 2013. Projektberichte aus dem berufsbegleitenden Masterstudiengang Bibliotheks- und Informationswissenschaft der Fachhochschule Köln, of b.i.t. online – Innovativ, pages 137–155. Dinges & Frick, Wiesbaden, 2013.\n \n\n\n\n
\n\n\n\n \n \n \"Entwicklung urn\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{Richts_2013b,\n abstract = {The Music Encoding Initiative (MEI) is an XML-based framework for the encoding of music notation and accordingly sheet music. Along the findings of the Text Encoding Initiative (TEI) it was developed for the field of music. One of the distinguishing features of the format is that it not only supports detailed encodings of music notation, but also accommodates comprehensive and detailed metadata for bibliographical recordings of musical sources. Due to its versatility and its robust characteristics MEI is particularly suitable for long-term archiving of data and is thus predestined for use in both fields of musicology and library services. Within the project ``Digital Music Notation Data Model and Prototype Delivery System'' jointly funded by the German Research Foundation (DFG) and the National Endowment of the Humanities (NEH), in summer 2012 training materials have been developed for indexing musical texts and for capturing relevant metadata with MEI. They aim at demonstrating the efficiency of MEI and intend to introduce it to humanities scholars, librarians, editors and computer scientists.},\n author = {Richts, Kristina},\n title = {Entwicklung von Schulungsmaterialien f{\\"u}r Einsatzm{\\"o}glichkeiten von MEI im bibliothekarischen Bereich},\n url_URN = {http://nbn-resolving.de/urn:nbn:de:hbz:79pbc-opus-3763},\n pages = {137–155},\n publisher = {{Dinges {\\&} Frick}},\n isbn = {978-3-934997-51-6},\n series = {b.i.t. online – Innovativ},\n editor = {O{\\ss}wald, Achim and Tapenbeck, Inka and Meinhardt, Haike and R{\\"o}sch, Hermann},\n booktitle = {MALIS Praxisprojekte 2013. Projektberichte aus dem berufsbegleitenden Masterstudiengang Bibliotheks- und Informationswissenschaft der Fachhochschule K{\\"o}ln},\n year = {2013},\n address = {Wiesbaden}\n}\n\n\n
\n
\n\n\n
\n The Music Encoding Initiative (MEI) is an XML-based framework for the encoding of music notation and accordingly sheet music. Along the findings of the Text Encoding Initiative (TEI) it was developed for the field of music. One of the distinguishing features of the format is that it not only supports detailed encodings of music notation, but also accommodates comprehensive and detailed metadata for bibliographical recordings of musical sources. Due to its versatility and its robust characteristics MEI is particularly suitable for long-term archiving of data and is thus predestined for use in both fields of musicology and library services. Within the project ``Digital Music Notation Data Model and Prototype Delivery System'' jointly funded by the German Research Foundation (DFG) and the National Endowment of the Humanities (NEH), in summer 2012 training materials have been developed for indexing musical texts and for capturing relevant metadata with MEI. They aim at demonstrating the efficiency of MEI and intend to introduce it to humanities scholars, librarians, editors and computer scientists.\n
\n\n\n
\n\n\n
\n \n\n \n \n Stewart, D.\n\n\n \n \n \n \n \n XML for Music.\n \n \n \n \n\n\n \n\n\n\n December 2013.\n Electronic Musician (01.12.2013)\n\n\n\n
\n\n\n\n \n \n \"XML link\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 4 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{Stewart_2003,\n abstract = {},\n author = {Stewart, Darin},\n year = {2013},\n month = {December},\n title = {XML for Music},\n url_Link = {http://www.emusician.com/gear/1332/xml-for-music/33473},\n note = {Electronic Musician (01.12.2013)}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n Vigliensoni, G.; Gregory, B.; and Fujinaga, I.\n\n\n \n \n \n \n \n Optical Measure Recognition in Common Music Notation.\n \n \n \n \n\n\n \n\n\n\n In Souza Britto Jr., Alceu de; Gouyon, F.; and Dixon, S., editor(s), Proceedings of the 14th International Society for Music Information Retrieval Conference, ISMIR 2013, Curitiba, Brazil, November 4–8, 2013, pages 125–130, 2013. \n \n\n\n\n
\n\n\n\n \n \n \"OpticalPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Vigliensoni_2013,\n abstract = {This paper presents work on the automatic recognition of measures in common Western music notation scores using optical music recognition techniques. It is important to extract the bounding boxes of measures within a music score to facilitate some methods of multimodal navigation of music catalogues. We present an image processing algorithm that extracts the position of barlines on an input music score in order to deduce the number and position of measures on the page. An open-source implementation of this algorithm is made publicly available. In addition, we have created a ground-truth dataset of 100 images of music scores with manually annotated measures. We conducted several experiments using different combinations of values for two critical parameters to evaluate our measure recognition algorithm. Our algorithm obtained an f-score of 91 percent with the optimal set of parameters. Although our implementation obtained results similar to previous approaches, the scope and size of the evaluation dataset is significantly larger.},\n author = {Vigliensoni, Gabriel and Gregory, Burlet and Fujinaga, Ichiro},\n title = {Optical Measure Recognition in Common Music Notation},\n url = {http://ismir2013.ismir.net/wp-content/uploads/2013/09/207_Paper.pdf},\n pages = {125–130},\n editor = {{Souza Britto Jr., Alceu de} and Gouyon, Fabien and Dixon, Simon},\n booktitle = {Proceedings of the 14th International Society for Music Information Retrieval Conference, ISMIR 2013, Curitiba, Brazil, November 4–8, 2013},\n year = {2013}\n}\n\n
\n
\n\n\n
\n This paper presents work on the automatic recognition of measures in common Western music notation scores using optical music recognition techniques. It is important to extract the bounding boxes of measures within a music score to facilitate some methods of multimodal navigation of music catalogues. We present an image processing algorithm that extracts the position of barlines on an input music score in order to deduce the number and position of measures on the page. An open-source implementation of this algorithm is made publicly available. In addition, we have created a ground-truth dataset of 100 images of music scores with manually annotated measures. We conducted several experiments using different combinations of values for two critical parameters to evaluate our measure recognition algorithm. Our algorithm obtained an f-score of 91 percent with the optimal set of parameters. Although our implementation obtained results similar to previous approaches, the scope and size of the evaluation dataset is significantly larger.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2012\n \n \n (8)\n \n \n
\n
\n \n \n
\n \n\n \n \n Burlet, G.; Porter, A.; Hankinson, A.; and Fujinaga, I.\n\n\n \n \n \n \n \n Neon.js. Neume Editor Online.\n \n \n \n \n\n\n \n\n\n\n In Gouyon, F.; Herrera, P.; Martins, L. G.; and Müller, M., editor(s), Proceedings of the 13th International Society for Music Information Retrieval Conference, ISMIR 2012, Mosteiro S.Bento Da Vitória, Porto, Portugal, October 8-12, 2012, pages 121–126, 2012. FEUP Edições\n \n\n\n\n
\n\n\n\n \n \n \"Neon.js.Paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Burlet_2012,\n abstract = {This paper introduces Neon.js, a browser-based music notation editor written in JavaScript. The editor can be used to manipulate digitally encoded musical scores in square-note notation. This type of notation presents certain challenges to a music notation editor, since many neumes (groups of pitches) are ligatures–continuous graphical symbols that represent multiple notes. Neon.js will serve as a component within an online optical music recognition framework. The primary purpose of the editor is to provide a readily accessible interface to easily correct errors made in the process of optical music recognition. In this context, we envision an environment that promotes crowdsourcing to further the creation of editable and searchable online symbolic music collections and for generating and editing ground-truth data to train optical music recognition algorithms.},\n author = {Burlet, Gregory and Porter, Alastair and Hankinson, Andrew and Fujinaga, Ichiro},\n title = {Neon.js. Neume Editor Online},\n url = {http://www.ismir2012.ismir.net/event/papers/121_ISMIR_2012.pdf},\n publisher = {{FEUP Edi{\\c{c}}{\\~o}es}},\n editor = {Gouyon, Fabien and Herrera, Perfecto and Martins, Luis Gustavo and M{\\"u}ller, Meinard},\n booktitle = {Proceedings of the 13th International Society for Music Information Retrieval Conference, ISMIR 2012, Mosteiro S.Bento Da Vit{\\'o}ria, Porto, Portugal, October 8-12, 2012},\n year = {2012},\n pages = {121–126}\n}\n\n\n
\n
\n\n\n
\n This paper introduces Neon.js, a browser-based music notation editor written in JavaScript. The editor can be used to manipulate digitally encoded musical scores in square-note notation. This type of notation presents certain challenges to a music notation editor, since many neumes (groups of pitches) are ligatures–continuous graphical symbols that represent multiple notes. Neon.js will serve as a component within an online optical music recognition framework. The primary purpose of the editor is to provide a readily accessible interface to easily correct errors made in the process of optical music recognition. In this context, we envision an environment that promotes crowdsourcing to further the creation of editable and searchable online symbolic music collections and for generating and editing ground-truth data to train optical music recognition algorithms.\n
\n\n\n
\n\n\n
\n \n\n \n \n Hankinson, A.; Burgoyne, J. A.; Vigliensoni, G.; and Fujinaga, I.\n\n\n \n \n \n \n Creating a Large-Scale Searchable Digital Collection from Printed Music Materials.\n \n \n \n\n\n \n\n\n\n In WWW'12. Proceedings of the 21st International Conference Companion on World Wide Web, pages 903–908, New York, NY, 2012. Association for Computing Machinery\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Hankinson_2012,\n abstract = {In this paper we present our work towards developing a largescale web application for digitizing, recognizing (via optical music recognition), correcting, displaying, and searching printed music texts. We present the results of a recently completed prototype implementation of our workflow process, from document capture to presentation on the web. We discuss a number of lessons learned from this prototype. Finally, we present some open-source Web 2.0 tools developed to provide essential infrastructure components for making searchable printed music collections available online. Our hope is that these experiences and tools will help in creating next-generation globally accessible digital music libraries.},\n author = {Hankinson, Andrew and Burgoyne, John Ashley and Vigliensoni, Gabriel and Fujinaga, Ichiro},\n title = {Creating a Large-Scale Searchable Digital Collection from Printed Music Materials},\n pages = {903–908},\n publisher = {{Association for Computing Machinery}},\n booktitle = {WWW'12. Proceedings of the 21st International Conference Companion on World Wide Web},\n year = {2012},\n address = {New York, NY},\n doi = {10.1145/2187980.2188221}\n}\n\n\n
\n
\n\n\n
\n In this paper we present our work towards developing a largescale web application for digitizing, recognizing (via optical music recognition), correcting, displaying, and searching printed music texts. We present the results of a recently completed prototype implementation of our workflow process, from document capture to presentation on the web. We discuss a number of lessons learned from this prototype. Finally, we present some open-source Web 2.0 tools developed to provide essential infrastructure components for making searchable printed music collections available online. Our hope is that these experiences and tools will help in creating next-generation globally accessible digital music libraries.\n
\n\n\n
\n\n\n
\n \n\n \n \n Hankinson, A.; Burgoyne, J. A.; Vigliensoni, G.; Porter, A.; Thompson, J.; Liu, W.; Chiu, R.; and Fujinaga, I.\n\n\n \n \n \n \n \n Digital Document Image Retrieval Using Optical Music Recognition.\n \n \n \n \n\n\n \n\n\n\n In Gouyon, F.; Herrera, P.; Martins, L. G.; and Müller, M., editor(s), Proceedings of the 13th International Society for Music Information Retrieval Conference, ISMIR 2012, Mosteiro S.Bento Da Vitória, Porto, Portugal, October 8-12, 2012, pages 577–582, 2012. FEUP Edições\n \n\n\n\n
\n\n\n\n \n \n \"DigitalPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Hankinson_2012b,\n abstract = {Optical music recognition (OMR) and optical character recognition (OCR) have traditionally been used for document transcription–that is, extracting text or symbolic music from page images for use in an editor while discarding all spatial relationships between the transcribed notation and the original image. In this paper we discuss how OCR has shifted fundamentally from a transcription tool to an indexing tool for document image collections resulting from large digitization efforts. OMR tools and procedures, in contrast, are still focused on small-scale modes of operation. We argue that a shift in OMR development towards document image indexing would present new opportunities for searching, browsing, and analyzing large musical document collections. We present a prototype system we built to evaluate the tools and to develop practices needed to process print and manuscript sources.},\n author = {Hankinson, Andrew and Burgoyne, John Ashley and Vigliensoni, Gabriel and Porter, Alastair and Thompson, Jessica and Liu, Wendy and Chiu, Remi and Fujinaga, Ichiro},\n title = {Digital Document Image Retrieval Using Optical Music Recognition},\n url = {http://ismir2012.ismir.net/event/papers/577_ISMIR_2012.pdf},\n pages = {577–582},\n publisher = {{FEUP Edi{\\c{c}}{\\~o}es}},\n isbn = {978-972-752-144-9},\n editor = {Gouyon, Fabien and Herrera, Perfecto and Martins, Luis Gustavo and M{\\"u}ller, Meinard},\n booktitle = {Proceedings of the 13th International Society for Music Information Retrieval Conference, ISMIR 2012, Mosteiro S.Bento Da Vit{\\'o}ria, Porto, Portugal, October 8-12, 2012},\n year = {2012}\n}\n\n\n
\n
\n\n\n
\n Optical music recognition (OMR) and optical character recognition (OCR) have traditionally been used for document transcription–that is, extracting text or symbolic music from page images for use in an editor while discarding all spatial relationships between the transcribed notation and the original image. In this paper we discuss how OCR has shifted fundamentally from a transcription tool to an indexing tool for document image collections resulting from large digitization efforts. OMR tools and procedures, in contrast, are still focused on small-scale modes of operation. We argue that a shift in OMR development towards document image indexing would present new opportunities for searching, browsing, and analyzing large musical document collections. We present a prototype system we built to evaluate the tools and to develop practices needed to process print and manuscript sources.\n
\n\n\n
\n\n\n
\n \n\n \n \n Hartwig, M.; Kepper, J.; and Richts, K.\n\n\n \n \n \n \n \n Neue Wege der Musikerschließung. Über den möglichen Einsatz von MEI in deutschen Bibliotheken.\n \n \n \n \n\n\n \n\n\n\n Forum Musikbibliothek: Beitrage und Informationen aus der Musikbibliothekarischen Praxis, 33: 16–23. 2012.\n \n\n\n\n
\n\n\n\n \n \n \"Neue link\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Hartwig_2012,\n abstract = {Der Aufsatz stellt die Music Encoding Initiative (MEI), ihr Datenformat, M{\\"o}glichkeiten f{\\"u}r den Einsatz in Bibliotheken sowie das in Detmold ans{\\"a}ssige Projekt zur Weiterentwicklung von MEI vor.},\n author = {Hartwig, Maja and Kepper, Johannes and Richts, Kristina},\n year = {2012},\n title = {Neue Wege der Musikerschlie{\\ss}ung. {\\"U}ber den m{\\"o}glichen Einsatz von MEI in deutschen Bibliotheken},\n url_Link = {https://oa.slub-dresden.de/ejournals/fmb/article/view/96},\n pages = {16–23},\n volume = {33},\n journal = {Forum Musikbibliothek: Beitrage und Informationen aus der Musikbibliothekarischen Praxis}\n}\n\n
\n
\n\n\n
\n Der Aufsatz stellt die Music Encoding Initiative (MEI), ihr Datenformat, Möglichkeiten für den Einsatz in Bibliotheken sowie das in Detmold ansässige Projekt zur Weiterentwicklung von MEI vor.\n
\n\n\n
\n\n\n
\n \n\n \n \n Krabbe, N.; and Teich Geertinger, A.\n\n\n \n \n \n \n \n MEI (Music Encoding Initiative) as a Basis for Thematic Catalogues. Thoughts, Experiences, and Preliminary Results.\n \n \n \n \n\n\n \n\n\n\n In RISM Conference 2012. Music Documentation in Libraries, Scholarship, and Practice, 2012. \n \n\n\n\n
\n\n\n\n \n \n \"MEIPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Krabbe_2012,\n abstract = {The Danish Centre for Music Publication (DCM) was founded in the spring of 2009, building on the philological expertise of \\textit{The Carl Nielsen Edition, }which had published its 33rd and final volume in March 2009. The purpose of the DCM was, by nature, broader than that of \\textit{The Carl Nielsen Edition, }standing so to speak on two legs: one is the edition of unknown music kept in the library to be used by scholars and musicians and based on a philological approach, the other is the development of ways to disseminate the results of the Centre's work via the internet. The latter aim has resulted in developing a system for storing and presenting data, especially related to thematic catalogs, based on MEI (Music Encoding Initiative) XML. At present, the software developed at the DCM, called MerMEId (Metadata Editor and Repository for MEI Data), is used for catalogs-in-progress for the works of Carl Nielsen, Johan Svendsen, J. P. E. Hartmann, Niels W. Gade, and J. A. Scheibe. In a further perspective, MEI enables the integration of detailed metadata with the full music text, including variants and emendations within the same file in a format that is interchangeable with other software such as a graphical note editor. In our presentation, we will outline the ideas and principles behind the MerMEId software and briefly demonstrate its use, from the point of view of both the editor and the user.},\n author = {Krabbe, Niels and {Teich Geertinger}, Axel},\n title = {MEI (Music Encoding Initiative) as a Basis for Thematic Catalogues. Thoughts, Experiences, and Preliminary Results},\n url = {http://www.rism.info/fileadmin/content/community-content/events/RISM_Conference_2012/TeichGeertinger_Final.pdf},\n booktitle = {RISM Conference 2012. Music Documentation in Libraries, Scholarship, and Practice},\n year = {2012}\n}\n\n\n
\n
\n\n\n
\n The Danish Centre for Music Publication (DCM) was founded in the spring of 2009, building on the philological expertise of The Carl Nielsen Edition, which had published its 33rd and final volume in March 2009. The purpose of the DCM was, by nature, broader than that of The Carl Nielsen Edition, standing so to speak on two legs: one is the edition of unknown music kept in the library to be used by scholars and musicians and based on a philological approach, the other is the development of ways to disseminate the results of the Centre's work via the internet. The latter aim has resulted in developing a system for storing and presenting data, especially related to thematic catalogs, based on MEI (Music Encoding Initiative) XML. At present, the software developed at the DCM, called MerMEId (Metadata Editor and Repository for MEI Data), is used for catalogs-in-progress for the works of Carl Nielsen, Johan Svendsen, J. P. E. Hartmann, Niels W. Gade, and J. A. Scheibe. In a further perspective, MEI enables the integration of detailed metadata with the full music text, including variants and emendations within the same file in a format that is interchangeable with other software such as a graphical note editor. In our presentation, we will outline the ideas and principles behind the MerMEId software and briefly demonstrate its use, from the point of view of both the editor and the user.\n
\n\n\n
\n\n\n
\n \n\n \n \n Pugin, L.; Kepper, J.; Roland, P.; Hartwig, M.; and Hankinson, A.\n\n\n \n \n \n \n \n Separating Presentation and Content in MEI.\n \n \n \n \n\n\n \n\n\n\n In Gouyon, F.; Herrera, P.; Martins, L. G.; and Müller, M., editor(s), Proceedings of the 13th International Society for Music Information Retrieval Conference, ISMIR 2012, Mosteiro S.Bento Da Vitória, Porto, Portugal, October 8-12, 2012, pages 505–510, 2012. FEUP Edições\n \n\n\n\n
\n\n\n\n \n \n \"SeparatingPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Pugin_2012,\n abstract = {Common Western music notation is traditionally organized on staves that can be grouped into systems. When multiple systems appear on a page, they are arranged from the top to the bottom of the page, similar to lines of words in a text document. Encoding music notation documents for printing requires this arrangement to be captured. However, in the music notation model proposed by the Music Encoding Initiative (MEI), the hierarchy of the XML sub-tree representing the music emphasizes the content rather than the layout. Since systems and pages do not coincide with the musical content, they are encoded in a secondary hierarchy that contains very limited information. In this paper, we present a complementary solution for augmenting the level of detail of the layout of musical documents; that is, the layout information can be encoded in a separate sub-tree with cross-references to other elements holding the musical content. The major advantage of the proposed solution is that it enables multiple layout descriptions, each describing a different visual instantiation of the same musical content.},\n author = {Pugin, Laurent and Kepper, Johannes and Roland, Perry and Hartwig, Maja and Hankinson, Andrew},\n title = {Separating Presentation and Content in MEI},\n url = {http://ismir2012.ismir.net/event/papers/505_ISMIR_2012.pdf},\n pages = {505–510},\n publisher = {{FEUP Edi{\\c{c}}{\\~o}es}},\n isbn = {978-972-752-144-9},\n editor = {Gouyon, Fabien and Herrera, Perfecto and Martins, Luis Gustavo and M{\\"u}ller, Meinard},\n booktitle = {Proceedings of the 13th International Society for Music Information Retrieval Conference, ISMIR 2012, Mosteiro S.Bento Da Vit{\\'o}ria, Porto, Portugal, October 8-12, 2012},\n year = {2012}\n}\n\n\n
\n
\n\n\n
\n Common Western music notation is traditionally organized on staves that can be grouped into systems. When multiple systems appear on a page, they are arranged from the top to the bottom of the page, similar to lines of words in a text document. Encoding music notation documents for printing requires this arrangement to be captured. However, in the music notation model proposed by the Music Encoding Initiative (MEI), the hierarchy of the XML sub-tree representing the music emphasizes the content rather than the layout. Since systems and pages do not coincide with the musical content, they are encoded in a secondary hierarchy that contains very limited information. In this paper, we present a complementary solution for augmenting the level of detail of the layout of musical documents; that is, the layout information can be encoded in a separate sub-tree with cross-references to other elements holding the musical content. The major advantage of the proposed solution is that it enables multiple layout descriptions, each describing a different visual instantiation of the same musical content.\n
\n\n\n
\n\n\n
\n \n\n \n \n TEI Music SIG\n\n\n \n \n \n \n \n TEI with Music Notation.\n \n \n \n \n\n\n \n\n\n\n 2012.\n \n\n\n\n
\n\n\n\n \n \n \"TEIPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 15 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@misc{TEIMusicSIG_2012,\n abstract = {As part of a project funded by the TEI, the group focussed on using TEI's One Document Does it all (ODD) vocabulary to connect the TEI to music encoding formats. Specifically, first efforts concentrated on the Music Encoding Initiative (MEI) format. Therefore, these guidelines describe a TEI-with-MEI customisation that uses the TEI element <notatedMusic> to embed encode music notation.\nThe examples in these guidelines present typical occurrences of music within written text. They are taken from a handful of documents that we believe demonstrate the need for the inclusion of encoded music notation within TEI-encoded texts. See the bibliography for more information.\nAll the music examples are encoded using MEI; however, it is possible to use <notatedMusic> to link to external representations of music in any other format. See below for an example.\nThese guidelines do not claim to be comprehensive; however, we present them with the hope of encouraging further testing, improvement, and widespread use of this customisation.},\n author = {{TEI Music SIG}},\n year = {2012},\n title = {{TEI with Music Notation}},\n url = {https://web.archive.org/web/20130306003734/https://tei-c.org/SIG/Music/twm/index.html},\n keywords = {tutorial}\n}\n\n\n
\n
\n\n\n
\n As part of a project funded by the TEI, the group focussed on using TEI's One Document Does it all (ODD) vocabulary to connect the TEI to music encoding formats. Specifically, first efforts concentrated on the Music Encoding Initiative (MEI) format. Therefore, these guidelines describe a TEI-with-MEI customisation that uses the TEI element to embed encode music notation. The examples in these guidelines present typical occurrences of music within written text. They are taken from a handful of documents that we believe demonstrate the need for the inclusion of encoded music notation within TEI-encoded texts. See the bibliography for more information. All the music examples are encoded using MEI; however, it is possible to use to link to external representations of music in any other format. See below for an example. These guidelines do not claim to be comprehensive; however, we present them with the hope of encouraging further testing, improvement, and widespread use of this customisation.\n
\n\n\n
\n\n\n
\n \n\n \n \n Veit, J.\n\n\n \n \n \n \n \n Wächst zusammen, was zusammen gehört? Wissenschaftliche Musikergesamtausgaben und Bibliotheken.\n \n \n \n \n\n\n \n\n\n\n Zeitschrift für Bibliothekswesen und Bibliographie, 59(3–4): 166–174. 2012.\n \n\n\n\n
\n\n\n\n \n \n \"WächstPaper\n  \n \n \n \"Wächst link\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Veit_2012,\n abstract = {For the past 150 years scholarly music editions have made use of similar techniques, but the accessibility of sources has continually become easier. Digital editions make thorough use of facsimiles and, hence, bridge the gap between researcher and user. But the capabilities of digital media will only be realized when, in the future, images can be encoded and made available in standardized formats. The results of scholarly work presented in the formats of XML or MEI can be combined with bibliographic metadata and need to be made openly accessible over the long-term as base data for ongoing research. As a consequence, however, the boundaries between the tasks of research and library conservation become blurred. This article illustrates and discusses the problems and consequences of these transformations, which may ultimately lead to a re-definition of what is meant by the term "music edition".},\n author = {Veit, Joachim},\n year = {2012},\n title = {W{\\"a}chst zusammen, was zusammen geh{\\"o}rt? Wissenschaftliche Musikergesamtausgaben und Bibliotheken},\n url = {http://zs.thulb.uni-jena.de/servlets/MCRFileNodeServlet/jportal_derivate_00226208/j12-h3-4-auf-6.pdf},\n url_Link = {http://zs.thulb.uni-jena.de/receive/jportal_jparticle_00266455},\n pages = {166–174},\n volume = {59},\n number = {3–4},\n journal = {Zeitschrift f{\\"u}r Bibliothekswesen und Bibliographie},\n doi = {10.3196/1864295012593472}\n}\n\n\n
\n
\n\n\n
\n For the past 150 years scholarly music editions have made use of similar techniques, but the accessibility of sources has continually become easier. Digital editions make thorough use of facsimiles and, hence, bridge the gap between researcher and user. But the capabilities of digital media will only be realized when, in the future, images can be encoded and made available in standardized formats. The results of scholarly work presented in the formats of XML or MEI can be combined with bibliographic metadata and need to be made openly accessible over the long-term as base data for ongoing research. As a consequence, however, the boundaries between the tasks of research and library conservation become blurred. This article illustrates and discusses the problems and consequences of these transformations, which may ultimately lead to a re-definition of what is meant by the term \"music edition\".\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2011\n \n \n (10)\n \n \n
\n
\n \n \n
\n \n\n \n \n Bohl, B.; Kepper, J.; and Röwenstrunk, D.\n\n\n \n \n \n \n Perspektiven digitaler Musikeditionen aus der Sicht des Edirom-Projekts.\n \n \n \n\n\n \n\n\n\n Die Tonkunst: Magazin für Klassische Musik und Musikwissenschaft, 5(3): 270–276. 2011.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Bohl_2011,\n abstract = {},\n author = {Bohl, Benjamin and Kepper, Johannes and Röwenstrunk, Daniel},\n year = {2011},\n title = {Perspektiven digitaler Musikeditionen aus der Sicht des Edirom-Projekts},\n pages = {270–276},\n volume = {5},\n number = {3},\n journal = {Die Tonkunst: Magazin f{\\"u}r Klassische Musik und Musikwissenschaft}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n Buschmeier, G.; and Betzwieser, T.\n\n\n \n \n \n \n Digitale Editionen in Akademienprogramm. Die Projektpraxis am Beispiel OPERA.\n \n \n \n\n\n \n\n\n\n Die Tonkunst: Magazin für Klassische Musik und Musikwissenschaft, 5(3): 263–269. 2011.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Buschmeier_2011,\n abstract = {},\n author = {Buschmeier, Gabriele and Betzwieser, Thomas},\n year = {2011},\n title = {Digitale Editionen in Akademienprogramm. Die Projektpraxis am Beispiel OPERA},\n pages = {263–269},\n volume = {5},\n number = {3},\n journal = {Die Tonkunst: Magazin f{\\"u}r Klassische Musik und Musikwissenschaft}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n Doi, C.; and Martin, C.\n\n\n \n \n \n \n \n Conference Highlights and New Initiatives of MLA 2011.\n \n \n \n \n\n\n \n\n\n\n CAML Review/Revue de l'ACBM, 39(1): 28–33. 2011.\n \n\n\n\n
\n\n\n\n \n \n \"ConferencePaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Doi_2011,\n abstract = {},\n author = {Doi, Carolyn and Martin, Cathy},\n year = {2011},\n title = {Conference Highlights and New Initiatives of MLA 2011},\n url = {https://caml.journals.yorku.ca/index.php/caml/article/viewFile/32103/29349.pdf},\n pages = {28–33},\n volume = {39},\n number = {1},\n journal = {CAML Review/Revue de l'ACBM}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n Freedman, R.; and Vendrix, P.\n\n\n \n \n \n \n The Chansonniers of Nicolas du Chemin. A Digital Forum for Renaissance Music Books.\n \n \n \n\n\n \n\n\n\n Die Tonkunst: Magazin für Klassische Musik und Musikwissenschaft, 5(3): 284–288. 2011.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Freedman_2011,\n abstract = {},\n author = {Freedman, Richard and Vendrix, Philippe},\n year = {2011},\n title = {The Chansonniers of Nicolas du Chemin. A Digital Forum for Renaissance Music Books},\n pages = {284–288},\n volume = {5},\n number = {3},\n journal = {Die Tonkunst: Magazin f{\\"u}r Klassische Musik und Musikwissenschaft}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n Hankinson, A.; Roland, P.; and Fujinaga, I.\n\n\n \n \n \n \n \n The Music Encoding Initiative as a Document-Encoding Framework.\n \n \n \n \n\n\n \n\n\n\n In Klapuri, A.; and Leider, C., editor(s), Proceedings of the 12th International Society for Music Information Retrieval Conference, ISMIR 2011, Miami, Florida, USA, October 24-28, 2011, pages 293–298, 2011. University of Miami\n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Hankinson_2011,\n abstract = {Recent changes in the Music Encoding Initiative (MEI) have transformed it into an extensible platform from which new notation encoding schemes can be produced. This paper introduces MEI as a document-encoding framework, and illustrates how it can be extended to encode new types of notation, eliminating the need for creating specialized and potentially incompatible notation encoding standards.},\n author = {Hankinson, Andrew and Roland, Perry and Fujinaga, Ichiro},\n title = {The Music Encoding Initiative as a Document-Encoding Framework},\n url = {http://ismir2011.ismir.net/papers/OS3-1.pdf},\n pages = {293–298},\n publisher = {{University of Miami}},\n isbn = {978-0-615-54865-4},\n editor = {Klapuri, Anssi and Leider, Colby},\n booktitle = {Proceedings of the 12th International Society for Music Information Retrieval Conference, ISMIR 2011, Miami, Florida, USA, October 24-28, 2011},\n year = {2011}\n}\n\n\n
\n
\n\n\n
\n Recent changes in the Music Encoding Initiative (MEI) have transformed it into an extensible platform from which new notation encoding schemes can be produced. This paper introduces MEI as a document-encoding framework, and illustrates how it can be extended to encode new types of notation, eliminating the need for creating specialized and potentially incompatible notation encoding standards.\n
\n\n\n
\n\n\n
\n \n\n \n \n Kepper, J.\n\n\n \n \n \n \n \n Musikedition im Zeichen neuer Medien. Historische Entwicklung und gegenwärtige Perspektiven musikalischer Gesamtausgaben.\n \n \n \n \n\n\n \n\n\n\n of Schriften des Instituts für Dokumentologie und EditorikBoD, Norderstedt, 2011.\n \n\n\n\n
\n\n\n\n \n \n \"Musikedition urn\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@book{Kepper_2011,\n abstract = {Die Keimzelle der Musikwissenschaft als geisteswissenschaftlicher Disziplin liegt in den Bem{\\"u}hungen des 19. Jahrhunderts, die Werke herausragender Komponisten zu konservieren und einer breiteren {\\"O}ffentlichkeit zu erschlie{\\ss}en. In diesem Umfeld erschien im Jahr 1851 der erste Band der Bach-Gesamtausgabe, herausgegeben von der Leipziger Bachgesellschaft. Alle nachfolgenden Musiker-Ausgaben entwickelten sich auf dieser Basis und reizten die M{\\"o}glichkeiten des Buchmediums in zunehmenden Ma{\\ss}e aus. Seit etwa zehn Jahren wird versucht, das Potential digitaler Medien f{\\"u}r die Musikphilologie zu erschlie{\\ss}en. Ausgehend von der Geschichte musikwissenschaftlicher Ausgaben und einer kritischen Reflektion des bisher Geleisteten, weist dieser Band m{\\"o}gliche neue Perspektiven f{\\"u}r zuk{\\"u}nftige, dem neuen Medium angemessene Editionsformen auf.},\n author = {Kepper, Johannes},\n title = {Musikedition im Zeichen neuer Medien. Historische Entwicklung und gegenw{\\"a}rtige Perspektiven musikalischer Gesamtausgaben},\n publisher = {BoD},\n address = {Norderstedt},\n year = {2011},\n series = {Schriften des Instituts f{\\"u}r Dokumentologie und Editorik},\n number = {5},\n url_URN = {http://nbn-resolving.de/urn:nbn:de:hbz:38-66395},\n isbn = {9783844800760}\n\n}\n\n
\n
\n\n\n
\n Die Keimzelle der Musikwissenschaft als geisteswissenschaftlicher Disziplin liegt in den Bemühungen des 19. Jahrhunderts, die Werke herausragender Komponisten zu konservieren und einer breiteren Öffentlichkeit zu erschließen. In diesem Umfeld erschien im Jahr 1851 der erste Band der Bach-Gesamtausgabe, herausgegeben von der Leipziger Bachgesellschaft. Alle nachfolgenden Musiker-Ausgaben entwickelten sich auf dieser Basis und reizten die Möglichkeiten des Buchmediums in zunehmenden Maße aus. Seit etwa zehn Jahren wird versucht, das Potential digitaler Medien für die Musikphilologie zu erschließen. Ausgehend von der Geschichte musikwissenschaftlicher Ausgaben und einer kritischen Reflektion des bisher Geleisteten, weist dieser Band mögliche neue Perspektiven für zukünftige, dem neuen Medium angemessene Editionsformen auf.\n
\n\n\n
\n\n\n
\n \n\n \n \n Roland, P.; and Siegert, C.\n\n\n \n \n \n \n Process-Oriented Notation in MEI.\n \n \n \n\n\n \n\n\n\n Die Tonkunst: Magazin für Klassische Musik und Musikwissenschaft, 5(3): 305–309. 2011.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Roland_2011,\n abstract = {},\n author = {Roland, Perry and Siegert, Christine},\n year = {2011},\n title = {Process-Oriented Notation in MEI},\n pages = {305–309},\n volume = {5},\n number = {3},\n journal = {Die Tonkunst: Magazin f{\\"u}r Klassische Musik und Musikwissenschaft}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n Teich Geertinger, A.; and Pugin, L.\n\n\n \n \n \n \n MEI for Bridging the Gap Between Music Cataloguing and Digital Critical Editions.\n \n \n \n\n\n \n\n\n\n Die Tonkunst: Magazin für Klassische Musik und Musikwissenschaft, 5(3): 289–294. 2011.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{TeichGeertinger_2011,\n abstract = {},\n author = {{Teich Geertinger}, Axel and Pugin, Laurent},\n year = {2011},\n title = {MEI for Bridging the Gap Between Music Cataloguing and Digital Critical Editions},\n pages = {289–294},\n volume = {5},\n number = {3},\n journal = {Die Tonkunst: Magazin f{\\"u}r Klassische Musik und Musikwissenschaft}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n Viglianti, R.; and Veit, J.\n\n\n \n \n \n \n Mind the Gap. A Preliminary Evaluation of Issues in Combining Text and Music Encoding.\n \n \n \n\n\n \n\n\n\n Die Tonkunst: Magazin für Klassische Musik und Musikwissenschaft, 5(3): 318–325. 2011.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Viglianti_2011,\n abstract = {},\n author = {Viglianti, Raffaele and Veit, Joachim},\n year = {2011},\n title = {Mind the Gap. A Preliminary Evaluation of Issues in Combining Text and Music Encoding},\n pages = {318–325},\n volume = {5},\n number = {3},\n journal = {Die Tonkunst: Magazin f{\\"u}r Klassische Musik und Musikwissenschaft}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n Vigliensoni, G.; Burgoyne, J. A.; Hankinson, A.; and Fujinaga, I.\n\n\n \n \n \n \n \n Automatic Pitch Recognition in Printed Square-Note Notation.\n \n \n \n \n\n\n \n\n\n\n In Klapuri, A.; and Leider, C., editor(s), Proceedings of the 12th International Society for Music Information Retrieval Conference, ISMIR 2011, Miami, Florida, USA, October 24-28, 2011, pages 423–428, 2011. University of Miami\n \n\n\n\n
\n\n\n\n \n \n \"AutomaticPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Vigliensoni_2011,\n abstract = {In this paper we present our research in the development of a pitch-finding system to extract the pitches of neumes–some of the oldest representations of pitch in Western music– from the Liber Usualis, a well-known compendium of plainchant as used in the Roman Catholic church. Considerations regarding the staff position, staff removal, space- and linezones, as well as how we treat specific neume classes and modifiers are covered. This type of notation presents a challenge for traditional optical music recognition (OMR) systems because individual note pitches are indivisible from the larger ligature group that forms the neume. We have created a dataset of correctly-notated transcribed chant for comparing the performance of different variants of our pitch-finding system. The best result showed a recognition rate of 97{\\%} tested with more than 2000 neumes.},\n author = {Vigliensoni, Gabriel and Burgoyne, John Ashley and Hankinson, Andrew and Fujinaga, Ichiro},\n title = {Automatic Pitch Recognition in Printed Square-Note Notation},\n url = {http://www.ismir2011.ismir.net/papers/PS3-12.pdf},\n pages = {423–428},\n publisher = {{University of Miami}},\n isbn = {978-0-615-54865-4},\n editor = {Klapuri, Anssi and Leider, Colby},\n booktitle = {Proceedings of the 12th International Society for Music Information Retrieval Conference, ISMIR 2011, Miami, Florida, USA, October 24-28, 2011},\n year = {2011}\n}\n\n\n
\n
\n\n\n
\n In this paper we present our research in the development of a pitch-finding system to extract the pitches of neumes–some of the oldest representations of pitch in Western music– from the Liber Usualis, a well-known compendium of plainchant as used in the Roman Catholic church. Considerations regarding the staff position, staff removal, space- and linezones, as well as how we treat specific neume classes and modifiers are covered. This type of notation presents a challenge for traditional optical music recognition (OMR) systems because individual note pitches are indivisible from the larger ligature group that forms the neume. We have created a dataset of correctly-notated transcribed chant for comparing the performance of different variants of our pitch-finding system. The best result showed a recognition rate of 97% tested with more than 2000 neumes.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2010\n \n \n (3)\n \n \n
\n
\n \n \n
\n \n\n \n \n Hankinson, A.; Pugin, L.; and Fujinaga, I.\n\n\n \n \n \n \n \n An Interchange Format for Optical Music Recognition Applications.\n \n \n \n \n\n\n \n\n\n\n In Downie, J. S.; and Veltkamp, R. C., editor(s), Proceedings of the 11th International Society for Music Information Retrieval Conference, ISMIR 2010, Utrecht, Netherlands, August 9-13, 2010, pages 51–56, 2010. International Society for Music Information Retrieval\n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Hankinson_2010,\n abstract = {Page appearance and layout for music notation is a critical component of the overall musical information contained in a document. To capture and transfer this information, we outline an interchange format for OMR applications, the OMR Interchange Package (OIP) format, which is designed to allow layout information and page images to be preserved and transferred along with semantic musical content. We identify a number of uses for this format that can enhance digital representations of music, and introduce a novel idea for distributed optical music recognition system based on this format.},\n author = {Hankinson, Andrew and Pugin, Laurent and Fujinaga, Ichiro},\n title = {An Interchange Format for Optical Music Recognition Applications},\n url = {http://ismir2010.ismir.net/proceedings/ismir2010-11.pdf},\n pages = {51–56},\n publisher = {{International Society for Music Information Retrieval}},\n isbn = {978-90-393-53813},\n editor = {Downie, J. Stephen and Veltkamp, Remco C.},\n booktitle = {Proceedings of the 11th International Society for Music Information Retrieval Conference, ISMIR 2010, Utrecht, Netherlands, August 9-13, 2010},\n year = {2010}\n}\n\n\n
\n
\n\n\n
\n Page appearance and layout for music notation is a critical component of the overall musical information contained in a document. To capture and transfer this information, we outline an interchange format for OMR applications, the OMR Interchange Package (OIP) format, which is designed to allow layout information and page images to be preserved and transferred along with semantic musical content. We identify a number of uses for this format that can enhance digital representations of music, and introduce a novel idea for distributed optical music recognition system based on this format.\n
\n\n\n
\n\n\n
\n \n\n \n \n Röwenstrunk, D.\n\n\n \n \n \n \n Digital Music Notation Data Model and Prototype Delivery System. Ein deutsch-amerikanisches Projekt zur Förderung eines wissenschaftlichen Codierungsformats für Musiknotation.\n \n \n \n\n\n \n\n\n\n Forum Musikbibliothek: Beitrage und Informationen aus der Musikbibliothekarischen Praxis, 31(2): 134–138. 2010.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Roewenstrunk_2010,\n abstract = {},\n author = {R{\\"o}wenstrunk, Daniel},\n year = {2010},\n title = {Digital Music Notation Data Model and Prototype Delivery System. Ein deutsch-amerikanisches Projekt zur F{\\"o}rderung eines wissenschaftlichen Codierungsformats f{\\"u}r Musiknotation},\n pages = {134–138},\n volume = {31},\n number = {2},\n journal = {Forum Musikbibliothek: Beitrage und Informationen aus der Musikbibliothekarischen Praxis}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n Viglianti, R.\n\n\n \n \n \n \n \n Critical Editing of Music in the Digital Medium. An Experiment in MEI.\n \n \n \n \n\n\n \n\n\n\n In Digital Humanities 2010 (DH2010). Conference Abstracts, pages 380–382, 2010. \n \n\n\n\n
\n\n\n\n \n \n \"CriticalPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Viglianti_2010,\n abstract = {This poster presents the results of the [author's] dissertation's case study: a digital edition of Claude Debussy's Syrinx (La Fl{\\^u}te de Pan) for flute solo. The XML-based model represents notation, variant readings and editorial intervention; additionally, several different views are extracted and rendered for presentation with vector images.},\n author = {Viglianti, Raffaele},\n title = {Critical Editing of Music in the Digital Medium. An Experiment in MEI},\n url = {https://web.archive.org/web/20221127170356/https://dh2010.cch.kcl.ac.uk/academic-programme/abstracts/papers/pdf/ab-819.pdf},\n pages = {380–382},\n booktitle = {Digital Humanities 2010 (DH2010). Conference Abstracts},\n year = {2010}\n}\n\n\n
\n
\n\n\n
\n This poster presents the results of the [author's] dissertation's case study: a digital edition of Claude Debussy's Syrinx (La Flûte de Pan) for flute solo. The XML-based model represents notation, variant readings and editorial intervention; additionally, several different views are extracted and rendered for presentation with vector images.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2009\n \n \n (3)\n \n \n
\n
\n \n \n
\n \n\n \n \n Kepper, J.\n\n\n \n \n \n \n \n XML-Based Encoding of Musicological Data – About the Requirements of a Digital Music Philology.\n \n \n \n \n\n\n \n\n\n\n it – Information Technology Methoden und innovative Anwendungen der Informatik und Informationstechnik, 51(4): 216–221. 2009.\n \n\n\n\n
\n\n\n\n \n \n \"XML-Based link\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Kepper_2009,\n abstract = {The article starts with a brief introduction to the history of music notation encoding. MusicXML and MEI as two of the most recent XML-based file formats are based on completely different concepts of music notation. Whereas MusicXML is the unchallenged market-leader for data interchange, MEI deliberately concentrates on music editorial needs. During this article I will try to point out some of these needs and give a short impression of the specific problems of digital scholarly editions.},\n author = {Kepper, Johannes},\n year = {2009},\n title = {XML-Based Encoding of Musicological Data – About the Requirements of a Digital Music Philology},\n url_Link = {https://www.degruyter.com/view/j/itit.2009.51.issue-4/itit.2009.0544/itit.2009.0544.xml},\n pages = {216–221},\n volume = {51},\n number = {4},\n journal = {it – Information Technology Methoden und innovative Anwendungen der Informatik und Informationstechnik},\n doi = {10.1524/itit.2009.0544}\n}\n\n\n
\n
\n\n\n
\n The article starts with a brief introduction to the history of music notation encoding. MusicXML and MEI as two of the most recent XML-based file formats are based on completely different concepts of music notation. Whereas MusicXML is the unchallenged market-leader for data interchange, MEI deliberately concentrates on music editorial needs. During this article I will try to point out some of these needs and give a short impression of the specific problems of digital scholarly editions.\n
\n\n\n
\n\n\n
\n \n\n \n \n Roland, P.\n\n\n \n \n \n \n \n The Music Encoding Initiative (MEI) DTD and the OCVE.\n \n \n \n \n\n\n \n\n\n\n 2009.\n Charlottesville, VA. Powerpoint\n\n\n\n
\n\n\n\n \n \n \"The slides\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{Roland_2009,\n abstract = {},\n author = {Roland, Perry},\n year = {2009},\n title = {The Music Encoding Initiative (MEI) DTD and the OCVE},\n url_Slides = {http://slideplayer.com/slide/2544413/},\n note = {Charlottesville, VA. Powerpoint}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n Roland, P.\n\n\n \n \n \n \n \n The Music Encoding Initiative (MEI) DTD and the Online Chopin Variorum Edition.\n \n \n \n \n\n\n \n\n\n\n Technical Report 2009.\n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@techreport{Roland_2009b,\n abstract = {The purpose of the Music Encoding Initiative (MEI) DTD is two-fold: to provide a standardized, universal XML encoding format for music content (and its accompanying meta-data) and to facilitate interchange of the encoded data. MEI is not designed to be an input code per se, like the Plaine and Easie code; however, it is intended to be human-readable and easily understood and applied. Because of its emphasis on comprehensiveness and software independence, MEI may also function as an archival data format. This white paper describes the features of MEI and the advantages of its use as the encoding standard for the Online Chopin Variorum Edition.},\n author = {Roland, Perry},\n year = {2009},\n title = {The Music Encoding Initiative (MEI) DTD and the Online Chopin Variorum Edition},\n url = {https://pdfs.semanticscholar.org/f216/823c759b89ad8f623cdbd0e3c6e77bc4fe7e.pdf}\n}\n\n\n
\n
\n\n\n
\n The purpose of the Music Encoding Initiative (MEI) DTD is two-fold: to provide a standardized, universal XML encoding format for music content (and its accompanying meta-data) and to facilitate interchange of the encoded data. MEI is not designed to be an input code per se, like the Plaine and Easie code; however, it is intended to be human-readable and easily understood and applied. Because of its emphasis on comprehensiveness and software independence, MEI may also function as an archival data format. This white paper describes the features of MEI and the advantages of its use as the encoding standard for the Online Chopin Variorum Edition.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2007\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n Roland, P.; and Downie, J. S.\n\n\n \n \n \n \n \n Recent Developments in the Music Encoding Initiative Project. Enhancing Digital Musicology and Scholarship.\n \n \n \n \n\n\n \n\n\n\n In 19th Joint Conference on the Digital Humanities, Conference Abstracts, pages 186–189, 2007. University of Illinois\n \n\n\n\n
\n\n\n\n \n \n \"Recent abstract\n  \n \n \n \"Recent poster\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Roland_2007,\n abstract = {},\n author = {Roland, Perry and Downie, J. Stephen},\n title = {Recent Developments in the Music Encoding Initiative Project. Enhancing Digital Musicology and Scholarship},\n url_Abstract = {http://www.digitalhumanities.org/dh2007/dh2007.abstracts.pdf},\n url_Poster = {http://music-encoding.org/downloads/RolandDownie2007poster.pdf},\n pages = {186–189},\n booktitle = {19th Joint Conference on the Digital Humanities, Conference Abstracts},\n publisher = {{University of Illinois}},\n year = {2007}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n Schräder, G.\n\n\n \n \n \n \n \n Ein XML-Datenformat zur Repräsentation kritischer Musikedition unter besonderer Berücksichtigung von Neumennotation.\n \n \n \n \n\n\n \n\n\n\n August 2007.\n Seminar Paper. Tübingen, Eberhard Karls Universität\n\n\n\n
\n\n\n\n \n \n \"EinPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{Schraeder_2007,\n abstract = {Nach einer kurzen Einf{\\"u}hrung zu den Themen der Musikedition und Kodierung mittelalterlicher Neumen habe ich die Vorteile von XML als Speicherformat vorgestellt. Hierbei ist deutlich geworden, dass XML momentan die erste Wahl f{\\"u}r die Speicherung von Daten darstellt. Es existieren bereits Speicherformate auf der Basis von XML, die sich mit der Kodierung von Musik besch{\\"a}ftigen. MusicXML, MEI und NeumesXML wurden als Beispiele hierf{\\"u}r vorgestellt. Diese Formate unterst{\\"u}tzen die speziellen Anforderungen dieser Arbeit aber nur teilweise. MusicXML ist als Austauschformat zwischen unterschiedlichen Anwendungen gedacht und unterst{\\"u}tzt keine kritischen Editionen oder {\\"a}ltere Musiknotation. MEI unterst{\\"u}tzt zwar den kritischen Bericht sehr gut, aber nicht die Neumendarstellung. NeumesXML ist f{\\"u}r die Neumendarstellung optimiert, allerdings sind Varianten nicht vorgesehen. Eine Eigenentwicklung war dennoch nicht notwendig. Perry Roland, der Entwickler von MEI, zeigte die Bereitschaft, eine Neumenkodierung in MEI zu integrieren. Diese Erweiterung wurde in Kapitel 6 vorgestellt.},\n author = {Schr{\\"a}der, Gregor},\n year = {2007},\n month = {August},\n title = {{Ein XML-Datenformat zur Repr{\\"a}sentation kritischer Musikedition unter besonderer Ber{\\"u}cksichtigung von Neumennotation}},\n url = {http://www.dimused.uni-tuebingen.de/downloads/studienarbeit.pdf},\n note = {{Seminar Paper. T{\\"u}bingen, Eberhard Karls Universit{\\"a}t}},\n}\n\n\n
\n
\n\n\n
\n Nach einer kurzen Einführung zu den Themen der Musikedition und Kodierung mittelalterlicher Neumen habe ich die Vorteile von XML als Speicherformat vorgestellt. Hierbei ist deutlich geworden, dass XML momentan die erste Wahl für die Speicherung von Daten darstellt. Es existieren bereits Speicherformate auf der Basis von XML, die sich mit der Kodierung von Musik beschäftigen. MusicXML, MEI und NeumesXML wurden als Beispiele hierfür vorgestellt. Diese Formate unterstützen die speziellen Anforderungen dieser Arbeit aber nur teilweise. MusicXML ist als Austauschformat zwischen unterschiedlichen Anwendungen gedacht und unterstützt keine kritischen Editionen oder ältere Musiknotation. MEI unterstützt zwar den kritischen Bericht sehr gut, aber nicht die Neumendarstellung. NeumesXML ist für die Neumendarstellung optimiert, allerdings sind Varianten nicht vorgesehen. Eine Eigenentwicklung war dennoch nicht notwendig. Perry Roland, der Entwickler von MEI, zeigte die Bereitschaft, eine Neumenkodierung in MEI zu integrieren. Diese Erweiterung wurde in Kapitel 6 vorgestellt.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2006\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n Kepper, J.\n\n\n \n \n \n \n \n Codierungsformen von Musik.\n \n \n \n \n\n\n \n\n\n\n November 2006.\n \"Digitale Medien und Musikedition\". Kolloquium des Ausschusses für musikwissenschaftliche Editionen der Union der deutschen Akademien der Wissenschaften. Mainz, Akademie der Wissenschaften\n\n\n\n
\n\n\n\n \n \n \"CodierungsformenPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{Kepper_2006,\n abstract = {},\n author = {Kepper, Johannes},\n year = {2006},\n month = {November},\n title = {{Codierungsformen von Musik}},\n url = {http://www.adwmainz.de/fileadmin/adwmainz/MuKo_Veranstaltungen/S2-Digitale_Medien/kepper.pdf},\n note = {{"Digitale Medien und Musikedition". Kolloquium des Ausschusses f{\\"u}r musikwissenschaftliche Editionen der Union der deutschen Akademien der Wissenschaften. Mainz, Akademie der Wissenschaften}}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n Morent, S.; and Schräder, G.\n\n\n \n \n \n \n \n TüBingen. Digital Critical Edition of Medieval Music. The Music of Hildegard von Bingen [1198-1179].\n \n \n \n \n\n\n \n\n\n\n November 2006.\n \"Digitale Medien und Musikedition\". Kolloquium des Ausschusses für musikwissenschaftliche Editionen der Union der deutschen Akademien der Wissenschaften. Mainz, Akademie der Wissenschaften\n\n\n\n
\n\n\n\n \n \n \"TüBingen.Paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{Morent_2006,\n abstract = {},\n author = {Morent, Stefan and Schr{\\"a}der, Gregor},\n year = {2006},\n month = {November},\n title = {T{\\"u}Bingen. Digital Critical Edition of Medieval Music. The Music of Hildegard von Bingen [1198-1179]},\n url = {http://www.adwmainz.de/fileadmin/adwmainz/MuKo_Veranstaltungen/S2-Digitale_Medien/TueBingen.pdf},\n note = {{"Digitale Medien und Musikedition". Kolloquium des Ausschusses f{\\"u}r musikwissenschaftliche Editionen der Union der deutschen Akademien der Wissenschaften. Mainz, Akademie der Wissenschaften}}\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2003\n \n \n (4)\n \n \n
\n
\n \n \n
\n \n\n \n \n Byrd, D. A.; and Isaacson, E.\n\n\n \n \n \n \n \n A Music Representation Requirement Specification for Academia.\n \n \n \n \n\n\n \n\n\n\n Computer Music Journal, 27(4): 43–57. 2003.\n \n\n\n\n
\n\n\n\n \n \n \"A jstor\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Byrd_2003,\n abstract = {This specification was originally intended for use in developing Variations2. It reflects Indiana University School of Music's orientation toward Western art music ("classical" music), though it also has a strong jazz program and offers courses in popular music. We believe, however, that our requirements are similar to those of almost any academic music department with a similar emphasis on classical music. Specifically, we believe that most music departments that emphasize classical music will have similar requirements regardless of how they approach teaching music theory and analysis and–at least within the limits of music for performance by instrumentalists and singers–regardless of what styles of composition they emphasize. Beyond that, these requirements directly reflect what information is important in notating music, and they should therefore be of considerable interest to designers of music-editing programs.},\n author = {Byrd, Donald A. and Isaacson, Eric},\n year = {2003},\n title = {A Music Representation Requirement Specification for Academia},\n url_JSTOR = {http://www.jstor.org/stable/3681900},\n pages = {43–57},\n volume = {27},\n number = {4},\n journal = {Computer Music Journal}\n}\n\n\n
\n
\n\n\n
\n This specification was originally intended for use in developing Variations2. It reflects Indiana University School of Music's orientation toward Western art music (\"classical\" music), though it also has a strong jazz program and offers courses in popular music. We believe, however, that our requirements are similar to those of almost any academic music department with a similar emphasis on classical music. Specifically, we believe that most music departments that emphasize classical music will have similar requirements regardless of how they approach teaching music theory and analysis and–at least within the limits of music for performance by instrumentalists and singers–regardless of what styles of composition they emphasize. Beyond that, these requirements directly reflect what information is important in notating music, and they should therefore be of considerable interest to designers of music-editing programs.\n
\n\n\n
\n\n\n
\n \n\n \n \n Roland, P.\n\n\n \n \n \n \n \n Design Patterns in XML Music Representation.\n \n \n \n \n\n\n \n\n\n\n In ISMIR 2003, 4th International Conference on Music Information Retrieval, Baltimore, Maryland, USA, October 27-30, 2003, Proceedings, 2003. \n \n\n\n\n
\n\n\n\n \n \n \"DesignPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Roland_2003a,\n abstract = {Design patterns attempt to formalize the discussion of recurring problems and their solutions. This paper introduces several XML design patterns and demonstrates their usefulness in the development of XML music representations. The patterns have been grouped into several categories of desirable outcome of the design process – modularity, separation of data and meta-data, reduction of learning requirements, assistance to tool development, and increase in legibility and understandability. The Music Encoding Initiative (MEI) DTD, from which the examples are drawn, the examples, and other materials related to MEI are available at http://www.people.virginia.edu/ {\\~{}}pdr4h/.},\n author = {Roland, Perry},\n title = {Design Patterns in XML Music Representation},\n url = {https://jscholarship.library.jhu.edu/bitstream/handle/1774.2/50/paper.pdf},\n booktitle = {ISMIR 2003, 4th International Conference on Music Information Retrieval, Baltimore, Maryland, USA, October 27-30, 2003, Proceedings},\n year = {2003}\n}\n\n\n
\n
\n\n\n
\n Design patterns attempt to formalize the discussion of recurring problems and their solutions. This paper introduces several XML design patterns and demonstrates their usefulness in the development of XML music representations. The patterns have been grouped into several categories of desirable outcome of the design process – modularity, separation of data and meta-data, reduction of learning requirements, assistance to tool development, and increase in legibility and understandability. The Music Encoding Initiative (MEI) DTD, from which the examples are drawn, the examples, and other materials related to MEI are available at http://www.people.virginia.edu/ \\ pdr4h/.\n
\n\n\n
\n\n\n
\n \n\n \n \n Roland, P.\n\n\n \n \n \n \n \n Music Encoding Initiative (MEI) DTD.\n \n \n \n \n\n\n \n\n\n\n 2003.\n MusicNetwork Notation Workshop: XML-Based Music Notation Solutions. Leeds, England\n\n\n\n
\n\n\n\n \n \n \"MusicPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{Roland_2003b,\n abstract = {This paper provides a technical introduction to the Music Encoding Initiative (MEI) DTD currently under development by the author. It is consciously modeled on the highly successful Text Encoding Initiative (TEI) DTD. The primary purpose of the MEI DTD is the creation of a comprehensive yet extensible standard for the encoding and transmission of music documents in electronic form.},\n author = {Roland, Perry},\n title = {Music Encoding Initiative (MEI) DTD},\n url = {http://xml.coverpages.org/PerryMusicnetwork2003.pdf},\n year = {2003},\n note = {MusicNetwork Notation Workshop: XML-Based Music Notation Solutions. Leeds, England}\n}\n\n\n
\n
\n\n\n
\n This paper provides a technical introduction to the Music Encoding Initiative (MEI) DTD currently under development by the author. It is consciously modeled on the highly successful Text Encoding Initiative (TEI) DTD. The primary purpose of the MEI DTD is the creation of a comprehensive yet extensible standard for the encoding and transmission of music documents in electronic form.\n
\n\n\n
\n\n\n
\n \n\n \n \n Roland, P.\n\n\n \n \n \n \n \n Modular Design of the Music Encoding Initiative (MEI) DTD.\n \n \n \n \n\n\n \n\n\n\n 2003.\n MusicNetwork Notation Workshop: XML-Based Music Notation Solutions, Leeds, England. Powerpoint\n\n\n\n
\n\n\n\n \n \n \"Modular slides\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{Roland_2003c,\n abstract = {},\n author = {Roland, Perry},\n year = {2003},\n title = {Modular Design of the Music Encoding Initiative (MEI) DTD},\n url_Slides = {https://www.powershow.com/view1/20f360-ZDc1Z/Modular_Design_of_the_Music_Encoding_Initiative_MEI_DTD_powerpoint_ppt_presentation},\n note = {MusicNetwork Notation Workshop: XML-Based Music Notation Solutions, Leeds, England. Powerpoint}\n}\n\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2002\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n Roland, P.\n\n\n \n \n \n \n \n The Music Encoding Initiative (MEI).\n \n \n \n \n\n\n \n\n\n\n In MAX2002. Proceedings of the First International Conference on Musical Application using XML, pages 55–59, 2002. \n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Roland_2002,\n abstract = {This paper draws parallels between the Text Encoding Initiative (TEI) and the proposed Music Encoding Initiative (MEI), reviews existing design principles for music representations, and describes an eXtensible Markup Language (XML) document type definition (DTD) for modeling music notation which attempts to incorporate those principles.},\n author = {Roland, Perry},\n title = {The Music Encoding Initiative (MEI)},\n url = {http://xml.coverpages.org/MAX2002-PRoland.pdf},\n pages = {55–59},\n booktitle = {MAX2002. Proceedings of the First International Conference on Musical Application using XML},\n year = {2002}\n}\n\n\n
\n
\n\n\n
\n This paper draws parallels between the Text Encoding Initiative (TEI) and the proposed Music Encoding Initiative (MEI), reviews existing design principles for music representations, and describes an eXtensible Markup Language (XML) document type definition (DTD) for modeling music notation which attempts to incorporate those principles.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2000\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n Roland, P.\n\n\n \n \n \n \n \n XML4MIR. Extensible Markup Language for Music Information Retrieval.\n \n \n \n \n\n\n \n\n\n\n In ISMIR 2000, 1st International Symposium on Music Information Retrieval, Plymouth, Massachusetts, USA, October 23-25, 2000, Proceedings, 2000. \n \n\n\n\n
\n\n\n\n \n \n \"XML4MIR.Paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 5 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Roland_2000,\n abstract = {This paper evaluates the role of standards in information exchange and suggests the adoption of XML standards for music representation and meta-data to serve as the basis for music information retrieval.},\n author = {Roland, Perry},\n title = {XML4MIR. Extensible Markup Language for Music Information Retrieval},\n url = {http://ismir2000.ismir.net/papers/roland_paper.pdf},\n booktitle = {ISMIR 2000, 1st International Symposium on Music Information Retrieval, Plymouth, Massachusetts, USA, October 23-25, 2000, Proceedings},\n year = {2000}\n}\n\n\n
\n
\n\n\n
\n This paper evaluates the role of standards in information exchange and suggests the adoption of XML standards for music representation and meta-data to serve as the basis for music information retrieval.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n\n\n\n
\n\n\n \n\n \n \n \n \n\n
\n"}; document.write(bibbase_data.data);