The bibliography below can be embedded in another page in any of three ways.

Embedding with JavaScript:
<script src="https://bibbase.org/show?bib=https%3A%2F%2Fapi.zotero.org%2Fgroups%2F2386895%2Fcollections%2FXX2NLPN2%2Fitems%3Fformat%3Dbibtex%26limit%3D100&jsonp=1"></script>
Embedding with PHP:
<?php
// Fetch the bibliography rendered by BibBase and output it inline.
$contents = file_get_contents("https://bibbase.org/show?bib=https%3A%2F%2Fapi.zotero.org%2Fgroups%2F2386895%2Fcollections%2FXX2NLPN2%2Fitems%3Fformat%3Dbibtex%26limit%3D100");
if ($contents !== false) {
    echo $contents;
}
?>
Embedding with an iFrame:
<iframe src="https://bibbase.org/show?bib=https%3A%2F%2Fapi.zotero.org%2Fgroups%2F2386895%2Fcollections%2FXX2NLPN2%2Fitems%3Fformat%3Dbibtex%26limit%3D100"></iframe>
For more details see the BibBase documentation.
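If you prefer to work with the underlying records rather than with BibBase's rendered HTML, note that the bib parameter in the snippets above is simply a URL-encoded Zotero API request. The following is a minimal PHP sketch along those lines, assuming the Zotero group collection remains publicly readable; the <pre> output is only for illustration.

<?php
// Decoded form of the bib parameter used above: the Zotero group collection
// exported as BibTeX. Collections with more than 100 items need paging via
// the start parameter of the Zotero API.
$endpoint = "https://api.zotero.org/groups/2386895/collections/XX2NLPN2/items?format=bibtex&limit=100";
$bibtex = file_get_contents($endpoint);
if ($bibtex === false) {
    die("Could not reach the Zotero API.");
}
// $bibtex now holds the same entries that are listed below.
echo "<pre>" . htmlspecialchars($bibtex) . "</pre>";
?>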
@book{leonelli_philosophy_2023, address = {Cambridge}, series = {Elements in the {Philosophy} of {Science}}, title = {Philosophy of {Open} {Science}}, isbn = {978-1-00-941639-9}, url = {https://www.cambridge.org/core/elements/philosophy-of-open-science/0D049ECF635F3B676C03C6868873E406}, publisher = {Cambridge University Press}, author = {Leonelli, Sabina}, year = {2023}, doi = {10.1017/9781009416368}, }
@incollection{clavaud_ica_2021, title = {{ICA} {Records} in {Contexts}-{Ontology} ({RiC}-{O}): a {Semantic} {Framework} for {Describing} {Archival} {Resources}}, shorttitle = {{ICA} {Records} in {Contexts}-{Ontology} ({RiC}-{O})}, url = {https://enc.hal.science/hal-03965776}, abstract = {This article gives an overview of the new Records in Contexts Ontology (RiC-O), which is available at https://www.ica.org/standards/RiC/ontology. This ontology is part of the Records in Contexts (RiC) standard, which has been developed by the International Council on Archives to describe and contextualize archival resources in a comprehensible way that goes beyond the possibilities of the existing archival standards. The article explains the rationale for developing a new standard for archival description, and particularly the ontology. It provides a quick overview of the RiC Conceptual Model and then focuses on RiC-O, its design principles and content and giving references to more precise documentation that is publicly available online. Finally, it presents the roadmap and future perspectives of RiC.}, language = {en}, urldate = {2023-08-18}, author = {Clavaud, Florence and Wildi, Tobias}, year = {2021}, pages = {p. 79}, }
@article{hawkins_archives_2021, title = {Archives, linked data and the digital humanities: increasing access to digitised and born-digital archives via the semantic web}, issn = {1573-7500}, shorttitle = {Archives, linked data and the digital humanities}, url = {https://doi.org/10.1007/s10502-021-09381-0}, doi = {10.1007/s10502-021-09381-0}, abstract = {Mass digitisation and the exponential growth of born-digital archives over the past two decades have resulted in an enormous volume of archives and archival data being available digitally. This has produced a valuable but under-utilised source of large-scale digital data ripe for interrogation by scholars and practitioners in the Digital Humanities. However, current digitisation approaches fall short of the requirements of digital humanists for structured, integrated, interoperable, and interrogable data. Linked Data provides a viable means of producing such data, creating machine-readable archival data suited to analysis using digital humanities research methods. While a growing body of archival scholarship and praxis has explored Linked Data, its potential to open up digitised and born-digital archives to the Digital Humanities is under-examined. This article approaches Archival Linked Data from the perspective of the Digital Humanities, extrapolating from both archival and digital humanities Linked Data scholarship to identify the benefits to digital humanists of the production and provision of access to Archival Linked Data. It will consider some of the current barriers preventing digital humanists from being able to experience the benefits of Archival Linked Data evidenced, and to fully utilise archives which have been made available digitally. The article argues for increased collaboration between the two disciplines, challenges individuals and institutions to engage with Linked Data, and suggests the incorporation of AI and low-barrier tools such as Wikidata into the Linked Data production workflow in order to scale up the production of Archival Linked Data as a means of increasing access to and utilisation of digitised and born-digital archives.}, language = {en}, urldate = {2022-01-14}, journal = {Archival Science}, author = {Hawkins, Ashleigh}, month = dec, year = {2021}, }
@article{beretta_challenge_2021, title = {A challenge for historical research: {Making} data {FAIR} using a collaborative ontology management environment ({OntoME})}, volume = {12}, issn = {2210-4968, 1570-0844}, shorttitle = {A challenge for historical research}, url = {https://www.medra.org/servlet/aliasResolver?alias=iospress&doi=10.3233/SW-200416}, doi = {10.3233/SW-200416}, abstract = {This paper addresses the issue of interoperability of data generated by historical research and heritage institutions in order to make them re-usable for new research agendas according to the FAIR principles. After introducing the symogih.org project’s ontology, it proposes a description of the essential aspects of the process of historical knowledge production. It then develops an epistemological and semantic analysis of conceptual data modelling applied to factual historical information, based on the foundational ontologies Constructive Descriptions and Situations and DOLCE, and discusses the reasons for adopting the CIDOC CRM as a core ontology for the field of historical research, but extending it with some relevant, missing high-level classes. Finally, it shows how collaborative data modelling carried out in the ontology management environment OntoME makes it possible to elaborate a communal fine-grained and adaptive ontology of the domain, provided an active research community engages in this process. With this in mind, the Data for history consortium was founded in 2017 and promotes the adoption of a shared conceptualization in the field of historical research.}, number = {2}, urldate = {2021-03-16}, journal = {Semantic Web}, author = {Beretta, Francesco}, editor = {Bikakis, Antonis and Markhoff, Beatrice and Mosca, Alessandro and Jean, Stéphane and Hyvönen, Eero}, month = jan, year = {2021}, pages = {279--294}, }
@incollection{vogeler_semantic_2020, address = {Norderstedt}, title = {Das {Semantic} {Web} als {Giant} {Global} {Kontext}?}, volume = {14}, url = {http://www.uni-koeln.de/}, abstract = {Unter dem Gesichtspunkt der „Rekontextualisierung“ erweist sich das Semantic Web als eine eigenartige textuelle Ausdrucksform, die als Prototyp einer digitalen Rekontextualisierung verstanden werden könnte: Es besteht konstitutiv aus Aussagen (Basisprinzip Triples), die über das Internet miteinander verknüpfbar sind (Basisprinzip IRI). Im Unterschied zum Hypertext sind die Aussagen aber hochgradig fragmentiert und formalisiert. Sie sind damit „digitaler“ als Hypertext, da sie weit stärker als dieser diskret konfiguriert sind. Zusätzlich sind sie einigen formallogischen Methoden zugänglich, welche die für einen Text notwendige Kohärenz erzeugen. Aus textlinguistischer Sicht könnte man also das Semantic Web nicht nur als „Giant Global Graph“ sondern auch als „Giant Global Text“ zu beschreiben versuchen. Damit wird der Begriff der Rekontextualisierung für die Verwendung des Semantic Web in den DH produktiv, weil er darauf hinweist, dass das Semantic Web nicht ohne menschliche Intervention Text produziert. Daraus ergibt sich aber auch eine »Unsichtbarkeit« des Semantic Web. Die Rekontextualisierung mit Methoden des Semantic Web kann deshalb auch darin bestehen, den „Giant Global Text“ mit jeweils neuen Paratexten zu versehen und so neue Rezeptionsformen zu ermöglichen.}, language = {de}, urldate = {2021-12-30}, publisher = {BoD}, author = {Vogeler, Georg}, editor = {Meier-Vieracker, Simon and Viehhauser, Gabriel and Sahle, Patrick}, year = {2020}, note = {ISSN: 2197-6945 Num Pages: 14}, pages = {55--70}, }
@misc{evert_ims_2020, title = {The {IMS} {Open} {Corpus} {Workbench} ({CWB}) {CQP} {Query} {Language} {Tutorial}}, url = {http://cwb.sourceforge.net/files/CQP_Tutorial.pdf}, urldate = {2021-06-01}, author = {Evert, Stefan and {CWB Development Team}}, month = may, year = {2020}, }
@article{tolle_improving_2020, title = {Improving {Data} {Quality} by {Rules}: {A} {Numismatic} {Example}}, copyright = {http://creativecommons.org/licenses/by-nc-nd/3.0/de/deed.de}, shorttitle = {Improving {Data} {Quality} by {Rules}}, url = {https://publikationen.uni-tuebingen.de/xmlui/handle/10900/101838}, doi = {10.15496/publikation-43217}, abstract = {The archaeological data dealt with in our database solution Antike Fundmünzen in Europa (AFE), which records finds of ancient coins, is entered by humans. Based on the Linked Open Data (LOD) approach, we link our data to Nomisma.org concepts, as well as to other resources like Online Coins of the Roman Empire (OCRE). Since information such as denomination, material, etc. is recorded for each single coin, this information should be identical for coins of the same type. Unfortunately, this is not always the case, mostly due to human errors. Based on rules that we implemented, we were able to make use of this redundant information in order to detect possible errors within AFE, and were even able to correct errors in Nomisma.org. However, the approach had the weakness that it was necessary to transform the data into an internal data model. In a second step, we therefore developed our rules within the Linked Open Data world. The rules can now be applied to datasets following the Nomisma.org modelling approach, as we demonstrated with data held by Corpus Nummorum Thracorum (CNT). We believe that the use of methods like this to increase the data quality of individual databases, as well as across different data sources and up to the higher levels of OCRE and Nomisma.org, is mandatory in order to increase trust in them.}, language = {en}, urldate = {2021-06-01}, author = {Tolle, Karsten and Wigg-Wolf, David}, month = nov, year = {2020}, note = {Accepted: 2020-06-24T12:40:04Z Publisher: Universität Tübingen}, }
@article{wurtz_archival_2020, title = {Archival {Linked} ({Open}) {Data}: {Empfehlungen} für bestehende {Metadaten} und {Massnahmen} für die {Zukunft} am {Fallbeispiel} des {Schweizerischen} {Sozialarchivs}}, volume = {6}, copyright = {Copyright (c) 2020 Fabian Würtz}, issn = {2297-9069}, shorttitle = {Archival {Linked} ({Open}) {Data}}, url = {https://bop.unibe.ch/iw/article/view/7083}, doi = {10.18755/iw.2020.17}, abstract = {Eine Kernaufgabe der Archive ist die Erschliessung des Archivguts. Bisher wurden Archivbestände meist als hierarchische und isolierte Einheiten verzeichnet. Die zunehmende Digitalisierung, neue Fachbereiche wie die Digital Humanities oder Entwicklungen wie das Semantic Web bzw. Linked Open Data haben jedoch neue Ideen in die Archivwelt getragen. Einer der deutlichsten Vorboten dieser neuen Welt ist Records in Context (RiC). Der neue Verzeichnungsstandard des wichtigen International Council on Archives (ICA) ist konzeptionell auf Linked Open Data und das Semantic Web ausgerichtet. Doch was bedeutet es für die Archive, wenn aus den bisher isolierten Beständen verlinkte und maschinenlesbare Netzwerke entstehen sollen? Wie sollen archivalische Metadaten und Datenmodelle in Linked Open Data aussehen und an welche Qualitätsansprüche sollen diese neu berücksichtigen? Um diese Fragen zu beantworten hat die Arbeit das Konzept und die Technologien die Linked Open Data zugrunde liegen vorgestellt. Danach wurden Qualitätsmerkmale für Linked Open Data zusammengetragen und der momentane Stand von Linked Open Data im Archivbereich beleuchtet. Dabei wurde unter anderem bereits existierende Ansätze und Anwendungen aus dem Archivbereich vorgestellt und mit den Qualitätsmerkmalen verglichen. Die Überprüfung der Praxistauglichkeit der Qualitätsmerkmale erfolgte am Fallbeispiel der Metadaten des Schweizerischen Sozialarchivs. Auf Basis der erarbeitenden Resultate spricht die Arbeit eine Reihe von Empfehlungen aus. Diese richten sich an Archive, die sich mit dem Thema Linked Open Data beschäftigen oder eine Anwendung in diesem Bereich planen.}, language = {de}, number = {1}, urldate = {2021-03-17}, journal = {Informationswissenschaft: Theorie, Methode und Praxis}, author = {Würtz, Fabian}, month = jul, year = {2020}, note = {Number: 1}, pages = {312--423}, }
@misc{wildi_matterhorn_2019, type = {Government \& {Nonprofit}}, title = {Le {Matterhorn} {RDF} {Data} {Model}: {Description} archivistique et {Linked} {Open} {Data}. {Vers} quelle convergence?}, shorttitle = {Alain {Dubois}, {Tobias} {Wildi}. {Le} {Matterhorn} {RDF} {Data} {Model}}, url = {https://www.slideshare.net/TobiasWildi/alain-dubois-tobias-wildi-le-matterhorn-rdf-data-model-description-archivistique-et-linked-open-data-vers-quelle-convergence-prsentation-au-forum-des-archivistes-5-avril-2019-saintetienne?from_m_app=ios}, abstract = {Approche généraliste du Matterhorn RDF Data Model: se fonder sur des}, language = {fr}, urldate = {2020-12-28}, author = {Wildi, Tobias and Dubois, Alain}, month = may, year = {2019}, }
@inproceedings{hyvonen_linked_2019, title = {A {Linked} {Open} {Data} {Service} and {Portal} for {Pre}-modern {Manuscript} {Research}}, url = {https://researchportal.helsinki.fi/en/publications/a-linked-open-data-service-and-portal-for-pre-modern-manuscript-r}, language = {English}, urldate = {2019-09-05}, booktitle = {Digital {Humanities} in {Nordic} {Countries}: {Proceedings} of the {Digital} {Humanities} in the {Nordic} {Countries} 4th {Conference}}, publisher = {CEUR-WS.org}, author = {Hyvönen, Eero and Ikkala, Esko and Tuominen, Jouni and Koho, Mikko and Burrows, Toby and Ransom, Lynn and Wijsman, Hanno}, year = {2019}, pages = {220--229}, }
@incollection{bruseker_cultural_2017, address = {Cham}, series = {Quantitative {Methods} in the {Humanities} and {Social} {Sciences}}, title = {Cultural {Heritage} {Data} {Management}: {The} {Role} of {Formal} {Ontology} and {CIDOC} {CRM}}, isbn = {978-3-319-65370-9}, shorttitle = {Cultural {Heritage} {Data} {Management}}, url = {https://doi.org/10.1007/978-3-319-65370-9_6}, abstract = {Building models for integrating the diverse data generated in Cultural Heritage disciplines is a long-term challenge both for securing presently generated knowledge and for making it progressively more widely accessible and interoperable into the future. This chapter reviews the multiple approaches undertaken to address this problem, finally proposing CIDOC CRM as the most robust solution for information integration in CH. The chapter begins by outlining the data challenge specific to the field and the main approaches that can be taken in facing it. Within this frame, it distinguishes knowledge engineering and formal ontology from other information modelling techniques as the necessary approach for tackling the broader data integration problem. It then outlines the basic principles of CIDOC CRM, the ISO standard formal ontology for CH. From there, an overview is given of some of the work that has been done both theoretically and in practice over the past five years in developing and implementing CRM as a practical data integration strategy in CH, particularly looking at model extensions to handle knowledge provenance across various disciplines and typical documentation and reasoning activities, as well as at successful implementation projects. Lastly, it summarizes the present potentials and challenges for using CIDOC CRM for solving the CH data management and integration puzzle. The intended audience of this chapter are specialists from all backgrounds within the broader domain of CH with an interest in data integration and CIDOC CRM.}, language = {en}, urldate = {2023-08-18}, booktitle = {Heritage and {Archaeology} in the {Digital} {Age}: {Acquisition}, {Curation}, and {Dissemination} of {Spatial} {Cultural} {Heritage} {Data}}, publisher = {Springer International Publishing}, author = {Bruseker, George and Carboni, Nicola and Guillem, Anaïs}, editor = {Vincent, Matthew L. and López-Menchero Bendicho, Víctor Manuel and Ioannides, Marinos and Levy, Thomas E.}, year = {2017}, doi = {10.1007/978-3-319-65370-9_6}, pages = {93--131}, }
@article{merzaghi_informationen_2017, title = {Informationen finden und {Wissen} verlinken: {Der} {Weg} der {Metadatenstandards} vom {Archivregal} zu den {Linked} {Data}}, copyright = {Copyright ©2020 arbido.}, url = {https://arbido.ch/de/ausgaben-artikel/2017/metadaten-datenqualit%C3%A4t/vom-regal-zum-word-wide-web-die-entwicklung-von-normen-und-standards}, abstract = {Die Informatisierung der Archivarbeit hat die Notwendigkeit von Metadatenstandards aufgezeigt, denn die Metadaten ermöglichen es, analoge und elektronische…}, language = {de}, number = {3}, urldate = {2020-12-28}, journal = {Arbido}, author = {Merzaghi, Michele}, year = {2017}, note = {Archive Location: https://arbido.ch/de/ Publisher: arbido}, }
@incollection{vogeler_content_2016, title = {The {Content} of {Accounts} and {Registers} in their {Digital} {Edition}. {XML}/{TEI}, {Spreadsheets}, and {Semantic} {Web} {Technologies}}, booktitle = {Konzeptionelle Überlegungen zur {Edition} von {Rechnungen} und {Amtsbüchern} des späten {Mittelalters}}, author = {Vogeler, Georg}, year = {2016}, pages = {13--42}, }
@inproceedings{le_boeuf_basic_2015, address = {Cape Town, South Africa}, title = {A {Basic} {Introduction} to {FRBROO} and {PRESSOO}}, url = {https://library.ifla.org/id/eprint/1150/}, abstract = {In addition to FRBR, FRAD, and FRSAD, two conceptual models for bibliographic information were developed these last years: FRBROO (a reformulation of the FRBR/FRAD/FRSAD models) and PRESSOO (more specifically devoted to bibliographic information about continuing resources). This paper provides the explanation that is necessary for a good understanding of the formalism used in these two models. It then shows how some of the basic constructs of FRBR/FRAD/FRSAD are rendered in the FRBROO model. In a very brief third section, it highlights some core aspects of PRESSOO. Keywords: FRBROO, PRESSOO, CIDOC CRM, Conceptual models for cultural heritage information}, booktitle = {{IFLA} {WLIC} 2015, {Session} 207 - {Cataloguing}}, author = {Le Boeuf, Patrick}, year = {2015}, keywords = {CIDOC CRM, Conceptual models for cultural heritage information, FRBRoo, PRESSoo}, }
@article{gracy_archival_2015, title = {Archival {Description} and {Linked} {Data}: {A} {Preliminary} {Study} of {Opportunities} and {Implementation} {Challenges}}, volume = {15}, issn = {1389-0166, 1573-7519}, shorttitle = {Archival description and linked data}, url = {https://doi.org/10.1007/s10502-014-9216-2}, doi = {10.1007/s10502-014-9216-2}, abstract = {This paper presents the results of a study to investigate how archives can connect their collections to related data sources through the use of Semantic Web technologies, specifically Linked Data. Questions explored included (a) What types of data currently available in archival surrogates such as Encoded Archival Description (EAD) finding aids and Machine-Readable Cataloging (MARC) records may be useful if converted to Linked Data? (b) For those potentially useful data points identified in archival surrogates, how might one align data structures found in those surrogates to the data structures of other relevant internal or external information sources? (c) What features of current standards and data structures present impediments or challenges that must be overcome in order to achieve interoperability among disparate data sources? To answer these questions, the researcher identified metadata elements of potential use as Linked Data in archival surrogates, as well as metadata element sets and vocabularies of data sets that could serve as pathways to relevant external data sources. Data sets chosen for the study included DBpedia and schema.org; metadata element sets examined included Friend of a Friend (FOAF), GeoNames, and Linking Open Description of Events (LODE). The researcher then aligned tags found in the EAD encoding standard to related classes and properties found in these Linked Data sources and metadata element sets. To investigate the third question about impediments to incorporating Linked Data in archival descriptions, the researcher analyzed the locations and frequencies at which controlled and uncontrolled access points (personal and family name, corporate name, geographic name, and genre/form entities) appeared in a sample of MARC and EAD archival descriptive records by using a combination of hand counts and the natural language processing (NLP) tool, OpenCalais. The results of the location and frequency analysis, combined with the results of the alignment process, helped the researcher identify several critical challenges currently impeding interoperability among archival information systems and relevant Linked Data sources, including differences in granularity between archival and other data source vocabularies, and inadequacies of current encoding standards to support semantic tagging of potential access points embedded in free text areas of archival surrogates.}, language = {en}, number = {3}, urldate = {2020-12-16}, journal = {Archival Science}, author = {Gracy, Karen F.}, year = {2015}, pages = {239--294}, }