2024 (7)

Graphwise Interests in Industrial Data.
Alexiev, V.; and Krüger, L.
Presentation at Industrial Data Ontology: Meet Graph Software Vendors, December 2024.

@Misc{IDO-vendor-meeting-2024,
  author       = {Vladimir Alexiev and Lutz Krüger},
  title        = {Graphwise Interests in Industrial Data},
  howpublished = {Presentation at Industrial Data Ontology: Meet Graph Software Vendors},
  month        = dec,
  year         = 2024,
  url_PDF      = {https://drive.google.com/file/d/11xnlpDNVg9lvHVvRrJ1fdwjUwGnjd_iV/view},
  url_Slides   = {https://docs.google.com/presentation/d/1dRJBTCJmDYKhe4WHrA6p3DF1nqrunWvGjxCAUGmbRnA/edit},
  keywords     = {Ontology-Based Interoperability, OBI, Industrial Data Ontology, IDO, ISO 23726-3, Digital Product Passports, DPP, Asset Administration Shell, AAS, semantic layer, LLM querying},
  date         = {2024-12-04},
  abstract     = {The Ontology-Based Interoperability (OBI) and Industrial Data Ontology (IDO) ISO 23726-3 community invited semantic software vendors to gauge their interest in IDO and outline next steps for its adoption and role in harmonization. This presentation outlines the interest and experience of Graphwise (Ontotext and Semantic Web Company) in various industrial domains: trade, logistics, AECO, electricity, oil and gas, energy, manufacturing. It then provides an overview of an important use case (Digital Product Passports) and a conceptual architecture involving a semantic layer, knowledge graph, sensor data, LLM querying, etc.},
}

The Ontology-Based Interoperability (OBI) and Industrial Data Ontology (IDO) ISO 23726-3 community invited semantic software vendors to gauge their interest in IDO and outline next steps for its adoption and role in harmonization. This presentation outlines the interest and experience of Graphwise (Ontotext and Semantic Web Company) in various industrial domains: trade, logistics, AECO, electricity, oil and gas, energy, manufacturing. It then provides an overview of an important use case (Digital Product Passports) and a conceptual architecture involving a semantic layer, knowledge graph, sensor data, LLM querying, etc.

Raising the Role of Vocabulary Hubs for Semantic Data Interoperability in Dataspaces.
David, R.; Ivanov, P.; and Alexiev, V.
In Third Workshop on Semantic Interoperability in Data Spaces, Budapest, Hungary, October 2024.

@InProceedings{semInteropDataspaces-2024,
  author       = {Robert David and Petar Ivanov and Vladimir Alexiev},
  title        = {Raising the Role of Vocabulary Hubs for Semantic Data Interoperability in Dataspaces},
  booktitle    = {Third Workshop on Semantic Interoperability in Data Spaces},
  year         = 2024,
  month        = oct,
  address      = {Budapest, Hungary},
  url_Proceedings = {https://semantic.internationaldataspaces.org/workshop-2024/},
  url_Slides   = {https://rawgit2.com/underpin-project/papers/main/EBDVF-2024/EBDVF-2024-presentation/presentation.html},
  url_PDF      = {https://raw.githubusercontent.com/underpin-project/papers/refs/heads/main/EBDVF-2024/EBDVF-2024-presentation/presentation.pdf},
  keywords     = {dataspaces, semantic interoperability, semantic technologies, ontologies, vocabulary hub, oil and gas, renewable energy, refineries, windfarms},
  date         = {2024-10-02},
  abstract     = {Dataspaces are an important enabler for industrial data sharing (whether commercially licensed or private). Europe is investing heavily in sectoral dataspaces and in federation and orchestration platforms like SIMPL, Eclipse DSC, GXFS, etc. Still, dataspaces enable shared data access, but do not solve the data interoperability problem. For that, the consumer would like to see the data from different providers in a harmonized and semantically integrated form. The Vocabulary Hub service (part of the IDS RAM) provides a repository for ontologies and vocabularies. We describe an approach of raising the role of the Vocabulary Hub to also allow richer metadata description (e.g. the meaning of every column in a tabular dataset), and binding semantic descriptions to ingested datasets, thus providing on-the-fly data semantization and easing data querying. This is achieved through the integration of two commercial semantic products (PoolParty and GraphDB), leveraging the partnership between the Semantic Web Company and Ontotext, and is being developed within the frame of the Digital Europe project UNDERPIN, with applications to refinery and wind farm data.},
}

Dataspaces are an important enabler for industrial data sharing (whether commercially licensed or private). Europe is investing heavily in sectoral dataspaces and in federation and orchestration platforms like SIMPL, Eclipse DSC, GXFS, etc. Still, dataspaces enable shared data access, but do not solve the data interoperability problem. For that, the consumer would like to see the data from different providers in a harmonized and semantically integrated form. The Vocabulary Hub service (part of the IDS RAM) provides a repository for ontologies and vocabularies. We describe an approach of raising the role of the Vocabulary Hub to also allow richer metadata description (e.g. the meaning of every column in a tabular dataset), and binding semantic descriptions to ingested datasets, thus providing on-the-fly data semantization and easing data querying. This is achieved through the integration of two commercial semantic products (PoolParty and GraphDB), leveraging the partnership between the Semantic Web Company and Ontotext, and is being developed within the frame of the Digital Europe project UNDERPIN, with applications to refinery and wind farm data.

AEC3PO: Architecture, Engineering, Construction Compliance Checking and Permitting Ontology.
Vakaj, E.; Patlakas, P.; Beach, T.; Lefrançois, M.; Dridi, A.; and Alexiev, V.
February 2024.

@Manual{AEC3PO-ontology-2024,
  title        = {{AEC3PO: Architecture, Engineering, Construction Compliance Checking and Permitting Ontology}},
  author       = {Edlira Vakaj and Panagiotis Patlakas and Thomas Beach and Maxime Lefrançois and Amna Dridi and Vladimir Alexiev},
  month        = feb,
  year         = 2024,
  url          = {https://w3id.org/lbd/aec3po/},
  keywords     = {AECO, automated compliance checking, building regulations, construction regulations, land use, ontologies, ACCORD, Architecture Engineering and Construction Compliance Checking and Permitting Ontology, AEC3PO, compliance checking, CO2 emission, Sustainability, domain-specific rule language, RASE},
  url_Github   = {https://github.com/accord-project/aec3po/},
  date         = {2024-02-15},
  abstract     = {The Architecture, Engineering, Construction Compliance Checking and Permitting Ontology (AEC3PO) is an ontology developed to support the automated compliance checking of construction, renovation, and demolition works. It has been developed in the context of the Automated Compliance Checking for Construction, Renovation or Demolition Works (ACCORD) project, an ERC/Horizon-funded project that aims to digitalise permitting and compliance processes. AEC3PO aims to capture all aspects of building compliance and building permitting in Architecture, Engineering, and Construction (AEC), across different regulatory systems. It allows the modelling of aspects such as: building and infrastructure codes, regulations, and standards, and their compliance requirements; building and infrastructure permitting processes and documentation; building and infrastructure compliance and permitting actors. The ontology requirements are derived from the rule formalisation methodology that aims to semantise regulations and provide an open format for machine-readable rules. The ontology is built using Semantic Web technologies, adhering to standards like RDF, OWL, and SKOS. It also integrates popular ontologies such as Dublin Core Terms (DCT) and Europe's Legislation Identifier (ELI) in order to create a structured and interconnected knowledge graph. This allows professionals to explore, query, and understand various aspects of the compliance and permitting processes more comprehensively.},
}

The Architecture, Engineering, Construction Compliance Checking and Permitting Ontology (AEC3PO) is an ontology developed to support the automated compliance checking of construction, renovation, and demolition works. It has been developed in the context of the Automated Compliance Checking for Construction, Renovation or Demolition Works (ACCORD) project, an ERC/Horizon-funded project that aims to digitalise permitting and compliance processes. AEC3PO aims to capture all aspects of building compliance and building permitting in Architecture, Engineering, and Construction (AEC), across different regulatory systems. It allows the modelling of aspects such as: building and infrastructure codes, regulations, and standards, and their compliance requirements; building and infrastructure permitting processes and documentation; building and infrastructure compliance and permitting actors. The ontology requirements are derived from the rule formalisation methodology that aims to semantise regulations and provide an open format for machine-readable rules. The ontology is built using Semantic Web technologies, adhering to standards like RDF, OWL, and SKOS. It also integrates popular ontologies such as Dublin Core Terms (DCT) and Europe's Legislation Identifier (ELI) in order to create a structured and interconnected knowledge graph. This allows professionals to explore, query, and understand various aspects of the compliance and permitting processes more comprehensively.

Formalization of Building Codes and Regulations in Knowledge Graphs.
Costa, G.; Vakaj, E.; Beach, T.; Lavikka, R.; Lefrançois, M.; Zimmermann, A.; Mecharnia, T.; Alexiev, V.; Dridi, A.; Hettiarachchi, H.; and Keberle, N.
In Noardo, F.; and Fauth, J., editors, Digital Building Permit Conference 2024, pages 142-146, Barcelona, Spain, April 2024.

@InProceedings{DBP2024-buildingCodes,
  author       = {Gonçal Costa and Edlira Vakaj and Thomas Beach and Rita Lavikka and Maxime Lefrançois and Antoine Zimmermann and Thamer Mecharnia and Vladimir Alexiev and Amna Dridi and Hansi Hettiarachchi and Nataliya Keberle},
  title        = {Formalization of Building Codes and Regulations in Knowledge Graphs},
  booktitle    = {Digital Building Permit Conference 2024},
  year         = 2024,
  editor       = {Francesca Noardo and Judith Fauth},
  pages        = {142-146},
  month        = apr,
  address      = {Barcelona, Spain},
  url          = {https://zenodo.org/records/12760552},
  url_Slides   = {https://docs.google.com/presentation/d/1UO8bH9LY_KprjZhrSHFV7QKq2d8r_fjF/edit},
  keywords     = {AECO, BIM, regulation checking, automated compliance checking},
  doi          = {10.5281/zenodo.12760552},
  abstract     = {The Architecture, Engineering and Construction (AEC) industry is subject to many building codes and regulations that apply to the design and construction of buildings. These regulations often involve complex language and technical vocabulary that can give rise to different interpretations, depending on their context and purpose, and therefore a difficulty in their application. The introduction of Building Information Modelling (BIM), as well as authoring tools capable of creating and exporting 3D representations of buildings, is paving the way for compliance checking to become more automated and less dependent on interpretation. This should allow for better quality by reducing the time needed for checking and avoiding human errors. However, despite attempts to provide new BIM-based methods and approaches to achieve this goal in the past two decades, none of these methods have proven to be close to being a definitive solution. The basis for checking compliance against regulations using a BIM model is to have a description of the regulations in a computable form. In turn, this makes it necessary to define data requirements for models that guarantee that regulations can be checked consistently. Within this framework, several scenarios can be considered to address the problem. One is to consider the descriptive part of the regulation separate from the execution part, that is, compliance checking procedures. Currently, those in charge of writing the regulations typically publish them in plain text documents in PDF format. Therefore, the next evolutionary step is to manage construction regulations in a machine-readable way underpinned by semantics, thus ensuring they can be interpreted precisely by the software used for checking buildings against them.},
}

The Architecture, Engineering and Construction (AEC) industry is subject to many building codes and regulations that apply to the design and construction of buildings. These regulations often involve complex language and technical vocabulary that can give rise to different interpretations, depending on their context and purpose, and therefore a difficulty in their application. The introduction of Building Information Modelling (BIM), as well as authoring tools capable of creating and exporting 3D representations of buildings, is paving the way for compliance checking to become more automated and less dependent on interpretation. This should allow for better quality by reducing the time needed for checking and avoiding human errors. However, despite attempts to provide new BIM-based methods and approaches to achieve this goal in the past two decades, none of these methods have proven to be close to being a definitive solution. The basis for checking compliance against regulations using a BIM model is to have a description of the regulations in a computable form. In turn, this makes it necessary to define data requirements for models that guarantee that regulations can be checked consistently. Within this framework, several scenarios can be considered to address the problem. One is to consider the descriptive part of the regulation separate from the execution part, that is, compliance checking procedures. Currently, those in charge of writing the regulations typically publish them in plain text documents in PDF format. Therefore, the next evolutionary step is to manage construction regulations in a machine-readable way underpinned by semantics, thus ensuring they can be interpreted precisely by the software used for checking buildings against them.

Checking of Urban Planning Regulations with GeoSPARQL and BIM SPARQL.
Alexiev, V.; and Keberle, N.
In Digital Building Permit 2024, page 234, Barcelona, Spain, April 2024.

@InProceedings{DBP2024-GeoSPARQL,
  author       = {Vladimir Alexiev and Nataliya Keberle},
  title        = {Checking of Urban Planning Regulations with GeoSPARQL and BIM SPARQL},
  booktitle    = {Digital Building Permit 2024},
  year         = 2024,
  pages        = 234,
  month        = apr,
  address      = {Barcelona, Spain},
  url          = {https://zenodo.org/records/12760552},
  url_HTML     = {https://presentations.ontotext.com/2024/checking-of-urban-planning-regulations-with-geosparql-and-bim-sparql/Slides.html},
  url_Slides   = {https://drive.google.com/file/d/1PtApSPgU2nQjvRBCcgNU8oles2O7EnC6/view},
  keywords     = {AECO, urban planning, BIM, City Information Management, regulation checking, XPlanung, INSPIRE PLU, CityGML, GeoSPARQL, Berlin Tegel, TXL, Malgrat},
  doi          = {10.5281/zenodo.12760552},
  abstract     = {The former Berlin Tegel airport (TXL) will be the site of a university campus (refurbished airport terminal), startups, production facilities (“tech republic”), a living quarter, stores, smart mobility hubs, park and recreation areas, etc. The Tegel Project company (owned by the City of Berlin) has developed detailed urban planning and regulations covering built area use, height restrictions, noise protection, floor space index (buildup density), greenery requirements (vegetation and habitats), etc. The regulations are expressed in XPlanung and INSPIRE PLU. These are GML-based UML and XML models for urban planning: XPlanung is Germany-specific and PLU (Planned Land Use) is part of the INSPIRE initiative. Building designs are expressed in IFC and include simple geometries (for residential buildings) and complex geometries (for the university campus). Compliance checking of urban planning requires accessing two different kinds of data in a harmonized way: BIM (building information) and GIS (also called CIM “city information management” and often represented using GML extension schemas). As part of the Horizon Europe ACCORD project, we plan to do this checking using SPARQL in Ontotext GraphDB. GIS data is covered by the existing GeoSPARQL plugin that supports WKT and GML geometries. BIM data can either be converted to GIS/GML using already developed approaches, or accessed through a future Binary Engineering Data connector for GraphDB based on the HDF5 format. We give an overview of XPlanung, INSPIRE PLU, CityGML and GeoSPARQL 1.0 and 1.1. Then we describe the semantic conversion of XPlanung / INSPIRE PLU data, our approach regarding semantization of BIM data, the overall structure of regulations, the respective geometric and non-geometric checks to be implemented, the use of GeoSPARQL topological relations to leverage planning zone hierarchies and to check which buildings fall in which zones, potential specialized BIM SPARQL functions to be implemented, management of multiple BIM files that need to be checked in concert, and result creation and content.},
}

The former Berlin Tegel airport (TXL) will be the site of a university campus (refurbished airport terminal), startups, production facilities (“tech republic”), a living quarter, stores, smart mobility hubs, park and recreation areas, etc. The Tegel Project company (owned by the City of Berlin) has developed detailed urban planning and regulations covering built area use, height restrictions, noise protection, floor space index (buildup density), greenery requirements (vegetation and habitats), etc. The regulations are expressed in XPlanung and INSPIRE PLU. These are GML-based UML and XML models for urban planning: XPlanung is Germany-specific and PLU (Planned Land Use) is part of the INSPIRE initiative. Building designs are expressed in IFC and include simple geometries (for residential buildings) and complex geometries (for the university campus). Compliance checking of urban planning requires accessing two different kinds of data in a harmonized way: BIM (building information) and GIS (also called CIM “city information management” and often represented using GML extension schemas). As part of the Horizon Europe ACCORD project, we plan to do this checking using SPARQL in Ontotext GraphDB. GIS data is covered by the existing GeoSPARQL plugin that supports WKT and GML geometries. BIM data can either be converted to GIS/GML using already developed approaches, or accessed through a future Binary Engineering Data connector for GraphDB based on the HDF5 format. We give an overview of XPlanung, INSPIRE PLU, CityGML and GeoSPARQL 1.0 and 1.1. Then we describe the semantic conversion of XPlanung / INSPIRE PLU data, our approach regarding semantization of BIM data, the overall structure of regulations, the respective geometric and non-geometric checks to be implemented, the use of GeoSPARQL topological relations to leverage planning zone hierarchies and to check which buildings fall in which zones, potential specialized BIM SPARQL functions to be implemented, management of multiple BIM files that need to be checked in concert, and result creation and content.

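To make the zone-containment check concrete, here is a minimal sketch of the kind of GeoSPARQL query described above, sent to a GraphDB repository over the standard SPARQL protocol. The repository URL and the plu:Zone / bim:Building classes are illustrative assumptions, not the project's actual schema.

```python
# Sketch: which buildings fall within which planning zones (GeoSPARQL sfWithin).
# Assumes a GraphDB repository with the GeoSPARQL plugin enabled and
# illustrative plu:Zone / bim:Building classes carrying WKT geometries.
import requests

ENDPOINT = "http://localhost:7200/repositories/txl"  # assumed repository URL

QUERY = """
PREFIX geo:  <http://www.opengis.net/ont/geosparql#>
PREFIX geof: <http://www.opengis.net/def/function/geosparql/>
PREFIX plu:  <http://example.org/plu#>
PREFIX bim:  <http://example.org/bim#>
SELECT ?building ?zone WHERE {
  ?zone     a plu:Zone ;     geo:hasGeometry/geo:asWKT ?zoneWkt .
  ?building a bim:Building ; geo:hasGeometry/geo:asWKT ?bldgWkt .
  FILTER (geof:sfWithin(?bldgWkt, ?zoneWkt))
}
"""

resp = requests.post(ENDPOINT, data={"query": QUERY},
                     headers={"Accept": "application/sparql-results+json"})
resp.raise_for_status()
for row in resp.json()["results"]["bindings"]:
    print(row["building"]["value"], "is within", row["zone"]["value"])
```
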
Using Qualification Instead of Rolification for the Records in Context Ontology (RiC-O).
Alexiev, V.
January 2024.

@Misc{Alexiev-Qualification-vs-Rolification-2024,
  author       = {Vladimir Alexiev},
  title        = {{Using Qualification Instead of Rolification for the Records in Context Ontology (RiC-O)}},
  month        = jan,
  year         = 2024,
  url          = {https://github.com/ICA-EGAD/RiC-O/issues/67#issuecomment-1919383104},
  keywords     = {rolification, qualification, reasoning, GraphDB Rules, GLAM, archives, RiC-O, Records in Context, PROV},
  abstract     = {The Records in Context Ontology (RiC-O) uses a "Rolification" pattern to derive direct (unqualified) relations from Relation nodes. This involves the use of a "parasitic" self-link on relation nodes (owl:hasSelf) and owl:propertyChainAxiom, which is expensive to implement. Instead, I propose to use the PROV Qualified Relation pattern (associate the direct relation to the Relation class using prov:unqualifiedForm) and implement it with simpler GraphDB rules.}
}

The Records in Context Ontology (RiC-O) uses a "Rolification" pattern to derive direct (unqualified) relations from Relation nodes. This involves the use of a "parasitic" self-link on relation nodes (owl:hasSelf) and owl:propertyChainAxiom, which is expensive to implement. Instead, I propose to use the PROV Qualified Relation pattern (associate the direct relation to the Relation class using prov:unqualifiedForm) and implement it with simpler GraphDB rules.

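A toy illustration of the proposed pattern, using rdflib: the direct (unqualified) relation is derived from the Relation node with a single CONSTRUCT, instead of owl:hasSelf plus owl:propertyChainAxiom. The concrete class and property names (rico:AgentControlRelation, rico:relationHasSource/Target, rico:isAgentControlling) are assumptions for the sketch and may differ from the actual RiC-O terms.

```python
# Sketch of deriving direct relations via prov:unqualifiedForm (assumed names).
from rdflib import Graph

DATA = """
@prefix rico: <https://www.ica.org/standards/RiC/ontology#> .
@prefix prov: <http://www.w3.org/ns/prov#> .
@prefix ex:   <http://example.org/> .

# Qualified form: a Relation node carrying the link plus room for metadata.
ex:rel1 a rico:AgentControlRelation ;
    rico:relationHasSource ex:agent1 ;
    rico:relationHasTarget ex:record1 .

# The unqualified (direct) property is declared once, on the Relation class.
rico:AgentControlRelation prov:unqualifiedForm rico:isAgentControlling .
"""

# One CONSTRUCT replaces the owl:hasSelf + owl:propertyChainAxiom machinery.
DERIVE = """
PREFIX rico: <https://www.ica.org/standards/RiC/ontology#>
PREFIX prov: <http://www.w3.org/ns/prov#>
CONSTRUCT { ?src ?p ?tgt }
WHERE {
  ?rel a ?relClass ;
       rico:relationHasSource ?src ;
       rico:relationHasTarget ?tgt .
  ?relClass prov:unqualifiedForm ?p .
}
"""

g = Graph()
g.parse(data=DATA, format="turtle")
for src, p, tgt in g.query(DERIVE):
    print(src, p, tgt)  # ex:agent1 rico:isAgentControlling ex:record1
```

The same derivation can be written as a forward-chaining GraphDB rule, which is the cheaper implementation proposed in the note.
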
Exploring FIBO Complexity With Crunchbase: Representing Crunchbase IPOs in FIBO.
Alexiev, V.
Technical Report, April 2024.

@TechReport{Alexiev-Crunchbase-Fibo-2023,
  author       = {Vladimir Alexiev},
  title        = {{Exploring FIBO Complexity With Crunchbase: Representing Crunchbase IPOs in FIBO}},
  month        = apr,
  year         = 2024,
  url          = {https://rawgit2.com/VladimirAlexiev/crunchbase-fibo/main/README.html},
  url_Github   = {https://github.com/VladimirAlexiev/crunchbase-fibo/},
  keywords     = {fintech, Crunchbase, ontologies, semantic modeling, Initial Public Offering, IPO, Financial Industry Business Ontology, FIBO},
  abstract     = {The Financial Industry Business Ontology (FIBO) by the Enterprise Data Management Council (EDMC) is a family of ontologies and a reference model for representing data in the financial world using semantic technologies. It is used in fintech Knowledge Graph (KG) projects because it offers a comprehensive and principled approach to representing financial data, and a wide set of predefined models that can be used to implement data harmonization and financial data integration. The 2022Q2 FIBO release consists of 290 ontologies using 380 prefixes that cover topics such as legal entities, contracts, agency, trusts, regulators, securities, loans, derivatives, etc. FIBO's reach and flexible ontological approach allow the integration of a wide variety of financial data, but it comes at the price of more complex representation. Crunchbase (CB) is a well-known dataset by TechCrunch that includes companies, key people, funding rounds, acquisitions, Initial Public Offerings (IPOs), etc. It has about 2M companies with a good mix of established enterprises (including 47k public companies), mid-range companies and startups. We (Ontotext and other Wikidata contributors) have matched 72k CB companies to Wikidata, see this query. I explore the representation of Crunchbase data (more specifically IPOs) in FIBO and compare it to the simplest possible semantic representation. I therefore illustrate the complexity of FIBO, and explain its flexibility along the way. I finish with some discussion and conclusions as to when FIBO can bring value to fintech KG projects.},
}

The Financial Industry Business Ontology (FIBO) by the Enterprise Data Management Council (EDMC) is a family of ontologies and a reference model for representing data in the financial world using semantic technologies. It is used in fintech Knowledge Graph (KG) projects because it offers a comprehensive and principled approach to representing financial data, and a wide set of predefined models that can be used to implement data harmonization and financial data integration. The 2022Q2 FIBO release consists of 290 ontologies using 380 prefixes that cover topics such as legal entities, contracts, agency, trusts, regulators, securities, loans, derivatives, etc. FIBO's reach and flexible ontological approach allow the integration of a wide variety of financial data, but it comes at the price of more complex representation. Crunchbase (CB) is a well-known dataset by TechCrunch that includes companies, key people, funding rounds, acquisitions, Initial Public Offerings (IPOs), etc. It has about 2M companies with a good mix of established enterprises (including 47k public companies), mid-range companies and startups. We (Ontotext and other Wikidata contributors) have matched 72k CB companies to Wikidata, see this query. I explore the representation of Crunchbase data (more specifically IPOs) in FIBO and compare it to the simplest possible semantic representation. I therefore illustrate the complexity of FIBO, and explain its flexibility along the way. I finish with some discussion and conclusions as to when FIBO can bring value to fintech KG projects.

2023 (8)

Existing Ontologies, Standards, and Data Models in the Building Data Domain Relevant to Compliance Checking.
Mecharnia, T.; Lefrançois, M.; Zimmermann, A.; Vakaj, E.; Dridi, A.; Hettiarachchi, H.; Alexiev, V.; Keberle, N.; Tan, H.; Noardo, F.; Makkinga, R.; and Cheung, F.
Technical Report D2.1, ACCORD Project, August 2023.

@TechReport{thamermecharniaExistingOntologiesStandards2023,
  author       = {Thamer Mecharnia and Maxime Lefrançois and Antoine Zimmermann and Edlira Vakaj and Amna Dridi and Hansi Hettiarachchi and Vladimir Alexiev and Nataliya Keberle and He Tan and Francesca Noardo and Rick Makkinga and Franco Cheung},
  title        = {Existing Ontologies, Standards, and Data Models in the Building Data Domain Relevant to Compliance Checking},
  institution  = {ACCORD Project},
  year         = 2023,
  type         = {Deliverable},
  number       = {D2.1},
  month        = aug,
  url          = {https://accordproject.eu/wp-content/uploads/2023/09/ACCORD_D2.1_Technical_Report_Existing_Models.pdf},
  keywords     = {Architecture Engineering Construction and Operations (AECO), Ontologies, Review},
  url_library  = {https://www.zotero.org/groups/3007408/semantic_bim/library},
  abstract     = {This deliverable presents the results of Task 2.1 (Technical Review of Existing Standards) of the ACCORD project. The ACCORD project employs a semantic approach for validating building permits, eliminating the need for costly centralized systems that are challenging to establish and maintain. The primary aim of the ACCORD project is to digitize permit and compliance procedures to improve the productivity and quality of design and construction processes and facilitate the creation of an environmentally sustainable built environment. This deliverable will review the existing ontologies, standards, and data models in the Architecture, Engineering, and Construction (AEC) domain and how they can be reused for the purpose of automatic compliance checking. More specifically, this deliverable will: 1. Evaluate the AEC domain-related ontologies and propose suggestions on how they can be employed for the development of the Architecture Engineering and Construction Compliance Checking and Permitting Ontology (AEC3PO). 2. Conduct a review of query languages associated with the AEC domain and the semantic web. 3. Compare the rule languages developed or used in AEC projects. 4. Review the standards that may be relevant to different areas in the ACCORD project. 5. Compare the existing reasoners that could be useful for automatic compliance checking in building permitting. All the references used in this deliverable are gathered in the open Zotero library for the project. In the AEC industry, several standards and recommendations aim to achieve different levels of data interoperability in systems. This deliverable concentrates on data-related standards such as those that provide syntactic rules and semantics to represent data in a standardized way. Policy and regulatory standards are out of the scope of this deliverable and are addressed in deliverable D1.1 "Landscape Review Report". The outcomes of this deliverable will serve as a reference for other tasks within the project, which will determine the preferred rule language, which ontologies can be reused, aligned, or serve as inspiration for the creation of the AEC3PO to be developed in Task 2.2 of WP2. Furthermore, the standards that will be presented in this deliverable can be employed in various aspects of the ACCORD project. This groundwork will facilitate the development of the AEC3PO ontology as well as the design and implementation of the Rule Formalisation Tool.},
}

This deliverable presents the results of Task 2.1 (Technical Review of Existing Standards) of the ACCORD project. The ACCORD project employs a semantic approach for validating building permits, eliminating the need for costly centralized systems that are challenging to establish and maintain. The primary aim of the ACCORD project is to digitize permit and compliance procedures to improve the productivity and quality of design and construction processes and facilitate the creation of an environmentally sustainable built environment. This deliverable will review the existing ontologies, standards, and data models in the Architecture, Engineering, and Construction (AEC) domain and how they can be reused for the purpose of automatic compliance checking. More specifically, this deliverable will: 1. Evaluate the AEC domain-related ontologies and propose suggestions on how they can be employed for the development of the Architecture Engineering and Construction Compliance Checking and Permitting Ontology (AEC3PO). 2. Conduct a review of query languages associated with the AEC domain and the semantic web. 3. Compare the rule languages developed or used in AEC projects. 4. Review the standards that may be relevant to different areas in the ACCORD project. 5. Compare the existing reasoners that could be useful for automatic compliance checking in building permitting. All the references used in this deliverable are gathered in the open Zotero library for the project. In the AEC industry, several standards and recommendations aim to achieve different levels of data interoperability in systems. This deliverable concentrates on data-related standards such as those that provide syntactic rules and semantics to represent data in a standardized way. Policy and regulatory standards are out of the scope of this deliverable and are addressed in deliverable D1.1 "Landscape Review Report". The outcomes of this deliverable will serve as a reference for other tasks within the project, which will determine the preferred rule language, which ontologies can be reused, aligned, or serve as inspiration for the creation of the AEC3PO to be developed in Task 2.2 of WP2. Furthermore, the standards that will be presented in this deliverable can be employed in various aspects of the ACCORD project. This groundwork will facilitate the development of the AEC3PO ontology as well as the design and implementation of the Rule Formalisation Tool.

Natural Language Querying with GPT, SOML and GraphQL.
Alexiev, V.
Ontotext Last Friday Webinar, May 2023.

@Misc{NLQ-GPT-SOML-GraphQL-2023,
  author       = {Vladimir Alexiev},
  title        = {{Natural Language Querying with GPT, SOML and GraphQL}},
  howpublished = {Ontotext Last Friday Webinar},
  month        = may,
  year         = 2023,
  url_video    = {https://drive.google.com/file/d/1TOHrtlleOAkv4oZYhlAWa22mUqtvsV7o/view},
  abstract     = {Clients want to talk to their KG, i.e. ask questions about the schema and data in natural language. LLMs like GPT and LLAMA have opened a revolution in this regard. Currently Ontotext is exploring 8 themes with LLMs.
NLQ can be accomplished either by:
- Providing data from GraphDB to the LLM, or
- Presenting a schema to the LLM and asking it to generate queries.
In this talk we explore query generation.
- SPARQL queries are complex, so even for known schemas (e.g. Wikidata, DBpedia), GPT has trouble generating good queries, see \Shared drives\KGS\AI-GPT\GPT-SPARQL. Furthermore, RDF schemas (OWL and SHACL) are complex. But I'm sure there will be fast progress in SPARQL generation, see LlamaIndex advances in GDB-8329.
- GraphQL queries are regular and much simpler, and SOML is a simpler schema language (from which the Ontotext Platform generates GraphQL schema, queries and SHACL shapes). In this talk I'll show how GPT4 can answer questions about a schema, and generate GraphQL to answer questions about data.}
}

Clients want to talk to their KG, i.e. ask questions about the schema and data in natural language. LLMs like GPT and LLAMA have opened a revolution in this regard. Currently Ontotext is exploring 8 themes with LLMs. NLQ can be accomplished either by:
- Providing data from GraphDB to the LLM, or
- Presenting a schema to the LLM and asking it to generate queries.
In this talk we explore query generation.
- SPARQL queries are complex, so even for known schemas (e.g. Wikidata, DBpedia), GPT has trouble generating good queries, see \Shared drives\KGS\AI-GPT\GPT-SPARQL. Furthermore, RDF schemas (OWL and SHACL) are complex. But I'm sure there will be fast progress in SPARQL generation, see LlamaIndex advances in GDB-8329.
- GraphQL queries are regular and much simpler, and SOML is a simpler schema language (from which the Ontotext Platform generates GraphQL schema, queries and SHACL shapes). In this talk I'll show how GPT4 can answer questions about a schema, and generate GraphQL to answer questions about data.

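A rough sketch of the generation loop described in the talk: hand the LLM a (much simpler) SOML-style schema, ask it for a GraphQL query, and execute the result. The schema fragment, model name, and GraphQL endpoint are placeholders, not Ontotext's actual setup.

```python
# Sketch: schema-aware GraphQL generation with an LLM, then execution.
# Assumes OPENAI_API_KEY is set; schema and endpoint are illustrative.
import requests
from openai import OpenAI

SOML_SCHEMA = """
objects:
  Person:
    props:
      name:      {range: string}
      birthDate: {range: date}
"""  # abbreviated SOML-style fragment, for illustration only

client = OpenAI()
completion = client.chat.completions.create(
    model="gpt-4",  # placeholder model name
    messages=[
        {"role": "system", "content":
         "You translate questions into GraphQL for this schema. "
         "Reply with the query only.\n" + SOML_SCHEMA},
        {"role": "user", "content": "List people born after 1990."},
    ],
)
graphql_query = completion.choices[0].message.content

# Execute the generated query against a (hypothetical) GraphQL endpoint.
resp = requests.post("https://platform.example.com/graphql",
                     json={"query": graphql_query})
print(resp.json())
```
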
Semantic bSDD: Improving the GraphQL, JSON and RDF Representations of buildingSmart Data Dictionary.
Alexiev, V.; Radkov, M.; and Keberle, N.
In Linked Data in Architecture and Construction (LDAC 2023), Matera, Italy, June 2023.

@InProceedings{SemanticBSDD-LDAC-2023,
  author       = {Vladimir Alexiev and Mihail Radkov and Nataliya Keberle},
  title        = {{Semantic bSDD: Improving the GraphQL, JSON and RDF Representations of buildingSmart Data Dictionary}},
  booktitle    = {{Linked Data in Architecture and Construction (LDAC 2023)}},
  year         = 2023,
  month        = jun,
  address      = {Matera, Italy},
  url          = {https://linkedbuildingdata.net/ldac2023/files/papers/papers/LDAC2023_paper_1547.pdf},
  url_Demo     = {https://bsdd.ontotext.com/},
  url_Detailed = {https://bsdd.ontotext.com/README.html},
  url_Github   = {https://github.com/Accord-Project/bsdd},
  url_Preprint = {https://bsdd.ontotext.com/paper/paper.pdf},
  url_Slides   = {https://bsdd.ontotext.com/presentation/presentation.html},
  url_Video    = {https://drive.google.com/open?id=1Mhts8JwbdJFUmQHGULCqduijZ0NpEoxX},
  keywords     = {Linked building data, LBD, buildingSMART Data Dictionary, bSDD, FAIR data, data quality},
  abstract     = {The buildingSmart Data Dictionary (bSDD) is an important shared resource in the Architecture, Engineering, Construction, and Operations (AECO) domain. It is a collection of datasets ("domains") that define various classifications (objects representing building components, products, and materials), their properties, allowed values, etc. bSDD defines a GraphQL API, as well as REST APIs that return JSON and RDF representations. This improves the interoperability of bSDD and eases its deployment in architectural Computer Aided Design (CAD) and other AECO software. However, bSDD data is not structured as well as possible, and data retrieved via different APIs is not identical in content and structure. This lowers bSDD data quality, usability and trust. We conduct a thorough comparison and analysis of bSDD data related to fulfillment of FAIR (findable, accessible, interoperable, and reusable) principles. Based on this analysis, we suggest enhancements to make bSDD data better structured and more FAIR. We implement many of the suggestions by refactoring the original data to make it better structured/interconnected, and more "semantic". We provide a SPARQL endpoint using Ontotext GraphDB, and a GraphQL endpoint using Ontotext Platform Semantic Objects. Our detailed work is available at https://github.com/Accord-Project/bsdd (open source) and https://bsdd.ontotext.com (home page, schemas, data, sample queries).},
}

The buildingSmart Data Dictionary (bSDD) is an important shared resource in the Architecture, Engineering, Construction, and Operations (AECO) domain. It is a collection of datasets ("domains") that define various classifications (objects representing building components, products, and materials), their properties, allowed values, etc. bSDD defines a GraphQL API, as well as REST APIs that return JSON and RDF representations. This improves the interoperability of bSDD and eases its deployment in architectural Computer Aided Design (CAD) and other AECO software. However, bSDD data is not structured as well as possible, and data retrieved via different APIs is not identical in content and structure. This lowers bSDD data quality, usability and trust. We conduct a thorough comparison and analysis of bSDD data related to fulfillment of FAIR (findable, accessible, interoperable, and reusable) principles. Based on this analysis, we suggest enhancements to make bSDD data better structured and more FAIR. We implement many of the suggestions by refactoring the original data to make it better structured/interconnected, and more "semantic". We provide a SPARQL endpoint using Ontotext GraphDB, and a GraphQL endpoint using Ontotext Platform Semantic Objects. Our detailed work is available at https://github.com/Accord-Project/bsdd (open source) and https://bsdd.ontotext.com (home page, schemas, data, sample queries).

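As a usage sketch, querying the refactored bSDD data over SPARQL might look like this; the endpoint path and the use of skos:prefLabel are assumptions for illustration (see https://bsdd.ontotext.com for the actual schema and sample queries).

```python
# Sketch: a SPARQL lookup against the refactored bSDD data.
# The endpoint path and property choice are assumed, not confirmed.
import requests

ENDPOINT = "https://bsdd.ontotext.com/sparql"  # assumed endpoint path

QUERY = """
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
SELECT ?class ?label WHERE {
  ?class skos:prefLabel ?label .
  FILTER (CONTAINS(LCASE(STR(?label)), "wall"))
} LIMIT 10
"""

resp = requests.get(ENDPOINT, params={"query": QUERY},
                    headers={"Accept": "application/sparql-results+json"})
for row in resp.json()["results"]["bindings"]:
    print(row["class"]["value"], "-", row["label"]["value"])
```
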
The InnoGraph Artificial Intelligence Taxonomy: A Key to Unlocking AI-Related Entities and Content.
Alexiev, V.; Bechev, B.; and Osytsin, A.
Technical Report, Ontotext Corp, December 2023.
Contents: Introduction: - Potential InnoGraph Datasets and Users - Importance of Topics and A Holistic Approach - Example: Github Topics - Kinds of Topics. Core Topics: Wikipedia Articles: - Wikipedia Categories - Category Pruning. Collaborative Patent Classification: Application Areas: - PatBase Browser - CPC Semantic Data at EPO - Finding All CPC AI Topics - CPC Snowballing - CPC for Application Area Topics. Other Topic Datasets: - ACM CCS - AIDA FAT - AMiner KGs - ANZSRC FOR - arXiv Areas - China NSFC - EU CORDIS EuroSciVoc - Crunchbase Categories - CSO - JEL - MESH - MSC - OpenAlex Topics - SemanticScholar FOS - StackExchange Tags. Conclusion and Future Work: - Acknowledgements - References.

@TechReport{InnoGraph-AI-Taxonomy,
  author       = {Vladimir Alexiev and Boyan Bechev and Alexandr Osytsin},
  title        = {The InnoGraph Artificial Intelligence Taxonomy: A Key to Unlocking AI-Related Entities and Content},
  institution  = {Ontotext Corp},
  year         = 2023,
  type         = {whitepaper},
  month        = dec,
  note         = {Introduction: - Potential InnoGraph Datasets and Users - Importance of Topics and A Holistic Approach - Example: Github Topics - Kinds of Topics. Core Topics: Wikipedia Articles: - Wikipedia Categories - Category Pruning. Collaborative Patent Classification: Application Areas: - PatBase Browser - CPC Semantic Data at EPO - Finding All CPC AI Topics - CPC Snowballing - CPC for Application Area Topics. Other Topic Datasets: - ACM CCS - AIDA FAT - AMiner KGs - ANZSRC FOR - arXiv Areas - China NSFC - EU CORDIS EuroSciVoc - Crunchbase Categories - CSO - JEL - MESH - MSC - OpenAlex Topics - SemanticScholar FOS - StackExchange Tags. Conclusion and Future Work: - Acknowledgements - References},
  url          = {https://www.ontotext.com/knowledgehub/white_paper/the-innograph-artificial-intelligence-taxonomy/},
  keywords     = {InnoGraph, Artificial Intelligence, Topics, Taxonomy},
  date         = {2023-12},
  abstract     = {InnoGraph is a holistic Knowledge Graph of innovation based on Artificial Intelligence (AI). AI is the underpinning of much of the world's innovation, therefore it has immense economic and human improvement potential. With the explosive growth of Machine Learning (ML), Deep Learning (DL) and Large Language Models (LLM), it is hard to keep up with all AI developments, but doing so is a valuable effort. A key to discovering AI elements is to build a comprehensive taxonomy of topics: AI techniques and application areas (verticals). We describe our approach to developing such a taxonomy by integrating and coreferencing data from numerous sources.},
}

InnoGraph is a holistic Knowledge Graph of innovation based on Artificial Intelligence (AI). AI is the underpinning of much of the world's innovation, therefore it has immense economic and human improvement potential. With the explosive growth of Machine Learning (ML), Deep Learning (DL) and Large Language Models (LLM), it is hard to keep up with all AI developments, but doing so is a valuable effort. A key to discovering AI elements is to build a comprehensive taxonomy of topics: AI techniques and application areas (verticals). We describe our approach to developing such a taxonomy by integrating and coreferencing data from numerous sources.

Towards InnoGraph: A Knowledge Graph for AI Innovation.
Massri, M.; Spahiu, B.; Grobelnik, M.; Alexiev, V.; Palmonari, M.; and Roman, D.
In 3rd International Workshop on Scientific Knowledge Representation, Discovery, and Assessment (Sci-K 2023), WWW 2023 Companion, Austin, Texas, June 2023.

@InProceedings{TowardsInnograph-SciK-2023,
  author       = {M.Besher Massri and Blerina Spahiu and Marko Grobelnik and Vladimir Alexiev and Matteo Palmonari and Dumitru Roman},
  title        = {{Towards InnoGraph: A Knowledge Graph for AI Innovation}},
  booktitle    = {{3rd International Workshop on Scientific Knowledge Representation, Discovery, and Assessment (Sci-K 2023). WWW 2023 Companion}},
  year         = 2023,
  month        = jun,
  address      = {Austin, Texas},
  url          = {https://dl.acm.org/doi/10.1145/3543873.3587614},
  url_Preprint = {https://zenodo.org/record/7750707/files/Towards%20InnoGraph%20A%20Knowledge%20Graph%20for%20AI%20Innovation.pdf?download=1},
  url_Slides   = {https://zenodo.org/record/7750707/files/Towards%20InnoGraph%20A%20Knowledge%20Graph%20for%20AI%20Innovation.pptx?download=1},
  url_Zenodo   = {https://zenodo.org/record/7750707},
  keywords     = {artificial intelligence, innovation, innovation ecosystem, knowledge graph, science knowledge graph, economics knowledge graph},
  doi          = {10.1145/3543873.3587614},
  abstract     = {To understand the state-of-the-art innovations in a particular domain, researchers have to explore patents and scientific articles published recently in that particular domain. Innovation ecosystems comprise interconnected information regarding entities, i.e., researchers, institutions, projects, products, and technologies. Representing such information in a machine-readable format is challenging. This is due to the fact that representing concepts like "knowledge" is not straightforward. However, even a partial representation provides valuable information. Representing innovation ecosystems as knowledge graphs (KGs) enables the generation of new insights and would allow advanced data analysis. In this paper, we propose InnoGraph, a KG of the worldwide AI innovation ecosystem.},
}

To understand the state-of-the-art innovations in a particular domain, researchers have to explore patents and scientific articles published recently in that particular domain. Innovation ecosystems comprise interconnected information regarding entities, i.e., researchers, institutions, projects, products, and technologies. Representing such information in a machine-readable format is challenging. This is due to the fact that representing concepts like "knowledge" is not straightforward. However, even a partial representation provides valuable information. Representing innovation ecosystems as knowledge graphs (KGs) enables the generation of new insights and would allow advanced data analysis. In this paper, we propose InnoGraph, a KG of the worldwide AI innovation ecosystem.

InnoGraph Datasets.
Alexiev, V.
Presentation, enrichMyData Project Meeting, Milan, Italy, January 2023.
The presentation is not public yet; contact the author in case of interest.

@Misc{Alexiev-InnographDatasets-2023,
  author       = {Vladimir Alexiev},
  title        = {{InnoGraph Datasets}},
  howpublished = {presentation},
  month        = jan,
  year         = 2023,
  note         = {The presentation is not public yet; contact the author in case of interest},
  url          = {https://github.com/enRichMyData/InnoGraph/blob/main/papers-write/202301-InnoGraph-Datasets/index.org},
  url_HTML     = {https://rawgit2.com/enRichMyData/InnoGraph/main/papers-write/202301-InnoGraph-Datasets/index.html},
  address      = {enrichMyData Project Meeting, Milan, Italy},
  abstract     = {What is InnoGraph: a Holistic KG of the world-wide AI innovation ecosystem. Who are its users/uses: Investment and strategic advice (VCs, M&A): Innovators and startups, Industry convergence and digitization, Strategic gaps, Strategic acquisition targets; Researchers and developers; Policy makers: EU level, National science foundations, OECD.AI, maybe even US and China; Self-use: AI is at a cusp (singularity), so learn about it in depth!},
}

What is InnoGraph: a Holistic KG of the world-wide AI innovation ecosystem. Who are its users/uses: Investment and strategic advice (VCs, M&A): Innovators and startups, Industry convergence and digitization, Strategic gaps, Strategic acquisition targets; Researchers and developers; Policy makers: EU level, National science foundations, OECD.AI, maybe even US and China; Self-use: AI is at a cusp (singularity), so learn about it in depth!

Generation of Declarative Transformations from Semantic Models.
Alexiev, V.
In European Data Conference on Reference Data and Semantics (ENDORSE 2023), pages 33, 42-59, March 2023. European Commission: Directorate-General for Informatics, Publications Office of the European Union.

@InProceedings{Alexiev-ENDORSE-2023,
  author       = {Vladimir Alexiev},
  title        = {{Generation of Declarative Transformations from Semantic Models}},
  booktitle    = {{European Data Conference on Reference Data and Semantics (ENDORSE 2023)}},
  year         = 2023,
  pages        = {33, 42-59},
  month        = mar,
  organization = {European Commission: Directorate-General for Informatics, Publications Office of the European Union},
  url          = {https://drive.google.com/open?id=1Cq5o9th_P812paqGkDsaEomJyAmnypkD},
  url_PPT      = {https://docs.google.com/presentation/d/1JCMQEH8Tw_F-ta6haIToXMLYJxQ9LRv6/edit},
  url_Slides   = {https://op.europa.eu/documents/10157494/12134844/DAY1-TRACK2-16.35-16.50-VladimirAlexiev_FORPUB.pdf/6e564f96-6ad6-1464-7a6e-e9533207f281},
  url_Video    = {https://youtu.be/yL5nI_3ccxs},
  keywords     = {semantic model, semantic data integration, ETL, semantic conversion, declarative approaches, PlantUML, R2RML, generation, model-driven, RDF by Example, rdfpuml, rdf2rml},
  isbn         = {978-92-78-43682-7},
  doi          = {10.2830/343811},
  annote       = {Catalogue number: OA-04-23-743-EN-N},
  date         = {2023-08-04},
  url_proceedings= {https://op.europa.eu/en/publication-detail/-/publication/4db67b35-34df-11ee-bdc3-01aa75ed71a1},
  abstract     = {The daily work of the Knowledge Graph Solutions group at Ontotext involves KG building activities such as investigating data standards and datasets, ontology engineering, harmonizing data through semantic models, converting or virtualizing data to semantic form, entity matching, semantic text enrichment, etc. Semantic pipelines have a variety of desirable properties, of which maintainability and consistency of the various artefacts are some of the most important ones. Despite significant recent progress (e.g. in the KG Building W3C community group), semantic conversion still remains one of the difficult steps. We favor generation of semantic transformations from semantic models that are both sufficiently precise, easily understandable, can be used to generate diagrams, and are valid RDF to allow processing with RDF tools. We call this approach "RDF by Example" and have developed a set of open source tools at https://github.com/VladimirAlexiev/rdf2rml. This includes "rdfpuml" for generating diagrams, "rdf2rml" for generating R2RML for semantization of relational data and ONTOP virtualization, "rdf2sparql" for semantization of tabular data with Ontotext Refine or TARQL. We describe our approach and illustrate it with complex and high-performance transformations in a variety of domains, such as company data and NIH research grants.},
}

The daily work of the Knowledge Graph Solutions group at Ontotext involves KG building activities such as investigating data standards and datasets, ontology engineering, harmonizing data through semantic models, converting or virtualizing data to semantic form, entity matching, semantic text enrichment, etc. Semantic pipelines have a variety of desirable properties, of which maintainability and consistency of the various artefacts are some of the most important ones. Despite significant recent progress (e.g. in the KG Building W3C community group), semantic conversion still remains one of the difficult steps. We favor generation of semantic transformations from semantic models that are both sufficiently precise, easily understandable, can be used to generate diagrams, and are valid RDF to allow processing with RDF tools. We call this approach "RDF by Example" and have developed a set of open source tools at https://github.com/VladimirAlexiev/rdf2rml. This includes "rdfpuml" for generating diagrams, "rdf2rml" for generating R2RML for semantization of relational data and ONTOP virtualization, "rdf2sparql" for semantization of tabular data with Ontotext Refine or TARQL. We describe our approach and illustrate it with complex and high-performance transformations in a variety of domains, such as company data and NIH research grants.

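A toy illustration of the model-driven idea (not the actual rdf2rml tool): a model that pairs a subject IRI template with column-to-property bindings can be walked mechanically to emit R2RML. The table and column names are invented for the sketch.

```python
# Toy model-driven R2RML generation: one subject template plus column
# bindings expands into an rr:TriplesMap. Names are illustrative only.
def to_r2rml(table: str, template: str, cls: str, props: dict) -> str:
    """Emit one rr:TriplesMap mapping `table` columns to `cls` properties."""
    local = cls.split(":")[1]  # "Person" from "foaf:Person"
    poms = " ;\n".join(
        f'  rr:predicateObjectMap [ rr:predicate {pred} ; '
        f'rr:objectMap [ rr:column "{col}" ] ]'
        for pred, col in props.items()
    )
    return f"""@prefix rr:   <http://www.w3.org/ns/r2rml#> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .

<#{local}Map> a rr:TriplesMap ;
  rr:logicalTable [ rr:tableName "{table}" ] ;
  rr:subjectMap [ rr:template "{template}" ; rr:class {cls} ] ;
{poms} .
"""

# Invented table/column names, for the sketch:
print(to_r2rml("PERSON", "urn:person/{id}", "foaf:Person",
               {"foaf:name": "NAME", "foaf:mbox": "EMAIL"}))
```

The real tools drive the same expansion from an RDF example model, which also yields diagrams via rdfpuml.
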
\n\n\n
\n\n\n

Migrating J. Paul Getty Museum Agent ID from P2432 to P12040. Alexiev, V. Github gist, November 2023.

@Misc{Alexiev2023-MigratingGettyID,
  author       = {Vladimir Alexiev},
  title        = {{Migrating J. Paul Getty Museum Agent ID from P2432 to P12040}},
  howpublished = {Github gist},
  month        = nov,
  year         = 2023,
  url          = {https://gist.github.com/VladimirAlexiev/e0a7bae256e9646a7b6f47b23184f9a4},
  keywords     = {Getty Trust, J. Paul Getty Museum, authority control, Wikidata, cultural heritage, GLAM},
  abstract     = {Previously Wikidata had Getty Museum agent DOR ID (P2432), e.g. https://www.getty.edu/art/collection/artists/377. But this is an internal ID that redirects to e.g. https://www.getty.edu/art/collection/person/103JV9. So I made a Wikidata property for the Getty Museum new agent ID (P12040). Using the Getty SPARQL endpoint, I exported 12936 persons and 3616 groups with fields "guid name old new ulan nat role birthDate birthPlace deathDate deathPlace". Then I initiated a discussion on how to populate these new IDs to Wikidata, leveraging ULAN and the old DOR ID: https://www.wikidata.org/wiki/Property_talk:P12040#Populating_J._Paul_Getty_Museum_agent_ID. I also found some records without a new ID, and started a discussion with Getty to see why that ID was missing.},
}
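
A minimal sketch of the kind of maintenance query behind such a migration: find Wikidata items that still have the old DOR ID (P2432) but no new agent ID (P12040), via the public Wikidata Query Service (Python with requests; the User-Agent string is a placeholder):

# Sketch only: list items with the old Getty DOR ID but no new agent ID.
import requests

QUERY = """
SELECT ?item ?oldId WHERE {
  ?item wdt:P2432 ?oldId .
  FILTER NOT EXISTS { ?item wdt:P12040 [] }
}
LIMIT 20
"""

r = requests.get(
    "https://query.wikidata.org/sparql",
    params={"query": QUERY, "format": "json"},
    headers={"User-Agent": "getty-id-migration-sketch/0.1"},  # placeholder UA
    timeout=60,
)
for b in r.json()["results"]["bindings"]:
    print(b["item"]["value"], b["oldId"]["value"])
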

2022 (12)

Semantic Interoperability for Data Spaces. Alexiev, V. In SEMIC: Data Spaces in an Interoperable Europe (SEMIC 2022), December 2022.

@InProceedings{Alexiev-SEMIC-2022,
  author       = {Vladimir Alexiev},
  title        = {{Semantic Interoperability for Data Spaces}},
  booktitle    = {SEMIC: Data Spaces in an Interoperable Europe (SEMIC 2022)},
  year         = 2022,
  month        = dec,
  url          = {https://docs.google.com/presentation/d/1OMxNZItNCjGnod0KQ__Hp9oQ8mwmMyNn},
  howpublished = {presentation},
  keywords     = {Data Spaces, interoperability, semantic interoperability, knowledge graphs, RDF, Semantic Technology, Polyglot Modeling, Product Classifications, Product Catalogs, Manufacturing Industry, Electricity, Transport and Logistics, Architecture and Construction},
}

JSON-LD, YAML-LD and Polyglot Modeling. Alexiev, V. Presentation, October 2022.

@Misc{Alexiev-JSONLD-YAMLLD-2022,
  author       = {Vladimir Alexiev},
  title        = {{JSON-LD, YAML-LD and Polyglot Modeling}},
  howpublished = {presentation},
  month        = oct,
  year         = 2022,
  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20221028-JSONLD/Slides.html},
  keywords     = {JSON-LD, YAML-LD, polyglot modeling, GraphDB, rdf4j, Titanium, GS1, EPCIS, Allotrope},
  address      = {Presentation at Ontotext Last Friday},
}
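
Basic YAML-LD is the JSON-LD data model in YAML syntax, which is the crux of the polyglot argument. A minimal sketch in Python with PyYAML and rdflib (which bundles a JSON-LD parser in recent versions); the document below is invented for illustration:

# Sketch only: round-trip a YAML-LD document into RDF with off-the-shelf tools.
import json
import yaml
from rdflib import Graph

YAML_LD = """
"@context":
  name: http://schema.org/name
"@id": http://example.org/alice
name: Alice
"""

doc = yaml.safe_load(YAML_LD)                               # YAML -> dicts/lists
g = Graph().parse(data=json.dumps(doc), format="json-ld")   # JSON-LD -> RDF
print(g.serialize(format="turtle"))
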

Decentralization and Self-Sovereignty, Or how I finally understood what Blockchain is good for. Alexiev, V. Presentation, February 2022.

@Misc{Alexiev-decentralization-2022,
  author       = {Vladimir Alexiev},
  title        = {{Decentralization and Self-Sovereignty, Or how I finally understood what Blockchain is good for}},
  howpublished = {presentation},
  month        = feb,
  year         = 2022,
  url          = {https://docs.google.com/presentation/d/1AEwLjM7ry6BeM0XoF8EVbl5zeoMkE-tBht0CcL3cfPk/edit},
  keywords     = {LD, JSONLD, HDT, HDF5, TPF, LDF, LDP, LDN, SOLID, DID, VC, IDSA RAM},
  url_report   = {https://docs.google.com/document/d/1qpMAa55SYV6E4D_ffIgsZopmpzrUrjjR9c36SXXCVZQ/edit#},
  address      = {Presentation at Ontotext Last Friday},
}

Semantic and Polyglot Modeling, Generation of Declarative Transformations, Data Spaces ft. Vladimir Alexiev. Alexiev, V. Podcast, October 2022.

@Misc{Alexiev-podcast2022,
  author       = {Vladimir Alexiev},
  title        = {{Semantic and Polyglot Modeling, Generation of Declarative Transformations, Data Spaces ft. Vladimir Alexiev}},
  howpublished = {Podcast},
  address      = {Loose Edges podcast with Marsel Tadjer and Justin Dowdy},
  month        = oct,
  year         = 2022,
  url          = {https://player.fm/series/loose-edges/semantic-and-polyglot-modeling-generation-of-declarative-transformations-data-spaces-ft-vladimir-alexiev},
  keywords     = {semantic modeling, polyglot modeling, ontology engineering, knowledge graphs, competency questions, upper ontologies, reusable ontologies, GraphDB, GraphQL, Ontotext Refine, Ontotext Reconcile},
  abstract     = {In this episode of Loose Edges Marsel and Justin interview Vladimir Alexiev, Chief Data Architect at Ontotext.
- We explore Application Centric Data and how to catch defects in various modeling approaches.
- Discuss Ontotext products: new GraphDB capabilities such as search and connectors, GraphQL capabilities, Ontotext Refine, Ontotext Reconcile.
- Ontotext "10 step guide to KGs". Start a KG project with "competency questions".
- Semantic transformation best practices and approaches: declarative and generated transformations.
- Polyglot modeling: what is it and where is it manifesting itself in various data communities (from HL7 FHIR to YAML-LD).
- Standards. Working Groups. How to get involved, what are some of the best practices from Vladimir's perspective, and what should an aspiring semantic engineer and ontologist be aware of.
- Common upper ontologies / Reusable ontologies / simple vs. broad; hear some examples from a dozen different industries.},
  annote       = {
00:00 - 01:13 Intro and Ontotext News
01:14 - 04:00 GraphDB features, where to use RDF / Use cases and industries
04:00 - 07:00 Connectors and transformation language for imports
07:00 - 08:00 Elastic and search connectors
08:00 - 10:49 GraphQL support / standards / avoiding cartesian products / standardization and full text search
10:50 - 16:30 RML / Start with ontologies or start with data / templates with standard ttl + generating conversions
16:35 - 20:00 Ontotext 10 step guide / start a KG project with "competency questions"
20:00 - 23:30 Application Centric Data / Defects in vocabularies / semantic representations / examples of standards-setting organizations
20:30 - 29:40 Polyglot modeling / data modeling / HL7/FHIR / YAML-LD: easiness of yaml to read vs json
30:00 - 32:00 Better modeling with json-ld Frames / community practices
32:00 - 35:15 Object vs. Literal / Transparency EKG spec (schema.org vs other approach) / Inclusivity of wikidata / "be too demanding"
35:15 - 39:10 Subject matter under-specification is a deterrent for raising the quality of data / "use wikidata and geonames"
39:00 - 41:30 Specificity of thinking: from files and messages to the real world. We are describing "things" in the real world
41:30 - 43:30 Value of descriptions
43:30 - 47:15 Standards / Working groups / DBPedia vs Wikidata best practices / Ontotext Refine / Ontotext Reconcile
47:15 - 51:30 Practices with W3C vs ISO standards
51:30 - 54:45 Advice for upcoming graph specialists - example with an internal query iterated from an external query posted to SPARQL
54:45 - 58:30 Justin asks about DPV W3C: no consistent worldview / Common upper ontologies / Reusable ontologies / W3C practice with ORG and ADMS "simplify / make usable" / ISO-15926 / C vs lisp
58:30 - 1:00:00 Start from "common primitives" / defining the base is "not free", it comes at a price / Crunchbase + IPO examples},
}

Data Wants To Be Truly Sovereign: Designing Data Spaces with Linked Data Principles In Mind. Petkova, T.; and Alexiev, V. Ontotext blog post, November 2022.

@Misc{Petkova-Alexiev-DataSpaces-2022,
  author       = {Teodora Petkova and Vladimir Alexiev},
  title        = {{Data Wants To Be Truly Sovereign: Designing Data Spaces with Linked Data Principles In Mind}},
  howpublished = {Ontotext blog post},
  month        = nov,
  year         = 2022,
  url          = {https://www.ontotext.com/blog/data-wants-to-be-truly-sovereign-designing-data-spaces/},
  keywords     = {Data Spaces, Knowledge Graphs, Semantic Data Spaces},
  abstract     = {Learn how data spaces, being a mechanism to enable efficient commercial data exchange, can significantly benefit from the use of Linked Data at the level of the data itself.},
}

Data Spaces vs Knowledge Graphs: How to Get To Semantic Data Spaces? Alexiev, V. In Data Spaces & Semantic Interoperability Workshop, Vienna, Austria, July 2022.

@InProceedings{Alexiev-DataSpaces-2022,
  author       = {Vladimir Alexiev},
  title        = {{Data Spaces vs Knowledge Graphs: How to Get To Semantic Data Spaces?}},
  booktitle    = {{Data Spaces & Semantic Interoperability Workshop}},
  year         = 2022,
  month        = jul,
  address      = {Vienna, Austria},
  url          = {https://drive.google.com/file/d/15RuCfyresjmc0JWoNl8Jpjpbf_O65UkD/view},
  url_Slides   = {https://docs.google.com/presentation/d/1uujCfAGw7nTwz9c6ItLtUhsKiGEbK2bKCWUOOunpyw0/edit},
  url_Video    = {https://www.youtube.com/watch?v=RpCVChGczSA},
  url_Blog     = {https://www.ontotext.com/company/news/ontotext-presents-position-paper-at-data-spaces-and-semantic-interoperability-workshop/},
  keywords     = {Data Spaces, RDF, Semantic Technology, Polyglot Modeling, Product Classifications, Product Catalogs, Manufacturing Industry, Electricity, Transport and Logistics, Architecture and Construction},
  abstract     = {The EU invests heavily in Data Spaces (DS) as a mechanism to enable commercial data exchange and therefore industry digitalization and the proliferation of Data Science and Artificial Intelligence, in particular Machine Learning (ML). While Data Spaces heavily use semantic technologies, that use is limited to describing metadata, license agreements, data market participants, etc. I argue that using Linked Data and semantic technologies for the data itself offers significant benefits regarding more efficient data sharing and use, and improvements to ML and Data Science processes. I give an overview of the state of semantic data sharing in several industrial domains (Product Classifications and Catalogs, Manufacturing Industry, Electricity, Transport and Logistics, Architecture and Construction), and close with a brief overview of technological enablers required for Semantic Data Spaces.},
}

Ontologies vs Linked Data & Knowledge Graphs. Alexiev, V. In First International Workshop on Semantic Industrial Information Modelling (SemIIM 2022 at ESWC 2022), May 2022. Panel presentation.

@InProceedings{Alexiev-SemIIM2022,
  author       = {Vladimir Alexiev},
  title        = {{Ontologies vs Linked Data & Knowledge Graphs}},
  booktitle    = {{First International Workshop on Semantic Industrial Information Modelling (SemIIM 2022 at ESWC 2022)}},
  year         = 2022,
  month        = may,
  note         = {Panel presentation},
  url          = {https://docs.google.com/presentation/d/1lKGZ_6MsTE15E6wFBorsVHmyQ3RjqzznpzQ7xpoDhUU/edit},
}

Transparency EKG Requirements Specification, Architecture and Semantic Model. Alexiev, V.; Ribchev, V.; Chervenski, M.; Tulechki, N.; Radkov, M.; Kunchev, A.; and Nanov, R. Technical report, Ontotext Corp, June 2022.

@TechReport{AlexievEtAl-TEKG-spec,
  author       = {Vladimir Alexiev and Viktor Ribchev and Miroslav Chervenski and Nikola Tulechki and Mihail Radkov and Antoniy Kunchev and Radostin Nanov},
  title        = {{Transparency EKG Requirements Specification, Architecture and Semantic Model}},
  institution  = {Ontotext Corp},
  year         = 2022,
  type         = {Specification},
  month        = jun,
  url          = {https://transparency.ontotext.com/spec/},
  keywords     = {energy, electricity, ENTSO-E, market transparency, knowledge graph, specification, semantic architecture, semantic model},
}

Transparency Energy Knowledge Graph Project: Final Results. Alexiev, V. Presentation, October 2022.

@Misc{Alexiev-TEKG4,
  author       = {Vladimir Alexiev},
  title        = {{Transparency Energy Knowledge Graph Project: Final Results}},
  howpublished = {Presentation},
  month        = oct,
  year         = 2022,
  url          = {https://docs.google.com/presentation/d/1jpgrBr2eXvOShlOtFmoMeF1jjGIrvt5F},
  url_Video    = {https://www.youtube.com/watch?v=Lm4Q2riM3Ro},
  keywords     = {energy, electricity, ENTSO-E, power plant databases, electricity market, energy markets, market transparency, knowledge graph, OpenStreetMap, EIC, validation, SHACL, SHACL Advanced, analytics},
  url_BDVA     = {https://jam4.sapjam.com/blogs/show/XnKajJjHL6qjJt6dUuPXzI},
  address      = {Presentation at Ontotext Knowledge Graph Forum 2022},
  abstract     = {The Transparency Energy KG (TEKG) project converted part of the ENTSO-E electricity market transparency data to a semantic KG and complemented it with external data sources (VIES for VAT validation, OpenStreetMap for power plant maps and coordinates, several power plant databases for correspondences/coreferencing). We have implemented a number of advanced validations over fundamental electricity data such as the EIC file (Energy Identification Code), power plant data, and specific market data observations (time series). We also implemented advanced analytics and map views, including integration of OpenStreetMap maps. KGs afford a holistic view over the data that allows us to uncover a number of data problems, presented in a Data Quality Assessment Dashboard. This could help ENTSO-E and national electricity authorities (Transmission System Operators, TSOs) diagnose data quality problems and improve data collection procedures and legislation.},
}

Transparency Energy Knowledge Graphs for Energy Traceability. Presentation, September 2022.

Ontotext's Transparency Energy KG (TEKG) project converted part of ENTSO-E electricity market transparency data to a semantic KG and complemented it with external data sources (VIES for VAT validation, OpenStreetMap for power plant maps and coordinates, several power plant databases for correspondences/coreferencing). There are at least 8 EU regulations that lay out rules for market transparency, in particular in energy markets. But energy is holistic, so going beyond electricity, ACER tracks at least 20 transparency platforms in various stages of certification, of which 16 operate for Electricity and 16 for Natural Gas. ENTSO-E and ENTSO-G are the central transparency platforms, but there are also platforms run by energy exchanges (e.g. EEX) and nonprofits (e.g. GIE). The ENTSO-G Transparency Platform publishes data about the gas market, and GIE has data about current and future gas infrastructure, including gas storages. ACER also tracks 130 other platforms (104 of which are active): marketplaces, Registered Reporting Mechanisms, trade matching systems, etc. This is important data that affects all of us as energy consumers, and becomes even more important given the Russian gas crisis. However, the data is fragmented in distributed databases with their own access modes and only partially harmonized information. KGs and semantic data integration afford holistic views over all data across an industry and facilitate data validation and analyses that were not previously possible. A number of identifiers can be used to coreference these entities: EIC for all kinds of energy resources and players (issued in a decentralized way, no central database exists), 13 database-specific IDs of power plants, GIE storage ID for gas storages, ACER ID for market players, MIC for marketplaces, BIC for bank routing, GLEI for legal entities, GS1 GLN for logistics locations, OpenStreetMap for entities on a map, Wikidata ID for an encyclopedic KG, etc. We have worked with many of these datasets, in particular integrating parts in Wikidata for open semantic integration. We present the TEKG project, then some of the mentioned datasets and our ideas how TEKG could be extended to cover the following cases:
UC1: Energy transparency basic data: semantically integrated, verified through blockchain and RDF Validation; including master data.
UC2: Energy data for market players, exchanges, regulators and policy makers: analysis of energy prices, trading practices, energy mix transformation and evolution.
UC3: Analysis of the Sustainability of EU Gas and Progress Towards Energy Independence from Russia.
UC4: Energy Tracing for CO2 Footprint and Pollution Impact, for enterprises who have mandates to progress towards zero emissions.

Advanced SHACL Data Validation for the Transparency Energy KG. Alexiev, V. Presentation, May 2022.

@Misc{Alexiev-TEKG2,
  author       = {Vladimir Alexiev},
  title        = {{Advanced SHACL Data Validation for the Transparency Energy KG}},
  howpublished = {presentation},
  month        = may,
  year         = 2022,
  url          = {https://docs.google.com/presentation/d/1Hhxmx2YDnaxlaU5KeafjRJSDlVgHRz1z/edit},
  url_Video    = {https://youtu.be/4JGSui7Uq_Y},
  keywords     = {energy, electricity, ENTSO-E, market transparency, knowledge graph, validation, SHACL, SHACL Advanced},
  address      = {Presentation at Ontotext Demo Days},
  abstract     = {The Transparency Energy KG (TEKG) project converts the ENTSO-E electricity market transparency data to a semantic KG and complements it with external data sources (VIES for VAT validation, OpenStreetMap for power plant maps and coordinates, several power plant databases for correspondences/coreferencing). We have implemented a number of advanced validations over fundamental electricity data such as the EIC file (Energy Identification Code), power plant data, and specific market data observations (time series). KGs afford a holistic view over the data that allows us to uncover a number of data problems, presented in a Data Quality Assessment Dashboard. This could help ENTSO-E and national electricity authorities (Transmission System Operators, TSOs) diagnose data quality problems and improve data collection procedures and legislation.},
}
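
A minimal sketch of the flavor of validation described here, not the project's actual shapes: check that an EIC identifier literal is 16 characters from the EIC alphabet. Python with pyshacl; the ex: vocabulary and the exact character class are assumptions, and the real TEKG shapes add richer logic (e.g. check-character computation) via SHACL Advanced features:

# Sketch only: SHACL validation of EIC-like identifiers with pyshacl.
from rdflib import Graph
from pyshacl import validate

SHAPES = """
@prefix sh: <http://www.w3.org/ns/shacl#> .
@prefix ex: <http://example.org/> .

ex:EicShape a sh:NodeShape ;
  sh:targetClass ex:MarketParticipant ;
  sh:property [
    sh:path ex:eic ;
    sh:minCount 1 ;
    # assumed alphabet: digits, uppercase letters, hyphen; 16 chars total
    sh:pattern "^[0-9A-Z-]{16}$" ;
  ] .
"""

DATA = """
@prefix ex: <http://example.org/> .
ex:tso1 a ex:MarketParticipant ; ex:eic "10X1001A1001A450" .
ex:bad  a ex:MarketParticipant ; ex:eic "not-an-eic" .
"""

conforms, _, report = validate(
    Graph().parse(data=DATA, format="turtle"),
    shacl_graph=Graph().parse(data=SHAPES, format="turtle"),
)
print(conforms)   # False: ex:bad violates the pattern
print(report)
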

Transparency Energy Knowledge Graph. Alexiev, V. Presentation, January 2022.

@Misc{Alexiev-TEKG1,
  author       = {Vladimir Alexiev},
  title        = {{Transparency Energy Knowledge Graph}},
  howpublished = {presentation},
  month        = jan,
  year         = 2022,
  url          = {https://docs.google.com/presentation/d/1I0CKJ_y-Lq0eErnOabBBxmfAuOQHYNey/edit},
  url_PDF      = {http://interrface.eu/sites/default/files/ontotext_TEKG-20210131.pdf},
  keywords     = {energy, electricity, ENTSO-E, market transparency, knowledge graph},
  address      = {Presentation at Joint INTERRFACE Open Call Projects meeting},
}

2021 (7)

Cross-disciplinary ontologies for buildings, infrastructure, smart grid, electricity, energy efficiency. Alexiev, V. Presentation, November 2021.

@Misc{Alexiev-EBDVF2021,
  author       = {Vladimir Alexiev},
  title        = {{Cross-disciplinary ontologies for buildings, infrastructure, smart grid, electricity, energy efficiency}},
  howpublished = {presentation},
  month        = nov,
  year         = 2021,
  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pres/EBDVF-2021-(V.Alexiev).pptx},
  keywords     = {Cross-disciplinary, ontologies, buildings, infrastructure, smart grid, electricity, energy efficiency, energy consumption, AECO, architecture, construction, cadaster, smart city, Manufacturing, Transport/Logistics, Product classification, sensor, CIM, CGMES, IFC, LOIN, IDM, ICDD, COINS, MVD, BCF, bSDD, Data Dictionaries, Data Templates, Object Libraries, Bricks Schema, Haystack, Digital Buildings, Real Estate Core, LBD, BOT, BPO, CDC, CTO, DOT, FOG, OGC CityGML, GeoSPARQL, other OGC, ISO 23262, GIS-BIM interop, FSGIM, OpenADR, DABGEO, EnergyUse, OEMA, EEPSA, PROSG, SEAS, SEMANCO, DogOnt, ThinkHome, OPC UA, AutomationML, RAMI, AdminShell, GS1 EPCIS, CBV, WebVoc, Digital Links, TDS identifiers (GTIN, GLN, GRAI, GIAI, GDTN…), COBIE, eClass, IEC CDD, GS1 GPC, UNSPSC, SOSA, SSN, WoT TD, DTML, SAREF, SAREF4ENER, SAREF4BLDG, SAREF4water},
  address      = {Presentation at the European Big Data Value Forum (EBDVF 2021)},
}

Semantization of Machine Learning and Data Science (a Project Idea). Alexiev, V.; and Boytcheva, S. Presentation, September 2021.

@Misc{AlexievBoytcheva2021-SemantizationML,
  author       = {Vladimir Alexiev and Svetla Boytcheva},
  title        = {{Semantization of Machine Learning and Data Science (a Project Idea)}},
  howpublished = {presentation},
  month        = sep,
  year         = 2021,
  url          = {https://docs.google.com/presentation/d/1_8LSXa9vVzNwPE6Hjj4cKIJNRRBNz2wP/edit},
  keywords     = {Ontotext, research projects, knowledge graph, KG technologies, Semantization, Machine Learning, Data Science},
  address      = {Presentation at Big Data Value Association Activity Group 45 (BDVA AG 45)},
  abstract     = {Problem: Data Science, AI & ML are expensive, and that's one of the reasons why relatively few enterprises use them.
Goal: rationalize and industrialize DS efforts, and make them more reproducible and reusable.
Approach: capture a lot of semantic info about all DS processes in an enterprise, and thus enable automation, discovery, reusability.

The kinds of data we'd like to represent and integrate semantically (part of it is similar to what you can see on the Kaggle and OpenML sites):
- Business context: goals, motivations, data value, value chain, cost vs benefit analysis, SWOT analysis...
- DS challenges, where do they come from, datasets that can be leveraged to solve them
- DS staff, expertise, projects, tasks, risks
- DS/ML algorithms, implementations, modules, dependencies, software projects, versions, issue trackers
- Cloud and IT resources: compute, storage; their deployment, management, automation...
- ML model deployment, performance, model drift, retraining...

Established software genres that cover parts of this landscape:
- ModelOps (devOps for ML), Feature Spaces
- Enterprise data catalogs (data hubs) vs data marketplaces vs open data catalogs vs EU Data Spaces and their metadata
- FAIR data, reproducible research, Research Objects, research workflows

We've researched over 100 relevant ontologies that can be leveraged, covering:
- Organizations/enterprises, business plans
- Ontologies, semantic data
- DS challenges, datasets, statistical data, quality assessment
- DS/ML approaches, software, projects, issues
- Data on research/science
- Project management

Focusing on DS/ML approaches only, a couple of the relevant ontologies or standards are:
- PMML (predictive modeling markup language)
- e-LICO, DMEX ontologies for describing DS
- OntoDM, KDO ontologies for describing DS},
}

Energy Knowledge Graphs to Facilitate Evolution of the European Energy Market. Ivanov, C.; and Alexiev, V. Presentation, October 2021.

@Misc{IvanovAlexiev2021-EnergyKG,
  author       = {Chavdar Ivanov and Vladimir Alexiev},
  title        = {{Energy Knowledge Graphs to Facilitate Evolution of the European Energy Market}},
  howpublished = {presentation},
  address      = {Presentation at Ontotext Knowledge Graph Forum 2021},
  month        = oct,
  year         = 2021,
  url          = {https://docs.google.com/presentation/d/1vvrUGtutbOzwUK19Z0nEUhZbP6kw-KiFdNELMG4V3v8/edit},
  keywords     = {knowledge graph, energy knowledge graph, CIM, CGMES, ENTSOE, Single Energy Market, energy market transparency},
  abstract     = {Presents the EU Single Electricity Market, IEC Common Information Model (CIM), ENTSOE Common Grid Model Exchange Specification (CGMES), how Energy KGs can improve data integration in the energy domain, ENTSOE market transparency data, and Ontotext's Energy Transparency KG project.},
}

Energy Knowledge Graphs. Alexiev, V. Presentation, July 2021.

@Misc{Alexiev2021-EnergyKG,
  author       = {Vladimir Alexiev},
  title        = {{Energy Knowledge Graphs}},
  howpublished = {presentation},
  month        = jul,
  year         = 2021,
  url          = {https://docs.google.com/presentation/d/1GcJqTZFRptX5lAGBA2RXThreGxH9LQAZi5qPnJX1tTQ/edit},
  keywords     = {knowledge graphs, data spaces, European Energy Data Space, CIM, CGMES, ENTSOE, Single Energy Market, energy market transparency, EU Green Deal, industry digitization},
  address      = {Presentation to IIA/KeyLogic and US DOE NETL and OSTI},
  abstract     = {Presents the EU Data Spaces initiatives, Single Electricity Market, ENTSOE market transparency data, IEC Common Information Model (CIM), ENTSOE Common Grid Model Exchange Specification (CGMES), and how Energy KGs can improve data integration in the energy domain.},
}

Diverse Uses of a Semantic Graph Database for Knowledge Organization and Research. Alexiev, V. In European Data Conference on Reference Data and Semantics (ENDORSE 2021), page 47, July 2021. European Commission: Directorate-General for Informatics, Publications Office of the European Union, ISA2 Programme.

@InProceedings{Alexiev-ENDORSE-2021,
  author       = {Vladimir Alexiev},
  title        = {{Diverse Uses of a Semantic Graph Database for Knowledge Organization and Research}},
  booktitle    = {{European Data Conference on Reference Data and Semantics (ENDORSE 2021)}},
  year         = 2021,
  pages        = 47,
  month        = jul,
  organization = {European Commission: Directorate-General for Informatics, Publications Office of the European Union, ISA2 Programme},
  url          = {https://op.europa.eu/o/opportal-service/download-handler?identifier=41b06a9b-e388-11eb-895a-01aa75ed71a1&format=pdf&language=en&productionSystem=cellar},
  url_Github   = {https://github.com/VladimirAlexiev/ontotext-graphdb-applications},
  url_PPT      = {https://github.com/VladimirAlexiev/ontotext-graphdb-applications/raw/master/Diverse%20Uses%20of%20a%20Semantic%20Graph%20Database%20for%20Knowledge%20Organization%20and%20Research%20(ENDORSE%202021).pptx},
  url_Slides   = {https://op.europa.eu/documents/7525478/8087182/ALEXIEV_presentation_Diverse+Uses+of+a+Semantic+Graph+Database+for+Knowledge+Organization+and+Research.pdf/b27afc2c-3db7-749b-c50c-52b3ded79f3c},
  url_Video    = {https://www.youtube.com/watch?v=0q63x2P1V0o&list=PLT5rARDev_rmGr_LJkr7zcI-Qul7yOOHO&index=4&t=4780s},
  url_Zotero   = {https://www.zotero.org/groups/2744757/ontotext-graphdb},
  keywords     = {bibliography, semantic database, graph database, semantic repository, knowledge graph, Knowledge Organization System, VocBench, PoolParty, Synaptica, Semaphore, EuroVoc, AgroVoc, Getty Vocabularies, social media analytics, data marketplaces, business process management, enterprise data integration, statistical data, engineering, smart cities, sensor networks, life sciences, biomedical ontologies, medicines, chemistry, linguistic data, semantic publishing, semantic text analysis, geographic information, master data management, academic/research data, COVID, Zika virus, Quran, bilingual data, art history, Holocaust research, musical events, musical adaptations, iconography, food and drink, tourism, investment decision support, economic research, offshore leaks, maritime data, construction projects, building information management, crisis management, critical incidents, data journalism, clinical trials, investment recommendations},
  doi          = {10.2830/44569},
  isbn         = {978-92-78-42416-9},
  annote       = {Catalogue number: OA-03-21-303-EN-N},
  date         = {2021-07-12},
  abstract     = {Semantic Graph Databases are the foundation of Enterprise Knowledge Graphs. They are used in numerous industrial applications, but also in Knowledge Organization Management systems (thesaurus and ontology management systems), such as VocBench, SWC PoolParty, Synaptica Semaphore. Through VocBench, semantic databases manage or publish some of the most important thesauri: EuroVoc, AgroVoc, the Getty Vocabularies, etc. Semantic databases are also used in a wide variety of research domains and projects. Some have open source or free editions that make them an easy choice for academic research. We searched on Google Scholar and found 1000-1200 academic papers and theses mentioning one of the popular databases. We also found at least 50 books on Google Books that mention it. We started a Zotero bibliography on the topic (currently about 150 papers), and captured about 220 research topics, based on the titles of about 250 papers. We will present an analysis of reference data and research domains using a semantic database. Some of the traditional topics include: social media analytics, data marketplaces, business process management, enterprise data integration, statistical data, engineering, smart cities, sensor networks, life sciences, biomedical ontologies, medicines, chemistry, linguistic data, semantic publishing, semantic text analysis, geographic information, master data management. Newer or more exotic topics include academic/research data, COVID and Zika viruses, Quran and bilingual Arabic-English data, art history, Holocaust research, musical events and adaptations, iconography, food and drink, tourism, investment decision support, economic research, offshore leaks, maritime data, construction projects, building information management, crisis management, critical incidents and infrastructures, data journalism, clinical trials and specific medical topics (e.g. intestinal cells, intracoronal tooth restorations, vaccines, toxicology), investment recommendations, etc.},
}

The euBusinessGraph Ontology: a Lightweight Ontology for Harmonizing Basic Company Information. Roman, D.; Alexiev, V.; Paniagua, J.; Elvesaeter, B.; von Zernichow, B. M.; Soylu, A.; Simeonov, B.; and Taggart, C. Semantic Web - Interoperability, Usability, Applicability (SWJ), pages 41-68, November 2021.

@Article{EBG-2020-SWJ,
  author       = {Dumitru Roman and Vladimir Alexiev and Javier Paniagua and Brian Elvesaeter and Bjorn Marius von Zernichow and Ahmet Soylu and Boyan Simeonov and Chris Taggart},
  title        = {{The euBusinessGraph Ontology: a Lightweight Ontology for Harmonizing Basic Company Information}},
  journal      = {{Semantic Web - Interoperability, Usability, Applicability (SWJ)}},
  year         = 2021,
  pages        = {41-68},
  month        = nov,
  url          = {https://www.semantic-web-journal.net/content/eubusinessgraph-ontology-lightweight-ontology-harmonizing-basic-company-information-0},
  url_Published= {https://content.iospress.com/articles/semantic-web/sw210424},
  keywords     = {Company data, Knowledge Graph, Ontology, Linked data},
  issue        = 13,
  publisher    = {IOS Press},
  doi          = {10.3233/SW-210424},
  abstract     = {Company data, ranging from basic company information such as company name(s) and incorporation date to complex balance sheets and personal data about directors and shareholders, are the foundation that many data value chains depend upon in various sectors (e.g., business information, marketing and sales, etc.). Company data becomes a valuable asset when data is collected and integrated from a variety of sources, both authoritative (e.g., national business registers) and non-authoritative (e.g., company websites). Company data integration is however a difficult task, primarily due to the heterogeneity and complexity of company data and the lack of generally agreed upon semantic descriptions of the concepts in this domain. In this article, we introduce the euBusinessGraph ontology as a lightweight mechanism for harmonising company data for the purpose of aggregating, linking, provisioning and analysing basic company data. The article provides an overview of the related work, ontology scope, ontology development process, explanations of core concepts and relationships, and the implementation of the ontology. Furthermore, we present scenarios where the ontology was used, among others, for publishing company data (business knowledge graph) and for comparing data from various company data providers. The euBusinessGraph ontology serves as an asset not only for enabling various tasks related to company data but also as a foundation on which various extensions can be built.},
}

Canadian Heritage Datasets: Linking and Publishing as LOD. Alexiev, V.; Michon, P.; and Dunn, H. In Wébinaire Wiki, data et GLAM 2021, June 2021. Wikimédia France, Etalab.

@InProceedings{Alexiev2021-Wiki-GLAM,
  author       = {Vladimir Alexiev and Philippe Michon and Heather Dunn},
  title        = {{Canadian Heritage Datasets: Linking and Publishing as LOD}},
  booktitle    = {{Wébinaire Wiki, data et GLAM 2021}},
  year         = 2021,
  month        = jun,
  organization = {Wikimédia France, Etalab},
  url          = {https://docs.google.com/presentation/d/1yr_FVXeTrFpR-lu8C_QT2ciDtPioEtul2lCyFTh82y4/edit},
  keywords     = {LOD, GLAM, Wikidata, CHIN, Canadian Heritage, Nomenclature, Creators in Canada, Makers in Canada, Artefacts Canada, DOPHEDA, CIDOC CRM, linked.art},
  abstract     = {Linked Open Data (LOD) projects underway at the Canadian Heritage Information Network (CHIN) have involved aligning and linking vocabularies used in cultural institutions (among them French thesauri, AAT, and Wikidata), and modeling the semantic representation of heritage content by using commonly established ontologies and aligning to modeling efforts at other GLAM institutions. This presentation will describe some of the problems and successes encountered in these projects, as well as thoughts on future directions and the potential of LOD approaches for heritage content.},
}

2020 (5)

Wikidata and Icons: KGs for GLAM. Alexiev, V. Presentation, January 2020.

@Misc{Alexiev2020-Wikidata-Icons,
  author       = {Vladimir Alexiev},
  title        = {{Wikidata and Icons: KGs for GLAM}},
  howpublished = {presentation},
  month        = jan,
  year         = 2020,
  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20200130-Wikidata-Icons/Slides.html},
  keywords     = {knowledge graphs, semantic integration, GLAM, Wikidata, OpenRefine, Mix-n-Match, icons, religious icons, iconography, iconographic subject, CLADA},
  address      = {CLADA BG Consortium Meeting, Sofia, Bulgaria},
  abstract     = {This presentation introduces the concepts of knowledge graphs, semantic integration, Wikidata, and Wikidata "tools of the trade" to a GLAM audience. We then show the current state of religious Icons in Wikidata, and show an example of how to edit various entities: iconographer, iconographic school, erminia, icon, measurements, material, technique, iconographic subject (a title of the Virgin Mary), scientific article, academic journal, issue. We finally show a simplified form of the resulting knowledge graph, including the graph of all these entities, plus links to external sources: Getty AAT, Nomenclature for Museum Cataloging, ISBN, ISSN, academia.edu.},
}
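
A minimal sketch of the kind of query such a knowledge graph enables, listing icons and their iconographic subjects. Python with SPARQLWrapper against the public Wikidata Query Service; it assumes Q132137 ("icon") as the class and uses wdt:P180 ("depicts"), both of which may need adjusting:

# Sketch only: icons and what they depict, via the Wikidata Query Service.
from SPARQLWrapper import SPARQLWrapper, JSON

QUERY = """
SELECT ?icon ?iconLabel ?subjectLabel WHERE {
  ?icon wdt:P31 wd:Q132137 ;      # instance of icon (assumed class)
        wdt:P180 ?subject .       # depicts
  SERVICE wikibase:label { bd:serviceParam wikibase:language "en,bg". }
}
LIMIT 20
"""

endpoint = SPARQLWrapper(
    "https://query.wikidata.org/sparql",
    agent="icons-kg-sketch/0.1",  # placeholder User-Agent for WDQS etiquette
)
endpoint.setQuery(QUERY)
endpoint.setReturnFormat(JSON)
for b in endpoint.query().convert()["results"]["bindings"]:
    print(b["iconLabel"]["value"], "->", b["subjectLabel"]["value"])
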

Bulgarian Icons in Wikidata and EDM. Alexiev, V.; Tarkalanov, P.; Georgiev, N.; and Pavlova, L. In Digital Presentation and Preservation of Cultural and Scientific Heritage (DIPP 2020), volume 10, Burgas, Bulgaria, September 2020. Institute of Mathematics and Informatics (IMI BAS), Sofia.

@InProceedings{AlexievEtAl-Bulgarian-Icons-DIPP2020,
  author       = {Vladimir Alexiev and Plamen Tarkalanov and Nikola Georgiev and Lilia Pavlova},
  title        = {{Bulgarian Icons in Wikidata and EDM}},
  booktitle    = {{Digital Presentation and Preservation of Cultural and Scientific Heritage (DIPP 2020)}},
  year         = 2020,
  volume       = 10,
  month        = sep,
  address      = {Burgas, Bulgaria},
  publisher    = {Institute of Mathematics and Informatics (IMI BAS), Sofia},
  url          = {https://dipp.math.bas.bg/images/2020/045-064_1.2_iDiPP2020-24_v.1c.pdf},
  url_Slides   = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20200703-Bulgarian-icons/Slides.html},
  keywords     = {Knowledge Graphs, Semantic Integration, GLAM, Wikidata, Mix-n-Match, OpenRefine, Icons, Religious Icons, Iconography, Iconographic Subject, CLADA, BIDL, Virtual Encyclopedia of Bulgarian Icons},
  issn         = {1314-4006},
  eissn        = {2535-0366},
  abstract     = {We briefly describe Wikidata, its importance for GLAM institutions, and iconographic authorities in Mix-n-Match. Then we propose an Icon Knowledge Graph Model comprising the entities: iconographer, iconographic school, herminia, icon, measurements, material, technique, iconographic subject (saint or a title of the Virgin Mary), location (city, monastery, church, museum), scientific article, academic journal, issue, links to LOD datasets (e.g. VIAF, Getty AAT, ISBN, ISSN, academia.edu). Then we introduce the Virtual Encyclopedia of Bulgarian Icons (BIDL) and describe how we exported it to Wikidata, while coreferencing to iconographers, saints, locations; and the extra info we added for such entities. Finally, we describe a conversion to the Europeana Data Model, including details such as links to Wikidata, bilingual descriptions, language tags, providers. The online version of the paper includes live links; the accompanying presentation includes more images and queries.},
}
\n \n\n \n \n \n \n \n \n ECLASS RDF Representation.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n 2020.\n \n\n\n\n
\n\n\n\n \n \n \"ECLASSPaper\n  \n \n \n \"ECLASS slides\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Misc{Alexiev2020-ECLASS,\n  author       = {Vladimir Alexiev},\n  title        = {{ECLASS RDF} Representation},\n  year         = 2020,\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20201221-ECLASS-RDF/index.html},\n  url_Slides   = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20201221-ECLASS-RDF/Slides.html},\n  keywords     = {product data, product catalogs, product classification, product characteristics, ECLASS, RDF representation, AAS},\n  date         = {2020-12-21},\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Bird's-Eye View of euBusinessGraph: A Business Knowledge Graph for Company Data.\n \n \n \n \n\n\n \n Roman, D.; Alexiev, V.; Paniagua, J.; Elvesaeter, B.; Zernichow, B. M. V.; Soylu, A.; Simeonov, B.; and Taggart, C.\n\n\n \n\n\n\n In International Semantic Web Conference: Posters, Demos, and Industry Tracks (ISWC 2020), pages 39-44, November 2020. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{EBG-2020-ISWC,\n  author       = {Dumitru Roman and Vladimir Alexiev and Javier Paniagua and Brian Elvesaeter and Bjorn Marius Von Zernichow and Ahmet Soylu and Boyan Simeonov and Chris Taggart},\n  title        = {{A Bird's-Eye View of euBusinessGraph: A Business Knowledge Graph for Company Data}},\n  booktitle    = {{International Semantic Web Conference: Posters, Demos, and Industry Tracks (ISWC 2020)}},\n  year         = 2020,\n  pages        = {39-44},\n  month        = nov,\n  url          = {https://ceur-ws.org/Vol-2721/paper493.pdf},\n  keywords     = {Company data, Knowledge Graph, Ontology, Linked data},\n  abstract     = {This poster paper provides an overview of euBusinessGraph, a business knowledge graph for basic company data, together with related artefacts (datasets, ontology, and infrastructure), and its use for creating a prototype for a data marketplace for basic company data. euBusinessGraph was developed by aggregating, linking, and provisioning data from several distributed data sources.},\n}\n\n
\n
\n\n\n
\n This poster paper provides an overview of euBusinessGraph, a business knowledge graph for basic company data, together with related artefacts (datasets, ontology, and infrastructure), and its use for creating a prototype for a data marketplace for basic company data. euBusinessGraph was developed by aggregating, linking, and provisioning data from several distributed data sources.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Exploring Industry 4.0 Standards with the Ontotext Platform.\n \n \n \n \n\n\n \n Alexiev, V.; and Chervenski, M.\n\n\n \n\n\n\n In Semantics 2020 webinar, September 2020. \n \n\n\n\n
\n\n\n\n \n \n \"ExploringPaper\n  \n \n \n \"Exploring pdf\n  \n \n \n \"Exploring slides\n  \n \n \n \"Exploring video\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 4 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{Alexiev2020-I40KG,\n  author       = {Vladimir Alexiev and Miroslav Chervenski},\n  title        = {{Exploring Industry 4.0 Standards with the Ontotext Platform}},\n  booktitle    = {{Semantics 2020 webinar}},\n  year         = 2020,\n  month        = sep,\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20200908-i40kg-semantics/paper.pdf},\n  url_PDF      = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20200908-i40kg-semantics/presentation.pdf},\n  url_Slides   = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20200908-i40kg-semantics/presentation.html},\n  url_Video    = {https://2020-eu.semantics.cc/exploring-industry-40-standards-graph-using-graphql-mapped-sparql},\n  keywords     = {industrial data, Industry 4.0, Industry 4.0 Knowledge Graph, I40KG, ISO 15926, RAMI, AAS, Knowledge Graph, GraphQL, Ontotext Platform},\n  abstract     = {Exploring Industry 4.0 Standards Graph using GraphQL mapped to SPARQL via semantic business objects. With the coming of Industry 4.0 and the continuing digitization of manufacturing, construction, oil & gas, automotive etc, a large number of relevant standards have been proposed, approved, put through use cases and interoperability exercises, and some of them found massive adoption in the industry. Older established standards have been integrated or "semanticized", i.e. harmonized using semantic integration techniques. ISO 15926, in development for nearly 30 years, has been touted as the "lingua franca" for global interoperability, but its complexity is difficult for most people to master. The standards landscape is large and puzzling; thanks to the so-called Industry 4.0 Knowledge Graph (formerly Standards Ontology) one can get a good overview and categorization of such relevant standards. We demonstrate a simple way to access Industry 4.0 standards using GraphQL through the Ontotext Platform. The latter features GraphQL interfaces to make it easier for application developers to access knowledge graphs without tedious development of back-end APIs or complex SPARQL. The underlying Semantic Object service implements an efficient GraphQL to SPARQL translation optimized for GraphDB, as well as a generic configurable security model.},\n}\n\n
\n
\n\n\n
\n Exploring Industry 4.0 Standards Graph using GraphQL mapped to SPARQL via semantic business objects. With the coming of Industry 4.0 and the continuing digitization of manufacturing, construction, oil & gas, automotive etc, a large number of relevant standards have been proposed, approved, put through use cases and interoperability exercises, and some of them found massive adoption in the industry. Older established standards have been integrated or \"semanticized\", i.e. harmonized using semantic integration techniques. ISO 15926, in development for nearly 30 years, has been touted as the \"lingua franca\" for global interoperability, but its complexity is difficult for most people to master. The standards landscape is large and puzzling; thanks to the so-called Industry 4.0 Knowledge Graph (formerly Standards Ontology) one can get a good overview and categorization of such relevant standards. We demonstrate a simple way to access Industry 4.0 standards using GraphQL through the Ontotext Platform. The latter features GraphQL interfaces to make it easier for application developers to access knowledge graphs without tedious development of back-end APIs or complex SPARQL. The underlying Semantic Object service implements an efficient GraphQL to SPARQL translation optimized for GraphDB, as well as a generic configurable security model.\n
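 A hedged sketch of the access pattern the abstract describes: posting a GraphQL query from Python. The endpoint URL and the standard/label schema are illustrative assumptions for this bibliography, not the Ontotext Platform's documented API.
 
# Hypothetical sketch of querying an Industry 4.0 standards graph over GraphQL.
# The endpoint URL and the 'standard' object with 'id'/'label' fields are
# assumed for illustration; consult the Ontotext Platform docs for the real schema.
import requests

GRAPHQL_ENDPOINT = "http://localhost:9995/graphql"  # assumed deployment URL

QUERY = """
{
  standard {
    id
    label
  }
}
"""

resp = requests.post(GRAPHQL_ENDPOINT, json={"query": QUERY})
resp.raise_for_status()
for std in resp.json()["data"]["standard"]:
    print(std["id"], "-", std["label"])
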
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2019\n \n \n (4)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Names and Networks. Holocaust Victim Communities.\n \n \n \n \n\n\n \n Eickhoff, M.; de Leeuw, D.; Nikolova, I.; Tagarev, A.; and Alexiev, V.\n\n\n \n\n\n\n Technical Report EHRI, 2019.\n \n\n\n\n
\n\n\n\n \n \n \"NamesPaper\n  \n \n \n \"Names tr\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@TechReport{EHRI-names-2019,\n  author       = {Eickhoff, Martijn and de Leeuw, Daan and Nikolova, Ivelina and Tagarev, Andrey and Alexiev, Vladimir},\n  title        = {{Names and Networks. Holocaust Victim Communities}},\n  institution  = {EHRI},\n  year         = 2019,\n  type         = {Internal document WP14: Report on research use case},\n  url          = {https://pure.knaw.nl/portal/en/publications/names-and-networks-holocaust-victim-communities(cf76f212-80f9-4b31-b693-b98451feb665).html},\n  url_TR       = {https://hdl.handle.net/20.500.11755/cf76f212-80f9-4b31-b693-b98451feb665},\n  language     = {eng},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Semantic Integration Is What You Do Before The Deep Learning.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n presentation, May 2019.\n \n\n\n\n
\n\n\n\n \n \n \"SemanticPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Misc{Alexiev2019-devbg,\n  author       = {Vladimir Alexiev},\n  title        = {{Semantic Integration Is What You Do Before The Deep Learning}},\n  howpublished = {presentation},\n  month        = may,\n  year         = 2019,\n  url          = {https://dev.bg/събитие/machine-learning-semantic-integration-is-what-you-do-before-the-deep-learning/},\n  url_Slides   = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20190513-Semantics-and-ML-dev.bg.pptx},\n  keywords     = {knowledge graph, machine learning, deep learning},\n  address      = {dev.bg Machine Learning interest group, Sofia, Bulgaria},\n  abstract     = {It's well known that 80\% of the effort of a data scientist is spent on data preparation. Semantic integration is arguably the best way to spend this effort more efficiently and to reuse it between tasks, projects and organizations. Knowledge Graphs (KG) and Linked Open Data (LOD) have become very popular recently. They are used by Google, Amazon, Bing, Samsung, Springer Nature, Microsoft Academic, AirBnb… and any large enterprise that would like to have a holistic (360 degree) view of its business. The Semantic Web (web 3.0) is a way to build a Giant Global Graph, just like the normal web is a Global Web of Documents. IEEE already talks about Big Data Semantics. We review the topic of KGs and their applicability to Machine Learning.},\n}\n\n
\n
\n\n\n
\n It's well known that 80% of the effort of a data scientist is spent on data preparation. Semantic integration is arguably the best way to spend this effort more efficiently and to reuse it between tasks, projects and organizations. Knowledge Graphs (KG) and Linked Open Data (LOD) have become very popular recently. They are used by Google, Amazon, Bing, Samsung, Springer Nature, Microsoft Academic, AirBnb… and any large enterprise that would like to have a holistic (360 degree) view of its business. The Semantic Web (web 3.0) is a way to build a Giant Global Graph, just like the normal web is a Global Web of Documents. IEEE already talks about Big Data Semantics. We review the topic of KGs and their applicability to Machine Learning.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Semantic Archive Integration for Holocaust Research: the EHRI Research Infrastructure.\n \n \n \n \n\n\n \n Alexiev, V.; Nikolova, I.; and Hateva, N.\n\n\n \n\n\n\n Umanistica Digitale. March 2019.\n \n\n\n\n
\n\n\n\n \n \n \"SemanticPaper\n  \n \n \n \"Semantic pdf\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Article{AlexievNikolova2019-EHRI-UmanisticaDigitale,\n  author       = {Vladimir Alexiev and Ivelina Nikolova and Neli Hateva},\n  title        = {{Semantic Archive Integration for Holocaust Research: the EHRI Research Infrastructure}},\n  journal      = {{Umanistica Digitale}},\n  year         = 2019,\n  month        = mar,\n  url          = {https://umanisticadigitale.unibo.it/article/view/9049},\n  url_PDF      = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/AlexievNikolova2018-Semantic-Archive-Integration.pdf},\n  keywords     = {archives, Holocaust research, EHRI, research infrastructure, digital humanities, VRE, semantic integration, semantic archive integration, coreferencing, access points, thesauri, authorities, EAD, OAI PMH, ResourceSync, Geonames, Wikidata, VIAF, person matching, record linking, deduplication},\n  issue        = 4,\n  doi          = {10.6092/issn.2532-8816/9049},\n  publisher    = {Associazione per l'Informatica Umanistica e la Cultural Digitale, Universita di Bologna (AIUCD)},\n  editor       = {Laura Brazzo and Kepa J. Rodriguez},\n  categories   = {Arts and humanities, Digital libraries and archives, Information retrieval, Web searching and information discovery, Document searching, Document metadata, Semantic web description languages, Ontologies, Thesauri},\n  abstract     = {The European Holocaust Research Infrastructure (EHRI) is a large-scale EU project that involves 23 institutions and archives working on Holocaust studies, from Europe, Israel and the US. In its first phase (2011-2015) it aggregated archival descriptions and materials on a large scale and built a Virtual Research Environment (portal) for Holocaust researchers based on a graph database. In its second phase (2015-2019), EHRI2 seeks to enhance the gathered materials using semantic approaches: enrichment, coreferencing, interlinking. Semantic integration involves four of the 14 EHRI2 work packages and helps integrate databases, free text, and metadata to interconnect historical entities (people, organizations, places, historic events) and create networks. We will present some of the EHRI2 technical work, including critical issues we have encountered. WP10 (EAD) converts archival descriptions from various formats to standard EAD XML; transports EADs using OAI PMH or ResourceSync; ingests EADs to the EHRI database; enables use cases such as synchronization; coreferencing of textual Access Points to proper thesaurus references. WP11 (Authorities and Standards) consolidates and enlarges the EHRI authorities to render the indexing and retrieval of information more effective. It addresses Access Points in ingested EADs (normalization of Unicode, spelling, punctuation; deduplication; clustering; coreferencing to authority control), Subjects (deployment of a Thesaurus Management System in support of the EHRI Thesaurus Editorial Board), Places (coreferencing to Geonames); Camps and Ghettos (integrating data with Wikidata); Persons, Corporate Bodies (using USHMM HSV and VIAF); semantic (conceptual) search including hierarchical query expansion; interconnectivity of archival descriptions; permanent URLs; metadata quality; EAD RelaxNG and Schematron schemas and validation, etc. WP13 (Data Infrastructures) builds up domain knowledge bases from institutional databases by using deduplication, semantic data integration, semantic text analysis. It provides the foundation for research use cases on Jewish Social Networks and their impact on the chance of survival. WP14 (Digital Historiography Research) works on semantic text analysis (semantic enrichment), text similarity (e.g. clustering based on Neural Networks, LDA, etc), geo-mapping. It develops Digital Historiography researcher tools, including Prosopographical approaches.},\n}\n\n
\n
\n\n\n
\n The European Holocaust Research Infrastructure (EHRI) is a large-scale EU project that involves 23 institutions and archives working on Holocaust studies, from Europe, Israel and the US. In its first phase (2011-2015) it aggregated archival descriptions and materials on a large scale and built a Virtual Research Environment (portal) for Holocaust researchers based on a graph database. In its second phase (2015-2019), EHRI2 seeks to enhance the gathered materials using semantic approaches: enrichment, coreferencing, interlinking. Semantic integration involves four of the 14 EHRI2 work packages and helps integrate databases, free text, and metadata to interconnect historical entities (people, organizations, places, historic events) and create networks. We will present some of the EHRI2 technical work, including critical issues we have encountered. WP10 (EAD) converts archival descriptions from various formats to standard EAD XML; transports EADs using OAI PMH or ResourceSync; ingests EADs to the EHRI database; enables use cases such as synchronization; coreferencing of textual Access Points to proper thesaurus references. WP11 (Authorities and Standards) consolidates and enlarges the EHRI authorities to render the indexing and retrieval of information more effective. It addresses Access Points in ingested EADs (normalization of Unicode, spelling, punctuation; deduplication; clustering; coreferencing to authority control), Subjects (deployment of a Thesaurus Management System in support of the EHRI Thesaurus Editorial Board), Places (coreferencing to Geonames); Camps and Ghettos (integrating data with Wikidata); Persons, Corporate Bodies (using USHMM HSV and VIAF); semantic (conceptual) search including hierarchical query expansion; interconnectivity of archival descriptions; permanent URLs; metadata quality; EAD RelaxNG and Schematron schemas and validation, etc. WP13 (Data Infrastructures) builds up domain knowledge bases from institutional databases by using deduplication, semantic data integration, semantic text analysis. It provides the foundation for research use cases on Jewish Social Networks and their impact on the chance of survival. WP14 (Digital Historiography Research) works on semantic text analysis (semantic enrichment), text similarity (e.g. clustering based on Neural Networks, LDA, etc), geo-mapping. It develops Digital Historiography researcher tools, including Prosopographical approaches.\n
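 The EAD transport that WP10 describes rides on plain OAI-PMH. Below is a minimal harvesting loop in Python using only standard OAI-PMH 2.0 parameters; the endpoint is a placeholder, and oai_dc (the prefix every OAI-PMH server must support) stands in for a repository-specific EAD metadata prefix.
 
# Minimal OAI-PMH ListRecords harvester with resumption-token paging.
import requests
import xml.etree.ElementTree as ET

ENDPOINT = "https://example.org/oai"  # placeholder repository
NS = {"oai": "http://www.openarchives.org/OAI/2.0/"}

params = {"verb": "ListRecords", "metadataPrefix": "oai_dc"}
while True:
    root = ET.fromstring(requests.get(ENDPOINT, params=params).content)
    for header in root.iter("{http://www.openarchives.org/OAI/2.0/}header"):
        ident = header.find("oai:identifier", NS)
        print(ident.text if ident is not None else "(no identifier)")
    token = root.find(".//oai:resumptionToken", NS)
    if token is None or not (token.text or "").strip():
        break  # no more pages
    params = {"verb": "ListRecords", "resumptionToken": token.text.strip()}
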
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Linked Open Data: Ontologies, Datasets, Projects.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n In 1st International Conference on Museum Big Data, Doha, Qatar, May 2019. \n \n\n\n\n
\n\n\n\n \n \n \"LinkedPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{Alexiev2019-Qatar,\n  author       = {Vladimir Alexiev},\n  title        = {{Linked Open Data: Ontologies, Datasets, Projects}},\n  booktitle    = {{1st International Conference on Museum Big Data}},\n  year         = 2019,\n  month        = may,\n  address      = {Doha, Qatar},\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20190501-Museum Linked Open Data- Ontologies, Datasets, Projects.pdf},\n  keywords     = {American Art Collaborative, Canadian heritage, Conservation Space, Getty, LOD, LODLAM, ResearchSpace, SPARQL, Wikidata, semantic technologies, museum data},\n  howpublished = {Invited keynote},\n  abstract     = {The Galleries, Libraries, Archives and Museums (GLAM) sector deals with complex and varied data. Integrating that data, especially across institutions, has always been a challenge. Semantic data integration is the best approach to deal with such challenges. Linked Open Data (LOD) enable large-scale Digital Humanities (DH) research, collaboration and aggregation, allowing DH researchers to make connections between (and make sense of) the multitude of digitized Cultural Heritage (CH) available on the web. An upsurge of interest in semtech and LOD has swept the CH and DH communities. An active Linked Open Data for Libraries, Archives and Museums (LODLAM) community exists, CH data is published as LOD, and international collaborations have emerged. The value of LOD is especially high in the GLAM sector, since culture by its very nature is cross-border and interlinked. We present interesting LODLAM projects, datasets, and ontologies, as well as Ontotext's experience in this domain. An extended paper on these topics is also available. It has 77 pages, 67 figures, detailed info about CH content and XML standards, Wikidata and global authority control.},\n}\n\n
\n
\n\n\n
\n The Galleries, Libraries, Archives and Museums (GLAM) sector deals with complex and varied data. Integrating that data, especially across institutions, has always been a challenge. Semantic data integration is the best approach to deal with such challenges. Linked Open Data (LOD) enable large-scale Digital Humanities (DH) research, collaboration and aggregation, allowing DH researchers to make connections between (and make sense of) the multitude of digitized Cultural Heritage (CH) available on the web. An upsurge of interest in semtech and LOD has swept the CH and DH communities. An active Linked Open Data for Libraries, Archives and Museums (LODLAM) community exists, CH data is published as LOD, and international collaborations have emerged. The value of LOD is especially high in the GLAM sector, since culture by its very nature is cross-border and interlinked. We present interesting LODLAM projects, datasets, and ontologies, as well as Ontotext's experience in this domain. An extended paper on these topics is also available. It has 77 pages, 67 figures, detailed info about CH content and XML standards, Wikidata and global authority control.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2018\n \n \n (9)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n BigDataGrapes D3.1 - Data Modelling and Linking Components.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n Technical Report BigDataGrapes (H2020 project 780751), October 2018.\n \n\n\n\n
\n\n\n\n \n \n \"BigDataGrapesPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@TechReport{Alexiev2018-BigDataGrapes-D3.1-M9,\n  author       = {Vladimir Alexiev},\n  title        = {{BigDataGrapes D3.1 - Data Modelling and Linking Components}},\n  institution  = {BigDataGrapes (H2020 project 780751)},\n  year         = 2018,\n  type         = {Deliverable},\n  month        = oct,\n  url          = {https://doi.org/10.5281/zenodo.1482757},\n  doi          = {10.5281/zenodo.1482757},\n  abstract     = {WP3 Data & Semantics Layer is a core WP of the project. If we have no data, we cannot achieve almost any of the project objectives. Within this WP3, task T3.1 Data Modelling over Big Data Infrastructures has these objectives: • Explores partner data • Defines competence questions that the data should be able to answer • Studies relevant AgroBio ontologies • Defines semantic modelling principles and specific models • Studies user (researcher) requirements for discovering ontologies, mapping data, aligning data, etc. • Implements or adopts tools for these requirements. The document has the following structure: • Chapter 1 Introduction describes fundamental AgroBio data (observations and measurements), outlines the ontological representation of measurements, mentions possible alternatives (e.g. following existing AgroBio patterns vs using the W3C CUBE ontology), describes the steps of semantic data integration, and provides links to consortium resources related to the task. • Chapter 2 Relevant AgroBio Ontologies outlines the vast number of potentially relevant ontologies and the terms included in them. We provide some metrics (number of terms) and survey various Ontology Portals and Tools that are available for browsing, finding and using ontologies; and that can also serve as inspiration for developing requirements for tools to be developed/adopted by the project. • Chapter 3 Improving AgroBio Ontologies describes a variety of problems that we have found in AgroBio ontologies, and the initial steps we have taken to engage with the AgroBio communities to improve the quality of these ontologies. We also show a case of searching for a specific term (NDVI) required by specific partner data in a couple of ontology portals. • Chapter 4 Specific Project Data discusses specific consortium data (including problems of draft semantic data that will be corrected), data processing requirements and data access requirements. • Chapter 5 Conclusions provides conclusions, next steps and a bibliography. Deliverable D3.1 Data Modelling and Linking Components will have 3 iterations at M9, M21, M30. In this first iteration (M9), we describe the first steps taken for the realization of task T3.1. These initial steps were taken to clarify the scope and essential ingredients of the task. Since the project is early in its life cycle, we do not yet have finalized requirements for the tools to be developed by Task 3.1. Section 1.4 outlines the steps that we intend to follow, and the approximate point that we have reached within these steps.},\n}\n\n
\n
\n\n\n
\n WP3 Data &amp; Semantics Layer is a core WP of the project. If we have no data, we cannot achieve almost any of the project objectives. Within this WP3, task T3.1 Data Modelling over Big Data Infrastructures has these objectives: • Explores partner data • Defines competence questions that the data should be able to answer • Studies relevant AgroBio ontologies • Defines semantic modelling principles and specific models • Studies user (researcher) requirements for discovering ontologies, mapping data, aligning data, etc. • Implements or adopts tools for these requirements. The document has the following structure: • Chapter 1 Introduction describes fundamental AgroBio data (observations and measurements), outlines the ontological representation of measurements, mentions possible alternatives (e.g. following existing AgroBio patterns vs using the W3C CUBE ontology), describes the steps of semantic data integration, and provides links to consortium resources related to the task. • Chapter 2 Relevant AgroBio Ontologies outlines the vast number of potentially relevant ontologies and the terms included in them. We provide some metrics (number of terms) and survey various Ontology Portals and Tools that are available for browsing, finding and using ontologies; and that can also serve as inspiration for developing requirements for tools to be developed/adopted by the project. • Chapter 3 Improving AgroBio Ontologies describes a variety of problems that we have found in AgroBio ontologies, and the initial steps we have taken to engage with the AgroBio communities to improve the quality of these ontologies. We also show a case of searching for a specific term (NDVI) required by specific partner data in a couple of ontology portals. • Chapter 4 Specific Project Data discusses specific consortium data (including problems of draft semantic data that will be corrected), data processing requirements and data access requirements. • Chapter 5 Conclusions provides conclusions, next steps and a bibliography. Deliverable D3.1 Data Modelling and Linking Components will have 3 iterations at M9, M21, M30. In this first iteration (M9), we describe the first steps taken for the realization of task T3.1. These initial steps were taken to clarify the scope and essential ingredients of the task. Since the project is early in its life cycle, we do not yet have finalized requirements for the tools to be developed by Task 3.1. Section 1.4 outlines the steps that we intend to follow, and the approximate point that we have reached within these steps.\n
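 One modelling alternative the deliverable weighs is the W3C CUBE (qb:) ontology for observations and measurements. A minimal sketch of a single measurement as a qb:Observation follows; qb: is the real vocabulary, while the ex: dataset, plot dimension and NDVI measure are illustrative assumptions.
 
# A measurement modelled as a W3C RDF Data Cube observation, parsed with rdflib.
from rdflib import Graph

TTL = """
@prefix qb:  <http://purl.org/linked-data/cube#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix ex:  <http://example.org/bdg/> .

ex:plot a qb:DimensionProperty .   # where the measurement was taken
ex:ndvi a qb:MeasureProperty .     # what was measured (illustrative)

ex:obs1 a qb:Observation ;
    qb:dataSet ex:ndviDataset ;
    ex:plot    ex:vineyardPlot17 ;
    ex:ndvi    "0.73"^^xsd:decimal .
"""

g = Graph()
g.parse(data=TTL, format="turtle")
print(g.serialize(format="nt"))
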
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Linked Open Data and Ontotext Projects.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n presentation, November 2018.\n \n\n\n\n
\n\n\n\n \n \n \"LinkedPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Misc{Alexiev2018-CLADA-BG,\n  author       = {Vladimir Alexiev},\n  title        = {{Linked Open Data and Ontotext Projects}},\n  howpublished = {presentation},\n  month        = nov,\n  year         = 2018,\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20181109-CLADA-BG- Ontotext and LOD (68 slides).ppt},\n  keywords     = {LOD, LOD Cloud, semantic technologies, museum data, LODLAM, CIDOC CRM, Wikidata, Ontotext, CLARIN, DARIAH, CLADA},\n  address      = {CLADA-BG Kickoff meeting, BAS IICT, Sofia, Bulgaria},\n  pages        = 68,\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n euBusinessGraph Semantic Data Model.\n \n \n \n \n\n\n \n Alexiev, V.; Tarasova, T.; Paniagua, J.; Taggart, C.; Elvesaeter, B.; Seehusen, F.; Roman, D.; and Norheim, D.\n\n\n \n\n\n\n euBusinessGraph Consortium, June 2018.\n \n\n\n\n
\n\n\n\n \n \n \"euBusinessGraphPaper\n  \n \n \n \"euBusinessGraph source\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Manual{Alexiev2018-EBG-model,\n  title        = {{euBusinessGraph Semantic Data Model}},\n  author       = {Vladimir Alexiev and Tatiana Tarasova and Javier Paniagua and Chris Taggart and Brian Elvesaeter and Fredrik Seehusen and Dumitru Roman and David Norheim},\n  organization = {euBusinessGraph Consortium},\n  month        = jun,\n  year         = 2018,\n  url          = {https://docs.google.com/document/d/1dhMOTlIOC6dOK_jksJRX0CB-GIRoiYY6fWtCnZArUhU/edit},\n  url_Source   = {https://github.com/euBusinessGraph/eubg-data/tree/master/model},\n  abstract     = {This document describes the semantic model used by euBusinessGraph (EBG) to represent companies, their attributes, addresses, directors and CEOs, datasets by the different providers, provenance. It includes an informative description of classes and properties, gives examples and data provider rules, provides schema and instance diagrams. It also provides RDF bindings, i.e. classes and properties to be used for representation of company data. The github project euBusinessGraph/eubg-data/model provides a generated ontology and RDF shapes intended to validate submitted data (these are not yet complete).},\n  keywords     = {euBusinessGraph, firmographics, company data, organisation data, trade registers, POL data, linked data, business graph, economics, W3C Org, W3C RegOrg, Schema.org},\n}\n\n
\n
\n\n\n
\n This document describes the semantic model used by euBusinessGraph (EBG) to represent companies, their attributes, addresses, directors and CEOs, datasets by the different providers, provenance. It includes an informative description of classes and properties, gives examples and data provider rules, provides schema and instance diagrams. It also provides RDF bindings, i.e. classes and properties to be used for representation of company data. The github project euBusinessGraph/eubg-data/model provides a generated ontology and RDF shapes intended to validate submitted data (these are not yet complete).\n
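 To make the reused vocabularies concrete, here is a rough Turtle sketch of a company description in the spirit of the model, validated by parsing it with rdflib. The RegOrg/Org/Locn/ADMS terms are the real W3C properties; the exact EBG bindings may differ, so treat this as a sketch rather than the model itself.
 
from rdflib import Graph

TTL = """
@prefix rov:  <http://www.w3.org/ns/regorg#> .
@prefix org:  <http://www.w3.org/ns/org#> .
@prefix locn: <http://www.w3.org/ns/locn#> .
@prefix adms: <http://www.w3.org/ns/adms#> .
@prefix skos: <http://www.w3.org/2004/02/skos/core#> .
@prefix ex:   <http://example.org/company/> .

# A registered organization with its legal name, registration id and address.
ex:acme a rov:RegisteredOrganization , org:Organization ;
    rov:legalName "ACME Ltd" ;
    rov:registration ex:acme-reg ;
    locn:address [ a locn:Address ;
                   locn:fullAddress "1 Main St, Sofia, Bulgaria" ] .

ex:acme-reg a adms:Identifier ;
    skos:notation "BG123456789" .   # illustrative register number
"""

g = Graph()
g.parse(data=TTL, format="turtle")
print(len(g), "triples parsed")
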
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n euBusinessGraph Ontology.\n \n \n \n \n\n\n \n Alexiev, V.; and Paniagua, J.\n\n\n \n\n\n\n euBusinessGraph Consortium, October 2018.\n \n\n\n\n
\n\n\n\n \n \n \"euBusinessGraph source\n  \n \n \n \"euBusinessGraphPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Manual{Alexiev2018-EBG-ontology,\n  title        = {{euBusinessGraph Ontology}},\n  author       = {Vladimir Alexiev and Javier Paniagua},\n  organization = {euBusinessGraph Consortium},\n  month        = oct,\n  year         = 2018,\n  url_Source   = {https://github.com/euBusinessGraph/eubg-data/raw/master/model/ebg-ontology.ttl},\n  url          = {https://rawgit2.com/euBusinessGraph/eubg-data/master/ontology/_old/index.html},\n  abstract     = {The euBusinessGraph (ebg:) ontology represents companies, type/status/economic classification, addresses, identifiers, (and soon) directors and CEOs, dataset offerings.\nIt uses schema:domain/rangeIncludes (which are polymorphic) to describe which properties are applicable to a class,\nrather than rdfs:domain/range (which are monomorphic) to prescribe what classes must be applied to each node using a property.\nWe find that this enables more flexible reuse and combination of different ontologies.\nWe reuse the following ontologies and nomenclatures, and extend them where appropriate with classes and properties:\n- W3C Org, W3C RegOrg (basic company data),\n- W3C Time (officer membership),\n- W3C Locn (addresses),\n- schema.org (domain/rangeIncludes and various properties)\n- DBpedia ontology (jurisdiction)\n- NGEO and Spatial (NUTS administrative divisions)\n- ADMS (identifiers),\n- FOAF, SIOC (blog posts),\n- RAMON, SKOS (NACE economic classifications and various nomenclatures),\n- VOID (dataset descriptions).\nThis is only a reference. See more detail in the EBG Semantic Model document, which includes an informative description of classes and properties, gives examples and data provider rules, and provides more schema and instance diagrams.},\n  keywords     = {euBusinessGraph, firmographics, company data, organisation data, trade registers, POL data, linked data, business graph, economics, W3C Org, W3C RegOrg, Schema.org, domainIncludes, rangeIncludes},\n}\n\n
\n
\n\n\n
\n The euBusinessGraph (ebg:) ontology represents companies, type/status/economic classification, addresses, identifiers, (and soon) directors and CEOs, dataset offerings. It uses schema:domain/rangeIncludes (which are polymorphic) to describe which properties are applicable to a class, rather than rdfs:domain/range (which are monomorphic) to prescribe what classes must be applied to each node using a property. We find that this enables more flexible reuse and combination of different ontologies. We reuse the following ontologies and nomenclatures, and extend them where appropriate with classes and properties: - W3C Org, W3C RegOrg (basic company data), - W3C Time (officer membership), - W3C Locn (addresses), - schema.org (domain/rangeIncludes and various properties) - DBpedia ontology (jurisdiction) - NGEO and Spatial (NUTS administrative divisions) - ADMS (identifiers), - FOAF, SIOC (blog posts), - RAMON, SKOS (NACE economic classifications and various nomenclatures), - VOID (dataset descriptions). This is only a reference. See more detail in the EBG Semantic Model document, which includes an informative description of classes and properties, gives examples and data provider rules, and provides more schema and instance diagrams.\n
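 The polymorphic vs monomorphic distinction in one Turtle fragment: schema:domainIncludes merely lists the classes a property is applicable to, while rdfs:domain lets a reasoner infer the class of any subject that uses the property. The schema: and rdfs: terms are real; the ex: names are illustrative.
 
from rdflib import Graph

TTL = """
@prefix rdfs:   <http://www.w3.org/2000/01/rdf-schema#> .
@prefix schema: <http://schema.org/> .
@prefix ex:     <http://example.org/> .

# Polymorphic: applicable to both classes; nothing is inferred about subjects.
ex:legalName schema:domainIncludes ex:Company , ex:Cooperative .

# Monomorphic: every subject of ex:foundingDate is inferred to be ex:Company.
ex:foundingDate rdfs:domain ex:Company .
"""

g = Graph()
g.parse(data=TTL, format="turtle")
print(g.serialize(format="turtle"))
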
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Live OWL Documentation Environment (LODE) Extended with Schema and SKOS Properties.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n Technical Report Ontotext, May 2018.\n \n\n\n\n
\n\n\n\n \n \n \"LivePaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@TechReport{Alexiev2018-LODE,\n  author       = {Vladimir Alexiev},\n  title        = {{Live OWL Documentation Environment (LODE) Extended with Schema and SKOS Properties}},\n  institution  = {Ontotext},\n  year         = 2018,\n  type         = {software},\n  month        = may,\n  url          = {https://github.com/VladimirAlexiev/LODE},\n  keywords     = {LODE, ontology, ontology documentation, documentation generator, Schema.org, domainIncludes, rangeIncludes},\n  abstract     = {I've extended LODE to handle schema:domain/rangeIncludes in addition to rdfs:domain/range, some SKOS properties (example, scopeNote) and a number of typographic enhancements (e.g. emit multi-valued fields such as examples as a list). schema:domain/rangeIncludes (which are polymorphic) are used to describe which properties are applicable to a class, rather than rdfs:domain/range (which are monomorphic) that prescribe what classes must be applied to each node using a property. I find that this enables more flexible reuse and combination of different ontologies.},\n}\n\n
\n
\n\n\n
\n I've extended LODE to handle schema:domain/rangeIncludes in addition to rdfs:domain/range, some SKOS properties (example, scopeNote) and a number of typographic enhancements (e.g. emit multi-valued fields such as examples as a list). schema:domain/rangeIncludes (which are polymorphic) are used to describe which properties are applicable to a class, rather than rdfs:domain/range (which are monomorphic) that prescribe what classes must be applied to each node using a property. I find that this enables more flexible reuse and combination of different ontologies.\n
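 A toy ontology fragment carrying the annotations the extended LODE renders. skos:example, skos:scopeNote and schema:domainIncludes are the real property IRIs; the ex: terms are illustrative.
 
from rdflib import Graph

TTL = """
@prefix owl:    <http://www.w3.org/2002/07/owl#> .
@prefix skos:   <http://www.w3.org/2004/02/skos/core#> .
@prefix schema: <http://schema.org/> .
@prefix ex:     <http://example.org/> .

ex:ndvi a owl:DatatypeProperty ;
    schema:domainIncludes ex:Plot , ex:SatelliteImage ;  # polymorphic domain
    skos:scopeNote "Normalized Difference Vegetation Index, in [-1, 1]." ;
    skos:example "0.73" , "-0.05" .    # multi-valued; rendered as a list
"""

g = Graph()
g.parse(data=TTL, format="turtle")
print(len(g), "triples parsed")
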
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Museum Linked Open Data: Ontologies, Datasets, Projects (invited report).\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n In Digital Presentation and Preservation of Cultural and Scientific Heritage (DIPP 2018), volume 8, pages 19-50, Burgas, Bulgaria, September 2018. Institute of Mathematics and Informatics (IMI BAS), Sofia\n \n\n\n\n
\n\n\n\n \n \n \"MuseumPaper\n  \n \n \n \"Museum slides\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{Alexiev2018-MuseumLOD-DIPP2018,\n  author       = {Vladimir Alexiev},\n  title        = {{Museum Linked Open Data: Ontologies, Datasets, Projects (invited report)}},\n  booktitle    = {{Digital Presentation and Preservation of Cultural and Scientific Heritage (DIPP 2018)}},\n  year         = 2018,\n  volume       = 8,\n  pages        = {19-50},\n  month        = sep,\n  address      = {Burgas, Bulgaria},\n  publisher    = {Institute of Mathematics and Informatics (IMI BAS), Sofia},\n  url          = {https://dipp.math.bas.bg/images/2018/019-050_32_11-iDiPP2018-34.pdf},\n  url_Slides   = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Museum Linked Open Data (DIPP 2018, 21 slides).ppt},\n  keywords     = {semantic technologies, museum data, LODLAM, CIDOC CRM},\n  abstract     = {The Galleries, Libraries, Archives and Museums (GLAM) sector deals with complex and varied data. Integrating that data, especially across institutions, has always been a challenge. Semantic data integration is the best approach to deal with such challenges. Linked Open Data (LOD) enable large-scale Digital Humanities (DH) research, collaboration and aggregation, allowing DH researchers to make connections between (and make sense of) the multitude of digitized Cultural Heritage (CH) available on the web. An upsurge of interest in semtech and LOD has swept the CH and DH communities. An active Linked Open Data for Libraries, Archives and Museums (LODLAM) community exists, CH data is published as LOD, and international collaborations have emerged. The value of LOD is especially high in the GLAM sector, since culture by its very nature is cross-border and interlinked. We present interesting LODLAM projects, datasets, and ontologies, as well as Ontotext's experience in this domain. An extended version of this paper is available. It has 77 pages, 67 figures, detailed info about CH content and XML standards, Wikidata and global authority control.},\n  issn         = {1314-4006},\n  eissn        = {2535-0366},\n}\n\n
\n
\n\n\n
\n The Galleries, Libraries, Archives and Museums (GLAM) sector deals with complex and varied data. Integrating that data, especially across institutions, has always been a challenge. Semantic data integration is the best approach to deal with such challenges. Linked Open Data (LOD) enable large-scale Digital Humanities (DH) research, collaboration and aggregation, allowing DH researchers to make connections between (and make sense of) the multitude of digitized Cultural Heritage (CH) available on the web. An upsurge of interest in semtech and LOD has swept the CH and DH communities. An active Linked Open Data for Libraries, Archives and Museums (LODLAM) community exists, CH data is published as LOD, and international collaborations have emerged. The value of LOD is especially high in the GLAM sector, since culture by its very nature is cross-border and interlinked. We present interesting LODLAM projects, datasets, and ontologies, as well as Ontotext's experience in this domain. An extended version of this paper is available. It has 77 pages, 67 figures, detailed info about CH content and XML standards, Wikidata and global authority control.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Museum Linked Open Data: Ontologies, Datasets, Projects (extended version).\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n Technical Report Ontotext, September 2018.\n \n\n\n\n
\n\n\n\n \n \n \"MuseumPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@TechReport{Alexiev2018-MuseumLOD-extended,\n  author       = {Vladimir Alexiev},\n  title        = {{Museum Linked Open Data: Ontologies, Datasets, Projects (extended version)}},\n  institution  = {Ontotext},\n  year         = 2018,\n  month        = sep,\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Museum Linked Open Data (77 pages).pdf},\n  keywords     = {semantic technologies, museum data, LODLAM, CIDOC CRM, Wikidata, LIDO, CCO, CDWA, CONA},\n  pages        = 77,\n  abstract     = {The Galleries, Libraries, Archives and Museums (GLAM) sector deals with complex and varied data. Integrating that data, especially across institutions, has always been a challenge. Semantic data integration is the best approach to deal with such challenges. Linked Open Data (LOD) enable large-scale Digital Humanities (DH) research, collaboration and aggregation, allowing DH researchers to make connections between (and make sense of) the multitude of digitized Cultural Heritage (CH) available on the web. An upsurge of interest in semtech and LOD has swept the CH and DH communities. An active Linked Open Data for Libraries, Archives and Museums (LODLAM) community exists, CH data is published as LOD, and international collaborations have emerged. The value of LOD is especially high in the GLAM sector, since culture by its very nature is cross-border and interlinked. We present interesting LODLAM projects, datasets, and ontologies, as well as Ontotext's experience in this domain. This is an extended version of an invited report at the DIPP 2018 conference that covers CH content and XML standards, Wikidata and global authority control.},\n}\n\n
\n
\n\n\n
\n The Galleries, Libraries, Archives and Museums (GLAM) sector deals with complex and varied data. Integrating that data, especially across institutions, has always been a challenge. Semantic data integration is the best approach to deal with such challenges. Linked Open Data (LOD) enable large-scale Digital Humanities (DH) research, collaboration and aggregation, allowing DH researchers to make connections between (and make sense of) the multitude of digitized Cultural Heritage (CH) available on the web. An upsurge of interest in semtech and LOD has swept the CH and DH communities. An active Linked Open Data for Libraries, Archives and Museums (LODLAM) community exists, CH data is published as LOD, and international collaborations have emerged. The value of LOD is especially high in the GLAM sector, since culture by its very nature is cross-border and interlinked. We present interesting LODLAM projects, datasets, and ontologies, as well as Ontotext's experience in this domain. This is an extended version of an invited report at the DIPP 2018 conference that covers CH content and XML standards, Wikidata and global authority control.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A multimodal analytics platform for journalists analysing large-scale, heterogeneous multilingual and multimedia content.\n \n \n \n \n\n\n \n Vrochidis, S.; Moumtzidou, A.; Gialampoukidis, I.; Liparas, D.; Casamayor, G.; Wanner, L.; Heise, N.; Wagner, T.; Bilous, A.; Jamin, E.; Simeonov, B.; Alexiev, V.; Busch, R.; Arapakis, I.; and Kompatsiaris, I. Y.\n\n\n \n\n\n\n Frontiers in Robotics and AI, 5. October 2018.\n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Article{Vrochidis2018-Multimodal,\n  author       = {Stefanos Vrochidis and Anastasia Moumtzidou and Ilias Gialampoukidis and Dimitris Liparas and Gerard Casamayor and Leo Wanner and Nicolaus Heise and Tilman Wagner and Andriy Bilous and Emmanuel Jamin and Boyan Simeonov and Vladimir Alexiev and Reinhard Busch and Ioannis Arapakis and Ioannis Yiannis Kompatsiaris},\n  title        = {{A multimodal analytics platform for journalists analysing large-scale, heterogeneous multilingual and multimedia content}},\n  journal      = {{Frontiers in Robotics and AI}},\n  year         = 2018,\n  volume       = 5,\n  month        = oct,\n  url          = {https://doi.org/10.3389/frobt.2018.00123},\n  keywords     = {multimodal, analytics platforms, multilingual, multimedia, analyst, journalist, web crawling, semantic indexing, summarization},\n  topic        = {Machine Learning at Scale: How Big Data and AI are Transforming Engineering},\n  doi          = {10.3389/frobt.2018.00123},\n  issn         = {2296-9144},\n  eissn        = {2296-9144},\n  abstract     = {Analysts and journalists face the problem of having to deal with very large, heterogeneous, and multilingual data volumes that need to be analyzed, understood, and aggregated. Automated and simplified editorial and authoring processes could significantly reduce time, labor, and costs. Therefore, there is a need for unified access to multilingual and multicultural news story material, beyond the level of a nation, ensuring context-aware, spatiotemporal, and semantic interpretation, while also correlating and summarizing the interpreted material into a coherent gist. In this paper, we present a platform integrating multimodal analytics techniques, which are able to support journalists in handling large streams of real-time and diverse information. Specifically, the platform automatically crawls and indexes multilingual and multimedia information from heterogeneous resources. Textual information is automatically summarized and can be translated (on demand) into the language of the journalist. High-level information is extracted from both textual and multimedia content for fast inspection using concept clouds. The textual and multimedia content is semantically integrated and indexed using a common representation, to be accessible through a web-based search engine. The evaluation of the proposed platform was performed by several groups of journalists revealing satisfaction from the user side.},\n}\n\n
\n
\n\n\n
\n Analysts and journalists face the problem of having to deal with very large, heterogeneous, and multilingual data volumes that need to be analyzed, understood, and aggregated. Automated and simplified editorial and authoring processes could significantly reduce time, labor, and costs. Therefore, there is a need for unified access to multilingual and multicultural news story material, beyond the level of a nation, ensuring context-aware, spatiotemporal, and semantic interpretation, while also correlating and summarizing the interpreted material into a coherent gist. In this paper, we present a platform integrating multimodal analytics techniques, which are able to support journalists in handling large streams of real-time and diverse information. Specifically, the platform automatically crawls and indexes multilingual and multimedia information from heterogeneous resources. Textual information is automatically summarized and can be translated (on demand) into the language of the journalist. High-level information is extracted from both textual and multimedia content for fast inspection using concept clouds. The textual and multimedia content is semantically integrated and indexed using a common representation, to be accessible through a web-based search engine. The evaluation of the proposed platform was performed by several groups of journalists revealing satisfaction from the user side.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Digital Methods in Holocaust Studies: The European Holocaust Research Infrastructure.\n \n \n \n \n\n\n \n de Leeuw, D.; Bryant, M.; Frankl, M.; Nikolova, I.; and Alexiev, V.\n\n\n \n\n\n\n In 14th IEEE International Conference on eScience, Amsterdam, The Netherlands, October 2018. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"DigitalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{deLeeuw2018-EHRI,\n  author       = {Daan de Leeuw and Mike Bryant and Michal Frankl and Ivelina Nikolova and Vladimir Alexiev},\n  title        = {{Digital Methods in Holocaust Studies: The European Holocaust Research Infrastructure}},\n  booktitle    = {{14th IEEE International Conference on eScience}},\n  year         = 2018,\n  month        = oct,\n  address      = {Amsterdam, The Netherlands},\n  publisher    = {IEEE},\n  url          = {https://ieeexplore.ieee.org/document/8588640},\n  keywords     = {archives, Holocaust research, EHRI, research infrastructure, digital humanities, VRE, semantic integration, semantic archive integration, coreferencing, access points, thesauri, authorities, EAD, person matching, record linking, deduplication},\n  abstract     = {Digital methods and tools for the humanities also change historical research into the Holocaust. The European-funded Holocaust project EHRI has developed various digital tools and methods that facilitate Holocaust research. This paper will describe a number of them and discuss how they affect scholarship into the annihilation of European Jews.},\n  eisbn        = {978-1-5386-9156-4},\n  doi          = {10.1109/eScience.2018.00021},\n  isbn         = {978-1-5386-9157-1},\n}\n\n
\n
\n\n\n
\n Digital methods and tools for the humanities also change historical research into the Holocaust. The European-funded Holocaust project EHRI has developed various digital tools and methods that facilitate Holocaust research. This paper will describe a number of them and discuss how they affect scholarship into the annihilation of European Jews.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2017\n \n \n (10)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n How Not to Do Open Publications LOD.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n Github gist, August 2017.\n \n\n\n\n
\n\n\n\n \n \n \"HowPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Misc{Alexiev2016-How-not-to-do-Open-Publications,\n  author       = {Vladimir Alexiev},\n  title        = {{How Not to Do Open Publications LOD}},\n  howpublished = {Github gist},\n  month        = aug,\n  year         = 2017,\n  url          = {https://gist.github.com/VladimirAlexiev/90753af3a1148b7fd9bb194b2b0d7cbd},\n  keywords     = {OpenAIRE, open access, open publications, open data, linked research, CERIF},\n  abstract     = {A review of the OpenAIRE beta ontology https://lod.openaire.eu/vocab. An EC Research Infrastructure gone awry.},\n}\n\n
\n
\n\n\n
\n A review of the OpenAIRE beta ontology https://lod.openaire.eu/vocab. An EC Research Infrastructure gone awry.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n How to use Google Sheets to Manage Wikidata Coreferencing.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n Github gist, March 2017.\n \n\n\n\n
\n\n\n\n \n \n \"HowPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Misc{Alexiev2017-GSheet-Wikidata,\n  author       = {Vladimir Alexiev},\n  title        = {{How to use Google Sheets to Manage Wikidata Coreferencing}},\n  howpublished = {Github gist},\n  month        = mar,\n  year         = 2017,\n  url          = {https://gist.github.com/VladimirAlexiev/8201d614a819cb7d4023ce9aa315af65},\n  keywords     = {Google Sheets, Wikidata, GLAM, cultural heritage, Getty Vocabularies, GVP, AAT},\n  abstract     = {A previous post explained how to use SPARQL to find missing data on Wikidata (Getty Museum IDs), how to create such values (from museum webpage URLs) and how to format them properly for QuickStatements. Here I explain how to use Google Sheets to manage a more advanced task. The sheet AAT-Wikidata matches about 3k AAT concepts to Wikipedia, WordNet30 and BabelNet (it restored an old mapping to Wordnet, retrieved it from BabelNet, mapped to Wikipedia). For each row, it uses a formula to query the Wikipedia API and get the corresponding Wikidata ID (wikibase_item). This formula asks for results in XML format, and uses an XPath that fetches the WD ID from the resulting XML. Making 3k API calls is slow, so the Google sheet initially shows "Loading…" for all rows, and gradually "materializes" the WD IDs (Qnnnn) as they come in. I have periodically sorted the column and used "Edit> Paste special> Values only" for the "materialized" IDs in order to fix them and not cause re-fetching next time I open the Google sheet. Columns C,D,E are specially formatted to produce the required QuickStatements tab-delimited format. The benefit of Google Sheets is that it allows easy addition of columns and convenient facilities for manual tasks: Collaborative editing by several people at once. Column A for tracking which rows are checked, which are already inserted to WD, etc. Using filters to find rows of interest. E.g. check=1 means rows that are manually checked and ready for insertion to QuickStatements. After insertion, I change it to check=2 to mark it as already inserted. Column B for tracking already existing WD IDs. Conditional formatting to colour existing WD IDs (column B) that differ from my idea what is the matching WD ID (column C) and therefore must be checked.},\n}\n\n
\n
\n\n\n
\n A previous post explained how to use SPARQL to find missing data on Wikidata (Getty Museum IDs), how to create such values (from museum webpage URLs) and how to format them properly for QuickStatements. Here I explain how to use Google Sheets to manage a more advanced task. The sheet AAT-Wikidata matches about 3k AAT concepts to Wikipedia, WordNet30 and BabelNet (it restored an old mapping to Wordnet, retrieved it from BabelNet, mapped to Wikipedia). For each row, it uses a formula to query the Wikipedia API and get the corresponding Wikidata ID (wikibase_item). This formula asks for results in XML format, and uses an XPath that fetches the WD ID from the resulting XML. Making 3k API calls is slow, so the Google sheet initially shows \"Loading…\" for all rows, and gradually \"materializes\" the WD IDs (Qnnnn) as they come in. I have periodically sorted the column and used \"Edit> Paste special> Values only\" for the \"materialized\" IDs in order to fix them and not cause re-fetching next time I open the Google sheet. Columns C,D,E are specially formatted to produce the required QuickStatements tab-delimited format. The benefit of Google Sheets is that it allows easy addition of columns and convenient facilities for manual tasks: Collaborative editing by several people at once. Column A for tracking which rows are checked, which are already inserted to WD, etc. Using filters to find rows of interest. E.g. check=1 means rows that are manually checked and ready for insertion to QuickStatements. After insertion, I change it to check=2 to mark it as already inserted. Column B for tracking already existing WD IDs. Conditional formatting to colour existing WD IDs (column B) that differ from my idea what is the matching WD ID (column C) and therefore must be checked.\n
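 The same lookup replicated outside the spreadsheet, so it can be tested: page title to Wikidata ID via the Wikipedia API's pageprops. The IMPORTXML formula in the comment is an assumed reconstruction of the one the post describes, not a quote from it.
 
# Title -> Wikidata ID, the Python equivalent of the sheet formula.
# Assumed Sheets formula (reconstruction, not quoted from the post):
#   =IMPORTXML("https://en.wikipedia.org/w/api.php?action=query&prop=pageprops"
#              & "&format=xml&titles=" & A2, "//pageprops/@wikibase_item")
from typing import Optional
import requests

def wikidata_id(title: str) -> Optional[str]:
    r = requests.get(
        "https://en.wikipedia.org/w/api.php",
        params={"action": "query", "prop": "pageprops",
                "titles": title, "format": "json"},
    )
    for page in r.json()["query"]["pages"].values():
        return page.get("pageprops", {}).get("wikibase_item")
    return None

print(wikidata_id("Rembrandt"))  # expected: Q5598
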
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Practical Semantic Modeling, SPARQL, RDF Shapes, IoT/WoT/UoM.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n Ontotext Training, 80 Slides, October 2017.\n \n\n\n\n
\n\n\n\n \n \n \"PracticalPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Misc{Alexiev2017-PracticalSemanticModeling,\n  author       = {Vladimir Alexiev},\n  title        = {{Practical Semantic Modeling, SPARQL, RDF Shapes, IoT/WoT/UoM}},\n  howpublished = {Ontotext Training, 80 Slides},\n  month        = oct,\n  year         = 2017,\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20171025-Practical Semantic Modeling, SPARQL, RDF Shapes, IoT-WoT-UoM (201710).pptx},\n  keywords     = {RDF, SPARQL, RDF Shapes, rdfpuml, semantic modeling, ontology engineering, IoT, WoT, UoM},\n  abstract     = {RDF Formats. Semantic resolution and content negotiation. Prefixes, URL design (Namespace Carving). RDF Terms, Turtle, SPARQL. Semantic Data Modeling. Modeling vs Ontology Engineering. RDFS vs schema.org. Ontology design patterns. RDF Shapes. Org, RegOrg, Person, Locn Ontologies. euBusinessGraph Data Model. rdfpuml diagramming tool. SPARQL. Getty Sample Queries (https://vocab.getty.org/queries). euBusinessGraph sample queries (https://businessgraph.ontotext.com/sparql, Bulgarian Trade Register). Ontologies for Internet of Things, Web of Things, Units of Measure (IoT, WoT, UoM). Created 25 Oct 2017, Updated 16 Apr 2018},\n}\n\n
How to Count ULAN and SNAC in Wikidata. Alexiev, V. Blog, September 2017.
@Misc{Alexiev2017-ULAN-SNAC-Wikidata,
  author       = {Vladimir Alexiev},
  title        = {{How to Count ULAN and SNAC in Wikidata}},
  howpublished = {blog},
  month        = sep,
  year         = 2017,
  url          = {https://docs.google.com/document/d/1F4FGJqJ2lWponSnzxQpJR6Nq87Ecv3qXJ4ExcmWgIvU/edit#},
  keywords     = {Wikidata, Coreferencing, Getty Vocabularies, ULAN, SNAC},
}
Data Visualization with GraphDB and Workbench. Alexiev, V. Technical Report, Ontotext Corp, June 2017.
@TechReport{Alexiev2017-Visualization,
  author       = {Vladimir Alexiev},
  title        = {{Data Visualization with GraphDB and Workbench}},
  institution  = {Ontotext Corp},
  year         = 2017,
  month        = jun,
  url          = {https://docs.google.com/document/d/e/2PACX-1vTSQPHE0aUO3U1pgtQxk5RrSjsKZFM9Fx425VOdSPURLOGA_5zsGZzmMs9B75krzXh2c9DnHc34WRR8/pub},
  url_Slides   = {https://docs.google.com/presentation/d/1Udah3b8nc1oxjpi8XHtGX4nxIF6tlyrvo29QJkFKfgo/edit},
  url_Source   = {https://docs.google.com/document/d/1guwFHi9p4-ujFkrHF6dwMUZndzCmlX_gPyiBi6JlPTs/edit},
  url_Video    = {https://ontotext.com/knowledgehub/webinars/building-knowledge-data-visualization/},
  keywords     = {visualization, GraphDB, RDF, SPARQL, CNL, CUBE, OLAP, charts, graphs, NLP, seq2seq},
  abstract     = {Building Knowledge through Data Visualization. Data visualization enables analysts and organizations to see huge quantities of data clearly and identify patterns quickly. However, data volume, velocity and variety have increased immensely in recent years, and so has the need to see the links between data from many sources. By creating visualizations with graph databases, organizations get insights from all perspectives they wish and need to explore. Even a quick glance at the relationship structure reveals where unusually large clusters of nodes or edges are. More traditional charts and statistical visualizations are also very useful to see the structure of data. Expressing relations, graphs and data trends in a visual way turns data into knowledge. The accompanying webinar is designed to answer a common request from our community: how to make data visualizations from RDF datasets. Are there tools to help with developing queries? How can people who are not conversant with SPARQL get insights into data and understand its structure? How can they run SPARQL queries developed by others? We describe SPARQL editing and data visualization features available in GraphDB Workbench (GDB WB), or such that can be added with little programming. We also describe SPARQL writing aids and visualization tools that can be integrated with GraphDB. Detailed topics: Writing SPARQL, Built-in SPARQL Result Visualizations, Using SPARQL Results in Spreadsheets, Invoking SPARQL Queries and Query Parameterization, Tools that Help With Writing SPARQL Queries, Translating Natural Language to SPARQL, Tools for Statistical Visualizations, Graph Visualizations: Built-in to GDB WB, Developing, Visualization Toolkits, Declarative Visualization, RDF by Example, JDBC Data Access API. Last updated: Sep 2022 (HTML report).},
}
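As a hedged illustration of the "Invoking SPARQL Queries and Query Parameterization" topic (not code from the report): run a SELECT query against a GraphDB repository's standard SPARQL endpoint and keep the result as CSV for reuse in a spreadsheet. The repository name, the query and the naive parameter substitution are placeholders.

# Run a parameterized SPARQL SELECT against GraphDB and save the table as CSV.
# /repositories/<id> is GraphDB's standard SPARQL endpoint; 'mydata' is a placeholder.
import requests

ENDPOINT = "http://localhost:7200/repositories/mydata"

def select_csv(query, **bindings):
    """Naive string parameterization (illustrative only), result as CSV text."""
    for name, value in bindings.items():
        query = query.replace(f"?_{name}", f'"{value}"')
    resp = requests.get(ENDPOINT, params={"query": query},
                        headers={"Accept": "text/csv"}, timeout=60)
    resp.raise_for_status()
    return resp.text

QUERY = """
SELECT ?class (COUNT(*) AS ?instances)
WHERE { ?s a ?class }
GROUP BY ?class ORDER BY DESC(?instances)
"""

print(select_csv(QUERY))  # paste into a spreadsheet or feed a charting tool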
How to Add Museum IDs to Wikidata. Alexiev, V. Github gist, January 2017.
@Misc{Alexiev2017-Wikidata-Museum,
  author       = {Vladimir Alexiev},
  title        = {{How to Add Museum IDs to Wikidata}},
  howpublished = {Github gist},
  month        = jan,
  year         = 2017,
  url          = {https://gist.github.com/VladimirAlexiev/e51e256be18870ac5033901197ee8277},
  keywords     = {Wikidata, SPARQL, GLAM, cultural heritage, Getty Museum, JPGM},
  abstract     = {I use Wikidata SPARQL to find J. Paul Getty Museum objects without museum Object ID. Most of these objects have a property "described at URL" that includes the object ID I seek. I use SPARQL to transform the results to Wikidata QuickStatements format, then insert the IDs directly to Wikidata.},
}
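Below is a hedged reconstruction of the pipeline the abstract describes, not the gist verbatim: P973 ("described at URL") and P2582 ("J. Paul Getty Museum object ID") are my reading of the relevant Wikidata properties, and the URL filter and extraction regex are illustrative.

# Find items described at a Getty collection URL but missing P2582, extract the
# trailing number from the URL, and print QuickStatements rows.
import requests

QUERY = """
SELECT ?item ?objectId WHERE {
  ?item wdt:P973 ?url .
  FILTER(CONTAINS(STR(?url), "getty.edu/art/collection/"))
  FILTER NOT EXISTS { ?item wdt:P2582 [] }
  BIND(REPLACE(STR(?url), "^.*/(\\\\d+)/?$", "$1") AS ?objectId)
}
"""

rows = requests.get("https://query.wikidata.org/sparql",
                    params={"query": QUERY, "format": "json"},
                    timeout=60).json()["results"]["bindings"]
for r in rows:
    qid = r["item"]["value"].rsplit("/", 1)[-1]          # Q-number from the item URI
    print(f'{qid}\tP2582\t"{r["objectId"]["value"]}"')   # QuickStatements line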
Organization Datasets and Ontologies. Alexiev, V. Presentation, euBusinessGraph Project Kickoff, Oslo, Norway, January 2017.
@Misc{Alexiev2017-euBusinessGraph-organization-datasets-and-ontologies,
  author       = {Vladimir Alexiev},
  title        = {{Organization Datasets and Ontologies}},
  howpublished = {presentation},
  month        = jan,
  year         = 2017,
  url          = {https://docs.google.com/presentation/d/1s-mQwj_0cpbLFVmhZPUrzB_M5mISkZQug0Wiui6H218/pub},
  keywords     = {euBusinessGraph, organisation data, POL data, Dun and Bradstreet, DnB, Financial Industry Business Ontology, FIBO, Global Legal Entity Identifier, GLEI, Panama Papers, Linked Leaks, Bulgarian Trade Register, BG TR, Wikidata},
  address      = {euBusinessGraph Project Kickoff, Oslo, Norway},
}
euBusinessGraph: Company and Economic Data for Innovative Products and Services. Alexiev, V.; Kiryakov, A.; and Tarkalanov, P. In 13th International Conference on Semantic Systems (Semantics 2017), September 2017.
@InProceedings{AlexievKiryakovTarkalanov2017-euBusinessGraph-Semantics2017,
  author       = {Vladimir Alexiev and Atanas Kiryakov and Plamen Tarkalanov},
  title        = {{euBusinessGraph: Company and Economic Data for Innovative Products and Services}},
  booktitle    = {{13th International Conference on Semantic Systems (Semantics 2017)}},
  year         = 2017,
  month        = sep,
  url          = {https://rawgit2.com/webdata/SEMANTiCS2017-posters/master/papers_final/163_Alexiev/index.html},
  url_PDF      = {https://github.com/webdata/SEMANTiCS2017-posters/raw/master/papers_final/163_Alexiev/163_Alexiev.pdf},
  url_Slides   = {https://www.slideshare.net/valexiev1/eubusinessgraph-company-and-economic-data},
  url_Poster   = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/euBusinessGraph-poster-Semantics2017.pdf},
  keywords     = {euBusinessGraph, firmographics, company data, public procurement, linked data, business graph, economics},
  categories   = {Resource Description Framework (RDF), Ontologies, Enterprise applications},
  subtitle     = {{Enabling the European Business Graph for Innovative Data Products and Services}},
  abstract     = {Corporate information, including basic company firmographics (e.g., name(s), incorporation data, registered addresses, ownership and related entities), financials (e.g., balance sheets, ratings), and contextual data (e.g., addresses, economic activity classification, key officers, public tenders data, press mentions and events) are the foundation that many data value chains are built on. Furthermore, this type of information contributes to the transparency and accountability of enterprises, is instrumental input to the process of marketing and sales, and plays a key role in many business interactions. Collecting and aggregating data about a business entity from several public sources (be it private/public, official or non-official ones), and especially across country borders and languages, is a tedious, time-consuming, error-prone, and expensive operation which renders many potential business models non-feasible. The euBusinessGraph project integrates European company and economic data from various data providers, including OpenCorporates (the largest open database of company info crawled from official registers), Norway's Bronnoysund Register Center (official register data), SpazioDati (rich IT data from official registers, additional databases, web crawl of company sites, tender info, etc), EventRegistry events, GLEI, Panama Leaks, etc. euBusinessGraph is intended to overcome these barriers and provision several important business cases, such as economic journalism (Deutsche Welle), publication of rich company data (BRC), tender information service (CERVED), business intelligence (EVRY), etc. It will also provide a marketplace of company data, with some free search and faceting, leading to information about richer Data Offerings by specific providers and their pricing. We will present the work done on exploring relevant ontologies and vocabularies for describing companies, systems of identifiers, development of a unified data model, plans for data flows, data aggregation, matching and cross-linking, and the opportunities that lie ahead for the business cases and the data marketplace.},
}
Semantic Archive Integration for Holocaust Research: the EHRI Research Infrastructure. Alexiev, V.; and Nikolova, I. In Brazzo, L.; Alexiev, V.; Rodrigues, K.; and Mazzini, S., editors, Data Sharing, Holocaust Documentation and the Digital Humanities. Best Practices, Benefits, Case Studies (DSDH 2017), Venice, Italy, June 2017.
@InProceedings{AlexievNikolova2017-EHRI-DSDH17,
  author       = {Vladimir Alexiev and Ivelina Nikolova},
  title        = {{Semantic Archive Integration for Holocaust Research: the EHRI Research Infrastructure}},
  booktitle    = {{Data Sharing, Holocaust Documentation and the Digital Humanities. Best Practices, Benefits, Case Studies (DSDH 2017)}},
  year         = 2017,
  editor       = {Laura Brazzo and Vladimir Alexiev and Kepa Rodrigues and Silvia Mazzini},
  month        = jun,
  address      = {Venice, Italy},
  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Semantic Archive Integration for Holocaust Research- the EHRI Research Infrastructure (201707).pdf},
  keywords     = {archives, Holocaust research, EHRI, research infrastructure, digital humanities, VRE, semantic integration, semantic archive integration, coreferencing, access points, thesauri, authorities, EAD, OAI PMH, ResourceSync, Geonames, Wikidata, VIAF, person matching, record linking, deduplication},
  categories   = {Arts and humanities, Digital libraries and archives, Information retrieval, Web searching and information discovery, Document searching, Document metadata, Semantic web description languages, Ontologies, Thesauri},
  abstract     = {The European Holocaust Research Infrastructure (EHRI) is a large-scale EU project that involves 23 institutions and archives working on Holocaust studies, from Europe, Israel and the US. In its first phase (2011-2015) it aggregated archival descriptions and materials on a large scale and built a Virtual Research Environment (portal) for Holocaust researchers based on a graph database. In its second phase (2015-2019), EHRI2 seeks to enhance the gathered materials using semantic approaches: enrichment, coreferencing, interlinking. Semantic integration involves four of the 14 EHRI2 work packages and helps integrate databases, free text, and metadata to interconnect historical entities (people, organizations, places, historic events) and create networks. We will present some of the EHRI2 technical work, including critical issues we have encountered. WP10 (EAD) converts archival descriptions from various formats to standard EAD XML; transports EADs using OAI PMH or ResourceSync; ingests EADs to the EHRI database; enables use cases such as synchronization; coreferencing of textual Access Points to proper thesaurus references. WP11 (Authorities and Standards) consolidates and enlarges the EHRI authorities to render the indexing and retrieval of information more effective. It addresses Access Points in ingested EADs (normalization of Unicode, spelling, punctuation; deduplication; clustering; coreferencing to authority control), Subjects (deployment of a Thesaurus Management System in support of the EHRI Thesaurus Editorial Board), Places (coreferencing to Geonames), Camps and Ghettos (integrating data with Wikidata), Persons, Corporate Bodies (using USHMM HSV and VIAF); semantic (conceptual) search including hierarchical query expansion; interconnectivity of archival descriptions; permanent URLs; metadata quality; EAD RelaxNG and Schematron schemas and validation, etc. WP13 (Data Infrastructures) builds up domain knowledge bases from institutional databases by using deduplication, semantic data integration, and semantic text analysis. It provides the foundation for research use cases on Jewish Social Networks and their impact on the chance of survival. WP14 (Digital Historiography Research) works on semantic text analysis (semantic enrichment), text similarity (e.g. clustering based on Neural Networks, LDA, etc), and geo-mapping. It develops Digital Historiography researcher tools, including Prosopographical approaches.},
}
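To make the WP10 transport step concrete, here is a minimal OAI-PMH ListRecords harvester with resumption-token paging. The endpoint URL and the metadataPrefix are placeholders, not EHRI's actual configuration.

# Minimal OAI-PMH harvester: page through ListRecords via resumption tokens.
import requests
import xml.etree.ElementTree as ET

OAI = "https://example.org/oai"  # placeholder endpoint
NS = {"oai": "http://www.openarchives.org/OAI/2.0/"}

def harvest(metadata_prefix="oai_ead"):  # prefix is an assumption
    params = {"verb": "ListRecords", "metadataPrefix": metadata_prefix}
    while True:
        root = ET.fromstring(requests.get(OAI, params=params, timeout=60).content)
        yield from root.iter("{http://www.openarchives.org/OAI/2.0/}record")
        token = root.find(".//oai:resumptionToken", NS)
        if token is None or not (token.text or "").strip():
            break
        params = {"verb": "ListRecords", "resumptionToken": token.text.strip()}

for record in harvest():
    print(record.findtext(".//oai:identifier", namespaces=NS))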
Domain-specific modeling: a Food and Drink Gazetteer. Tagarev, A.; Tolosi, L.; and Alexiev, V. In Nguyen, N. T.; Kowalczyk, R.; Pinto, A. M.; and Cardoso, J., editors, Transactions on Computational Collective Intelligence XXVI, special issue on Keyword Search in Big Data, volume 10190 of LNCS, pages 186-209. Springer, July 2017.
@InProceedings{TagarevTolosiAlexiev2017-FD-extended,
  author       = {Andrey Tagarev and Laura Tolosi and Vladimir Alexiev},
  title        = {{Domain-specific modeling: a Food and Drink Gazetteer}},
  booktitle    = {{Transactions on Computational Collective Intelligence XXVI, special issue on Keyword Search in Big Data}},
  year         = 2017,
  editor       = {Ngoc Thanh Nguyen and Ryszard Kowalczyk and Alexandre Miguel Pinto and Jorge Cardoso},
  volume       = 10190,
  series       = {LNCS},
  pages        = {186-209},
  month        = jul,
  publisher    = {Springer},
  url          = {https://link.springer.com/chapter/10.1007/978-3-319-59268-8_9},
  url_Preprint = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Tagarev2017-DomainSpecificGazetteer.pdf},
  keywords     = {classification, categorization, Wikipedia, DBpedia, gazetteer, Europeana, Cultural Heritage, concept extraction, semantic enrichment, food and drink},
  doi          = {10.1007/978-3-319-59268-8_9},
  abstract     = {Our goal is to build a Food and Drink (FD) gazetteer that can serve for classification of general, FD-related concepts, efficient faceted search or automated semantic enrichment. Fully supervised design of domain-specific models ex novo is not scalable. Integration of several ready-made knowledge bases is tedious and does not ensure coverage. Completely data-driven approaches require a large amount of training data, which is not always available. For general domains (such as the FD domain), re-using encyclopedic knowledge bases like Wikipedia may be a good idea. We propose here a semi-supervised approach that uses a restricted Wikipedia as a base for the modeling, achieved by selecting a domain-relevant Wikipedia category as root for the model and all its subcategories, combined with expert and data-driven pruning of irrelevant categories.},
}
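The construction the abstract outlines (root category, subcategory closure, pruning) can be sketched against the live MediaWiki API. The root category, depth limit and blacklist below are illustrative, not the paper's actual lists.

# Breadth-first subcategory closure from a domain root, with naive pruning.
import requests
from collections import deque

API = "https://en.wikipedia.org/w/api.php"
BLACKLIST = {"Category:Food and drink stubs"}  # expert/data-driven pruning goes here

def subcategories(cat):
    """List direct subcategories of a Wikipedia category."""
    params = {"action": "query", "list": "categorymembers", "cmtitle": cat,
              "cmtype": "subcat", "cmlimit": "500", "format": "json"}
    data = requests.get(API, params=params, timeout=30).json()
    return [m["title"] for m in data["query"]["categorymembers"]]

def category_closure(root, max_depth=3):
    seen, queue = {root}, deque([(root, 0)])
    while queue:
        cat, depth = queue.popleft()
        if depth >= max_depth:
            continue
        for sub in subcategories(cat):
            if sub not in seen and sub not in BLACKLIST:
                seen.add(sub)
                queue.append((sub, depth + 1))
    return seen

print(len(category_closure("Category:Food and drink")))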
2016 (17)
Linked Open Data for Cultural Heritage. Alexiev, V. Ontotext webinar, 132 slides, September 2016.
@Misc{Alexiev2016-CH-webinar,
  author       = {Vladimir Alexiev},
  title        = {{Linked Open Data for Cultural Heritage}},
  howpublished = {Ontotext webinar, 132 slides},
  month        = sep,
  year         = 2016,
  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20160929-webinar/index-full.html},
  url_PDF      = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20160929-webinar/Linked%20Open%20Data%20for%20Cultural%20Heritage.pdf},
  url_Slides   = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20160929-webinar/index.html},
  url_Recording= {https://ontotext.com/knowledgehub/webinars/build-narratives-connect-artifacts-cultural-heritage/},
  keywords     = {AAT, Annotation, BTG, BTI, BTP, British Museum, Broader generic, Broader instantial, Broader partitive, CIDOC CRM, cultural heritage, EDM, ESE, Europeana, FRAD, FRBR, FRBRoo, FRSAD, Fundamental Concepts, Fundamental Relations, GLAM, Geonames, Getty, Getty Museum, ISNI, ISO 25964, LDBC, LOD, Metadata, Museum informatics, OAI, OAI PMH, OWLIM, Ontology, Ontotext GraphDB, Provenance, RDAinfo, RDF, ResearchSpace, SKOS, SKOS-XL, SPARQL, SPECTRUM, Schema, Seeing Standards, TGN, Taxonomy, Thesauri, ULAN, VIAF, Web Annotation, Wikidata, concept extraction, dataset, endpoint, faceted search, food and drink, gazetteer, inference, knowledge base, knowledge-based system, multimedia annotation, open data, practical applications, reasoning, semantic application, semantic enrichment, semantic integration, semantic mapping, semantic repository, semantic representation, semantic search, semantic technology, text analysis, virtual research environment, visualization, vocabularies},
  abstract     = {The Internet, global digitization efforts, Europe's Digital Agenda, continuing investments in Europeana, the Digital Public Library of America and many other initiatives have made millions upon millions of digitized cultural artifacts available on the net. We need to make sense of all this information: aggregate it, integrate it, provide cross-collection search, find links between entities and artefacts, build narratives, analyze data, support the scientific discourse, engage users… From ancient maps to bibliographic records, to paintings, to coins and hoards, to paleographic analysis, to prosopography factoids... everything is becoming more and more connected. A host of ontologies and metadata standards exist in the Cultural Heritage (CH) domain: CIDOC CRM, TEI5, LIDO, SPECTRUM, VRA Core, MPEG7, DC, ESE and EDM, OAI ORE and PMH, IIIF, ResourceSync... the list goes on and on. How many of the standards listed in Seeing Standards: A Visualization of the Metadata Universe (by Jenn Riley, Associate Dean for Digital Initiatives at McGill University Library) apply to your work? A number of established thesauri and gazetteers exist, and some of them are interconnected: DBPedia, Wikidata, VIAF, FAST, ULAN; GeoNames, Pleiades, TGN; LCSH, AAT, IconClass, Joconde, SVCN, WordNet, etc. The diagram below (by Michiel Hildebrand) shows a small part of this upcoming universe of CH data. How to use them in every-day collection management, cataloging, documentation and research? How to expose your institution's collections and other data to allow interlinking? Digital Humanities (DH) has emerged as a new and promising scientific discipline, with universities like King's College London establishing new departments devoted to it. As Jeffrey Schnapp writes in the Digital Humanities Manifesto 2.0: "Digital Humanities embraces and harnesses the expanded, global nature of today’s research communities as one of the great inter-disciplinary/post-disciplinary opportunities of our time. It dreams of models of knowledge production and reproduction that leverage the increasingly distributed nature of expertise and knowledge and transform this reality into occasions for scholarly innovation, disciplinary cross-fertilization, and the democratization of knowledge". In his keynote address at MCN 2014, Beyond Borders: The Humanities in the Digital Age, James Cuno (President and CEO of the J. Paul Getty Trust) emphasizes the role of modernizing Humanities and the value of Linked Data in cultural heritage informatics. The question also is how to preserve the role of libraries, museums and other Cultural Heritage institutions as centers of wisdom and culture into the new millennium. Aren't Google, Wikipedia, Facebook, Twitter and smartphone apps becoming the new centers of research and culture (or at least popular culture)? We believe the answers to many of these questions lie with Semantic Technology and Linked Data. They enable large-scale Digital Humanities research, collaboration and aggregation, and technological renewal of CH institutions. The Rosetta Stone was key to the deciphering of Egyptian hieroglyphs, by providing parallel text in three scripts: Ancient Egyptian, Demotic and Ancient Greek. Today semantic technologies play a similar role, allowing the Digital Humanist to make connections between (and make sense of) the multitude of digitized cultural artifacts available on the net. An upsurge of interest in semantic technology has swept the CH and DH communities. Meetups and summits, conferences and un-conferences, residences and hackathons are taking place every week. CH institutions are collaborating actively. An active Linked Open Data for Libraries, Archives and Museums (LODLAM) community has emerged, and the #LODLAM twitter hashtag sees active communication. Established institutions create branches that sound like web startups or Wikipedia offshoots (e.g. British Library Labs; Smithsonian Web-strategy and Smithsonian Commons; UK National Archives department of Web Continuity). The Galleries, Libraries, Archives and Museums (GLAM) sector deals with complex and varied data. Integrating that data, especially across institutions, has always been a challenge. On the other hand, the value of linked data is especially high in this sector, since culture by its very nature is cross-border and interlinked. In this webinar we present interesting LODLAM projects, datasets and ontologies, as well as Ontotext's experience in this domain.},
}
Using DBPedia in Europeana Food and Drink. Alexiev, V. Presentation, DBpedia Meeting, The Hague, Netherlands, February 2016.
@Misc{Alexiev2016-EFD-DBpedia,
  author       = {Vladimir Alexiev},
  title        = {{Using DBPedia in Europeana Food and Drink}},
  howpublished = {presentation},
  month        = feb,
  year         = 2016,
  url          = {https://drive.google.com/file/d/0B7je1jgVmCgIZzNiWmdqTGpDa28/view},
  url_PDF      = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20160212-Using-DBPedia-in-Europeana-Food-and-Drink.pdf},
  keywords     = {Europeana, cultural heritage, food and drink, DBpedia, Geonames, semantic application, faceted search, semantic search},
  address      = {DBpedia Meeting, The Hague, Netherlands},
  abstract     = {The Europeana Food and Drink project collects cultural heritage objects related to Food and Drink heritage and develops applications over them. As part of the project, Ontotext developed a FD Classification based on Wikipedia/DBpedia Categories, a semantic enrichment service that annotates each CHO with FD Topics and Places, and a semantic application (https://efd.ontotext.com/app) that implements hierarchical semantic facets and semantic search for these facets. We will also be packaging the enrichment as a service for others to use in a crowdsourced annotation application. We explain how we used Categories to build a domain-specific gazetteer, used external datasets (e.g. UMBEL domains and DBTax types), correlated DBpedia places to Geonames to use the place hierarchy, and the workings of the semantic application.},
}
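One enrichment step the abstract mentions, correlating DBpedia places to Geonames, can be illustrated with a query over the public DBpedia endpoint via owl:sameAs links (a sketch, not the project's code):

# For DBpedia places, follow owl:sameAs links into GeoNames so the GeoNames
# place hierarchy can back hierarchical facets.
import requests

QUERY = """
PREFIX dbo: <http://dbpedia.org/ontology/>
PREFIX owl: <http://www.w3.org/2002/07/owl#>
SELECT ?place ?geo WHERE {
  ?place a dbo:Place ; owl:sameAs ?geo .
  FILTER(STRSTARTS(STR(?geo), "http://sws.geonames.org/"))
} LIMIT 10
"""

rows = requests.get("https://dbpedia.org/sparql",
                    params={"query": QUERY,
                            "format": "application/sparql-results+json"},
                    timeout=60).json()["results"]["bindings"]
for r in rows:
    print(r["place"]["value"], "->", r["geo"]["value"])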
Europeana Food and Drink Semantic Demonstrator Extended. Alexiev, V.; Tagarev, A.; and Tolosi, L. Deliverable D3.20d, Europeana Food and Drink project, July 2016.
@TechReport{Alexiev2016-EFD-semapp-ext,
  author       = {Vladimir Alexiev and Andrey Tagarev and Laura Tolosi},
  title        = {{Europeana Food and Drink Semantic Demonstrator Extended}},
  institution  = {Europeana Food and Drink project},
  year         = 2016,
  type         = {Deliverable},
  number       = {D3.20d},
  month        = jul,
  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Europeana-Food-and-Drink-Semantic-Demonstrator-Extended-(D3.20d).pdf},
  keywords     = {Europeana, cultural heritage, food and drink, semantic application, semantic search, faceted search, semantic enrichment},
  abstract     = {Describes the additional development on the EFD Semantic Demonstrator performed after the official D3.20 deliverable (M22). It describes work performed between 31 October 2015 and 20 July 2016 (M31), the achieved results, the created data and enrichments, and the extended application functionality.},
}
Meet the Europeana Members Council: Vladimir Alexiev. Alexiev, V. Blog post, March 2016.
@Misc{Alexiev2016-EuropeanaMC-blog,
  author       = {Vladimir Alexiev},
  title        = {{Meet the Europeana Members Council: Vladimir Alexiev}},
  howpublished = {blog post},
  month        = mar,
  year         = 2016,
  url          = {https://pro.europeana.eu/blogpost/meet-the-members-council-vladimir-alexiev},
  keywords     = {cultural heritage, Europeana, EHRI, ResearchSpace, data quality, semantic enrichment},
  abstract     = {Describes the work of Ontotext, and in particular Vladimir Alexiev, in applying semantic technologies to cultural heritage.},
}
Getty Vocabularies: LOD Sample Queries. Alexiev, V. Getty Research Institute, 3.3 edition, May 2016.
@Manual{Alexiev2016-GVP-LOD-queries,
  title        = {{Getty Vocabularies: LOD Sample Queries}},
  author       = {Vladimir Alexiev},
  organization = {Getty Research Institute},
  edition      = {3.3},
  month        = may,
  year         = 2016,
  url          = {https://vocab.getty.edu/doc/queries/},
  keywords     = {Getty, GVP, vocabularies, thesauri, AAT, TGN, ULAN, SPARQL, ontology, SKOS, SKOS-XL, ISO 25964},
  abstract     = {We provide 120 sample queries for the Getty Vocabularies LOD that should allow you to learn to query the data effectively. We include searching for data, getting all data of a subject, all labels and their attributes, full-text search, getting an ordered hierarchy, charts, etc. The queries are organized in sections: general, TGN-specific, ULAN-specific, Language queries, Counting and descriptive info, Exploring the ontology.},
}
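In the spirit of the sample queries (a sketch, not one of the documented 120): fetch all AAT ancestors of a concept via gvp:broaderExtended with their English labels. The concept ID is an arbitrary example.

# Query the public Getty endpoint for the full broader hierarchy of a concept.
import requests

QUERY = """
PREFIX aat: <http://vocab.getty.edu/aat/>
PREFIX gvp: <http://vocab.getty.edu/ontology#>
PREFIX xl:  <http://www.w3.org/2008/05/skos-xl#>
SELECT ?parent ?label WHERE {
  aat:300011154 gvp:broaderExtended ?parent .     # example concept ID
  ?parent xl:prefLabel [xl:literalForm ?label] .
  FILTER(LANG(?label) = "en")
}
"""

rows = requests.get("https://vocab.getty.edu/sparql",
                    params={"query": QUERY},
                    headers={"Accept": "application/sparql-results+json"},
                    timeout=60).json()["results"]["bindings"]
for r in rows:
    print(r["parent"]["value"], r["label"]["value"])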
How Not to Do Linked Data. Alexiev, V. Github gist, December 2016.
@Misc{Alexiev2016-How-not-to-do-LOD,
  author       = {Vladimir Alexiev},
  title        = {{How Not to Do Linked Data}},
  howpublished = {Github gist},
  month        = dec,
  year         = 2016,
  url          = {https://gist.github.com/VladimirAlexiev/090d5e54a525d57acb9b366121e77573},
  keywords     = {cultural heritage, RDF, LODLAM, CIDOC CRM, mapping, review, data quality},
  abstract     = {I review the data quality of the LOD publication of a national cultural heritage institution and show examples of bad practices.},
}
Multisensor RDF Application Profile. Alexiev, V. Technical Report, Multisensor Project, Ontotext Corp, October 2016.
@TechReport{Alexiev2016-Multisensor-profile,
  author       = {Vladimir Alexiev},
  title        = {{Multisensor RDF Application Profile}},
  institution  = {Multisensor Project, Ontotext Corp},
  year         = 2016,
  month        = oct,
  url          = {https://rawgit2.com/VladimirAlexiev/multisensor/master/index.html},
  url_Source   = {https://github.com/VladimirAlexiev/multisensor},
  keywords     = {Multisensor, CUBE, NLP, NLP2RDF, NIF, OLIA, ITSRDF, NERD, MARL, BabelNet, FrameNet, WordNet},
  abstract     = {The Multisensor project analyzes and extracts data from mass- and social media documents (so-called SIMMOs), including text, images and video, speech recognition and translation, across several languages. It also handles social network data, statistical data, etc. Early on, the project made the decision that all data exchanged between project partners (between modules inside and outside the processing pipeline) will be in RDF JSON-LD format. The final data is stored in a semantic repository and is used by various User Interface components for end-user interaction. This final data forms a corpus of semantic data over SIMMOs and is an important outcome of the project. The flexibility of the semantic web model has allowed us to accommodate a huge variety of data in the same extensible model. We use a number of ontologies for representing that data: NIF and OLIA for linguistic info, ITSRDF for NER, DBpedia and BabelNet for entities and concepts, MARL for sentiment, OA for image and cross-article annotations, W3C CUBE for statistical indicators, etc. In addition to applying existing ontologies, we extended them with the Multisensor ontology and introduced some innovations like embedding FrameNet in NIF. The documentation of this data has been an important ongoing task. It is even more important towards the end of the project, in order to enable the efficient use of MS data by external consumers. This document describes the different RDF patterns used by Multisensor and how the data fits together. Thus it represents an "RDF Application Profile" for Multisensor. We use an example-based approach, rather than the more formal and laborious approach being standardized by the W3C RDF Shapes working group (still in development). We cover the following areas: 1. Linguistic Linked Data in NLP Interchange Format (NIF), including Part of Speech (POS), dependency parsing, sentiment, Named Entity Recognition (NER), etc. 2. Speech recognition, translation. 3. Multimedia binding and image annotation. 4. Statistical indicators and similar data. 5. Social network popularity and influence, etc.},
}
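A minimal rdflib illustration of the NIF pattern the report documents (a nif:Context carrying the text, one phrase annotation anchored by character offsets, and an entity link via itsrdf:taIdentRef). This is a generic NIF example, not the Multisensor profile itself; the URIs and text are made up.

# Build a tiny NIF annotation graph and print it as Turtle.
from rdflib import Graph, Literal, Namespace, URIRef
from rdflib.namespace import RDF, XSD

NIF = Namespace("http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core#")
ITSRDF = Namespace("http://www.w3.org/2005/11/its/rdf#")

text = "Siemens builds household appliances."
g = Graph()
ctx = URIRef("http://example.org/doc1#char=0,36")  # example SIMMO-style URI
g.add((ctx, RDF.type, NIF.Context))
g.add((ctx, NIF.isString, Literal(text)))

ent = URIRef("http://example.org/doc1#char=0,7")   # the phrase "Siemens"
g.add((ent, RDF.type, NIF.Phrase))
g.add((ent, NIF.referenceContext, ctx))
g.add((ent, NIF.beginIndex, Literal(0, datatype=XSD.nonNegativeInteger)))
g.add((ent, NIF.endIndex, Literal(7, datatype=XSD.nonNegativeInteger)))
g.add((ent, NIF.anchorOf, Literal("Siemens")))
g.add((ent, ITSRDF.taIdentRef, URIRef("http://dbpedia.org/resource/Siemens")))

print(g.serialize(format="turtle"))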
How to find Open Data and Ontologies in Linguistics/NLP and Cultural Heritage. Alexiev, V. Presentation, 4th Open Data & Linked Data meetup, Sofia, Bulgaria, March 2016.
@Misc{Alexiev2016-OpenData,
  author       = {Vladimir Alexiev},
  title        = {{How to find Open Data and Ontologies in Linguistics/NLP and Cultural Heritage}},
  howpublished = {presentation},
  month        = mar,
  year         = 2016,
  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20160329-OpenData-and-Ontologies/index-full.html},
  url_Slides   = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20160329-OpenData-and-Ontologies/index.html},
  keywords     = {open data, ontology, linguistics, NLP, cultural heritage},
  address      = {4th Open Data & Linked Data meetup, Sofia, Bulgaria},
}
Multisensor Linked Open Data. Alexiev, V. Presentation, DBpedia Meeting, Leipzig, Germany, September 2016.
@Misc{Alexiev2016-dbpedia-multisensor,
  author       = {Vladimir Alexiev},
  title        = {{Multisensor Linked Open Data}},
  howpublished = {presentation},
  booktitle    = {{DBpedia Meeting}},
  address      = {Leipzig, Germany},
  month        = sep,
  year         = 2016,
  url          = {https://rawgit2.com/VladimirAlexiev/multisensor/master/20160915-Multisensor-LOD/index.html},
  keywords     = {Multisensor, CUBE, NLP, NLP2RDF, NIF, OLIA, ITSRDF, NERD, MARL, BabelNet, FrameNet, WordNet},
  abstract     = {The FP7 Multisensor project analyzes and extracts data from mass- and social media documents, including text, images and video, across several languages. It uses a number of ontologies for representing that data: NIF and OLIA for linguistic info, ITSRDF for NER, DBpedia and BabelNet for entities and concepts, MARL for sentiment, OA for image and cross-article annotations, etc. We present how all these ontologies fit together, and some innovations like embedding FrameNet in NIF.},
}
Making True RDF Diagrams with rdfpuml. Alexiev, V. Presentation, March 2016.
@Misc{Alexiev2016-rdfpuml,
  author       = {Vladimir Alexiev},
  title        = {{Making True RDF Diagrams with rdfpuml}},
  howpublished = {presentation},
  month        = mar,
  year         = 2016,
  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20160514-rdfpuml/index-full.html},
  url_Slides   = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20160514-rdfpuml/index.html},
  keywords     = {RDF, visualization, PlantUML, cultural heritage, NLP, NIF, EHRI},
  abstract     = {RDF is a graph data model, thus often the best way to understand RDF data schemas (ontologies, application profiles, RDF shapes) is with a diagram. We describe a tool (rdfpuml) that makes true diagrams from Turtle examples using PlantUML and GraphViz. Diagram readability is of prime concern, and rdfpuml introduces a few diagram control mechanisms using triples in the puml: namespace. We give examples from Getty CONA (mappings of museum data to CIDOC CRM), Multisensor (NLP2RDF/NIF, FrameNet), EHRI (Holocaust research into Jewish social networks), and Duraspace (Portland Common Data Model for holding metadata in institutional repositories).},
}
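The pipeline the abstract describes, as a hedged wrapper sketch: write a Turtle example, render it to PlantUML with rdfpuml, then to an image with plantuml. The script invocation follows my reading of the tool's README and may differ per installation; the CIDOC CRM example triples are illustrative.

# Sketch: Turtle example -> rdfpuml -> PlantUML -> PNG. Invocation is an assumption.
import pathlib
import subprocess

TTL = """\
@prefix crm: <http://www.cidoc-crm.org/cidoc-crm/> .
@prefix ex:  <http://example.org/> .

ex:object1 a crm:E22_Man-Made_Object ;
  crm:P1_is_identified_by ex:id1 .
ex:id1 a crm:E42_Identifier ;
  crm:P3_has_note "object 12345" .
"""

pathlib.Path("example.ttl").write_text(TTL)
subprocess.run(["perl", "rdfpuml.pl", "example.ttl"], check=True)  # -> example.puml
subprocess.run(["plantuml", "example.puml"], check=True)           # -> example.png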
RDF by Example: rdfpuml for True RDF Diagrams, rdf2rml for R2RML Generation. Alexiev, V. In Semantic Web in Libraries 2016 (SWIB 2016), Bonn, Germany, November 2016.
@InProceedings{Alexiev2016-rdfpuml-rdf2rml,
  author       = {Vladimir Alexiev},
  title        = {{RDF by Example: rdfpuml for True RDF Diagrams, rdf2rml for R2RML Generation}},
  booktitle    = {{Semantic Web in Libraries 2016 (SWIB 2016)}},
  year         = 2016,
  month        = nov,
  address      = {Bonn, Germany},
  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20161128-rdfpuml-rdf2rml/index-full.html},
  url_Slides   = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20161128-rdfpuml-rdf2rml/index.html},
  url_Video    = {https://youtu.be/4WoYlaGF6DE},
  keywords     = {RDF, visualization, PlantUML, cultural heritage, NLP, NIF, EHRI, R2RML, generation, model-driven, RDF by Example, rdfpuml, rdf2rml},
  abstract     = {RDF is a graph data model, so the best way to understand RDF data schemas (ontologies, application profiles, RDF shapes) is with a diagram. Many RDF visualization tools exist, but they either focus on large graphs (where the details are not easily visible), or the visualization results are not satisfactory, or manual tweaking of the diagrams is required. We describe a tool *rdfpuml* that makes true diagrams directly from Turtle examples using PlantUML and GraphViz. Diagram readability is of prime concern, and rdfpuml introduces various diagram control mechanisms using triples in the puml: namespace. Special attention is paid to inlining and visualizing various Reification mechanisms (described with PRV). We give examples from Getty CONA, Getty Museum, AAC (mappings of museum data to CIDOC CRM), Multisensor (NIF and FrameNet), EHRI (Holocaust research into Jewish social networks), Duraspace (Portland Common Data Model for holding metadata in institutional repositories), and video annotation. If the example instances include SQL queries and embedded field names, they can describe a mapping precisely. Another tool *rdf2rml* generates R2RML transformations from such examples, saving about 15x in complexity.},
}
% Future work: extend RDF by Example to describe RDF Shapes; extend rdf2rml to generate RML instead of only R2RML, i.e. handle XML and JSON data sources
% https://docs.stardog.com/#_stardog_mapping_syntax is similar: shortcut syntax of R2RML that displays examples
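The "RDF by Example" premise is that an example instance with embedded SQL column names doubles as a mapping specification. A hedged sketch of a generator along those lines (the table, columns, template, and EX namespace are invented; this is not rdf2rml's actual convention):

# Hedged sketch of the rdf2rml premise: derive an R2RML TriplesMap from a
# declarative description where literals carry SQL column names.
# Table/column names and the EX namespace are hypothetical.
from rdflib import Graph, Namespace, Literal, BNode, RDF
from rdflib.namespace import FOAF

RR = Namespace("http://www.w3.org/ns/r2rml#")
EX = Namespace("http://example.org/")

def triples_map(table, subject_template, clazz, columns):
    """Emit one rr:TriplesMap; `columns` maps predicate -> SQL column."""
    g = Graph()
    g.bind("rr", RR)
    tm, lt, sm = EX["map/" + table], BNode(), BNode()
    g.add((tm, RDF.type, RR.TriplesMap))
    g.add((tm, RR.logicalTable, lt))
    g.add((lt, RR.tableName, Literal(table)))
    g.add((tm, RR.subjectMap, sm))
    g.add((sm, RR.template, Literal(subject_template)))
    g.add((sm, RR["class"], clazz))
    for pred, col in columns.items():
        pom, om = BNode(), BNode()
        g.add((tm, RR.predicateObjectMap, pom))
        g.add((pom, RR.predicate, pred))
        g.add((pom, RR.objectMap, om))
        g.add((om, RR.column, Literal(col)))
    return g

print(triples_map("PERSON", "http://example.org/person/{ID}",
                  FOAF.Person, {FOAF.name: "NAME"}).serialize(format="turtle"))
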
FN goes NIF: Integrating FrameNet in the NLP Interchange Format. Alexiev, V.; and Casamayor, G. In Linked Data in Linguistics (LDL-2016): Managing, Building and Using Linked Language Resources, Portorož, Slovenia, May 2016.
@InProceedings{AlexievCasamayor2016-FN-NIF,
  author       = {Vladimir Alexiev and Gerard Casamayor},
  title        = {{FN goes NIF: Integrating FrameNet in the NLP Interchange Format}},
  booktitle    = {{Linked Data in Linguistics (LDL-2016): Managing, Building and Using Linked Language Resources}},
  year         = 2016,
  month        = may,
  address      = {Portorož, Slovenia},
  url          = {https://rawgit2.com/VladimirAlexiev/multisensor/master/FrameNet/paper.pdf},
  url_Slides   = {https://rawgit2.com/VladimirAlexiev/multisensor/master/FrameNet/pres.html},
  url_HTML     = {https://rawgit2.com/VladimirAlexiev/multisensor/master/FrameNet/pres-full.html},
  keywords     = {linguistic linked data, FrameNet, NIF, NLP2RDF, RDF, application profile},
  abstract     = {FrameNet (FN) is a large-scale lexical database for English developed at ICSI Berkeley that describes word senses in terms of Frame semantics. FN has been converted to RDF LOD by ISTC-CNR, together with a large corpus of text annotated with FN. NIF is an RDF/OWL format and protocol for exchanging text annotations between NLP tools as Linguistic Linked Data. This paper reviews the FN-LOD representation, compares it to NIF, and describes a simple way to integrate FN in NIF, which does not use any custom classes or properties.},
}

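As a flavor of the integration, one plausible rendering (an editor's sketch, not the paper's exact profile) is a NIF string annotation that points at an FN-LOD frame through a standard ITS 2.0 property, so no custom classes or properties are needed. The document and frame IRIs are invented, and the context resource is abridged.

# Sketch: a NIF phrase annotation linked to a FrameNet frame via the
# standard itsrdf:taClassRef property. Document/frame IRIs are invented.
from rdflib import Graph

TTL = """
@prefix nif:    <http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core#> .
@prefix itsrdf: <http://www.w3.org/2005/11/its/rdf#> .

<http://example.org/doc#char=10,16> a nif:Phrase ;
    nif:anchorOf "bought" ;
    nif:beginIndex 10 ;
    nif:endIndex 16 ;
    nif:referenceContext <http://example.org/doc#char=0,120> ;
    itsrdf:taClassRef <http://example.org/framenet/Commerce_buy> .
"""
g = Graph().parse(data=TTL, format="turtle")
print(len(g), "triples")
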
The health care and life sciences community profile for dataset descriptions. Dumontier, M.; Gray, A. J. G.; Marshall, M. S.; Alexiev, V.; and others. PeerJ, 4: e2331, August 2016.
@Article{HCLS-paper,
  author       = {Michel Dumontier and Alasdair J. G. Gray and M. Scott Marshall and Vladimir Alexiev and others},
  title        = {{The health care and life sciences community profile for dataset descriptions}},
  journal      = {{PeerJ}},
  year         = 2016,
  volume       = 4,
  pages        = {e2331},
  month        = aug,
  url          = {https://peerj.com/articles/2331/},
  keywords     = {Data profiling, Dataset descriptions, Metadata, Provenance, FAIR data, HCLS, dataset, VOID, ontology, Bioinformatics, Taxonomy},
  issn         = {2167-8359},
  doi          = {10.7717/peerj.2331},
  abstract     = {Access to consistent, high-quality metadata is critical to finding, understanding, and reusing scientific data. However, while there are many relevant vocabularies for the annotation of a dataset, none sufficiently captures all the necessary metadata. This prevents uniform indexing and querying of dataset repositories. Towards providing a practical guide for producing a high quality description of biomedical datasets, the W3C Semantic Web for Health Care and the Life Sciences Interest Group (HCLSIG) identified RDF vocabularies that could be used to specify common metadata elements and their value sets. The resulting guideline covers elements of description, identification, attribution, versioning, provenance, and content summarization. This guideline reuses existing vocabularies, and is intended to meet key functional requirements including indexing, discovery, exchange, query, and retrieval of datasets. The resulting metadata profile is generic and could be used by other domains with an interest in providing machine readable descriptions of versioned datasets.},
}

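Since the profile is described in prose, a tiny hedged example may help: a summary-level dataset with one version and one distribution, using only widely known vocabulary terms. IRIs are invented and the field selection is abridged; this is a sketch in the profile's spirit, not a conformance example.

# Hedged, abridged sketch in the spirit of the HCLS profile's
# summary -> version -> distribution levels; IRIs invented.
from rdflib import Graph

TTL = """
@prefix dct:  <http://purl.org/dc/terms/> .
@prefix dcat: <http://www.w3.org/ns/dcat#> .
@prefix pav:  <http://purl.org/pav/> .
@prefix ex:   <http://example.org/ds/> .

ex:summary a dcat:Dataset ;
    dct:title "Example Dataset"@en ;
    dct:publisher <http://example.org/org/acme> .

ex:v1 a dcat:Dataset ;
    dct:isVersionOf ex:summary ;
    pav:version "1.0" ;
    dcat:distribution ex:v1-ttl .

ex:v1-ttl a dcat:Distribution ;
    dcat:downloadURL <http://example.org/ds/v1.ttl> ;
    dct:format "text/turtle" .
"""
print(len(Graph().parse(data=TTL, format="turtle")), "triples")
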
Semantic Integration of Web Data for International Investment Decision Support. Simeonov, B.; Alexiev, V.; Liparas, D.; Puigbo, M.; Vrochidis, S.; Jamin, E.; and Kompatsiaris, I. In 3rd International Conference on Internet Science (INSCI 2016), Florence, Italy, September 2016.
@InProceedings{INSCI2016-Multisensor,
  author       = {Boyan Simeonov and Vladimir Alexiev and Dimitris Liparas and Marti Puigbo and Stefanos Vrochidis and Emmanuel Jamin and Ioannis Kompatsiaris},
  title        = {{Semantic Integration of Web Data for International Investment Decision Support}},
  booktitle    = {{3rd International Conference on Internet Science (INSCI 2016)}},
  year         = 2016,
  month        = sep,
  address      = {Florence, Italy},
  url          = {https://zenodo.org/record/571202},
  url_Preprint = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/INSCI2016.pdf},
  keywords     = {Decision support, Indicators, Heterogeneous web resources, SME internationalisation, Semantic integration, SPARQL, statistics ontologies, CUBE},
  doi          = {10.1007/978-3-319-45982-0_18},
  abstract     = {Given the current economic situation and the financial crisis in many European countries, Small and Medium Enterprises (SMEs) have found internationalisation and exportation of their products to be the main way out of this crisis. In this paper, we provide a decision support system that semantically aggregates information from many heterogeneous web resources and provides guidance to SMEs for their potential investments. The main contributions of this paper are the introduction of SME internationalisation indicators that can be considered for such decisions, as well as the novel decision support system for SME internationalisation based on inference over semantically integrated data from heterogeneous web resources. The system is evaluated by SME experts in realistic scenarios in the sector of dairy products.},
  session      = {13 Sep 14:20: Smart Cities and Data Analysis Issues},
}

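The keywords mention SPARQL and the statistics CUBE vocabulary; as a hedged illustration of what a semantically integrated indicator can look like, here is a minimal RDF Data Cube observation and a query over it. The indicator, dimensions, and values are invented for the sketch.

# Hedged illustration: an internationalisation indicator modeled as an
# RDF Data Cube observation (qb: is the W3C vocabulary; the ex: dataset,
# dimensions and values are invented).
from rdflib import Graph

TTL = """
@prefix qb: <http://purl.org/linked-data/cube#> .
@prefix ex: <http://example.org/ind/> .

ex:obs1 a qb:Observation ;
    qb:dataSet ex:dairy-exports ;
    ex:refArea ex:BG ;
    ex:refPeriod "2015" ;
    ex:value 42.0 .
"""
g = Graph().parse(data=TTL, format="turtle")

Q = """
PREFIX qb: <http://purl.org/linked-data/cube#>
PREFIX ex: <http://example.org/ind/>
SELECT ?area ?v WHERE {
  ?obs qb:dataSet ex:dairy-exports ; ex:refArea ?area ; ex:value ?v .
}
"""
for row in g.query(Q):
    print(row.area, row.v)
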
Exploring comparative evaluation of semantic enrichment tools for cultural heritage metadata. Manguinhas, H.; Freire, N.; Isaac, A.; Stiller, J.; Charles, V.; Soroa, A.; Simon, R.; and Alexiev, V. In Fuhr, N.; Kovács, L.; Risse, T.; and Nejdl, W., editors, 20th International Conference on Theory and Practice of Digital Libraries (TPDL 2016), Hannover, Germany, September 2016.
@InProceedings{TPDL2016-semanticEnrichment,
  author       = {Hugo Manguinhas and Nuno Freire and Antoine Isaac and Juliane Stiller and Valentine Charles and Aitor Soroa and Rainer Simon and Vladimir Alexiev},
  title        = {{Exploring comparative evaluation of semantic enrichment tools for cultural heritage metadata}},
  booktitle    = {{20th International Conference on Theory and Practice of Digital Libraries (TPDL 2016)}},
  year         = 2016,
  editor       = {Norbert Fuhr and László Kovács and Thomas Risse and Wolfgang Nejdl},
  month        = sep,
  address      = {Hannover, Germany},
  url          = {https://link.springer.com/chapter/10.1007/978-3-319-43997-6_21},
  url_Preprint = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/TPDL2016.pdf},
  keywords     = {Europeana, semantic enrichment, evaluation, precision, recall, cultural heritage, metadata},
  doi          = {10.1007/978-3-319-43997-6_21},
  abstract     = {Semantic enrichment of metadata is an important and difficult problem for digital heritage efforts such as Europeana. This paper gives motivations and presents the work of a recently completed Task Force that addressed the topic of evaluation of semantic enrichment. We especially report on the design and the results of a comparative evaluation experiment, where we have assessed the enrichments of seven tools (or configurations thereof) on a sample benchmark dataset from Europeana.},
}

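The comparative experiment boils down to scoring each tool's proposed links against a benchmark; for readers new to the topic, the arithmetic is precision, recall, and F1 over sets of (record, target) pairs. An editor's sketch, not the Task Force's actual methodology or data:

# Editor's sketch of enrichment evaluation arithmetic: precision, recall
# and F1 of proposed (record, target) links against a gold standard.
def prf(proposed: set, gold: set) -> tuple:
    tp = len(proposed & gold)                    # true positives
    p = tp / len(proposed) if proposed else 0.0  # precision
    r = tp / len(gold) if gold else 0.0          # recall
    f1 = 2 * p * r / (p + r) if p + r else 0.0
    return p, r, f1

gold = {("rec1", "dbpedia:Paris"), ("rec2", "dbpedia:Mozart")}
tool = {("rec1", "dbpedia:Paris"), ("rec1", "dbpedia:Paris,_Texas")}
print(prf(tool, gold))   # -> (0.5, 0.5, 0.5)
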
Domain-specific modeling: Towards a Food and Drink Gazetteer. Tagarev, A.; Tolosi, L.; and Alexiev, V. In Cardoso, J.; Guerra, F.; Houben, G.; Pinto, A. M.; and Velegrakis, Y., editors, Semantic Keyword-based Search on Structured Data Sources, volume 9398 of Lecture Notes in Computer Science, pages 182-196. Springer, January 2016. First COST Action IC1302 International KEYSTONE Conference (IKC 2015), Coimbra, Portugal, September 8-9, 2015. Revised Selected Papers.
@InProceedings{TagarevTolosiAlexiev2017-FD,
  author       = {Andrey Tagarev and Laura Tolosi and Vladimir Alexiev},
  title        = {{Domain-specific modeling: Towards a Food and Drink Gazetteer}},
  booktitle    = {{Semantic Keyword-based Search on Structured Data Sources}},
  year         = 2016,
  editor       = {Jorge Cardoso and Francesco Guerra and Geert-Jan Houben and Alexandre Miguel Pinto and Yannis Velegrakis},
  volume       = 9398,
  series       = {Lecture Notes in Computer Science},
  pages        = {182-196},
  month        = jan,
  publisher    = {Springer},
  note         = {First COST Action IC1302 International KEYSTONE Conference (IKC 2015), Coimbra, Portugal, September 8-9, 2015. Revised Selected Papers},
  url          = {https://link.springer.com/chapter/10.1007/978-3-319-27932-9_16},
  url_Slides   = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Tagarev2015-DomainSpecificGazetteer-slides.pdf},
  url_Preprint = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Tagarev2015-DomainSpecificGazetteer.pdf},
  keywords     = {classification, categorization, Wikipedia, DBpedia, gazetteer, Europeana, Cultural Heritage, concept extraction, semantic enrichment, food and drink},
  chapter      = 16,
  doi          = {10.1007/978-3-319-27932-9_16},
  isbn         = {978-3-319-27932-9},
  abstract     = {Our goal is to build a Food and Drink (FD) gazetteer that can serve for classification of general, FD-related concepts, efficient faceted search or automated semantic enrichment. Fully supervised design of domain-specific models "ex novo" is not scalable. Integration of several ready knowledge bases is tedious and does not ensure coverage. Completely data-driven approaches require a large amount of training data, which is not always available. In cases when the domain is not very specific (as the FD domain), re-using encyclopedic knowledge bases like Wikipedia may be a good idea. We propose a semi-supervised approach that uses a restricted Wikipedia as a base for the modeling, achieved by selecting a domain-relevant Wikipedia category as root for the model and all its subcategories, combined with expert and data-driven pruning of irrelevant categories.},
}

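The category-subtree idea can be prototyped directly against the public DBpedia endpoint, since subcategories point to their parents via skos:broader; a sketch with an arbitrarily chosen root category and no pruning (requires the SPARQLWrapper package):

# Sketch: harvest one level of a Wikipedia category subtree from DBpedia.
# Root category chosen arbitrarily; the paper's expert/data-driven pruning
# is not shown. Requires: pip install SPARQLWrapper
from SPARQLWrapper import SPARQLWrapper, JSON

sparql = SPARQLWrapper("https://dbpedia.org/sparql")
sparql.setQuery("""
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
SELECT DISTINCT ?sub WHERE {
  ?sub skos:broader <http://dbpedia.org/resource/Category:Foods> .
} LIMIT 20
""")
sparql.setReturnFormat(JSON)
for b in sparql.query().convert()["results"]["bindings"]:
    print(b["sub"]["value"])
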
Linked Open Data for Cultural Heritage Institutions: Build Narratives through Connecting Artifacts. Uzunov, I.; and Alexiev, V. In Museum Exhibits and Standards: A Look Ahead, Sofia, Bulgaria, November 2016. Bulgarian-American Fulbright Commission for Educational Exchange: Bi-National Commission for the Preservation of Bulgaria's Cultural Heritage.
@InProceedings{UzunovAlexiev2016-Fulbright,
  author       = {Ilian Uzunov and Vladimir Alexiev},
  title        = {{Linked Open Data for Cultural Heritage Institutions: Build Narratives through Connecting Artifacts}},
  booktitle    = {{Museum Exhibits and Standards: A Look Ahead}},
  year         = 2016,
  month        = nov,
  address      = {Sofia, Bulgaria},
  organization = {Bulgarian-American Fulbright Commission for Educational Exchange: Bi-National Commission for the Preservation of Bulgaria's Cultural Heritage},
  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20161128-fulbright/index-full.html},
  url_PDF      = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20161128-fulbright/Linked_Open_Data_for_Cultural_Heritage_Institutions.pdf},
  url_Slides   = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20161128-fulbright/index.html},
}

2015 (21)
Name Data Sources for Semantic Enrichment. Alexiev, V. Technical report, part of Deliverable D2.4, Europeana Creative project, February 2015.
@TechReport{Alexiev2015-CH-names,
  author       = {Vladimir Alexiev},
  title        = {{Name Data Sources for Semantic Enrichment}},
  institution  = {Europeana Creative project},
  year         = 2015,
  type         = {Deliverable},
  number       = {Part of Deliverable D2.4},
  month        = feb,
  url          = {https://rawgit2.com/VladimirAlexiev/CH-names/master/README.html},
  keywords     = {Europeana, semantic enrichment, knowledge base, gazetteer, VIAF, Wikidata, ULAN, ISNI},
  abstract     = {Semantic enrichment in Europeana is a very difficult task due to several factors: 1. Varying metadata quality across different collections, sometimes including misallocation of metadata fields; 2. Varying metadata formatting practices across different collections, e.g. some collections indicate the role of a creator in brackets after the creator name; 3. Lack of accurate language information. In this report we focus on Person & Institution enrichment (person Named Entity Recognition), which in itself is an ambitious task. Historic people are often referred to by many names. For successful semantic enrichment it's important to integrate high-quality and high-coverage datasets that provide name info. There is a great number of Name Authority files maintained at libraries, museums and other heritage institutions world-wide, e.g. VIAF, ISNI, Getty ULAN, British Museum. Linked Open Data (LOD) datasets also have a plethora of names, e.g. in DBpedia, Wikidata and FreeBase. We analyze some of the available datasets in terms of person coverage, name coverage, language tags, extra features that can be useful for enrichment, quality. We also analyze the important topic of coreferencing, i.e. how connected the sources are to each other.},
}

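The coreferencing question raised in the report can nowadays be probed on Wikidata, which carries VIAF (P214) and ULAN (P245) identifiers alongside multilingual labels; a hedged sketch against the public query service (the example person is arbitrary):

# Sketch: name coverage and coreferencing for one person on Wikidata.
# Q5598 (Rembrandt) is an arbitrary example; P214=VIAF ID, P245=ULAN ID.
from SPARQLWrapper import SPARQLWrapper, JSON

sparql = SPARQLWrapper("https://query.wikidata.org/sparql",
                       agent="name-sources-sketch/0.1")
sparql.setQuery("""
SELECT ?viaf ?ulan ?label WHERE {
  wd:Q5598 wdt:P214 ?viaf ; wdt:P245 ?ulan ; rdfs:label ?label .
  FILTER(lang(?label) = "en")
}
""")
sparql.setReturnFormat(JSON)
for b in sparql.query().convert()["results"]["bindings"]:
    print(b["viaf"]["value"], b["ulan"]["value"], b["label"]["value"])
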
Europeana Food and Drink Classification Scheme. Alexiev, V. Technical report D2.2, Europeana Food and Drink project, February 2015.
@TechReport{Alexiev2015-EFD-classification,
  author       = {Vladimir Alexiev},
  title        = {{Europeana Food and Drink Classification Scheme}},
  institution  = {Europeana Food and Drink project},
  year         = 2015,
  type         = {Deliverable},
  number       = {D2.2},
  month        = feb,
  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Europeana-Food-and-Drink-Classification-Scheme-(D2.2).pdf},
  url_Slides   = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Europeana Food and Drink Classification and Ideas for Semantic App (201503).pptx},
  keywords     = {Europeana, cultural heritage, food and drink, classification, categorization, DBpedia, Wikipedia, AGROVOC, WordNet, UMBEL},
  abstract     = {The Europeana Food and Drink Classification scheme (EFD classification) is a multi-dimensional scheme for discovering and classifying Cultural Heritage Objects (CHO) related to Food and Drink (FD). The topic of Food and Drink is so pervasive in our daily lives and in our culture that assembling a small "specialist" thesaurus is not feasible (such specialist thesauri were successfully used in other Europeana projects, e.g. ECLAP on performing arts and PartagePlus on Art Nouveau). We investigate about 20 datasets for their relevance to FD, including the Getty thesauri, WordNet FD Domain, Wikipedia (in its 2 semantic data representations: DBpedia and Wikidata), AGROVOC, etc. We have selected Wikipedia as the basis for the classification, and plan to use the Wikipedia Categories to construct a hierarchical network to be used for classification. The project will also use innovative semantic technologies to automate the extraction of terms and co-references. The result will be a body of semantically-enriched metadata that can support a wider range of multi-lingual applications such as search, discovery and browsing. (91 pages)},
}

Europeana Food and Drink Semantic Demonstrator Delivery. Alexiev, V. Technical report D3.20, Europeana Food and Drink project, October 2015.
@TechReport{Alexiev2015-EFD-semapp,
  author       = {Vladimir Alexiev},
  title        = {{Europeana Food and Drink Semantic Demonstrator Delivery}},
  institution  = {Europeana Food and Drink project},
  year         = 2015,
  type         = {Deliverable},
  number       = {D3.20},
  month        = oct,
  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Europeana-Food-and-Drink-Semantic-Demonstrator-Delivery-(D3.20).pdf},
  keywords     = {Europeana, cultural heritage, food and drink, semantic application, semantic search, faceted search, semantic enrichment},
  abstract     = {Describes the development and delivery of the EFD Semantic Demonstrator. We describe all work performed between 1 April 2015 and 31 October 2015, the achieved results, the created data and enrichments, and the developed application.},
}

Europeana Food and Drink Semantic Demonstrator M18 Progress Report. Alexiev, V. Technical report D3.20a, Europeana Food and Drink project, June 2015.
@TechReport{Alexiev2015-EFD-semapp-progress1,
  author       = {Vladimir Alexiev},
  title        = {{Europeana Food and Drink Semantic Demonstrator M18 Progress Report}},
  institution  = {Europeana Food and Drink project},
  year         = 2015,
  type         = {Progress Report},
  number       = {D3.20a},
  month        = jun,
  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Europeana-Food-and-Drink-Semantic-Demonstrator-M18-Report-(D3.20a).pdf},
  keywords     = {Europeana, cultural heritage, food and drink, semantic application, semantic search, faceted search, semantic enrichment},
  abstract     = {Describes the development progress on the Europeana Food and Drink Semantic Demonstrator for the first 2.5 months (between 1 April 2015 and 15 June 2015), the achieved results, and project management considerations.},
}

Europeana Food and Drink Semantic Demonstrator Specification. Alexiev, V. Technical report D3.19, Europeana Food and Drink project, March 2015.
@TechReport{Alexiev2015-EFD-semapp-spec,
  author       = {Vladimir Alexiev},
  title        = {{Europeana Food and Drink Semantic Demonstrator Specification}},
  institution  = {Europeana Food and Drink project},
  year         = 2015,
  type         = {Deliverable},
  number       = {D3.19},
  month        = mar,
  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Europeana-Food-and-Drink-Semantic-Demonstrator-Specification-(D3.19).pdf},
  keywords     = {Europeana, cultural heritage, food and drink, semantic application, semantic search, faceted search, semantic enrichment},
  abstract     = {The Europeana Food and Drink Semantic Demonstrator (EFD sem app) will allow multi-dimensional semantic exploration and discovery of cultural heritage objects (CHO) related to Food and Drink (FD). It will both apply and augment the EFD Classification scheme, using positive feedback loop mechanisms: the more the classification is used, the better it becomes. It will enable providers to classify their content, and consumers to explore CHOs using semantic search.},
}

GLAMs Working with Wikidata. Alexiev, V. Presentation, May 2015.
@Misc{Alexiev2015-GLAMs-Wikidata,
  author       = {Vladimir Alexiev},
  title        = {{GLAMs Working with Wikidata}},
  howpublished = {presentation},
  month        = may,
  year         = 2015,
  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/20150518-GLAMs-working-with-Wikidata.ppt},
  keywords     = {Wikidata, Wikipedia, cultural heritage},
  booktitle    = {{Europeana Food and Drink content provider workshop}},
  address      = {Athens, Greece},
  abstract     = {How GLAMs can use Wikipedia/Wikidata to make their collections globally accessible across languages.},
}

Getty Vocabularies Linked Open Data: Semantic Representation. Alexiev, V.; Cobb, J.; Garcia, G.; and Harpring, P. Getty Research Institute, 3.2 edition, March 2015.
@Manual{Alexiev2015-GVP-LOD-doc,
  title        = {{Getty Vocabularies Linked Open Data: Semantic Representation}},
  author       = {Vladimir Alexiev and Joan Cobb and Gregg Garcia and Patricia Harpring},
  organization = {Getty Research Institute},
  edition      = {3.2},
  month        = mar,
  year         = 2015,
  url          = {https://vocab.getty.edu/doc/},
  keywords     = {Getty, GVP, vocabularies, thesauri, AAT, TGN, ULAN, semantic representation, LOD, ontology, SKOS, SKOS-XL, ISO 25964},
}

Getty Vocabulary Program (GVP) Ontology. Alexiev, V. 3.2 edition, March 2015. Ontology.
@Manual{Alexiev2015-GVP-ontology,
  title        = {{Getty Vocabulary Program (GVP) Ontology}},
  author       = {Vladimir Alexiev},
  edition      = {3.2},
  month        = mar,
  year         = 2015,
  url          = {https://vocab.getty.edu/ontology},
  url_LOV      = {https://lov.okfn.org/dataset/lov/details/vocabulary_gvp.html},
  keywords     = {Getty, GVP, vocabularies, thesauri, AAT, TGN, ULAN, semantic representation, LOD, ontology, SKOS, SKOS-XL, ISO 25964, DC, DCT, BIBO, FOAF, BIO, Schema, PROV, WGS84},
  institution  = {Getty Research Institute},
  type         = {Ontology},
  note         = {Ontology},
  abstract     = {The GVP Ontology defines classes, properties and values (skos:Concepts) used in GVP LOD. As of version 3.0, it is complete regarding AAT, TGN and ULAN, and will be extended in time with more elements needed for other GVP vocabularies (CONA). It uses the SKOS, SKOS-XL, ISO 25964; DC, DCT, BIBO, FOAF, BIO, Schema, PROV, WGS84 ontologies.},
}

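The ontology is easiest to grasp against live data; a sketch query to the public endpoint fetching the SKOS-XL preferred labels of one AAT concept (the concept ID is an arbitrary example):

# Sketch: SKOS-XL preferred labels of an arbitrary AAT concept, fetched
# from the public GVP endpoint. Requires: pip install SPARQLWrapper
from SPARQLWrapper import SPARQLWrapper, JSON

sparql = SPARQLWrapper("https://vocab.getty.edu/sparql")
sparql.setQuery("""
PREFIX skosxl: <http://www.w3.org/2008/05/skos-xl#>
SELECT ?label WHERE {
  <http://vocab.getty.edu/aat/300198841>
      skosxl:prefLabel/skosxl:literalForm ?label .
}
""")
sparql.setReturnFormat(JSON)
for b in sparql.query().convert()["results"]["bindings"]:
    print(b["label"]["value"])
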
Wikidata, a Target for Europeana's Semantic Strategy. Alexiev, V.; Charles, V.; and Manguinhas, H. Presentation, April 2015.
@Misc{Alexiev2015-Glam-Wiki,
  author       = {Vladimir Alexiev and Valentine Charles and Hugo Manguinhas},
  title        = {{Wikidata, a Target for Europeana's Semantic Strategy}},
  howpublished = {presentation},
  month        = apr,
  year         = 2015,
  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/GLAMwiki2015.ppt},
  url_Other    = {https://nl.wikimedia.org/wiki/GLAM-WIKI_2015/Programme/Discussions/Strategy#Presentation:_Wikidata.2C_a_target_for_Europeana.E2.80.99s_semantic_strategy.3F},
  keywords     = {cultural heritage, GLAM, Europeana, semantic enrichment, Wikidata, Wikipedia},
  booktitle    = {{Glam-Wiki 2015}},
  address      = {The Hague},
  abstract     = {For Europeana, the platform for Europe’s digital cultural heritage from libraries, museums and archives, getting richer (semantic and multilingual) metadata is a priority. It improves access to the 40 million cultural heritage objects, notably enabling the multilingual retrieval of documents and creating relations between objects. To enhance data and enable retrieval across languages, Europeana performs automatic enrichment by selecting source metadata field(s) in the Europeana data and creating links to a selected target vocabulary or dataset representing contextual resources such as places, concepts, agents and time periods. Wikidata has been on Europeana’s radar for a while as a potential new target for enrichment, but how can it be integrated with cultural heritage data?},
}

On the composition of ISO 25964 hierarchical relations (BTG, BTP, BTI). Alexiev, V.; Lindenthal, J.; and Isaac, A. International Journal on Digital Libraries, 1-10, August 2015.
@Article{Alexiev2015-IJDL,
  author       = {Vladimir Alexiev and Jutta Lindenthal and Antoine Isaac},
  title        = {{On the composition of ISO 25964 hierarchical relations (BTG, BTP, BTI)}},
  journal      = {{International Journal on Digital Libraries}},
  year         = 2015,
  pages        = {1-10},
  month        = aug,
  url          = {https://link.springer.com/article/10.1007/s00799-015-0162-2},
  url_PDF      = {https://link.springer.com/content/pdf/10.1007/s00799-015-0162-2.pdf},
  keywords     = {Thesauri, ISO 25964, BTG, BTP, BTI, Broader generic, Broader partitive, Broader instantial, AAT},
  issn         = {1432-1300},
  publisher    = {Springer},
  language     = {English},
  doi          = {10.1007/s00799-015-0162-2},
  abstract     = {Knowledge organization systems (KOS) can use different types of hierarchical relations: broader generic (BTG), broader partitive (BTP), and broader instantial (BTI). The latest ISO standard on thesauri (ISO 25964) has formalized these relations in a corresponding OWL ontology and expressed them as properties: broaderGeneric, broaderPartitive, and broaderInstantial, respectively. These relations are used in actual thesaurus data. The compositionality of these types of hierarchical relations has not been investigated systematically yet. They all contribute to the general broader (BT) thesaurus relation and its transitive generalization broader transitive defined in the SKOS model for representing KOS. But specialized relationship types cannot be arbitrarily combined to produce new statements that have the same semantic precision, leading to cases where inference of broader transitive relationships may be misleading. We define Extended properties (BTGE, BTPE, BTIE) and analyze which compositions of the original “one-step” properties and the Extended properties are appropriate. This enables providing the new properties with valuable semantics usable, e.g., for fine-grained information retrieval purposes. In addition, we relax some of the constraints assigned to the ISO properties, namely the fact that hierarchical relationships apply to SKOS concepts only. This allows us to apply them to the Getty Art and Architecture Thesaurus (AAT), where they are also used for non-concepts (facets, hierarchy names, guide terms). In this paper, we present extensive examples derived from the recent publication of AAT as linked open data.},
}

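One way to see why the Extended properties matter: the safe compositions can be captured as OWL 2 axioms, e.g. making BTGE transitive and a superproperty of BTG, so chains of BTG entail BTGE without every mixed BTG/BTP/BTI chain being collapsed into one relation. A hedged sketch (the gvp: IRIs follow the GVP style but should be treated as illustrative here, not authoritative):

# Hedged sketch: BTGE as a transitive superproperty of BTG, so chains of
# broaderGeneric entail broaderGenericExtended. Mixed BTG/BTP/BTI
# compositions need the paper's finer-grained rules and are not shown.
from rdflib import Graph

TTL = """
@prefix owl:  <http://www.w3.org/2002/07/owl#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix gvp:  <http://vocab.getty.edu/ontology#> .

gvp:broaderGeneric rdfs:subPropertyOf gvp:broaderGenericExtended .
gvp:broaderGenericExtended a owl:ObjectProperty, owl:TransitiveProperty .
"""
print(len(Graph().parse(data=TTL, format="turtle")), "triples")
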
Smithsonian American Art Museum LOD Review. Alexiev, V. GitHub wiki page, February 2015.
@Misc{Alexiev2015-SAAM-Review,
  author       = {Vladimir Alexiev},
  title        = {{Smithsonian American Art Museum LOD Review}},
  howpublished = {GitHub wiki page},
  month        = feb,
  year         = 2015,
  url          = {https://github.com/usc-isi-i2/saam-lod/wiki/SAAM-LOD-Review},
  keywords     = {cultural heritage, RDF, LODLAM, CIDOC CRM, SAAM, mapping, review, data quality},
  abstract     = {Review of the initial LOD publication of the Smithsonian American Art Museum, and recommendations for improvement.},
}

Sex or Gender? Alexiev, V. Blog post, February 2015.
@Misc{Alexiev2015-SexOrGender,
  author       = {Vladimir Alexiev},
  title        = {{Sex or Gender?}},
  howpublished = {blog post},
  month        = feb,
  year         = 2015,
  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/sex-or-gender/index.html},
  abstract     = {Considerations about sex/gender enumeration values in LOD. While working on ULAN LOD, I wondered how should we map the ULAN field "sex". So I did a small review of available LOD properties and values.},
}

bg.dbpedia.org launched. Alexiev, V. Presentation, February 2015.
@Misc{Alexiev2015-bg.dbpedia,
  author       = {Vladimir Alexiev},
  title        = {{bg.dbpedia.org launched}},
  howpublished = {presentation},
  month        = feb,
  year         = 2015,
  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20150209-dbpedia/bg-dbpedia-launched-long.html},
  url_Slides   = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20150209-dbpedia/bg-dbpedia-launched.html},
  keywords     = {DBpedia},
  booktitle    = {{DBpedia Meeting}},
  address      = {Dublin, Ireland},
}

Adding a DBpedia Mapping. Alexiev, V. Presentation, February 2015.
@Misc{Alexiev2015-dbpedia-mapping,
  author       = {Vladimir Alexiev},
  title        = {{Adding a DBpedia Mapping}},
  howpublished = {presentation},
  month        = feb,
  year         = 2015,
  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20150209-dbpedia/add-mapping-long.html},
  url_Slides   = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20150209-dbpedia/add-mapping.html},
  keywords     = {DBpedia, ontology mapping},
  booktitle    = {{DBpedia Meeting}},
  address      = {Dublin, Ireland},
}

DBpedia Ontology and Mapping Problems. Alexiev, V. Presentation, February 2015.
@Misc{Alexiev2015-dbpedia-problems,
  author       = {Vladimir Alexiev},
  title        = {{DBpedia Ontology and Mapping Problems}},
  howpublished = {presentation},
  month        = feb,
  year         = 2015,
  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20150209-dbpedia/dbpedia-problems-long.html},
  url_Slides   = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20150209-dbpedia/dbpedia-problems.html},
  keywords     = {DBpedia, ontology, ontology mapping, data quality},
  booktitle    = {{DBpedia Meeting}},
  address      = {Dublin, Ireland},
}

O is for Open: OAI and SPARQL interfaces for Europeana. Alexiev, V.; and Angelova, D. Poster, July 2015.
@Misc{AlexievAngelova2015-CultJam15,
  author       = {Vladimir Alexiev and Dilyana Angelova},
  title        = {{O is for Open: OAI and SPARQL interfaces for Europeana}},
  month        = jul,
  year         = 2015,
  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/O_is_for_Open_(CultJam_201507)_poster.pdf},
  url_Slides   = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/O_is_for_Open_(CultJam_201507)_slide.pdf},
  keywords     = {Europeana, OAI, OAI PMH, SPARQL, EDM, semantic repository},
  type         = {poster},
  booktitle    = {{Europeana Creative Culture Jam}},
  address      = {Vienna, Austria},
  abstract     = {Poster. As part of the Europeana Creative project, Ontotext added 2 additional channels to Europeana Labs: OAI & SPARQL, complementing the API. OAI is used for bulk download (e.g. to update the semantic repository). SPARQL can answer queries that the API cannot, e.g. linking objects, exploring contextual entities (e.g. parent places or author life dates), and analytics/charts.},
}

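An example of the kind of join the abstract says the API cannot express: objects together with their contextual entities via EDM proxies. The endpoint URL and modeling reflect the deployment described in the poster, so treat the whole query as a sketch that may need adjusting today:

# Sketch: a SPARQL join over EDM that the REST API cannot express:
# proxies, their aggregated objects and creators. Endpoint/modeling as
# deployed at the time; may need adjusting today.
from SPARQLWrapper import SPARQLWrapper, JSON

sparql = SPARQLWrapper("http://sparql.europeana.eu/")
sparql.setQuery("""
PREFIX dc:  <http://purl.org/dc/elements/1.1/>
PREFIX ore: <http://www.openarchives.org/ore/terms/>
SELECT ?cho ?creator WHERE {
  ?proxy a ore:Proxy ; ore:proxyFor ?cho ; dc:creator ?creator .
} LIMIT 10
""")
sparql.setReturnFormat(JSON)
for b in sparql.query().convert()["results"]["bindings"]:
    print(b["cho"]["value"], b["creator"]["value"])
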
Europeana Food and Drink Semantic Demonstrator M21 Progress Report. Alexiev, V.; and Tolosi, L. Technical report D3.20b, Europeana Food and Drink project, October 2015.
@TechReport{AlexievTolosi2015-EFD-semapp-progress2,
  author       = {Vladimir Alexiev and Laura Tolosi},
  title        = {{Europeana Food and Drink Semantic Demonstrator M21 Progress Report}},
  institution  = {Europeana Food and Drink project},
  year         = 2015,
  type         = {Progress Report},
  number       = {D3.20b},
  month        = oct,
  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Europeana-Food-and-Drink-Semantic-Demonstrator-M21-Report-(D3.20b).pdf},
  keywords     = {Europeana, cultural heritage, food and drink, semantic application, semantic search, faceted search, semantic enrichment},
  abstract     = {Describes the progress on developing the EFD Semantic Demonstrator for the 3 months from 1 Jul 2015 to 1 Oct 2015. We describe all work performed, the achieved results and project management considerations.},
}

Europeana Report on Enrichment and Evaluation. Isaac, A.; Manguinhas, H.; Stiller, J.; Charles, V.; and others. Technical report, Europeana Task Force on Enrichment and Evaluation, October 2015.
@TechReport{Europeana2015-evaluation-enrichments,
  author       = {Antoine Isaac and Hugo Manguinhas and Juliane Stiller and Valentine Charles and others},
  title        = {{Europeana Report on Enrichment and Evaluation}},
  institution  = {Europeana Task Force on Enrichment and Evaluation},
  year         = 2015,
  month        = oct,
  url          = {https://pro.europeana.eu/taskforce/evaluation-and-enrichments},
  keywords     = {cultural heritage, Europeana, task force, semantic enrichment, text analysis, multilingual, evaluation},
  abstract     = {This report on Evaluation and Enrichment provides an overview of the different processes in semantic enrichment and offers guidance on how to assess each of these steps to implement a coherent enrichment strategy. The report begins by introducing the terminology used in the report. While defining the notion of semantic enrichment, the Task Force has identified several other associated notions that are commonly used in the cultural heritage domain when addressing semantic enrichment. We also provide an overview of the enrichment tools and services developed in the Europeana Network over the past years, reflecting the diversity of processes at hand: tools for manual enrichment and annotation, tools for automatic enrichment and workflow design tools. We also focus on the interoperability issues such as rules for specifying the linking or the format used to describe the enrichment outputs. As well as looking at the details of the enrichment processes we pick up the work done by the previous Task Force by specifying criteria for selecting and assessing target datasets. These criteria are based on vocabularies and datasets examples relevant to the Cultural Heritage domain. This selection strategy is available in a companion document to this report. The last component of the enrichment strategy is the evaluation of the enrichment processes. So far, evaluation in this domain has not been much documented even though a lot of work has been done in the field. We have tried to summarise different evaluation methodologies developed in related projects. These methods highlight the different components of the enrichment process that can be subject to evaluation. In order to validate all the recommendations provided in the previous sections, we have performed a quantitative and qualitative evaluation of seven enrichment services on the same subset of the Europeana dataset. The report of the evaluation is available in a companion document to this report while the main conclusions remain in this report. This report is a result of an inventory of tools, practices and standards that define the current state of the art for semantic enrichment. The analysis and evaluation work done during the course of the Task Force have allowed us to compile a series of lessons learnt that should be considered for the design and enhancement of enrichment services and their evaluation.},
}

Dataset Descriptions: HCLS Community Profile. Gray, A. J. G.; Baran, J.; Marshall, M. S.; Dumontier, M.; Alexiev, V.; and others. Technical report, Semantic Web in Health Care and Life Sciences Interest Group (HCLSIG), May 2015.
@TechReport{HCLS-profile,
  title        = {{Dataset Descriptions: HCLS Community Profile}},
  author       = {Alasdair J. G. Gray and Joachim Baran and M. Scott Marshall and Michel Dumontier and Vladimir Alexiev and others},
  month        = may,
  year         = 2015,
  url          = {https://www.w3.org/TR/hcls-dataset/},
  keywords     = {Data profiling, Dataset descriptions, Metadata, Provenance, FAIR data, HCLS, dataset, VOID, ontology, Bioinformatics, Taxonomy},
  institution  = {Semantic Web in Health Care and Life Sciences Interest Group (HCLSIG)},
  abstract     = {Access to consistent, high-quality metadata is critical to finding, understanding, and reusing scientific data. This document describes a consensus among participating stakeholders in the Health Care and the Life Sciences domain on the description of datasets using the Resource Description Framework (RDF). This specification meets key functional requirements, reuses existing vocabularies to the extent that it is possible, and addresses elements of data description, versioning, provenance, discovery, exchange, query, and retrieval.},
}

\n
\n\n\n
\n Access to consistent, high-quality metadata is critical to finding, understanding, and reusing scientific data. This document describes a consensus among participating stakeholders in the Health Care and the Life Sciences domain on the description of datasets using the Resource Description Framework (RDF). This specification meets key functional requirements, reuses existing vocabularies to the extent that it is possible, and addresses elements of data description, versioning, provenance, discovery, exchange, query, and retrieval.\n
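\n A minimal sketch of such a dataset description, assuming common Dublin Core usage (dct:title, dct:description, dct:publisher, dct:license); consult the profile itself for the normative property set and the summary/version/distribution levels it distinguishes.
<pre>
# Sketch: a summary-level dataset description in RDF (rdflib).
# Property selection is illustrative; see the HCLS profile for the normative list.
from rdflib import Graph, Literal, Namespace, URIRef
from rdflib.namespace import DCTERMS, RDF

DCTYPES = Namespace("http://purl.org/dc/dcmitype/")

g = Graph()
g.bind("dcterms", DCTERMS)

ds = URIRef("http://example.org/dataset/example")  # hypothetical dataset URI
g.add((ds, RDF.type, DCTYPES.Dataset))
g.add((ds, DCTERMS.title, Literal("Example dataset", lang="en")))
g.add((ds, DCTERMS.description, Literal("A summary-level description.", lang="en")))
g.add((ds, DCTERMS.publisher, URIRef("http://example.org/org/publisher")))
g.add((ds, DCTERMS.license, URIRef("https://creativecommons.org/licenses/by/4.0/")))

print(g.serialize(format="turtle"))
</pre>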
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n MULTISENSOR: Development of Multimedia Content Integration Technologies for Journalism, Media Monitoring and International Exporting Decision Support.\n \n \n \n \n\n\n \n Vrochidis, S.; Kompatsiaris, I.; Casamayor, G.; Arapakis, I.; Busch, R.; Alexiev, V.; Jamin, E.; Jugov, M.; Heise, N.; Forrellat, T.; Liparas, D.; Wanner, L.; Miliaraki, I.; Aleksic, V.; Simov, K.; Soro, A. M.; Eckhoff, M.; Wagner, T.; and Puigbo, M.\n\n\n \n\n\n\n In 2015 IEEE International Conference on Multimedia & Expo Workshops (ICMEW), pages 1-4, Turin, Italy, June 2015. \n \n\n\n\n
\n\n\n\n \n \n \"MULTISENSOR:Paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{ICMEW2015-Multisensor,\n  author       = {Stefanos Vrochidis and Ioannis Kompatsiaris and Gerard Casamayor and Ioannis Arapakis and Reinhard Busch and Vladimir Alexiev and Emmanuel Jamin and Michael Jugov and Nicolaus Heise and Teresa Forrellat and Dimitris Liparas and Leo Wanner and Iris Miliaraki and Vera Aleksic and Kiril Simov and Alan Mas Soro and Mirja Eckhoff and Tilman Wagner and Marti Puigbo},\n  title        = {{MULTISENSOR: Development of Multimedia Content Integration Technologies for Journalism, Media Monitoring and International Exporting Decision Support}},\n  booktitle    = {{2015 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)}},\n  year         = 2015,\n  pages        = {1-4},\n  month        = jun,\n  address      = {Turin, Italy},\n  url          = {https://www.computer.org/csdl/proceedings/icmew/2015/7079/00/07169818.pdf},\n  keywords     = {Multisensor, semantic enrichment, NLP, multimedia annotation, Journalism, Media Monitoring, International Export, Decision Support},\n  doi          = {10.1109/ICMEW.2015.7169818},\n  abstract     = {This paper presents an overview and the first results of the FP7 MULTISENSOR project, which deals with multidimensional content integration of multimedia content for intelligent sentiment enriched and context oriented interpretation. MULTISENSOR aims at providing unified access to multilingual, multimedia and multicultural economic, news story material across borders in order to support journalism and media monitoring tasks and provide decision support for internationalisation of companies.},\n}\n\n
\n
\n\n\n
\n This paper presents an overview and the first results of the FP7 MULTISENSOR project, which deals with multidimensional content integration of multimedia content for intelligent sentiment enriched and context oriented interpretation. MULTISENSOR aims at providing unified access to multilingual, multimedia and multicultural economic, news story material across borders in order to support journalism and media monitoring tasks and provide decision support for internationalisation of companies.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Innovative Use of KOS that are Published as Linked Open Data (LOD).\n \n \n \n \n\n\n \n Zeng, M.; Clunis, J.; and Alexiev, V.\n\n\n \n\n\n\n presentation, December 2015.\n \n\n\n\n
\n\n\n\n \n \n \"InnovativePaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Misc{Zeng2015-NKOS,\n  author       = {Marcia Zeng and Julaine Clunis and Vladimir Alexiev},\n  title        = {{Innovative Use of KOS that are Published as Linked Open Data (LOD)}},\n  howpublished = {presentation},\n  month        = dec,\n  year         = 2015,\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/MarciaZeng-LODKOSInnovativeUse.pdf},\n  keywords     = {vocabularies, thesauri, SKOS, NKOS, ontology},\n  booktitle    = {{First NKOS Workshop at International Conference on Asian Digital Libraries (ICADL 2015), Yonsei University, Seoul, Korea}},\n}\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2014\n \n \n (12)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Extending OWL2 Property Constructs with OWLIM Rules.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n Technical Report Ontotext Corp, September 2014.\n \n\n\n\n
\n\n\n\n \n \n \"ExtendingPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@TechReport{Alexiev2014-ExtendingOWL2,\n  author       = {Vladimir Alexiev},\n  title        = {{Extending OWL2 Property Constructs with OWLIM Rules}},\n  institution  = {Ontotext Corp},\n  year         = 2014,\n  month        = sep,\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/extending-owl2/index.html},\n  keywords     = {ontology, OWL2, Property Chain Axiom, sub-property, property inferencing, transitive properies, Ontotext GraphDB},\n  abstract     = {While OWL2 has very powerful class constructs, its property constructs are quite weak. We propose several extensions that we found useful, and implement them using OWLIM rules},\n}\n\n
\n
\n\n\n
\n While OWL2 has very powerful class constructs, its property constructs are quite weak. We propose several extensions that we found useful, and implement them using OWLIM rules\n
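\n One concrete flavour of such an extension is a rule making a property p "transitive over" another property q: from x p y and y q z, infer x p z. The Python below is a minimal forward-chaining sketch of that semantics over in-memory triples; it is illustrative only, not the OWLIM (.pie) rule syntax the report targets.
<pre>
# Naive forward chaining for a "p transitiveOver q" rule:
#   x p y  AND  y q z   =>   x p z
# Illustrative Python only; GraphDB/OWLIM expresses this in its rule language.
def transitive_over(triples, p, q):
    """Close a set of (s, pred, o) triples under the rule, to a fixpoint."""
    facts = set(triples)
    while True:
        new = {(x, p, z)
               for (x, p1, y) in facts if p1 == p
               for (y2, q1, z) in facts if q1 == q and y2 == y}
        if new <= facts:
            return facts
        facts |= new

# Example: locatedIn is transitive over partOf.
facts = transitive_over({("statue", "locatedIn", "roomA"),
                         ("roomA", "partOf", "museum"),
                         ("museum", "partOf", "city")},
                        "locatedIn", "partOf")
assert ("statue", "locatedIn", "museum") in facts
assert ("statue", "locatedIn", "city") in facts
</pre>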
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Getty Vocabulary Program LOD: Ontologies and Semantic Representation.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n In CIDOC Congress, Dresden, Germany, September 2014. \n \n\n\n\n
\n\n\n\n \n \n \"GettyPaper\n  \n \n \n \"Getty slides\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{Alexiev2014-GVP-LOD,\n  author       = {Vladimir Alexiev},\n  title        = {{Getty Vocabulary Program LOD: Ontologies and Semantic Representation}},\n  booktitle    = {{CIDOC Congress}},\n  year         = 2014,\n  month        = sep,\n  address      = {Dresden, Germany},\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20140905-CIDOC-GVP/GVP-LOD-CIDOC.pdf},\n  url_Slides   = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20140905-CIDOC-GVP/index.html},\n  keywords     = {Getty, GVP, AAT, TGN, ULAN, LOD, thesauri, vocabularies, SKOS, SKOS-XL, ISO 25964},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Ontotext GraphDB Rules Optimisations.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n December 2014.\n \n\n\n\n
\n\n\n\n \n \n \"OntotextPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Manual{Alexiev2014-GraphDBRuleProfiling,\n  title        = {{Ontotext GraphDB Rules Optimisations}},\n  author       = {Vladimir Alexiev},\n  month        = dec,\n  year         = 2014,\n  url          = {https://graphdb.ontotext.com/documentation/standard/rules-optimisations.html},\n  keywords     = {Ontotext GraphDB, inference, performance, optimization, profiling},\n  abstract     = {GraphDB 6 includes a useful new feature that allows you to debug rule performance. We also include Optimization Hints for ruleset performance.},\n}\n\n
\n
\n\n\n
\n GraphDB 6 includes a useful new feature that allows you to debug rule performance. We also include Optimization Hints for ruleset performance.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Linguistic Linked Data.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n presentation, October 2014.\n \n\n\n\n
\n\n\n\n \n \n \"LinguisticPaper\n  \n \n \n \"Linguistic slides\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Misc{Alexiev2014-LinguisticLD,\n  author       = {Vladimir Alexiev},\n  title        = {{Linguistic Linked Data}},\n  howpublished = {presentation},\n  month        = oct,\n  year         = 2014,\n  url          = {https://rawgit2.com/VladimirAlexiev/multisensor/master/20141008-Linguistic-LD/index-full.html},\n  url_Slides   = {https://rawgit2.com/VladimirAlexiev/multisensor/master/20141008-Linguistic-LD/index.html},\n  keywords     = {Linguistic Linked Data, NLP, NLP2RDF, NIF, OLIA, NERD, MARL, BabelNet, FrameNet, WordNet},\n  booktitle    = {{Multisensor Project Meeting}},\n  address      = {Bonn, Germany},\n  abstract     = {There's been a huge drive in recent years to represent NLP data as RDF. NLP data is usually large, so does it make sense to represent it as RDF? What's the benefit? Ontologies, schemas and groups include: GRaF ITS2 FISE LAF LD4LT LEMON LIME LMF MARL NERD NIF NLP2RDF OLIA OntoLex OntoLing OntoTag Penn Stanford... my oh my! There are a lot of linguistic resources available that can be used profitably: BabelNet FrameNet GOLD ISOcat LemonUBY Multitext OmegaNet UBY VerbNet Wiktionary WordNet.},\n}\n\n
\n
\n\n\n
\n There's been a huge drive in recent years to represent NLP data as RDF. NLP data is usually large, so does it make sense to represent it as RDF? What's the benefit? Ontologies, schemas and groups include: GRaF ITS2 FISE LAF LD4LT LEMON LIME LMF MARL NERD NIF NLP2RDF OLIA OntoLex OntoLing OntoTag Penn Stanford... my oh my! There are a lot of linguistic resources available that can be used profitably: BabelNet FrameNet GOLD ISOcat LemonUBY Multitext OmegaNet UBY VerbNet Wiktionary WordNet.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Semantic Technologies for Cultural Heritage.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n presentation, August 2014.\n \n\n\n\n
\n\n\n\n \n \n \"SemanticPaper\n  \n \n \n \"Semantic pdf\n  \n \n \n \"Semantic video\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Misc{Alexiev2014-Malmo,\n  author       = {Vladimir Alexiev},\n  title        = {{Semantic Technologies for Cultural Heritage}},\n  howpublished = {presentation},\n  month        = aug,\n  year         = 2014,\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20140821-Malmo/index.html},\n  url_PDF      = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20140821-Malmo/SemTechCH-Malmo.pdf},\n  url_Video    = {https://youtu.be/n8oGmOu9JEw},\n  keywords     = {semantic technology, ontology, semantic integration, cultural heritage},\n  booktitle    = {{Malmo Linked Data Meetup}},\n  address      = {Malmo, Sweden},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On Compositionality of ISO 25964 Hierarchical Relations (BTG, BTP, BTI).\n \n \n \n \n\n\n \n Alexiev, V.; Lindenthal, J.; and Isaac, A.\n\n\n \n\n\n\n In 13th European Networked Knowledge Organization Systems (NKOS 2014), London, UK, September 2014. \n \n\n\n\n
\n\n\n\n \n \n \"OnPaper\n  \n \n \n \"On pdf\n  \n \n \n \"On slides\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Alexiev2014-NKOS,\n  author       = {Vladimir Alexiev and Jutta Lindenthal and Antoine Isaac},\n  title        = {{On Compositionality of ISO 25964 Hierarchical Relations (BTG, BTP, BTI)}},\n  booktitle    = {{13th European Networked Knowledge Organization Systems (NKOS 2014)}},\n  year         = 2014,\n  month        = sep,\n  address      = {London, UK},\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20140912-NKOS-compositionality/index-full.html},\n  url_PDF      = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20140912-NKOS-compositionality/BTG-BTP-BTI-compositionality.pdf},\n  url_Slides   = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20140912-NKOS-compositionality/index.html},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Semantic Technologies for Cultural Heritage.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n presentation, June 2014.\n \n\n\n\n
\n\n\n\n \n \n \"SemanticPaper\n  \n \n \n \"Semantic pdf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Misc{Alexiev2014-SmartCulture,\n  author       = {Vladimir Alexiev},\n  title        = {{Semantic Technologies for Cultural Heritage}},\n  howpublished = {presentation},\n  month        = jun,\n  year         = 2014,\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20140611-SmartCulture-sem-tech-CH/index.html},\n  url_PDF      = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20140611-SmartCulture-sem-tech-CH/Semantic Technologies for Cultural Heritage.pdf},\n  keywords     = {semantic technology, ontology, semantic integration, cultural heritage},\n  booktitle    = {{SmartCulture Conference}},\n  address      = {Brussels, Belgium},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Doing Business with Semantic Technologies. INFX 598 - Introducing Linked Data: concepts, methods and tools. Information School, University of Washington. Module 9.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n presentation, May 2014.\n \n\n\n\n
\n\n\n\n \n \n \"DoingPaper\n  \n \n \n \"Doing video\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@Misc{Alexiev2014-University-Washington,\n  author       = {Vladimir Alexiev},\n  title        = {{Doing Business with Semantic Technologies. INFX 598 - Introducing Linked Data: concepts, methods and tools. Information School, University of Washington. Module 9}},\n  howpublished = {Guest lecture},\n  month        = may,\n  year         = 2014,\n  url          = {https://github.com/VladimirAlexiev/my/raw/master/pres/Doing%20Business%20with%20Semantic%20Technologies%20(201405%20guest%20lecture).ppt},\n  url_Video    = {https://voicethread.com/myvoice/#thread/5784646/29625471/31274564},\n  keywords     = {semantic technology},\n  howpublished = {presentation},\n  abstract     = {Introduction to Ontotext and some of its products, clients and projects},\n}\n\n
\n
\n\n\n
\n Introduction to Ontotext and some of its products, clients and projects\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n EuropeanaTech Task Force on a Multilingual and Semantic Enrichment Strategy.\n \n \n \n \n\n\n \n Stiller, J.; Isaac, A.; Petras, V.; and others\n\n\n \n\n\n\n Technical Report Europeana, April 2014.\n \n\n\n\n
\n\n\n\n \n \n \"EuropeanaTechPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@TechReport{Europeana2014-enrichment-strategy,\n  author       = {Juliane Stiller and Antoine Isaac and Vivien Petras and others},\n  title        = {{EuropeanaTech Task Force on a Multilingual and Semantic Enrichment Strategy}},\n  institution  = {Europeana},\n  year         = 2014,\n  month        = apr,\n  url          = {https://pro.europeana.eu/project/multilingual-and-semantic-enrichment-strategy},\n  keywords     = {cultural heritage, Europeana, task force, semantic enrichment, text analysis, multilingual, evaluation},\n  abstract     = {The semantic and multilingual enrichment of metadata in Europeana is a core concern as it improves access to the material, defines relations among objects and enables cross-lingual retrieval of documents. The quality of these enrichments is crucial to ensure that highly curated content from providers gets represented correctly across different languages. To ensure that those enrichments unfold their whole potential and act as facilitators of access, a semantic and multilingual enrichment strategy is needed. The EuropeanaTech Task Force on a Multilingual and Semantic Enrichment Strategy set out to analyze datasets in Europeana and to evaluate them with regard to their enrichment potential and the enrichments that were executed. The goal was to drive a strategy for enriching metadata fields adding value for users. To achieve this, the members of the task force held a one-day workshop in Berlin where they analyzed randomly selected datasets from Europeana, their metadata fields and their enrichment potential. This report aggregates the results and derives findings and recommendations regarding the metadata quality (source), vocabulary used (target) and the enrichment process. It was found that especially during mapping and ingestion time, metadata quality issues arise that influence the success of the enrichments. Tackling these issues with better documentation, training and the establishment of quality scores are some of the recommendations in this field. Furthermore, Europeana should encourage the delivery of specialized vocabularies with resolvable URIs which would also lead to less need for enrichments by Europeana itself. With regard to the enrichment process, clear rules for each field need to be established.},\n}\n\n
\n
\n\n\n
\n The semantic and multilingual enrichment of metadata in Europeana is a core concern as it improves access to the material, defines relations among objects and enables cross-lingual retrieval of documents. The quality of these enrichments is crucial to ensure that highly curated content from providers gets represented correctly across different languages. To ensure that those enrichments unfold their whole potential and act as facilitators of access, a semantic and multilingual enrichment strategy is needed. The EuropeanaTech Task Force on a Multilingual and Semantic Enrichment Strategy set out to analyze datasets in Europeana and to evaluate them with regard to their enrichment potential and the enrichments that were executed. The goal was to drive a strategy for enriching metadata fields adding value for users. To achieve this, the members of the task force held a one-day workshop in Berlin where they analyzed randomly selected datasets from Europeana, their metadata fields and their enrichment potential. This report aggregates the results and derives findings and recommendations regarding the metadata quality (source), vocabulary used (target) and the enrichment process. It was found that especially during mapping and ingestion time, metadata quality issues arise that influence the success of the enrichments. Tackling these issues with better documentation, training and the establishment of quality scores are some of the recommendations in this field. Furthermore, Europeana should encourage the delivery of specialized vocabularies with resolvable URIs which would also lead to less need for enrichments by Europeana itself. With regard to the enrichment process, clear rules for each field need to be established.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Semantic Technology in Publishing & Finance.\n \n \n \n \n\n\n \n Kiryakov, A.; and Alexiev, V.\n\n\n \n\n\n\n In Keystone Industrial Panel, ISWC 2014, Riva del Garda, Italy, October 2014. \n \n\n\n\n
\n\n\n\n \n \n \"SemanticPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{KiryakovAlexiev2014-Keystone,\n  author       = {Atanas Kiryakov and Vladimir Alexiev},\n  title        = {{Semantic Technology in Publishing & Finance}},\n  booktitle    = {{Keystone Industrial Panel, ISWC 2014}},\n  year         = 2014,\n  month        = Oct,\n  address      = {Riva del Garda, Italy},\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pres/Semantic Technology in Publishing & Finance (ISWC 2013, Keystone industrial panel).pptx},\n  abstract     = {Triplestores and inference, applications in Finance, text-mining. Projects and solutions for financial media and publishers. Thanks to Atanas Kiryakov for this presentation, I just cut it to size.},\n}\n\n
\n
\n\n\n
\n Triplestores and inference, applications in Finance, text-mining. Projects and solutions for financial media and publishers. Thanks to Atanas Kiryakov for this presentation, I just cut it to size.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Benchmark Design for Reasoning.\n \n \n \n \n\n\n \n Papakonstantinou, V.; Fundulaki, I.; Flouris, G.; and Alexiev, V.\n\n\n \n\n\n\n Technical Report D4.4.2, Linked Data Benchmarking Council project, September 2014.\n \n\n\n\n
\n\n\n\n \n \n \"BenchmarkPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@TechReport{LDBC-BenchmarkReasoning-2014,\n  author       = {Vassilis Papakonstantinou and Irini Fundulaki and Giorgos Flouris and Vladimir Alexiev},\n  title        = {{Benchmark Design for Reasoning}},\n  institution  = {Linked Data Benchmarking Council project},\n  year         = 2014,\n  type         = {Deliverable},\n  number       = {D4.4.2},\n  month        = sep,\n  url          = {https://ldbcouncil.org/post/owl-empowered-sparql-query-optimization/LDBC_D4.4.2_final.pdf},\n  keywords     = {LDBC, benchmark, reasoning, inference},\n  abstract     = {Reasoning (mainly OWL reasoning) has received increasing attention by ontology designers for more accurately representing the domain at hand. To reflect this importance, one of LDBC’s objectives is to identify a set of interesting use cases that consider OWL reasoning constructs (beyond the usual RDFS constructs) that can be used to challenge existing RDF engines or repositories. This Deliverable has two parts: in the first part, we present four different sets of queries that can be used to determine whether RDF query engines take into account OWL constructs during query plan construction or query execution; in the second part we consider how a repository or query engine incorporates and considers business rules, i.e., domain-specific rules that follow common templates, useful in practical applications.},\n}\n\n
\n
\n\n\n
\n Reasoning (mainly OWL reasoning) has received increasing attention by ontology designers for more accurately representing the domain at hand. To reflect this importance, one of LDBC’s objectives is to identify a set of interesting use cases that consider OWL reasoning constructs (beyond the usual RDFS constructs) that can be used to challenge existing RDF engines or repositories. This Deliverable has two parts: in the first part, we present four different sets of queries that can be used to determine whether RDF query engines take into account OWL constructs during query plan construction or query execution; in the second part we consider how a repository or query engine incorporates and considers business rules, i.e., domain-specific rules that follow common templates, useful in practical applications.\n
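\n To illustrate the first part: an engine that ignores OWL constructs misses answers that only follow by inference. A minimal sketch using rdflib plus the owlrl package (the data is invented), where an owl:inverseOf axiom contributes query answers only once the OWL-RL closure is materialized:
<pre>
# Sketch: why OWL constructs matter for query answering.
# Uses rdflib plus the owlrl package for OWL-RL materialization.
from rdflib import Graph
from owlrl import DeductiveClosure, OWLRL_Semantics

g = Graph()
g.parse(data="""
@prefix ex:  <http://example.org/> .
@prefix owl: <http://www.w3.org/2002/07/owl#> .

ex:partOf owl:inverseOf ex:hasPart .
ex:engine ex:partOf ex:car .
""", format="turtle")

q = "SELECT ?x WHERE { <http://example.org/car> <http://example.org/hasPart> ?x }"
print(len(list(g.query(q))))                 # 0: rdflib alone does no inference

DeductiveClosure(OWLRL_Semantics).expand(g)  # materialize the OWL-RL closure
print(len(list(g.query(q))))                 # 1: ex:engine, via owl:inverseOf
</pre>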
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n LDBC Semantic Publishing Benchmark (SPB) v2.0.\n \n \n \n \n\n\n \n Kotsev, V.; Kiryakov, A.; Fundulaki, I.; and Alexiev, V.\n\n\n \n\n\n\n Technical Report v2.0 First Public Draft Release, Linked Data Benchmarking Council project, 2014.\n \n\n\n\n
\n\n\n\n \n \n \"LDBCPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@TechReport{LDBC-SemanticPublishing-2014,\n  author       = {Venelin Kotsev and Atanas Kiryakov and Irini Fundulaki and Vladimir Alexiev},\n  title        = {{{LDBC Semantic Publishing Benchmark}} ({{SPB}}) v2.0},\n  institution  = {Linked Data Benchmarking Council project},\n  year         = 2014,\n  number       = {v2.0 First Public Draft Release},\n  url          = {https://ldbcouncil.org/publication/ldbc-spc-specification/},\n  keywords     = {LDBC, benchmark, semantic publishing, graph databases},\n  abstract     = {The Semantic Publishing Benchmark (SPB) is a LDBC benchmark for RDF database engines inspired by the Media/Publishing industry, particularly by the BBC’s Dynamic Semantic Publishing approach. As of June 2014 the benchmark has reached the state of draft publication. This document describes the current state of the Semantic Publishing Benchmark software. The application scenario behind the benchmark considers a media or a publishing organisation that deals with large volume of streaming content, namely articles and other “creative works” and “media assets”. This content is enriched with metadata that describes it and links it to reference knowledge – taxonomies and databases that include relevant concepts, entities and factual information. This metadata allows publishers to efficiently retrieve relevant content, according to their various business models. From a technology standpoint, the benchmark assumes that an RDF database is used to store both the reference knowledge and the metadata. The main interactions with the repository are (i) updates, that add new metadata or alter the repository, and (ii) aggregation queries, that retrieve content according to various criteria. The engine should handle instantly large number of updates in parallel with massive amount of aggregation queries. This document describes all features of the SPB : data (reference data-sets, ontologies, data generation), query workloads (descriptions of queries used, choke point descriptions), validation of query results and instructions (how to configure and use the benchmark driver, execution, auditing and disclosure rules)},\n}\n\n
\n
\n\n\n
\n The Semantic Publishing Benchmark (SPB) is an LDBC benchmark for RDF database engines inspired by the Media/Publishing industry, particularly by the BBC’s Dynamic Semantic Publishing approach. As of June 2014 the benchmark has reached the state of draft publication. This document describes the current state of the Semantic Publishing Benchmark software. The application scenario behind the benchmark considers a media or a publishing organisation that deals with a large volume of streaming content, namely articles and other “creative works” and “media assets”. This content is enriched with metadata that describes it and links it to reference knowledge – taxonomies and databases that include relevant concepts, entities and factual information. This metadata allows publishers to efficiently retrieve relevant content, according to their various business models. From a technology standpoint, the benchmark assumes that an RDF database is used to store both the reference knowledge and the metadata. The main interactions with the repository are (i) updates, which add new metadata or alter the repository, and (ii) aggregation queries, which retrieve content according to various criteria. The engine should instantly handle a large number of updates in parallel with a massive amount of aggregation queries. This document describes all features of the SPB: data (reference data-sets, ontologies, data generation), query workloads (descriptions of queries used, choke point descriptions), validation of query results, and instructions (how to configure and use the benchmark driver, execution, auditing and disclosure rules).\n
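\n The two interaction types can be sketched in SPARQL: an update that adds a tagged creative work, and an aggregation query that retrieves works by the entity they reference. Below is a toy paraphrase with rdflib; the cwork names echo the BBC-style ontology behind SPB, but are illustrative rather than the benchmark's exact terms.
<pre>
# Toy paraphrase of the SPB workload with rdflib: one update, one aggregation
# query. Namespace and property names are illustrative, not the exact terms.
from rdflib import Graph

g = Graph()
g.update("""
PREFIX cwork: <http://www.bbc.co.uk/ontologies/creativework/>
INSERT DATA {
  <http://example.org/cw/1> a cwork:CreativeWork ;
      cwork:about <http://example.org/entity/Berlin> .
}
""")

rows = g.query("""
PREFIX cwork: <http://www.bbc.co.uk/ontologies/creativework/>
SELECT ?work WHERE { ?work cwork:about <http://example.org/entity/Berlin> }
""")
for (work,) in rows:
    print(work)
</pre>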
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2013\n \n \n (12)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Large-scale Reasoning with a Complex Cultural Heritage Ontology (CIDOC CRM).\n \n \n \n \n\n\n \n Alexiev, V.; Manov, D.; Parvanova, J.; and Petrov, S.\n\n\n \n\n\n\n In Workshop Practical Experiences with CIDOC CRM and its Extensions (CRMEX 2013) at TPDL 2013, volume 1117, Valetta, Malta, September 2013. CEUR WS\n \n\n\n\n
\n\n\n\n \n \n \"Large-scalePaper\n  \n \n \n \"Large-scale slides\n  \n \n \n \"Large-scale preprint\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{Alexiev2013-CRM-reasoning,\n  author       = {Vladimir Alexiev and Dimitar Manov and Jana Parvanova and Svetoslav Petrov},\n  title        = {{Large-scale Reasoning with a Complex Cultural Heritage Ontology (CIDOC CRM)}},\n  booktitle    = {{Workshop Practical Experiences with CIDOC CRM and its Extensions (CRMEX 2013) at TPDL 2013}},\n  year         = 2013,\n  volume       = 1117,\n  month        = sep,\n  address      = {Valetta, Malta},\n  publisher    = {CEUR WS},\n  url          = {https://ceur-ws.org/Vol-1117/paper8.pdf},\n  url_Slides   = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Alexiev2013-CRM-reasoning-slides.ppt},\n  url_Preprint = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Alexiev2013-CRM-reasoning.pdf},\n  keywords     = {cultural heritage, semantic technology, ontology, CIDOC CRM, semantic search, Fundamental Concepts, Fundamental Relations, GraphDB, semantic repository, inference, performance, ResearchSpace},\n  abstract     = {The CIDOC Conceptual Reference Model (CRM) is an important ontology in the Cultural Heritage (CH) domain. CRM is intended mostly as a data integration mechanism, allowing reasoning and discoverability across diverse CH sources represented in CRM. CRM data comprises complex graphs of nodes and properties. An important question is how to search through such complex graphs, since the number of possible combinations is staggering. One answer is the "Fundamental Relations" (FR) approach that maps whole networks of CRM properties to fewer FRs, serving as a "search index" over the CRM semantic web. We present performance results for an FR Search implementation based on OWLIM. This search works over a significant CH dataset: almost 1B statements resulting from 2M objects of the British Museum. This is an exciting demonstration of large-scale reasoning with real-world data over a complex ontology (CIDOC CRM). We present volumetrics, hardware specs, compare the numbers to other repositories hosted by Ontotext, performance results, and compare performance of a SPARQL implementation.},\n}\n\n
\n
\n\n\n
\n The CIDOC Conceptual Reference Model (CRM) is an important ontology in the Cultural Heritage (CH) domain. CRM is intended mostly as a data integration mechanism, allowing reasoning and discoverability across diverse CH sources represented in CRM. CRM data comprises complex graphs of nodes and properties. An important question is how to search through such complex graphs, since the number of possible combinations is staggering. One answer is the \"Fundamental Relations\" (FR) approach that maps whole networks of CRM properties to fewer FRs, serving as a \"search index\" over the CRM semantic web. We present performance results for an FR Search implementation based on OWLIM. This search works over a significant CH dataset: almost 1B statements resulting from 2M objects of the British Museum. This is an exciting demonstration of large-scale reasoning with real-world data over a complex ontology (CIDOC CRM). We present volumetrics, hardware specs and performance results, compare the numbers to other repositories hosted by Ontotext, and compare the performance of a SPARQL implementation.\n
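\n The FR idea can be approximated in SPARQL: materialize a short "fundamental relation" triple from a longer chain of CRM properties, and let searches hit the short triple instead of the full graph pattern. A minimal rdflib sketch with a deliberately simplified chain (the fr: namespace is invented):
<pre>
# Sketch: materializing a "fundamental relation" from a CRM property chain.
# The chain (acquisition -> actor/thing) is simplified; fr: is an invented
# namespace standing in for the FR "search index" properties.
from rdflib import Graph

g = Graph()
g.parse(data="""
@prefix crm: <http://www.cidoc-crm.org/cidoc-crm/> .
@prefix ex:  <http://example.org/> .

ex:acq1 crm:P22_transferred_title_to ex:museum ;
        crm:P24_transferred_title_of ex:statue .
""", format="turtle")

g.update("""
PREFIX crm: <http://www.cidoc-crm.org/cidoc-crm/>
PREFIX fr:  <http://example.org/fr/>
INSERT { ?actor fr:acquired ?thing }
WHERE  { ?acq crm:P22_transferred_title_to ?actor ;
              crm:P24_transferred_title_of ?thing }
""")

# Searches now hit one short triple instead of the two-step CRM pattern.
print(g.query("""ASK { <http://example.org/museum>
                       <http://example.org/fr/acquired>
                       <http://example.org/statue> }""").askAnswer)
</pre>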
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n ResearchSpace as an Example of a VRE Based on CIDOC CRM.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n presentation, April 2013.\n \n\n\n\n
\n\n\n\n \n \n \"ResearchSpacePaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Misc{Alexiev2013-ResearchSpace,\n  author       = {Vladimir Alexiev},\n  title        = {{ResearchSpace as an Example of a VRE Based on CIDOC CRM}},\n  howpublished = {presentation},\n  month        = apr,\n  year         = 2013,\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20130413-ResearchSpace as an Example of a VRE Based on CIDOC CRM.pptx},\n  keywords     = {virtual research environment, ontology, CIDOC CRM, ResearchSpace, VCMS},\n  booktitle    = {{Virtual Center for Medieval Studies (Medioevo Europeo VCMS) Workshop}},\n  address      = {Bucharest, Romania},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n SPARQL 1.1 Syntax Diagrams.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n Ontotext Corp, April 2013.\n \n\n\n\n
\n\n\n\n \n \n \"SPARQLPaper\n  \n \n \n \"SPARQL github\n  \n \n \n \"SPARQL ebnf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Manual{Alexiev2013-SPARQL-diagrams,\n  title        = {{SPARQL 1.1 Syntax Diagrams}},\n  author       = {Vladimir Alexiev},\n  organization = {Ontotext Corp},\n  month        = apr,\n  year         = 2013,\n  url          = {https://rawgit2.com/VladimirAlexiev/grammar-diagrams/master/sparql11-grammar.xhtml},\n  url_Github   = {https://github.com/VladimirAlexiev/grammar-diagrams},\n  url_ebnf     = {https://rawgit2.com/VladimirAlexiev/grammar-diagrams/master/sparql11-grammar.ebnf},\n  abstract     = {Cross-linked SPARQL 1.1 syntax (railroad) diagrams, one per production (173 total). A bit hard to understand: use this for reference, but not for learning SPARQL. Also available: EBNF syntax rules extracted from the SPARQL 1.1 specification, Text file including production numbers and syntax rules},\n}\n\n
\n
\n\n\n
\n Cross-linked SPARQL 1.1 syntax (railroad) diagrams, one per production (173 total). A bit hard to understand: use this for reference, but not for learning SPARQL. Also available: EBNF syntax rules extracted from the SPARQL 1.1 specification, Text file including production numbers and syntax rules\n
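\n Given such an EBNF extract, pulling the productions out for further tooling takes a few lines of Python. A rough sketch, assuming a local copy of the file and productions of the form "Name ::= body" (both assumptions about the extract's layout):
<pre>
# Rough sketch: split an EBNF grammar extract into (name, body) productions.
# The file name and the "Name ::= body" layout are assumptions about the extract.
import re

with open("sparql11-grammar.ebnf", encoding="utf-8") as f:
    text = f.read()

productions = re.findall(r"^\s*(\w+)\s*::=\s*(.*?)(?=^\s*\w+\s*::=|\Z)",
                         text, flags=re.M | re.S)
print(len(productions), "productions")  # SPARQL 1.1 defines 173
for name, body in productions[:3]:
    print(name, "::=", " ".join(body.split()))
</pre>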
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n XSPARQL Syntax Diagrams.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n Ontotext Corp, April 2013.\n \n\n\n\n
\n\n\n\n \n \n \"XSPARQLPaper\n  \n \n \n \"XSPARQL github\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Manual{Alexiev2013-XSPARQL-diagrams,\n  title        = {{XSPARQL Syntax Diagrams}},\n  author       = {Vladimir Alexiev},\n  organization = {Ontotext Corp},\n  month        = apr,\n  year         = 2013,\n  url          = {https://github.com/VladimirAlexiev/grammar-diagrams#xsparql},\n  url_Github   = {https://github.com/VladimirAlexiev/xsparql/blob/master/doc/},\n  abstract     = {Cross-linked XSPARQL syntax (railroad) diagrams, one per production. XSPARQL is a melding of XQuery and SPARQL. Its syntax is pretty large, so the diagrams help.},\n}\n\n
\n
\n\n\n
\n Cross-linked XSPARQL syntax (railroad) diagrams, one per production. XSPARQL is a melding of XQuery and SPARQL. Its syntax is pretty large, so the diagrams help.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n VCMS Project & Proposal Design.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n presentation, October 2013.\n \n\n\n\n
\n\n\n\n \n \n \"VCMSPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Misc{Alexiev2013-VCMS-design,\n  author       = {Vladimir Alexiev},\n  title        = {{VCMS Project & Proposal Design}},\n  howpublished = {presentation},\n  month        = oct,\n  year         = 2013,\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20131013-VCMS Project Design.pptx},\n  keywords     = {Medieval studies, virtual research environment, CIDOC CRM, semantic integration},\n  booktitle    = {{Virtual Center for Medieval Studies (Medioevo Europeo VCMS) Workshop}},\n  address      = {Budapest, Hungary},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Europeana Task Force on EDM-FRBRoo Application Profile.\n \n \n \n \n\n\n \n Doerr, M.; Gradmann, S.; and others\n\n\n \n\n\n\n Technical Report Europeana, May 2013.\n \n\n\n\n
\n\n\n\n \n \n \"EuropeanaPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@TechReport{Europeana2013-EDM-FRBRoo,\n  author       = {Martin Doerr and Stefan Gradmann and others},\n  title        = {{Europeana Task Force on EDM-FRBRoo Application Profile}},\n  institution  = {Europeana},\n  year         = 2013,\n  month        = may,\n  url          = {https://pro.europeana.eu/project/edm-frbroo-application-profile},\n  abstract     = {The EDM – FRBRoo Application Profile Task Force (EFAP-TF) was launched in response to the recommendations from the deliverable D3.4 from Europeana V1.0. This deliverable asked for an application profile that would allow a better representation of the FRBR group 1 entities: work, expression, manifestation and item. Additionally, it was to be conceived as an application profile of FRBRoo where each intellectual contribution (e.g., in the publication process) and the related activity are treated as entities in their own right, and does not depend too much on the notion of a bibliographic record. As a starting point they suggested the mapping of FRBRoo and EDM offered by the CIDOC CRM working group. The aim of the EFAP-TF is to extend, correct or restrict this suggested mapping and provide examples for the use of the combined EDM and FRBRoo namespaces.\nThis report delivers combined models in terms of properties and classes of EDM and FRBRoo illustrated by sample data. Smaller groups have worked on three different examples. The report also provides principles for modeling and mapping rules based on the experiments of the working groups.},\nkeywords={Europeana, task force, EDM, FRBR, FRBRoo, CIDOC CRM, ontology}\n}\n\n
\n
\n\n\n
\n The EDM – FRBRoo Application Profile Task Force (EFAP-TF) was launched in response to the recommendations from the deliverable D3.4 from Europeana V1.0. This deliverable asked for an application profile that would allow a better representation of the FRBR group 1 entities: work, expression, manifestation and item. Additionally, it was to be conceived as an application profile of FRBRoo where each intellectual contribution (e.g., in the publication process) and the related activity are treated as entities in their own right, and does not depend too much on the notion of a bibliographic record. As a starting point they suggested the mapping of FRBRoo and EDM offered by the CIDOC CRM working group. The aim of the EFAP-TF is to extend, correct or restrict this suggested mapping and provide examples for the use of the combined EDM and FRBRoo namespaces. This report delivers combined models in terms of properties and classes of EDM and FRBRoo illustrated by sample data. Smaller groups have worked on three different examples. The report also provides principles for modeling and mapping rules based on the experiments of the working groups.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n ISO 25964 Part 1: Thesauri for information retrieval: RDF/OWL vocabulary, extension of SKOS and SKOS-XL.\n \n \n \n \n\n\n \n Smedt, J. D.; Isaac, A.; Clarke, S. D.; Lindenthal, J.; Zeng, M. L.; Tudhope, D. S.; Will, L.; and Alexiev, V.\n\n\n \n\n\n\n December 2013.\n Ontology\n\n\n\n
\n\n\n\n \n \n \"ISOPaper\n  \n \n \n \"ISO broken\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Manual{ISO-25964-owl,\n  title        = {{ISO 25964 Part 1: Thesauri for information retrieval: RDF/OWL vocabulary, extension of SKOS and SKOS-XL}},\n  author       = {Johan De Smedt and Antoine Isaac and Stella Dextre Clarke and Jutta Lindenthal and Marcia Lei Zeng and Douglas S. Tudhope and Leonard Will and Vladimir Alexiev},\n  month        = dec,\n  year         = 2013,\n  note         = {Ontology},\n  url          = {https://lov.linkeddata.es/dataset/lov/vocabs/iso-thes},\n  keywords     = {thesauri, vocabularies, ISO 25964, ontology},\n  type         = {Ontology},\n  url_broken   = {https://purl.org/iso25964/skos-thes},\n  abstract     = {OWL ontology representing the newest ISO standard on thesauri},\n}\n\n
\n
\n\n\n
\n OWL ontology representing the newest ISO standard on thesauri\n
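\n In use, iso-thes refines SKOS by distinguishing the BTG/BTP/BTI flavours of skos:broader (broaderGeneric, broaderPartitive, broaderInstantial). A small usage sketch with rdflib; the concept URIs are invented, while the namespace follows the published purl:
<pre>
# Usage sketch for the ISO 25964 (iso-thes) extension of SKOS, via rdflib.
# Concept URIs are invented; iso-thes distinguishes generic vs partitive BT.
from rdflib import Graph, Literal, Namespace, URIRef
from rdflib.namespace import SKOS

ISOTHES = Namespace("http://purl.org/iso25964/skos-thes#")

g = Graph()
g.bind("skos", SKOS)
g.bind("isothes", ISOTHES)

wheel = URIRef("http://example.org/c/wheel")
car = URIRef("http://example.org/c/car")
g.add((wheel, SKOS.prefLabel, Literal("wheel", lang="en")))
g.add((wheel, SKOS.broader, car))              # plain SKOS hierarchy
g.add((wheel, ISOTHES.broaderPartitive, car))  # BTP: wheel is part of car

print(g.serialize(format="turtle"))
</pre>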
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Europeana Creative. EDM Endpoint. Custom Views.\n \n \n \n \n\n\n \n Ikonomov, N.; Simeonov, B.; Parvanova, J.; and Alexiev, V.\n\n\n \n\n\n\n In Digital Presentation and Preservation of Cultural and Scientific Heritage (DiPP 2013), Veliko Tarnovo, Bulgaria, September 2013. \n \n\n\n\n
\n\n\n\n \n \n \"EuropeanaPaper\n  \n \n \n \"Europeana slides\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{Ikonomov2013-EuropeanaCreative-EDM,\n  author       = {Nikola Ikonomov and Boyan Simeonov and Jana Parvanova and Vladimir Alexiev},\n  title        = {{Europeana Creative. EDM Endpoint. Custom Views}},\n  booktitle    = {{Digital Presentation and Preservation of Cultural and Scientific Heritage (DiPP 2013)}},\n  year         = 2013,\n  month        = sep,\n  address      = {Veliko Tarnovo, Bulgaria},\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Ikonomov2013-EuropeanaCreative-EDM.pdf},\n  url_Slides   = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Ikonomov2013-EuropeanaCreative-EDM-slides.pdf},\n  keywords     = {cultural heritage, Europeana, EDM, ESE, semantic technology, RDF, SKOS, URI, Ontotext GraphDB, semantic repository, SPARQL, endpoint},\n  abstract     = {The paper discusses the Europeana Creative project which aims to facilitate re-use of cultural heritage metadata and content by the creative industries. The paper focuses on the contribution of Ontotext to the project activities. The Europeana Data Model (EDM) is further discussed as a new proposal for structuring the data that Europeana will ingest, manage and publish. The advan-tages of using EDM instead of the current ESE metadata set are highlighted. Finally, Ontotext's EDM Endpoint is presented, based on OWLIM semantic re-pository and SPARQL query language. A user-friendly RDF view is presented in order to illustrate the possibilities of Forest - an extensible modular user interface framework for creating linked data and semantic web applications.},\n}\n\n
\n
\n\n\n
\n The paper discusses the Europeana Creative project, which aims to facilitate re-use of cultural heritage metadata and content by the creative industries. The paper focuses on the contribution of Ontotext to the project activities. The Europeana Data Model (EDM) is further discussed as a new proposal for structuring the data that Europeana will ingest, manage and publish. The advantages of using EDM instead of the current ESE metadata set are highlighted. Finally, Ontotext's EDM Endpoint is presented, based on the OWLIM semantic repository and the SPARQL query language. A user-friendly RDF view is presented in order to illustrate the possibilities of Forest - an extensible modular user interface framework for creating linked data and semantic web applications.\n
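\n Querying such an EDM endpoint is plain SPARQL over EDM classes. A minimal sketch with SPARQLWrapper, assuming a placeholder endpoint URL (edm:ProvidedCHO and dc:title are standard EDM/Dublin Core terms):
<pre>
# Sketch: querying an EDM SPARQL endpoint for object titles.
# The endpoint URL is a placeholder; substitute a live EDM endpoint.
from SPARQLWrapper import SPARQLWrapper, JSON

sparql = SPARQLWrapper("http://example.org/edm/sparql")  # hypothetical endpoint
sparql.setQuery("""
PREFIX edm: <http://www.europeana.eu/schemas/edm/>
PREFIX dc:  <http://purl.org/dc/elements/1.1/>
SELECT ?cho ?title WHERE {
  ?cho a edm:ProvidedCHO ;
       dc:title ?title .
} LIMIT 10
""")
sparql.setReturnFormat(JSON)
for row in sparql.query().convert()["results"]["bindings"]:
    print(row["cho"]["value"], "-", row["title"]["value"])
</pre>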
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The Conceptual Reference Model Revealed. Quality contextual data for research and engagement: A British Museum case study.\n \n \n \n \n\n\n \n Oldman, D.; Mahmud, J.; and Alexiev, V.\n\n\n \n\n\n\n Technical Report ResearchSpace Project, July 2013.\n Draft 0.98\n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@TechReport{OldmanMahmudAlexiev2013-CRM-revealed,\n  author       = {Dominic Oldman and Joshan Mahmud and Vladimir Alexiev},\n  title        = {{The Conceptual Reference Model Revealed. Quality contextual data for research and engagement: A British Museum case study}},\n  institution  = {ResearchSpace Project},\n  year         = 2013,\n  note         = {Draft 0.98},\n  month        = jul,\n  url          = {https://github.com/VladimirAlexiev/my/tree/master/pubs/BritishMuseum-CRM-mapping},\n  keywords     = {cultural heritage, museum informatics, ontology, CIDOC CRM, semantic mapping, British Museum, ResearchSpace},\n  pages        = {359 pages},\n  abstract     = {Contents: 169p: Main body, including discussion, illustrations and mapping diagrams. 7p: Association Codes (see details at BM Association Mapping v2). 49p: Example Object Graph. 134p: mapping implementation as RDFer configuration files},\n}\n\n
\n
\n\n\n
\n Contents: 169p: Main body, including discussion, illustrations and mapping diagrams. 7p: Association Codes (see details at BM Association Mapping v2). 49p: Example Object Graph. 134p: mapping implementation as RDFer configuration files\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n British Museum CIDOC CRM Fundamental Relations Implementation.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n Technical Report ResearchSpace Project, 2013.\n \n\n\n\n
\n\n\n\n \n \n \"BritishPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@TechReport{Alexiev2013-FR-implementation,\n  author       = {Vladimir Alexiev},\n  title        = {British Museum CIDOC CRM Fundamental Relations Implementation},\n  institution  = {ResearchSpace Project},\n  year         = 2013,\n  url          = {https://github.com/VladimirAlexiev/my/blob/master/pubs/BritishMuseum-CRM-Fundamental-Relations/README.md},\n  keywords     = {cultural heritage, semantic technology, ontology, CIDOC CRM, semantic search, Fundamental Concepts, Fundamental Relations, GraphDB, semantic repository, inference, performance, ResearchSpace},\n  abstract     = {Detailed description of CIDOC CRM Fundamental Relations Implementation for the British Museum collection using GraphDB rules},\n}\n\n
\n
\n\n\n
\n Detailed description of CIDOC CRM Fundamental Relations Implementation for the British Museum collection using GraphDB rules\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n RDF Data and Image Annotations in ResearchSpace.\n \n \n \n \n\n\n \n Parvanova, J.; Alexiev, V.; and Kostadinov, S.\n\n\n \n\n\n\n In International Workshop on Collaborative Annotations in Shared Environment: metadata, vocabularies and techniques in the Digital Humanities (DH-CASE 2013). Collocated with DocEng 2013, Florence, Italy, September 2013. \n \n\n\n\n
\n\n\n\n \n \n \"RDFPaper\n  \n \n \n \"RDF preprint\n  \n \n \n \"RDF slides\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{Parvanova2013-SemanticAnnotation,\n  author       = {Jana Parvanova and Vladimir Alexiev and Stanislav Kostadinov},\n  title        = {{RDF Data and Image Annotations in ResearchSpace}},\n  booktitle    = {{International Workshop on Collaborative Annotations in Shared Environment: metadata, vocabularies and techniques in the Digital Humanities (DH-CASE 2013). Collocated with DocEng 2013}},\n  year         = 2013,\n  month        = sep,\n  address      = {Florence, Italy},\n  url          = {https://dl.acm.org/doi/10.1145/2517978.2517997},\n  url_Preprint = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Parvanova2013-SemanticAnnotation.pdf},\n  url_Slides   = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Parvanova2013-SemanticAnnotation-slides.pdf},\n  keywords     = {Computer-supported collaborative work, Annotation, Museum informatics, Cultural heritage, ResearchSpace, SVG, Web Annotation, ontology, British Museum, CIDOC CRM},\n  abstract     = {This paper presents the approaches to data and image annotation in ResearchSpace (https://www.researchspace.org), an Andrew W. Mellon Foundation funded project led by the British Museum aimed at supporting collaborative internet research, information sharing and web applications for the cultural heritage scholarly community},\n  isbn         = {978-1-4503-2199-0},\n  doi          = {10.1145/2517978.2517997},\n}\n\n\n
\n
\n\n\n
\n This paper presents the approaches to data and image annotation in ResearchSpace (https://www.researchspace.org), an Andrew W. Mellon Foundation funded project led by the British Museum aimed at supporting collaborative internet research, information sharing and web applications for the cultural heritage scholarly community\n
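\n The image-annotation side can be illustrated with the Web Annotation (oa:) model named in the keywords: an annotation ties a textual body to a region of an image. A minimal rdflib sketch; the URIs and SVG outline are invented and the selector detail is simplified:
<pre>
# Minimal sketch of an image annotation in the Web Annotation (oa:) model.
# URIs and the SVG outline are invented; a literal body uses oa:bodyValue.
from rdflib import BNode, Graph, Literal, Namespace, URIRef
from rdflib.namespace import RDF

OA = Namespace("http://www.w3.org/ns/oa#")

g = Graph()
g.bind("oa", OA)

anno = URIRef("http://example.org/anno/1")
target, selector = BNode(), BNode()

g.add((anno, RDF.type, OA.Annotation))
g.add((anno, OA.bodyValue, Literal("Signature of the artist")))
g.add((anno, OA.hasTarget, target))
g.add((target, RDF.type, OA.SpecificResource))
g.add((target, OA.hasSource, URIRef("http://example.org/image/portrait.jpg")))
g.add((target, OA.hasSelector, selector))
g.add((selector, RDF.type, OA.SvgSelector))
g.add((selector, RDF.value, Literal('<svg><polygon points="10,10 60,10 35,60"/></svg>')))

print(g.serialize(format="turtle"))
</pre>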
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Practical Experiences with CIDOC CRM and its Extensions (CRMEX 2013), Workshop at 17th International Conference on Theory and Practice of Digital Libraries (TPDL 2013).\n \n \n \n \n\n\n \n Alexiev, V.; Ivanov, V.; and Grinberg, M.,\n editors.\n \n\n\n \n\n\n\n Volume 1117.CEUR WS. Valetta, Malta, September 2013.\n \n\n\n\n
\n\n\n\n \n \n \"PracticalPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Proceedings{TPDL2013-CRMEX2013,\n  title        = {{Practical Experiences with CIDOC CRM and its Extensions (CRMEX 2013), Workshop at 17th International Conference on Theory and Practice of Digital Libraries (TPDL 2013)}},\n  year         = 2013,\n  booktitle    = {{Practical Experiences with CIDOC CRM and its Extensions (CRMEX 2013), Workshop at 17th International Conference on Theory and Practice of Digital Libraries (TPDL 2013)}},\n  editor       = {Vladimir Alexiev and Vladimir Ivanov and Maurice Grinberg},\n  volume       = 1117,\n  address      = {Valetta, Malta},\n  month        = sep,\n  publisher    = {CEUR WS},\n  url          = {https://ceur-ws.org/Vol-1117/},\n  keywords     = {CIDOC CRM, RDF, Ontology, cultural heritage, practical applications},\n  abstract     = {The CIDOC CRM (international standard ISO 21127:2006) is a conceptual model and ontology with a fundamental role in many data integration efforts in the Digital Libraries and Cultural Heritage (CH) domain. The goal of this workshop is to describe and showcase systems using CRM at their core, exchange experience about the practical use of CRM, describe difficulties for the practical application of CRM, and share approaches for overcoming such difficulties. The ultimate objective of this workshop is to encourage the wider practical adoption of CRM},\n}\n\n
\n
\n\n\n
\n The CIDOC CRM (international standard ISO 21127:2006) is a conceptual model and ontology with a fundamental role in many data integration efforts in the Digital Libraries and Cultural Heritage (CH) domain. The goal of this workshop is to describe and showcase systems using CRM at their core, exchange experience about the practical use of CRM, describe difficulties for the practical application of CRM, and share approaches for overcoming such difficulties. The ultimate objective of this workshop is to encourage the wider practical adoption of CRM\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2012\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Types and Annotations for CIDOC CRM Properties.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n In Digital Presentation and Preservation of Cultural and Scientific Heritage (DiPP2012) conference (Invited report), Veliko Tarnovo, Bulgaria, September 2012. \n \n\n\n\n
\n\n\n\n \n \n \"TypesPaper\n  \n \n \n \"Types slides\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{Alexiev2012-CRM-properties,\n  author       = {Vladimir Alexiev},\n  title        = {{Types and Annotations for CIDOC CRM Properties}},\n  booktitle    = {{Digital Presentation and Preservation of Cultural and Scientific Heritage (DiPP2012) conference (Invited report)}},\n  year         = 2012,\n  month        = sep,\n  address      = {Veliko Tarnovo, Bulgaria},\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Alexiev2012-CRM-Properties.pdf},\n  url_Slides   = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Alexiev2012-CRM-Properties-presentation.ppt},\n  keywords     = {cultural heritage, semantic technology, ontology, CIDOC CRM, properties, attribute assignment, reification, property reification},\n  abstract     = {The CIDOC CRM provides an extensive ontology for describing entities and properties appearing in cultural heritage (CH) documentation, history and archeology. CRM provides some means for describing information about properties (property types, attribute assignment, and "long-cuts") and guidelines for extending the vocabulary. However, these means are far from complete, and in some cases there is little guidance how to "implement" them in RDF. In this article we outline the prob-lems, relate them to established RDF patterns and mechanisms, and describe several implementation alternatives.},\n}\n\n
\n
\n\n\n
\n The CIDOC CRM provides an extensive ontology for describing entities and properties appearing in cultural heritage (CH) documentation, history and archeology. CRM provides some means for describing information about properties (property types, attribute assignment, and \"long-cuts\") and guidelines for extending the vocabulary. However, these means are far from complete, and in some cases there is little guidance on how to \"implement\" them in RDF. In this article we outline the problems, relate them to established RDF patterns and mechanisms, and describe several implementation alternatives.\n
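\n One of the established RDF mechanisms the paper weighs is plain RDF reification: a statement node from which extra information about a property instance (type, certainty, source) can hang. A neutral sketch; the ex: names and the ex:certainty annotation property are invented:
<pre>
# Sketch: annotating a property instance via standard RDF reification.
# The ex: names and the ex:certainty annotation property are invented.
from rdflib import BNode, Graph, Literal, Namespace
from rdflib.namespace import RDF

EX = Namespace("http://example.org/")

g = Graph()
s, p, o = EX.vase, EX.madeIn, EX.athens
g.add((s, p, o))               # the base assertion

stmt = BNode()                 # statement node carrying the annotations
g.add((stmt, RDF.type, RDF.Statement))
g.add((stmt, RDF.subject, s))
g.add((stmt, RDF.predicate, p))
g.add((stmt, RDF.object, o))
g.add((stmt, EX.certainty, Literal(0.8)))

print(g.serialize(format="turtle"))
</pre>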
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Implementing CIDOC CRM Search Based on Fundamental Relations and OWLIM Rules.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n In Workshop on Semantic Digital Archives (SDA 2012), part of International Conference on Theory and Practice of Digital Libraries (TPDL 2012), volume 912, Paphos, Cyprus, September 2012. CEUR WS\n \n\n\n\n
\n\n\n\n \n \n \"ImplementingPaper\n  \n \n \n \"Implementing slides\n  \n \n \n \"Implementing published\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{Alexiev2012-CRM-search,\n  author       = {Vladimir Alexiev},\n  title        = {{Implementing CIDOC CRM Search Based on Fundamental Relations and OWLIM Rules}},\n  booktitle    = {{Workshop on Semantic Digital Archives (SDA 2012), part of International Conference on Theory and Practice of Digital Libraries (TPDL 2012)}},\n  year         = 2012,\n  volume       = 912,\n  month        = sep,\n  address      = {Paphos, Cyprus},\n  publisher    = {CEUR WS},\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Alexiev2012-CRM-FR-search.pdf},\n  url_Slides   = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Alexiev2012-CRM-Search-presentation.pdf},\n  url_Published= {https://ceur-ws.org/Vol-912/paper8.pdf},\n  keywords     = {cultural heritage, semantic technology, ontology, CIDOC CRM, semantic search, Fundamental Concepts, Fundamental Relations, GraphDB, semantic repository, inference, performance, ResearchSpace},\n  abstract     = {The CIDOC CRM provides an ontology for describing entities, properties and relationships appearing in cultural heritage (CH) documentation, history and archeology. CRM promotes shared understanding by providing an extensible semantic framework that any CH information can be mapped to. CRM data is usually represented in semantic web format (RDF) and comprises complex graphs of nodes and properties. An important question is how a user can search through such complex graphs, since the number of possible combinations is staggering. One approach "compresses" the semantic network by mapping many CRM entity classes to a few "Fundamental Concepts" (FC), and mapping whole networks of CRM proper-ties to fewer "Fundamental Relations" (FR). These FC and FRs serve as a "search index" over the CRM semantic web and allow the user to use a simpler query vocabulary. We describe an implementation of CRM FR Search based on OWLIM Rules, done as part of the ResearchSpace (RS) project. We describe the technical de-tails, problems and difficulties encountered, benefits and disadvantages of using OWLIM rules, and preliminary performance results. We provide implementation experience that can be valuable for further implementation, definition and maintenance of CRM FRs.},\n}\n\n
\n
\n\n\n
\n The CIDOC CRM provides an ontology for describing entities, properties and relationships appearing in cultural heritage (CH) documentation, history and archeology. CRM promotes shared understanding by providing an extensible semantic framework that any CH information can be mapped to. CRM data is usually represented in semantic web format (RDF) and comprises complex graphs of nodes and properties. An important question is how a user can search through such complex graphs, since the number of possible combinations is staggering. One approach \"compresses\" the semantic network by mapping many CRM entity classes to a few \"Fundamental Concepts\" (FC), and mapping whole networks of CRM properties to fewer \"Fundamental Relations\" (FR). These FC and FRs serve as a \"search index\" over the CRM semantic web and allow the user to use a simpler query vocabulary. We describe an implementation of CRM FR Search based on OWLIM Rules, done as part of the ResearchSpace (RS) project. We describe the technical details, problems and difficulties encountered, benefits and disadvantages of using OWLIM rules, and preliminary performance results. We provide implementation experience that can be valuable for further implementation, definition and maintenance of CRM FRs.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2011\n \n \n (3)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n University IT PM Education: NBU and ESI/SEI Masters Programs.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n presentation, November 2011.\n \n\n\n\n
\n\n\n\n \n \n \"UniversityPaper\n  \n \n \n \"University other\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Misc{Alexiev2010-PMI-BG-IT-PM,\n  author       = {Vladimir Alexiev},\n  title        = {{University IT PM Education: NBU and ESI/SEI Masters Programs}},\n  howpublished = {presentation},\n  month        = nov,\n  year         = 2011,\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20101111-PMI-BG-IT-PM.ppt},\n  url_Other    = {https://docplayer.net/12848719-University-it-pm-education-nbu-and-esi-sei-masters-programs-vladimir-alexiev-phd-pmp-ontotext-lab-sirma-group-holding.html},\n  keywords     = {Masters Program, NBU, European Software Institute, ESI, IT project management, PM, university curriculum},\n  booktitle    = {{PMI Bulgaria Meeting}},\n  address      = {Sofia, Bulgaria},\n  abstract     = {Describes the Masters program in IT Project Management at New Bulgarian University, and the development of a Software Engineering Masters Program by ESI Bulgaria, sponsored by CMU SEI and America for Bulgaria Foundation},\n}\n\n
\n
\n\n\n
\n Describes the Masters program in IT Project Management at New Bulgarian University, and the development of a Software Engineering Masters Program by ESI Bulgaria, sponsored by CMU SEI and America for Bulgaria Foundation\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Comparing Ontotext KIM and Apache Stanbol.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n presentation, September 2011.\n \n\n\n\n
\n\n\n\n \n \n \"ComparingPaper\n  \n \n \n \"Comparing appendix\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Misc{Alexiev2011-KIM-Stanbol,\n  author       = {Vladimir Alexiev},\n  title        = {{Comparing Ontotext KIM and Apache Stanbol}},\n  month        = sep,\n  year         = 2011,\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pres/Comparing Ontotext KIM and Apache Stanbol (201109 pres).ppt},\n  url_Appendix = {https://rawgit2.com/VladimirAlexiev/my/master/pres/Comparing Ontotext KIM and Apache Stanbol (201109 appendix).doc},\n  keywords     = {semantic enrichment, text analysis, Ontotext KIM, Apache Stanbol},\n  howpublished = {presentation},\n  institution  = {Ontotext Corp},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Semantic Technologies for Cultural Heritage.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n presentation, May 2011.\n \n\n\n\n
\n\n\n\n \n \n \"SemanticPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Misc{Alexiev2011-SemtechForCulturalHeritage,\n  author       = {Vladimir Alexiev},\n  title        = {{Semantic Technologies for Cultural Heritage}},\n  month        = may,\n  year         = 2011,\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Alexiev2011-SemtechForCulturalHeritage.pdf},\n  keywords     = {semantic technology, ontology, semantic integration, cultural heritage},\n  howpublished = {presentation},\n  booktitle    = {{Global Smart SOC Initiative Summit}},\n  address      = {Seoul, Korea},\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2010\n \n \n (3)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Cost-effective e-Government Services: Export Control System phase 2 (ECS2).\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n presentation, February 2010.\n \n\n\n\n
\n\n\n\n \n \n \"Cost-effectivePaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Misc{Alexiev2010-costEffectiveEGov,\n  author       = {Vladimir Alexiev},\n  title        = {{Cost-effective e-Government Services: Export Control System phase 2 (ECS2)}},\n  month        = feb,\n  year         = 2010,\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Alexiev2010-costEffectiveEGov.pdf},\n  keywords     = {model-driven development, e-customs, e-government, Export Control System},\n  howpublished = {presentation},\n  booktitle    = {{Bulgaria-Korea IT Experts Workshop}},\n  address      = {Sofia, Bulgaria},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An Approach to Teaching IT Project Management in a Masters Program.\n \n \n \n \n\n\n \n Alexiev, V.; and Asenova, P.\n\n\n \n\n\n\n In 6th Annual International Conference on Education in Computer Science, Fulda and Munich, Germany, June 2010. \n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{AlexievAsenova2010-TeachingIT_PM,\n  author       = {Vladimir Alexiev and Petya Asenova},\n  title        = {{An Approach to Teaching IT Project Management in a Masters Program}},\n  booktitle    = {{6th Annual International Conference on Education in Computer Science}},\n  year         = 2010,\n  month        = jun,\n  address      = {Fulda and Munich, Germany},\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/AlexievAsenova2010-TeachingIT_PM.pdf},\n  keywords     = {Masters Program, NBU, IT project management, PM, university curriculum},\n  abstract     = {Many Bulgarian IT professionals manage projects but their knowledge and skills in this area are based mainly on their own experience, which is often obtained through trial and error. Although the project manager (PM) has a crucial role for project success, the university curriculum in Bulgaria does not sufficiently answer these business needs. Some aspects of PM are included in university courses on Software Engineering and some short courses on IT PM are offered, but overall this matter is not covered in depth in any national university. Having in mind this real need, we proposed a new Masters Program on IT PM hoping it will meet the interest of many students, representatives of the software business. This paper presents an approach to prepare PMs for the Bulgarian IT industry through a Masters Program, developed in cooperation between the New Bulgarian University (NBU) and the Institute of Mathematics and Informatics (IMI) of the Bulgarian Academy of Science (BAS). We describe the background, objectives and design of the program, and relations with the business.},\n}\n\n
\n
\n\n\n
\n Many Bulgarian IT professionals manage projects but their knowledge and skills in this area are based mainly on their own experience, which is often obtained through trial and error. Although the project manager (PM) has a crucial role for project success, the university curriculum in Bulgaria does not sufficiently answer these business needs. Some aspects of PM are included in university courses on Software Engineering and some short courses on IT PM are offered, but overall this matter is not covered in depth in any national university. Having in mind this real need, we proposed a new Masters Program on IT PM hoping it will meet the interest of many students, representatives of the software business. This paper presents an approach to prepare PMs for the Bulgarian IT industry through a Masters Program, developed in cooperation between the New Bulgarian University (NBU) and the Institute of Mathematics and Informatics (IMI) of the Bulgarian Academy of Science (BAS). We describe the background, objectives and design of the program, and relations with the business.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Implementing complex e-Government solutions with open source and BPM: Architecture of Export Control System phase 2 (ECS2).\n \n \n \n \n\n\n \n Alexiev, V.; Mitev, A.; and Bukev, A.\n\n\n \n\n\n\n presentation, 2010.\n \n\n\n\n
\n\n\n\n \n \n \"ImplementingPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Misc{AlexievMitevBukev2010-eGovBPM,\n  author       = {Vladimir Alexiev and Adrian Mitev and Alexander Bukev},\n  title        = {{Implementing complex e-Government solutions with open source and BPM: Architecture of Export Control System phase 2 (ECS2)}},\n  year         = 2010,\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/AlexievMitevBukev2010-eGovBPM.pdf},\n  keywords     = {model-driven development, business process management, BPMS, e-customs, e-government, Export Control System, system architecture},\n  howpublished = {presentation},\n  booktitle    = {{Java2Days Conference}},\n  address      = {Sofia, Bulgaria},\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2009\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Electronic Export Declarations Ease the Work of the Customs Agency and Traders.\n \n \n \n \n\n\n \n Alexiev, V.; and Martev, T.\n\n\n \n\n\n\n Computerworld (in Bulgarian), 46. December 2009.\n \n\n\n\n
\n\n\n\n \n \n \"ElectronicPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Article{AlexievMartev2009-ElectronicExportBG,\n  author       = {Vladimir Alexiev and Teodor Martev},\n  title        = {{Electronic Export Declarations Ease the Work of the Customs Agency and Traders}},\n  journal      = {{Computerworld (in Bulgarian)}},\n  year         = 2009,\n  volume       = 46,\n  month        = dec,\n  annote       = {ECS2 was nominated for IT Project of the year 2010},\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/AlexievMartev2009-ElectronicExportBG.pdf},\n  keywords     = {model-driven development, e-customs, e-government, Export Control System},\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2005\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Information Integration with Ontologies: Experiences from an Industrial Showcase.\n \n \n \n \n\n\n \n Alexiev, V.; Breu, M.; de Bruijn, J.; Fensel, D.; Lara, R.; and Lausen, H.\n\n\n \n\n\n\n John Wiley and Sons, February 2005.\n \n\n\n\n
\n\n\n\n \n \n \"InformationPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Book{AlexievBreuBruijn2005-InformationIntegration,\n  author       = {Vladimir Alexiev and Michael Breu and Jos de Bruijn and Dieter Fensel and Ruben Lara and Holger Lausen},\n  title        = {{Information Integration with Ontologies: Experiences from an Industrial Showcase}},\n  publisher    = {John Wiley and Sons},\n  year         = 2005,\n  month        = feb,\n  url= {https://www.wiley.com/en-gb/Information+Integration+with+Ontologies%3A+Experiences+from+an+Industrial+Showcase-p-9780470010488},\n  keywords     = {semantic integration, ontology, semantic technology, ontology-based data access},\n  chapter      = 2,\n  isbn         = {978-0-470-01048-8},\n  abstract     = {Disparate information, spread over various sources, in various formats, and with inconsistent semantics is a major obstacle for enterprises to use this information at its full potential. Information Grids should allow for the effective access, extraction and linking of dispersed information. Currently Europe's corporations spend over 10 Billion EUR to deal with these problems. This book will demonstrate the applicability of grid technologies to industry. To this end, it gives a detailed insight on how ontology technology can be used to manage dispersed information assets more efficiently. The book is based on experiences from the COG (Corporate Ontology Grid) project, carried out jointly by three leading industrial players and the Digital Enterprise Research Institute Austria. Through comparisons of this project with alternative technologies and projects, it provides hands-on experience and best practice examples to act as a reference guide for their development. Information Integration with Ontologies: Ontology based Information Integration in an Industrial Setting is ideal for technical experts and computer researchers in the IT-area looking to achieve integration of heterogeneous information and apply ontology technologies and techniques in practice. It will also be of great benefit to technical decision makers seeking information about ontology technologies and the scientific audience, interested in achievements towards the application of ontologies in an industrial setting.},\n}\n\n
\n
\n\n\n
\n Disparate information, spread over various sources, in various formats, and with inconsistent semantics is a major obstacle for enterprises to use this information at its full potential. Information Grids should allow for the effective access, extraction and linking of dispersed information. Currently Europe's corporations spend over 10 Billion EUR to deal with these problems. This book will demonstrate the applicability of grid technologies to industry. To this end, it gives a detailed insight on how ontology technology can be used to manage dispersed information assets more efficiently. The book is based on experiences from the COG (Corporate Ontology Grid) project, carried out jointly by three leading industrial players and the Digital Enterprise Research Institute Austria. Through comparisons of this project with alternative technologies and projects, it provides hands-on experience and best practice examples to act as a reference guide for their development. Information Integration with Ontologies: Ontology based Information Integration in an Industrial Setting is ideal for technical experts and computer researchers in the IT-area looking to achieve integration of heterogeneous information and apply ontology technologies and techniques in practice. It will also be of great benefit to technical decision makers seeking information about ontology technologies and the scientific audience, interested in achievements towards the application of ontologies in an industrial setting.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2004\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Data Integration Survey.\n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n Technical Report European project \"Corporate Ontology Grid\" (COG), September 2004.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@TechReport{Alexiev2004-DataIntegration,\n  author       = {Vladimir Alexiev},\n  title        = {{Data Integration Survey}},\n  institution  = {European project "Corporate Ontology Grid" (COG)},\n  type         = {Deliverable},\n  month        = sep,\n  year         = 2004,\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 1999\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Non-deterministic Interaction Nets.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n Ph.D. Thesis, University of Alberta, 1999.\n \n\n\n\n
\n\n\n\n \n \n \"Non-deterministicPaper\n  \n \n \n \"Non-deterministic slides\n  \n \n \n \"Non-deterministic other\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@PhdThesis{Alexiev1999-thesis,\n  author       = {Vladimir Alexiev},\n  title        = {{Non-deterministic Interaction Nets}},\n  school       = {University of Alberta},\n  year         = 1999,\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Alexiev1999-thesis.pdf},\n  url_Slides   = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Alexiev1999-thesisPresentation.pdf},\n  url_Other    = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Alexiev1999-thesis-2up.pdf},\n  abstract     = {The Interaction Nets (IN) of Lafont are a graphical formalism used to model parallel computation. Their genesis can be traced back to the Proof Nets of Linear Logic. They enjoy several nice theoretical properties, amongst them pure locality of interaction, strong confluence, computational completeness, syntactically-definable deadlock-free fragments, combinatorial completeness (existence of a Universal IN). They also have nice "pragmatic" properties: they are simple and elegant, intuitive, can capture aspects of computation at widely varying levels of abstraction. Compared to term and graph rewriting systems, INs are much simpler (a subset of such systems that imposes several constraints on the rewriting process), but are still computationally complete (can capture the lambda-calculus). INs are a refinement of graph rewriting which keeps only the essential features in the system.\nConventional INs are strongly confluent, and are therefore unsuitable for the modeling of non-deterministic systems such as process calculi and concurrent object-oriented programming. We study four different ways of "breaking" the confluence of INs by introducing various extensions:\n- IN with Multiple (reduction) Rules (INMR): Allow more than one reduction rule per redex.\n- IN with Multiple Principal Ports (INMPP): Allow more than one active port per node.\n- IN with MultiPorts (INMP): Allow more than one connection per port.\n- IN with Multiple Connections (INMC): Allow hyper-edges (in the graph-theoretical sense), i.e. connections between more than two ports.\n\nWe study in considerable detail the relative expressive power of these systems, both by representing various programming examples in them, and by constructing inter-representations that translate nets from one system to another.\nWe study formally a translation from the finite pi-calculus to a system that we call Multi-Interaction Nets: MIN=INMP+INMPP. We prove the faithfulness of the translation to the pi-calculus processes that it represents, both structural and operational (completeness and soundness of reduction). We show that unlike the pi-calculus, our translation implements the Prefix operation of the pi-calculus in a distributed and purely local manner, and implements explicitly the distribution and duplication of values to the corresponding occurrences of a variable.\nWe compare our translation to other graphical and combinatory representations of the pi-calculus, such as the pi-nets of Milner, the Interaction Diagrams of Parrow, and the Concurrent Combinators of Honda and Yoshida.\nThe original paper on IN (Lafont, 1990) states that INs were designed to be simple and practical; to be a "programming language that can be used for the design of interactive software". However, to date INs have been used only for theoretical investigations. This thesis is mostly devoted to a hands-on exploration of applications of IN to various "programming problems".}\n}\n\n
\n
\n\n\n
\n The Interaction Nets (IN) of Lafont are a graphical formalism used to model parallel computation. Their genesis can be traced back to the Proof Nets of Linear Logic. They enjoy several nice theoretical properties, amongst them pure locality of interaction, strong confluence, computational completeness, syntactically-definable deadlock-free fragments, combinatorial completeness (existence of a Universal IN). They also have nice \"pragmatic\" properties: they are simple and elegant, intuitive, can capture aspects of computation at widely varying levels of abstraction. Compared to term and graph rewriting systems, INs are much simpler (a subset of such systems that imposes several constraints on the rewriting process), but are still computationally complete (can capture the lambda-calculus). INs are a refinement of graph rewriting which keeps only the essential features in the system. Conventional INs are strongly confluent, and are therefore unsuitable for the modeling of non-deterministic systems such as process calculi and concurrent object-oriented programming. We study four different ways of \"breaking\" the confluence of INs by introducing various extensions: - IN with Multiple (reduction) Rules (INMR): Allow more than one reduction rule per redex. - IN with Multiple Principal Ports (INMPP): Allow more than one active port per node. - IN with MultiPorts (INMP): Allow more than one connection per port. - IN with Multiple Connections (INMC): Allow hyper-edges (in the graph-theoretical sense), i.e. connections between more than two ports. We study in considerable detail the relative expressive power of these systems, both by representing various programming examples in them, and by constructing inter-representations that translate nets from one system to another. We study formally a translation from the finite pi-calculus to a system that we call Multi-Interaction Nets: MIN=INMP+INMPP. We prove the faithfulness of the translation to the pi-calculus processes that it represents, both structural and operational (completeness and soundness of reduction). We show that unlike the pi-calculus, our translation implements the Prefix operation of the pi-calculus in a distributed and purely local manner, and implements explicitly the distribution and duplication of values to the corresponding occurrences of a variable. We compare our translation to other graphical and combinatory representations of the pi-calculus, such as the pi-nets of Milner, the Interaction Diagrams of Parrow, and the Concurrent Combinators of Honda and Yoshida. The original paper on IN (Lafont, 1990) states that INs were designed to be simple and practical; to be a \"programming language that can be used for the design of interactive software\". However, to date INs have been used only for theoretical investigations. This thesis is mostly devoted to a hands-on exploration of applications of IN to various \"programming problems\".\n
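The simplest of the four extensions to see in action is INMR: conventional INs are strongly confluent because an active pair admits at most one rule, so permitting several rules per redex already makes the outcome depend on the choice. The Python toy below strips interaction nets down to a multiset of cells, with no auxiliary ports or wiring, and all agent names invented; it is only meant to show the loss of confluence, not to model real nets.

# Radically simplified sketch of interaction-net reduction; agent names and
# the rule table are invented. The point: several rules per redex (INMR)
# break the strong confluence of conventional INs.
import random

RULES = {
    # an active pair of agents maps to its possible rewrites;
    # conventional INs allow exactly one entry here, INMR allows several
    ("Send", "Recv"): [["Done"], ["Retry"]],
}

def step(net):
    """Find one active pair and apply a non-deterministically chosen rule."""
    for i in range(len(net)):
        for j in range(i + 1, len(net)):
            if (net[i], net[j]) in RULES:
                rest = [c for k, c in enumerate(net) if k not in (i, j)]
                return rest + random.choice(RULES[(net[i], net[j])])
    return None  # no active pair: the net is in normal form

net = ["Send", "Recv"]
while (nxt := step(net)) is not None:
    net = nxt
print(net)  # ['Done'] on some runs, ['Retry'] on others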
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 1998\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Distributed Synchronization in a pi-Calculus with Bidirectional Communication.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n Technical Report University of Alberta, January 1998.\n \n\n\n\n
\n\n\n\n \n \n \"DistributedPaper\n  \n \n \n \"Distributed citeseer\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@TechReport{Alexiev1998-distributedSynchronization,\n  author       = {Vladimir Alexiev},\n  title        = {{Distributed Synchronization in a pi-Calculus with Bidirectional Communication}},\n  institution  = {University of Alberta},\n  year         = 1998,\n  month        = jan,\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Alexiev1998-distributedSynchronization.pdf},\n  url_CiteSeer = {https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.301.8273},\n  keywords     = {pi-calculus, input prefix, distributed synchronization, communication},\n  abstract     = {The (input) prefix operation of the pi-calculus expresses global synchronization (blocking) of the prefixed process. We show how to implement synchronization in a completely distributed manner, by using bidirectional atomic communication and the principle of provision (data-dependency-based synchronization)},\n}\n\n
\n
\n\n\n
\n The (input) prefix operation of the pi-calculus expresses global synchronization (blocking) of the prefixed process. We show how to implement synchronization in a completely distributed manner, by using bidirectional atomic communication and the principle of provision (data-dependency-based synchronization)\n
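For reference, the standard synchronous pi-calculus communication rule makes the blocking explicit: the input-prefixed continuation Q cannot move until an output on the same channel supplies a value,

\[
\bar{x}\langle y\rangle.P \;\mid\; x(z).Q \;\longrightarrow\; P \;\mid\; Q\{y/z\}
\]

Provision replaces this blocking discipline with data-dependency-based synchronization: a process advances as far as its data dependencies allow, and waits only at the points where a value is actually required.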
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Representing the Finite pi-calculus in Multi-Interaction Nets: Concurrency = Interaction + Non-determinism.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n Technical Report University of Alberta, April 1998.\n \n\n\n\n
\n\n\n\n \n \n \"RepresentingPaper\n  \n \n \n \"Representing citeseer\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@TechReport{Alexiev1998-finitePi,\n  author       = {Vladimir Alexiev},\n  title        = {{Representing the Finite pi-calculus in Multi-Interaction Nets: Concurrency = Interaction + Non-determinism}},\n  institution  = {University of Alberta},\n  year         = 1998,\n  month        = apr,\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Alexiev1998-finitePi.pdf},\n  url_CiteSeer = {https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.301.8381},\n  keywords     = {pi-calculus, interaction nets, linear logic, concurrent computation,distributed computation},\n  abstract     = {We extend the Interaction Nets of Lafont with some non-determinism capabilities and then show how to implement the finite monadic pi-calculus in that system},\n}\n\n
\n
\n\n\n
\n We extend the Interaction Nets of Lafont with some non-determinism capabilities and then show how to implement the finite monadic pi-calculus in that system\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 1996\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Targeted Communication in Linear Objects.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n In Artificial Intelligence: Methodology, Systems, Applications (AIMSA'96), September 1996. IOS Press\n Also University of Alberta TR94-14\n\n\n\n
\n\n\n\n \n \n \"TargetedPaper\n  \n \n \n \"Targeted citeseer\n  \n \n \n \"Targeted tr\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{Alexiev1996-targetedCommunication,\n  author       = {Vladimir Alexiev},\n  title        = {{Targeted Communication in Linear Objects}},\n  booktitle    = {{Artificial Intelligence: Methodology, Systems, Applications (AIMSA'96)}},\n  year         = 1996,\n  month        = sep,\n  publisher    = {IOS Press},\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Alexiev1996-targetedCommunication.pdf},\n  url_CiteSeer = {https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.47.7107},\n  url_TR       = {https://era.library.ualberta.ca/downloads/bc386k59r},\n  keywords     = {Linear Objects, communication, broadcasting, object-oriented programming, logic programming, linear logic},\n  note         = {Also University of Alberta TR94-14},\n  abstract     = {Linear Objects (LO) of Andreoli and Pareschi is the first proposal to integrate object-oriented programming into logic programming based on Girard's Linear Logic (LL). In LO each object is represented by a separate open node of a proof tree. This ``insulates'' objects from one another which allows the attributes of an object to be represented as a multiset of atoms and thus facilitates easy retrieval and update of attributes. However this separation hinders communication between objects. Communication in LO is achieved through broadcasting to all objects which in our opinion is infeasible from a computational viewpoint. This paper proposes a refined communication mechanism for LO which uses explicit communication channels specified by the programmer. We name it TCLO which stands for ``Targeted Communication in LO''. Although channel specification puts some burden on the programmer, we demonstrate that the language is expressive enough by redoing some of the examples given for LO. Broadcasting can be done in a controlled manner. LO can be seen as a special case of TCLO where only one global channel (the forum) is used.},\n}\n\n
\n
\n\n\n
\n Linear Objects (LO) of Andreoli and Pareschi is the first proposal to integrate object-oriented programming into logic programming based on Girard's Linear Logic (LL). In LO each object is represented by a separate open node of a proof tree. This ``insulates'' objects from one another which allows the attributes of an object to be represented as a multiset of atoms and thus facilitates easy retrieval and update of attributes. However this separation hinders communication between objects. Communication in LO is achieved through broadcasting to all objects which in our opinion is infeasible from a computational viewpoint. This paper proposes a refined communication mechanism for LO which uses explicit communication channels specified by the programmer. We name it TCLO which stands for ``Targeted Communication in LO''. Although channel specification puts some burden on the programmer, we demonstrate that the language is expressive enough by redoing some of the examples given for LO. Broadcasting can be done in a controlled manner. LO can be seen as a special case of TCLO where only one global channel (the forum) is used.\n
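The difference between LO's global forum and TCLO's explicit channels can be caricatured in a few lines of Python; this is not LO/TCLO clause syntax, and all class and object names are invented for the illustration.

# Broadcast (LO-style forum) versus targeted channels (TCLO-style); toy code.

class Forum:
    """LO: one global channel; every message reaches every object."""
    def __init__(self):
        self.objects = []
    def broadcast(self, msg):
        for obj in self.objects:
            obj.receive(msg)

class Channel:
    """TCLO: an explicit channel connecting only the objects subscribed to it."""
    def __init__(self):
        self.subscribers = []
    def send(self, msg):
        for obj in self.subscribers:
            obj.receive(msg)

class Obj:
    def __init__(self, name):
        self.name = name
    def receive(self, msg):
        print(f"{self.name} got {msg!r}")

a, b, c = Obj("a"), Obj("b"), Obj("c")
forum = Forum(); forum.objects = [a, b, c]
forum.broadcast("hello")   # all three objects must handle the message

ch = Channel(); ch.subscribers = [b]
ch.send("hello")           # only b handles it; a and c never see it

The broadcast version costs a delivery to every object per message, which is the computational infeasibility the paper objects to; the channel version touches only the subscribers the programmer named, and the forum reappears as the degenerate case of a single channel everyone subscribes to.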
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 1995\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n The Event Calculus as a Linear Logic Program.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n Technical Report TR95-24, University of Alberta, September 1995.\n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n \n \"The citeseer\n  \n \n \n \"The tr\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@TechReport{Alexiev1995-eventCalculus,\n  author       = {Vladimir Alexiev},\n  title        = {{The Event Calculus as a Linear Logic Program}},\n  institution  = {University of Alberta},\n  year         = 1995,\n  number       = {TR95-24},\n  month        = sep,\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Alexiev1995-eventCalculus.pdf},\n  url_CiteSeer = {https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.30.9953},\n  url_TR       = {https://era.library.ualberta.ca/downloads/9g54xk065},\n  keywords     = {event calculus, linear logic, negation as failure, knowledge update},\n  abstract     = {The traditional presentation of Kowalski's Event Calculus as a logic program uses Negation-as-Failure (NAF) in an essential way to support persistence of fluents. In this paper we present an implementation of Event Calculus as a purely logical (without NAF) Linear Logic (LL) program. This work demonstrates some of the internal non-monotonic features of LL and its suitability for knowledge update (as opposed to knowledge revision). Although NAF is an ontologically sufficient solution to the frame problem, the LL solution is implementationally superior. Handling of incomplete temporal descriptions and support for ramifications (derived fluents) are also considered.},\n}\n\n
\n
\n\n\n
\n The traditional presentation of Kowalski's Event Calculus as a logic program uses Negation-as-Failure (NAF) in an essential way to support persistence of fluents. In this paper we present an implementation of Event Calculus as a purely logical (without NAF) Linear Logic (LL) program. This work demonstrates some of the internal non-monotonic features of LL and its suitability for knowledge update (as opposed to knowledge revision). Although NAF is an ontologically sufficient solution to the frame problem, the LL solution is implementationally superior. Handling of incomplete temporal descriptions and support for ramifications (derived fluents) are also considered.\n
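The reason LL needs no frame axiom is that a linear resource stays in the proof context until something consumes it. A schematic rendering in LaTeX (illustrative clauses only, not the report's actual program, and glossing over which facts would be reusable, i.e. !-marked):

\[
\mathit{happens}(e) \otimes \mathit{initiates}(e,f) \multimap \mathit{holds}(f)
\qquad
\mathit{holds}(f) \otimes \mathit{happens}(e') \otimes \mathit{terminates}(e',f) \multimap \mathbf{1}
\]

Initiation produces the resource holds(f), termination consumes it, and in between holds(f) simply remains available, which is exactly the persistence that the classical presentation obtains through NAF.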
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Object-Oriented Logic Programming based on Linear Logic.\n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n Thesis proposal, February 1995.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Misc{Alexiev1995-thesisProposal,\n  author       = {Vladimir Alexiev},\n  title        = {{Object-Oriented Logic Programming based on Linear Logic}},\n  month        = feb,\n  year         = 1995,\n  keywords     = {object-oriented programming, logic programming, multiparadigm programming, linear logic},\n  institution  = {University of Alberta},\n  howpublished = {Thesis proposal},\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 1994\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Applications of Linear Logic to Computation: An Overview.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n Logic Journal of the IGPL, 2(1): 77-107. March 1994.\n Also University of Alberta TR93-18, December 1993\n\n\n\n
\n\n\n\n \n \n \"ApplicationsPaper\n  \n \n \n \"Applications published\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@Article{Alexiev1994-applicationsLinearLogic,\n  author       = {Vladimir Alexiev},\n  title        = {{Applications of Linear Logic to Computation: An Overview}},\n  journal      = {{Logic Journal of the IGPL}},\n  year         = 1994,\n  volume       = 2,\n  number       = 1,\n  pages        = {77-107},\n  month        = mar,\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Alexiev1994-applicationsLinearLogic.pdf},\n  url_Published= {https://jigpal.oxfordjournals.org/content/2/1/77},\n  keywords     = {linear logic, survey},\n  issn         = {1368-9894},\n  note         = {Also University of Alberta TR93-18, December 1993},\n  doi          = {10.1093/jigpal/2.1.77},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Boolean Constraint Propagation Networks.\n \n \n \n \n\n\n \n Marinov, G.; Alexiev, V.; and Djonev, Y.\n\n\n \n\n\n\n In Jorrand, P.; and Sgurev, V., editor(s), Artificial Intelligence: Methodology, Systems, and Applications (AIMSA'94), pages 109-118, Sofia, Bulgaria, September 1994. World Scientific Publishing\n \n\n\n\n
\n\n\n\n \n \n \"BooleanPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{MarinovAlexievDjonev1994-BCPN,\n  author       = {Georgi Marinov and Vladimir Alexiev and Yavor Djonev},\n  title        = {{Boolean Constraint Propagation Networks}},\n  booktitle    = {{Artificial Intelligence: Methodology, Systems, and Applications (AIMSA'94)}},\n  year         = 1994,\n  editor       = {P. Jorrand and V. Sgurev},\n  pages        = {109-118},\n  month        = sep,\n  address      = {Sofia, Bulgaria},\n  publisher    = {World Scientific Publishing},\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/MarinovAlexievDjonev1994-BCPN.pdf},\n  keywords     = {constraint propagation, inference, knowledge-based system, expert system},\n  doi          = {10.5555/212090.212113},\n  isbn         = {981-02-1853-2},\n  abstract     = {This paper describes a particular inference mechanism which has been successfully used for the implementation of an expert system and a generic shell supporting consulting-type expert systems. The main features of Boolean Constraint Propagation Networks (BCPN) are: the inference flows in all directions, unlike inference modes of forward or backward chaining systems; all possible consequences of a fact are derived as soon as the user enters the fact, therefore the system is very interactive; if the user withdraws an assertion then all propositions depending on it are retracted; the inference architecture is simple and uniform. After a general description of BCPN we give an account of the problems encountered and the approaches we used to solve them. Some possible extensions of the mechanism and its applicability to various areas are also discussed. The current version of BCPN is written in C++ and took about one man-year to develop.},\n}\n\n
\n
\n\n\n
\n This paper describes a particular inference mechanism which has been successfully used for the implementation of an expert system and a generic shell supporting consulting-type expert systems. The main features of Boolean Constraint Propagation Networks (BCPN) are: the inference flows in all directions, unlike inference modes of forward or backward chaining systems; all possible consequences of a fact are derived as soon as the user enters the fact, therefore the system is very interactive; if the user withdraws an assertion then all propositions depending on it are retracted; the inference architecture is simple and uniform. After a general description of BCPN we give an account of the problems encountered and the approaches we used to solve them. Some possible extensions of the mechanism and its applicability to various areas are also discussed. The current version of BCPN is written in C++ and took about one man-year to develop.\n
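The "inference flows in all directions" behaviour is what separates BCPN from forward- or backward-chaining engines, and it is easy to demonstrate on a single constraint. Below is a toy Python propagator; the class and variable names are invented, and withdrawal is simulated naively by re-running propagation from the remaining user assertions, whereas the paper retracts dependent propositions selectively.

# Toy boolean constraint propagation in the spirit of BCPN (details invented).

class AndConstraint:
    """c <-> (a and b): derives whichever variable becomes determined."""
    def __init__(self, a, b, c):
        self.a, self.b, self.c = a, b, c

    def propagate(self, values):
        a, b, c = values.get(self.a), values.get(self.b), values.get(self.c)
        out = {}
        if a is True and b is True:
            out[self.c] = True
        if a is False or b is False:
            out[self.c] = False
        if c is True:
            out[self.a] = True
            out[self.b] = True
        if c is False and a is True:
            out[self.b] = False
        if c is False and b is True:
            out[self.a] = False
        return out

def run(assertions, constraints):
    """Propagate to a fixpoint; raise on contradictory derivations."""
    values = dict(assertions)
    changed = True
    while changed:
        changed = False
        for con in constraints:
            for var, val in con.propagate(values).items():
                if values.get(var) is None:
                    values[var] = val
                    changed = True
                elif values[var] != val:
                    raise ValueError(f"contradiction on {var}")
    return values

net = [AndConstraint("a", "b", "c")]
print(run({"c": True}, net))              # asserting c forces a=True, b=True
print(run({"c": False, "a": True}, net))  # backward direction: forces b=False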
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 1993\n \n \n (3)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n A (Not Very Much) Annotated Bibliography on Integrating Object-Oriented and Logic Programming.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n Technical Report University of Alberta, March 1993.\n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n \n \"A citeseer\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@TechReport{Alexiev1993-annotatedBibliography,\n  author       = {Vladimir Alexiev},\n  title        = {{A (Not Very Much) Annotated Bibliography on Integrating Object-Oriented and Logic Programming}},\n  institution  = {University of Alberta},\n  year         = 1993,\n  month        = mar,\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Alexiev1993-annotatedBibliography.pdf},\n  url_CiteSeer = {https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.54.6168},\n  keywords     = {object-oriented programming, logic programming, multiparadigm programming, bibliography},\n  abstract     = {An overview of existing applications of Linear Logic (LL) to issues of computation. After a substantial introduction to LL, it discusses the implications of LL to functional programming, logic programming, concurrent and object-oriented programming and some other applications of LL, like semantics of negation in LP, non-monotonic issues in AI planning, etc. Although the overview covers pretty much the state-of-the-art in this area, by necessity many of the works are only mentioned and referenced, but not discussed in any considerable detail. The paper does not presuppose any previous exposition to LL, and is addressed more to computer scientists (probably with a theoretical inclination) than to logicians. The paper contains over 140 references, of which some 80 are about applications of LL.},\n}\n\n
\n
\n\n\n
\n An overview of existing applications of Linear Logic (LL) to issues of computation. After a substantial introduction to LL, it discusses the implications of LL to functional programming, logic programming, concurrent and object-oriented programming and some other applications of LL, like semantics of negation in LP, non-monotonic issues in AI planning, etc. Although the overview covers pretty much the state-of-the-art in this area, by necessity many of the works are only mentioned and referenced, but not discussed in any considerable detail. The paper does not presuppose any previous exposition to LL, and is addressed more to computer scientists (probably with a theoretical inclination) than to logicians. The paper contains over 140 references, of which some 80 are about applications of LL.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Mutable Object State for Object-Oriented Logic Programming: A Survey.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n Technical Report TR93-15, University of Alberta, August 1993.\n \n\n\n\n
\n\n\n\n \n \n \"MutablePaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@TechReport{Alexiev1993-mutableObjectState,\n  author       = {Vladimir Alexiev},\n  title        = {{Mutable Object State for Object-Oriented Logic Programming: A Survey}},\n  institution  = {University of Alberta},\n  year         = 1993,\n  number       = {TR93-15},\n  month        = aug,\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Alexiev1993-mutableObjectState.pdf},\n  keywords     = {object-oriented programming, logic programming, multiparadigm programming, mutable, object state, survey},\n  abstract     = {One of the most difficult problems on the way to an integration of Object-Oriented and Logic Programming is the modeling of changeable object state (i.e. object dynamics) in a particular logic in order not to forfeit the declarative nature of LP. Classical logic is largely unsuitable for such a task, because it adopts a general (both temporally and spatially), Platonic notion of validity, whereas object state changes over time and is local to an object. This paper presents the problem and surveys the state-of-the-art approaches to its solution, as well as some emerging, promising new approaches. The paper tries to relate the different approaches, to evaluate their merits and deficiencies and to identify promising directions for development. The emphasis in this survey is on efficient implementation of state change, one which would be suitable for the lowest fundamental level of a general OOLP language. The following approaches are covered: Assert/Retract, Declarative Database Updates and Transaction Logic, Modal and Dynamic Logics, Perpetual Objects, Logical Objects and Linear Objects, Linear Logic, Rewriting Logic and MaudeLog.},\n}\n\n
\n
\n\n\n
\n One of the most difficult problems on the way to an integration of Object-Oriented and Logic Programming is the modeling of changeable object state (i.e. object dynamics) in a particular logic in order not to forfeit the declarative nature of LP. Classical logic is largely unsuitable for such a task, because it adopts a general (both temporally and spatially), Platonic notion of validity, whereas object state changes over time and is local to an object. This paper presents the problem and surveys the state-of-the-art approaches to its solution, as well as some emerging, promising new approaches. The paper tries to relate the different approaches, to evaluate their merits and deficiencies and to identify promising directions for development. The emphasis in this survey is on efficient implementation of state change, one which would be suitable for the lowest fundamental level of a general OOLP language. The following approaches are covered: Assert/Retract, Declarative Database Updates and Transaction Logic, Modal and Dynamic Logics, Perpetual Objects, Logical Objects and Linear Objects, Linear Logic, Rewriting Logic and MaudeLog.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Object-Oriented and Logic-Based Knowledge Representation.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n Technical Report University of Alberta, 1993.\n Term project\n\n\n\n
\n\n\n\n \n \n \"Object-OrientedPaper\n  \n \n \n \"Object-Oriented citeseer\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@TechReport{Alexiev1993-objectOriented,\n  author       = {Vladimir Alexiev},\n  title        = {{Object-Oriented and Logic-Based Knowledge Representation}},\n  institution  = {University of Alberta},\n  year         = 1993,\n  note         = {Term project},\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pubs/Alexiev1993-objectOriented.pdf},\n  url_CiteSeer = {https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.54.2657},\n  keywords     = {object-oriented programming, logic programming, multiparadigm programming, knowledge representation},\n  abstract     = {This paper is a survey of a number of languages/systems based on both Object-Oriented and Logic Programming and designed expressly for Knowledge Representation tasks. My goal in the paper is to argue that the integration of these two paradigms (particularly the synergism that emerges from such an integration) forms a stable basis for Knowledge Representation at the symbolic level. I try to support this claim both by examples from the papers surveyed and by considerations in a more general context. Some more advanced topics concerning special-purpose non-classic logics are also discussed.},\n}\n\n
\n
\n\n\n
\n This paper is a survey of a number of languages/systems based on both Object-Oriented and Logic Programming and designed expressly for Knowledge Representation tasks. My goal in the paper is to argue that the integration of these two paradigms (particularly the synergism that emerges from such an integration) forms a stable basis for Knowledge Representation at the symbolic level. I try to support this claim both by examples from the papers surveyed and by considerations in a more general context. Some more advanced topics concerning special-purpose non-classic logics are also discussed.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n\n\n\n
\n\n\n \n\n \n \n \n \n\n
\n"}; document.write(bibbase_data.data);