var bibbase_data = {"data":"\"Loading..\"\n\n
\n\n \n\n \n\n \n \n\n \n\n \n \n\n \n\n \n
\n generated by\n \n \"bibbase.org\"\n\n \n
\n \n\n
\n\n \n\n\n
\n\n Excellent! Next you can\n create a new website with this list, or\n embed it in an existing web page by copying & pasting\n any of the following snippets.\n\n
\n JavaScript\n (easiest)\n
\n \n <script src=\"https://bibbase.org/show?bib=http://rawgit2.com/VladimirAlexiev/my/master/Alexiev-bibliography.bib&jsonp=1&theme=side&jsonp=1\"></script>\n \n
\n\n PHP\n
\n \n <?php\n $contents = file_get_contents(\"https://bibbase.org/show?bib=http://rawgit2.com/VladimirAlexiev/my/master/Alexiev-bibliography.bib&jsonp=1&theme=side\");\n print_r($contents);\n ?>\n \n
\n\n iFrame\n (not recommended)\n
\n \n <iframe src=\"https://bibbase.org/show?bib=http://rawgit2.com/VladimirAlexiev/my/master/Alexiev-bibliography.bib&jsonp=1&theme=side\"></iframe>\n \n
\n\n

\n For more details see the documention.\n

\n
\n
\n\n
\n\n This is a preview! To use this list on your own web site\n or create a new web site from it,\n create a free account. The file will be added\n and you will be able to edit it in the File Manager.\n We will show you instructions once you've created your account.\n
\n\n
\n\n

To the site owner:

\n\n

Action required! Mendeley is changing its\n API. In order to keep using Mendeley with BibBase past April\n 14th, you need to:\n

    \n
  1. renew the authorization for BibBase on Mendeley, and
  2. \n
  3. update the BibBase URL\n in your page the same way you did when you initially set up\n this page.\n
  4. \n
\n

\n\n

\n \n \n Fix it now\n

\n
\n\n
\n\n\n
\n \n \n
\n
\n  \n 2024\n \n \n (7)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Graphwise Interests in Industrial Data.\n \n \n \n \n\n\n \n Alexiev, V.; and Krüger, L.\n\n\n \n\n\n\n Presentation at Industrial Data Ontology: Meet Graph Software Vendors, December 2024.\n \n\n\n\n
\n\n\n\n \n \n \"Graphwise pdf\n  \n \n \n \"Graphwise slides\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Misc{IDO-vendor-meeting-2024,\n  author       = {Vladimir Alexiev and Lutz Krüger},\n  title        = {Graphwise Interests in Industrial Data},\n  howpublished = {Presentation at Industrial Data Ontology: Meet Graph Software Vendors},\n  month        = dec,\n  year         = 2024,\n  url_PDF      = {https://drive.google.com/file/d/11xnlpDNVg9lvHVvRrJ1fdwjUwGnjd_iV/view},\n  url_Slides   = {https://docs.google.com/presentation/d/1dRJBTCJmDYKhe4WHrA6p3DF1nqrunWvGjxCAUGmbRnA/edit},\n  keywords     = {Ontology-Based Interoperability, OBI, Industrial Data Ontology, IDO, ISO 23726-3, Digital Product Passports, DPP, Asset Administration Shell, AAS, semantic layer, LLM querying},\n  date         = {2024-12-04},\n  abstract     = {The Ontology-Based Interoperability (OBI) and Industrial Data Ontology (IDO) ISO 23726-3 community invited semantic software vendors to gauge their interest in IDO and outline next steps for its adotion and role in the harmopnization This presentation outlines the interest and experience of Graphwise (Ontotext and Semantic Web Company) in various industrial domains: trade, logistics, AECO, elctricity, oil and gas, energy, manufacturing. It then provides an overview of an important use case (Digital Product Passports) and a conceptual architecture involving semantic layer, knowledge graph, sensor data, LLM querying, etc},\n}\n\n
\n
\n\n\n
\n The Ontology-Based Interoperability (OBI) and Industrial Data Ontology (IDO) ISO 23726-3 community invited semantic software vendors to gauge their interest in IDO and outline next steps for its adotion and role in the harmopnization This presentation outlines the interest and experience of Graphwise (Ontotext and Semantic Web Company) in various industrial domains: trade, logistics, AECO, elctricity, oil and gas, energy, manufacturing. It then provides an overview of an important use case (Digital Product Passports) and a conceptual architecture involving semantic layer, knowledge graph, sensor data, LLM querying, etc\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Raising the Role of Vocabulary Hubs for Semantic Data Interoperability in Dataspaces.\n \n \n \n \n\n\n \n David, R.; Ivanov, P.; and Alexiev, V.\n\n\n \n\n\n\n In Third workshop on Semantic Interoperability in Data Spaces, Budapest, Hungary, October 2024. \n \n\n\n\n
\n\n\n\n \n \n \"Raising proceedings\n  \n \n \n \"Raising slides\n  \n \n \n \"Raising pdf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{semInteropDataspaces-2024,\n  author       = {Robert David and Petar Ivanov and Vladimir Alexiev},\n  title        = {Raising the Role of Vocabulary Hubs for Semantic Data Interoperability in Dataspaces},\n  booktitle    = {Third workshop on Semantic Interoperability in Data Spaces},\n  year         = 2024,\n  month        = oct,\n  address      = {Budapest, Hungary},\n  url_Proceedings = {https://semantic.internationaldataspaces.org/workshop-2024/},\n  url_Slides   = {https://rawgit2.com/underpin-project/papers/main/EBDVF-2024/EBDVF-2024-presentation/presentation.html},\n  url_PDF      = {https://raw.githubusercontent.com/underpin-project/papers/refs/heads/main/EBDVF-2024/EBDVF-2024-presentation/presentation.pdf},\n  keywords     = {dataspaces, semantic interoperability, semantic technologies, ontologies, vocabulary hub, oil and gas, renewable energy, refineries, windfarms},\n  date         = {2024-10-02},\n  abstract     = {Dataspaces are an important enabler for industrial sharing data (either commercially licensed or private). Europe is investing heavily into sectoral dataspaces, federation and orchestration platforms like SIMPL, Eclipse DSC, GXFS, etc. Still, dataspaces enable shared data access, but do not solve the data interoperability problem. For that, the consumer would like to see the data from different providers in a harmonized and semantically integrated form. The Vocabulary Hub service (part of the IDS RAM) provides a repository for ontologies and vocabularies. We describe an approach of raising the role of the Vocabulary Hub to also allow richer metadata description (e.g. the meaning of every column in a tabular dataset), and binding semantic descriptions to ingested datasets, thus providing on-the-fly data semantization and easing data querying. 
This is achieved through the integration of two commercial semantic products (PoolParty and GraphDB), leveraging the partnership between the Semantic Web Company and Ontotext, and is being developed within the frame of the Digital Europe project UNDERPIN, with applications to refinery and wind farm data.},\n}\n\n
\n
\n\n\n
\n Dataspaces are an important enabler for industrial sharing data (either commercially licensed or private). Europe is investing heavily into sectoral dataspaces, federation and orchestration platforms like SIMPL, Eclipse DSC, GXFS, etc. Still, dataspaces enable shared data access, but do not solve the data interoperability problem. For that, the consumer would like to see the data from different providers in a harmonized and semantically integrated form. The Vocabulary Hub service (part of the IDS RAM) provides a repository for ontologies and vocabularies. We describe an approach of raising the role of the Vocabulary Hub to also allow richer metadata description (e.g. the meaning of every column in a tabular dataset), and binding semantic descriptions to ingested datasets, thus providing on-the-fly data semantization and easing data querying. This is achieved through the integration of two commercial semantic products (PoolParty and GraphDB), leveraging the partnership between the Semantic Web Company and Ontotext, and is being developed within the frame of the Digital Europe project UNDERPIN, with applications to refinery and wind farm data.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n AEC3PO: Architecture, Engineering, Construction Compliance Checking and Permitting Ontology.\n \n \n \n \n\n\n \n Vakaj, E.; Patlakas, P.; Beach, T.; Lefrançois, M.; Dridi, A.; and Alexiev, V.\n\n\n \n\n\n\n February 2024.\n \n\n\n\n
\n\n\n\n \n \n \"AEC3PO:Paper\n  \n \n \n \"AEC3PO: githib\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Manual{AEC3PO-ontology-2024,\n  title        = {{AEC3PO: Architecture, Engineering, Construction Compliance Checking and Permitting Ontology}},\n  author       = {Edlira Vakaj and Panagiotis Patlakas and Thomas Beach and Maxime Lefrançois and Amna Dridi and Vladimir Alexiev},\n  month        = feb,\n  year         = 2024,\n  url          = {https://w3id.org/lbd/aec3po/},\n  keywords     = {AECO, automated compliance checking, building regulations, construction regulations, land use, ontologies, ACCORD, Architecture Engineering and Construction Compliance Checking and Permitting Ontology, AEC3PO, compliance checking, CO2 emission, Sustainability, domain-specific rule language, RASE},\n  url_githib   = {https://github.com/accord-project/aec3po/},\n  date         = {2024-02-15},\n  abstract     = {The Architecture, Engineering, Construction Compliance Checking and Permitting Ontology (AEC3PO) is an ontology developed to support the automated compliance checking of construction, renovation, and demolition works. It has been developed in the context of the Automated Compliance Checking for Construction, Renovation or Demolition Works (ACCORD) project, an ERC/Horizon-funded project that aims to digitalise permitting and compliance processes. AEC3PO aims to capture all aspects of building compliance and building permitting in Architecture, Engineering, and Construction (AEC), across different regulatory systems. It allows the modelling of aspects such as: building and infrastructure codes, regulations, and standards, and their compliance requirements; building and infrastructure permitting processes and documentation; building and infrastructure compliance and permitting actors. The ontology requirements are derived from the rule formalisation methodology that aims to semantise regulations and provide an open format for machine-readable rules. The ontology is built using Semantic Web technologies, adhering to standards like RDF, OWL, and SKOS. 
It also integrates popular ontologies such as Dublin Core Terms (DCT) and Europe's Legislation Identifier (ELI) in order to create a structured and interconnected knowledge graph. This allows professionals to explore, query, and understand various aspects of the compliance and permitting processes more comprehensively.},\n}\n\n
\n
\n\n\n
\n The Architecture, Engineering, Construction Compliance Checking and Permitting Ontology (AEC3PO) is an ontology developed to support the automated compliance checking of construction, renovation, and demolition works. It has been developed in the context of the Automated Compliance Checking for Construction, Renovation or Demolition Works (ACCORD) project, an ERC/Horizon-funded project that aims to digitalise permitting and compliance processes. AEC3PO aims to capture all aspects of building compliance and building permitting in Architecture, Engineering, and Construction (AEC), across different regulatory systems. It allows the modelling of aspects such as: building and infrastructure codes, regulations, and standards, and their compliance requirements; building and infrastructure permitting processes and documentation; building and infrastructure compliance and permitting actors. The ontology requirements are derived from the rule formalisation methodology that aims to semantise regulations and provide an open format for machine-readable rules. The ontology is built using Semantic Web technologies, adhering to standards like RDF, OWL, and SKOS. It also integrates popular ontologies such as Dublin Core Terms (DCT) and Europe's Legislation Identifier (ELI) in order to create a structured and interconnected knowledge graph. This allows professionals to explore, query, and understand various aspects of the compliance and permitting processes more comprehensively.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Formalization of Building Codes and Regulations in Knowledge Graphs.\n \n \n \n \n\n\n \n Costa, G.; Vakaj, E.; Beach, T.; Lavikka, R.; Lefrançois, M.; Zimmermann, A.; Mecharnia, T.; Alexiev, V.; Dridi, A.; Hettiarachchi, H.; and Keberle, N.\n\n\n \n\n\n\n In Noardo, F.; and Fauth, J., editor(s), Digital Building Permit Conference 2024, pages 142-146, Barcelona, Spain, April 2024. \n \n\n\n\n
\n\n\n\n \n \n \"FormalizationPaper\n  \n \n \n \"Formalization slides\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{DBP2024-buildingCodes,\n  author       = {Gonçal Costa and Edlira Vakaj and Thomas Beach and Rita Lavikka and Maxime Lefrançois and Antoine Zimmermann and Thamer Mecharnia and Vladimir Alexiev and Amna Dridi and Hansi Hettiarachchi and Nataliya Keberle},\n  title        = {Formalization of Building Codes and Regulations in Knowledge Graphs},\n  booktitle    = {Digital Building Permit Conference 2024},\n  year         = 2024,\n  editor       = {Francesca Noardo and Judith Fauth},\n  pages        = {142-146},\n  month        = apr,\n  address      = {Barcelona, Spain},\n  url          = {https://zenodo.org/records/12760552},\n  url_Slides   = {https://docs.google.com/presentation/d/1UO8bH9LY_KprjZhrSHFV7QKq2d8r_fjF/edit},\n  keywords     = {AECO, BIM, regulation checking, automated compliance checking},\n  doi          = {10.5281/zenodo.12760552},\n  abstract     = {The Architecture, Engineering and Construction (AEC) industry is subject to many building codes and regulations that apply to the design and construction of buildings. These regulations often involve complex language and technical vocabulary that can give rise to different interpretations, depending on their context and purpose, and therefore a difficulty in their application. The introduction of Building Information Modelling (BIM), as well as authoring tools capable of creating and exporting 3D representations of buildings, is paving the way for compliance checking to become more automated and less dependent on interpretation. This should allow for better quality by reducing the time needed for checking and avoiding human errors. However, despite attempts to provide new BIM-based methods and approaches to achieve this goal in the past two decades, none of these methods have proven to be close to being a definitive solution. The basis for checking compliance against regulations using a BIM model is to have a description of the regulations in a computable form. 
In turn, this makes it necessary to define data requirements for models that guarantee that regulations can be checked consistently. Within this framework, several scenarios can be considered to address the problem. One is to consider the descriptive part of the regulation separate from the execution part, that is, compliance checking procedures. Currently, those in charge of writing the regulations typically publish them in plain text documents in PDF format. Therefore, the next evolutionary step is to manage construction regulations in a machine-readable way underpinned by semantics, thus, ensuring they can be interpretated precisely by the software used for checking buildings against them.},\n}\n\n
\n
\n\n\n
\n The Architecture, Engineering and Construction (AEC) industry is subject to many building codes and regulations that apply to the design and construction of buildings. These regulations often involve complex language and technical vocabulary that can give rise to different interpretations, depending on their context and purpose, and therefore a difficulty in their application. The introduction of Building Information Modelling (BIM), as well as authoring tools capable of creating and exporting 3D representations of buildings, is paving the way for compliance checking to become more automated and less dependent on interpretation. This should allow for better quality by reducing the time needed for checking and avoiding human errors. However, despite attempts to provide new BIM-based methods and approaches to achieve this goal in the past two decades, none of these methods have proven to be close to being a definitive solution. The basis for checking compliance against regulations using a BIM model is to have a description of the regulations in a computable form. In turn, this makes it necessary to define data requirements for models that guarantee that regulations can be checked consistently. Within this framework, several scenarios can be considered to address the problem. One is to consider the descriptive part of the regulation separate from the execution part, that is, compliance checking procedures. Currently, those in charge of writing the regulations typically publish them in plain text documents in PDF format. Therefore, the next evolutionary step is to manage construction regulations in a machine-readable way underpinned by semantics, thus, ensuring they can be interpretated precisely by the software used for checking buildings against them.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Checking of Urban Planning Regulations with GeoSPARQL and BIM SPARQL.\n \n \n \n \n\n\n \n Alexiev, V.; and Keberle, N.\n\n\n \n\n\n\n In Digital Building Permit 2024, pages 234, Barcelona, Spain, April 2024. \n \n\n\n\n
\n\n\n\n \n \n \"CheckingPaper\n  \n \n \n \"Checking html\n  \n \n \n \"Checking slides\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{DBP2024-GeoSPARQL,\n  author       = {Vladimir Alexiev and Nataliya Keberle},\n  title        = {Checking of Urban Planning Regulations with GeoSPARQL and BIM SPARQL},\n  booktitle    = {Digital Building Permit 2024},\n  year         = 2024,\n  pages        = 234,\n  month        = apr,\n  address      = {Barcelona, Spain},\n  url          = {https://zenodo.org/records/12760552},\n  url_HTML     = {https://presentations.ontotext.com/2024/checking-of-urban-planning-regulations-with-geosparql-and-bim-sparql/Slides.html},\n  url_Slides   = {https://drive.google.com/file/d/1PtApSPgU2nQjvRBCcgNU8oles2O7EnC6/view},\n  keywords     = {AECO, urban planning, BIM, City Information Management, regulation checking, XPlanung, INSPIRE PLU, CityGML, GeoSPARQL, Berlin Tegel, TXL, Malgrat},\n  doi          = {10.5281/zenodo.12760552},\n  abstract     = {The former Berlin Tegel airport (TXL) will be the site of a university campus (refurbished airport terminal), startups, production facilities (“tech republic”), a living quarter, stores, smart mobility hubs, park and recreation areas, etc. The Tegel Project company (owned by the City of Berlin) has developed detailed urban planning and regulations covering built area use, height restrictions, noise protection, floor space index (buildup density), greenery requirements (vegetation and habitats), etc. The regulations are expressed in XPlanung and INSPIRE PLU. These are GML-based UML and XML models for urban planning: XPlanung is Germany-specific and PLU (Planned Land Use) is part of the INSPIRE initiative. Building designs are expressed in IFC and include simple geometries (for residential buildings) and complex geometries (for the university campus). Compliance checking of urban planning requires accessing two different kinds of data in a harmonized way: BIM (building information) and GIS (also called CIM “city information management” and often represented using GML extension schemas). 
As part of the Horizon Europe ACCORD project, we plan to do this checking using SPARQL in Ontotext GraphDB. GIS data is covered by the existing GeoSPARQL plugin that supports WKT and GML geometries. BIM data can either be converted to GIS/GML using already developed approaches, or accessed through a future Binary Engineering Data connector for GraphDB based on the HDF5 format. We give an overview of XPlanung, INSPIRE PLU, CityGML and GeoSPARQL 1.0 and 1.1. Then we describe the semantic conversion of XPlanung / INSPIRE PLU data, our approach regarding semantization of BIM data, the overall structure of regulations, the respective geometric and non-geometric checks to be implemented, the use of GeoSPARQL topological relations to leverage planning zone hierarchies and to check which buildings fall in which zones, potential specialized BIM SPARQL functions to be implemented, management of multiple BIM files that need to be checked in concert, and result creation and content.},\n}\n\n
\n
\n\n\n
\n The former Berlin Tegel airport (TXL) will be the site of a university campus (refurbished airport terminal), startups, production facilities (“tech republic”), a living quarter, stores, smart mobility hubs, park and recreation areas, etc. The Tegel Project company (owned by the City of Berlin) has developed detailed urban planning and regulations covering built area use, height restrictions, noise protection, floor space index (buildup density), greenery requirements (vegetation and habitats), etc. The regulations are expressed in XPlanung and INSPIRE PLU. These are GML-based UML and XML models for urban planning: XPlanung is Germany-specific and PLU (Planned Land Use) is part of the INSPIRE initiative. Building designs are expressed in IFC and include simple geometries (for residential buildings) and complex geometries (for the university campus). Compliance checking of urban planning requires accessing two different kinds of data in a harmonized way: BIM (building information) and GIS (also called CIM “city information management” and often represented using GML extension schemas). As part of the Horizon Europe ACCORD project, we plan to do this checking using SPARQL in Ontotext GraphDB. GIS data is covered by the existing GeoSPARQL plugin that supports WKT and GML geometries. BIM data can either be converted to GIS/GML using already developed approaches, or accessed through a future Binary Engineering Data connector for GraphDB based on the HDF5 format. We give an overview of XPlanung, INSPIRE PLU, CityGML and GeoSPARQL 1.0 and 1.1. 
Then we describe the semantic conversion of XPlanung / INSPIRE PLU data, our approach regarding semantization of BIM data, the overall structure of regulations, the respective geometric and non-geometric checks to be implemented, the use of GeoSPARQL topological relations to leverage planning zone hierarchies and to check which buildings fall in which zones, potential specialized BIM SPARQL functions to be implemented, management of multiple BIM files that need to be checked in concert, and result creation and content.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Using Qualification Instead of Rolification for the Records in Context Ontology (RiC-O).\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n January 2024.\n \n\n\n\n
\n\n\n\n \n \n \"UsingPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Misc{Alexiev-Qualification-vs-Rolification-2024,\n  author       = {Vladimir Alexiev},\n  title        = {{Using Qualification Instead of Rolification for the Records in Context Ontology (RiC-O)}},\n  month        = jan,\n  year         = 2024,\n  url          = {https://github.com/ICA-EGAD/RiC-O/issues/67#issuecomment-1919383104},\n  keywords     = {rolification, qualification, reasoning, GraphDB Rules, GLAM, archives, RiC-O, Records in Context, PROV},\n  abstract     = {The Records in Context Ontology (RiC-O) uses a "Rolification" pattern to derive direct (unqualified) relations from Relation nodes. This involves the use of a "parasitic" self-link on relation nodes (owl:hasSelf) and owl:propertyChainAxiom, which is expensive to implement. Instead, I propose to use the PROV Qualified Relation pattern (associate the direct relation to the Relation class using prov:unqualifiedForm) and implement it with simpler GraphDB rules.}\n}\n\n
\n
\n\n\n
\n The Records in Context Ontology (RiC-O) uses a \"Rolification\" pattern to derive direct (unqualified) relations from Relation nodes. This involves the use of a \"parasitic\" self-link on relation nodes (owl:hasSelf) and owl:propertyChainAxiom, which is expensive to implement. Instead, I propose to use the PROV Qualified Relation pattern (associate the direct relation to the Relation class using prov:unqualifiedForm) and implement it with simpler GraphDB rules.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Exploring FIBO Complexity With Crunchbase: Representing Crunchbase IPOs in FIBO.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n Technical Report April 2024.\n \n\n\n\n
\n\n\n\n \n \n \"ExploringPaper\n  \n \n \n \"Exploring github\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@TechReport{Alexiev-Crunchbase-Fibo-2023,\n  author       = {Vladimir Alexiev},\n  title        = {{Exploring FIBO Complexity With Crunchbase: Representing Crunchbase IPOs in FIBO}},\n  month        = apr,\n  year         = 2024,\n  url          = {https://rawgit2.com/VladimirAlexiev/crunchbase-fibo/main/README.html},\n  url_Github   = {https://github.com/VladimirAlexiev/crunchbase-fibo/},\n  keywords     = {fintech, Crunchbase, ontologies, semantic modeling, Initial Public Offering, IPO, Financial Industry Business Ontology, FIBO},\n  abstract     = {The Financial Industry Business Ontology (FIBO) by the Enterprise Data Management Council (EDMC) is a family of ontologies and a reference model for representing data in the financial world using semantic technologies. It is used in fintech Knowledge Graph (KG) projects because it offers a comprehensive and principled approach to representing financial data, and a wide set of predefined models that can be used to implement data harmonization and financial data integration. The 2022Q2 FIBO release consists of 290 ontologies using 380 prefixes that cover topics such as legal entities, contracts, agency, trusts, regulators, securities, loans, derivatives, etc. FIBO's reach and flexible ontological approach allow the integration of a wide variety of financial data, but it comes at the price of more complex representation. Crunchbase (CB) is a well-known dataset by TechCrunch that includes companies, key people, funding rounds, acquisitions, Initial Public Offerings (IPOs), etc. It has about 2M companies with a good mix of established enterprises (including 47k public companies), mid-range companies and startups. We (Ontotext and other Wikidata contributors) have matched 72k CB companies to Wikidata, see this query. I explore the representation of Crunchbase data (more specifically IPOs) in FIBO and compare it to the simplest possible semantic representation. 
I therefore illustrate the complexity of FIBO, and explain its flexibility along the way. I finish with some discussion and conclusions as to when FIBO can bring value to fintech KG projects.},\n}\n\n
\n
\n\n\n
\n The Financial Industry Business Ontology (FIBO) by the Enterprise Data Management Council (EDMC) is a family of ontologies and a reference model for representing data in the financial world using semantic technologies. It is used in fintech Knowledge Graph (KG) projects because it offers a comprehensive and principled approach to representing financial data, and a wide set of predefined models that can be used to implement data harmonization and financial data integration. The 2022Q2 FIBO release consists of 290 ontologies using 380 prefixes that cover topics such as legal entities, contracts, agency, trusts, regulators, securities, loans, derivatives, etc. FIBO's reach and flexible ontological approach allow the integration of a wide variety of financial data, but it comes at the price of more complex representation. Crunchbase (CB) is a well-known dataset by TechCrunch that includes companies, key people, funding rounds, acquisitions, Initial Public Offerings (IPOs), etc. It has about 2M companies with a good mix of established enterprises (including 47k public companies), mid-range companies and startups. We (Ontotext and other Wikidata contributors) have matched 72k CB companies to Wikidata, see this query. I explore the representation of Crunchbase data (more specifically IPOs) in FIBO and compare it to the simplest possible semantic representation. I therefore illustrate the complexity of FIBO, and explain its flexibility along the way. I finish with some discussion and conclusions as to when FIBO can bring value to fintech KG projects.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2023\n \n \n (8)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Existing Ontologies, Standards, and Data Models in the Building Data Domain Relevant to Compliance Checking.\n \n \n \n \n\n\n \n Mecharnia, T.; Lefrançois, M.; Zimmermann, A.; Vakaj, E.; Dridi, A.; Hettiarachchi, H.; Alexiev, V.; Keberle, N.; Tan, H.; Noardo, F.; Makkinga, R.; and Cheung, F.\n\n\n \n\n\n\n Technical Report D2.1, ACCORD Project, August 2023.\n \n\n\n\n
\n\n\n\n \n \n \"ExistingPaper\n  \n \n \n \"Existing library\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n\n\n\n
\n
@TechReport{thamermecharniaExistingOntologiesStandards2023,\n  author       = {Thamer Mecharnia and Maxime Lefrançois and Antoine Zimmermann and Edlira Vakaj and Amna Dridi and Hansi Hettiarachchi and Vladimir Alexiev and Nataliya Keberle and He Tan and Francesca Noardo and Rick Makkinga and Franco Cheung},\n  title        = {Existing Ontologies, Standards, and Data Models in the Building Data Domain Relevant to Compliance Checking},\n  institution  = {ACCORD Project},\n  year         = 2023,\n  type         = {Deliverable},\n  number       = {D2.1},\n  month        = aug,\n  url          = {https://accordproject.eu/wp-content/uploads/2023/09/ACCORD_D2.1_Technical_Report_Existing_Models.pdf},\n  keywords     = {Architecture Engineering Construction and Operations (AECO), Ontologies, Review},\n  url_library  = {https://www.zotero.org/groups/3007408/semantic_bim/library},\n  abstract     = {This deliverable presents the results of Task 2.1 (Technical Review of Existing Standards) of the ACCORD project. The ACCORD project employs a semantic approach for validating building permits, eliminating the need for costly centralized systems that are challenging to establish and maintain. The primary aim of the ACCORD project is to digitize permit and compliance procedures to improve the productivity and quality of design and construction processes and facilitate the creation of an environmentally sustainable built environment. This deliverable will review the existing ontologies, standards, and data models in the Architecture, Engineering, and Construction (AEC) domain and how they can be reused for the purpose of the automatic compliance check. More specifically, this deliverable will: 1. Evaluate the AEC domain-related ontologies and propose suggestions on how they can be employed for the development of the Architecture Engineering and Construction Compliance Checking and Permitting Ontology (AEC3PO). 2. 
Conduct a review of query languages associated with the AEC domain and the semantic web. 3. Compare the rule languages developed or used in AEC projects. 4. Review the standards that may be relevant to different areas in the ACCORD project. 5. Compare the existing reasoners that could be useful to building permitting automatic compliance checking. All the references used in this deliverable are gathered in the open Zotero library for the project. In the AEC industry, several standards and recommendations aim to achieve different levels of data interoperability in systems. This deliverable concentrates on data-related standards such as those that provide syntactic rules and semantics to represent data in a standardized way. Policy and regulatory standards are out of the scope of this deliverable and are addressed in deliverable D1.1 "Landscape Review Report". The outcomes of this deliverable will serve as a reference for other tasks within the project, which will determine the preferred rule language, which ontologies can be reused, aligned, or serve as inspiration for the creation of the AEC3PO to be developed in Task 2.2 of WP2. Furthermore, the standards that will be presented in this deliverable can be employed in various aspects of the ACCORD project. This groundwork will facilitate the development of the AEC3PO ontology as well as the design and implementation of the Rule Formalisation Tool.},\n}\n\n\n
\n
\n\n\n
\n This deliverable presents the results of Task 2.1 (Technical Review of Existing Standards) of the ACCORD project. The ACCORD project employs a semantic approach for validating building permits, eliminating the need for costly centralized systems that are challenging to establish and maintain. The primary aim of the ACCORD project is to digitize permit and compliance procedures to improve the productivity and quality of design and construction processes and facilitate the creation of an environmentally sustainable built environment. This deliverable will review the existing ontologies, standards, and data models in the Architecture, Engineering, and Construction (AEC) domain and how they can be reused for the purpose of the automatic compliance check. More specifically, this deliverable will: 1. Evaluate the AEC domain-related ontologies and propose suggestions on how they can be employed for the development of the Architecture Engineering and Construction Compliance Checking and Permitting Ontology (AEC3PO). 2. Conduct a review of query languages associated with the AEC domain and the semantic web. 3. Compare the rule languages developed or used in AEC projects. 4. Review the standards that may be relevant to different areas in the ACCORD project. 5. Compare the existing reasoners that could be useful to building permitting automatic compliance checking. All the references used in this deliverable are gathered in the open Zotero library for the project. In the AEC industry, several standards and recommendations aim to achieve different levels of data interoperability in systems. This deliverable concentrates on data-related standards such as those that provide syntactic rules and semantics to represent data in a standardized way. Policy and regulatory standards are out of the scope of this deliverable and are addressed in deliverable D1.1 \"Landscape Review Report\". 
The outcomes of this deliverable will serve as a reference for other tasks within the project, which will determine the preferred rule language, which ontologies can be reused, aligned, or serve as inspiration for the creation of the AEC3PO to be developed in Task 2.2 of WP2. Furthermore, the standards that will be presented in this deliverable can be employed in various aspects of the ACCORD project. This groundwork will facilitate the development of the AEC3PO ontology as well as the design and implementation of the Rule Formalisation Tool.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Natural Language Querying with GPT, SOML and GraphQL.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n Ontotext Last Friday Webinar, May 2023.\n \n\n\n\n
\n\n\n\n \n \n \"Natural video\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Misc{NLQ-GPT-SOML-GraphQL-2023,\n  author       = {Vladimir Alexiev},\n  title        = {{Natural Language Querying with GPT, SOML and GraphQL}},\n  howpublished = {Ontotext Last Friday Webinar},\n  month        = may,\n  year         = 2023,\n  url_video    = {https://drive.google.com/file/d/1TOHrtlleOAkv4oZYhlAWa22mUqtvsV7o/view},\n  abstract     = {Clients want to talk to their KG, i.e. ask questions about the schema and data in natural language. LLMs like GPT and LLAMA have opened a revolution in this regard. Currently Ontotext is exploring 8 themes with LLMs.\nNLQ can be accomplished either by:\n- Providing data from GraphDB to the LLM, or\n- Presenting a schema to the LLM and asking it to generate queries.\nIn this talk we explore query generation.\n- SPARQL queries are complex, so even for known schemas (eg Wikidata, DBpedia), GPT has trouble generating good queries, see \\Shared drives\\KGS\\AI-GPT\\GPT-SPARQL. Furthermore, RDF schemas (OWL and SHACL) are complex. But I'm sure there will be fast progress in SPARQL generation, see LlamaIndex advances in GDB-8329\n- GraphQL queries are regular and much simpler, and SOML is a simpler schema language (from which the Ontotext Platform generates GraphQL schema, queries and SHACL shapes). In this talk I'll show how GPT4 can answer questions about a schema, and generate GraphQL to answer questions about data.}\n}\n\n
\n
\n\n\n
\n Clients want to talk to their KG, i.e. ask questions about the schema and data in natural language. LLMs like GPT and LLAMA have opened a revolution in this regard. Currently Ontotext is exploring 8 themes with LLMs. NLQ can be accomplished either by: - Providing data from GraphDB to the LLM, or - Presenting a schema to the LLM and asking it to generate queries. In this talk we explore query generation. - SPARQL queries are complex, so even for known schemas (eg Wikidata, DBpedia), GPT has trouble generating good queries, see \\Shared drives\\KGS\\AI-GPT\\GPT-SPARQL. Furthermore, RDF schemas (OWL and SHACL) are complex. But I'm sure there will be fast progress in SPARQL generation, see LlamaIndex advances in GDB-8329 - GraphQL queries are regular and much simpler, and SOML is a simpler schema language (from which the Ontotext Platform generates GraphQL schema, queries and SHACL shapes). In this talk I'll show how GPT4 can answer questions about a schema, and generate GraphQL to answer questions about data.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Semantic bSDD: Improving the GraphQL, JSON and RDF Representations of buildingSmart Data Dictionary.\n \n \n \n \n\n\n \n Alexiev, V.; Radkov, M.; and Keberle, N.\n\n\n \n\n\n\n In Linked Data in Architecture and Construction (LDAC 2023), Matera, Italy, June 2023. \n \n\n\n\n
\n\n\n\n \n \n \"SemanticPaper\n  \n \n \n \"Semantic demo\n  \n \n \n \"Semantic detailed\n  \n \n \n \"Semantic github\n  \n \n \n \"Semantic preprint\n  \n \n \n \"Semantic slides\n  \n \n \n \"Semantic video\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{SemanticBSDD-LDAC-2023,\n  author       = {Vladimir Alexiev and Mihail Radkov and Nataliya Keberle},\n  title        = {{Semantic bSDD: Improving the GraphQL, JSON and RDF Representations of buildingSmart Data Dictionary}},\n  booktitle    = {{Linked Data in Architecture and Construction (LDAC 2023)}},\n  year         = 2023,\n  month        = jun,\n  address      = {Matera, Italy},\n  url          = {https://linkedbuildingdata.net/ldac2023/files/papers/papers/LDAC2023_paper_1547.pdf},\n  url_Demo     = {https://bsdd.ontotext.com/},\n  url_Detailed = {https://bsdd.ontotext.com/README.html},\n  url_Github   = {https://github.com/Accord-Project/bsdd},\n  url_Preprint = {https://bsdd.ontotext.com/paper/paper.pdf},\n  url_Slides   = {https://bsdd.ontotext.com/presentation/presentation.html},\n  url_Video    = {https://drive.google.com/open?id=1Mhts8JwbdJFUmQHGULCqduijZ0NpEoxX},\n  keywords     = {Linked building data, LBD, buildingSMART Data Dictionary, bSDD, FAIR data, data quality},\n  abstract     = {The buildingSmart Data Dictionary (bSDD) is an important shared resource in the Architecture, Engineering, Construction, and Operations (AECO) domain. It is a collection of datasets ("domains") that define various classifications (objects representing building components, products, and materials), their properties, allowed values, etc. bSDD defines a GraphQL API, as well as REST APIs that return JSON and RDF representations. This improves the interoperability of bSDD and its easier deployment in architectural Computer Aided Design (CAD) and other AECO software. However, bSDD data is not structured as well as possible, and data retrieved via different APIs is not identical in content and structure. This lowers bSDD data quality, usability and trust. We conduct a thorough comparison and analysis of bSDD data related to fulfillment of FAIR (findable, accessible, interoperable, and reusable) principles. 
Based on this analysis, we suggest enhancements to make bSDD data better structured and more FAIR. We implement many of the suggestions by refactoring the original data to make it better structured/interconnected, and more "semantic". We provide a SPARQL endpoint using Ontotext GraphDB, and GraphQL endpoint using Ontotext Platform Semantic Objects. Our detailed work is available at https://github.com/Accord-Project/bsdd (open source) and https://bsdd.ontotext.com (home page, schemas, data, sample queries).},\n}\n\n
\n
\n\n\n
\n The buildingSmart Data Dictionary (bSDD) is an important shared resource in the Architecture, Engineering, Construction, and Operations (AECO) domain. It is a collection of datasets (\"domains\") that define various classifications (objects representing building components, products, and materials), their properties, allowed values, etc. bSDD defines a GraphQL API, as well as REST APIs that return JSON and RDF representations. This improves the interoperability of bSDD and its easier deployment in architectural Computer Aided Design (CAD) and other AECO software. However, bSDD data is not structured as well as possible, and data retrieved via different APIs is not identical in content and structure. This lowers bSDD data quality, usability and trust. We conduct a thorough comparison and analysis of bSDD data related to fulfillment of FAIR (findable, accessible, interoperable, and reusable) principles. Based on this analysis, we suggest enhancements to make bSDD data better structured and more FAIR. We implement many of the suggestions by refactoring the original data to make it better structured/interconnected, and more \"semantic\". We provide a SPARQL endpoint using Ontotext GraphDB, and GraphQL endpoint using Ontotext Platform Semantic Objects. Our detailed work is available at https://github.com/Accord-Project/bsdd (open source) and https://bsdd.ontotext.com (home page, schemas, data, sample queries).\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The InnoGraph Artificial Intelligence Taxonomy: A Key to Unlocking AI-Related Entities and Content.\n \n \n \n \n\n\n \n Alexiev, V.; Bechev, B.; and Osytsin, A.\n\n\n \n\n\n\n Technical Report Ontotext Corp, December 2023.\n Introduction: - Potential InnoGraph Datasets and Users - Importance of Topics and A Holistic Approach - Example: Github Topics - Kinds of Topics. Core Topics: Wikipedia Articles: - Wikipedia Categories - Category Pruning. Collaborative Patent Classification: Application Areas: - PatBase Browser - CPC Semantic Data at EPO - Finding All CPC AI Topics - CPC Snowballing - CPC for Application Area Topics. Other Topic Datasets: - ACM CCS - AIDA FAT - AMiner KGs - ANZSRC FOR - arXiv Areas - China NSFC - EU CORDIS EuroSciVoc - Crunchbase Categories - CSO - JEL - MESH - MSC - OpenAlex Topics - SemanticScholar FOS - StackExchange Tags. Conclusion and Future Work: - Acknowledgements - References\n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@TechReport{InnoGraph-AI-Taxonomy,\n  author       = {Vladimir Alexiev and Boyan Bechev and Alexandr Osytsin},\n  title        = {The InnoGraph Artificial Intelligence Taxonomy: A Key to Unlocking AI-Related Entities and Content},\n  institution  = {Ontotext Corp},\n  year         = 2023,\n  type         = {whitepaper},\n  month        = dec,\n  note         = {Introduction: - Potential InnoGraph Datasets and Users - Importance of Topics and A Holistic Approach - Example: Github Topics - Kinds of Topics. Core Topics: Wikipedia Articles: - Wikipedia Categories - Category Pruning. Collaborative Patent Classification: Application Areas: - PatBase Browser - CPC Semantic Data at EPO - Finding All CPC AI Topics - CPC Snowballing - CPC for Application Area Topics. Other Topic Datasets: - ACM CCS - AIDA FAT - AMiner KGs - ANZSRC FOR - arXiv Areas - China NSFC - EU CORDIS EuroSciVoc - Crunchbase Categories - CSO - JEL - MESH - MSC - OpenAlex Topics - SemanticScholar FOS - StackExchange Tags. Conclusion and Future Work: - Acknowledgements - References},\n  url          = {https://www.ontotext.com/knowledgehub/white_paper/the-innograph-artificial-intelligence-taxonomy/},\n  keywords     = {InnoGraph, Artificial Intelligence, Topics, Taxonomy, InnoGraph},\n  date         = {2023-12},\n  abstract     = {InnoGraph is a holistic Knowledge Graph of innovation based on Artificial Intelligence (AI). AI is the underpinning of much of the world's innovation, therefore it has immense economic and human improvement potential. With the explosive growth of Machine Learning (ML), Deep Learning (DL) and Large Language Models (LLM), it is hard to keep up with all AI development, but also this is a valuable effort. A key to discovering AI elements is to build a comprehensive taxonomy of topics: AI techniques, application areas (verticals). We describe our approach to developing such a taxonomy by integrating and coreferencing data from numerous sources.},\n}\n\n
\n
\n\n\n
\n InnoGraph is a holistic Knowledge Graph of innovation based on Artificial Intelligence (AI). AI is the underpinning of much of the world's innovation, therefore it has immense economic and human improvement potential. With the explosive growth of Machine Learning (ML), Deep Learning (DL) and Large Language Models (LLM), it is hard to keep up with all AI development, but also this is a valuable effort. A key to discovering AI elements is to build a comprehensive taxonomy of topics: AI techniques, application areas (verticals). We describe our approach to developing such a taxonomy by integrating and coreferencing data from numerous sources.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Towards InnoGraph: A Knowledge Graph for AI Innovation.\n \n \n \n \n\n\n \n Massri, M.; Spahiu, B.; Grobelnik, M.; Alexiev, V.; Palmonari, M.; and Roman, D.\n\n\n \n\n\n\n In 3rd International Workshop on Scientific Knowledge Representation, Discovery, and Assessment (Sci-K 2023). WWW 2023 Companion, Austin, Texas, June 2023. \n \n\n\n\n
\n\n\n\n \n \n \"TowardsPaper\n  \n \n \n \"Towards preprint\n  \n \n \n \"Towards slides\n  \n \n \n \"Towards zenodo\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{TowardsInnograph-SciK-2023,\n  author       = {M.Besher Massri and Blerina Spahiu and Marko Grobelnik and Vladimir Alexiev and Matteo Palmonari and Dumitru Roman},\n  title        = {{Towards InnoGraph: A Knowledge Graph for AI Innovation}},\n  booktitle    = {{3rd International Workshop on Scientific Knowledge Representation, Discovery, and Assessment (Sci-K 2023). WWW 2023 Companion}},\n  year         = 2023,\n  month        = jun,\n  address      = {Austin, Texas},\n  url          = {https://dl.acm.org/doi/10.1145/3543873.3587614},\n  url_Preprint = {https://zenodo.org/record/7750707/files/Towards%20InnoGraph%20A%20Knowledge%20Graph%20for%20AI%20Innovation.pdf?download=1},\n  url_Slides   = {https://zenodo.org/record/7750707/files/Towards%20InnoGraph%20A%20Knowledge%20Graph%20for%20AI%20Innovation.pptx?download=1},\n  url_Zenodo   = {https://zenodo.org/record/7750707},\n  keywords     = {artificial intelligence, innovation, innovation ecosystem, knowledge graph, science knowledge graph, economics knowledge graph},\n  doi          = {10.1145/3543873.3587614},\n  abstract     = {To understand the state-of-the-art innovations in a particular domain, researchers have to explore patents and scientific articles published recently in that particular domain. Innovation ecosystems comprise interconnected information regarding entities, i.e., researchers, institutions, projects, products, and technologies. Representing such information in a machine-readable format is challenging. This is due to the fact that representing concepts like "knowledge" is not straightforward. However, even a partial representation provides valuable information. Representing innovation ecosystems as knowledge graphs (KGs) enables the generation of new insights and would allow advanced data analysis. In this paper, we propose InnoGraph, a KG of the worldwide AI innovation ecosystem.},\n}\n\n
\n
\n\n\n
\n To understand the state-of-the-art innovations in a particular domain, researchers have to explore patents and scientific articles published recently in that particular domain. Innovation ecosystems comprise interconnected information regarding entities, i.e., researchers, institutions, projects, products, and technologies. Representing such information in a machine-readable format is challenging. This is due to the fact that representing concepts like \"knowledge\" is not straightforward. However, even a partial representation provides valuable information. Representing innovation ecosystems as knowledge graphs (KGs) enables the generation of new insights and would allow advanced data analysis. In this paper, we propose InnoGraph, a KG of the worldwide AI innovation ecosystem.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n InnoGraph Datasets.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n presentation, January 2023.\n The presentation is not public yet, contact in case of interest\n\n\n\n
\n\n\n\n \n \n \"InnoGraphPaper\n  \n \n \n \"InnoGraph html\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Misc{Alexiev-InnographDatasets-2023,\n  author       = {Vladimir Alexiev},\n  title        = {{InnoGraph Datasets}},\n  howpublished = {presentation},\n  month        = jan,\n  year         = 2023,\n  note         = {The presentation is not public yet, contact in case of interest},\n  url          = {https://github.com/enRichMyData/InnoGraph/blob/main/papers-write/202301-InnoGraph-Datasets/index.org},\n  url_HTML     = {https://rawgit2.com/enRichMyData/InnoGraph/main/papers-write/202301-InnoGraph-Datasets/index.html},\n  address      = {enrichMyData Project Meeting, Milan, Italy},\n  abstract     = {What is InnoGraph: a Holistic KG of the world-wide AI innovation ecosystem. Who are its users/uses: Investment and strategic advice (VCs, M&A): Innovators and startups, Industry convergence and digitization, Strategic gaps, Strategic acquisition targets; Researchers and developers; Policy makers: EU level, National science foundations, OECD.AI, maybe even US and China; Self-use: AI is at cusp (singularity), learn about it in depth!},\n}\n\n
\n
\n\n\n
\n What is InnoGraph: a Holistic KG of the world-wide AI innovation ecosystem. Who are its users/uses: Investment and strategic advice (VCs, M&A): Innovators and startups, Industry convergence and digitization, Strategic gaps, Strategic acquisition targets; Researchers and developers; Policy makers: EU level, National science foundations, OECD.AI, maybe even US and China; Self-use: AI is at cusp (singularity), learn about it in depth!\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Generation of Declarative Transformations from Semantic Models.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n In European Data Conference on Reference Data and Semantics (ENDORSE 2023), pages 33, 42-59, March 2023. European Commission: Directorate-General for Informatics, Publications Office of the European Union\n \n\n\n\n
\n\n\n\n \n \n \"GenerationPaper\n  \n \n \n \"Generation ppt\n  \n \n \n \"Generation slides\n  \n \n \n \"Generation video\n  \n \n \n \"Generation proceedings\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{Alexiev-ENDORSE-2023,\n  author       = {Vladimir Alexiev},\n  title        = {{Generation of Declarative Transformations from Semantic Models}},\n  booktitle    = {{European Data Conference on Reference Data and Semantics (ENDORSE 2023)}},\n  year         = 2023,\n  pages        = {33, 42-59},\n  month        = mar,\n  organization = {European Commission: Directorate-General for Informatics, Publications Office of the European Union},\n  url          = {https://drive.google.com/open?id=1Cq5o9th_P812paqGkDsaEomJyAmnypkD},\n  url_PPT      = {https://docs.google.com/presentation/d/1JCMQEH8Tw_F-ta6haIToXMLYJxQ9LRv6/edit},\n  url_Slides   = {https://op.europa.eu/documents/10157494/12134844/DAY1-TRACK2-16.35-16.50-VladimirAlexiev_FORPUB.pdf/6e564f96-6ad6-1464-7a6e-e9533207f281},\n  url_Video    = {https://youtu.be/yL5nI_3ccxs},\n  keywords     = {semantic model, semantic data integration, ETL, semantic conversion, declarative approaches, PlantUML, R2RML, generation, model-driven, RDF by Example, rdfpuml, rdf2rml},\n  isbn         = {978-92-78-43682-7},\n  doi          = {10.2830/343811},\n  annote       = {Catalogue number: OA-04-23-743-EN-N},\n  date         = {2023-08-04},\n  url_proceedings= {https://op.europa.eu/en/publication-detail/-/publication/4db67b35-34df-11ee-bdc3-01aa75ed71a1},\n  abstract     = {The daily work of the Knowledge Graph Solutions group at Ontotext involves KG building activities such as investigating data standards and datasets, ontology engineering, harmonizing data through semantic models, converting or virtualizing data to semantic form, entity matching, semantic text enrichment, etc. Semantic pipelines have a variety of desirable properties, of which maintainability and consistency of the various artefacts are some of the most important ones. Despite significant recent progress (eg in the KG Building W3C community group), semantic conversion still remains one of the difficult steps. 
We favor generation of semantic transformations from semantic models that are both sufficiently precise, easily understandable, can be used to generate diagrams, and are valid RDF to allow processing with RDF tools. We call this approach "RDF by Example" and have developed a set of open source tools at https://github.com/VladimirAlexiev/rdf2rml. This includes "rdfpuml" for generating diagrams, "rdf2rml" for generating R2RML for semantization of relational data and ONTOP virtualization, "rdf2sparql" for semantization of tabular data with Ontotext Refine or TARQL. We describe our approach and illustrate it with complex and high-performance transformations in a variety of domains, such as company data and NIH research grants.},\n}\n\n
\n
\n\n\n
\n The daily work of the Knowledge Graph Solutions group at Ontotext involves KG building activities such as investigating data standards and datasets, ontology engineering, harmonizing data through semantic models, converting or virtualizing data to semantic form, entity matching, semantic text enrichment, etc. Semantic pipelines have a variety of desirable properties, of which maintainability and consistency of the various artefacts are some of the most important ones. Despite significant recent progress (eg in the KG Building W3C community group), semantic conversion still remains one of the difficult steps. We favor generation of semantic transformations from semantic models that are both sufficiently precise, easily understandable, can be used to generate diagrams, and are valid RDF to allow processing with RDF tools. We call this approach \"RDF by Example\" and have developed a set of open source tools at https://github.com/VladimirAlexiev/rdf2rml. This includes \"rdfpuml\" for generating diagrams, \"rdf2rml\" for generating R2RML for semantization of relational data and ONTOP virtualization, \"rdf2sparql\" for semantization of tabular data with Ontotext Refine or TARQL. We describe our approach and illustrate it with complex and high-performance transformations in a variety of domains, such as company data and NIH research grants.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Migrating J. Paul Getty Museum Agent ID from P2432 to P12040.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n Github gist, November 2023.\n \n\n\n\n
\n\n\n\n \n \n \"MigratingPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Misc{Alexiev2023-MigratingGettyID,\n  author       = {Vladimir Alexiev},\n  title        = {{Migrating J. Paul Getty Museum Agent ID from P2432 to P12040}},\n  howpublished = {Github gist},\n  month        = nov,\n  year         = 2023,\n  url          = {https://gist.github.com/VladimirAlexiev/e0a7bae256e9646a7b6f47b23184f9a4},\n  keywords     = {Getty Trust, J. Paul Getty Museum, authority control, Wikidata, cultural heritage, GLAM},\n  abstract     = {Previously Wikidata had Getty Museum agent DOR ID (P2432), eg https://www.getty.edu/art/collection/artists/377. But this is an internal ID that redirects to eg https://www.getty.edu/art/collection/person/103JV9. So I made a Wikidata property for the Getty Museum new agent ID (P12040). Using the Getty SPARQL endpoint, I exported 12936 persons and 3616 groups with fields "guid name old new ulan nat role birthDate birthPlace deathDate deathPlace". Then I initiated a discussion how to populate these new ID's to Wikidata, leveraging ULAN and the old DOR ID: https://www.wikidata.org/wiki/Property_talk:P12040#Populating_J._Paul_Getty_Museum_agent_ID. I also found some records without new ID, and started a discussion with Getty to see why that ID was missing},\n}\n\n
\n
\n\n\n
\n Previously Wikidata had Getty Museum agent DOR ID (P2432), eg https://www.getty.edu/art/collection/artists/377. But this is an internal ID that redirects to eg https://www.getty.edu/art/collection/person/103JV9. So I made a Wikidata property for the Getty Museum new agent ID (P12040). Using the Getty SPARQL endpoint, I exported 12936 persons and 3616 groups with fields \"guid name old new ulan nat role birthDate birthPlace deathDate deathPlace\". Then I initiated a discussion how to populate these new ID's to Wikidata, leveraging ULAN and the old DOR ID: https://www.wikidata.org/wiki/Property_talk:P12040#Populating_J._Paul_Getty_Museum_agent_ID. I also found some records without new ID, and started a discussion with Getty to see why that ID was missing\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2022\n \n \n (12)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Semantic Interoperability for Data Spaces.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n In SEMIC: Data Spaces in an Interoperable Europe (SEMIC 2022), December 2022. \n \n\n\n\n
\n\n\n\n \n \n \"SemanticPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{Alexiev-SEMIC-2022,\n  author       = {Vladimir Alexiev},\n  title        = {{Semantic Interoperability for Data Spaces}},\n  booktitle    = {SEMIC: Data Spaces in an Interoperable Europe (SEMIC 2022)},\n  year         = 2022,\n  month        = dec,\n  url          = {https://docs.google.com/presentation/d/1OMxNZItNCjGnod0KQ__Hp9oQ8mwmMyNn},\n  keywords     = {data spaces, interoperability, semantic interoperability, knowledge graphs, RDF, Semantic Technology, Polyglot Modeling, Product Classifications, Product Catalogs, Manufacturing Industry, Electricity, Transport and Logistics, Architecture and Construction},\n  howpublished = {presentation},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n JSON-LD, YAML-LD and Polyglot Modeling.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n presentation, October 2022.\n \n\n\n\n
\n\n\n\n \n \n \"JSON-LD,Paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Misc{Alexiev-JSONLD-YAMLLD-2022,\n  author       = {Vladimir Alexiev},\n  title        = {{JSON-LD, YAML-LD and Polyglot Modeling}},\n  howpublished = {presentation},\n  month        = oct,\n  year         = 2022,\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pres/20221028-JSONLD/Slides.html},\n  keywords     = {JSON-LD, YAML-LD, polyglot modeling, GraphDB, rdf4j, Titanium, GS1, EPCIS, Allotrope},\n  address      = {Presentation at Ontotext Last Friday},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Decentralization and Self-Sovereignty, Or how I finally understood what Blockchain is good for.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n presentation, February 2022.\n \n\n\n\n
\n\n\n\n \n \n \"DecentralizationPaper\n  \n \n \n \"Decentralization report\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Misc{Alexiev-decentralization-2022,\n  author       = {Vladimir Alexiev},\n  title        = {{Decentralization and Self-Sovereignty, Or how I finally understood what Blockchain is good for}},\n  howpublished = {presentation},\n  month        = feb,\n  year         = 2022,\n  url          = {https://docs.google.com/presentation/d/1AEwLjM7ry6BeM0XoF8EVbl5zeoMkE-tBht0CcL3cfPk/edit},\n  keywords     = {LD, JSONLD, HDT, HDF5, TPF, LDF, LDP, LDN, SOLID, DID, VC, IDSA RAM},\n  url_report   = {https://docs.google.com/document/d/1qpMAa55SYV6E4D_ffIgsZopmpzrUrjjR9c36SXXCVZQ/edit#},\n  address      = {Presentation at Ontotext Last Friday},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Semantic and Polyglot Modeling, Generation of Declarative Transformations, Data Spaces ft. Vladimir Alexiev.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n Podcast, October 2022.\n \n\n\n\n
\n\n\n\n \n \n \"SemanticPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Misc{Alexiev-podcast2022,\n  author       = {Vladimir Alexiev},\n  title        = {{Semantic and Polyglot Modeling, Generation of Declarative Transformations, Data Spaces ft. Vladimir Alexiev}},\n  howpublished = {Podcast},\n  address      = {Loose Edges podcast with Marsel Tadjer and Justin Dowdy},\n  month        = oct,\n  year         = 2022,\n  url          = {https://player.fm/series/loose-edges/semantic-and-polyglot-modeling-generation-of-declarative-transformations-data-spaces-ft-vladimir-alexiev},\n  keywords     = {semantic modeling, polyglot modeling, ontology engineering, knowledge graphs, competency questions, upper ontologies, reusable ontologies, GraphDB, GraphQL, Ontotext Refine, Ontotext Reconcile},\n  abstract     = {In this episode of Loose Edges Marsel and Justin interview Vladimir Alexiev, Chief Data Architect at Ontotext.\n- We explore Application Centric Data and how to catch defects in various modeling approaches.\n- Discuss Ontotext products: new GraphDB capabilities such as search and connectors, GraphQL capabilities, Ontotext Refine, Ontotext Reconcile.\n- Ontotext "10 step guide to KGs". Start a KG project with "competency questions".\n- Semantic transformation best practices and approaches: declarative and generated transformations .\n- Polyglot modeling: what is it and where it is manifesting itself in various data communities (from HL7 FHIR to YAML-LD).\n- Standards. Working Groups. How to get involved, what are some of the best practices from Vladimir's perspective and what should an aspiring semantic engineer and ontologist be aware of.\n- Common upper ontologies / Reusable ontologies / simple vs. 
broad, hear some examples from a dozen different industries.\n},\nannote        = {\n00:00 - 01:13 Intro and Ontotext News\n01:14 - 04:00 GraphDB features where to use RDF / Use cases and industries\n04:00 - 07:00 Connectors and transformation language for imports\n07:00 - 08:00 Elastic and search connectors\n08:00 - 10:49 GraphQL support / standards / avoiding cartesian product / standardization and full text search.\n10:50 - 16:30 RML / Start ontologies or start with data / templates with standard ttl + generating conversions\n16:35 - 20:00 Ontotext 10 step guide / start KG project with "competency questions"\n20:00 - 23:30 Application Centric Data / Defects in vocabularies / semantic representations / examples of standards settings organizations\n20:30 - 29:40 Polyglot modeling / data modeling / HL7/FHIR / YAML-LD easiness of yaml to read vs json\n30:00 - 32:00 Better modeling with json-ld Frames / community practices\n32:00 - 35:15 Object vs. Literal / Transparency EKG spec (schema.org vs other approach) / Inclusivity of wikidata / "be too demanding"\n35:15 - 39:10 Subject matter under specify deterrent for raising the quality of data / "use wikidata and geonames"\n39:00 - 41:30 Specificity of thinking from files and messages to real world. We are describing "things" in real world\n41:30 - 43:30 Value of descriptions\n43:30 - 47:15 Standards / Working groups / DBPedia vs Wikidata best practices / Ontotext Refine / Ontotext Reconcile\n47:15 - 51:30 Practices with W3C vs ISO standards\n51:30 - 54:45 Advice for upcoming graph specialists - example with internal query itterated from external query posted to SPARQL\n54:45 - 58:30 Justin asks for DPV W3C no consistent worldview / Common upper ontologies / Reusable ontologies / W3C practice with ORG and ADMS "simplify / make usable" / ISO-15926 / C vs lisp\n58:30 - 1:00:00 Start from "common primitives" / define the base is "not free" comes at a price / Crunchbase + IPO examples}\n}\n\n\n
\n
\n\n\n
\n In this episode of Loose Edges Marsel and Justin interview Vladimir Alexiev, Chief Data Architect at Ontotext. - We explore Application Centric Data and how to catch defects in various modeling approaches. - Discuss Ontotext products: new GraphDB capabilities such as search and connectors, GraphQL capabilities, Ontotext Refine, Ontotext Reconcile. - Ontotext \"10 step guide to KGs\". Start a KG project with \"competency questions\". - Semantic transformation best practices and approaches: declarative and generated transformations . - Polyglot modeling: what is it and where it is manifesting itself in various data communities (from HL7 FHIR to YAML-LD). - Standards. Working Groups. How to get involved, what are some of the best practices from Vladimir's perspective and what should an aspiring semantic engineer and ontologist be aware of. - Common upper ontologies / Reusable ontologies / simple vs. broad, hear some examples from a dozen different industries. \n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Data Wants To Be Truly Sovereign: Designing Data Spaces with Linked Data Principles In Mind.\n \n \n \n \n\n\n \n Petkova, T.; and Alexiev, V.\n\n\n \n\n\n\n Ontotext blog post, November 2022.\n \n\n\n\n
\n\n\n\n \n \n \"DataPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Misc{Petkova-Alexiev-DataSpaces-2022,\n  author       = {Teodora Petkova and Vladimir Alexiev},\n  title        = {{Data Wants To Be Truly Sovereign: Designing Data Spaces with Linked Data Principles In Mind}},\n  howpublished = {Ontotext blog post},\n  month        = nov,\n  year         = 2022,\n  url          = {https://www.ontotext.com/blog/data-wants-to-be-truly-sovereign-designing-data-spaces/},\n  keywords     = {Data Spaces, Knowledge Graphs, Semantic Data Spaces},\n  abstract     = {Learn how data spaces, being a mechanism to enable efficient commercial data exchange can significantly benefit from the use of Linked Data at the level of data itself},\n}\n\n
\n
\n\n\n
\n Learn how data spaces, being a mechanism to enable efficient commercial data exchange can significantly benefit from the use of Linked Data at the level of data itself\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Data Spaces vs Knowledge Graphs: How to Get To Semantic Data Spaces?.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n In Data Spaces & Semantic Interoperability Workshop, Vienna, Austria, July 2022. \n \n\n\n\n
\n\n\n\n \n \n \"DataPaper\n  \n \n \n \"Data slides\n  \n \n \n \"Data video\n  \n \n \n \"Data blog\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{Alexiev-DataSpaces-2022,\n  author       = {Vladimir Alexiev},\n  title        = {{Data Spaces vs Knowledge Graphs: How to Get To Semantic Data Spaces?}},\n  booktitle    = {{Data Spaces & Semantic Interoperability Workshop}},\n  year         = 2022,\n  month        = jul,\n  address      = {Vienna, Austria},\n  url          = {https://drive.google.com/file/d/15RuCfyresjmc0JWoNl8Jpjpbf_O65UkD/view},\n  url_Slides   = {https://docs.google.com/presentation/d/1uujCfAGw7nTwz9c6ItLtUhsKiGEbK2bKCWUOOunpyw0/edit},\n  url_Video    = {https://www.youtube.com/watch?v=RpCVChGczSA},\n  url_Blog     = {https://www.ontotext.com/company/news/ontotext-presents-position-paper-at-data-spaces-and-semantic-interoperability-workshop/},\n  keywords     = {Data Spaces, RDF, Semantic Technology, Polyglot Modeling, Product Classifications, Product Catalogs, Manufacturing Industry, Electricity, Transport and Logistics, Architecture and Construction},\n  abstract     = {EU invests heavily in Data Spaces (DS) as a mechanism to enable commercial data exchange and therefore industry digitalization and proliferation of Data Science (DS) and Artificial Intelligence, in particular Machine Learning (ML). While DSs use heavily semantic technologies, that is limited to describing metadata, license agreements, data market participants, etc. I argue that using Linked Data and semantic technologies for the data itself offers significant benefits regarding more efficient data sharing and use, and improvements to ML and DS processes. I give an overview of the state of semantic data sharing in several industrial domains (Product Classifications and Catalogs, Manufacturing Industry, Electricity, Transport and Logistics, Architecture and Construction; and close with a brief overview of technological enablers required for Semantic Data Spaces.},\n}\n\n
\n
\n\n\n
\n EU invests heavily in Data Spaces (DS) as a mechanism to enable commercial data exchange and therefore industry digitalization and proliferation of Data Science (DS) and Artificial Intelligence, in particular Machine Learning (ML). While DSs use heavily semantic technologies, that is limited to describing metadata, license agreements, data market participants, etc. I argue that using Linked Data and semantic technologies for the data itself offers significant benefits regarding more efficient data sharing and use, and improvements to ML and DS processes. I give an overview of the state of semantic data sharing in several industrial domains (Product Classifications and Catalogs, Manufacturing Industry, Electricity, Transport and Logistics, Architecture and Construction; and close with a brief overview of technological enablers required for Semantic Data Spaces.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Ontologies vs Linked Data & Knowledge Graphs.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n In First International Workshop on Semantic Industrial Information Modelling (SemIIM 2022 at ESWC 2022), May 2022. \n Panel presentation\n\n\n\n
\n\n\n\n \n \n \"OntologiesPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Alexiev-SemIIM2022,\n  author       = {Vladimir Alexiev},\n  title        = {{Ontologies vs Linked Data & Knowledge Graphs}},\n  booktitle    = {{First International Workshop on Semantic Industrial Information Modelling (SemIIM 2022 at ESWC 2022)}},\n  year         = 2022,\n  month        = may,\n  note         = {Panel presentation},\n  url          = {https://docs.google.com/presentation/d/1lKGZ_6MsTE15E6wFBorsVHmyQ3RjqzznpzQ7xpoDhUU/edit},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Transparency EKG Requirements Specification, Architecture and Semantic Model.\n \n \n \n \n\n\n \n Alexiev, V.; Ribchev, V.; Chervenski, M.; Tulechki, N.; Radkov, M.; Kunchev, A.; and Nanov, R.\n\n\n \n\n\n\n Technical Report Ontotext Corp, June 2022.\n \n\n\n\n
\n\n\n\n \n \n \"TransparencyPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@TechReport{AlexievEtAl-TEKG-spec,\n  author       = {Vladimir Alexiev and Viktor Ribchev and Miroslav Chervenski and Nikola Tulechki and Mihail Radkov and Antoniy Kunchev and Radostin Nanov},\n  title        = {{Transparency EKG Requirements Specification, Architecture and Semantic Model}},\n  institution  = {Ontotext Corp},\n  year         = 2022,\n  type         = {Specification},\n  month        = jun,\n  url          = {https://transparency.ontotext.com/spec/},\n  keywords     = {energy, electricity, ENTSO-E, market transparency, knowledge graph, specification, semantic architecture, semantic model},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Transparency Energy Knowledge Graph Project: Final Results.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n Presentation, October 2022.\n \n\n\n\n
\n\n\n\n \n \n \"TransparencyPaper\n  \n \n \n \"Transparency video\n  \n \n \n \"Transparency bdva\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Misc{Alexiev-TEKG4,\n  author       = {Vladimir Alexiev},\n  title        = {{Transparency Energy Knowledge Graph Project: Final Results}},\n  howpublished = {Presentation},\n  month        = oct,\n  year         = 2022,\n  url          = {https://docs.google.com/presentation/d/1jpgrBr2eXvOShlOtFmoMeF1jjGIrvt5F},\n  url_Video    = {https://www.youtube.com/watch?v=Lm4Q2riM3Ro},\n  keywords     = {energy, electricity, ENTSO-E, power plant databases, electricity market, energy markets, market transparency, knowledge graph, OpenStreetMap, EIC, validation, SHACL, SHACL Advanced, analytics},\n  url_BDVA     = {https://jam4.sapjam.com/blogs/show/XnKajJjHL6qjJt6dUuPXzI},\n  address      = {Presentation at Ontotext Knowledge Graph Forum 2022},\n  abstract     = {The Transparency Energy KG (TEKG) project converted part of the ENTSO-E electricity market transparency data to a semantic KG and complemented it with external data sources (VIES for VAT validation, OpenStreetMap for power plant maps and coordinates, several power plant databases for correspondences/coreferencing). We have implemented a number of advanced validations over fundamental electricity data such as the EIC file (Energy Identification Code), power plant data, and specific market data observations (time series). We also implemented advanced analytics and map views, including integration of OpenStreetMap maps. KGs afford a holistic view over the data that allow us to uncover a number of data problems, presented in a Data Quality Assessment Dashboard. This could help ENTSO-E and national electricity authorities (Transmission System Operators, TSO) diagnose data quality problems and improve data collection procedures and legislation.},\n}\n\n
\n
\n\n\n
\n The Transparency Energy KG (TEKG) project converted part of the ENTSO-E electricity market transparency data to a semantic KG and complemented it with external data sources (VIES for VAT validation, OpenStreetMap for power plant maps and coordinates, several power plant databases for correspondences/coreferencing). We have implemented a number of advanced validations over fundamental electricity data such as the EIC file (Energy Identification Code), power plant data, and specific market data observations (time series). We also implemented advanced analytics and map views, including integration of OpenStreetMap maps. KGs afford a holistic view over the data that allow us to uncover a number of data problems, presented in a Data Quality Assessment Dashboard. This could help ENTSO-E and national electricity authorities (Transmission System Operators, TSO) diagnose data quality problems and improve data collection procedures and legislation.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Transparency Energy Knowledge Graphs for Energy Traceability.\n \n \n \n \n\n\n \n \n\n\n \n\n\n\n presentation, September 2022.\n \n\n\n\n
\n\n\n\n \n \n \"TransparencyPaper\n  \n \n \n \"Transparency bdva\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
\n
\n\n\n
\n Ontotext's Transparency Energy KG (TEKG) project converted part of ENTSO-E electricity market transparency data to a semantic KG and complemented it with external data sources (VIES for VAT validation, OpenStreetMap for power plant maps and coordinates, several power plant databases for correspondences/coreferencing). There are at least 8 EU regulations that lay out rules for market transparency, in particular in energy markets. But energy is holistic, so going beyond electricity, ACER tracks at least 20 transparency platforms in various stages of certification, of which 16 operate for Electricity and 16 for Natural Gas. ENTSO-E and ENTSO-G are the central transparency platforms, but there are also platforms run by energy exchanges (e.g. EEX) and nonprofits (e.g. GIE). The ENTSO-G Transparency Platform publishes data about the gas market, and GIE has data about current and future gas infrastructure, including gas storages. ACER also tracks 130 other platforms (104 of which active): marketplaces, Registered Reporting Mechanisms, trade matching systems, etc. This is important data that affects all of us as energy consumers, and becomes even more important given the Russian gas crisis. However, the data is fragmented in distributed databases with their own access modes and only partially harmonized information. KGs and semantic data integration afford holistic views over all data across an industry and facilitate data validation and analyzes that were not previously possible. A number of identifiers can be used to coreference these entities: EIC for all kind of energy resources and players (issued in a decentralized way, no central database exists), 13 database-specific ids of power plants, GIE storage id for gas storages, ACER id for market players, MIC for market places, BIC for bank routing, GLEI for legal entities, GS1 GLN for logistics locations, OpenStreetMap for entities on a map, and Wikidata id for an encyclopedic KG, etc. 
We have worked with many of these datasets, in particular integrating parts in Wikidata for open semantic integration. We present the TEKG project, then some of the mentioned datasets and our ideas how TEKG could be extended to cover the following cases. UC1: Energy transparency basic data: semantically integrated, verified through blockchain and RDF Validation; including master data UC2: Energy data for market players, exchanges, regulators and policy makers: analysis of energy prices, trading practices, energy mix transformation and evolution UC3: Analysis of the Sustainability of EU Gas and Progress Towards Energy Independence from Russia UC4: Energy Tracing for CO2 Footprint and Pollution Impact, for enterprises who have mandates to progress towards zero emissions \n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Advanced SHACL Data Validation for the Transparency Energy KG.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n presentation, May 2022.\n \n\n\n\n
\n\n\n\n \n \n \"AdvancedPaper\n  \n \n \n \"Advanced video\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Misc{Alexiev-TEKG2,\n  author       = {Vladimir Alexiev},\n  title        = {{Advanced SHACL Data Validation for the Transparency Energy KG}},\n  howpublished = {presentation},\n  month        = may,\n  year         = 2022,\n  url          = {https://docs.google.com/presentation/d/1Hhxmx2YDnaxlaU5KeafjRJSDlVgHRz1z/edit},\n  url_Video    = {https://youtu.be/4JGSui7Uq_Y},\n  keywords     = {energy, electricity, ENTSO-E, market transparency, knowledge graph, validation, SHACL, SHACL Advanced},\n  address      = {Presentation at Ontotext Demo Days},\n  abstract     = {The Transparency Energy KG (TEKG) project converts the ENTSO-E electricity market transparency data to a semantic KG and complements with external data sources (VIES for VAT validation, OpenStreetMap for power plant maps and coordinates, several power plant databases for correspondences/coreferencing). We have implemented a number of advanced validations over fundamental electricity data such as the EIC file (Energy Identification Code), power plant data, and specific market data observations (time series). KGs afford a holistic view over the data that allow us to uncover a number of data problems, presented in a Data Quality Assessment Dashboard. This could help ENTSO-E and national electricity authorities (Transmission System Operators, TSO) diagnose data quality problems and improve data collection procedures and legislation.},\n}\n\n
\n
\n\n\n
\n The Transparency Energy KG (TEKG) project converts the ENTSO-E electricity market transparency data to a semantic KG and complements with external data sources (VIES for VAT validation, OpenStreetMap for power plant maps and coordinates, several power plant databases for correspondences/coreferencing). We have implemented a number of advanced validations over fundamental electricity data such as the EIC file (Energy Identification Code), power plant data, and specific market data observations (time series). KGs afford a holistic view over the data that allow us to uncover a number of data problems, presented in a Data Quality Assessment Dashboard. This could help ENTSO-E and national electricity authorities (Transmission System Operators, TSO) diagnose data quality problems and improve data collection procedures and legislation.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Transparency Energy Knowledge Graph.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n presentation, January 2022.\n \n\n\n\n
\n\n\n\n \n \n \"TransparencyPaper\n  \n \n \n \"Transparency pdf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Misc{Alexiev-TEKG1,\n  author       = {Vladimir Alexiev},\n  title        = {{Transparency Energy Knowledge Graph}},\n  howpublished = {presentation},\n  month        = jan,\n  year         = 2022,\n  url          = {https://docs.google.com/presentation/d/1I0CKJ_y-Lq0eErnOabBBxmfAuOQHYNey/edit},\n  url_PDF      = {http://interrface.eu/sites/default/files/ontotext_TEKG-20210131.pdf},\n  keywords     = {energy, electricity, ENTSO-E, market transparency, knowledge graph},\n  address      = {Presentation at Joint INTERRFACE Open Call Projects meeting},\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2021\n \n \n (7)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Cross-disciplinary ontologies for buildings, infrastructure, smart grid, electricity, energy efficiency.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n presentation, November 2021.\n \n\n\n\n
\n\n\n\n \n \n \"Cross-disciplinaryPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Misc{Alexiev-EBDVF2021,\n  author       = {Vladimir Alexiev},\n  title        = {{Cross-disciplinary ontologies for buildings, infrastructure, smart grid, electricity, energy efficiency}},\n  howpublished = {presentation},\n  month        = nov,\n  year         = 2021,\n  url          = {https://rawgit2.com/VladimirAlexiev/my/master/pres/EBDVF-2021-(V.Alexiev).pptx},\n  keywords     = {Cross-disciplinary, ontologies, buildings, infrastructure, smart grid, electricity, energy efficiency, energy consumption, AECO, architecture, construction, cadaster, smart city, Manufacturing, Transport/Logistics, Product classification, sensor, CIM, CGMES, IFC, LOIN, IDM, ICDD, COINS, MVD, BCF, bSDD, Data Dictionaries, Data Templates, Object Libraries, Bricks Schema, Haystack, Digital Buildings, Real Estate Core, LBD, BOT, BPO, CDC, CTO, DOT, FOG, OMG CityGML, GeoSPARQL, other OGC, ISO 23262, GIS-BIM interop, FSGIM, OpenADR, DABGEO, EnergyUse, OEMA, EEPSA, PROSG, SEAS, SEMANCO, DogOnt, ThinkHome, OPC UA, AutomationML, RAMI, AdminShell GS1 EPCIS, CBV, WebVoc, Digital Links, TDS identifiers (GTIN, GLN, GRAI, GIAI, GDTN…) COBIE, eClass, IEC CDD, GS1 GPC, UNSPSC, SOSA, SSN, WoT TD, DTML, SAREF, SAREF4ENER, SAREF4BLDG, SAREF4water},\n  address      = {Presentation at European Big Data Value Forum (EBDVF 2021)},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Semantization of Machine Learning and Data Science (a Project Idea).\n \n \n \n \n\n\n \n Alexiev, V.; and Boytcheva, S.\n\n\n \n\n\n\n presentation, September 2021.\n \n\n\n\n
\n\n\n\n \n \n \"SemantizationPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Misc{AlexievBoytcheva2021-SemantizationML,\n  author       = {Vladimir Alexiev and Svetla Boytcheva},\n  title        = {{Semantization of Machine Learning and Data Science (a Project Idea)}},\n  howpublished = {presentation},\n  month        = sep,\n  year         = 2021,\n  url          = {https://docs.google.com/presentation/d/1_8LSXa9vVzNwPE6Hjj4cKIJNRRBNz2wP/edit},\n  keywords     = {Ontotext, research projects, knowledge graph, KG technologies, Semantization, Machine Learning, Data Science},\n  address      = {Presentation at Big Dava Value Association Activity Group 45 (BDVA AG 45)},\n  abstract     = {Problem: Data Science, AI & ML are expensive, and that's one of the reasons why relatively few enterprises use them.\nGoal: rationalize and industrialize DS efforts, and make them more reproducible and reusable.\nApproach: capture a lot of semantic info about all DS processes in an enterprise, and thus enable automation, discovery, reusability.\n    \nThe kinds of data we'd like to represent and integrate semantically (part of it is similar to what you can see on the Kaggle and OpenML sites): \n- Business context: goals, motivations, data value, value chain, cost vs benefit analysis, SWOT analysis...\n- DS challenges, where do they come from, datasets that can be leveraged to solve them \n- DS staff, expertise, projects, tasks, risks \n- DS/ML algorithms, implementations, modules, dependencies, software projects, versions, issue trackers \n- Cloud and IT resources: compute, storage; their deployment, management, automation...\n- ML model deployment, performance, model drift, retraining… \n\nEstablished software genres that cover parts of this landscape: \n- ModelOps (devOps for ML), Feature Spaces \n- Enterprise data catalogs (data hubs) vs data marketplaces vs open data catalogs vs EU Data Spaces and their metadata \n- FAIR data, reproducible research, Research Objects, research workflows, \n\nWe've researched over 100 relevant ontologies that can be 
leveraged, covering \n- Organizations/enterprises, business plans, \n- Ontologies, semantic data, \n- DS challenges, datasets, statistical data, quality assessment \n- DS/ML approaches, software, projects, issues, \n- Data on research/science \n- Project management \n\nFocusing on DS/ML approaches only, a couple of the relevant ontologies or standards are: \n- PMML (predictive modeling markup language) \n- e-LICO, DMEX ontologies for describing DS \n- OntoDM, KDO ontologies for describing DS},\n}\n\n
\n
\n\n\n
\n Problem: Data Science, AI & ML are expensive, and that's one of the reasons why relatively few enterprises use them. Goal: rationalize and industrialize DS efforts, and make them more reproducible and reusable. Approach: capture a lot of semantic info about all DS processes in an enterprise, and thus enable automation, discovery, reusability. The kinds of data we'd like to represent and integrate semantically (part of it is similar to what you can see on the Kaggle and OpenML sites): - Business context: goals, motivations, data value, value chain, cost vs benefit analysis, SWOT analysis... - DS challenges, where do they come from, datasets that can be leveraged to solve them - DS staff, expertise, projects, tasks, risks - DS/ML algorithms, implementations, modules, dependencies, software projects, versions, issue trackers - Cloud and IT resources: compute, storage; their deployment, management, automation... - ML model deployment, performance, model drift, retraining… Established software genres that cover parts of this landscape: - ModelOps (devOps for ML), Feature Spaces - Enterprise data catalogs (data hubs) vs data marketplaces vs open data catalogs vs EU Data Spaces and their metadata - FAIR data, reproducible research, Research Objects, research workflows, We've researched over 100 relevant ontologies that can be leveraged, covering - Organizations/enterprises, business plans, - Ontologies, semantic data, - DS challenges, datasets, statistical data, quality assessment - DS/ML approaches, software, projects, issues, - Data on research/science - Project management Focusing on DS/ML approaches only, a couple of the relevant ontologies or standards are: - PMML (predictive modeling markup language) - e-LICO, DMEX ontologies for describing DS - OntoDM, KDO ontologies for describing DS\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Energy Knowledge Graphs to Facilitate Evolution of the European Energy Market.\n \n \n \n \n\n\n \n Ivanov, C.; and Alexiev, V.\n\n\n \n\n\n\n presentation, October 2021.\n \n\n\n\n
\n\n\n\n \n \n \"EnergyPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Misc{IvanovAlexiev2021-EnergyKG,\n  author       = {Chavdar Ivanov and Vladimir Alexiev},\n  title        = {{Energy Knowledge Graphs to Facilitate Evolution of the European Energy Market}},\n  howpublished = {presentation},\n  address      = {Presentation at Ontotext Knowledge Graph Forum 2021},\n  month        = oct,\n  year         = 2021,\n  url          = {https://docs.google.com/presentation/d/1vvrUGtutbOzwUK19Z0nEUhZbP6kw-KiFdNELMG4V3v8/edit},\n  keywords     = {knowledge graph, energy knowledge graph, CIM, CGMES, ENTSOE, Single Energy Market, energy market transparency},\n  abstract     = {Presents the EU Single Electricity Market, IEC Common Information Model (CIM), ENTSOE Common Grid Model Exchange Specification (CGMES), how Energy KGs can improve data integration in the energy domain, ENTSOE market transparency data, and Ontotext's Energy Transparency KG project.},\n}\n\n
\n
\n\n\n
\n Presents the EU Single Electricity Market, IEC Common Information Model (CIM), ENTSOE Common Grid Model Exchange Specification (CGMES), how Energy KGs can improve data integration in the energy domain, ENTSOE market transparency data, and Ontotext's Energy Transparency KG project.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Energy Knowledge Graphs.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n presentation, July 2021.\n \n\n\n\n
\n\n\n\n \n \n \"EnergyPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Misc{Alexiev2021-EnergyKG,\n  author       = {Vladimir Alexiev},\n  title        = {{Energy Knowledge Graphs}},\n  howpublished = {presentation},\n  month        = jul,\n  year         = 2021,\n  url          = {https://docs.google.com/presentation/d/1GcJqTZFRptX5lAGBA2RXThreGxH9LQAZi5qPnJX1tTQ/edit},\n  keywords     = {knowledge graphs, data spaces, European Energy Data Space, CIM, CGMES, ENTSOE, Single Energy Market, energy market transparency, EU Green Deal, industry digitization},\n  address      = {Presentation to IIA/KeyLogic and US DOE NETL and OSTI},\n  abstract     = {Presents the EU Data Spaces initiatives, Single Electricity Market, ENTSOE market transparency data, IEC Common Information Model (CIM), ENTSOE Common Grid Model Exchange Specification (CGMES), and how Energy KGs can improve data integration in the energy domain.},\n}\n\n
\n
\n\n\n
\n Presents the EU Data Spaces initiatives, Single Electricity Market, ENTSOE market transparency data, IEC Common Information Model (CIM), ENTSOE Common Grid Model Exchange Specification (CGMES), and how Energy KGs can improve data integration in the energy domain.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Diverse Uses of a Semantic Graph Database for Knowledge Organization and Research.\n \n \n \n \n\n\n \n Alexiev, V.\n\n\n \n\n\n\n In European Data Conference on Reference Data and Semantics (ENDORSE 2021), pages 47, July 2021. European Commission: Directorate-General for Informatics, Publications Office of the European Union, ISA2 Programme\n \n\n\n\n
\n\n\n\n \n \n \"DiversePaper\n  \n \n \n \"Diverse github\n  \n \n \n \"Diverse ppt\n  \n \n \n \"Diverse slides\n  \n \n \n \"Diverse video\n  \n \n \n \"Diverse zotero\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 5 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{Alexiev-ENDORSE-2021,\n  author       = {Vladimir Alexiev},\n  title        = {{Diverse Uses of a Semantic Graph Database for Knowledge Organization and Research}},\n  booktitle    = {{European Data Conference on Reference Data and Semantics (ENDORSE 2021)}},\n  year         = 2021,\n  pages        = 47,\n  month        = jul,\n  organization = {European Commission: Directorate-General for Informatics, Publications Office of the European Union, ISA2 Programme},\n  url          = {https://op.europa.eu/o/opportal-service/download-handler?identifier=41b06a9b-e388-11eb-895a-01aa75ed71a1&format=pdf&language=en&productionSystem=cellar},\n  url_Github   = {https://github.com/VladimirAlexiev/ontotext-graphdb-applications},\n  url_PPT      = {https://github.com/VladimirAlexiev/ontotext-graphdb-applications/raw/master/Diverse%20Uses%20of%20a%20Semantic%20Graph%20Database%20for%20Knowledge%20Organization%20and%20Research%20(ENDORSE%202021).pptx},\n  url_Slides   = {https://op.europa.eu/documents/7525478/8087182/ALEXIEV_presentation_Diverse+Uses+of+a+Semantic+Graph+Database+for+Knowledge+Organization+and+Research.pdf/b27afc2c-3db7-749b-c50c-52b3ded79f3c},\n  url_Video    = {https://www.youtube.com/watch?v=0q63x2P1V0o&list=PLT5rARDev_rmGr_LJkr7zcI-Qul7yOOHO&index=4&t=4780s},\n  url_Zotero   = {https://www.zotero.org/groups/2744757/ontotext-graphdb},\n  keywords     = {bibliography, semantic database, graph database, semantic repository, knowledge graph, Knowledge Organization System, VocBench, PoolParty, Synaptica, Semaphore, EuroVoc, AgroVoc, Getty Vocabularies, social media analytics, data marketplaces, business process management, enterprise data integration, statistical data, engineering, smart cities, sensor networks, life sciences, biomedical ontologies, medicines, chemistry, linguistic data, semantic publishing, semantic text analysis, geographic information, master data management, academic/research data, COVID, Zika virus, Quran, bilingual data, art 
history, Holocaust research, musical events, musical adaptations, iconography, food and drink, tourism, investment decision support, economic research, offshore leaks, maritime data, construction projects, building information management, crisis management, critical incidents, data journalism, clinical trials, investment recommendations, data journalism,},\n  doi          = {10.2830/44569},\n  isbn         = {978-92-78-42416-9},\n  annote       = {Catalogue number: OA-03-21-303-EN-N},\n  date         = {2021-07-12},\n  abstract     = {Semantic Graph Databases are the foundation of Enterprise Knowledge Graphs. They are used in numerous industrial applications, but also Knowledge Organization Management systems (thesaurus and ontology management systems), such as VocBench, SWC PoolParty, Synaptica Semaphore. Through VocBench, semantic databases manage or publish some of the most important thesauri: EuroVoc, AgroVoc, the Getty Vocabularies, etc. Semantic databases are also used in a wide variety of research domains and projects. Some have open source or free editions that make them an easy choice for academic research. We searched on Google Scholar and found 1000-1200 academic papers and theses mentioning one of the popular databases. We also found at least 50 books on Google Books that mention it. We started a Zotero bibliography on the topic (currently about 150 papers), and captured about 220 research topics, based on the titles of about 250 papers. We will present an analysis of reference data and research domains using a semantic database. Some of the traditional topics include: social media analytics, data marketplaces, business process management, enterprise data integration, statistical data, engineering, smart cities, sensor networks, life sciences, biomedical ontologies, medicines, chemistry, linguistic data, semantic publishing, semantic text analysis, geographic information, master data management. 
Newer or more exotic topics include academic/research data, COVID and Zika viruses, Quran and bilingual Arabic-English data, art history, Holocaust research, musical events and adaptations, iconography, food and drink, tourism, investment decision support, economic research, offshore leaks, maritime data, construction projects, building information management, crisis management, critical incidents and infrastructures, data journalism, clinical trials and specific medical topics (e.g. intestinal cells, intracoronal tooth restorations, vaccines, toxicology), investment recommendations, data journalism, etc.},\n}\n\n
\n
\n\n\n
\n Semantic Graph Databases are the foundation of Enterprise Knowledge Graphs. They are used in numerous industrial applications, but also Knowledge Organization Management systems (thesaurus and ontology management systems), such as VocBench, SWC PoolParty, Synaptica Semaphore. Through VocBench, semantic databases manage or publish some of the most important thesauri: EuroVoc, AgroVoc, the Getty Vocabularies, etc. Semantic databases are also used in a wide variety of research domains and projects. Some have open source or free editions that make them an easy choice for academic research. We searched on Google Scholar and found 1000-1200 academic papers and theses mentioning one of the popular databases. We also found at least 50 books on Google Books that mention it. We started a Zotero bibliography on the topic (currently about 150 papers), and captured about 220 research topics, based on the titles of about 250 papers. We will present an analysis of reference data and research domains using a semantic database. Some of the traditional topics include: social media analytics, data marketplaces, business process management, enterprise data integration, statistical data, engineering, smart cities, sensor networks, life sciences, biomedical ontologies, medicines, chemistry, linguistic data, semantic publishing, semantic text analysis, geographic information, master data management. Newer or more exotic topics include academic/research data, COVID and Zika viruses, Quran and bilingual Arabic-English data, art history, Holocaust research, musical events and adaptations, iconography, food and drink, tourism, investment decision support, economic research, offshore leaks, maritime data, construction projects, building information management, crisis management, critical incidents and infrastructures, data journalism, clinical trials and specific medical topics (e.g. 
intestinal cells, intracoronal tooth restorations, vaccines, toxicology), investment recommendations, data journalism, etc.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The euBusinessGraph Ontology: a Lightweight Ontology for Harmonizing Basic Company Information.\n \n \n \n \n\n\n \n Roman, D.; Alexiev, V.; Paniagua, J.; Elvesaeter, B.; von Zernichow, B. M.; Soylu, A.; Simeonov, B.; and Taggart, C.\n\n\n \n\n\n\n Semantic Web - Interoperability, Usability, Applicability (SWJ),41-68. November 2021.\n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n \n \"The published\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Article{EBG-2020-SWJ,\n  author       = {Dumitru Roman and Vladimir Alexiev and Javier Paniagua and Brian Elvesaeter and Bjorn Marius von Zernichow and Ahmet Soylu and Boyan Simeonov and Chris Taggart},\n  title        = {{The euBusinessGraph Ontology: a Lightweight Ontology for Harmonizing Basic Company Information}},\n  journal      = {{Semantic Web - Interoperability, Usability, Applicability (SWJ)}},\n  year         = 2021,\n  pages        = {41-68},\n  month        = nov,\n  url          = {https://www.semantic-web-journal.net/content/eubusinessgraph-ontology-lightweight-ontology-harmonizing-basic-company-information-0},\n  url_Published= {https://content.iospress.com/articles/semantic-web/sw210424},\n  keywords     = {Company data, Knowledge Graph, Ontology, Linked data},\n  issue        = 13,\n  publisher    = {IOS Press},\n  doi          = {10.3233/SW-210424},\n  abstract     = {Company data, ranging from basic company information such as company name(s) and incorporation date to complex balance sheets and personal data about directors and shareholders, are the foundation that many data value chains depend upon in various sectors (e.g., business information, marketing and sales, etc.