@inproceedings{nand-etal2024EKAW, title={Scholarly {Wikidata}: Population and Exploration of Conference Data in Wikidata using {LLMs}}, author={Nandana Mihindukulasooriya and Sanju Tiwari and Daniil Dobriy and Finn Årup Nielsen and Tek Raj Chhetri and Axel Polleres}, year={2024}, booktitle = {24th International Conference on Knowledge Engineering and Knowledge Management (EKAW 2024)}, month = nov, day = {26-28}, abstract = {Several initiatives have been undertaken to conceptually model the domain of scholarly data using ontologies and to create respective Knowledge Graphs. Yet, the full potential seems not yet unleashed, as automated means for automatic population of said ontologies are lacking, and respective initiatives from the Semantic Web community are not necessarily connected: we propose to make scholarly data more sustainably accessible by leveraging Wikidata's infrastructure and automating its population in a sustainable manner through LLMs by tapping into unstructured sources like conference Web sites and proceedings texts as well as already existing structured conference datasets. While an initial analysis shows that Semantic Web conferences are only minimally represented in Wikidata, we argue that our methodology can help to populate, evolve and maintain scholarly data as a community within Wikidata. Our main contributions include (a) an analysis of ontologies for representing scholarly data to identify gaps and relevant entities/properties in Wikidata, (b) semi-automated extraction -- requiring (minimal) manual validation -- of conference metadata (e.g., acceptance rates, organizer roles, programme committee members, best paper awards, keynotes, and sponsors) from websites and proceedings texts using LLMs. Finally, we discuss (c) extensions to visualization tools in the Wikidata context for data exploration of the generated scholarly data. Our study focuses on data from 105 Semantic Web-related conferences and extends/adds more than 6000 entities in Wikidata. It is important to note that the method can be more generally applicable beyond Semantic Web-related conferences for enhancing Wikidata's utility as a comprehensive scholarly resource.}, note = {accepted at EKAW-24, to appear}, eprint={2411.08696}, archivePrefix={arXiv}, primaryClass={cs.DL}, url={https://arxiv.org/abs/2411.08696}, }
@proceedings{dobr-etal-2024-ragekg, Abstract = {Workshop Proceedings. This workshop was co-located with ISWC2024.}, Address = {Hanover, MD}, Title = {RAGE-KG 2024: Retrieval-Augmented Generation Enabled by Knowledge Graphs}, Day = 11, Editor = {Daniil Dobriy and Francesco Osborne and Axel Polleres and Marta Sabou}, Month = nov, Publisher = {CEUR-WS.org}, Series = {CEUR Workshop Proceedings}, Type = BOOK, Url = {http://ceur-ws.org/TO-APPEAR/}, note = {to appear}, Year = 2024, }
@inproceedings{schu-etal-2024SciK, booktitle = {Proceedings of the 4th International Workshop on Scientific Knowledge (Sci-K), co-located with 23rd International Semantic Web Conference (ISWC 2024)}, editor = {Angelo A. Salatino and Andrea Mannocci and Francesco Osborne and Sonja Schimmler and Georg Rehm}, title = {Assessing the Reliability and Scientific Rigor of References in {Wikidata}}, author = {Hannah Schuster and Amin Anjomshoaa and Axel Polleres}, url = {https://ceur-ws.org/Vol-3780/paper7.pdf}, year = 2024, month = nov, day = 12, abstract = {Wikidata is a rapidly growing user-edited open knowledge graph that provides easy access to structured data. Since Wikidata allows contradictory information, references are crucial for supporting statements and tracking the source of information. Consequently, investigating the use, types, and scientific value of references within Wikidata is essential. In this paper, we will first conduct a heuristic evaluation of Wikidata references using a sampling method. Subsequently, we will focus on a specific category of references, Digital Object Identifiers (DOIs), known for citing scientific publications. Our analysis of sampled Wikidata statements indicates widespread adoption of the DOI system within Wikidata. To assess the quality of scholarly resources referenced in Wikidata, we used percentile metrics derived from the OpenAlex platform. Additionally, h-index indicators from OpenAlex were employed to evaluate the credibility of these sources and determine whether the Wikidata citations originated from reputable sources or publishers. Our findings show that papers in the social and physical sciences tend to perform better in Wikidata compared to OpenAlex. Moreover, while top-tier journals dominate citations in OpenAlex, particularly in the health and life sciences, Wikidata shows a higher citation rate for mid-tier and emerging journals. This indicates a broader representation of scholarly contributions within Wikidata.}, series = {{CEUR} Workshop Proceedings}, volume = 3780, publisher = {CEUR-WS.org}, }
@inproceedings{dobr-poll-2024ESWC, title = {{SMW Cloud}: A Corpus of Domain-Specific Knowledge Graphs from {Semantic} {MediaWikis}}, year = 2024, author = {Daniil Dobriy and Axel Polleres}, abstract = {Semantic wikis have become an increasingly popular means of collaboratively managing Knowledge Graphs. They are powered by platforms such as Semantic MediaWiki and Wikibase, both of which enable MediaWiki to store and publish structured data. While there are many semantic wikis currently in use, there has been little effort to collect and analyse their structured data, nor to make it available for the research community. This paper seeks to address this gap by systematically collecting structured data from an extensive corpus of Semantic-MediaWiki-powered portals and providing an in-depth analysis of the ontological diversity and re-use amongst these wikis using a variety of ontological metrics. Our paper aims to demonstrate that semantic wikis are a valuable and extensive part of Linked Open Data and, in fact, may be considered an active ``sub-cloud'' within the Linked Open Data ecosystem, which can provide useful insights into the evolution of small and medium-sized domain-specific Knowledge Graphs.}, booktitle = {Proceedings of the 21st European Semantic Web Conference (ESWC2024)}, pages = {145--161}, day = {26--30}, month = may, Address = {Hersonissos, Greece}, publisher = springer, series = lncs, volume = 14665, doi = {10.1007/978-3-031-60635-9\_9}, editor = {Albert Mero{\~{n}}o{-}Pe{\~{n}}uela and Anastasia Dimou and Rapha{\"{e}}l Troncy and Olaf Hartig and Maribel Acosta and Mehwish Alam and Heiko Paulheim and Pasquale Lisena}, }
@article{ferr-etal-2024SWJ, author = {Nicolas Ferranti and Jairo Francisco de Souza and Shqiponja Ahmetaj and Axel Polleres}, title = {Formalizing and Validating {Wikidata}'s Property Constraints using {SHACL} and {SPARQL}}, journal = SWJ, note = {To appear, accepted for publication}, year = 2024, abstract = {In this paper, we delve into the crucial role of constraints in maintaining data integrity in knowledge graphs with a specific focus on Wikidata, one of the most extensive collaboratively maintained open data knowledge graphs on the Web. The World Wide Web Consortium (W3C) recommends the Shapes Constraint Language (SHACL) as the constraint language for validating Knowledge Graphs, which comes in two different levels of expressivity, SHACL-Core, as well as SHACL-SPARQL. Despite the availability of SHACL, Wikidata currently represents its property constraints through its own RDF data model, which relies on Wikidata's specific reification mechanism based on authoritative namespaces, and - partially ambiguous - natural language definitions. In the present paper, we investigate whether and how the semantics of Wikidata property constraints can be formalized using SHACL-Core, SHACL-SPARQL, as well as directly as SPARQL queries. While the expressivity of SHACL-Core turns out to be insufficient for expressing all Wikidata property constraint types, we present SPARQL queries to identify violations for all 32 current Wikidata constraint types. We compare the semantics of this unambiguous SPARQL formalization with Wikidata's violation reporting system and discuss limitations in terms of evaluation via Wikidata's public SPARQL query endpoint, due to its current scalability limits. Our study, on the one hand, sheds light on the unique characteristics of constraints defined by the Wikidata community, in order to improve the quality and accuracy of data in this collaborative knowledge graph. On the other hand, as a ``byproduct'', our formalization extends existing benchmarks for both SHACL and SPARQL with a challenging, large-scale real-world use case.}, url = {https://www.semantic-web-journal.net/content/formalizing-and-validating-wikidatas-property-constraints-using-shacl-and-sparql-0}, }
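As a concrete illustration of the SPARQL-based violation checking discussed in this entry, the sketch below runs one toy query against Wikidata's public endpoint using the SPARQLWrapper library. It is a minimal sketch only: the choice of property (P569, date of birth) and the query itself are illustrative assumptions, not one of the paper's 32 formalized constraint queries.

# Illustrative sketch: detect violations of Wikidata's "single value"
# constraint for one property (P569 is an assumed example property).
from SPARQLWrapper import SPARQLWrapper, JSON

QUERY = """
PREFIX wdt: <http://www.wikidata.org/prop/direct/>
SELECT ?item (COUNT(?value) AS ?cnt) WHERE {
  ?item wdt:P569 ?value .   # P569 = date of birth
}
GROUP BY ?item
HAVING (COUNT(?value) > 1)
LIMIT 10
"""

endpoint = SPARQLWrapper("https://query.wikidata.org/sparql",
                         agent="constraint-check-sketch/0.1")
endpoint.setQuery(QUERY)
endpoint.setReturnFormat(JSON)

for row in endpoint.query().convert()["results"]["bindings"]:
    print(row["item"]["value"], "has", row["cnt"]["value"], "birth dates")

Note that, in line with the scalability limits the paper discusses, such an unrestricted aggregation may well hit the public endpoint's timeout; the LIMIT bounds the result size, not the aggregation work.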
@article{azza-etal-2024SWJ, author = {Amr Azzam and Axel Polleres and Javier D. Fernandez and Maribel Acosta}, title = {{smart-KG}: Partition-Based Linked Data Fragments for Querying Knowledge Graphs}, journal = SWJ, volume = 15, number = 5, pages = {1791--1835}, doi = {10.3233/SW-243571}, year = 2024, abstract = {RDF and SPARQL provide a uniform way to publish and query billions of triples in open knowledge graphs (KGs) on the Web. Yet, provisioning of a fast, reliable, and responsive live querying solution for open KGs is still hardly possible through SPARQL endpoints alone: while such endpoints provide a remarkable performance for single queries, they typically cannot cope with highly concurrent query workloads by multiple clients. To mitigate this, the Linked Data Fragments (LDF) framework sparked the design of different alternative low-cost interfaces such as Triple Pattern Fragments (TPF), that partially offload the query processing workload to the client side. On the downside, such interfaces still come with the expense of unnecessarily high network load due to the necessary transfer of intermediate results to the client, leading to query performance degradation compared with endpoints. To address this problem, in the present work, we investigate alternative interfaces, refining and extending the original TPF idea, which also aims at reducing server-resource consumption, by shipping query-relevant partitions of KGs from the server to the client. To this end, first, we align formal definitions and notations of the original LDF framework to uniformly present existing LDF implementations and such “partition-based” LDF approaches. These novel LDF interfaces retrieve, instead of the exact triples matching a particular query pattern, a subset of pre-materialized, compressed, partitions of the original graph, containing all answers to a query pattern, to be further evaluated on the client side. As a concrete representative of partition-based LDF, we present smart-KG+, extending and refining our prior work [1] in several respects. Our proposed approach is a step forward towards a better-balanced share of the query processing load between clients and servers by shipping graph partitions driven by the structure of RDF graphs to group entities described with the same sets of properties and classes, resulting in significant data transfer reduction. Our experiments demonstrate that smart-KG+ significantly outperforms existing Web SPARQL interfaces on both pre-existing benchmarks for highly concurrent query execution as well as an accustomed query workload inspired by query logs of existing SPARQL endpoints.}, url = {https://www.semantic-web-journal.net/content/smart-kg-partition-based-linked-data-fragments-querying-knowledge-graphs-1}, }
@article{poll-etal-2023TGDK, author = {Axel Polleres and Romana Pernisch and Angela Bonifati and Daniele Dell'Aglio and Daniil Dobriy and Stefania Dumbrava and Lorena Etcheverry and Nicolas Ferranti and Katja Hose and Ernesto Jim{\'{e}}nez{-}Ruiz and Matteo Lissandrini and Ansgar Scherp and Riccardo Tommasini and Johannes Wachs}, abstract = {Openly available, collaboratively edited Knowledge Graphs (KGs) are key platforms for the collective management of evolving knowledge. The present work aims to provide an analysis of the obstacles related to investigating and processing specifically this central aspect of evolution in KGs. To this end, we discuss (i) the dimensions of evolution in KGs, (ii) the observability of evolution in existing, open, collaboratively constructed Knowledge Graphs over time, and (iii) possible metrics to analyse this evolution. We provide an overview of relevant state-of-the-art research, ranging from metrics developed for Knowledge Graphs specifically to potential methods from related fields such as network science. Additionally, we discuss technical approaches - and their current limitations - related to storing, analysing and processing large and evolving KGs in terms of handling typical KG downstream tasks.}, title = {How Does Knowledge Evolve in Open Knowledge Graphs?}, journal = {{TGDK}}, volume = 1, number = 1, month = dec, pages = {11:1--11:59}, year = 2023, url = {https://doi.org/10.4230/TGDK.1.1.11}, doi = {10.4230/TGDK.1.1.11}, }
@inproceedings{anjo-etal-SWP23, author = {Amin Anjomshoaa and Hannah Schuster and Johannes Wachs and Axel Polleres}, abstract = {Data plays a critical role in crisis response and intervention efforts by providing decision-makers with timely, accurate, and actionable information. During a crisis, data can help organizations and crisis managers identify the most affected populations, track the spread of the crisis, and monitor the effectiveness of their response efforts. We introduce the CRISP Knowledge Graph, constructed from various data resources provided by different stakeholders involved in crisis and disaster management, which presents a uniform view of infrastructure, networks, and services pertinent to crisis management use cases. We also present preliminary results for network and infrastructure analysis which demonstrate how the CRISP KG can address the requirements of crisis management and urban resilience scenarios.}, editor = {Marcela Ruiz and Pnina Soffer}, title = {Towards Crisis Response and Intervention Using Knowledge Graphs - {CRISP} Case Study}, booktitle = {Advanced Information Systems Engineering Workshops - CAiSE 2023 International Workshops, Zaragoza, Spain, June 12-16, 2023, Proceedings}, series = {Lecture Notes in Business Information Processing}, volume = 482, pages = {67--73}, publisher = {Springer}, month = jun, day = {12--16}, year = 2023, doi = {https://doi.org/10.1007/978-3-031-34985-0_7}, }
@inproceedings{dobr-poll-ISWC2023PD, title = {Crawley: A Tool for Web Platform Discovery}, author = {Daniil Dobriy and Axel Polleres}, year = 2023, abstract = {Crawley, a Python-based command-line tool, provides an automated mechanism for web platform discovery. Incorporating capabilities such as Search Engine crawling, web platform validation and recursive hyperlink traversal, it facilitates the systematic identification and validation of a variety of web platforms. The tool's effectiveness and versatility are demonstrated via two successful use cases: the identification of Semantic MediaWiki instances, as well as the discovery of Open Data Portals including OpenDataSoft, Socrata, and CKAN. These empirical results underscore Crawley's capacity to support web-based research. We further outline potential enhancements of the tool, thereby positioning Crawley as a valuable tool in the field of web platform discovery.}, booktitle = {Proceedings of the 22nd International Semantic Web Conference (ISWC2023) -- Posters and Demos Track}, url = {http://polleres.net/publications/dobr-poll-ISWC2023PD.pdf}, note = {To appear}, }
@inproceedings{dobr-poll-2023KCAP, author = {Daniil Dobriy and Axel Polleres}, title = {{O2WB}: A tool enabling ontology reuse in {Wikibase}}, abstract = {The Semantic Web initiative has established standards and practices for publishing interconnected knowledge, where RDF Schema and OWL shall enable the reuse of ontologies as one of these established practices. However, Wikibase, the software behind Wikidata, which is increasingly gaining popularity among data publishers, lacks the functionality to import and reuse existing RDF Schema and OWL ontologies. To facilitate ontology reuse, FAIR data publishing and encourage a tighter connection of existing Linked Data resources with Wikibase instances, we align the Wikibase data model with RDF and present O2WB, a tool for ontology import and export within Wikibase.}, year = 2023, booktitle = {Proceedings of the 12th Knowledge Capture Conference (K-CAP '23)}, month = dec, pages = {101--104}, doi = {https://doi.org/10.1145/3587259.3627568} }
@inproceedings{varg-etal2023WD, title = {{PhyQus}: Automatic Unit Conversions for {Wikidata} Physical Quantities}, author = {Felipe Vargas-Rojas and Axel Polleres and Llorenç Cabrera-Bosquet and Danai Symeonidou}, year = 2023, abstract={Wikidata is gaining attention to address scientific experimental information. In particular, users can exploit the notions of physical quantities and units of measurement already defined in its knowledge graph. However, when querying such scientific data, users can only retrieve the physical quantities in the units of measurement explicitly stored as statements, although the knowledge to transform these quantity values into different units required by the query is already (partially) defined in the units' metadata. We propose PhyQus, a query-answering approach that allows retrieving physical quantities in any convertible unit by performing unit conversion on the fly based on the query information. To this end, our approach is based on the advanced features of the W3C recommendation SHACL and leverages the units-of-measurement ontology QUDT. We showcase that the approach is feasible considering two main examples, one about the area of cities and the other about the boiling point of chemical substances.}, note = {accepted/to appear}, url={https://openreview.net/forum?id=0dKU7Q8MGL}, day=7, month = nov, booktitle = {4th Wikidata Workshop (co-located with ISWC2023)}, }
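To illustrate the conversion principle this entry relies on: QUDT-style unit definitions carry a conversion multiplier to a base unit, so any two convertible units relate by the ratio of their multipliers. The following is a minimal sketch with hard-coded factors standing in for a QUDT ontology lookup; it is not the PhyQus implementation, and the unit names are assumptions.

# Toy sketch of multiplier-based unit conversion in the spirit of QUDT:
# every unit stores a multiplier to its base unit (here: square metre),
# so v[target] = v[source] * mult[source] / mult[target].
TO_BASE = {
    "unit:M2": 1.0,        # square metre (base unit)
    "unit:KM2": 1.0e6,     # square kilometre
    "unit:HA": 1.0e4,      # hectare
}

def convert(value: float, source: str, target: str) -> float:
    """Convert between two units that share the same base unit."""
    return value * TO_BASE[source] / TO_BASE[target]

# e.g. an area of 414.87 km^2 expressed in hectares:
print(convert(414.87, "unit:KM2", "unit:HA"))   # -> 41487.0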
@inproceedings{anjo-etal2023D2R2, author = {Amin Anjomshoaa and Hannah Schuster and Johannes Wachs and Axel Polleres}, abstract = {Data integration plays a crucial role in crisis management and city resilience use cases by enabling the consolidation of information from scattered sources into a unified view, thereby allowing decision-makers to gain a more complete and accurate understanding of the situation at hand. In this paper, we introduce the CRISP Knowledge Graph, constructed from various data resources to present a uniform view of infrastructure networks and services pertinent to crisis management, enabling informed and targeted interventions in crisis management use cases. We provide a brief explanation of the semantic model and its significance in building a comprehensive knowledge graph and then outline our approach for incorporating some large spatiotemporal datasets into this framework, considering the unique challenges that arise in this process.}, title = {From Data to Insights: Constructing Spatiotemporal Knowledge Graphs for City Resilience Use Cases}, booktitle = {Proceedings of the Second International Workshop on Linked Data-driven Resilience Research 2023 (D2R2 2023), co-located with Extended Semantic Web Conference 2023 (ESWC 2023)}, year = 2023, day = 28, month = may, url = {https://ceur-ws.org/Vol-3401/paper1.pdf}, series = {{CEUR} Workshop Proceedings}, volume = 3401, publisher = {CEUR-WS.org}, }
@inproceedings{schu-etal-2023NetSci, title={Quantifying road network vulnerability by access to healthcare}, author = {Hannah Schuster and Axel Polleres and Johannes Wachs}, year = 2023, booktitle = {NetSci 2023: International School and Conference on Network Science}, abstract = {The resilience of transportation networks is highly variable. Some components are crucially important: their failure can cause problems throughout the system. One way to probe a system for weak points needing reinforcement is via simulated stress tests. Network scientists have applied node or edge removal simulations to many systems: interbank lending markets, power grids, software networks, etc. Reliable transit via roads is especially crucial in healthcare: delays in travel to hospitals have a significant negative effect on patient outcomes including mortality. Yet past studies of road network resilience focus on general mobility or specific kinds of events like floods. And it is unclear how classical resilience analysis applies to geographically embedded road networks with homogeneous degree distribution. We address this gap by using a coarse-grained representation of the Austrian road network in which nodes are municipalities and edges connect municipalities directly via roads. We stress this network, observing changes in accessibility when removing individual edges and groups of edges in geographic clusters using a population-weighted measure of hospital accessibility. Under specific scenarios, certain segments play a critical role in accessibility. We observe changes in burdens on individual hospitals as road closures change which hospitals are nearest. These results are valuable for scheduling road maintenance, extending the road network, or evaluating hospital capacity.}, Note = {conference abstract only, paper forthcoming}, }
@inproceedings{tchat-etal2023ESCW, author = {Gary P. Tchat and Amin Anjomshoaa and Daniil Dobriy and Fajar J. Ekaputra and Elmar Kiesling and Axel Polleres and Marta Sabou}, abstract = {7H15 P4P3r r3V13W5 7H3 3V01U710N 0F 7H3 W15D0M W38 4ND 11NK5 175 D3V310PM3N7 70 r3534rCH 0N 7H3 53M4N71C W38 – Fr0M 175 1NC3P710N 1N 7H3 34r1Y 2157 C3N7UrY 70 175 CUrr3N7 57473 1N 2042. W3 D15CU55 7H3 K3Y M113570N35, CH4113N635 4ND 1NN0V4710N5 7H47 H4V3 5H4P3D 7H3 "W15D0M W38" 14ND5C4P3 0V3r 7H3 P457 D3C4D35, CU1M1N471N6 1N 7H3 H16H1Y 1N73rC0NN3C73D 1N7311163N7 4ND 3FF1C13N7 6L084L KN0W13D63 5Y573M W3 H4V3 70D4Y. 7H3 P4P3r 5UMM4r1Z35 7H3 F1r57 4U7H0r’5 P3r50N41 V13W5 F0CU51N6 0N 7H3 3V01U710N 0F 7H3 F13LD 51NC3 H3 574r73D H15 PHD 1N 7H3 34r1Y 2020’5. 45 4 r3M1N15C3NC3 70 7H3 3V01U710N 0F 4L50 Wr1773N 14N6U463 51NC3 7H3N, W3 W111 U53 C14551C41 Wr1773N 14N6U463 1N 7H3 r357 0F 7H15 P4P3r. DISCLAIMER: This paper is a work of fiction, written in 2023 and describing research that may be carried out in and until 2043. For this reason, it includes citations to papers produced in the period 2024-2043, which have not been published (yet); all citations prior to 2024 refer instead to papers already in the literature. Any reference or resemblance to actual events or people or businesses, past present or future, is entirely coincidental and the product of the authors’ imagination. Even the imaginary 2043 keynote speaker and first author, who started its PhD in the early 2020’s, is fictitious.}, title = {From Semantic Web to Wisdom Web: A Retrospective on the Journey to 2043}, booktitle = {Proceedings of the ESWC2023 ``The next 20 years (ESWC 2043)'' track}, year= 2023, day = 31, month = may, url = {http://polleres.net/publications/tchat-etal2023ESCW.pdf}, doi = {https://doi.org/10.5281/zenodo.8147589}, }
@inproceedings{klag-poll-TEXT2KG2023, author = {Gerhard Georg Klager and Axel Polleres}, abstract = {In this paper we report about preliminary results on running question answering benchmarks against the recently hyped conversational AI services such as ChatGPT: we focus on questions that are known to be possible to be answered by information in existing Knowledge graphs such as Wikidata. In a preliminary study we experiment, on the one hand, with questions from established KGQA benchmarks, and on the other hand, present a set of questions established in a student experiment, which should be particularly hard for Large Language Models (LLMs) to answer, mainly focusing on questions on recent events. In a second experiment, we assess how far GPT could be used for query generation in SPARQL. While our results are mostly negative for now, we hope to provide insights for further research in this direction, in terms of isolating and discussing the most obvious challenges and gaps, and to provide a research roadmap for a more extensive study planned as a current master thesis project.}, title = {Is {GPT} fit for {KGQA}? -- Preliminary results}, booktitle = {Proceedings of the International Workshop on Knowledge Graph Generation from Text (Text2KG2023), co-located with Extended Semantic Web Conference 2023 (ESWC 2023)}, note = {to appear}, year = 2023, day = 29, month = may, url = {http://polleres.net/publications/klag-poll-TEXT2KG2023.pdf} }
@inproceedings{dobr-poll-2022WD, author = {Daniil Dobriy and Axel Polleres}, title = {Analysing and promoting ontology interoperability in {Wikibase}}, abstract = {Wikibase, the open-source software behind Wikidata, increasingly gains popularity among third-party Linked Data publishers. However, the platform's unique data model decreases the degree of interoperability with existing Semantic Web standards and tools that underlie Linked Data as codified by Linked Data principles. In particular, this unique data model of Wikibase also undermines the direct reuse of ontologies and vocabularies, in a manner compliant with Semantic Web standards and Linked Data principles. To this end, firstly, we compare the Wikibase data model to the established RDF data model. Secondly, we enumerate a series of challenges for importing existing ontologies into Wikibase. Thirdly, we present practical solutions to these challenges and introduce a tool for importing and re-using ontologies within Wikibase. Thus, the paper aims to promote ontology interoperability in Wikibase and by doing so hopes to contribute to a higher degree of inter-linkage of Wikibase instances with Linked Open Data.}, year = 2022, month=oct, booktitle = {Proceedings of the 3rd Wikidata Workshop (co-located with ISWC2022)}, url = {http://polleres.net/publications/dobr-poll-2022WD.pdf} }
@inproceedings{ferr-etal-2022WD, title = {Formalizing Property Constraints in {Wikidata}}, author = {Nicolas Ferranti and Axel Polleres and Jairo Francisco De Souza and Shqiponja Ahmetaj}, abstract = {Constraints play an important role to ensure data integrity. While the Shapes Constraint Language (SHACL) provides a W3C recommendation for validating RDF Knowledge Graphs (KG) against such constraints, real-world KG have adopted their own constraint formalisms. Wikidata (WD), one of the largest collaboratively maintained Open Data Knowledge Graphs available on the Web, represents property constraints through its own RDF data model, within its own authoritative namespaces, which might be an indication that the nature of WD property constraints is different from other Knowledge Graphs. In this paper we investigate the semantics of WD constraints, and unambiguously formalize all current constraints using SPARQL to retrieve violations; we also discuss the expressiveness of the WD constraint language compared with SHACL core and discuss the evolution of constraint violations. We found that, while all current WD property constraint types can be expressed using SPARQL, only 86\% (26 out of 30) can be expressed using SHACL core: the rest face issues related to using separator properties and arithmetic expressions.}, year = 2022, month=oct, booktitle = {Proceedings of the 3rd Wikidata Workshop (co-located with ISWC2022)}, url = {http://polleres.net/publications/ferr-etal-2022WD.pdf}}
@inproceedings{baro-etal-2022WD, title = {Analysing the Evolution of Community-Driven (Sub-)Schemas within {Wikidata}}, author = {Sofia Baroncini and Margherita Martorana and Mario Scrocca and Zuzanna Smiech and Axel Polleres}, abstract = {Wikidata is a collaborative knowledge graph not structured according to predefined ontologies. Its schema evolves bottom-up, defined by its users. In this paper, we propose a methodology to investigate how semantics develop in sub-schemas used by particular, domain-specific communities within the Wikidata knowledge graph: (i) we provide an approach to identify the domain sub-schema from a set of given classes and its related community, considered domain-specific; (ii) we propose an approach for analysing the sub-schemas and communities so identified, including their evolution over time. Finally, we suggest further possible analyses that would give better insights into (i) the communities themselves and (ii) the accuracy and quality of the KG vocabulary and its evolution over time according to domain areas, raising the potential for Wikidata's improvement and its re-use by domain experts.}, year = 2022, month=oct, booktitle = {Proceedings of the 3rd Wikidata Workshop (co-located with ISWC2022)}, url = {http://polleres.net/publications/baro-etal-2022WD.pdf} }
@inproceedings{ahme-etal-2022ISWC, author = {Robert David and Shqiponja Ahmetaj and Mantas \v{S}imkus and Axel Polleres}, title = {Repairing {SHACL} Constraint Violations using Answer Set Programming}, abstract = {The Shapes Constraint Language (SHACL) is a recent W3C recommendation for validating RDF graphs against \emph{shape} constraints to be checked on \emph{target nodes} of the data graph. The standard also describes the notion of \emph{validation reports} for data graphs that violate given constraints, which aims to provide feedback on how the data graph can be fixed to satisfy the constraints. Since the specification left it open to SHACL processors to define such explanations, a recent work proposed the use of explanations in the style of database \emph{repairs}, where a repair is a set of additions to or deletions from the data graph so that the resulting graph validates against the constraints. In this paper, we study such repairs for non-recursive SHACL, the largest fragment of SHACL that is fully defined in the specification. We propose an algorithm to compute repairs by encoding the explanation problem -- using Answer Set Programming (ASP) -- into a logic program, the answer sets of which correspond to (minimal) repairs. We then study a scenario where it is not possible to simultaneously repair all the targets, which may often be the case due to overall unsatisfiability or conflicting constraints. We introduce a relaxed notion of validation, which allows validating a (maximal) subset of the targets, and adapt the ASP translation to take into account this relaxation. Our implementation in Clingo is -- to the best of our knowledge -- the first implementation of a repair generator for SHACL.}, month = oct, day = {23--27}, year = 2022, booktitle = {Proceedings of the 21st International Semantic Web Conference (ISWC 2022)}, address = {Virtual Conference (Hangzhou, China)}, series = LNCS, volume = 13489, pages = {375--391}, publisher = {Springer}, doi = {https://doi.org/10.1007/978-3-031-19433-7_22}, url = {http://polleres.net/publications/ahme-etal-2022ISWC.pdf} }
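To give a flavour of the repair idea behind this entry (a drastically simplified sketch, not the paper's actual ASP encoding): the toy program below checks a SHACL-like minimum-cardinality requirement on target nodes and derives, for each violated target, an addition that repairs it, using clingo's Python API. All facts and predicate names here are invented for illustration.

# Toy sketch of "repair as answer set": a target node must have at least
# one outgoing 'knows' edge; for each violated target the answer set
# proposes an added edge to a fresh placeholder node.
from clingo import Control

PROGRAM = r"""
target(alice). target(bob).
edge(bob, knows, carol).

has_knows(X) :- edge(X, knows, _).
violated(X)  :- target(X), not has_knows(X).

% repair: add a 'knows' edge to a fresh placeholder node
add(X, knows, fresh(X)) :- violated(X).

#show add/3.
"""

ctl = Control()
ctl.add("base", [], PROGRAM)
ctl.ground([("base", [])])
ctl.solve(on_model=lambda m: print("repair:", m.symbols(shown=True)))

The paper's encoding additionally handles deletions, repair minimality and conflicting constraints, all of which this sketch omits.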
@article{havu-etal-2022ESWA, author = {Giray Havur and Cristina Cabanillas and Axel Polleres}, journal = {Expert Systems with Applications}, publisher = {Elsevier}, title = {Benchmarking Answer Set Programming Systems for Resource Allocation in Business Processes}, volume = {in press}, abstract = {Declarative logic programming formalisms are well-suited to model various optimization and configuration problems. In particular, Answer Set Programming (ASP) systems have gained popularity, for example, to deal with scheduling problems present in several domains. The main goal of this paper is to devise a benchmark for ASP systems to assess their performance when dealing with complex and realistic resource allocation with objective optimization. To this end, we provide (i) a declarative and compact encoding of the resource allocation problem in ASP (compliant with the ASP Core-2 standard), (ii) a configurable ASP systems benchmark named BRANCH that is equipped with resource allocation instance generators that produce problem instances of different sizes with adjustable parameters (e.g. in terms of process complexity, organizational and temporal constraints), and (iii) an evaluation of four state-of-the-art ASP systems using \textsc{BRANCH}. This solid application-oriented benchmark serves the ASP community with a tool that leads to potential optimizations and improvements in encodings and further drives the development of ASP solvers. On the other hand, resource allocation is an important problem that still lacks adequate automated tool support in the context of Business Process Management (BPM). The ASP problem encoding, ready-to-use ASP systems and problem instance generators benefit the BPM community to tackle the problem at scale and mitigate the lack of openly available problem instance data.}, year = 2022, doi = {https://doi.org/10.1016/j.eswa.2022.117599} }
@inproceedings{bach-etal-2022DEXA, title={Automated Process Knowledge Graph Construction from BPMN models}, abstract = {Enterprise knowledge graphs are increasingly adopted in industrial settings to integrate heterogeneous systems and data landscapes. Manufacturing systems can benefit from knowledge graphs as they contribute towards implementing visions of interconnected, decentralized and flexible smart manufacturing systems. Process knowledge is a key perspective which has so far attracted limited attention in this context, despite its usefulness for capturing the context in which data are generated. Such knowledge is commonly expressed in diagrammatic languages and the resulting models cannot readily be used in knowledge graph construction. We propose BPMN2KG to address this problem. BPMN2KG is a transformation framework from BPMN2.0 process models into knowledge graphs. With this transformation, BPMN2KG creates a frame for process-centric data integration and analysis. We motivate and evaluate our transformation framework with a real-world industrial use case focused on quality management in plastic injection molding for the automotive sector. We use BPMN2KG for process-centric integration of dispersed production systems data, resulting in an integrated knowledge graph that can be queried using SPARQL, a standardized graph-pattern based query language. By means of several example queries, we illustrate how this knowledge graph benefits data contextualization and integrated analysis.}, author={Stefan Bachhofner and Elmar Kiesling and Kate Revoredo and Philipp Waibel and Axel Polleres}, note={Full paper}, doi = {https://doi.org/10.1007/978-3-031-12423-5_3}, booktitle={3rd DEXA conferences and workshops (DEXA2022)}, year=2022, month=aug, day={22-24}, }
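The gist of such a model-to-graph transformation can be sketched in a few lines: parse the BPMN 2.0 XML and emit one triple per task label plus one per sequence flow. The ex: namespace, the ex:precedes property and the input file name below are made-up placeholders, not the BPMN2KG mapping vocabulary.

# Minimal illustrative BPMN 2.0 XML -> triples sketch (NOT the BPMN2KG
# mapping): one labelled resource per task, one ex:precedes triple per
# sequence flow.
import xml.etree.ElementTree as ET

BPMN = "http://www.omg.org/spec/BPMN/20100524/MODEL"
EX = "http://example.org/process/"   # made-up target namespace

def bpmn_to_triples(path):
    root = ET.parse(path).getroot()
    triples = []
    for task in root.iter(f"{{{BPMN}}}task"):
        triples.append((EX + task.get("id"),
                        "http://www.w3.org/2000/01/rdf-schema#label",
                        task.get("name", "")))
    for flow in root.iter(f"{{{BPMN}}}sequenceFlow"):
        triples.append((EX + flow.get("sourceRef"),
                        EX + "precedes",
                        EX + flow.get("targetRef")))
    return triples

for s, p, o in bpmn_to_triples("process.bpmn"):   # hypothetical input file
    print(s, p, o)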
@inproceedings{hall-etal-2022ESWC, year = 2022, title = {An Analysis of Links in {Wikidata}}, abstract = {Wikidata has become one of the most prominent open knowledge graphs (KGs) on the Web. Relying on a community of users with different expertise, this cross-domain KG is directly related to other data sources. This paper investigates how Wikidata is linked to other data sources in the Linked Data ecosystem. To this end, we adapt previous definitions of ontology links and instance links to the terminological part of the Wikidata vocabulary and perform an analysis of the links in Wikidata to external datasets and ontologies from the Linked Data ecosystem. As a side effect, this reveals insights on the ontological expressiveness of meta-properties used in Wikidata. The results of this analysis show that while Wikidata defines a large number of individuals, classes and properties within its own namespace, they are not (yet) extensively linked. We discuss reasons for this and conclude with some suggestions to increase the interconnectedness of Wikidata with other KGs.}, author = {Armin Haller and Axel Polleres and Daniil Dobriy and Nicolas Ferranti and Sergio J. Rodr\'iguez M\'endez}, booktitle = {19th European Semantic Web Conference, ESWC 2022}, publisher = {Springer}, month = may, day = {29--02}, doi = {https://doi.org/10.1007/978-3-031-06981-9_2}, url = {http://polleres.net/publications/halll-etal-2022ESWC.pdf} }
@article{wach-etal-2022, title = {The Geography of Open Source Software: Evidence from {GitHub}}, journal = {Technological Forecasting and Social Change}, volume = {176}, pages = {121478}, month = mar, year = 2022, issn = {0040-1625}, doi = {https://doi.org/10.1016/j.techfore.2022.121478}, url = {https://www.sciencedirect.com/science/article/pii/S0040162522000105}, author = {Johannes Wachs and Mariusz Nitecki and William Schueller and Axel Polleres}, abstract = {Open Source Software (OSS) plays an important role in the digital economy. Yet although software production is amenable to remote collaboration and its outputs are digital, software development seems to cluster geographically in places like Silicon Valley, London, or Berlin. And while OSS activity creates positive externalities which accrue locally through knowledge spillovers and information effects, up-to-date data on the geographic distribution of open source developers is limited. This presents a significant blindspot for policymakers, who often promote OSS at the national level as a cost-saving tool for public sector institutions. We address this gap by geolocating more than half a million active contributors to GitHub in early 2021 at various spatial scales. Compared to results from 2010, we find a significant increase in the share of developers based in Asia, Latin America and Eastern Europe, suggesting a more even spread of OSS developers globally. Within countries, however, we find significant concentration in regions, exceeding the concentration of high-tech employment. Social and economic development indicators predict at most half of regional variation in OSS activity in the EU, suggesting that clusters have idiosyncratic roots. We argue for localized policies to support networks of OSS developers in cities and regions.} }
@inproceedings{havu-etal-2021BPMDemo, title = {{BRANCH}: An {ASP} Systems Benchmark for Resource Allocation in Business Processes}, booktitle = {Proceedings of the Best Dissertation Award, Doctoral Consortium, and Demonstration \& Resources Track at {BPM} 2021}, year = 2021, day = {6--10}, month = sep, author = {Giray Havur and Cristina Cabanillas and Axel Polleres}, abstract = {The goal of BRANCH is to benchmark Answer Set Programming (ASP) systems to test their performance when dealing with the task of automatically allocating resources to business process activities. Like many other scheduling problems, the allocation of resources and starting times to process activities is a challenging optimization problem, yet it is a crucial step for an optimal execution of the processes. BRANCH has been designed as a configurable benchmark equipped with instance generators that produce problem instances of different size and hardness with respect to adjustable parameters. This application-oriented benchmark supports the BPM community to find the ASP systems and implementations that perform better in solving the resource allocation problem.}, series = {{CEUR} Workshop Proceedings}, volume = 2973, publisher = {CEUR-WS.org}, pages = {176--180}, url = {http://ceur-ws.org/Vol-2973/paper_285.pdf} }
@inproceedings{ahme-etal-2021kr, title = {Reasoning about Explanations for Non-validation in {SHACL}}, author= {Shqiponja Ahmetaj and Robert David and Magdalena Ortiz and Axel Polleres and Bojken Shehu and Mantas \v{S}imkus}, year = 2021, month = nov, day = {3--12}, abstract = {The Shapes Constraint Language (SHACL) is a recently standardized language for describing and validating constraints over RDF graphs. The SHACL specification describes the so-called \emph{validation reports}, which are meant to explain to the users the outcome of validating an RDF graph against a collection of constraints. Specifically, explaining the reasons why the input graph does not satisfy the constraints is challenging. In fact, the current SHACL standard leaves it open on how such explanations can be provided to the users. In this paper, inspired by works on logic-based abduction and database repairs, we study the problem of explaining non-validation of SHACL constraints. In particular, in our framework non-validation is explained using the notion of a repair, i.e., a collection of additions and deletions whose application on an input graph results in a repaired graph that does satisfy the given SHACL constraints. We define a collection of decision problems for reasoning about explanations, possibly restricting to explanations that are minimal with respect to cardinality or set inclusion. We provide a detailed characterization of the computational complexity of those reasoning tasks, including the combined and the data complexity.}, url = {http://polleres.net/publications/ahme-etal-2021KR.pdf}, doi = {https://doi.org/10.24963/kr.2021/2}, booktitle = {Proceedings of the 18th International Conference on Principles of Knowledge Representation and Reasoning (KR 2021)}, }
@inproceedings{krab-poll21, author = {Bernhard Krabina and Axel Polleres}, editor = {Lucie{-}Aim{\'{e}}e Kaffee and Simon Razniewski and Aidan Hogan}, title = {Seeding {Wikidata} with Municipal Finance Data}, booktitle = {Proceedings of the 2nd Wikidata Workshop (Wikidata 2021) co-located with {(ISWC} 2021)}, month = oct, series = {{CEUR} Workshop Proceedings}, volume = {2982}, publisher = {CEUR-WS.org}, abstract = {The paradigm shift from cash-based to accrual accounting in the public finances of Austrian municipalities as of 2020, together with the availability of uniform spending data from Austria provides an ideal environment to research the potential of Wikidata for improving awareness of public finance information. The importance of publicly available municipal finance information is of significant interest for citizens to ensure trust in public spending and governance at a local level. It is all the more surprising that such spending data is hardly available. The present paper is a first push towards integrating comparable municipal finance data into Wikidata. Our analysis reveals a lack of joint, standardized representation of common public spending data. Thus we have begun seeding Wikidata with a unified corpus of finance data from 379 Austrian municipalities by batch uploading, re-using already existing properties. Our approach is a first step towards the question whether and how Wikipedia and Wikidata could serve as spaces for information on public finances.}, year = {2021}, url = {http://ceur-ws.org/Vol-2982/paper-9.pdf}, }
@article{hoga-etal-csur2021, title = {Knowledge Graphs}, author={Aidan Hogan and Eva Blomqvist and Michael Cochez and Claudia d'Amato and Gerard de Melo and Claudio Gutierrez and José Emilio Labra Gayo and Sabrina Kirrane and Sebastian Neumaier and Axel Polleres and Roberto Navigli and Axel-Cyrille Ngonga Ngomo and Sabbir M. Rashid and Anisa Rula and Lukas Schmelzeisen and Juan Sequeda and Steffen Staab and Antoine Zimmermann}, abstract = {In this paper we provide a comprehensive introduction to knowledge graphs, which have recently garnered significant attention from both industry and academia in scenarios that require exploiting diverse, dynamic, large-scale collections of data. After some opening remarks, we motivate and contrast various graph-based data models, as well as languages used to query and validate knowledge graphs. We explain how knowledge can be represented and extracted using a combination of deductive and inductive techniques. We conclude with high-level future research directions for knowledge graphs.}, journal={ACM Computing Surveys (CSUR)}, doi = {https://doi.org/10.1145/3447772}, volume = 54, number = 4, pages = {1--37}, year = 2021, month = jul, day = 2, note = {Extended pre-print available at \url{https://arxiv.org/abs/2003.02320}}, }
@inproceedings{azza-etal-2021WWW, title = {{WiseKG: Balanced Access to Web Knowledge Graphs}}, abstract = {SPARQL query services that balance processing between clients and servers become more and more essential to handle the increasing load for open and decentralized knowledge graphs on the Web. To this end, Linked Data Fragments (LDF) have introduced a foundational framework that has sparked research exploring a spectrum of potential Web querying interfaces in between server-side query processing via SPARQL endpoints and client-side query processing of data dumps. Current proposals in between typically suffer from imbalanced load on either the client or the server. In this paper, to the best of our knowledge, we present the first work that combines both client-side and server-side query optimization techniques in a truly dynamic fashion: we introduce WiseKG, a system that employs a cost model that dynamically delegates the load between servers and clients by combining client-side processing of shipped partitions with efficient server-side processing of star-shaped sub-queries, based on current server workload and client capabilities. Our experiments show that WiseKG significantly outperforms state-of-the-art solutions in terms of average total query execution time per client, while at the same time decreasing network traffic and increasing server-side availability.}, year = 2021, booktitle = {Proceedings of the Web Conference 2021}, address = {Ljubljana, Slovenia}, pages = {1422--1434}, publisher = {{ACM} / {IW3C2}}, doi = {https://doi.org/10.1145/3442381.3449911}, url = {http://www.polleres.net/publications/azza-etal-2021WWW.pdf}, author = {Amr Azzam and Christian Aebeloe and Gabriela Montoya and Ilkcan Keles and Axel Polleres and Katja Hose}, }
@article{filt-etal-2021AILAW, title = {The Linked Legal Data Landscape. Linking Legal Data Across different Countries}, author = {Erwin Filtz and Sabrina Kirrane and Axel Polleres}, abstract = {The European Union is working towards harmonizing legislation across Europe, in order to improve cross-border interchange of legal information. This goal is supported for instance via standards such as the European Law Identifier (ELI) and the European Case Law Identifier (ECLI), which provide technical specifications for Web identifiers and suggestions for vocabularies to be used to describe metadata pertaining to legal documents in a machine readable format. Notably, these ECLI and ELI metadata standards adhere to the RDF data format which forms the basis of Linked Data, and therefore have the potential to form a basis for a pan-European legal Knowledge Graph. Unfortunately, to date said specifications have only been partially adopted by EU member states. In this paper we describe a methodology to transform the existing legal information system used in Austria to such a legal knowledge graph covering different steps from modeling national specific aspects, to population, and finally the integration of legal data from other countries through linked data. We demonstrate the usefulness of this approach by exemplifying practical use cases from legal information search, which are not possible in an automated fashion so far.}, year = 2021, month = feb, day = 25, journal = {Artificial Intelligence and Law}, doi = {10.1007/s10506-021-09282-8}, url = {http://www.polleres.net/publications/filt-etal-2021AILAW.pdf}, volume = 29, pages = {485--539}, }
@article{OCG2020, Author = {Bernhard Moser and Georg Dorffner and Thomas Eiter and Wolfgang Faber and Günther Klambauer and Robert Legenstein and Bernhard Nessler and Axel Polleres and Stefan Woltran}, Journal = {OCG Journal}, Note = {Invited article (in German)}, Pages = {14--17}, Url = {https://www.ocg.at/sites/ocg.at/files/medien/pdfs/OCG-Journal20-1-2.pdf#page=14}, Title = {{\"Osterreichische AI Strategie aus Sicht der Wissenschaft: Forderungen der ASAI zu einer konkreten AI Strategie in \"Osterreich}}, Abstract = {The most recent government reshuffle and COVID-19 have interrupted the AIM AT 2030 process towards an AI strategy for Austria, initiated in 2018. As a consequence, and in contrast to the European environment, necessary measures have not been taken in the past two years, particularly in research funding for Machine Learning (ML) and Artificial Intelligence (AI), but also in other research fields relevant to Austria's digitalization.}, Type = MAGAZINE, Volume = {01/2020}, Year = 2020 }
@inproceedings{filt-etal-2020JURIX, author = {Erwin Filtz and Mar{\'{\i}}a Navas{-}Loro and Cristiana Santos and Axel Polleres and Sabrina Kirrane}, editor = {Serena Villata and Jakub Harasta and Petr Kremen}, title = {Events Matter: Extraction of Events from Court Decisions}, abstract = {The analysis of court decisions and associated events is part of the daily life of many legal practitioners. Unfortunately, since court decision texts can often be long and complex, bringing all events relating to a case into order to understand their connections and durations is a time-consuming task. Automated court decision timeline generation could provide a visual overview of what happened throughout a case by representing the main legal events, together with relevant temporal information. Tools and technologies to extract events from court decisions however are still underdeveloped. To this end, in the current paper we compare the effectiveness of three different extraction mechanisms, namely deep learning, conditional random fields, and a rule-based method, to facilitate automated extraction of events and their components (i.e., the event type, who was involved, and when it happened). In addition, we provide a corpus of manually annotated decisions of the European Court of Human Rights, which shall serve as a gold standard not only for our own evaluation, but also for the research community for comparison and further experiments.}, booktitle = {Legal Knowledge and Information Systems - {JURIX} 2020: The Thirty-third Annual Conference, Brno, Czech Republic, December 9-11, 2020}, series = {Frontiers in Artificial Intelligence and Applications}, volume = {334}, pages = {33--42}, publisher = {{IOS} Press}, year = {2020}, url = {https://doi.org/10.3233/FAIA200847}, doi = {10.3233/FAIA200847}, }
@inproceedings{webe-etal-2020ISWC, author = {Thomas Weber and Johann Mitl\"ohner and Sebastian Neumaier and Axel Polleres}, title = {ODArchive - Creating an archive for structured data from Open Data Portals}, abstract = {We present ODArchive, a large corpus of structured data collected from over 260 Open Data portals worldwide, along with curated, integrated metadata. Furthermore we enrich the harvested datasets by heuristic annotations using the type hierarchies in existing Knowledge Graphs. We both (i) present the underlying distributed architecture to scale up regular harvesting and monitoring changes on these portals, and (ii) make the corpus available via different APIs. Moreover, we (iii) analyse the characteristics of tabular data within the corpus. Our APIs can be used to regularly run such analyses or to reproduce experiments from the literature that have worked on static, not publicly available corpora.}, month = nov, day = {2--6}, year = 2020, booktitle = {Proceedings of the 19th International Semantic Web Conference (ISWC 2020)}, address = {Virtual Conference (Athens, Greece)}, series = LNCS, volume = 12507, pages={311--327}, publisher = {Springer}, url = {http://polleres.net/publications/webe-etal-2020ISWC.pdf}, doi = {https://doi.org/10.1007/978-3-030-62466-8_20} }
@inproceedings{port-etal-2020EKAW, title = {Challenges of Linking Organizational Information in Open Government Data to Knowledge Graphs}, author = {Jan Portisch and Omaima Fallatah and Sebastian Neumaier and Axel Polleres}, booktitle = {22nd International Conference on Knowledge Engineering and Knowledge Management (EKAW 2020)}, address = {Bozen-Bolzano, Italy}, day = {16-20}, month = sep, year = 2020, abstract = {Open Government Data (OGD) is being published by various public administration organizations around the globe. Within the metadata of OGD data catalogs the publishing organizations (1) are not uniquely and unambiguously identifiable and, even worse, (2) change over time, by public administration units being merged or restructured. In order to enable fine-grained analyses or searches on Open Government Data on the level of publishing organizations, linking those from OGD portals to publicly available knowledge graphs (KGs) such as \textit{Wikidata} and \textit{DBpedia} seems like an obvious solution. Still, as we show in this position paper, organization linking faces significant challenges, both in terms of available (portal) metadata and in terms of KG data quality and completeness. We herein specifically highlight five main challenges, namely regarding (1) temporal changes in organizations and in the portal metadata, (2) lack of a base ontology for describing organizational structures and changes in public knowledge graphs, (3) metadata and KG data quality, (4) multilinguality, and (5) disambiguating public sector organizations. Based on available OGD portal metadata from the \textit{Open Data Portal Watch}, we provide an in-depth analysis of these issues, make suggestions for concrete starting points on how to tackle them along with a call to the community to jointly work on these open challenges.}, volume = 12387, series = LNCS, publisher={Springer}, pages={271--286}, url = {https://arxiv.org/abs/2008.06232}, doi = {http://dx.doi.org/10.1007/978-3-030-61244-3_19}, }
@inproceedings{azza-etal-2020WWW, title = {{SMART-KG}: Hybrid Shipping for {SPARQL} Querying on the Web}, abstract = {While Linked Data (LD) provides standards for publishing (RDF) and (SPARQL) querying Knowledge Graphs (KGs) on the Web, serving, accessing and processing such open, decentralized KGs is often practically impossible, as query timeouts on publicly available SPARQL endpoints show. Alternative solutions such as Triple Pattern Fragments (TPF) attempt to tackle the problem of availability by pushing query processing workload to the client side, but suffer from unnecessary transfer of irrelevant data on complex queries with large intermediate results. In this paper we present smart-KG, a novel approach to share the load between servers and clients, while significantly reducing data transfer volume, by combining TPF with shipping compressed KG partitions. Our evaluations show that smart-KG outperforms state-of-the-art client-side solutions and increases server-side availability towards more cost-effective and balanced hosting of open and decentralized KGs.}, year = 2020, booktitle = {The Web Conference 2020}, address = {Taipei, Taiwan}, note = {Pre-print available at \url{https://epub.wu.ac.at/7428}}, author = {Amr Azzam and Javier D. Fern{\'a}ndez and Maribel Acosta and Martin Beno and Axel Polleres}, doi = {https://doi.org/10.1145/3366423.3380177} }
@article{hall-etal-2020JDIQ, author = {Armin Haller and Javier D. Fern{\'a}ndez and Maulik R. Kamdar and Axel Polleres}, title = {What are Links in Linked Open Data? A Characterization and Evaluation of Links between Knowledge Graphs on the Web}, journal = {ACM Journal of Data and Information Quality (JDIQ)}, note = {Pre-print available at \url{https://epub.wu.ac.at/7193/}}, abstract = {Linked Open Data promises to provide guiding principles to publish interlinked knowledge graphs on the Web in the form of findable, accessible, interoperable and reusable datasets. We argue that while as such, Linked Data may be viewed as a basis for instantiating the FAIR principles, there are still a number of open issues that cause significant data quality issues even when knowledge graphs are published as Linked Data. Firstly, in order to define boundaries of single coherent knowledge graphs within Linked Data, a principled notion of what a dataset is, or, respectively, what links within and between datasets are, has been missing. Secondly, we argue that in order to enable FAIR knowledge graphs, Linked Data misses a standardised findability and accessibility mechanism, via a single entry link. In order to address the first issue, we (i) propose a rigorous definition of a naming authority for a Linked Data dataset, (ii) define different link types for data in Linked datasets, (iii) provide an empirical analysis of linkage among the datasets of the Linked Open Data cloud, and (iv) analyse the dereferenceability of those links. We base our analyses and link computations on a scalable mechanism implemented on top of the HDT format, which allows us to analyse quantity and quality of different link types at scale.}, year = 2020, volume = 2, number = 2, pages = {1--34}, month = may, doi = {10.1145/3369875} }
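The scalable link computation mentioned in this entry boils down to iterating over an HDT-compressed graph and comparing the naming authority of subject and object IRIs. Below is a rough sketch using the pyHDT bindings, with the authority check simplified to a hostname comparison; the paper's definition of naming authority is more refined, and the input file name is a placeholder.

# Rough sketch: count intra- vs. cross-authority links in an HDT file by
# comparing subject/object hostnames -- a simplification of the paper's
# naming-authority definition. Assumes the "hdt" (pyHDT) bindings.
from urllib.parse import urlparse
from hdt import HDTDocument

doc = HDTDocument("dataset.hdt")             # hypothetical input file
triples, _ = doc.search_triples("", "", "")  # full scan over all triples

internal = external = 0
for s, p, o in triples:
    if not o.startswith("http"):             # skip literals and blank nodes
        continue
    if urlparse(s).netloc == urlparse(o).netloc:
        internal += 1
    else:
        external += 1

print(f"internal links: {internal}, external links: {external}")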
@article{poll-etal-2020SWJ10Y_Decentralized, author = {Axel Polleres and Maulik Rajendra Kamdar and Javier D. Fern\'andez and Tania Tudorache and Mark A. Musen}, title = {A More Decentralized Vision for Linked Data}, journal = SWJ, abstract = {In this deliberately provocative position paper, we claim that more than ten years into Linked Data there are still (too?) many unresolved challenges towards arriving at a truly machine-readable and decentralized Web of data. We take a deeper look at key challenges in usage and adoption of Linked Data from the ever-present ``LOD cloud'' diagram. Herein, we try to highlight and exemplify both key technical and non-technical challenges to the success of LOD, and we outline potential solution strategies. We hope that this paper will serve as a discussion basis for a fresh start towards more actionable, truly decentralized Linked Data, and as a call to the community to join forces.}, year = 2020, note = {SWJ 10-years special issue}, month = jan, volume = {11}, number = 1, pages = {101--113}, url = {http://semantic-web-journal.net/content/more-decentralized-vision-linked-data-0}, doi = {10.3233/SW-190380}, }
@article{hall-poll-2020SWJ10Y_OneOntology, author = {Armin Haller and Axel Polleres}, title = {Are We Better Off With Just One Ontology on the Web?}, journal = SWJ, year = 2020, abstract = {Ontologies have been used on the Web to enable semantic interoperability between parties that publish information independently of each other. They have also played an important role in the emergence of Linked Data. However, many ontologies on the Web do not see much use beyond their initial deployment and purpose in one dataset and therefore should rather be called what they are -- (local) schemas, which per se do not provide any interoperable semantics. Only a few ontologies are truly used as a shared conceptualization between different parties, mostly in controlled environments such as the BioPortal. In this paper, we discuss open challenges relating to the true re-use of ontologies on the Web and raise the question: ``are we better off with just one ontology on the Web?''}, url = {http://semantic-web-journal.net/content/are-we-better-just-one-ontology-web-0}, note = {SWJ 10-years special issue}, month = jan, volume = {11}, number = 1, doi = {10.3233/SW-190379}, pages = {87--99}, }
@article{kirr-etal-2020SWJ_SWdecade, title = {A decade of Semantic Web research through the lenses of a mixed methods approach}, abstract = {The identification of research topics and trends is an important scientometric activity, as it can help guide the direction of future research. In the Semantic Web area, topic and trend detection was initially performed primarily through qualitative, top-down style approaches that rely on expert knowledge. More recently, data-driven, bottom-up approaches have been proposed that offer a quantitative analysis of the evolution of a research domain. In this paper, we aim to provide a broader and more complete picture of Semantic Web topics and trends by adopting a mixed methods methodology, which allows for the combined use of both qualitative and quantitative approaches. Concretely, we build on a qualitative analysis of the main seminal papers, which adopts a top-down approach, and on quantitative results derived with three bottom-up data-driven approaches (Rexplore, Saffron, PoolParty) on a corpus of Semantic Web papers published between 2006 and 2015. In this process, we use the latter both for ``fact-checking'' the former and for deriving key findings in relation to the strengths and weaknesses of top-down and bottom-up approaches to research topic identification. Although we provide a detailed study of the past decade of Semantic Web research, the findings and the methodology are relevant not only for our community but also for other research fields beyond the Semantic Web.}, year = 2020, month = oct, day = 29, Journal = SWJ, author = {Sabrina Kirrane and Marta Sabou and Javier D. Fern{\'a}ndez and Francesco Osborne and Cécile Robin and Paul Buitelaar and Enrico Motta and Axel Polleres}, Publisher = {IOS Press}, volume = 11, number = 6, pages = {979--1005}, Type = JOURNAL, DOI = {10.3233/SW-200371}, Url = {http://www.semantic-web-journal.net/content/decade-semantic-web-research-through-lenses-mixed-methods-approach-1}, }
@article{fern-etal2020SWJ_HTDcrypt, title = {{HDT$_{crypt}$: Compression and Encryption of RDF Datasets}}, abstract = {The publication and interchange of RDF datasets online has experienced significant growth in recent years, promoted by different but complementary efforts, such as Linked Open Data, the Web of Things and RDF stream processing systems. However, the current Linked Data infrastructure does not cater for the storage and exchange of sensitive or private data. On the one hand, data publishers need means to limit access to confidential data (e.g. health, financial, personal, or other sensitive data). On the other hand, the infrastructure needs to compress RDF graphs in a manner that minimises the amount of data that is both stored and transferred over the wire. In this paper, we demonstrate how HDT -- a compressed serialization format for RDF -- can be extended to support encryption. We propose a number of different graph partitioning strategies and discuss the benefits and tradeoffs of each approach.}, year = 2020, Journal = SWJ, author = {Javier D. Fern{\'a}ndez and Sabrina Kirrane and Axel Polleres and Simon Steyskal}, Publisher = {IOS Press}, Type = JOURNAL, Url = {http://semantic-web-journal.org/content/hdt-crypt-compression-and-encryption-rdf-datasets}, doi = {10.3233/SW-180335}, volume = 11, number = 2, pages = {337--359}, }
@inproceedings{beno-etal-2019SEMANTIiCS_PD, author = {Martin Beno and Erwin Filtz and Sabrina Kirrane and Axel Polleres}, title = {{Doc2RDFa}: Semantic Annotation for Web Documents}, abstract = {Ever since its conception, the amount of data published on the World Wide Web has been rapidly growing, to the point where it has become an important source of both general and domain-specific information. However, the majority of documents published online are not machine readable by default. Many researchers believe that the answer to this problem is to semantically annotate these documents, and thereby contribute to the linked ``Web of Data''. Yet, the process of annotating web documents remains an open challenge. While some efforts towards simplifying this process have been made in recent years, there is still a lack of semantic content creation tools that integrate well with information worker toolsets. Towards this end, we introduce Doc2RDFa, an HTML rich text processor with the ability to automatically and manually annotate domain-specific content.}, booktitle = {Proceedings of the Posters and Demo Track of the 15th International Conference on Semantic Systems (SEMANTiCS 2019)}, address = {Karlsruhe, Germany}, month = sep, editor = {Mehwish Alam and Ricardo Usbeck and Tassilo Pellegrini and Harald Sack and York Sure{-}Vetter}, series = {{CEUR} Workshop Proceedings}, volume = 2451, publisher = {CEUR-WS.org}, day = {9--12}, year = 2019, url = {http://ceur-ws.org/Vol-2451/paper-06.pdf}, }
@inproceedings{havu-etal-2019SEMANTIiCS_PD, author = {Giray Havur and Simon Steyskal and Oleksandra Panasiuk and Anna Fensel and V{\'{\i}}ctor Mireles and Tassilo Pellegrini and Thomas Thurner and Axel Polleres and Sabrina Kirrane}, title = {Automatic License Compatibility Checking}, abstract = {In this paper, we introduce the Data Licenses Clearance Center system, which not only provides a library of machine-readable licenses but also allows users to compose their own license. A demonstrator can be found at \url{https://www.dalicc.net/}}, booktitle = {Proceedings of the Posters and Demo Track of the 15th International Conference on Semantic Systems (SEMANTiCS 2019)}, address = {Karlsruhe, Germany}, month = sep, editor = {Mehwish Alam and Ricardo Usbeck and Tassilo Pellegrini and Harald Sack and York Sure{-}Vetter}, series = {{CEUR} Workshop Proceedings}, volume = 2451, publisher = {CEUR-WS.org}, day = {9--12}, year = 2019, url = {http://ceur-ws.org/Vol-2451/paper-13.pdf}, }
@article{kamd-etal-2019npj, title = {Enabling Web-scale data integration in biomedicine through Linked Open Data}, author = {Maulik R. Kamdar and Javier D. Fern{\'a}ndez and Axel Polleres and Tania Tudorache and Mark A. Musen}, abstract = {The biomedical data landscape is fragmented, with several isolated, heterogeneous data and knowledge sources on the Web, which use varying formats, syntaxes, schemas, and entity notations. Biomedical researchers face severe logistical and technical challenges to query, integrate, analyze, and visualize data from multiple diverse sources in the context of available biomedical knowledge. Semantic Web technologies and Linked Data principles may aid toward Web-scale semantic processing and data integration in biomedicine. The biomedical research community has been one of the earliest adopters of these technologies and principles to publish data and knowledge on the Web as linked graphs and ontologies, hence creating the Life Sciences Linked Open Data (LSLOD) cloud. In this paper, we provide our perspective on some opportunities proffered by the use of LSLOD to integrate biomedical data and knowledge in three domains: (1) pharmacology, (2) cancer research, and (3) infectious diseases. We will discuss some of the major challenges that hinder the widespread use and consumption of LSLOD by the biomedical research community. Finally, we provide a few technical solutions and insights that can address these challenges. Eventually, LSLOD can enable the development of scalable, intelligent infrastructures that support artificial intelligence methods for augmenting human intelligence to achieve better clinical outcomes for patients, to enhance the quality of biomedical research, and to improve our understanding of living systems.}, journal = {npj Digital Medicine}, month = sep, year = 2019, day = 10, volume = 2, number = 1, pages = 90, issn = {2398-6352}, doi = {10.1038/s41746-019-0162-5}, url = {https://www.nature.com/articles/s41746-019-0162-5}, publisher = {Springer Nature}, }
@inproceedings{filt-etal-2019COOPIS, title = {Exploiting {EuroVoc}'s Hierarchical Structure for Classifying Legal Documents}, author = {Erwin Filtz and Sabrina Kirrane and Axel Polleres and Gerhard Wohlgenannt}, abstract = {Multi-label document classification is a challenging problem because of the potentially huge number of classes. Furthermore, real-world datasets often exhibit a strongly varying number of labels per document, and a power-law distribution of those class labels. Multi-label classification of legal documents is additionally complicated by long document texts and domain-specific use of language. In this paper we use different approaches to compare the performance of text classification algorithms on existing datasets and corpora of legal documents, and contrast those experiments with results on general-purpose multi-label text classification datasets. Moreover, for the EUR-Lex legal datasets, we show that exploiting the hierarchy of the EuroVoc thesaurus helps to improve classification performance by reducing the number of potential classes while retaining the informative value of the classification itself.}, year = 2019, month = oct, day = {23-25}, address = {Rhodes, Greece}, booktitle = {On the Move to Meaningful Internet Systems: OTM 2019 Conferences -- Confederated International Conferences: CoopIS, ODBASE, C\&TC}, note = {27th International Conference on Cooperative Information Systems (CoopIS 2019)}, publisher = {Springer}, series = LNCS, volume = 11877, pages = {164--181}, url = {http://www.polleres.net/publications/filt-etal-2019COOPIS.pdf}, doi = {10.1007/978-3-030-33246-4_10}, type = CONF, }
@inproceedings{pand-etal-2019ODBASE, title = {Creating A Vocabulary for Data Privacy -- The First-Year Report of {Data Privacy Vocabularies and Controls Community Group (DPVCG)}}, doi = {10.1007/978-3-030-33246-4_44}, author = {Harshvardhan J. Pandit and Axel Polleres and Bert Bos and Rob Brennan and Bud Bruegger and Fajar J. Ekaputra and Javier D. Fern{\'a}ndez and Roghaiyeh Gachpaz Hamed and Elmar Kiesling and Mark Lizar and Eva Schlehahn and Simon Steyskal and Rigo Wenning}, abstract = {Managing privacy and understanding the handling of personal data have turned into a fundamental right, at least within the European Union, with the General Data Protection Regulation (GDPR) being enforced since May 25\textsuperscript{th} 2018. This has led to tools and services that promise compliance with GDPR in terms of consent management and keeping track of personal data being processed. The information recorded within such tools, as well as that for compliance itself, needs to be interoperable to provide sufficient transparency in its usage. Additionally, interoperability is also necessary towards addressing the right to data portability under GDPR as well as the creation of user-configurable and manageable privacy policies. We argue that such interoperability can be enabled through agreement over vocabularies using Linked Data principles. The W3C Data Privacy Vocabularies and Controls Community Group (DPVCG) was set up to jointly develop such vocabularies towards interoperability in the context of data privacy. This paper presents the resulting Data Privacy Vocabulary (DPV), along with a discussion on its potential uses, and an invitation for feedback and participation.}, year = 2019, month = oct, day = {22-23}, address = {Rhodes, Greece}, booktitle = {On the Move to Meaningful Internet Systems: OTM 2019 Conferences -- Confederated International Conferences: CoopIS, ODBASE, C\&TC}, note = {18th International Conference on Ontologies, DataBases, and Applications of Semantics (ODBASE 2019)}, publisher = {Springer}, series = LNCS, volume = 11877, pages = {714--730}, url = {http://www.polleres.net/publications/pand-etal-2019ODBASE.pdf}, type = CONF, }
@inproceedings{vaku-etal-2019CIKM, author = {Svitlana Vakulenko and Javier D. Fern{\'a}ndez and Axel Polleres and Maarten de Rijke and Michael Cochez}, abstract = {Question answering over knowledge graphs (KGQA) has evolved from simple single-fact questions to complex questions that require graph traversal and aggregation. We propose a novel approach for complex KGQA that uses unsupervised message passing, which propagates confidence scores obtained by parsing an input question and matching terms in the knowledge graph to a set of possible answers. Our approach outperforms the state-of-the-art on the LC-QuAD benchmark. Moreover, our error analysis reveals correct answers missing from the benchmark dataset and inconsistencies in the DBpedia knowledge graph.}, type = CONF, title = {Message Passing for Complex Question Answering over Knowledge Graphs}, booktitle = {Proceedings of the 28th ACM International Conference on Information and Knowledge Management (CIKM 2019)}, day = {3--7}, pages = {1431--1440}, publisher = {ACM}, doi = {10.1145/3357384.3358026}, month = nov, year = 2019, Address = {Beijing, China}, url = {https://arxiv.org/abs/1908.06917}, }
@article{nava-etal-TempCourtKER2019, title = {{TempCourt}: Evaluation of Temporal Taggers on a new Corpus of Court Decisions}, abstract = {The extraction and processing of temporal expressions in textual documents have been extensively studied in several domains; in the legal domain, however, they remain an open challenge. This is possibly due to the scarcity of corpora in the domain and the particularities found in legal documents that are highlighted in this paper. Considering the pivotal role played by temporal information when it comes to analyzing legal cases, this paper presents TempCourt, a corpus of manually annotated temporal expressions in 30 judgments from the European Court of Human Rights, the European Court of Justice and the United States Supreme Court. The corpus contains two different temporal annotation sets that adhere to the TimeML standard, the first one capturing all temporal expressions and the second dedicated to temporal expressions that are relevant for the case under judgment (thus excluding dates of previous court decisions). The proposed gold standards are subsequently used to compare ten state-of-the-art cross-domain temporal taggers, and to identify not only the limitations of cross-domain temporal taggers but also the limitations of the TimeML standard when applied to legal documents. Finally, the paper identifies the need for dedicated resources, for the adaptation of existing tools, and for specific annotation guidelines that can be adapted to different types of legal documents.}, author = {Mar\'ia Navas-Loro and Erwin Filtz and V\'ictor Rodr\'iguez Doncel and Axel Polleres and Sabrina Kirrane}, year = 2019, journal = {Knowledge Engineering Review}, publisher = {Cambridge University Press}, doi = {10.1017/S0269888919000195}, volume = 34, pages = {E24}, Type = JOURNAL, }
@misc{uniko_POS_AI_2019, title = {{Positionspapier zur österreichischen Artificial Intelligence Strategie AIM AT 2030}}, note = {In German. Forum Forschung der uniko (Österreichischen Universitätenkonferenz), available at \url{https://uniko.ac.at/positionen/}}, author = {Horst Bischof and Georg Dorffner and Alexander Egyed and Wolfgang Faber and Bernhard Moser and Bernhard Nessler and Axel Polleres and Stefan Woltran and Gerhard Friedrich and {others}}, abstract = {The universities welcome the initiative to draft an Austrian AI strategy. The universities are an essential pillar of AI research and a resource for key competencies in this field. Support for AI research in Austria must substantially involve the Austrian universities. The Austrian universities propose a series of concrete measures to further strengthen Austria's competence in AI and machine learning (ML) and, above all, to anchor Austria as a research location within the international AI community, in particular within the European networks ELLIS (ellis.eu) and CLAIRE (www.claire-ai.org). The proposed measures fall into the following three core areas: 1. international networking, 2. national networking, 3. creation and expansion of infrastructure. Developed within the Forum Forschung of uniko (the Austrian Universities Conference) by the signatories of the accompanying AI declaration of the academic working group supporting the Austrian AI strategy.}, year = 2019, url = {http://www.polleres.net/publications/uniko_POS_AI_2019.pdf}, month = jun, day = 17, }
@inproceedings{pand-etal-2019ESWC_PD, author = {Harshvardhan J. Pandit and Javier D. Fern\'andez and Christophe Debruyne and Axel Polleres}, abstract = {The General Data Protection Regulation (GDPR) has established far-reaching rights to transparency and accountability in the context of personal data usage and collection. While GDPR obligations clearly apply to data explicitly obtained from or provided by data subjects, the situation becomes less clear for data derived from existing personal data. In this paper, we address this issue with an approach for identifying potential data derivations using a rule-based formalisation, based on Semantic Web standards, of examples documented in the literature. Our approach is useful for identifying risks of potential data derivations from given data and provides a starting point towards an open catalogue documenting known derivations, for the privacy community but also for data controllers, in order to raise awareness of the ways in which their data collections could become problematic.}, type = POSTER, note = {Poster abstract}, title = {Towards Cataloguing Potential Derivations Of Personal Data}, booktitle = {The Semantic Web: ESWC 2019 Satellite Events}, day = {2--6}, month = jun, year = 2019, doi = {10.1007/978-3-030-32327-1_29}, Address = {Portoro{\v{z}}, Slovenia}, }
@article{neum-poll-2019jws, title = {Enabling Spatio-Temporal Search in Open Data}, abstract = {Intuitively, most datasets found on governmental Open Data portals are organized by spatio-temporal criteria, that is, single datasets provide data for a certain region, valid for a certain time period. Likewise, for many use cases (such as, for instance, data journalism and fact checking) a predominant need is to scope down the relevant datasets to a particular period or region. Rich spatio-temporal annotations are therefore a crucial need to enable semantic search for (and across) Open Data portals along those dimensions, yet -- to the best of our knowledge -- no working solution exists. To this end, in the present paper we (i) present a scalable approach to construct a spatio-temporal knowledge graph that hierarchically structures geographical as well as temporal entities, (ii) annotate a large corpus of tabular datasets from open data portals with entities from this knowledge graph, and (iii) enable structured, spatio-temporal search and querying over Open Data catalogs, both via a search interface as well as via a SPARQL endpoint, available at \url{http://data.wu.ac.at/odgraphsearch/}}, author = {Sebastian Neumaier and Axel Polleres}, year = 2019, journal = JWS, publisher = {Elsevier}, url = {http://epub.wu.ac.at/6764/}, volume = 55, doi = {10.1016/j.websem.2018.12.007}, pages = {21--36}, month = mar, }
@article{bona-etal-DagstuhlReport18371, author = {Piero Andrea Bonatti and Stefan Decker and Axel Polleres and Valentina Presutti}, title = {{Knowledge Graphs: New Directions for Knowledge Representation on the Semantic Web (Dagstuhl Seminar 18371)}}, pages = {29--111}, journal = {Dagstuhl Reports}, ISSN = {2192-5283}, year = {2019}, volume = {8}, number = {9}, editor = {Piero Andrea Bonatti and Stefan Decker and Axel Polleres and Valentina Presutti}, publisher = {Schloss Dagstuhl--Leibniz-Zentrum fuer Informatik}, address = {Dagstuhl, Germany}, URL = {http://drops.dagstuhl.de/opus/volltexte/2019/10328}, URN = {urn:nbn:de:0030-drops-103283}, doi = {10.4230/DagRep.8.9.29}, annote = {Keywords: knowledge graphs, knowledge representation, linked data, ontologies, semantic web}, abstract = {The increasingly pervasive nature of the Web, expanding to devices and things in everyday life, along with new trends in Artificial Intelligence, calls for new paradigms and a new look at Knowledge Representation and Processing at scale for the Semantic Web. The emerging, but still to be concretely shaped, concept of ``Knowledge Graphs'' provides an excellent unifying metaphor for this current status of Semantic Web research. More than two decades of Semantic Web research provide a solid basis and a promising technology and standards stack to interlink data, ontologies and knowledge on the Web. However, neither are applications of Knowledge Graphs as such limited to Linked Open Data, nor are instantiations of Knowledge Graphs in enterprises -- while often inspired by it -- limited to the core Semantic Web stack. This report documents the program and the outcomes of Dagstuhl Seminar 18371 ``Knowledge Graphs: New Directions for Knowledge Representation on the Semantic Web'', where a group of experts from academia and industry discussed fundamental questions around these topics for a week in early September 2018, including the following: What are knowledge graphs? Which applications do we see emerging? Which open research questions still need to be addressed, and which technology gaps still need to be closed?}, }
@article{fern-etal-2019SWJ, title = {Evaluating Query and Storage Strategies for {RDF} Archives}, abstract = {There is an emerging demand for efficiently archiving and (temporally) querying different versions of evolving semantic Web data. As novel archiving systems are starting to address this challenge, foundations/standards for benchmarking RDF archives are needed to evaluate their storage space efficiency and the performance of different retrieval operations. To this end, we provide theoretical foundations on the design of data and queries to evaluate emerging RDF archiving systems. Then, we instantiate these foundations along a concrete set of queries on the basis of a real-world evolving dataset. Finally, we perform an extensive empirical evaluation of current archiving techniques and querying strategies, which is meant to serve as a baseline for future developments in querying archives of evolving RDF data.}, Journal = SWJ, author = {Javier D. Fern{\'a}ndez and J\"urgen Umbrich and Axel Polleres and Magnus Knuth}, Publisher = {IOS Press}, volume = 10, number = 2, pages = {247--291}, Type = JOURNAL, Url = {http://www.semantic-web-journal.net/content/evaluating-query-and-storage-strategies-rdf-archives-0}, year = 2019 }
@inproceedings{pate-etal-2018EKAW, title = {Comparative Preferences in {SPARQL}}, author = {Peter F. Patel-Schneider and Axel Polleres and David Martin}, booktitle = {Knowledge Acquisition, Modeling and Management (EKAW 2018) -- 21st International Conference}, address = {Nancy, France}, day = {12--16}, month = nov, year = 2018, pages = {289--305}, url = {http://polleres.net/publications/pate-etal-2018EKAW.pdf}, abstract = {Sometimes one does not want all the solutions to a query but instead only those that are most desirable according to user-specified preferences. If a user-specified preference relation is acyclic then its specification and meaning are straightforward. In many settings, however, it is valuable to support preference relations that are not acyclic and that might not even be transitive, although their handling then involves some open questions. We discuss a definition of desired solutions for arbitrary preference relations and show its desirable properties. We modify a previous extension to SPARQL for simple preferences to correctly handle any preference relation and provide translations of this extension back into SPARQL that can compute the desired solutions for all preference relations that are acyclic or transitive. We also propose an additional extension that returns solutions at multiple levels of desirability, which adds expressiveness over prior work. For the latter, however, we conjecture that an effective translation to a single (non-recursive) SPARQL query is not possible.}, editor = {Catherine Faron{-}Zucker and Chiara Ghidini and Amedeo Napoli and Yannick Toussaint}, series = LNCS, doi = {10.1007/978-3-030-03667-6\_19}, volume = {11313}, publisher = {Springer}, }
@inproceedings{azza-etal-2018, title = {Towards Making Distributed {RDF} Processing {FLINKer}}, abstract = {In the last decade, the Resource Description Framework (RDF) has become the de-facto standard for publishing semantic data on the Web. This steady adoption has led to a significant increase in the number and volume of available RDF datasets, exceeding the capabilities of traditional RDF stores. This scenario has introduced severe big semantic data challenges when it comes to managing and querying RDF data at Web scale. Despite the existence of various off-the-shelf Big Data platforms, processing RDF in a distributed environment remains a significant challenge. In this position paper, based on an in-depth analysis of the state of the art, we propose to manage large RDF datasets in Flink, a well-known scalable distributed Big Data processing framework.}, author = {Amr Azzam and Axel Polleres and Sabrina Kirrane}, publisher = {IEEE}, year = {2018}, month = aug, day = {6--8}, doi = {10.1109/Innovate-Data.2018.00009}, url = {https://epub.wu.ac.at/6493/}, address = {Barcelona, Spain}, pages = {9--16}, booktitle = {4th International Conference on Big Data Innovations and Applications (Innovate-Data)}, type = CONF, }
@inproceedings{havu-etal-2018SEMANTiCS_PD, author = {Giray Havur and Simon Steyskal and Oleksandra Panasiuk and Anna Fensel and V{\'i}ctor Mireles and Tassilo Pellegrini and Thomas Thurner and Axel Polleres and Sabrina Kirrane}, abstract = {In this paper we introduce the Data Licenses Clearance Center, which provides a library of machine-readable standard licenses and allows users to compose arbitrary licenses. In addition, the system supports the clearance of rights issues by providing users with information about the equivalence, similarity and compatibility of licenses. A beta version of the system is available at \url{https://www.dalicc.net/}.}, title = {DALICC: A Framework for Publishing and Consuming Data Assets Legally}, booktitle = {Proceedings of the Posters and Demos Track of the 14th International Conference on Semantic Systems (SEMANTiCS 2018)}, address = {Vienna, Austria}, month = sep, day = {10--13}, year = 2018, note = {Poster abstract}, }
@inproceedings{filt-etal-2018SEMANTiCS_PD, author = {Erwin Filtz and Sabrina Kirrane and Axel Polleres}, abstract = {In recent years, the European Union has been working towards harmonizing legislation, thus allowing for easier cross-border access to, exchange of, and reuse of legal information. This initiative is supported via standardization activities such as the European Legislation Identifier (ELI) and the European Case Law Identifier (ECLI), which provide technical specifications for web identifiers and vocabularies that can be used to describe metadata pertaining to legal documents. Unfortunately, to date said initiatives have only been partially adopted by EU member states, possibly due to the manual effort involved in curating the metadata. As a first step towards streamlining this process, we propose a cross-jurisdictional legal framework that demonstrates how legal information stored in national databases can be linked at a European level, using Natural Language Processing together with external knowledge bases to automatically populate the knowledge base.}, title = {Interlinking Legal Data}, booktitle = {Proceedings of the Posters and Demos Track of the 14th International Conference on Semantic Systems (SEMANTiCS 2018)}, address = {Vienna, Austria}, month = sep, day = {10--13}, year = 2018, note = {Poster abstract}, url = {http://polleres.net/publications/filt-etal-2018SEMANTiCS_PD.pdf} }
@book{poll-sakr-etal2018foreword, author = {Sherif Sakr and Marcin Wylot and Raghava Mutharaju and Danh Le Phuoc and Irini Fundulaki}, title = {Linked Data: Storing, Querying, and Reasoning}, note = {Foreword by Axel Polleres}, year = 2018, publisher = {Springer}, Type = BC, }
@inproceedings{pate-etal-2018ISWCPD, title = {Fixing Comparative Preferences for {SPARQL}}, author = {Peter F. Patel-Schneider and Axel Polleres and David Martin}, abstract = {Preferences have been part of the goal of the Semantic Web from its inception, but are not currently part of any Semantic Web standards, such as SPARQL. Several proposals have been made to add comparative preferences to SPARQL. Comparative preferences are based on comparing solutions to a query and eliminating ones that come out worse in the comparison, as in searching for gas stations and eliminating any for which there is a closer station serving the same brand of gasoline. The proposals each add an extra construct to SPARQL, filtering out non-preferred query solutions. Their preference constructs are of different expressive power, but each can be thought of as providing a skyline operator. In this poster we fix several technical problems of these existing proposals.}, Note = {Poster abstract}, url = {http://polleres.net/pate-etal-2018ISWCPD.pdf}, month = oct, year = 2018, Booktitle = {ISWC 2018 Posters \& Demos}, }
@inproceedings{poll-etal2018DESEMWEB, author = {Axel Polleres and Maulik R. Kamdar and Javier D. Fern{\'a}ndez and Tania Tudorache and Mark A. Musen}, title = {A More Decentralized Vision for Linked Data}, abstract = {We claim that ten years into Linked Data there are still many unresolved challenges towards arriving at a truly machine-readable \emph{and} decentralized Web of data. With a focus on the biomedical domain---currently one of the most promising ``adopters'' of Linked Data---we highlight and exemplify key technical and non-technical challenges to the success of Linked Data, and we outline potential solution strategies.}, note = {An extended technical report of this paper is available at \url{http://epub.wu.ac.at/6371/}}, series = {CEUR Workshop Proceedings}, Publisher = {CEUR-WS.org}, volume = {2165}, url = {http://ceur-ws.org/Vol-2165/paper1.pdf}, type = WS, booktitle = {Decentralizing the Semantic Web (Workshop of ISWC 2018)}, month = oct, year = 2018, }
@inproceedings{bona-etal2018SW4SG, author = {Piero Bonatti and Bert Bos and Stefan Decker and Javier D. Fern{\'a}ndez and Sabrina Kirrane and Vassilios Peristeras and Axel Polleres and Rigo Wenning}, title = {Data Privacy Vocabularies and Controls: Semantic Web for Transparency and Privacy}, abstract = {Managing privacy and understanding the handling of personal data have turned into a fundamental right -- at least for Europeans -- since May 25th, 2018, with the coming into force of the General Data Protection Regulation. Yet, whereas many different tools by different vendors promise companies compliance with GDPR in terms of consent management and keeping track of the personal data they handle in their processes, interoperability between such tools as well as uniform user-facing interfaces will be needed to enable true transparency, user-configurable and -manageable privacy policies, and data portability (as also -- implicitly -- promised by GDPR). We argue that such interoperability can be enabled by agreed-upon vocabularies and Linked Data.}, booktitle = {Semantic Web for Social Good Workshop (SWSG) co-located with ISWC 2018}, year = 2018, month = oct, series = {CEUR Workshop Proceedings}, Publisher = {CEUR-WS.org}, volume = 2182, url = {http://ceur-ws.org/Vol-2182/paper_3.pdf}, type = WS }
@inproceedings{vaku-etal-2018ISWC, author = {Svitlana Vakulenko and Maarten de Rijke and Michael Cochez and Vadim Savenkov and Axel Polleres}, title = {Measuring Semantic Coherence of a Conversation}, abstract = {Conversational systems have become increasingly popular as a way for humans to interact with computers. To be able to provide intelligent responses, conversational systems must correctly model the structure and semantics of a conversation. In this paper, we introduce the task of measuring semantic (in)coherence in a conversation with respect to background knowledge, which relies on the identification of semantic relations between concepts introduced during a conversation. We propose and evaluate graph-based and machine learning approaches for measuring semantic coherence using knowledge graphs, their vector space embeddings, and word embedding models as sources of background knowledge. Our evaluation results demonstrate how these approaches are able to uncover different coherence patterns in conversations on the Ubuntu Dialogue Corpus.}, month = oct, day = {8--12}, pages = {634--651}, editor = {Denny Vrande\v{c}i{\'c} and Kalina Bontcheva and Mari Carmen Su{\'a}rez-Figueroa and Valentina Presutti and Irene Celino and Marta Sabou and Lucie-Aim{\'e}e Kaffee and Elena Simperl}, doi = {10.1007/978-3-030-00671-6_37}, year = 2018, booktitle = {Proceedings of the 17th International Semantic Web Conference (ISWC 2018)}, address = {Monterey, CA}, series = LNCS, volume = 11136, publisher = {Springer}, url = {http://polleres.net/publications/vaku-etal-2018ISWC.pdf}, }
@inproceedings{neum-poll-2018SEMANTiCS, title = {Geo-semantic labelling of Open Data}, author = {Sebastian Neumaier and Vadim Savenkov and Axel Polleres}, year = 2018, abstract = {In the past years, Open Data has become a trend among governments to increase transparency and public engagement by opening up national, regional, and local datasets. However, while many of these datasets come in semi-structured file formats, they use different schemata and lack geo-references or semantically meaningful links and descriptions of the corresponding geo-entities. We aim to address this by detecting and establishing links to geo-entities in the datasets found in Open Data catalogs and their respective metadata descriptions, and by linking them to a knowledge graph of geo-entities. This knowledge graph does not yet readily exist, though, or at least, not a single one: so, we integrate and interlink several datasets to construct our (extensible) base geo-entities knowledge graph: (i) the openly available geospatial data repository GeoNames, (ii) the map service OpenStreetMap, (iii) country-specific sets of postal codes, and (iv) the European Union's classification system NUTS. As a second step, this base knowledge graph is used to add semantic labels to the open datasets, i.e., we heuristically disambiguate the geo-entities in CSV columns using the context of the labels and the hierarchical graph structure of our base knowledge graph. Finally, in order to interact with and retrieve the content, we index the datasets and provide a demo user interface. Currently, we have indexed resources from four Open Data portals, and we allow search queries for geo-entities as well as full-text matches at \url{http://data.wu.ac.at/odgraph/}.}, booktitle = {Proceedings of the 14th International Conference on Semantic Systems (SEMANTiCS 2018)}, month = sep, url = {http://polleres.net/publications/neum-poll-2018SEMANTiCS.pdf}, editor = {Anna Fensel and Victor de Boer and Tassilo Pellegrini and Elmar Kiesling and Bernhard Haslhofer and Laura Hollink and Alexander Schindler}, pages = {9--20}, day = {10--13}, publisher = {Elsevier}, series = {Procedia Computer Science}, volume = 137, address = {Vienna, Austria}, }
@inproceedings{kirr-etal-2018ESWC_PD, author = {Sabrina Kirrane and Javier D. Fern\'andez and Wouter Dullaert and Uros Milosevic and Axel Polleres and Piero Bonatti and Rigo Wenning and Olha Drozd and Philip Raschke}, abstract = {In this demo we present the SPECIAL consent, transparency and compliance system. The objective of the system is to afford data subjects more control over personal data processing and sharing, while at the same time enabling data controllers and processors to comply with consent and transparency obligations mandated by the European General Data Protection Regulation. A short promotional video can be found at \url{https://purl.com/specialprivacy/demos/ESWC2018}.}, type = DEMO, note = {Demo abstract}, title = {A Scalable Consent, Transparency and Compliance Architecture}, booktitle = {The Semantic Web: ESWC 2018 Satellite Events}, editor = {Aldo Gangemi and Anna Lisa Gentile and Andrea Giovanni Nuzzolese and Sebastian Rudolph and Maria Maleshkova and Heiko Paulheim and Jeff Z Pan and Mehwish Alam}, pages = {131--136}, series = LNCS, volume = 11155, day = {3--7}, month = jun, year = 2018, Address = {Heraklion, Greece}, url = {http://polleres.net/publications/kirr-etal-2018ESWC_PD.pdf}, }
@inproceedings{rein-etal-2018ESWC, title = {{HDTQ}: Managing {RDF} Datasets in Compressed Space}, year = 2018, author = {Javier D. Fern\'andez and Miguel A. Mart\'inez-Prieto and Axel Polleres and Julian Reindorf}, abstract = {HDT (Header-Dictionary-Triples) is a well-known compressed representation of RDF data that supports retrieval features without prior decompression. Yet, RDF datasets often contain additional graph information, such as the origin, version or validity time of a triple. Traditional HDT is not capable of handling such additional parameters. This work introduces HDTQ (HDT Quads), an extension of HDT that is able to represent quadruples (or quads) while still being highly compact and queryable. Two approaches of this extension, Annotated Triples and Annotated Graphs, are introduced and their performance is compared to the leading open-source RDF stores on the market. Results show that HDTQ achieves the best compression rates and is a competitive alternative to well-established systems.}, booktitle = {Proceedings of the 15th European Semantic Web Conference (ESWC 2018)}, series = LNCS, volume = 10843, publisher = {Springer}, url = {http://polleres.net/publications/fern-etal-ESWC2018.pdf}, doi = {10.1007/978-3-319-93417-4_13}, day = {3--7}, month = jun, editor = {Aldo Gangemi and Roberto Navigli and Maria-Esther Vidal and Pascal Hitzler and Raphaël Troncy and Laura Hollink and Anna Tordai and Mehwish Alam}, pages = {191--208}, Address = {Heraklion, Greece}, }
@book{baro-etal-2018TOIT, Editor = {Cristina Baroglio and Olivier Boissier and Axel Polleres}, Publisher = {ACM}, Title = {Transactions on Internet Technology, Special Issue: Computational Ethics and Accountability}, Note = {Editorial}, url = {https://dl.acm.org/citation.cfm?id=3195835}, abstract = {Computational Ethics and Accountability are becoming topics of increasing societal impact: in the context of recent advances in AI and machine-learning techniques, people and organizations accept decisions made for them by machines, be they buy-sell decisions, the pre-filtering of applications, decisions about which content users are presented with and which personal data are shared with and used by third parties, up to automated driving. In each of these application scenarios, where algorithms and machines support or even replace human decisions, ethical issues may arise. In the present special issue we ask whether Intelligent Systems and AI themselves can help to enable accountability and transparency and thus act as technologies that enable rather than endanger ethically compliant, accountable, and eventually sustainable computing. Multi-agent systems, Semantic Web and Agreement Technologies, and Value-Sensitive Design are just some of the research areas whose methods and results can fruitfully support business ethics and social responsibility. In this special issue, you will find a collection of articles that aim to make computational advances by approaching these challenges from different angles.}, Type = JOURNAL, pages = {40:1--40:4}, volume = 18, number = 4, Year = 2018 }
@article{bisc-etal-2018jws, title = {Enriching Integrated Statistical Open City Data by Combining Equational Knowledge and Missing Value Imputation}, abstract = {Several institutions collect statistical data about cities, regions, and countries for various purposes. Yet, while access to high-quality and up-to-date data of this kind is both crucial for decision makers and a means for achieving transparency to the public, all too often such collections of data remain isolated and not re-usable, let alone comparable or properly integrated. In this paper we present the Open City Data Pipeline, a focused attempt to collect, integrate, and enrich statistical data collected at city level worldwide, and re-publish the resulting dataset in a re-usable manner as Linked Data. The main features of the Open City Data Pipeline are: (i) we integrate and cleanse data from several sources in a modular and extensible, always up-to-date fashion; (ii) we use both Machine Learning techniques and reasoning over equational background knowledge to enrich the data by imputing missing values, (iii) we assess the estimated accuracy of such imputations per indicator. Additionally, (iv) we make the integrated and enriched data, including links to external data sources, such as DBpedia, available both in a web browser interface and as machine-readable Linked Data, using standard vocabularies such as QB and PROV. Apart from providing a contribution to the growing collection of data available as Linked Data, our enrichment process for missing values also contributes a novel methodology for combining rule-based inference about equational knowledge with inferences obtained from statistical Machine Learning approaches. While most existing works about inference in Linked Data have focused on ontological reasoning in RDFS and OWL, we believe that these complementary methods, and particularly their combination, could be fruitfully applied also in many other domains for integrating Statistical Linked Data, independent of our concrete use case of integrating city data.}, author = {Stefan Bischof and Andreas Harth and Benedikt K{\"a}mpgen and Axel Polleres and Patrik Schneider}, year = 2018, journal = JWS, publisher = {Elsevier}, month = jan, volume = 48, pages = {22--47}, issn = {1570-8268}, doi = {10.1016/j.websem.2017.09.003}, url = {http://polleres.net/publications/bisc-etal-2017JWS.pdf} }
@misc{big_data_innovation_datenschutz_endbericht_7_12_17, url = {http://polleres.net/publications/big_data_innovation_datenschutz_endbericht_7_12_17.pdf}, title = {{Big Data, Innovation und Datenschutz}}, abstract = {With the GDPR entering into force in May 2018, European organisations affected by it must observe data protection and the restrictions that follow from it. This is particularly problematic for the innovation driver Big Data, since for such applications it is a priori unclear which data need to be collected and how those data will feed into the resulting applications. In a world without data protection, such applications would emerge from the broadest possible, purpose-free collection of data, data analysis on top of it to discover hidden patterns, and the development of new functions that are offered to users based on their data. The legal analysis of the GDPR carried out in this study shows that, due to the legal requirements, this way of developing Big Data applications is not possible: data collection and data mining analyses involving personal data are not permitted without consent. As a starting point for a corresponding bundle of measures, this study develops a GDPR-compatible procedure for building a Big Data application. This proposal is based on a legal analysis of the GDPR with a focus on Big Data, a technical analysis of the technologies available for implementing the legal obligations, and interviews with companies and public authorities. The core idea of the procedure is to obtain consent for anonymisation and/or data analysis already during the development of the data-generating system, together with consent tailored to testing and an opt-in when rolling out the Big Data application. Furthermore, the study proposes strategic measures in the areas of education, research, funding, legal framework conditions, and participation in international initiatives (such as MyData).}, author = {Clemens Appl and Andreas Ekelhart and Natascha Fenz and Peter Kieseberg and Hannes Leo and Sabrina Kirrane and Axel Polleres and Alfred Taudes and Veronika Treitl and Christian Singer and Martin Winner}, address = {Vienna, Austria}, note = {Study (in German) commissioned by the Austrian Federal Ministry for Transport, Innovation and Technology (BMVIT)}, year = 2017, month = dec, publisher = {Bundesministerium f\"ur Verkehr, Innovation und Technologie} }
@article{beno-etal-2017JEDEM, title = {Perception of Key Barriers in Using and Publishing Open Data}, abstract = {There is a growing body of literature recognizing the benefits of Open Data. However, many potential data providers are unwilling to publish their data, and at the same time, data users are often faced with difficulties when attempting to use Open Data in practice. While various barriers to using and publishing Open Data are still present, studies that systematically collect and assess these barriers are rare. Based on this observation, we present a review of prior literature on barriers and the results of an empirical study aimed at assessing both the users' and publishers' views on obstacles regarding Open Data adoption. We collected data with an online survey in Austria and internationally. Using a sample of 183 participants, we draw conclusions about the relative importance of the barriers reported in the literature. In comparison to a previous conference paper presented at the Conference for E-Democracy and Open Government, this article includes new additional data from participants outside Austria, reports new analyses, and substantially extends the discussion of results and of possible strategies for the mitigation of Open Data barriers.}, author = {Martin Beno and Kathrin Figl and J{\"u}rgen Umbrich and Axel Polleres}, year = 2017, journal = {eJournal of eDemocracy and Open Government (JeDEM)}, month = dec, pages = {134--165}, volume = 9, number = 2, issn = {2075-9517}, url = {http://polleres.net/publications/beno-etal-2017JEDEM.pdf} }
@inproceedings{save-etal-2017SEMANTiCS, title = {Counting to k, or how {SPARQL 1.1} could be efficiently enhanced with top k shortest path queries}, author = {Vadim Savenkov and Qaiser Mehmood and J\"urgen Umbrich and Axel Polleres}, year = 2017, abstract = {While graph data on the Web, represented in RDF, is growing, SPARQL, as the standard query language for RDF, still remains largely unusable for the most typical graph query task: finding paths between selected nodes through the graph. Property Paths, as introduced in SPARQL 1.1, turn out to be unfit for this task, as they can only be used for testing path existence and do not even allow counting the number of paths between nodes. While such a feature has been shown to be theoretically highly intractable, particularly in graphs with a high degree of cyclicity, practical use cases still demand a solution. A common restriction, in fact, is not to ask for all, but only the $k$ shortest paths between two nodes, in order to obtain at least the most important of potentially infeasibly many possible paths. In this paper, we extend SPARQL 1.1 property paths in a manner that allows computing and returning the $k$ shortest paths matching a property path expression between two nodes. We present an algorithm and implementation and demonstrate in our evaluation that a relatively straightforward solution works (in fact, more efficiently than other, tailored solutions in the literature) in practical use cases.}, booktitle = {13th International Conference on Semantic Systems (SEMANTiCS)}, month = sep, pages = {97--103}, url = {http://polleres.net/publications/save-etal-2017SEMANTICS.pdf}, day = {11--14}, publisher = {ACM}, address = {Amsterdam, the Netherlands}, }
@inproceedings{bona-etal-2017TELERISE, title = {Transparent Personal Data Processing: The Road Ahead}, author = {Piero Bonatti and Sabrina Kirrane and Axel Polleres and Rigo Wenning}, booktitle = {TELERISE: 3rd International Workshop on TEchnical and LEgal aspects of data pRIvacy and SEcurity @ SAFECOMP2017}, year = 2017, month = sep, day = 12, pages = {337--349}, publisher = {Springer}, series = LNCS, volume = 10489, abstract = {The European General Data Protection Regulation defines a set of obligations for personal data controllers and processors. Primary obligations include: obtaining explicit consent from the data subject for the processing of personal data, providing full transparency with respect to the processing, and enabling data rectification and erasure (albeit only in certain circumstances). At the core of any transparency architecture is the logging of events in relation to the processing and sharing of personal data. The logs should enable verification that data processors abide by the access and usage control policies that have been associated with the data based on the data subject's consent and the applicable regulations. In this position paper, we: (i) identify the requirements that need to be satisfied by such a transparency architecture, (ii) examine the suitability of existing logging mechanisms in light of said requirements, and (iii) present a number of open challenges and opportunities.}, address = {Trento, Italy}, url = {http://polleres.net/publications/bona-etal-2017TELERISE.pdf} }
@incollection{neum-etal-RW2017, Abstract = {In this lecture we will discuss and introduce challenges of integrating openly available Web data and how to solve them. Firstly, while we will address this topic from the viewpoint of Semantic Web research, not all data is readily available as RDF or Linked Data, so we will give an introduction to different data formats prevalent on the Web, namely, standard formats for publishing and exchanging tabular, tree-shaped, and graph data. Secondly, not all Open Data is really completely open, so we will discuss and address issues around licences and terms of usage associated with Open Data, as well as the documentation of data provenance. Thirdly, we will discuss (meta-)data quality issues associated with Open Data on the Web and how Semantic Web techniques and vocabularies can be used to describe and remedy them. Fourthly, we will address issues around the searchability and integration of Open Data and discuss to what extent semantic search can help to overcome these. We close by briefly summarizing further issues not covered explicitly herein, such as multi-linguality, temporal aspects (archiving, evolution, temporal querying), as well as how and whether OWL and RDFS reasoning on top of integrated open data could help.}, Address = {London, United Kingdom}, Author = {Sebastian Neumaier and Axel Polleres and Simon Steyskal and J\"urgen Umbrich}, Booktitle = {Reasoning Web. Semantic Interoperability on the Web ({Reasoning Web 2017})}, Day = {7--11}, Month = jul, Publisher = {Springer}, Series = LNCS, volume = 10370, editor = {Giovambattista Ianni and Domenico Lembo and Leopoldo E. Bertossi and Wolfgang Faber and Birte Glimm and Georg Gottlob and Steffen Staab}, pages = {1--28}, Title = {Data Integration for Open Data on the Web}, Type = BC, Url = {http://www.polleres.net/publications/neum-etal-RW2017.pdf}, doi = {10.1007/978-3-319-61033-7_1}, Year = 2017, }
@inproceedings{fern-etal-ESWC2017, abstract = {The amount of raw data exchanged via web protocols is steadily increasing. Although the Linked Data infrastructure could potentially be used to selectively share RDF data with different individuals or organisations, the primary focus remains on the unrestricted sharing of public data. In order to extend the Linked Data paradigm to cater for closed data, there is a need to augment the existing infrastructure with robust security mechanisms. At the most basic level, both access control and encryption mechanisms are required. In this paper, we propose a flexible and dynamic architecture for securely storing and maintaining RDF datasets. By employing an encryption strategy based on Functional Encryption (FE), in which data access is enforced by the cryptographic approach itself, we allow for fine-grained access control over encrypted RDF data while at the same time reducing the administrative overhead associated with access control management.}, author = {Javier D. Fern{\'a}ndez and Sabrina Kirrane and Axel Polleres and Simon Steyskal}, booktitle = {Proceedings of the 14th European Semantic Web Conference (ESWC 2017)}, title = {Self-Enforcing Access Control for Encrypted {RDF}}, series = LNCS, publisher = {Springer}, volume = 10249, pages = {607--622}, url = {http://polleres.net/publications/fern-etal-ESWC2017.pdf}, day = {28--1}, month = may, year = 2017, address = {Portoro{\v{z}}, Slovenia}, }
@inproceedings{ahme-etal-ESWC2017, abstract = {DBpedia crystallized most of the concepts of the Semantic Web using simple mappings to convert Wikipedia articles to RDF data. This ``semantic view'' of wiki content has rapidly become the focal point of the Linked Open Data cloud, but its impact on the original Wikipedia source is limited. In particular, little attention has been paid to the benefits that the semantic infrastructure can bring to maintaining the wiki content, for instance to ensure that the effects of a wiki edit are consistent across infoboxes. In this paper, we present a framework for handling ontology-based updates of wiki content. Starting from DBpedia-like mappings converting infoboxes to a fragment of an {\tt OWL 2 RL} ontology, we discuss various issues associated with translating SPARQL updates on top of semantic data to the underlying wiki content. On the one hand, we provide a formalization of DBpedia as an Ontology-Based Data Management framework and study its computational properties. On the other hand, we provide a novel approach to the inherently intractable update translation problem, leveraging the pre-existing data for disambiguating updates.}, author = {Albin Ahmeti and Javier D. Fern{\'a}ndez and Axel Polleres and Vadim Savenkov}, booktitle = {Proceedings of the 14th European Semantic Web Conference (ESWC 2017)}, title = {Updating Wikipedia via {DBpedia} Mappings and {SPARQL}}, series = LNCS, publisher = {Springer}, editor = {Eva Blomqvist and Diana Maynard and Aldo Gangemi and Rinke Hoekstra and Pascal Hitzler and Olaf Hartig}, volume = 10249, pages = {485--501}, url = {http://polleres.net/publications/ahme-etal-ESWC2017.pdf}, doi = {10.1007/978-3-319-58068-5_30}, day = {28--1}, month = may, year = 2017, address = {Portoro{\v{z}}, Slovenia}, }
@inproceedings{neum-etal-LDOW2017, title = {Lifting data portals to the Web of Data}, author = {Sebastian Neumaier and J{\"u}rgen Umbrich and Axel Polleres}, abstract = {Data portals are central hubs for freely available (governmental) datasets. These portals use different software frameworks to publish their data, and the metadata descriptions of these datasets come in different schemas according to the framework used. The present work aims at re-exposing and connecting the metadata descriptions of currently 854k datasets on 261 data portals to the Web of Linked Data by mapping and publishing their homogenized metadata in standard vocabularies such as DCAT and Schema.org. Additionally, we publish existing quality information about the datasets and further enrich their descriptions by automatically generated metadata for CSV resources. In order to make all this information traceable and trustworthy, we annotate the generated data using W3C's provenance vocabulary. The dataset descriptions are harvested weekly and we offer access to the archived data by providing APIs compliant to the Memento framework. All this data -- a total of about 120 million triples per weekly snapshot -- is queryable at the SPARQL endpoint at \url{http://data.wu.ac.at/portalwatch/sparql}.}, year = 2017, booktitle = {10th Workshop on Linked Data on the Web (LDOW2017)}, address = {Perth, Australia}, day = 3, month = apr, url = {http://polleres.net/publications/neum-etal-LDOW2017.pdf} }
@inproceedings{filtz-etal-2017IDSC, author = {Erwin Filtz and Axel Polleres and Roman Karl and Bernhard Haslhofer}, title = {The evolution of the Bitcoin graph}, year = 2017, month = jun, day = {12--13}, address = {Salzburg, Austria}, booktitle = {Proceedings of the 1st International Data Science Conference (iDSC2017)}, url = {http://polleres.net/publications/filtz-etal-2017IDSC.pdf}, abstract = {Bitcoin as a virtual currency provides means to execute payments in an anonymous way without regulation by central authorities. In this paper we analyze structural properties of the Bitcoin graph and investigate how users behave in the Bitcoin system over time. Our analysis shows for instance that Bitcoin has a highly volatile exchange rate, which probably makes it uninteresting for long-term investments; moreover we show how transaction ``patterns'' have evolved over time.}, }
@inproceedings{sperl-etal-2017SIMPDA, author = {Simon Sperl and Giray Havur and Simon Steyskal and Cristina Cabanillas and Axel Polleres and Alois Haselb{\"{o}}ck}, editor = {Paolo Ceravolo and Maurice van Keulen and Kilian Stoffel}, abstract = {An appropriate resource utilization is crucial for organizations in order to avoid, among other things, unnecessary costs (e.g. when resources are under-utilized) and too long execution times (e.g. due to excessive workloads, i.e. resource over-utilization). However, traditional process control and risk measurement approaches do not address resource utilization in processes. We studied an often-encountered industry case of providing large-scale technical infrastructure, which requires rigorous testing of the deployed systems, and identified the need to project resource utilization as a means for measuring the risk of resource under- and over-utilization. Consequently, this paper presents a novel predictive model for resource utilization in decision-intensive processes, present in many domains. In particular, we predict the utilization of resources for a desired period of time given a decision-intensive business process that may include nested loops, and historical data (i.e. order and duration of past activity executions, resource profiles and their experience etc.). We have applied our method to a real business process with multiple instances and present the outcome.}, title = {Resource Utilization Prediction in Decision-Intensive Business Processes}, booktitle = {Proceedings of the 7th International Symposium on Data-driven Process Discovery and Analysis {(SIMPDA} 2017)}, address = {Neuch{\^{a}}tel, Switzerland}, month = Dec, day = {6--8}, year = 2017, series = {{CEUR} Workshop Proceedings}, volume = {2016}, pages = {128--141}, publisher = {CEUR-WS.org}, url = {http://ceur-ws.org/Vol-2016/paper10.pdf}, }
@inproceedings{beno-etal-2017CEDEM, booktitle = {2017 Conference for E-Democracy and Open Government (CeDEM 2017)}, address = {Krems, Austria}, author = {Martin Beno and Kathrin Figl and Axel Polleres and J{\"u}rgen Umbrich}, title = {Open Data Hopes and Fears: Determining the Barriers of Open Data}, abstract = {In recent years, Open Data has gained considerable attention: a steady growth in the number of openly published datasets -- mainly by governments and public administrations -- can be observed as the demand for Open Data rises. However, many potential providers are still hesitant to open their datasets, and at the same time users often face difficulties when attempting to use this data in practice. This indicates that there are still various barriers present regarding both the usage and publishing of Open Data, but studies that systematically collect and assess these barriers regarding their impact are rare. Based on this observation we survey prior literature on barriers, and have developed a questionnaire aimed at assessing both users' and publishers' views on obstacles regarding Open Data adoption. Using a sample of over 100 participants from Austria who completed our online survey, we draw conclusions about the relative importance of the barriers reported in the literature. The empirical findings presented in this study shall serve as a solid foundation for future research on the mitigation of Open Data barriers.}, note = {\textbf{Nominated for best paper award}}, url = {http://polleres.net/publications/Beno-etal-2017CeDEM.pdf}, year = 2017, month = may, day = {17--19}, }
@book{fern-etal-2016PROPEL, Abstract = {The PROPEL project -- Propelling the Potential of Enterprise Linked Data in Austria -- surveyed technological challenges, entrepreneurial opportunities, and open research questions on the use of Linked Data in a business context and developed a roadmap and a set of recommendations for policy makers, industry, and the research community. Results are summarized in the present book.}, Author = {Javier Fern{\'a}ndez and Elmar Kiesling and Sabrina Kirrane and Julia Neuschmid and Mika Mizerski and Axel Polleres and Marta Sabou and Thomas Thurner and Peter Wetz}, Month = Dec, Publisher = {edition mono/monochrom}, address = {Zentagasse 31/8, A-1050 Vienna, Austria}, Title = {Propelling the Potential of Enterprise Linked Data in Austria: Roadmap and Report}, Type = BOOK, Url = {https://www.linked-data.at/wp-content/uploads/2016/12/propel_book_web.pdf}, ISBN = {978-3-902796-55-4}, Year = 2016, }
@inproceedings{neum-etal-2016w3c, Abstract = {This report describes our experiences with mapping the metadata of CKAN-powered Open Data portals to the DCAT model. CKAN is the most prominent portal software framework used for publishing Open Data and is used by several governmental portals, including data.gov.uk and data.gov. We studied the actual usage of DCAT in 133 existing Open Data portals and report the key findings.}, Address = {Amsterdam, the Netherlands}, Author = {Sebastian Neumaier and J{\"u}rgen Umbrich and Axel Polleres}, Booktitle = {W3C Workshop on Data and Services Integration}, Day = {30--01}, Month = nov, Title = {Challenges of mapping current {CKAN} metadata to {DCAT}}, Type = WS, Url = {https://www.w3.org/2016/11/sdsvoc/SDSVoc16_paper_16}, Year = 2016}
@inproceedings{neum-etal-2016ISWC, author = {Neumaier, Sebastian and Umbrich, J\"urgen and Parreira, Josiane and Polleres, Axel}, title = {Multi-level Semantic Labelling of Numerical Values}, abstract = {With the success of Open Data a huge number of tabular data sources became available that could potentially be mapped and linked into the Web of (Linked) Data. Most existing approaches to ``semantically label'' such tabular data rely on mappings of textual information to classes, properties, or instances in RDF knowledge bases in order to link -- and eventually transform -- tabular data into RDF. However, as we will illustrate, Open Data tables typically contain a large portion of numerical columns and/or non-textual headers; therefore solutions that solely focus on textual ``cues'' are only partially applicable for mapping such data sources. We propose an approach to find and rank candidates of semantic labels and context descriptions for a given bag of numerical values. To this end, we apply hierarchical clustering over information taken from DBpedia to build a background knowledge graph of possible ``semantic contexts'' for bags of numerical values, over which we perform a nearest neighbour search to rank the most likely candidates. Our evaluation shows that our approach can assign fine-grained semantic labels when there is enough supporting evidence in the background knowledge graph. In other cases, our approach can nevertheless assign high-level contexts to the data, which could potentially be used in combination with other approaches to narrow down the search space of possible labels.}, note = {\textbf{Nominated for best student paper award}}, month = oct, day = {17--21}, pages = {428--445}, year = 2016, booktitle = {Proceedings of the 15th International Semantic Web Conference (ISWC 2016) - Part I}, volume = 9981, address = {Kobe, Japan}, series = LNCS, publisher = {Springer}, url = {http://polleres.net/publications/neum-etal-2016ISWC.pdf}, doi = {https://doi.org/10.1007/978-3-319-46523-4_26} }
@article{neum-etal-2016JDIQ, author = {Neumaier, Sebastian and Umbrich, J\"urgen and Polleres, Axel}, journal = {ACM Journal of Data and Information Quality (JDIQ)}, keyword = {open data, quality assessment}, abstract = {The Open Data movement has become a driver for publicly available data on the Web. More and more data -- from governments and public institutions but also from the private sector -- is made available online and is mainly published in so-called Open Data portals. However, with the increasing number of published resources, there are a number of concerns with regard to the quality of the data sources and the corresponding metadata, which compromise the searchability, discoverability and usability of resources. In order to get a more complete picture of the severity of these issues, the present work aims at developing a generic metadata quality assessment framework for various Open Data portals: we treat data portals independently from the portal software frameworks by mapping the specific metadata of three widely used portal software frameworks (CKAN, Socrata, OpenDataSoft) to the standardized DCAT metadata schema. We subsequently define several quality metrics, which can be evaluated automatically and in an efficient manner. Finally, we report findings based on monitoring a set of over 260 Open Data portals with 1.1M datasets. This includes the discussion of general quality issues, e.g. the retrievability of data, and the analysis of our specific quality metrics.}, volume = 8, number = 1, pages = 2, url = {http://polleres.net/publications/neum-etal-2016JDIQ.pdf}, title = {Automated Quality Assessment of Metadata across Open Data Portals}, year = {2016}, month = nov, doi = {https://doi.org/10.1145/2964909}, }
@inproceedings{havu-etal-2016BPM-Forum, author = {Giray Havur and Cristina Cabanillas and Jan Mendling and Axel Polleres}, title = {Resource Allocation with Dependencies in Business Process Management Systems}, abstract = {Business Process Management Systems (BPMS) facilitate the execution of business processes by coordinating all involved resources. Traditional BPMS assume that these resources are independent from one another, which justifies a greedy allocation strategy of offering each work item as soon as it becomes available. In this paper, we develop a formal technique to derive an optimal schedule for work items that have dependencies and resource conflicts. We build our work on Answer Set Programming (ASP), which is supported by a wide range of efficient solvers. We apply our technique in an industry scenario and evaluate its effectiveness. In this way, we contribute an explicit notion of resource dependencies within BPMS research and a technique to derive optimal schedules.}, booktitle = {Business Process Management Forum - {BPM} Forum 2016}, year = 2016, address = {Rio de Janeiro, Brazil}, month = sep, day = {18--22}, pages = {3--19}, volume = {260}, series = {Lecture Notes in Business Information Processing}, publisher = {Springer}, url = {http://polleres.net/publications/havu-etal-2016BPM-Forum.pdf}, }
@inproceedings{mitl-etal-2016OBD, author = {Mitl\"ohner, Johann and Neumaier, Sebastian and Umbrich, J\"urgen and Polleres, Axel}, booktitle = {2nd International Conference on Open and Big Data}, month = aug, day = {22--24}, note = {Invited paper}, type = CONF, abstract = {This work analyzes an Open Data corpus containing 200K tabular resources with a total file size of 413GB from a data consumer perspective. Our study shows that $\sim$10\% of the resources in Open Data portals are labelled as tabular data, of which only 50\% can be considered CSV files. The study inspects the general shape of these tabular data, reports on column and row distribution, analyses the availability of (multiple) header rows, and determines whether a file contains multiple tables. In addition, we inspect and analyze the table column types, detect missing values and report on the distribution of values.}, title = {Characteristics of Open Data {CSV} Files}, year = 2016, url = {http://polleres.net/publications/mitl-etal-2016OBD.pdf}, doi = {https://doi.org/10.1109/OBD.2016.18}, }
@inproceedings{fern-etal-2016SEMANTiCS, author = {Fern{\'a}ndez Garcia, Javier David and Umbrich, J\"urgen and Knuth, Magnus and Polleres, Axel}, booktitle = {12th International Conference on Semantic Systems (SEMANTiCS)}, month = sep, day = {12--15}, abstract = {There is an emerging demand for efficiently archiving and (temporally) querying different versions of evolving semantic Web data. As novel archiving systems are starting to address this challenge, foundations/standards for benchmarking RDF archives are needed to evaluate their storage space efficiency and the performance of different retrieval operations. To this end, we provide theoretical foundations on the design of data and queries to evaluate emerging RDF archiving systems. Then, we instantiate these foundations along a concrete set of queries on the basis of a real-world evolving dataset. Finally, we perform an empirical evaluation of various current archiving techniques and querying strategies on this data. Our work comprises -- to the best of our knowledge -- the first benchmark for querying evolving RDF data archives.}, pages = {41--48}, title = {Evaluating Query and Storage Strategies for {RDF} Archives}, year = 2016, publisher = {ACM}, Type = CONF, series = {ACM International Conference Proceedings Series}, url = {http://polleres.net/publications/fern-etal-2016SEMANTiCS.pdf}, }
@inproceedings{posa-etal-2016SEMANTiCS, author = {M{\'{o}}nica Posada{-}S{\'{a}}nchez and Stefan Bischof and Axel Polleres}, abstract = {Access to high quality and updated data is crucial to assess and contextualize the state of affairs of cities. The City Data Pipeline uses diverse Open Data sources to integrate statistical information about cities. The resulting incomplete dataset is not directly usable for data analysis. We exploit data from a geographic information system, namely OpenStreetMap, to obtain new indicators with better coverage for cities. We show that OpenStreetMap is a promising data source for statistical data about cities.}, title = {Extracting Geo-Semantics About Cities From OpenStreetMap}, booktitle = {Proceedings of the Posters and Demos Track of the 12th International Conference on Semantic Systems (SEMANTiCS2016)}, address = {Leipzig, Germany}, month = sep, day = {12--15}, year = 2016, url = {http://ceur-ws.org/Vol-1695/paper39.pdf}, }
@inproceedings{ahme-etal-2016AMW, Abstract = {DBpedia is a community effort that has created the most important cross-domain dataset in RDF, a focal point of the Linked Open Data (LOD) cloud. At its core there is a set of declarative mappings extracting the data from Wikipedia infoboxes and tables into RDF. However, while DBpedia focuses on publishing knowledge in a machine-readable way, little attention has been paid to the benefits of supporting machine updates. This greatly restricts the possibilities of automatic curation of the DBpedia data that could be semi-automatically propagated to Wikipedia, and also prevents maintainers from evaluating the impact of their edits on the consistency of knowledge. Excluding the DBpedia taxonomy from the editing cycle is a major drawback which we aim to address. This paper starts a discussion of DBpedia, making a case for a benchmark for Ontology-Based Data Management (OBDM). As we show, although based on fairly restricted mappings (which we cast as a variant of nested tgds here) and a minimalistic TBox language, accommodating DBpedia updates is intricate from different perspectives, ranging from conceptual (what is an adequate semantics for DBpedia SPARQL updates?) to challenges related to the user interface design.}, Author = {Albin Ahmeti and Javier D. Fern\'andez and Axel Polleres and Vadim Savenkov}, Booktitle = {Alberto Mendelzon International Workshop on Foundations of Data Management (AMW2016)}, Month = jun, Day = {6--10}, Year = 2016, Address = {Panama City, Panama}, Note = {Short paper}, Title = {Towards Updating {Wikipedia} via {DBpedia} Mappings and {SPARQL}}, Type = WS, url = {http://polleres.net/publications/ahme-etal-2016AMW.pdf}, }
@inproceedings{poll-etal-2016AMW, Title = {Nested Constructs vs. Sub-Selects in {SPARQL}}, Author = {Axel Polleres and Juan Reutter and Egor V. Kostylev}, Booktitle = {Alberto Mendelzon International Workshop on Foundations of Data Management (AMW2016)}, Month = jun, Day = {6--10}, Year = 2016, Address = {Panama City, Panama}, Abstract = {The issue of subqueries in SPARQL has appeared in different papers as an extension point to the original SPARQL query language. Particularly, nested CONSTRUCT queries in FROM clauses are a feature that has been discussed as a potential input for SPARQL 1.1, but was resolved to be left out in favour of SELECT subqueries under the -- unproven -- conjecture that such subqueries can express nested CONSTRUCT queries. In this paper, we show that it is indeed possible to unfold nested SPARQL CONSTRUCT queries into subqueries in SPARQL 1.1; our transformation, however, requires an exponential blowup in the nesting depth. This suggests that nested CONSTRUCT queries are indeed a useful syntactic feature in SPARQL that cannot compactly be replaced by subqueries.}, Type = WS, url = {http://polleres.net/publications/poll-etal-2016AMW.pdf}, }
@inproceedings{guti-etal-2016AMW, Abstract = {The standard semantics of SPARQL and the standard semantics of RDF differ fundamentally, sometimes leading to unintuitive answers. In this paper, we thus motivate an alternative semantics for SPARQL based on certain answers, taking into account the existential nature of blank nodes, the open-world assumption of RDF, and perhaps even the lack of a unique name assumption. We propose that SPARQL is a natural use-case for applying existing techniques that approximate certain answers in relational database settings.}, Author = {Claudio Gutierrez and Aidan Hogan and Daniel Hern\'andez and Axel Polleres}, Booktitle = {Alberto Mendelzon International Workshop on Foundations of Data Management (AMW2016)}, Month = jun, Day = {6--10}, Year = 2016, Address = {Panama City, Panama}, Note = {Short paper}, Title = {Certain Answers for {SPARQL}?}, Type = WS, url = {http://aidanhogan.com/docs/sparql-certain-answers-amw.pdf}, }
@inproceedings{ahme-etal-2016ESWC, title = {Handling Inconsistencies due to Class Disjointness in {SPARQL} Updates}, author = {Albin Ahmeti and Diego Calvanese and Axel Polleres and Vadim Savenkov}, year = 2016, month = jun, day={29--2}, Address = {Heraklion, Greece}, abstract = {The problem of updating ontologies has received increased attention in recent years. In the approaches proposed so far, either the update language is restricted to (sets of) atomic updates, or, where the full SPARQL update language is allowed, the TBox language is restricted so that no inconsistencies can arise. In this paper we discuss directions to overcome these limitations. Starting from a DL-Lite fragment covering RDFS and concept disjointness axioms, we define three semantics for SPARQL update: under cautious semantics, inconsistencies are resolved by rejecting updates potentially introducing conflicts; under brave semantics, instead, conflicts are overridden in favor of new information where possible; finally, the fainthearted semantics is a compromise between the former two approaches, designed to accommodate as much of the new information as possible, as long as consistency with the prior knowledge is not violated. We show how these semantics can be implemented in SPARQL via rewritings of polynomial size and draw first conclusions from their practical evaluation.}, series = LNCS, volume = 9678, pages = {387--404}, editor = {Harald Sack and Eva Blomqvist and Mathieu d'Aquin and Chiara Ghidini and Simone Paolo Ponzetto and Christoph Lange}, booktitle = {Proceedings of the 13th European Semantic Web Conference (ESWC2016)}, url = {http://polleres.net/publications/ahme-etal-2016ESWC.pdf}, Publisher = {Springer}, }
@inproceedings{borr-etal-2016XMLPrague, author = {Marta Borriello and Christian Dirschl and Axel Polleres and Phil Ritchie and Frank Salliau and Felix Sasaki and Giannis Stoitsis}, title = {From XML to RDF step by step: approaches for leveraging XML workflows with linked data}, year = 2016, month = feb, day = {11--13}, address = {Prague, Czech Republic}, abstract = {There have been many discussions about benefits and drawbacks of XML vs. RDF. In practice more and more XML and linked data technologies are being used together. This leads to opportunities and uncertainties: for years companies have invested heavily in XML workflows. They are not willing to throw them away for the benefits of linked data. This paper aims to start a discussion on approaches for integrating XML and RDF workflows. This should help the incremental adoption of linked data, without the need to throw away XML tooling and to start content processing from scratch.}, booktitle = {XML Prague 2016 -- Conference Proceedings}, pages = {121--138}, isbn = {978-80-906259-0-7}, url = {http://archive.xmlprague.cz/2016/files/xmlprague-2016-proceedings.pdf#133}, }
@inproceedings{caba-etal-2015SIMPDA, author = {Cristina Cabanillas and Jan Mendling and Axel Polleres and Alois Haselb{\"{o}}ck}, title = {Safety-critical Human- and Data-centric Process Management in Engineering Projects}, abstract = {Complex technical systems, industrial systems or infrastructure systems are rich in customizable features and raise high demands on quality and safety-critical aspects. The activities to create complete, valid and reliable planning and customization process data for a product deployment are part of an overarching engineering process that is crucial for the successful completion of a project and, particularly, for verifying compliance to existing regulations in a distributed, heterogeneous environment. In this paper, we discuss the challenges that process management needs to address in such complex engineering projects, and present an architecture that comprises the functionality required together with findings and results already obtained for its different components.}, booktitle = {Proceedings of the 5th International Symposium on Data-driven Process Discovery and Analysis {(SIMPDA} 2015)}, address = {Vienna, Austria}, month = Dec, day = {9--11}, pages = {145--148}, year = 2015, series = {CEUR Workshop Proceedings}, volume = 1527, Publisher = {CEUR-WS.org}, url = {http://ceur-ws.org/Vol-1527/paper13.pdf}, }
@article{OCG2015, Author = {Axel Polleres}, Journal = {OCG Journal}, Note = {Invited article (in German)}, Pages = {13--16}, Url = {http://www.ocg.at/sites/ocg.at/files/medien/pdfs/OCG-Journal1503.pdf#13}, Title = {{Das neue Berufsbild ``Data Scientist''}}, Abstract = {Not only in computer science, but in more and more disciplines and industries, ever-growing amounts of data play a decisive role. In many business areas, understanding data yields decisive competitive advantages, and the ability to technically process ever larger amounts of data opens up new opportunities, but also poses new challenges for industry and education. Owing to these developments, the profession of the ``Data Scientist'' was dubbed the ``Sexiest Job of the 21st Century'' around three years ago, and voices are already emerging arguing why it no longer is -- even though ``Data Scientist'' is still a fuzzy term, and we have to ask ourselves: What does a Data Scientist actually do? Which prerequisites does someone need in order to work as a Data Scientist? In which direction will this job profile evolve in the future? Are there dedicated educational programmes for it, and where? In the following we briefly address each of these questions and try to answer them -- at least from the author's point of view.}, Type = MAGAZINE, Volume = {03/2015}, Year = 2015 }
@inproceedings{havur-etal-2015BPI, author = {Giray Havur and Cristina Cabanillas and Axel Polleres and Jan Mendling}, title = {{Automated Resource Allocation in Business Processes with Answer Set Programming}}, booktitle = {11th International Workshop on Business Process Intelligence 2015}, month = aug, abstract = {Human resources are of central importance for executing and supervising business processes. An optimal resource allocation can dramatically mitigate the undesirable consequences of resource shortages. However, existing approaches for resource allocation have some limitations, e.g., they do not consider concurrent process instances or loops in business processes, which may greatly alter resource requirements. This paper introduces a novel approach for automatically allocating resources to process activities in a time-optimal way that is designed to tackle the aforementioned shortcomings. We achieve this by representing the resource allocation problem in Answer Set Programming (ASP), which allows us to model the problem in an extensible, modular, and thus maintainable way, and which is supported by various efficient solvers.}, day = 31, address = {Innsbruck, Austria}, url = {http://polleres.net/publications/havur-etal-2015BPI.pdf}, year = {2015} }
@inproceedings{bala-etal-2015BPM, author = {Saimir Bala and Cristina Cabanillas and Jan Mendling and Andreas Rogge{-}Solti and Axel Polleres}, title = {Mining Project-Oriented Business Processes}, booktitle = {13th International Conference on Business Process Management ({BPM} 2015)}, month = aug, pages = {425--440}, series = {Lecture Notes in Computer Science}, volume = {9253}, publisher = {Springer}, year = {2015}, abstract = {Large engineering processes need to be monitored in detail regarding what was done and when, in order to prove compliance with rules and regulations. A typical problem of these processes is the lack of control that a central process engine would provide, such that it is difficult to track the actual course of work even if data is stored in version control systems (VCS). In this paper, we address this problem by defining a mining technique that helps to generate models that visualize the work history as GANTT charts. To this end, we formally define the notion of a project-oriented business process and a corresponding mining algorithm. Our evaluation based on a prototypical implementation demonstrates the benefits in comparison to existing process mining approaches for this specific class of processes.}, day = 31, address = {Innsbruck, Austria}, url = {http://polleres.net/publications/bala-etal-2015BPM.pdf}, }
@inproceedings{bisc-etal-2015ISWC, author = {Stefan Bischof and Christoph Martin and Axel Polleres and Patrik Schneider}, Booktitle = {Proceedings of the 14th International Semantic Web Conference (ISWC 2015) - Part II}, year = 2015, month = oct, day = {11--15}, address = {Bethlehem, Pennsylvania}, abstract = {Access to high quality and recent data is crucial both for decision makers in cities and for the public. Likewise, infrastructure providers could offer more tailored solutions to cities based on such data. However, even though there are many data sets containing relevant indicators about cities available as open data, it is cumbersome to integrate and analyze them, since the collection is still a manual process and the sources are not connected to each other upfront. Further, disjoint indicators and cities across the available data sources lead to a large proportion of missing values when integrating these sources. In this paper we present a platform for collecting, integrating, and enriching open data about cities in a reusable and comparable manner: we have integrated various open data sources and present approaches for predicting missing values, where we use standard regression methods in combination with principal component analysis (PCA) to improve the quality and amount of predicted values. Since indicators and cities only have partial overlaps across data sets, we particularly focus on predicting indicator values across data sets, where we extend, adapt, and evaluate our prediction model for this particular purpose: as a ``side product'' we learn ontology mappings (simple equations and sub-properties) for pairs of indicators from different data sets. Finally, we republish the integrated and predicted values as linked open data.}, title = {Collecting, Integrating, Enriching and Republishing Open City Data as Linked Data}, url = {http://www.polleres.net/publications/bisc-etal-2015ISWC.pdf}, series = LNCS, volume = 9367, editor = {Marcelo Arenas and Oscar Corcho and Elena Simperl and Markus Strohmaier and Mathieu d'Aquin and Kavitha Srinivas and Paul T. Groth and Michel Dumontier and Jeff Heflin and Krishnaprasad Thirunarayan and Steffen Staab}, publisher = {Springer}, pages = {57--75} }
@inproceedings{belk-etal-2015ISWC, author = {Stefan Belk and Gerhard Wohlgenannt and Axel Polleres}, Booktitle = {ISWC 2015 Posters \& Demos}, Editor = {Jeff Z. Pan and Serena Villata}, Publisher = {CEUR-WS.org}, Volume = 1486, Series = {CEUR Workshop Proceedings}, year = 2015, month = oct, day = {13}, address = {Bethlehem, Pennsylvania}, abstract = {In the Semantic Web, the Web Ontology Language (OWL) vocabulary is used for the representation of formal ontologies, while the Simple Knowledge Organisation System (SKOS) is a vocabulary designed for thesauri or concept taxonomies without formal semantics. Despite their different nature, on the Web these two vocabularies are often used together. Here, we try to explore and exploit the joint usage of OWL and SKOS. More precisely, we first define usage patterns to detect problematic modeling from connections between SKOS and OWL. Next, we also investigate if additional information can be inferred from joint usage with SKOS in order to enrich semantic inferences through OWL alone -- although SKOS was designed without formal semantics, we argue for this heretic approach by applicability ``in the wild'': the patterns for modeling errors and inference of new information are transformed to SPARQL queries and applied to real world data from the Billion Triple Challenge 2014; we manually evaluate this corpus and assess the quality of the defined patterns empirically.}, title = {Exploring and Exploiting(?) the Awkward Connections Between {SKOS} and {OWL}}, Note = {Poster abstract}, url = {http://www.polleres.net/publications/belk-etal-2015ISWC_Poster.pdf}, }
@inproceedings{umbr-etal-2015OBD, author = {J\"urgen Umbrich and Sebastian Neumaier and Axel Polleres}, title = {Quality Assessment \& Evolution of Open Data portals}, abstract = {Despite the enthusiasm caused by the availability of a steadily increasing amount of openly available, structured data, critical voices have begun to address the emerging issue of low quality in the metadata and data sources of Open Data portals, a serious risk that could disrupt the Open Data project. However, there exist no comprehensive reports about the actual quality of Open Data portals. In this work, we present our efforts to monitor and assess the quality of 82 active Open Data portals, run by organisations across 35 different countries. We discuss our quality metrics and report comprehensive findings by analysing the data and the evolution of the portals since September 2014. Our results include findings about a steady growth of information, a high heterogeneity across the portals for various aspects, and also insights on openness, contactability and the availability of metadata.}, year = 2015, day = {24--26}, month = aug, booktitle = {IEEE International Conference on Open and Big Data}, address = {Rome, Italy}, url = {http://www.polleres.net/publications/umbr-etal-2015OBD.pdf}, note = {\textbf{Best paper award}}, }
@inproceedings{stey-poll-2015RuleML, author = {Simon Steyskal and Axel Polleres}, title = {Towards Formal Semantics for {ODRL} Policies}, booktitle = {9th International Web Rule Symposium (RuleML2015)}, address = {Berlin, Germany}, month = aug, day = {2--5}, Publisher = {Springer}, Series = LNCS, volume = 9202, pages = {360--375}, Type = CONF, url = {http://www.polleres.net/publications/stey-poll-2015RuleML.pdf}, doi = {https://doi.org/10.1007/978-3-319-21542-6_23}, year = 2015, abstract = {Most policy-based access control frameworks explicitly model whether execution of certain actions (read, write, etc.) on certain assets should be permitted or denied, and usually assume that such actions are disjoint from each other, i.e. there does not exist any explicit or implicit dependency between actions of the domain. This in turn means that conflicts among rules or policies can only occur if those contradictory rules or policies constrain the same action. In the present paper -- motivated by the example of ODRL 2.1 as policy expression language -- we follow a different approach and shed light on possible dependencies among actions of access control policies. We propose an interpretation of the formal semantics of general ODRL policy expressions and motivate rule-based reasoning over such policy expressions, taking both explicit and implicit dependencies among actions into account. Our main contributions are (i) an exploration of different kinds of ambiguities that might emerge based on explicit or implicit dependencies among actions, and (ii) a formal interpretation of the semantics of general ODRL policies based on a defined abstract syntax for ODRL, which shall eventually enable rule-based reasoning over a set of such policies.}, }
@inproceedings{umbr-etal-2015diachron, author = {J\"urgen Umbrich and Nina Mrzelj and Axel Polleres}, title = {Towards capturing and preserving changes on the Web of Data}, booktitle = {Managing the Evolution and Preservation of the Data Web - First Diachron Workshop at ESWC 2015}, year = 2015, abstract = {Existing Web archives aim to capture and preserve the changes of documents on the Web and provide data corpora of high value which are used in various areas (e.g. to optimise algorithms or to study the Zeitgeist of a generation). So far, Web archives have concentrated their efforts on capturing the large Web of documents with periodic snapshot crawls. Little focus is placed on preserving the continuously growing Web of Data and on actually keeping track of the real frequency of changes. In this work we present our efforts to capture and archive the changes on the Web of Data. We describe our infrastructure and focus on evaluating strategies to accurately capture the changes of data and to estimate the crawl time for a given set of URLs, with the aim of optimally scheduling the revisiting of URLs with limited resources.}, address = {Portoro{\v{z}}, Slovenia}, month = may, day = 31, pages = {50--65}, url = {http://ceur-ws.org/Vol-1377/paper7.pdf}, }
@inproceedings{fern-etal-2015diachron, author = {Javier D. Fern{\'a}ndez and Axel Polleres and J{\"u}rgen Umbrich}, title = {Towards Efficient Archiving of Dynamic Linked Open Data}, pages = {34--49}, booktitle = {Managing the Evolution and Preservation of the Data Web - First Diachron Workshop at ESWC 2015}, year = 2015, abstract = {The Linked Data paradigm has enabled a huge shared infrastructure for connecting data from different domains, which can be browsed and queried together as a huge knowledge base. However, structured interlinked datasets in this Web of Data are not static but continuously evolving, which suggests the investigation of approaches to preserve Linked Data across time. In this article, we survey and analyse current techniques addressing the problem of archiving different versions of semantic Web data, with a focus on their space efficiency, the retrieval functionality they serve, and the performance of such operations.}, address = {Portoro{\v{z}}, Slovenia}, month = may, day = 31, url = {http://ceur-ws.org/Vol-1377/paper6.pdf}, }
@inproceedings{poll-2015DL, author = {Axel Polleres}, booktitle = {28th International Workshop on Description Logics (DL2015)}, title = {Integrating Open Data: (How) Can Description Logics Help me?}, abstract = {In this talk, we will report on experiences and obstacles in collecting and integrating Open Data across various data sets. We will discuss how both methods from knowledge representation and reasoning as well as from statistics and data mining can be used to tackle some issues we encountered.}, year = 2015, month = jun, day = {7--10}, url = {http://www.polleres.net/publications/poll-2015DL.pdf}, address = {Athens, Greece}, note = {Abstract/Invited Talk.} }
@inproceedings{ahme-etal-2015DL, author = {Albin Ahmeti and Diego Calvanese and Vadim Savenkov and Axel Polleres}, booktitle = {28th International Workshop on Description Logics (DL2015)}, title = {{Dealing with Inconsistencies due to Class Disjointness in SPARQL Update}}, abstract = {The problem of updating ontologies has received increased attention in recent years. In the approaches proposed so far, either the update language is restricted to (sets of) atomic updates, or, where the full SPARQL update language is allowed, the TBox language is restricted so that no inconsistencies can arise. In this paper we discuss directions to overcome these limitations. Starting from a DL-Lite fragment covering RDFS and concept/class disjointness axioms, we define two semantics for SPARQL update: under cautious semantics, inconsistencies are resolved by rejecting updates potentially introducing conflicts; under brave semantics, instead, conflicts are overridden in favor of new information where possible. The latter approach builds upon existing work on the evolution of DL-Lite knowledge bases, setting it in the context of generic SPARQL updates.}, year = 2015, month = jun, day = {7--10}, url = {http://www.polleres.net/publications/ahme-etal-2015DL.pdf}, address = {Athens, Greece}, }
@inproceedings{bisc-etal-2015DL, title = {Schema-Agnostic Query Rewriting for {OWL} {QL}}, booktitle = {28th International Workshop on Description Logics (DL2015)}, abstract = {In this extended abstract, we review our recent research on ontology-based query answering in OWL QL, first published at the International Semantic Web Conference 2014. OWL QL is a popular member of the DL-Lite family that is part of the W3C OWL 2 standard. Typical implementations use the OWL QL TBox to rewrite a conjunctive query into an equivalent set of queries, to be answered against the ABox of the ontology. With the adoption of the recent SPARQL 1.1 standard, however, RDF databases are capable of answering much more expressive queries directly, and we ask how this can be exploited in query rewriting. We find that SPARQL 1.1 is powerful enough to ``implement'' a full-fledged OWL QL reasoner in a single query. Using additional SPARQL 1.1 features, we develop a new method of schema-agnostic query rewriting, where arbitrary conjunctive queries over OWL QL are rewritten into equivalent SPARQL 1.1 queries in a way that is fully independent of the actual schema. This allows us to query RDF data under OWL QL entailment without extracting or preprocessing OWL.}, author = {Stefan Bischof and Markus Kr{\"o}tzsch and Axel Polleres and Sebastian Rudolph}, year = 2015, month = jun, day = {7--10}, url = {http://www.polleres.net/publications/bisc-etal-2015DL.pdf}, address = {Athens, Greece}, note = {Extended Abstract (full paper at ISWC2014).} }
@inproceedings{bisc-etal-2015KnowLOD, author = {Stefan Bischof and Christoph Martin and Axel Polleres and Patrik Schneider}, booktitle = {4th Workshop on Knowledge Discovery and Data Mining Meets Linked Open Data (Know@LOD)}, title = {{Open City Data Pipeline: Collecting, Integrating, and Predicting Open City Data}}, abstract = {Having access to high quality and recent data is crucial both for decision makers in cities as well as for informing the public; likewise, infrastructure providers could offer more tailored solutions to cities based on such data. However, even though there are many data sets containing relevant indicators about cities available as open data, it is cumbersome to integrate and analyze them, since the collection is still a manual process and the sources are not connected to each other upfront. Further, disjoint indicators and cities across the available data sources lead to a large proportion of missing values when integrating these sources. In this paper we present a platform for collecting, integrating, and enriching open data about cities in a re-usable and comparable manner: we have integrated various open data sources and present approaches for predicting missing values, where we use different standard regression methods in combination with principal component analysis to improve the quality and amount of predicted values. Further, we re-publish the integrated and predicted values as linked open data.}, year = 2015, month = may, day = {31}, address = {Portoro{\v{z}}, Slovenia}, url = {http://www.polleres.net/publications/bisc-etal-2015KnowLOD.pdf} }
@inproceedings{umbr-etal-2015ODQ, author = {J\"urgen Umbrich and Sebastian Neumaier and Axel Polleres}, title = {Towards assessing the quality evolution of Open Data portals}, abstract = {In this work, we present the Open Data Portal Watch project, a public framework to continuously monitor and assess the (meta-)data quality in Open Data portals. We critically discuss the objectiveness of various quality metrics. Further, we report on early findings based on 22 weekly snapshots of 90 CKAN portals and highlight interesting observations and challenges.}, year=2015, booktitle = {ODQ2015: Open Data Quality: from Theory to Practice Workshop}, address= {Munich, Germany}, month = mar, url={http://polleres.net/publications/umbr-etal-2015ODQ.pdf} }
@article{umb-etal-2015SWJ, title = {Link Traversal Querying for a Diverse Web of Data}, abstract = {Traditional approaches for querying the Web of Data often involve centralised warehouses that replicate remote data. Conversely, Linked Data principles allow for answering queries live over the Web by dereferencing URIs to traverse remote data sources at runtime. A number of authors have looked at answering SPARQL queries in such a manner; these link-traversal based query execution (LTBQE) approaches for Linked Data offer up-to-date results and decentralised (i.e., client-side) execution, but must operate over incomplete dereferenceable knowledge available in remote documents, thus affecting response times and ``recall'' for query answers. In this paper, we study the recall and effectiveness of LTBQE, in practice, for the Web of Data. Furthermore, to integrate data from diverse sources, we propose lightweight reasoning extensions to help find additional answers. From the state-of-the-art which (1) considers only dereferenceable information and (2) follows rdfs:seeAlso links, we propose extensions to consider (3) owl:sameAs links and reasoning, and (4) lightweight RDFS reasoning. We then estimate the recall of link-traversal query techniques in practice: we analyse a large crawl of the Web of Data (the BTC'11 dataset), looking at the ratio of raw data contained in dereferenceable documents vs. the corpus as a whole and determining how much more raw data our extensions make available for query answering. We then stress-test LTBQE (and our extensions) in real-world settings using the FedBench and DBpedia SPARQL Benchmark frameworks, and propose a novel benchmark called QWalk based on random walks through diverse data. We show that link-traversal query approaches often work well in uncontrolled environments for simple queries, but need to retrieve an unfeasible number of sources for more complex queries. We also show that our reasoning extensions increase recall at the cost of slower execution, often increasing the rate at which results are returned; conversely, we show that reasoning aggravates performance issues for complex queries.}, Journal = SWJ, author = {J\"urgen Umbrich and Aidan Hogan and Axel Polleres and Stefan Decker}, Volume = 6, number = 6, pages = {585--624}, Publisher = {IOS Press}, Type = JOURNAL, Url = {http://semantic-web-journal.org/content/link-traversal-querying-diverse-web-data-0}, year = 2015 }
@incollection{poll-14, author = {Axel Polleres}, title = {{SPARQL}}, booktitle = {Encyclopedia of Social Network Analysis and Mining}, year = {2014}, pages = {1960--1966}, doi = {10.1007/978-1-4614-6170-8_124}, publisher = {Springer}, editor = {Reda Alhajj and Jon G. Rokne} }
@incollection{poll-stey-2014, Abstract = {The World Wide Web Consortium (W3C) as the main standardization body for Web standards has set a particular focus on publishing and integrating Open Data. In this chapter, the authors explain various standards from the W3C's Semantic Web activity and the -- potential -- role they play in the context of Open Data: RDF, as a standard data format for publishing and consuming structured information on the Web; the Linked Data principles for interlinking RDF data published across the Web and leveraging a Web of Data; RDFS and OWL to describe vocabularies used in RDF and for describing mappings between such vocabularies. The authors conclude with a review of current deployments of these standards on the Web, particularly within public Open Data initiatives, and discuss potential risks and challenges.}, Author = {Axel Polleres and Simon Steyskal}, Booktitle = {Handbook of Research on Advanced ICT Integration for Governance and Policy Modeling}, Editor = {Miguel-Angel Sicilia and Pablo Serrano-Balazote}, Month = jun, pages = {28--47}, Publisher = {IGI Global}, Title = {Semantic Web Standards for Publishing and Integrating Open Data}, Type = BC, Url = {http://www.igi-global.com/chapter/semantic-web-standards-for-publishing-and-integrating-open-data/116654}, Year = 2014,}
@inproceedings{dell-etal-2014iswc-dev, title = {Querying the Web of Data with {XSPARQL} 1.1}, booktitle = {ISWC2014 Developers Workshop}, author = {Daniele Dell'Aglio and Axel Polleres and Nuno Lopes and Stefan Bischof}, abstract = {On the Web and in corporate environments there exists a lot of XML data in various formats. XQuery and XSLT serve as query and transformation languages for XML. But as RDF also becomes a mainstream format for the Web of Data, transformation languages between these formats are required. XSPARQL is a hybrid language that provides an integration framework for XML and RDF, but also for JSON and relational data, by partially combining several languages such as XQuery, SPARQL 1.1 and SQL. In this paper we present the latest open source release of the XSPARQL engine, which is based on standard software components (Jena and Saxon), and outline possible applications of XSPARQL 1.1 to address Web data integration use cases.}, url = {http://www.polleres.net/publications/dell-etal-2014iswc-dev.pdf}, Publisher = {CEUR-WS.org}, Volume = 1268, Series = {CEUR Workshop Proceedings}, year = 2014, month = oct, }
@inproceedings{ahme-etal-2014iswc, title = {Updating {RDFS} {ABoxes} and {TBoxes} in {SPARQL}}, abstract = {Updates in RDF stores have recently been standardised in the SPARQL 1.1 Update specification. However, computing answers entailed by ontologies in triple stores is usually treated orthogonally to updates. Even the W3C's recent SPARQL 1.1 Update language and SPARQL 1.1 Entailment Regimes specifications explicitly exclude a standard behaviour for how SPARQL endpoints should treat entailment regimes other than simple entailment in the context of updates. In this paper, we outline different routes to close this gap. We define a fragment of SPARQL basic graph patterns corresponding to (the RDFS fragment of) DL-Lite and the corresponding SPARQL update language, dealing with updates both of ABox and of TBox statements. We discuss possible semantics along with potential strategies for implementing them. We treat both (i) materialised RDF stores, which store all entailed triples explicitly, and (ii) reduced RDF stores, that is, redundancy-free RDF stores that do not store any RDF triples (corresponding to DL-Lite ABox statements) already entailed by others.}, author = {Albin Ahmeti and Diego Calvanese and Axel Polleres}, Booktitle = {Proceedings of the 13th International Semantic Web Conference (ISWC 2014)}, Day = {19--23}, Month = oct, Publisher = {Springer}, Series = LNCS, Type = CONF, url = {http://www.polleres.net/publications/ahme-etal-2014iswc.pdf}, Year = 2014, }
@inproceedings{buil-etal-2014iswc, title = {Strategies for Executing Federated Queries in {SPARQL1.1}}, abstract = {A common way for exposing RDF data on the Web is by means of SPARQL endpoints, i.e., Web services that implement the SPARQL protocol and allow end users and applications to query just the RDF data they want. However, servers hosting SPARQL endpoints typically restrict the access to the data by limiting the amount of results returned by user queries or the number of queries per time and client that may be issued. For addressing these problems we analysed different strategies that shall allow one to obtain complete query results for federated queries using SPARQL1.1's federated query extension by rewriting the original query. We show that some seemingly intuitive ``recipes'' for decomposing federated queries to circumvent server limitations provide unsound results in the general case, and provide fixes or discuss under which restrictions these recipes are still applicable. Finally, we evaluate the different proposed strategies in order to check their feasibility in practice.}, author = {Carlos Buil-Aranda and Axel Polleres and J{\"u}rgen Umbrich}, Booktitle = {Proceedings of the 13th International Semantic Web Conference (ISWC 2014)}, Day = {19--23}, Month = oct, Publisher = {Springer}, Series = LNCS, Type = CONF, url = {http://www.polleres.net/publications/buil-etal-2014iswc.pdf}, Year = 2014, }
@inproceedings{bisc-etal-2014iswc, title = {Schema-Agnostic Query Rewriting in {SPARQL 1.1}}, abstract = {SPARQL 1.1 supports the use of ontologies to enrich query results with logical entailments, and OWL 2 provides a dedicated fragment OWL QL for this purpose. Typical implementations use the OWL QL schema to rewrite a conjunctive query into an equivalent set of queries, to be answered against the non-schema part of the data. With the adoption of the recent SPARQL 1.1 standard, however, RDF databases are capable of answering much more expressive queries directly, and we ask how this can be exploited in query rewriting. We find that SPARQL 1.1 is powerful enough to ``implement'' a full-fledged OWL QL reasoner in a single query. Using additional SPARQL 1.1 features, we develop a new method of schema-agnostic query rewriting, where arbitrary conjunctive queries over OWL QL are rewritten into equivalent SPARQL 1.1 queries in a way that is fully independent of the actual schema. This allows us to query RDF data under OWL QL entailment without extracting or preprocessing OWL axioms.}, author = {Stefan Bischof and Markus Kr{\"o}tzsch and Axel Polleres and Sebastian Rudolph}, Booktitle = {Proceedings of the 13th International Semantic Web Conference (ISWC 2014)}, Day = {19--23}, Month = oct, Publisher = {Springer}, Series = LNCS, Type = CONF, url = {http://www.polleres.net/publications/bisc-etal-2014iswc.pdf}, Year = 2014, }
@inproceedings{stey-poll-2014SEMANTiCS, author = {Simon Steyskal and Axel Polleres}, title = {Defining Expressive Access Policies for Linked Data using the {ODRL} Ontology 2.0}, Booktitle = {Proceedings of the SEMANTiCS 2014}, abstract = {Together with the latest efforts in publishing Linked (Open) Data, legal issues around publishing and consuming such data are gaining increased interest. Particular areas of interest include (i) how to define more expressive access policies which go beyond common licenses, (ii) how to introduce pricing models for online datasets (for non-open data), and (iii) how to realize (i)+(ii) while providing respective metadata descriptions that are both human-readable and machine-processable. In this paper, we show based on different examples that the Open Digital Rights Language (ODRL) Ontology 2.0 is able to address all previously mentioned issues, i.e., it is suitable for expressing a large variety of different access policies for Linked Data. By defining policies as ODRL in RDF we aim for (i) higher flexibility and simplicity in usage, (ii) machine/human readability, and (iii) fine-grained policy expressions for Linked (Open) Data.}, Note = {Short paper}, publisher = {ACM}, Type = CONF, series = {ACM International Conference Proceedings Series}, year = 2014, month = sep, day = {3--5}, address = {Leipzig, Germany}, url = {http://www.polleres.net/publications/stey-poll-2014SEMANTiCS.pdf}, }
@inproceedings{sche-etal-2014ConfigWS, author = {Gottfried Schenner and Stefan Bischof and Axel Polleres and Simon Steyskal}, title = {Integrating Distributed Configurations with {RDFS} and {SPARQL}}, year = 2014, abstract = {Large interconnected technical systems (e.g. railway networks, power grids, computer networks) are often configured with the help of multiple configurators, which store their configurations in separate databases based on heterogeneous domain models (ontologies). When users want to ask queries over several distributed configurations, these domain models need to be aligned. To this end, standard mechanisms for ontology and data integration are required that enable combining query answering with reasoning about these distributed configurations. In this paper we describe our experience with using standard Semantic Web technologies (RDFS and SPARQL) in such a context.}, booktitle = {16th International Configuration Workshop}, address = {Novi Sad, Serbia}, url = {http://www.polleres.net/publications/sche-etal-2014ConfigWS.pdf}, month = sep, day = {25--26}, }
@incollection{hitz-etal-2014hhl_logics_semantic_web, author = {Hitzler, Pascal and Lehmann, Jens and Polleres, Axel}, booktitle = {Computational Logic}, editor = {Dov M. Gabbay and J\"org H. Siekmann and John Woods}, publisher = {Elsevier}, series = {Handbook of the History of Logic}, title = {Logics for the Semantic Web}, abstract = {This chapter summarizes the developments of Semantic Web standards such as RDF, OWL, RIF and SPARQL and their foundations in Logics. It aims at providing an entry point particularly for logicians to these standards.}, url = {http://jens-lehmann.org/files/2014/hhl_logics_semantic_web.pdf}, volume = 9, year = 2014, pages = {679--710}, }
@inproceedings{ahme-etal-2014DL, author = {Albin Ahmeti and Diego Calvanese and Axel Polleres}, booktitle = {27th International Workshop on Description Logics (DL2014)}, title = {{SPARQL Update for Materialized Triple Stores under DL-Lite$_{RDFS}$ Entailment}}, abstract = {Updates in RDF stores have recently been standardised in the SPARQL 1.1 Update specification. However, computing answers entailed by ontologies in triple stores is usually treated orthogonally to updates. Even W3C's SPARQL 1.1 Update language and SPARQL 1.1 Entailment Regimes specifications explicitly exclude a standard behaviour for entailment regimes other than simple entailment in the context of updates. In this paper, we take a first step to close this gap. We define a fragment of SPARQL basic graph patterns corresponding to (the RDFS fragment of) DL-Lite and the corresponding SPARQL update language, dealing with updates both of ABox and of TBox statements. We discuss possible semantics along with potential strategies for implementing them. Particularly, we treat materialised RDF stores, which store all entailed triples explicitly, and preservation of materialisation upon ABox and TBox updates.}, year = 2014, month = jul, day = {17--20}, address = {Vienna, Austria}, url = {http://ceur-ws.org/Vol-1193/paper_7.pdf}, }
@inproceedings{poll-buil-2014AMW, Abstract = {The most common way for exposing RDF data on the Web is by means of SPARQL endpoints. These endpoints are Web services that implement the SPARQL protocol and thus allow end users and applications to query just the RDF data they want. However, the servers hosting the SPARQL endpoints restrict the access to the data by limiting the number of results returned by user queries or the number of queries per time and client that can be issued. To address these problems we analysed different alternatives that allow obtaining complete query result sets from the SPARQL endpoints by rewriting the original query using SPARQL1.1's federated query extension. We show that some of the commonly used SPARQL query patterns for this task provide unsound results while other patterns are more suitable. We provide equivalent query patterns that help users in obtaining complete result sets, circumventing the limitations imposed by servers.}, Author = {Carlos Buil-Aranda and Axel Polleres}, Booktitle = {Alberto Mendelzon International Workshop on Foundations of Data Management (AMW2014)}, Month = jun, Day = {4--6}, Year = 2014, volume = 1189, Publisher = {CEUR-WS.org}, Series = {CEUR Workshop Proceedings}, editor = {Georg Gottlob and Jorge P{\'e}rez}}
@article{hoga-etal-2014JWS, title = {Everything You Always Wanted to Know About Blank Nodes}, author = {Aidan Hogan and Marcelo Arenas and Alejandro Mallea and Axel Polleres}, abstract = {In this paper we thoroughly cover the issue of blank nodes, which have been defined in RDF as `existential variables'. We first introduce the theoretical precedent for existential blank nodes from first order logic and incomplete information in database theory. We then cover the different (and sometimes incompatible) treatment of blank nodes across the W3C stack of RDF-related standards. We present an empirical survey of the blank nodes present in a large sample of RDF data published on the Web (the BTC dataset), where we find that 25.7\% of unique RDF terms are blank nodes, that 44.9\% of documents and 66.2\% of domains featured use of at least one blank node, and that aside from one Linked Data domain whose RDF data contains many ``blank node cycles'', the vast majority of blank nodes form tree structures that are efficient to compute simple entailment over. With respect to the RDF-merge of the full data, we show that 6.1\% of blank nodes are redundant under simple entailment. The vast majority of non-lean cases are isomorphisms resulting from multiple blank nodes with no discriminating information being given within an RDF document or documents being duplicated in multiple Web locations. Although simple entailment is NP-complete and leanness-checking is coNP-complete, in computing this latter result, we demonstrate that in practice, real-world RDF graphs are sufficiently ``rich'' in ground information for problematic cases to be avoided by non-naive algorithms.}, journal = JWS, year = 2014, url = {http://www.websemanticsjournal.org/index.php/ps/article/view/365}, Volume = {27}, pages = {42--69}, }
@incollection{umbr-etal-2014, author = {Umbrich, J\"urgen and Karnstedt, Marcel and Polleres, Axel and Sattler, Kai-Uwe}, title = {Index-Based Source Selection and Optimization}, editor = {Hose, Katja and Schenkel, Ralf and Harth, Andreas}, booktitle = {{L}inked {D}ata {M}anagement}, year = 2014, pages = {311--337}, month = may, publisher = {Chapman and Hall/CRC}, Type = BC, url = {https://www.amazon.com/Linked-Management-Emerging-Directions-Database/dp/1466582405} }
@inproceedings{poll-2013Dagstuhl13252, Abstract = {It is probably a good moment to take a step back to critically reflect on which puzzle pieces might be missing to achieve (even more) widespread adoption of Linked Data. Particularly, it seems that more than a few publishing principles and community enthusiasm are necessary to keep the idea of a Web-scale data ecosystem afloat. We outline some challenges and missing building blocks to complement the available standards for Linked Data towards a fully functioning ecosystem.}, author = {Axel Polleres}, url = {http://drops.dagstuhl.de/opus/volltexte/2013/4259/}, title = {Building Blocks for a Linked Data Ecosystem}, year = {2013}, booktitle = {Report from Dagstuhl Seminar 13252 Interoperation in Complex Information Ecosystems}, pages = {116--118}, note = {Position Statement}, Publisher = {Schloss Dagstuhl - Leibniz-Zentrum fuer Informatik}, day = {16--19}, month = jun, editor = {Andreas Harth and Craig A. Knoblock and Kai-Uwe Sattler and Rudi Studer} }
@inproceedings{stey-poll-2013OM, Abstract = {We present a novel architecture for combining off-the-shelf ontology matchers based on iterative calls and exchanging information in the form of reference alignments. Unfortunately though, only a few of the matchers contesting in the past years' OAEI campaigns actually allow the provision of reference alignments in the standard OAEI alignment format to support such a combined matching process. We bypass this lacking functionality by using simple URI replacement to ``emulate'' reference alignments in the aligned ontologies. While some matchers still consider classes and properties in ontologies aligned in such fashion as different, we experimentally prove that our iterative approach benefits from this emulation, achieving the best results in terms of F-measure on parts of the OAEI benchmark suite, compared to the single results of the competing matchers as well as their combined results. The new combined matcher -- Mix'n'Match -- integrates different matchers in a multi-threaded architecture and provides an anytime behavior in the sense that it can be stopped anytime with the best combined matchings found so far.}, Address = {Sydney, Australia}, Author = {Simon Steyskal and Axel Polleres}, Booktitle = {8th International Workshop on Ontology Matching}, Month = oct, day = 21, Publisher = {CEUR-WS.org}, Volume = 1111, Series = {CEUR Workshop Proceedings}, Title = {Mix'n'Match: Iteratively Combining Ontology Matchers in an Anytime Fashion}, Type = ws, url={http://ceur-ws.org/Vol-1111/om2013_poster3.pdf}, Year = 2013, note = {Extended version available at: \url{http://www.steyskal.info/om2013/extendedversion.pdf}} }
@inproceedings{ahme-poll-2013OrdRing, Abstract = {Processing the dynamic evolution of RDF stores has recently been standardized in the SPARQL 1.1 Update specification. However, computing answers entailed by ontologies in triple stores is usually treated orthogonally to updates. Even the W3C's recent SPARQL 1.1 Update language and SPARQL 1.1 Entailment Regimes specifications explicitly exclude a standard behavior for how SPARQL endpoints should treat entailment regimes other than simple entailment in the context of updates. In this paper, we take a first step to close this gap, by drawing from query rewriting techniques explored in the context of DL-Lite. We define a fragment of SPARQL basic graph patterns corresponding to (the RDFS fragment of) DL-Lite and the corresponding SPARQL Update language discussing possible semantics along with potential strategies for implementing them. We treat both (i) reduced RDF Stores, that is, redundancy-free RDF stores that do not store any RDF triples (corresponding to DL Lite ABox statements) entailed by others already, and (ii) materialized RDF stores, which store all entailed triples explicitly.}, Address = {Sydney, Australia}, Author = {Albin Ahmeti and Axel Polleres}, Booktitle = {2nd International Workshop on Ordering and Reasoning (OrdRing 2013)}, Month = oct, Publisher = {CEUR-WS.org}, Series = {CEUR Workshop Proceedings}, Title = {{SPARQL} Update under {RDFS} Entailment in Fully Materialized and Redundancy-Free Triple Stores}, Type = ws, Url = {http://www.polleres.net/publications/ahme-poll-2013OrdRing.pdf}, Year = 2013}
@inproceedings{guer-etal-2013OrdRing, Abstract = {The volume and diversity of data that is queriable via SPARQL and its increasing integration motivate the desire to query SPARQL information sources via the specification of preferred query outcomes. Such preference-based queries support the ordering of query outcomes with respect to a user's measure of the quality of the response. In this position paper we argue for the incorporation of preference queries into SPARQL. We propose an extension to the SPARQL query language that supports the specification of qualitative and quantitative preferences over query outcomes and examine the realization of the resulting preference-based queries via off-the-shelf SPARQL engines.}, Address = {Sydney, Australia}, Author = {Marina Gueroussova and Axel Polleres and Sheila A. McIlraith}, Editor = {Emanuele Della Valle and Markus Kr{\"o}tzsch and Stefan Schlobach and Irene Celino}, Booktitle = {2nd International Workshop on Ordering and Reasoning (OrdRing 2013)}, Month = oct, Note = {Position Paper. Technical Report version available at: \url{ftp://ftp.cs.toronto.edu/csrg-technical-reports/619/619.pdf}}, Title = {{SPARQL} with Qualitative and Quantitative Preferences}, Publisher = {CEUR-WS.org}, Series = {CEUR Workshop Proceedings}, Type = ws, Url = {http://www.polleres.net/publications/guer-etal-2013OrdRing.pdf}, Year = 2013, Bdsk-Url-1 = {http://www.polleres.net/publications/guer-etal-2013OrdRing.pdf}}
@inproceedings{bisc-etal-2013ISEM, Abstract = {Some cities publish data in an open form, but even more cities can profit from data that is already available as open or linked data. Unfortunately, open data from different sources usually also comes in different, heterogeneous data formats. With the City Data Pipeline we aim to integrate data about cities in a common data model by using Semantic Web technologies. Eventually we want to support city officials with their decisions by providing automated analytics support.}, Address = {Graz, Austria}, Author = {Stefan Bischof and Axel Polleres and Simon Sperl}, Booktitle = {Proceedings of the I-SEMANTICS 2013 Posters {\&} Demonstrations Track}, Day = {4--6}, Editor = {Steffen Lohmann}, Month = Sep, Pages = {45--49}, Publisher = {CEUR-WS.org}, Series = {CEUR Workshop Proceedings}, Title = {City Data Pipeline - A System for Making Open Data Useful for Cities}, Url = {http://ceur-ws.org/Vol-1026/paper10.pdf}, Volume = {1026}, Year = {2013}}
@inproceedings{stey-poll-2013ODBASE, Abstract = {The existence of a standardized ontology alignment format promoted by the Ontology Alignment Evaluation Initiative (OAEI) potentially enables different ontology matchers to be combined and used together. Along these lines, we present a novel architecture for combining ontology matchers based on iterative calls of off-the-shelf matchers that exchange information in the form of reference mappings in this standard alignment format. However, we argue that only a few of the matchers contesting in the past years' OAEI campaigns actually allow the provision of reference alignments to support the matching process. We bypass this lacking functionality by introducing an alternative approach for aligning results of different ontology matchers using simple URI replacement in the aligned ontologies. We experimentally prove that our iterative approach benefits from this emulation of reference alignments.}, Address = {Graz, Austria}, Author = {Simon Steyskal and Axel Polleres}, Booktitle = {12th International Conference on Ontologies, DataBases, and Applications of Semantics (ODBASE 2013)}, Day = {10--11}, Month = Sep, url = {http://link.springer.com/chapter/10.1007%2F978-3-642-41030-7_40}, Note = {Short paper.}, Publisher = {Springer}, Series = LNCS, Title = {Mix'n'Match: An Alternative Approach for Combining Ontology Matchers}, Type = CONF, Year = 2013}
@incollection{poll-etal-RW2013, Abstract = {Linked Data promises that a large portion of Web Data will be usable as one big interlinked RDF database against which structured queries can be answered. In this lecture we will show how reasoning -- using RDF Schema (RDFS) and the Web Ontology Language (OWL) -- can help to obtain more complete answers for such queries over Linked Data. We first look at the extent to which RDFS and OWL features are being adopted on the Web. We then introduce two high-level architectures for query answering over Linked Data and outline how these can be enriched by (lightweight) RDFS and OWL reasoning, enumerating the main challenges faced and discussing reasoning methods that make practical and theoretical trade-offs to address these challenges. In the end, we also ask whether or not RDFS and OWL are enough and discuss numeric reasoning methods that are beyond the scope of these standards but that are often important when integrating Linked Data from several, heterogeneous sources.}, Address = {Mannheim, Germany}, Author = {Axel Polleres and Aidan Hogan and Renaud Delbru and J\"urgen Umbrich}, Booktitle = {Reasoning Web. Semantic Technologies for Intelligent Data Access ({Reasoning Web 2013})}, Day = {29--02}, Editor = {Sebastian Rudolph and Georg Gottlob and Ian Horrocks and Frank van Harmelen}, Month = JUL, Pages = {91--149}, Publisher = {Springer}, Series = LNCS, Title = {{RDFS} \& {OWL} Reasoning for Linked Data}, Type = BC, Url = {http://www.polleres.net/publications/poll-etal-RW2013.pdf}, doi = {https://doi.org/10.1007/978-3-642-39784-4_2}, Volume = 8067, Year = 2013, Bdsk-Url-1 = {http://www.polleres.net/publications/poll-etal-RW2013.pdf}}
@inproceedings{poll-etal-LPNMR2013, Abstract = {When deploying Answer Set Programming (ASP) in an industrial context, for instance for (re-)configuration (Friedrich et al., 2011), knowledge engineers need debugging support on non-ground programs. Current approaches to ASP debugging, however, do not cover extended modeling features of ASP, such as choice rules, conditional cardinality and weight constraints. To this end, we encode non-ground ASP programs using extended modeling features into normal logic programs; this encoding extends existing encodings for the case of ground programs to the non-ground case. We subsequently deploy this translation in order to extend ASP debugging for non-ground normal logic programs. We have implemented and tested the approach and provide evaluation results.}, Address = {Corunna, Spain}, Author = {Axel Polleres and Melanie Fr\"uhst\"uck and Gottfried Schenner and Gerhard Friedrich}, Booktitle = {{Proceedings of the 12th International Conference on Logic Programming and Nonmonotonic Reasoning (LPNMR-2013)}}, Day = {15--19}, Editor = {Pedro Cabalar and Tran Cao Son}, Month = sep, Pages = {452--464}, Publisher = {Springer}, Series = LNCS, Title = {Debugging non-ground {ASP} programs with Choice Rules, Cardinality Constraints and Weight Constraints}, Url = {http://www.polleres.net/publications/poll-etal-LPNMR2013.pdf}, Volume = 8148, Year = 2013}
@article{poll-wall-2013JANCL, Abstract = {In the context of the emerging Semantic Web and the quest for a common logical framework underpinning its architecture, the relation of rule-based languages such as Answer Set Programming (ASP) and ontology languages such as OWL has attracted a lot of attention in the literature over the past years. With its roots in Deductive Databases and Datalog though, ASP shares much more commonality with another Semantic Web standard, namely the query language SPARQL. In this paper, we take the forthcoming approval of the SPARQL1.1 standard by the World Wide Web consortium (W3C) as an opportunity to introduce this standard to the Logic Programming community by providing a translation of SPARQL1.1 into ASP. In this translation, we explain and highlight peculiarities of the new W3C standard. Along the way, we survey existing literature on foundations of SPARQL and SPARQL1.1, and also combinations of SPARQL with ontology and rules languages. Thereby, apart from providing means to implement and support SPARQL natively within Logic Programming engines and particularly ASP engines, we hope to pave the way for further research on a common logical framework for Semantic Web languages, including query languages, from an ASP point of view.}, Author = {Axel Polleres and Johannes Wallner}, Editor = {Pedro Cabalar and Agust\'in Valverde}, Journal = {Journal of Applied Non-Classical Logics (JANCL)}, Note = {Special issue on Equilibrium Logic and Answer Set Programming}, Number = {1--2}, Pages = {159--212}, Publisher = {Taylor \& Francis}, Title = {On the relation between {SPARQL1.1} and Answer Set Programming}, doi = {10.1080/11663081.2013.798992}, Volume = {23}, Year = 2013, Bdsk-Url-1 = {http://www.tandfonline.com/doi/abs/10.1080/11663081.2013.798992}}
@inproceedings{bisc-poll-2013ESWC, Abstract = {In addition to taxonomic knowledge about concepts and properties typically expressible in languages such as RDFS and OWL, implicit information in an RDF graph may be likewise determined by arithmetic equations. The main use case here is exploiting knowledge about functional dependencies among numerical attributes expressible by means of such equations. While some of this knowledge can be encoded in rule extensions to ontology languages, we provide an arguably more flexible framework that treats attribute equations as first class citizens in the ontology language. The combination of ontological reasoning and attribute equations is realized by extending query rewriting techniques already successfully applied for ontology languages such as (the DL-Lite-fragment of) RDFS or OWL, respectively. We deploy this technique for rewriting SPARQL queries and discuss the feasibility of alternative implementations, such as rule-based approaches. }, Address = {Montpellier, France}, Author = {Stefan Bischof and Axel Polleres}, Booktitle = {The Semantic Web: Semantics and Big Data -- Proceedings of the 10th ESWC (ESWC2013)}, Day = {26--30}, Editor = {Philipp Cimiano and Oscar Corcho and Valentina Presutti and Laura Hollink and Sebastian Rudolph}, Month = may, Pages = {335--350}, Publisher = {Springer}, Series = LNCS, Title = {{RDFS} with Attribute Equations via {SPARQL} Rewriting}, Url = {http://www.polleres.net/publications/bisc-etal-2013ESWC.pdf}, Volume = 7882, Year = 2013, Bdsk-Url-1 = {http://www.polleres.net/publications/bisc-etal-2013ESWC.pdf}}
@incollection{poll-2013-agreement-technologies, Abstract = {In this chapter we discuss the relationship between Agreement Technologies and the Semantic Web, especially focusing on how Semantic Web standards play a role in the Agreement Technologies stack, but also issues related to Linked Data and the Web of Data. We start the chapter with an account of Semantic Web standards. Then the scientific foundations of Semantic Web standards are discussed. Finally, we relate the work on semantic technologies to other fields of Agreement Technologies, from the point of view of Semantic Web standards.}, Author = {Axel Polleres}, Booktitle = {Agreement Technologies}, Editor = {Sascha Ossowski}, Month = jan, Pages = {57--68}, Publisher = {Springer}, Series = {Law, Governance and Technology Series}, Title = {Agreement Technologies and the Semantic Web}, Type = BC, Url = {http://link.springer.com/chapter/10.1007/978-94-007-5583-3_4}, Volume = 8, Year = 2013, Bdsk-Url-1 = {http://link.springer.com/chapter/10.1007/978-94-007-5583-3_4}}
@incollection{saha-etal-2013, Abstract = {Semantic interoperability facilitates Health Care and Life Sciences (HCLS) systems in connecting stakeholders (e.g., patients, physicians, pharmacies) at various levels as well as ensures seamless use of healthcare resources (e.g., data, schema, applications). Their scope ranges from local (within, e.g., hospitals or hospital networks) to regional, national and cross-border. The use of semantics in delivering interoperable solutions for HCLS systems is weakened by the fact that an Ontology Based Information System (OBIS) has restrictions in modeling, aggregating, and interpreting global knowledge (e.g., terminologies for disease, drug, clinical event) in conjunction with local information (e.g., policy, profiles). This chapter presents an example scenario that shows such limitations and recognizes that enabling two key features, namely the type and scope of knowledge, within a knowledge base could enhance the overall effectiveness of an OBIS. We introduce the idea of separating knowledge bases by type (e.g., general or constraint knowledge) and scope (e.g., global or local) of applicability. Then, we propose two concrete solutions based on this general notion. Finally, we describe open research issues that may be of interest to knowledge system developers and the broader research community.}, Author = {Ratnesh Sahay and Antoine Zimmermann and Ronan Fox and Axel Polleres and Manfred Hauswirth}, Booktitle = {Interoperability in Healthcare Information Systems: Standards, Management, and Technology}, Editor = {Miguel-Angel Sicilia and Pablo Serrano-Balazote}, Month = feb, Publisher = {IGI Global}, Title = {A Formal Investigation of Semantic Interoperability of {HCLS} Systems}, Type = BC, Url = {http://www.igi-global.com/book/interoperability-healthcare-information-systems/70787}, Year = 2013}
@article{fern-etal-2013-HDT-JWS, Abstract = {The current Web of Data is producing increasingly large RDF data sets. Massive publication efforts of RDF data driven by initiatives like the Linked Open Data movement, and the need to exchange large data sets has unveiled the drawbacks of traditional RDF representations, inspired and designed by a document-centric and human-readable Web. Among the main problems are high levels of verbosity/redundancy and weak machine-processable capabilities in the description of these data sets. This scenario calls for efficient formats for publication and exchange. This article presents a binary RDF representation addressing these issues. Based on a set of metrics that characterizes the skewed structure of real-world RDF data, we develop a proposal of an RDF representation that modularly partitions and efficiently represents three components of RDF data sets: Header information, a Dictionary, and the actual Triples structure (thus called HDT). Our experimental evaluation shows that data sets in HDT format can be compacted by more than fifteen times as compared to current naive representations, improving both parsing and processing while keeping a consistent publication scheme. Specific compression techniques over HDT further improve these compression rates and prove to outperform existing compression solutions for efficient RDF exchange.}, Author = {Javier D. Fern{\'a}ndez and Miguel A. Mart{\'\i}nez-Prieto and Claudio Guti{\'e}rrez and Axel Polleres and Mario Arias}, Journal = JWS, Number = 2, Publisher = {Elsevier}, Title = {{Binary RDF Representation for Publication and Exchange (HDT)}}, Type = JOURNAL, Url = {http://www.polleres.net/publications/fern-etal-2013-HDT-JWS.pdf}, doi = {10.1016/j.websem.2013.01.002}, Volume = 19, Year = 2013, Bdsk-Url-1 = {http://www.websemanticsjournal.org/index.php/ps/article/view/328}}
@article{buil-etal-2013-fedsparql-JWS, Abstract = {Given the sustained growth that we are experiencing in the number of SPARQL endpoints available, the need to be able to send federated SPARQL queries across these has also grown. To address this use case, the W3C SPARQL working group is defining a federation extension for SPARQL 1.1 which allows for combining graph patterns that can be evaluated over several endpoints within a single query. In this paper, we describe the syntax of that extension and formalize its semantics. Additionally, we describe how a query evaluation system can be implemented for that federation extension, describing some static optimization techniques and reusing a query engine used for data-intensive science, so as to deal with large amounts of intermediate and final results. Finally we carry out a series of experiments that show that our optimizations speed up the federated query evaluation process.}, Author = {Carlos Buil-Aranda and Marcelo Arenas and Oscar Corcho and Axel Polleres}, Journal = JWS, Number = 1, Publisher = {Elsevier}, Title = {Federating Queries in {SPARQL1.1}: Syntax, Semantics and Evaluation}, Type = JOURNAL, Url = {http://www.websemanticsjournal.org/index.php/ps/article/view/321}, doi = {10.1016/j.websem.2012.10.001}, Volume = 18, Year = 2013}
@article{bois-etal-KER-2013, Abstract = {This paper integrates the responses to a set of questions from a distinguished set of panelists involved in a discussion at the Agreement Technologies workshop in Cyprus in December 2009. The panel was concerned with the relationship between the research areas of semantics, norms, and organizations, and the ways in which each may contribute to the development of the others in support of next generation agreement technologies.}, Author = {Olivier Boissier and Marco Colombetti and Michael Luck and John-Jules Meyer and Axel Polleres}, Journal = {The Knowledge Engineering Review}, Month = mar, Number = 1, Pages = {107--116}, Publisher = {Cambridge University Press}, Title = {Norms, Organizations, and Semantics}, doi = {10.1017/S0269888912000367}, Volume = 28, Year = 2013, }
@article{pich-etal-2013SWJ, Abstract = {Based on practical observations on rule-based inference on RDF data, we study the problem of redundancy elimination on RDF graphs in the presence of rules (in the form of Datalog rules) and constraints (in the form of so-called tuple-generating dependencies), and with respect to queries (ranging from conjunctive queries up to more complex ones, particularly covering features of SPARQL, such as union, negation, or filters). To this end, we investigate the influence of several problem parameters (like restrictions on the size of the rules, the constraints, and/or the queries) on the complexity of detecting redundancy. The main result of this paper is a fine-grained complexity analysis of both graph and rule minimisation in various settings.}, Author = {Reinhard Pichler and Axel Polleres and Sebastian Skritek and Stefan Woltran}, Journal = SWJ, Number = 4, Publisher = {IOS Press}, Title = {Complexity of redundancy detection on RDF graphs in the presence of rules, constraints, and queries}, Type = JOURNAL, Url = {http://www.semantic-web-journal.net/sites/default/files/swj229_0.pdf}, doi = {10.3233/SW-2012-0076}, Volume = 4, Year = 2013}
@inproceedings{ryab-etal-2012RR, Abstract = {Constraint-based configuration is -- on the one hand -- one of the classical problem domains in AI and also in industrial practice. Additional problems arise when configuration objects come from an open environment such as the Web, or in the case of a reconfiguration. On the other hand, (re)configuration is a reasoning task very much ignored in the current (Semantic) Web reasoning literature, despite (i) the increased availability of structured data on the Web, particularly due to movements such as the Semantic Web and Linked Data, and (ii) the fact that numerous practically relevant tasks involving Web data require (re)configuration. To bridge these gaps, we discuss the challenges and possible approaches for reconfiguration in an open Web environment, based on a practical use case leveraging Linked Data as a ``component catalog'' for configuration. In this paper, we present techniques to enhance existing review management systems with (re)configuration facilities and provide a practical evaluation.}, Address = {Vienna, Austria}, Author = {Anna Ryabokon and Axel Polleres and Gerhard Friedrich and Andreas Falkner and Alois Haselb{\"o}ck and Herwig Schreiner}, Booktitle = {Web Reasoning and Rule Systems -- 6th International Conference, RR2012}, Day = {10--12}, Editor = {Markus Kr\"otzsch and Umberto Straccia}, Month = SEP, Note = {Short paper}, Pages = {258--261}, Publisher = {Springer}, Series = LNCS, Title = {{(Re)Configuration using Web Data: a case study on the reviewer assignment problem}}, doi = {10.1007/978-3-642-33203-6_28}, Type = CONF, Url = {http://www.polleres.net/publications/ryab-etal-2012RR.pdf}, Volume = 7497, Year = 2012}
@inproceedings{umbr-etal-2012RR, Abstract = {Linked Data principles allow for processing SPARQL queries on-the-fly by dereferencing URIs. Link-traversal query approaches for Linked Data have the benefit of up-to-date results and decentralised execution, but operate only on explicit data from dereferenced documents, affecting recall. In this paper, we show how inferable knowledge -- specifically that found through owl:sameAs and RDFS reasoning -- can improve recall in this setting. We first analyse a corpus featuring 7 million Linked Data sources and 2.1 billion quadruples: we (1) measure expected recall by only considering dereferenceable information, (2) measure the improvement in recall given by considering rdfs:seeAlso links as previous proposals did. We further propose and measure the impact of additionally considering (3) owl:sameAs links, and (4) applying lightweight RDFS reasoning for finding more results, relying on static schema information. We evaluate different configurations for live queries covering different shapes and domains, generated from random walks over our corpus.}, Address = {Vienna, Austria}, Author = {J{\"u}rgen Umbrich and Aidan Hogan and Axel Polleres and Stefan Decker}, Booktitle = {Web Reasoning and Rule Systems -- 6th International Conference, RR2012}, Day = {10--12}, Editor = {Markus Kr\"otzsch and Umberto Straccia}, Month = SEP, Pages = {188--204}, Publisher = {Springer}, Series = LNCS, Title = {Improving the Recall of Live Linked Data Querying through Reasoning}, Type = CONF, doi = {10.1007/978-3-642-33203-6_14}, Url = {http://www.polleres.net/publications/umbr-etal-2012RR.pdf}, Volume = 7497, Year = 2012, Bdsk-Url-1 = {http://www.polleres.net/publications/umbr-etal-2012RR.pdf}}
@inproceedings{poll-2012datalog20, Abstract = {In this tutorial we will give an overview of the W3C standard query language for RDF -- SPARQL -- and its relation to Datalog, as well as of its interplay with another W3C standard closely related to Datalog, the Rule Interchange Format (RIF). As we will learn -- while these three interplay nicely on the surface and in academic research papers -- some details within the W3C specs impose challenges on seamlessly integrating Datalog rules and SPARQL.}, Author = {Axel Polleres}, Booktitle = {Datalog in Academia and Industry -- Second International Workshop, Datalog 2.0}, Day = {11--13}, Editor = {Pablo Barcel{\'o} and Reinhard Pichler}, Month = sep, Note = {Invited tutorial, slides available at \url{http://www.polleres.net/presentations/20120913Datalog20_Tutorial.pdf}}, Pages = {27--30}, Publisher = {Springer}, Series = LNCS, Title = {How (Well) Do Datalog, {SPARQL} and {RIF} Interplay?}, Type = CONF, Url = {http://link.springer.com/chapter/10.1007%2F978-3-642-32925-8_4}, Volume = 7494, Year = 2012}
@inproceedings{lope-etal-2012iclp, Abstract = {The Resource Description Framework (RDF) is an interoperable data representation format suitable for interchange and integration of data, especially in Open Data contexts. However, RDF is also becoming increasingly attractive in scenarios involving sensitive data, where data protection is a major concern. At its core, RDF does not support any form of access control and current proposals for extending RDF with access control do not fit well with the RDF representation model. Considering an enterprise scenario, we present a modelling that caters for access control over the stored RDF data in an intuitive and transparent manner. For this paper we rely on Annotated RDF, which introduces concepts from Annotated Logic Programming into RDF. Based on this model of the access control annotation domain, we propose a mechanism to manage permissions via application-specific logic rules. Furthermore, we illustrate how our Annotated Query Language (AnQL) provides a secure way to query this access control annotated RDF data.}, Address = {Budapest, Hungary}, Author = {Nuno Lopes and Sabrina Kirrane and Antoine Zimmermann and Axel Polleres and Alessandra Mileo}, Booktitle = {Technical Communications of the ICLP 2012}, Day = {4--8}, Editor = {Agostino Dovier and V{\'\i}tor Santos Costa}, Month = Sep, Note = {Short paper.}, Pages = {381--392}, Publisher = {Schloss Dagstuhl - Leibniz-Zentrum fuer Informatik}, Series = {LIPIcs}, Title = {{A Logic Programming approach for Access Control over RDF}}, Type = CONF, Url = {http://www.polleres.net/publications/lope-etal-2012iclp.pdf}, Volume = {17}, Year = {2012}, Bdsk-Url-1 = {http://www.polleres.net/publications/lope-etal-2012iclp.pdf}}
@article{bisc-etal-2012JoDS, Abstract = {One promise of Semantic Web applications is to seamlessly deal with heterogeneous data. The Extensible Markup Language (XML) has become widely adopted as an almost ubiquitous interchange format for data, along with transformation languages like XSLT and XQuery to translate data from one XML format into another. However, the more recent Resource Description Framework (RDF) has become another popular standard for data representation and exchange, supported by its own query language SPARQL, that enables extraction and transformation of RDF data. Being able to work with XML and RDF using a common framework eliminates several unnecessary steps that are currently required when handling both formats side by side. In this paper we present the XSPARQL language that, by combining XQuery and SPARQL, allows querying XML and RDF data using the same framework and transforming data from one format into the other. We focus on the semantics of this combined language and present an implementation, including discussion of query optimisations along with benchmark evaluation.}, Author = {Stefan Bischof and Stefan Decker and Thomas Krennwallner and Nuno Lopes and Axel Polleres}, Journal = JoDS, url = {http://link.springer.com/article/10.1007%2Fs13740-012-0008-7}, Number = 3, Pages = {147--185}, Publisher = {Springer}, Title = {Mapping between {RDF} and {XML} with {XSPARQL}}, Volume = 1, Year = 2012}
@inproceedings{glim-etal-2012LDOW, Abstract = {Seven years on from OWL becoming a W3C recommendation, and two years on from the more recent OWL 2 W3C recommendation, OWL has still experienced only patchy uptake on the Web. Although certain OWL features (like owl:sameAs) are very popular, other features of OWL are largely neglected by publishers in the Linked Data world. This may suggest that despite the promise of easy implementations and the proposal of tractable profiles suggested in OWL's second version, there is still no ``right'' standard fragment for the Linked Data community. In this paper, we (1) analyse uptake of OWL on the Web of Data, (2) gain insights into the OWL fragment that is actually used/usable on the Web, where we arrive at the conclusion that this fragment is likely to be a simplified profile based on OWL RL, (3) propose and discuss such a new fragment, which we call OWL LD (for Linked Data).}, Address = {Lyon, France}, Author = {Birte Glimm and Aidan Hogan and Markus Kr{\"o}tzsch and Axel Polleres}, Booktitle = {WWW2012 Workshop on Linked Data on the Web (LDOW2012)}, Month = apr, Title = {{OWL}: Yet to arrive on the Web of Data?}, Type = WS, Url = {http://www.polleres.net/publications/glim-etal-2012LDOW.pdf}, Year = 2012}
@inproceedings{kaef-etal-2012LDOW, Abstract = {We describe work-in-progress on the design and methodology of the Dynamic Linked Data Observatory: a framework to monitor Linked Data over an extended period of time. The core goal of our work is to collect frequent, continuous snapshots of a subset of the Web of Data that is interesting for further study and experimentation, with an aim to capture raw data about the dynamics of Linked Data. The resulting corpora will be made openly and continuously available to the Linked Data research community. Herein, we (1) motivate the importance of such a corpus; (2) outline some of the use-cases and requirements for the resulting snapshots; (3) discuss different ``views'' of the Web of Data which affect how we define a sample to monitor; (4) detail how we select the scope of the monitoring experiment through sampling, (5) discuss the final design of the monitoring framework which will capture regular snapshots of (subsets of) the Web of Data over the coming months and years.}, Address = {Lyon, France}, Author = {Tobias K{\"a}fer and J{\"u}rgen Umbrich and Aidan Hogan and Axel Polleres}, Booktitle = {WWW2012 Workshop on Linked Data on the Web (LDOW2012)}, Month = apr, Title = {Towards a Dynamic Linked Data Observatory}, Type = WS, Url = {http://www.polleres.net/publications/kaef-etal-2012LDOW.pdf}, Year = 2012}
@inproceedings{umbr-etal-2012DESWEB, Abstract = {Enabling the ``Web of Data'' has recently gained increased attention, particularly driven by the success of Linked Data. The agreed need for technologies from the database domain is therein often referred to as the ``Web as a Database'', a concept that is still more a vision than a reality. Meanwhile, the database community proposed the notion of dataspaces managed by support platforms, as an alternative view on the data management problem for small-scale, loosely connected environments of heterogeneous data sources. The Web of Data can actually be seen as a collection of inter-connected dataspaces. In this work, we propose a combination of Linked Data and database technologies to provide support platforms for these Web dataspaces. We argue that while separated, Linked Data still lacks database technology and the dataspace idea lacks openness and scale. We put particular focus on the challenge of how to index, search and query structured data on the Web in a way that is appropriate for its dynamic, heterogeneous, loosely connected, and open character. Based on an empirical study, we argue that none of the two extremes on its own -- centralised repositories vs. on-demand distributed querying -- can meet all requirements. We propose and discuss an alternative hybrid approach combining the best of both sides to find a better tradeoff between result freshness and fast query response times.}, Address = {Washington DC, USA}, Author = {J\"urgen Umbrich and Marcel Karnstedt and Josiane Xavier Parreira and Axel Polleres and Manfred Hauswirth}, Booktitle = {Proceedings of the 3rd International Workshop on Data Engineering Meets the Semantic Web (DESWEB), co-located with ICDE2012}, Day = {23--27}, Month = apr, Title = {{Linked Data and Live Querying for Enabling Support Platforms for Web Dataspaces}}, Type = WS, Url = {http://www.polleres.net/publications/umbr-etal-2012DESWEB.pdf}, Year = 2012}
@article{hoga-etal-2012-ldstudy-JWS, Abstract = {There has been a recent, tangible growth in RDF published on the Web in accordance with the Linked Data principles and best practices, the result of which has been dubbed the ``Web of Data''. Linked Data guidelines are designed to facilitate ad hoc re-use and integration of conformant structured data -- across the Web -- by consumer applications; however, thus far, systems have yet to emerge that convincingly demonstrate the potential applications for consuming currently available Linked Data. Herein, we compile a list of fourteen concrete guidelines as given in the ``How to Publish Linked Data on the Web'' tutorial. Thereafter, we evaluate conformance of current RDF data providers with respect to these guidelines. Our evaluation is based on quantitative empirical analyses of a crawl of ~4 million RDF/XML documents constituting over 1 billion quadruples, where we also look at the stability of hosted documents for a corpus consisting of nine monthly snapshots from a sample of 151 thousand documents. Backed by our empirical survey, we provide insights into the current level of conformance with respect to various Linked Data guidelines, enumerating lists of the most (non-)conformant data providers. We show that certain guidelines are broadly adhered to (esp. use HTTP URIs, keep URIs stable), whilst others are commonly overlooked (esp. provide licencing and human-readable meta-data). We also compare PageRank scores for the data-providers and their conformance to Linked Data guidelines, showing that both factors negatively correlate for guidelines restricting use of RDF features, while positively correlating for guidelines encouraging external linkage and vocabulary re-use. Finally, we present a summary of conformance for the different guidelines, and present the top-ranked data providers in terms of a combined PageRank and Linked Data conformance score.}, Author = {Aidan Hogan and J{\"u}rgen Umbrich and Andreas Harth and Richard Cyganiak and Axel Polleres and Stefan Decker}, Journal = JWS, Month = jul, Pages = {14--44}, Projects = {lion2}, Publisher = {Elsevier}, Title = {An empirical survey of Linked Data conformance}, Type = JOURNAL, Url = {http://aidanhogan.com/docs/ldstudy12.pdf}, Volume = 14, Year = 2012}
@article{hoga-etal-2011-ent-cons-JWS, Abstract = {With respect to large-scale, static, Linked Data corpora, in this paper we discuss scalable and distributed methods for: (i) entity consolidation---identifying entities that signify the same referent, aka. smushing, entity resolution, object consolidation, etc.---using explicit owl:sameAs relations; (ii) extended entity consolidation based on a subset of OWL 2 RL/RDF rules---particularly over inverse-functional properties, functional-properties and (max-)cardinality restrictions with value one; (iii) deriving weighted concurrence measures between entities in the corpus based on shared inlinks/outlinks and attribute values using statistical analyses; (iv) disambiguating (initially) consolidated entities based on inconsistency detection using OWL 2 RL/RDF rules. Our methods are based upon distributed sorts and scans of the corpus, where we purposefully avoid the requirement for indexing all data. Throughout, we offer evaluation over a diverse Linked Data corpus consisting of 1.118 billion quadruples derived from a domain-agnostic, open crawl of 3.985 million RDF/XML Web documents, demonstrating the feasibility of our methods at that scale, and giving insights into the quality of the results for real-world data.}, Author = {Aidan Hogan and Antoine Zimmermann and J{\"u}rgen Umbrich and Axel Polleres and Stefan Decker}, Journal = JWS, Month = jan, Pages = {76--110}, Projects = {lion2}, Publisher = {Elsevier}, Title = {Scalable and distributed methods for entity matching, consolidation and disambiguation over linked data corpora}, doi = {10.1016/j.websem.2011.11.002}, Type = JOURNAL, Volume = {10}, Year = 2012 }
@article{zimm-etal-2012-JWS, Abstract = {We describe a generic framework for representing and reasoning with annotated Semantic Web data, a task becoming more important with the recent increased amount of inconsistent and non-reliable meta-data on the web. We formalise the annotated language, the corresponding deductive system and address the query answering problem. Previous contributions on specific RDF annotation domains are encompassed by our unified reasoning formalism as we show by instantiating it on (i) temporal, (ii) fuzzy, and (iii) provenance annotations. Moreover, we provide a generic method for combining multiple annotation domains allowing one to represent, e.g., temporally-annotated fuzzy RDF. Furthermore, we address the development of a query language -- AnQL -- that is inspired by SPARQL, including several features of SPARQL 1.1 (subqueries, aggregates, assignment, solution modifiers) along with the formal definitions of their semantics.}, Author = {Antoine Zimmermann and Nuno Lopes and Axel Polleres and Umberto Straccia}, Journal = JWS, Month = mar, Pages = {72--95}, Projects = {lion2}, Publisher = {Elsevier}, Title = {A General Framework for Representing, Reasoning and Querying with Annotated Semantic Web Data}, Type = JOURNAL, Url = {http://polleres.net/publications/zimm-etal-2012-JWS.pdf}, Volume = 12, Year = 2012, Bdsk-Url-1 = {http://www.sciencedirect.com/science/article/pii/S1570826811000771}}
@inproceedings{mall-etal-2011ISWC, Abstract = {Blank nodes are defined in RDF as `existential variables' in the same way that has been used before in mathematical logic. However, evidence suggests that actual usage of RDF does not follow this definition. In this paper we thoroughly cover the issue of blank nodes, from incomplete information in database theory, over different treatments of blank nodes across the W3C stack of RDF-related standards, to empirical analysis of RDF data publicly available on the Web. We then summarize alternative approaches to the problem, weighing up advantages and disadvantages, also discussing proposals for Skolemization.}, Address = {Bonn, Germany}, Author = {Alejandro Mallea and Marcelo Arenas and Aidan Hogan and Axel Polleres}, Booktitle = {Proceedings of the 10th International Semantic Web Conference (ISWC 2011)}, Day = {23--27}, Month = oct, Note = {\textbf{Nominated for best paper award}}, Pages = {421--437}, Publisher = {Springer}, Series = LNCS, Title = {{On Blank Nodes}}, Type = CONF, Url = {http://www.polleres.net/publications/mall-etal-2011ISWC.pdf}, Volume = 7031, Year = 2011}
@book{poll-etal-2011RW, Abstract = {The Reasoning Web Summer School has become a well-established event in the area of applications of reasoning techniques on the Web both targeting scientific discourse of established researchers and attracting young researchers to this emerging field. After the previous successful editions in Malta (2005), Lisbon (2006), Dresden (2007 and 2010), Venice (2008), and Bressanone-Brixen (2009), this year's edition moved to the west of Ireland, hosted by the Digital Enterprise Research Institute (DERI) at the National University of Ireland, Galway. By co-locating this year's summer school with the 5th International Conference on Web Reasoning and Rule Systems (RR2011) we hope to have further promoted interaction between researchers, practitioners and students. The 2011 school programme focused around the central topic of applications of Reasoning for the emerging ``Web of Data'', with twelve exciting lectures.}, Address = {Galway, Ireland}, Day = {23--27}, Editor = {Axel Polleres and Claudia D'Amato and Marcelo Arenas and Siegfried Handschuh and Paula Kroner and Sascha Ossowski and Peter Patel-Schneider}, Month = AUG, Projects = {lion2,net2}, Publisher = {Springer}, Series = LNCS, Title = {Reasoning Web. Semantic Technologies for the Web of Data. ({Reasoning Web 2011})}, Type = BOOK, Url = {http://www.springerlink.com/content/978-3-642-23031-8/}, Volume = 6848, Year = 2011, Bdsk-Url-1 = {http://www.springerlink.com/content/978-3-642-23031-8/}}
@incollection{hoga-etal-2011RW, Abstract = {The goal of the Scalable OWL 2 Reasoning for Linked Data lecture is twofold: first, to introduce scalable reasoning and querying techniques to Semantic Web researchers as powerful tools to make use of Linked Data and large-scale ontologies, and second, to present interesting research problems for the Semantic Web that arise in dealing with TBox and ABox reasoning in OWL 2. The lecture consists of three parts. The first part will begin with an introduction and motivation for reasoning over Linked Data, including a survey of the use of RDFS and OWL on the Web. The second part will present a scalable, distributed reasoning service for instance data, applying a custom subset of OWL 2 RL/RDF rules (based on a tractable fragment of OWL 2). The third part will present recent work on faithful approximate reasoning for OWL 2 DL. The lecture will include our implementation of the mentioned techniques as well as their evaluations. These notes provide complementary reference material for the lecture, and follow the three-part structure and content of the lecture.}, Address = {Galway, Ireland}, Author = {Aidan Hogan and Jeff Z. Pan and Axel Polleres and Yuan Ren}, Booktitle = {Reasoning Web. Semantic Technologies for the Web of Data. ({Reasoning Web 2011})}, Day = {23--27}, Editor = {Axel Polleres and Claudia D'Amato and Marcelo Arenas and Siegfried Handschuh and Paula Kroner and Sascha Ossowski and Peter Patel-Schneider}, Month = AUG, Pages = {250--325}, Publisher = {Springer}, Series = LNCS, Title = {Scalable {OWL 2} Reasoning for Linked Data}, Type = BC, Url = {http://aidanhogan.com/docs/rw_2011.pdf}, Volume = 6848, Year = 2011}
@incollection{hoga-etal-2011IGI, Abstract = {In this chapter, the authors discuss the challenges of performing reasoning on large scale RDF datasets from the Web. Using ter-Horst's pD* fragment of OWL as a base, the authors compose a rule-based framework for application to Web data: they argue their decisions using observations of undesirable examples taken directly from the Web. The authors further temper their OWL fragment through consideration of ``authoritative sources'' which counteracts an observed behaviour which they term ``ontology hijacking'': new ontologies published on the Web re-defining the semantics of existing entities resident in other ontologies. They then present their system for performing rule-based forward-chaining reasoning which they call SAOR: Scalable Authoritative OWL Reasoner. Based upon observed characteristics of Web data and reasoning in general, they design their system to scale: the system is based upon a separation of terminological data from assertional data and comprises a lightweight in-memory index, on-disk sorts and file-scans. The authors evaluate their methods on a dataset in the order of a hundred million statements collected from real-world Web sources and present scale-up experiments on a dataset in the order of a billion statements collected from the Web. In this republished version, the authors also present extended discussion reflecting upon recent developments in the area of scalable RDFS/OWL reasoning, some of which has drawn inspiration from the original publication (Hogan, et al., 2009).}, Author = {Aidan Hogan and Andreas Harth and Axel Polleres}, Booktitle = {Semantic Services, Interoperability and Web Applications: Emerging Concepts}, Editor = {Amit Sheth}, Month = jun, Note = {Invited re-publication}, Pages = {131-177}, Publisher = {IGI Global}, Title = {Scalable Authoritative OWL Reasoning for the Web}, Type = BC, Url = {http://www.igi-global.com/bookstore/titledetails.aspx?titleid=47114&detailstype=chapters}, Year = 2011}
@inproceedings{dumi-etal-2011CONTEXT, Abstract = {In pervasive environments, presence-based application development via Presence Management Systems (PMSs) is a key factor to optimise the management of communication channels, driving productivity increase. Solutions for presence management should satisfy the interoperability requirements, in turn providing context-centric presence analysis and privacy management. In order to push PMSs towards flexible, open and context-aware presence management, we propose some adaptation of two extensions to standard XML-based XMPP for message exchange in online communication systems. The contribution allows for more complex specification and management of nested group and privacy lists, where semantic technologies are used to map all messages into RDF vocabularies and pave the way for a broader semantic integration of heterogeneous and distributed presence information sources in the standard PMSs framework.}, Address = {Karlsruhe, Germany}, Author = {Anca Dumitrache and Alessandra Mileo and Antoine Zimmermann and Axel Polleres and Philipp Obermeier and Owen Friel}, Booktitle = {7th International and Interdisciplinary Conference on Modeling and Using Context 2011 (CONTEXT'11)}, Day = {26--30}, Month = SEP, Title = {Enabling Privacy-Preserving Semantic Presence in Instant Messaging Systems}, Type = CONF, Url = {http://www.polleres.net/publications/dumi-etal-2011CONTEXT.pdf}, Year = 2011, Bdsk-Url-1 = {http://www.polleres.net/publications/dumi-etal-2011CONTEXT.pdf}}
@inproceedings{delb-etal-2011RR, Abstract = {The Sindice Semantic Web index today provides search capabilities over more than 220 million documents. Reasoning over web data enables us to make explicit what would otherwise be implicit knowledge: it adds value to the information and enables Sindice to ultimately be more competitive in terms of precision and recall. However, due to the scale and heterogeneity of web data, a reasoning engine for the Sindice system must (1) scale out through parallelisation over a cluster of machines; and (2) cope with unexpected data usage. In this paper, we report our experiences and lessons learnt in building a large scale reasoning engine for Sindice. The reasoning approach has been deployed, used and improved since 2008 within Sindice and has enabled Sindice to reason over billions of triples. First, we introduce our notion of context-dependent reasoning for RDF entities published on the Web according to the linked data principle. We then illustrate an efficient methodology to perform context-dependent RDFS and partial OWL inference based on a persistent TBox composed of a network of web ontologies. Finally we report performance evaluation results of our implementation underlying the Sindice web data index.}, Address = {Galway, Ireland}, Author = {Renaud Delbru and Giovanni Tummarello and Axel Polleres}, Booktitle = {Web Reasoning and Rule Systems -- Fifth International Conference, RR2011}, Day = {29--30}, Month = AUG, Pages = {46--60}, Publisher = {Springer}, Series = LNCS, Title = {Context-Dependent {OWL} Reasoning in Sindice - Experiences and Lessons Learnt}, Type = CONF, Url = {http://renaud.delbru.fr/doc/pub/rr2011-sindice.pdf}, Volume = 6902, Year = 2011}
@inproceedings{bisc-etal-2011RR, Abstract = {XSPARQL is a language to transform data between the tree-based XML format and the graph-based RDF format. XML is a widely adopted data exchange format which brings its own query language XQuery along. RDF is the standard data format of the Semantic Web with SPARQL being the corresponding query language. XSPARQL combines XQuery and SPARQL to a unified query language which provides a more intuitive and maintainable way to translate data between the two data formats. A naive implementation of XSPARQL can be inefficient when evaluating nested queries. However, such queries occur often in practice when dealing with XML data. We present and compare several approaches to optimise nested queries. By implementing these optimisations we improve efficiency up to two orders of magnitude in a practical evaluation.}, Address = {Galway, Ireland}, Author = {Stefan Bischof and Nuno Lopes and Axel Polleres}, Booktitle = {Web Reasoning and Rule Systems -- Fifth International Conference, RR2011}, Day = {29--30}, Month = AUG, Note = {Short paper}, Pages = {232--237}, Publisher = {Springer}, Series = LNCS, Title = {Improve Efficiency of Mapping Data between {XML} and {RDF} with {XSPARQL}}, Type = DEMO, Url = {http://www.polleres.net/publications/bisc-etal-2011RR.pdf}, Volume = 6902, Year = 2011, Bdsk-Url-1 = {http://www.polleres.net/publications/bisc-etal-2011RR.pdf}}
@article{hoga-etal-2011-swse-JWS, Abstract = {In this paper, we discuss the architecture and implementation of the Semantic Web Search Engine (SWSE). Following traditional search engine architecture, SWSE consists of crawling, data enhancing, indexing and a user interface for search, browsing and retrieval of information; unlike traditional search engines, SWSE operates over RDF Web data -- loosely also known as Linked Data -- which implies unique challenges for the system design, architecture, algorithms, implementation and user interface. In particular, many challenges exist in adopting Semantic Web technologies for Web data: the unique challenges of the Web -- in terms of scale, unreliability, inconsistency and noise -- are largely overlooked by the current Semantic Web standards. Herein, we describe the current SWSE system, initially detailing the architecture and later elaborating upon the function, design, implementation and performance of each individual component. In so doing, we also give an insight into how current Semantic Web standards can be tailored, in a best-effort manner, for use on Web data. Throughout, we offer evaluation and complementary argumentation to support our design choices, and also offer discussion on future directions and open research questions. Later, we also provide candid discussion relating to the difficulties currently faced in bringing such a search engine into the mainstream, and lessons learnt from roughly six years working on the Semantic Web Search Engine project.}, Author = {Aidan Hogan and Andreas Harth and J\"urgen Umbrich and Sheila Kinsella and Axel Polleres and Stefan Decker}, Journal = JWS, Number = 4, Pages = {365--401}, Projects = {lion2}, Publisher = {Elsevier}, Title = {Searching and Browsing Linked Data with {SWSE}: The Semantic Web Search Engine}, Type = JOURNAL, Url = {http://www.sciencedirect.com/science/article/pii/S1570826811000473}, Volume = 9, Year = 2011, Bdsk-Url-1 = {http://www.sciencedirect.com/science/article/pii/S1570826811000473}}
@inproceedings{lope-etal-2011COLA, Abstract = {XSPARQL is a transformation and query language that caters for heterogeneous sources: in its present state it is possible to transform data between XML and RDF formats due to the integration of the XQuery and SPARQL query languages. In this paper we propose an extension of the XSPARQL language to incorporate data contained in relational databases by integrating a subset of SQL in the syntax of XSPARQL. Exposing data contained in relational databases as RDF is a necessary step towards the realisation of the Semantic Web and Web of Data. We present the syntax of an extension of the XSPARQL language catering for the inclusion of the SQL query language along with the semantics based on the XQuery formal semantics and sketch how this extended XSPARQL language can be used to expose RDB2RDF mappings, as currently being discussed in the W3C RDB2RDF Working Group.}, Address = {Lisbon, Portugal}, Author = {Nuno Lopes and Stefan Bischof and Axel Polleres}, Booktitle = {Proceedings of the 15th Portuguese Conference on Artificial Intelligence (EPIA2011) -- Computational Logic with Applications Track}, Day = {10--13}, Month = oct, Projects = {lion2}, Title = {On the Semantics of Heterogeneous Querying of Relational, {XML}, and {RDF} Data with {XSPARQL}}, Type = CONF, Url = {http://www.polleres.net/publications/lope-etal-2011EPIA.pdf}, Year = 2011, Bdsk-Url-1 = {http://www.polleres.net/publications/lope-etal-2011EPIA.pdf}}
@inproceedings{hart-etal-2011w3c, Abstract = {A sizable amount of data on the Web is currently available via Web APIs that expose data in formats such as JSON or XML. Combining data from different APIs and data sources requires glue code which is typically not shared and hence not reused. We derive requirements for a mechanism that brings data and functionality currently available via ad-hoc APIs into a coherent framework. Such standardised access to content and functionality would reduce the effort for data integration and the combination of service functionality, leading to reduced effort in composing data and services from multiple providers.}, Address = {Bedford, MA, USA}, Author = {Harth, Andreas and Norton, Barry and Polleres, Axel and Sapkota, Brahmananda and Speiser, Sebastian and Stadtm\"uller, Steffen and Suominen, Osma}, Booktitle = {W3C Workshop on Data and Services Integration}, Day = {20--21}, Month = OCT, Title = {Towards Uniform Access to Web Data and Services}, Type = WS, Url = {http://www.w3.org/2011/10/integration-workshop/p/paper.pdf}, Year = 2011, Bdsk-Url-1 = {http://www.w3.org/2011/10/integration-workshop/p/paper.pdf}}
@inproceedings{saha-etal-2011SALUS, Abstract = {Healthcare applications are complex in the way data and schemas are organised in their internal systems. Widely deployed healthcare standards like Health Level Seven (HL7) V2 are designed using flexible schemas which allow several choices when constructing clinical messages. The recently emerged HL7 V3 has a centrally consistent information model that controls terminologies and concepts shared by V3 applications. V3 information models are arranged in several layers (abstract to concrete layers). V2 and V3 systems raise interoperability challenges: firstly, how to exchange clinical messages between V2 and V3 applications, and secondly, how to integrate globally defined clinical concepts with locally constructed concepts. The use of ontologies for interoperable healthcare applications has been advocated by domain and knowledge representation specialists. This paper addresses two main areas of an ontology-based integration framework: (1) an ontology building methodology for the HL7 standard where ontologies are developed in separated global and local layers; and (2) aligning V2 and V3 ontologies. We propose solutions that: (1) provide a semi-automatic mechanism to build HL7 ontologies; (2) provide a semi-automatic mechanism to align HL7 ontologies and transform underlying clinical messages. The proposed methodology has produced HL7 ontologies averaging 300 concepts for each version. These ontologies and their alignments are deployed and evaluated under a semantically-enabled healthcare integration framework.}, Address = {Vienna, Austria}, Author = {Ratnesh Sahay and Ronan Fox and Antoine Zimmermann and Axel Polleres and Manfred Hauswirth}, Booktitle = {MISI (Massive Information Sharing and Integration) Conference - Special Track on Electronic Healthcare (SALUS 2011)}, Day = {22--26}, Month = AUG, Projects = {lion2}, Title = {A Methodological Approach for Ontologising and Aligning Health Level Seven ({HL7}) Applications}, Type = CONF, Url = {http://www.polleres.net/publications/saha-etal-2011SALUS.pdf}, Year = 2011, Bdsk-Url-1 = {http://www.polleres.net/publications/saha-etal-2011SALUS.pdf}}
@phdthesis{poll-2011habil, Abstract = {The Semantic Web is about to grow up. Over the last few years technologies and standards to build up the architecture of this next generation of the Web have matured and are being deployed on large scale in many live Web sites. The underlying technology stack of the Semantic Web consists of several standards endorsed by the World Wide Web consortium (W3C) that provide the formal underpinnings of a machine-readable ``Web of Data'': (i) the eXtensible Markup Language (XML) as a uniform exchange syntax; (ii) the Resource Description Framework (RDF) as a uniform data exchange format; (iii) RDF Schema and the Web Ontology Language (OWL) for describing ontologies; (iv) the Rule Interchange Format (RIF) to exchange rules; (v) XQuery and SPARQL as query and transformation languages. The present habilitation thesis comprises a collection of articles reflecting the author's contribution in addressing a number of relevant research problems to close gaps in the Semantic Web architecture regarding the theoretical and practical interplay of these standards.}, Address = {{Vienna, Austria}}, Author = {Axel Polleres}, Month = MAR, Note = {Cumulative habilitation thesis for obtaining the venia docendi in the subject ``Informationssysteme'' (Information Systems)}, School = {{Vienna University of Technology}}, Title = {{Semantic Web Technologies: From Theory to Practice}}, Type = THESIS, Url = {http://www.polleres.net/publications/habilitation.pdf}, Year = 2011, Bdsk-Url-1 = {http://www.polleres.net/publications/habilitation.pdf}}
@article{hoga-etal-2011-saor-ann-JWS, Abstract = {In this paper, we leverage annotated logic programs for tracking indicators of provenance and trust during reasoning, specifically focussing on the use-case of applying a scalable subset of OWL 2 RL/RDF rules over static corpora of arbitrary Linked Data (Web data). Our annotations encode three facets of information: (i) \textit{blacklist}: a (possibly manually generated) boolean annotation which indicates that the referent data are known to be harmful and should be ignored during reasoning; (ii) \textit{ranking}: a numeric value derived by a PageRank-inspired technique---adapted for Linked Data---which determines the centrality of certain data artefacts (such as RDF documents and statements); (iii) \textit{authority}: a boolean value which uses Linked Data principles to \textit{conservatively} determine whether or not some terminological information can be trusted. We formalise a logical framework which annotates inferences with the \textit{strength} of derivation along these dimensions of trust and provenance; we formally demonstrate some desirable properties of the deployment of annotated logic programming in our setting, which guarantees (i) a unique minimal model (least fixpoint); (ii) monotonicity; (iii) finitariness; and (iv) decidability. In so doing, we also give some formal results which reveal strategies for scalable and efficient implementation of various reasoning tasks one might consider. Thereafter, we discuss scalable and distributed implementation strategies for applying our ranking and reasoning methods over a cluster of commodity hardware; throughout, we provide evaluation of our methods over 1 billion Linked Data quadruples crawled from approximately 4 million individual Web documents, empirically demonstrating the scalability of our approach, and how our annotation values help ensure a more robust form of reasoning. We finally sketch, discuss and evaluate a use-case for a simple repair of inconsistencies detectable within OWL 2 RL/RDF constraint rules using ranking annotations to detect and defeat the ``marginal view'', and in so doing, infer an empirical ``consistency threshold'' for the Web of Data in our setting.}, Author = {Aidan Hogan and Piero Bonatti and Axel Polleres and Luigi Sauro}, Journal = JWS, Number = 2, Pages = {165--201}, Projects = {lion2}, Publisher = {Elsevier}, Title = {Robust and Scalable Linked Data Reasoning Incorporating Provenance and Trust Annotations}, Type = JOURNAL, Url = {http://aidanhogan.com/docs/saor_ann_final.pdf}, Volume = 9, Year = 2011, Bdsk-Url-1 = {http://aidanhogan.com/docs/saor_ann_final.pdf}}
@misc{fern-etal-2011-hdt-W3C, Abstract = {RDF HDT (Header-Dictionary-Triples) is a binary format for publishing and exchanging RDF data at large scale. RDF HDT represents RDF in a compact manner, natively supporting splitting huge RDF graphs into several chunks. It is designed to allow high compression rates. This is achieved by organizing and representing the RDF graph in terms of two main components: Dictionary and Triples structure. The Dictionary organizes all vocabulary present in the RDF graph in a manner that permits rapid search and high levels of compression. The Triples component comprises the pure structure of the underlying graph in a compressed form. An additional and RECOMMENDED Header component includes extensible metadata describing the RDF data set and its organization. Further, the document specifies how to efficiently translate between HDT and other RDF representation formats, such as Notation 3.}, Author = {Javier D. Fern{\'a}ndez and Miguel A. Mart{\'\i}nez-Prieto and Claudio Gutierrez and Axel Polleres}, Day = 30, Month = MAR, Note = {W3C member submission}, Title = {{Binary RDF Representation for Publication and Exchange (HDT)}}, Url = {http://www.w3.org/Submission/2011/SUBM-HDT-20110330/}, Year = 2011, Bdsk-Url-1 = {http://www.w3.org/Submission/2011/SUBM-HDT-20110330/}}
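A toy sketch of the Dictionary/Triples split that the HDT submission above describes: RDF terms are replaced by integer IDs through a dictionary, so the Triples component stores only the compact integer structure. All data here is invented, and real HDT additionally sorts and bit-packs both components to reach its high compression rates.

def encode(triples):
    """Split a triple list into a term dictionary and integer triples."""
    dictionary = {}                      # term -> integer ID
    def term_id(term):
        if term not in dictionary:
            dictionary[term] = len(dictionary) + 1
        return dictionary[term]
    encoded = [tuple(term_id(t) for t in triple) for triple in triples]
    return dictionary, encoded

triples = [
    ("ex:alice", "foaf:knows", "ex:bob"),
    ("ex:alice", "foaf:name", '"Alice"'),
    ("ex:bob",   "foaf:name", '"Bob"'),
]
dictionary, encoded = encode(triples)
print(encoded)    # [(1, 2, 3), (1, 4, 5), (3, 4, 6)]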
@article{umbr-etal-2011WWWJ, Abstract = {A growing amount of Linked Data -- graph-structured data accessible at sources distributed across the Web -- enables advanced data integration and decision-making applications. Typical systems operating on Linked Data collect (crawl) and pre-process (index) large amounts of data, and evaluate queries against a centralised repository. Given that crawling and indexing are time-consuming operations, the data in the centralised index may be out of date at query execution time. An ideal query answering system for querying Linked Data live should return current answers in a reasonable amount of time, even on corpora as large as the Web. In such a live query system source selection -- determining which sources contribute answers to a query -- is a crucial step. In this article we propose to use lightweight data summaries for determining relevant sources during query evaluation. We compare several data structures and hash functions with respect to their suitability for building such summaries, stressing benefits for queries that contain joins and require ranking of results and sources. We elaborate on join variants, join ordering and ranking. We analyse the different approaches theoretically and provide results of an extensive experimental evaluation.}, Author = {J{\"u}rgen Umbrich and Katja Hose and Marcel Karnstedt and Andreas Harth and Axel Polleres}, Journal = {World Wide Web Journal}, Number = {5--6}, Pages = {495--544}, Projects = {lion2}, Publisher = {Springer}, Title = {Comparing Data Summaries for Processing Live Queries over Linked Data}, Type = JOURNAL, Url = {http://www.springerlink.com/content/p72226181132j60l/}, Volume = 14, Year = 2011, Bdsk-Url-1 = {http://www.springerlink.com/content/p72226181132j60l/}}
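The article above compares lightweight data summaries for deciding which Linked Data sources can contribute answers to a live query. A minimal sketch of that idea, assuming a plain hashed-term summary per source (sources, data, and bucket count are invented; the structures evaluated in the article, such as QTrees, are far more compact and also support join-aware source selection):

import hashlib

def h(term, buckets=64):
    # Map a term into a small hash space; fewer buckets save memory
    # but increase false positives.
    return int(hashlib.sha1(term.encode()).hexdigest(), 16) % buckets

def summarise(triples):
    """Summarise a source by the hashes of all terms it mentions."""
    return {h(term) for triple in triples for term in triple}

sources = {
    "http://example.org/A": [("ex:alice", "foaf:knows", "ex:bob")],
    "http://example.org/B": [("ex:carol", "foaf:name", '"Carol"')],
}
summaries = {src: summarise(ts) for src, ts in sources.items()}

def relevant_sources(query_term):
    """Only sources whose summary matches the term need to be looked up."""
    return [src for src, s in summaries.items() if h(query_term) in s]

print(relevant_sources("ex:alice"))  # ['http://example.org/A'] plus possible false positives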
@article{brui-etal-2011tocl, Abstract = {In the context of the Semantic Web, several approaches to the combination of ontologies, given in terms of theories of classical first-order logic and rule bases, have been proposed. They either cast rules into classical logic or limit the interaction between rules and ontologies. Autoepistemic logic (AEL) is an attractive formalism which allows these limitations to be overcome by serving as a uniform host language into which ontologies and nonmonotonic logic programs can both be embedded. For the latter, so far only the propositional setting has been considered. In this paper, we present three embeddings of normal and three embeddings of disjunctive non-ground logic programs under the stable model semantics into first-order AEL. While the embeddings all correspond with respect to objective ground atoms, differences arise when considering non-atomic formulas and combinations with first-order theories. We compare the embeddings with respect to stable expansions and autoepistemic consequences, considering the embeddings by themselves, as well as combinations with classical theories. Our results reveal differences and correspondences of the embeddings and provide useful guidance in the choice of a particular embedding for knowledge combination.}, Author = {Jos de Bruijn and Thomas Eiter and Axel Polleres and Hans Tompits}, Journal = TOCL, Number = 3, Title = {Embedding Non-Ground Logic Programs into Autoepistemic Logic for Knowledge Base Combination}, Type = JOURNAL, Url = {http://tocl.acm.org/accepted/396bruijn.pdf}, Volume = 12, Year = 2011, Bdsk-Url-1 = {http://tocl.acm.org/accepted/396bruijn.pdf}}
@proceedings{poll-chen-2010-ISWCPD, Abstract = {The posters and demonstrations track of ISWC 2010 continues the established tradition of providing an interaction and connection opportunity for researchers and practitioners to present and demonstrate their new and innovative work-in-progress. The track gives conference attendees a way to learn about novel on-going research projects that might not yet be complete, but whose preliminary results are already interesting. The track also provides presenters with an excellent opportunity to obtain feedback from their peers in an informal setting from knowledgeable sources. New this year, we also encouraged authors of accepted full research or in-use papers to present a practical demonstration or poster with additional results.}, Address = {Shanghai, China}, Booktitle = {ISWC 2010 Posters \& Demos}, Day = {7--11}, Editor = {Axel Polleres and Huajun Chen}, Month = NOV, Publisher = {CEUR-WS.org}, Series = {CEUR Workshop Proceedings}, Title = {Proceedings of the ISWC 2010 Posters \& Demonstrations Track: Collected Abstracts}, Type = BOOK, Url = {http://ceur-ws.org/Vol-658}, Volume = 658, Year = 2010, Bdsk-Url-1 = {http://ceur-ws.org/Vol-658}}
@inproceedings{lop-etal-2010ISWC, Abstract = {Starting from the general framework for Annotated RDFS which we presented in previous work (extending Udrea et al.'s Annotated RDF), we address the development of a query language -- AnQL -- that is inspired by SPARQL, including several features of SPARQL 1.1. As a side effect we propose formal definitions of the semantics of these features (subqueries, aggregates, assignment, solution modifiers) which could serve as a basis for the ongoing work in SPARQL 1.1. We demonstrate the value of such a framework by comparing our approach to previously proposed extensions of SPARQL and show that AnQL generalises and extends them.}, Address = {Shanghai, China}, Author = {Nuno Lopes and Axel Polleres and Umberto Straccia and Antoine Zimmermann}, Booktitle = {Proceedings of the 9th International Semantic Web Conference (ISWC 2010)}, Day = {7--11}, Month = nov, Pages = {518--533}, Publisher = {Springer}, Series = LNCS, Title = {{AnQL: SPARQLing up annotated RDFS}}, Type = CONF, Url = {http://iswc2010.semanticweb.org/pdf/51.pdf}, Volume = 6496, Year = 2010, Bdsk-Url-1 = {http://iswc2010.semanticweb.org/pdf/51.pdf}}
@inproceedings{hoga-etal-2010ISWC, Abstract = {In this paper, we discuss generic optimisations of rule-based materialisation approaches for reasoning over large static RDF datasets. We generalise and re-formalise what we call the "partial-indexing" approach to scalable rule-based materialisation: the approach is based on a separation of terminological data, which has been shown in previous and related works to enable highly scalable and distributable reasoning for specific rulesets; in so doing, we provide some completeness propositions with respect to semi-naive evaluation. We then show how related work on template rules -- T-Box-specific dynamic rulesets created by binding the terminological patterns in the static ruleset -- can be incorporated in the partial-indexing approach, and optimisations that are possible thereafter. We demonstrate our methods using LUBM(10) for RDFS, pD* (OWL Horst) and OWL 2 RL, and thereafter demonstrate pragmatic distributed reasoning over 1.12b Linked Data triples for a subset of OWL 2 RL we argue to be suitable for the Web use-case.}, Address = {Shanghai, China}, Author = {Aidan Hogan and Jeff Z. Pan and Axel Polleres and Stefan Decker}, Booktitle = {Proceedings of the 9th International Semantic Web Conference (ISWC 2010)}, Day = {7--11}, Month = nov, Pages = {337--353}, Publisher = {Springer}, Series = LNCS, Title = {{SAOR}: Template Rule Optimisations for Distributed Reasoning over 1 Billion Linked Data Triples}, Url = {http://iswc2010.semanticweb.org/pdf/305.pdf}, Volume = 6496, Year = 2010, Bdsk-Url-1 = {http://iswc2010.semanticweb.org/pdf/305.pdf}}
@inproceedings{hausw-etal-2010, Abstract = {Presence management, i.e., the ability to automatically identify the status and availability of communication partners, is becoming an invaluable tool for collaboration in enterprise contexts. In this paper, we argue for efficient presence management by means of a holistic view of both physical context and virtual presence in online communication channels. We sketch the components for enabling presence as a service integrating both online information as well as physical sensors, discussing benefits, possible applications on top, and challenges of establishing such a service.}, Address = {Chicago, Illinois, USA}, Author = {Manfred Hauswirth and J{\'e}r{\^o}me Euzenat and Owen Friel and Keith Griffin and Pat Hession and Brendan Jennings and Tudor Groza and Siegfried Handschuh and Ivana Podnar Zarko and Axel Polleres and Antoine Zimmermann}, Booktitle = {The 6th International Conference on Collaborative Computing: Networking, Applications and Worksharing (CollaborateCom 2010)}, Day = {9--12}, Month = Oct, Note = {Invited paper}, Publisher = {IEEE Computer Society}, Title = {Towards Consolidated Presence}, Url = {http://www.polleres.net/publications/hausw-etal-2010CollaborateCom.pdf}, Year = 2010, Bdsk-Url-1 = {http://www.polleres.net/publications/hausw-etal-2010CollaborateCom.pdf}}
@article{poll-etal-2010swj, Abstract = {The Semantic Web is about to grow up. Through efforts such as the Linking Open Data initiative, we finally find ourselves at the edge of a Web of Data becoming reality. Standards such as OWL 2, RIF and SPARQL 1.1 shall allow us to reason with and ask complex structured queries on this data, but still they do not play together smoothly and robustly enough to cope with huge amounts of noisy Web data. In this paper, we discuss open challenges relating to querying and reasoning with Web data and raise the question: can the burgeoning Web of Data ever catch up with the now ubiquitous HTML Web?}, Author = {Axel Polleres and Aidan Hogan and Andreas Harth and Stefan Decker}, Journal = swj, Number = {1-2}, Pages = {45--52}, Publisher = {IOS Press}, Title = {Can we ever catch up with the Web?}, Type = JOURNAL, Url = {http://www.semantic-web-journal.net/sites/default/files/swj36_0.pdf}, Volume = 1, Year = 2010, Bdsk-Url-1 = {http://www.semantic-web-journal.net/sites/default/files/swj36_0.pdf}}
@inproceedings{pich-etal-2010RR, Abstract = {Based on practical observations on rule-based inference on RDF data, we study the problem of redundancy elimination on RDF graphs in the presence of rules (in the form of Datalog rules) and constraints (in the form of so-called tuple-generating dependencies), and with respect to queries (ranging from conjunctive queries up to more complex ones, particularly covering features of SPARQL, such as union, negation, or filters). To this end, we investigate the influence of several problem parameters (like restrictions on the size of the rules, the constraints, and/or the queries) on the complexity of detecting redundancy. The main result of this paper is a fine-grained complexity analysis of both graph and rule minimisation in various settings.}, Address = {Bressanone, Italy}, Author = {Reinhard Pichler and Axel Polleres and Sebastian Skritek and Stefan Woltran}, Booktitle = {Web Reasoning and Rule Systems -- Fourth International Conference, RR2010}, Day = {22-24}, Editor = {Pascal Hitzler and Thomas Lukasiewicz}, Month = sep, Note = {\textbf{Best paper award}, technical report version available at \url{http://polleres.net/publications/DERI-TR-2010-04-23.pdf}}, Pages = {133--148}, Publisher = {Springer}, Series = LNCS, Title = {Redundancy Elimination on {RDF} Graphs in the Presence of Rules, Constraints, and Queries}, Type = CONF, Url = {http://www.polleres.net/publications/pich-etal-2010RR.pdf}, Volume = 6333, Year = 2010, Bdsk-Url-1 = {http://www.polleres.net/publications/pich-etal-2010RR.pdf}}
@inproceedings{poll-2010RRtut, Abstract = {In this tutorial we will give an overview of new features in SPARQL 1.1, which the W3C is currently working on, as well as on the interplay with its ``neighbour standards'', OWL2 and RIF. We will also give a rough overview of existing implementations to play around with.}, Address = {Bressanone, Italy}, Author = {Axel Polleres}, Booktitle = {Web Reasoning and Rule Systems -- Fourth International Conference, RR2010}, Day = {22-24}, Editor = {Pascal Hitzler and Thomas Lukasiewicz}, Month = sep, Note = {Slides available at \url{http://www.polleres.net/RR2010_SPARQL11_Tutorial/}}, Pages = {23--26}, Publisher = {Springer}, Series = LNCS, Title = {{SPARQL1.1: new features and friends (OWL2, RIF)}}, Type = OTHER, Url = {http://polleres.net/poll-2010RRtut.pdf}, Volume = 6333, Year = 2010, doi={https://doi.org/10.1007/978-3-642-15918-3_3}, Bdsk-Url-1 = {http://www.inf.unibz.it/krdb/events/rr2010/program/program.html#tutorials}}
@inproceedings{obe-etal-2010RR, Abstract = {We present an extension of the DLVHEX system to support RIF-Core, a dialect of W3C's Rule Interchange Format (RIF), as well as combinations of RIF-Core and OWL2RL ontologies. DLVHEX is a plugin system on top of DLV, a disjunctive Datalog engine which enables higher-order and external atoms, as well as input rewriting capabilities; these are provided as plugins and enable DLVHEX to bidirectionally exchange data with external knowledge bases and to consume input in different Semantic Web languages. In fact, there already exist plugins for languages such as RDF and SPARQL. Our new plugin facilitates consumption and processing of RIF rulesets, as well as OWL2RL reasoning by a 2-step reduction to DLVHEX via embedding in RIF-Core. The current version implements the translation from OWL2RL to RIF by a static rule set and supports the RIF built-ins mandatory for this reduction through external atoms in DLVHEX. For the future we plan to switch to a dynamic approach for RIF embedding of OWL2RL and to extend the RIF reasoning capabilities to more features of RIF-BLD. We provide a description of our current system and its development status as well as an illustrative example, and conclude with future plans to complete the Semantic Web library of plugins for DLVHEX.}, Address = {Bressanone, Italy}, Author = {Philipp Obermeier and Marco Marano and Axel Polleres}, Booktitle = {Web Reasoning and Rule Systems -- Fourth International Conference, RR 2010}, Day = {22-24}, Editor = {Pascal Hitzler and Thomas Lukasiewicz}, Month = sep, Note = {Demo Paper}, Pages = {244--250}, Publisher = {Springer}, Series = LNCS, Title = {Processing {RIF} and {OWL2RL} within {DLVHEX}}, Type = DEMO, Url = {http://www.polleres.net/publications/obe-etal-2010RR.pdf}, Volume = 6333, Year = 2010, Bdsk-Url-1 = {http://www.polleres.net/publications/obe-etal-2010RR.pdf}}
@inproceedings{poll-2010aics, Abstract = {This paper summarises the evolution of W3C standards in the area of Semantic Web technologies, as well as gaps within these standards still to be filled in terms of standardisation. Moreover, we give a subjective survey of the most influential scientific works which have contributed to the development of these standards and to closing the gaps between them. The Semantic Web is proving to be an interesting application field for Artificial Intelligence; we aim here both at giving an overview of our own work in the area and at providing an entry point for researchers interested in the foundations of Semantic Web standards and technologies.}, Address = {Galway, Ireland}, Author = {Axel Polleres}, Booktitle = {21st National Conference on Artificial Intelligence and Cognitive Science (AICS2010)}, Day = {30--1}, Month = aug, Note = {Review paper (appeared in the informal conference proceedings)}, Talk = {Axel Polleres}, Title = {Semantic Web Technologies: From Theory to Standards}, Url = {http://www.polleres.net/publications/poll-2010aics.pdf}, Year = 2010, Bdsk-Url-1 = {http://www.polleres.net/publications/poll-2010aics.pdf}}
@inproceedings{stra-etal-2010, Abstract = {We describe a generic framework for representing and reasoning with annotated Semantic Web data, a task becoming more important with the recently increased amount of inconsistent and non-reliable meta-data on the web. We formalise the annotated language, the corresponding deductive system and address the query answering problem. Our work extends previous contributions on RDF annotations by providing a unified reasoning formalism and allowing the seamless combination of different annotation domains. We show that current RDF stores can easily be extended to our framework. We demonstrate the feasibility of our method by instantiating it on (i) temporal RDF; (ii) fuzzy RDF; and (iii) their combination. A prototype shows that implementing and combining new domains is easy.}, Address = {Atlanta, Georgia, USA}, Author = {Umberto Straccia and Nuno Lopes and Gergely Luk{\'a}csy and Axel Polleres}, Booktitle = {Proceedings of the 24th AAAI Conference on Artificial Intelligence (AAAI 2010), Special Track on Artificial Intelligence and the Web}, Day = {11--15}, Month = jul, Title = {A General Framework for Representing and Reasoning with Annotated Semantic Web Data}, Type = CONF, Url = {http://www.polleres.net/publications/stra-etal-2010AAAI.pdf}, Year = 2010, Bdsk-Url-1 = {http://www.polleres.net/publications/stra-etal-2010AAAI.pdf}}
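A small sketch of the annotated-reasoning framework described above, instantiated for the fuzzy domain: annotations of triples joined in a rule body combine with min, and alternative derivations of the same triple combine with max. The transitive "nearness" rule and the data are invented for illustration; the framework in the paper covers arbitrary annotation domains and their combinations.

from itertools import product

facts = {  # (s, p, o) -> fuzzy degree
    ("ex:a", "ex:nearTo", "ex:b"): 0.9,
    ("ex:b", "ex:nearTo", "ex:c"): 0.6,
}

def close_transitively(facts, pred="ex:nearTo"):
    """Materialise fuzzy transitivity until a fixpoint is reached."""
    changed = True
    while changed:
        changed = False
        for (t1, d1), (t2, d2) in product(list(facts.items()), repeat=2):
            if t1[1] == t2[1] == pred and t1[2] == t2[0]:
                derived = (t1[0], pred, t2[2])
                degree = min(d1, d2)                  # body conjunction: min
                if degree > facts.get(derived, 0.0):  # alternative proofs: max
                    facts[derived] = degree
                    changed = True
    return facts

print(close_transitively(dict(facts)))
# derives ('ex:a', 'ex:nearTo', 'ex:c') with degree 0.6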
@inproceedings{lope-etal-2010rdf-ann, Abstract = {While the current mechanism of reification in RDF is without semantics and widely considered inappropriate and cumbersome, some form of reification -- speaking about triples themselves -- is needed in RDF for many reasonable applications: in particular, reification allows for enhancing triples with annotations relating to provenance, spatio-temporal validity, degrees of trust, fuzzy values and/or other contextual information. In this position paper, we argue that -- besides resolving the issue of how to syntactically represent reification in the future (i.e., whether to stick with the current reification mechanism or standardise a different mechanism such as Named Graphs) -- it is time to agree on certain core annotations that are widely needed. We summarise existing work and provide a possible direction towards handling reification by means of a general annotation framework that can be instantiated for those major use cases we currently see arising.}, Address = {Stanford, Palo Alto, CA, USA}, Author = {Nuno Lopes and Antoine Zimmermann and Aidan Hogan and Gergely Luk{\'a}csy and Axel Polleres and Umberto Straccia and Stefan Decker}, Booktitle = {W3C Workshop on RDF Next Steps}, Day = {26--27}, Month = JUN, Title = {{RDF} Needs Annotations}, Type = WS, Url = {http://www.w3.org/2009/12/rdf-ws/papers/ws09}, Year = 2010, Bdsk-Url-1 = {http://www.w3.org/2009/12/rdf-ws/papers/ws09}}
@inproceedings{lope-etal-2010rdf-xsparql, Abstract = {One of the requirements of current Semantic Web applications is to deal with heterogeneous data. The Resource Description Framework (RDF) is the W3C recommended standard for data representation, yet data represented and stored using the Extensible Markup Language (XML) is almost ubiquitous and remains the standard for data exchange. While RDF has a standard XML representation, XML Query languages are of limited use for transformations between natively stored RDF data and XML. Being able to work with both XML and RDF data using a common framework would be a great advantage and eliminate unnecessary intermediate steps that are currently used when handling both formats.}, Address = {Stanford, Palo Alto, CA, USA}, Author = {Nuno Lopes and Stefan Bischof and Orri Erling and Axel Polleres and Alexandre Passant and Diego Berrueta and Antonio Campos and J{\'e}r{\^o}me Euzenat and Kingsley Idehen and Stefan Decker and St{\'e}phane Corlosquet and Jacek Kopeck{\'y} and Janne Saarela and Thomas Krennwallner and Davide Palmisano and Michal Zaremba}, Booktitle = {W3C Workshop on RDF Next Steps}, Day = {26--27}, Month = JUN, Title = {{RDF} and {XML}: Towards a Unified Query Layer}, Type = WS, Url = {http://www.w3.org/2009/12/rdf-ws/papers/ws10}, Year = 2010, Bdsk-Url-1 = {http://www.w3.org/2009/12/rdf-ws/papers/ws10}}
@inproceedings{vida-etal-2010, Abstract = {In SPARQL queries, the combination of triple patterns is expressed by using shared variables across patterns. Based on this characterization, basic graph patterns in a SPARQL query can be partitioned into groups of acyclic pattern combinations that share exactly one variable, or star-shaped groups. We observe that the number of triples in a group is proportional to the number of individuals that play the role of the subject or the object; however, depending on the degree of participation of the subject individuals in the properties, a group could be not much larger than a class or type to which the subject or object belongs. Thus, it may be significantly more efficient to independently evaluate each of the groups, and then merge the resulting sets, than to linearly join all triples in a basic graph pattern. Based on these properties of star-shaped groups, we have developed query optimization and evaluation techniques. We have conducted an empirical analysis on the benefits of the optimization and evaluation techniques in several SPARQL query engines. We observe that our proposed techniques are able to speed up query evaluation time for join queries with star-shaped patterns by at least one order of magnitude.}, Address = {Heraklion, Greece}, Author = {Mar{\'\i}a-Esther Vidal and Edna Ruckhaus and Tomas Lampo and Amad{\'\i}s Mart{\'\i}nez and Javier Sierra and Axel Polleres}, Booktitle = {Proceedings of the 7th European Semantic Web Conference (ESWC2010)}, Day = {30--3}, Month = MAY, Projects = {lion2}, Publisher = {Springer}, Title = {On the Efficiency of Joining Group Patterns in {SPARQL} Queries}, Type = CONF, Url = {http://www.polleres.net/publications/vida-etal-2010eswc.pdf}, Year = 2010, Bdsk-Url-1 = {http://www.polleres.net/publications/vida-etal-2010eswc.pdf}}
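A minimal sketch of the star-shaped grouping described in the entry above: the triple patterns of a basic graph pattern are partitioned by their subject variable, so that each star can be evaluated independently and the intermediate results merged afterwards. The query fragment is invented for illustration.

from collections import defaultdict

bgp = [  # (subject, predicate, object); "?x" marks variables
    ("?p", "foaf:name", "?name"),
    ("?p", "foaf:knows", "?q"),
    ("?q", "foaf:name", "?qname"),
]

def star_groups(bgp):
    """Partition triple patterns into star-shaped groups by subject."""
    groups = defaultdict(list)
    for s, p, o in bgp:
        groups[s].append((s, p, o))
    return dict(groups)

for subject, patterns in star_groups(bgp).items():
    print(subject, "->", patterns)
# ?p -> [('?p', 'foaf:name', '?name'), ('?p', 'foaf:knows', '?q')]
# ?q -> [('?q', 'foaf:name', '?qname')]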
@misc{pan-etal-2010-eswc10tut, Abstract = {Tutorial at the 7th Extended Semantic Web Conference (ESWC2010)}, Address = {Heraklion, Greece}, Author = {Jeff Z. Pan and Axel Polleres and Aidan Hogan}, Day = 30, Month = MAY, Note = {Slides available at \url{http://www.abdn.ac.uk/~csc280/tutorial/eswc2010/}}, Title = {Scalable OWL Reasoning for Linked Data}, Type = OTHER, Url = {http://www.eswc2010.org/program-menu/tutorials}, Year = 2010, Bdsk-Url-1 = {http://www.eswc2010.org/program-menu/tutorials}}
@inproceedings{hoga-etal-2010NeFoRS, Abstract = {We propose a method for consolidating entities in RDF data on the Web. Our approach is based on a statistical analysis of the use of predicates and their associated values to identify ``quasi''-key properties. Compared to a purely symbolic approach, we obtain promising results, retrieving more identical entities with a high precision. We also argue that our technique scales well -- possibly to the size of the current Web of Data -- as opposed to more expensive existing approaches.}, Address = {Heraklion, Greece}, Author = {Aidan Hogan and Axel Polleres and J{\"u}rgen Umbrich and Antoine Zimmermann}, Booktitle = {Workshop on New Forms of Reasoning for the Semantic Web: Scalable \& Dynamic (NeFoRS10)}, Day = {30}, Month = MAY, Projects = {lion2}, Title = {Some entities are more equal than others: statistical methods to consolidate Linked Data}, Type = WS, Url = {http://www.polleres.net/publications/hoga-etal-2010NeFoRS.pdf}, Year = 2010, Bdsk-Url-1 = {http://www.polleres.net/publications/hoga-etal-2010NeFoRS.pdf}}
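A rough sketch of the statistical consolidation idea above: a predicate whose values are used by close to one subject on average behaves like a key, and subjects sharing a value on such a ``quasi''-key predicate become consolidation candidates. The threshold, predicates, and data are invented; the paper computes such statistics over Web-scale corpora.

from collections import defaultdict

triples = [
    ("ex:a1", "foaf:mbox", "mailto:alice@example.org"),
    ("ex:a2", "foaf:mbox", "mailto:alice@example.org"),
    ("ex:b1", "foaf:mbox", "mailto:bob@example.org"),
    ("ex:c1", "foaf:mbox", "mailto:carol@example.org"),
    ("ex:a1", "foaf:gender", "female"),
    ("ex:a2", "foaf:gender", "female"),
    ("ex:b1", "foaf:gender", "male"),
    ("ex:c1", "foaf:gender", "female"),
]

def quasi_keys(triples, max_avg_subjects=1.5):
    """Predicates whose values have close to one subject on average."""
    subjects = defaultdict(set)              # (p, o) -> subjects using it
    for s, p, o in triples:
        subjects[(p, o)].add(s)
    counts = defaultdict(list)               # p -> subjects-per-value counts
    for (p, _), subs in subjects.items():
        counts[p].append(len(subs))
    return {p for p, c in counts.items() if sum(c) / len(c) <= max_avg_subjects}

def consolidate(triples, keys):
    """Group subjects sharing a value on a quasi-key predicate."""
    shared = defaultdict(set)
    for s, p, o in triples:
        if p in keys:
            shared[(p, o)].add(s)
    return [subs for subs in shared.values() if len(subs) > 1]

keys = quasi_keys(triples)
print(keys)                         # {'foaf:mbox'}
print(consolidate(triples, keys))   # [{'ex:a1', 'ex:a2'}]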
@proceedings{kaer-etal-2010-spot, Abstract = {Workshop Proceedings. This workshop was co-located with ESWC 2010.}, Address = {Heraklion, Greece}, Booktitle = {SPOT2010}, Day = 31, Editor = {Philipp K{\"a}rger and Daniel Olmedilla and Alexandre Passant and Axel Polleres}, Month = MAY, Publisher = {CEUR-WS.org}, Series = {CEUR Workshop Proceedings}, Title = {Proceedings of the Second Workshop on Trust and Privacy on the Social and Semantic Web (SPOT2010)}, Type = BOOK, Url = {http://ceur-ws.org/Vol-576/}, Volume = {576}, Year = 2010, Bdsk-Url-1 = {http://ceur-ws.org/Vol-576/}}
@inproceedings{pich-etal-2010AMW, Abstract = {Based on practical observations on rule-based inference on RDF data, we study the problem of redundancy elimination in RDF in the presence of rules (in the form of Datalog rules) and constraints (in the form of so-called tuple-generating dependencies). To this end, we investigate the influence of several problem parameters (like restrictions on the size of the rules and/or the constraints) on the complexity of detecting redundancy. The main result of this paper is a fine-grained complexity analysis of both graph and rule minimisation in various settings.}, Author = {Reinhard Pichler and Axel Polleres and Sebastian Skritek and Stefan Woltran}, Booktitle = {4th Alberto Mendelzon Workshop on Foundations of Data Management}, Month = may, Publisher = {CEUR-WS.org}, Series = {CEUR Workshop Proceedings}, Title = {Minimising RDF Graphs under Rules and Constraints Revisited}, Type = WS, Url = {http://ceur-ws.org/Vol-619/paper4.pdf}, Volume = {619}, Year = 2010, Bdsk-Url-1 = {http://ceur-ws.org/Vol-619/paper4.pdf}}
@inproceedings{hoga-etal-2010LDOW, Abstract = {Over a decade after RDF has been published as a W3C recommendation, publishing open and machine-readable content on the Web has recently received a lot more attention, including from corporate and governmental bodies; notably thanks to the Linked Open Data community, there now exists a rich vein of heterogeneous RDF data published on the Web (the so-called ``Web of Data'') accessible to all. However, RDF publishers are prone to making errors which compromise the effectiveness of applications leveraging the resulting data. In this paper, we discuss common errors in RDF publishing, their consequences for applications, along with possible publisher-oriented approaches to improve the quality of structured, machine-readable and open data on the Web.}, Address = {Raleigh, USA}, Author = {Aidan Hogan and Andreas Harth and Alexandre Passant and Stefan Decker and Axel Polleres}, Booktitle = {3rd International Workshop on Linked Data on the Web (LDOW2010) at WWW2010}, Month = apr, Title = {Weaving the Pedantic Web}, Type = WS, Url = {http://aidanhogan.com/docs/pedantic_ldow10.pdf}, Year = 2010, Bdsk-Url-1 = {http://aidanhogan.com/docs/pedantic_ldow10.pdf}}
@inproceedings{umbr-etal-2010LDOW, Abstract = {Datasets in the LOD cloud are far from being static in their nature and how they are exposed. As resources are added and new links are set, applications consuming the data should be able to deal with these changes. In this paper we investigate how LOD datasets change and what sensible measures there are to accommodate dataset dynamics. We compare our findings with traditional, document-centric studies concerning the ``freshness'' of the document collections and propose metrics for LOD datasets.}, Address = {Raleigh, USA}, Author = {J{\"u}rgen Umbrich and Michael Hausenblas and Aidan Hogan and Axel Polleres and Stefan Decker}, Booktitle = {3rd International Workshop on Linked Data on the Web (LDOW2010) at WWW2010}, Month = apr, Title = {Towards Dataset Dynamics: Change Frequency of Linked Open Data Sources}, Type = WS, Url = {http://www.polleres.net/publications/umbr-etal-2010.pdf}, Year = 2010, Bdsk-Url-1 = {http://www.polleres.net/publications/umbr-etal-2010.pdf}}
@inproceedings{hart-etal-2010WWW, Abstract = {Typical approaches for search and querying over structured Web Data collect (crawl) and pre-process (index) large amounts of data before allowing for query answering in a central data warehouse. This time-consuming pre-processing phase decreases the freshness of query results and only uses the benefits of Linked Data to a limited degree, where structured data is accessible live and up-to-date at distributed Web resources that may change constantly. An ideal query answering system for Linked Data should always return current answers in a reasonable amount of time, even on corpora as large as the Web. Query processors evaluating queries directly on the live sources require knowledge of the contents of data sources. In this paper we develop and evaluate a probabilistic index structure for covering graph-structured content of sources adhering to Linked Data principles, provide an algorithm for answering conjunctive queries over Linked Data on the Web exploiting this structure, and evaluate the system using synthetically generated queries. We find that our lightweight index structure enables more complete query results over Linked Data compared to direct lookup approaches, while keeping the overhead for additional lookups and index maintenance low.}, Address = {Raleigh, NC, USA}, Author = {Andreas Harth and Katja Hose and Marcel Karnstedt and Axel Polleres and Kai-Uwe Sattler and J{\"u}rgen Umbrich}, Booktitle = {Proceedings of the 19th World Wide Web Conference (WWW2010)}, Day = {26--30}, Month = apr, Note = {Technical report version available at \url{http://polleres.net/publications/DERI-TR-2009-11-17.pdf}}, Title = {Data Summaries for On-demand Queries over Linked Data}, Type = CONF, Url = {http://www.polleres.net/publications/hart-etal-2010.pdf}, Year = 2010, Bdsk-Url-1 = {http://www.polleres.net/publications/hart-etal-2010.pdf}}
@proceedings{padget-etal-COIN2009, Abstract = {COIN 2009 International Workshops: COIN@AAMAS 2009, Budapest, Hungary, May 2009; COIN@IJCAI 2009, Pasadena, USA, July 2009; COIN@MALLOW 2009, Turin, Italy, September 2009. Revised Selected Papers. This book constitutes the thoroughly refereed post-workshop proceedings of the International Workshop on Coordination, Organization, Institutions and Norms in Agent Systems, COIN 2009.}, Editor = {Julian Padget and Alexander Artikis and Wamberto Vasconcelos and Kostas Stathis and Viviane Torres da Silva and Eric Matson and Axel Polleres}, Isbn = {978-3-642-14961-0}, Publisher = {Springer}, Series = LNAI, Title = {Coordination, Organizations, Institutions, and Norms in Agent Systems V}, Volume = {6069}, Year = {2010}}
@article{debr-etal-2010-kais, Abstract = {In the ongoing discussion about combining rules and Ontologies on the Semantic Web a recurring issue is how to combine first-order classical logic with nonmonotonic rule languages. Whereas several modular approaches to define a combined semantics for such hybrid knowledge bases focus mainly on decidability issues, we tackle the matter from a more general point of view. In this paper we show how Quantified Equilibrium Logic (QEL) can function as a unified framework which embraces classical logic as well as disjunctive logic programs under the (open) answer set semantics. In the proposed variant of QEL we relax the unique names assumption, which was present in earlier versions of QEL. Moreover, we show that this framework elegantly captures the existing modular approaches for hybrid knowledge bases in a unified way.}, Author = {Jos de Bruijn and David Pearce and Axel Polleres and Agust{\'\i}n Valverde}, Issn = {0219-1377}, Journal = {Knowledge and Information Systems (KAIS)}, Number = 1, Pages = {81--104}, Publisher = {Springer}, Title = {A Semantical Framework for Hybrid Knowledge Bases}, Type = JOURNAL, Url = {http://www.polleres.net/publications/debr-etal-2010-kais.pdf}, Volume = 25, Year = 2010, Bdsk-Url-1 = {http://www.polleres.net/publications/debr-etal-2010-kais.pdf}}
@book{poll-huy-2009, Editor = {Axel Polleres and David Huynh}, Publisher = {Elsevier}, Note = {Editorial}, Title = {Journal of Web Semantics, Special Issue: The Web of Data}, Type = JOURNAL, Volume = {7(3)}, Year = 2009}
@proceedings{poll-swif-2009, Abstract = {This book constitutes the refereed proceedings of the Third International Conference on Web Reasoning and Rule Systems, RR 2009, held in Chantilly, VA, USA, in October 2009. The 15 revised full papers presented together with 3 invited papers were carefully reviewed and selected from 41 submissions. The papers address all current topics in Web reasoning and rule systems such as proof/deduction procedures, scalability, uncertainty, knowledge amalgamation and querying, and rules for decision support and production systems.}, Address = {Chantilly, VA, USA}, Booktitle = {RR 2009}, Day = {25--26}, Editor = {Axel Polleres and Terrance Swift}, Month = OCT, Publisher = {Springer}, Series = LNCS, Title = {Web Reasoning and Rule Systems -- Third International Conference, RR2009}, Type = BOOK, Url = {http://www.springer.com/computer/database+management+&+information+retrieval/book/978-3-642-05081-7}, Volume = {5837}, Year = 2009, Bdsk-Url-1 = {http://www.springer.com/computer/database+management+&+information+retrieval/book/978-3-642-05081-7}}
@proceedings{bald-etal-2009-mallow, Abstract = {The Multi-Agent Logics, Languages, and Organisations Federated Workshops (MALLOW for short), in its second edition this year after the success of MALLOW'007 held in Durham (UK), is a forum for researchers interested in sharing their experiences in agents and multi-agent systems. MALLOW'009 was held at the Educatorio della Provvidenza, in Torino (Italy), from September 7th, 2009 through September 10th, 2009. This volume contains the proceedings of the five workshops, for a total of forty-seven high quality papers, which were selected by the programme committees of the workshops for presentation. Each workshop has an introductory essay, authored by the organizers, which presents the workshop.}, Address = {Torino, Italy}, Booktitle = {MALLOW'009}, Day = {7--10}, Editor = {Matteo Baldoni and Cristina Baroglio and Jamal Bentahar and Guido Boella and Massimo Cossentino and Mehdi Dastani and Barbara Dunin-Keplicz and Giancarlo Fortino and Marie-Pierre Gleizes and Jo{\~a}o Leite and Viviana Mascardi and Julian Padget and Juan Pav{\'o}n and Axel Polleres and Amal El Fallah Seghrouchni and Paolo Torroni and Rineke Verbrugge}, Month = SEP, Publisher = {CEUR-WS.org}, Series = {CEUR Workshop Proceedings}, Title = {Proceedings of the Second Multi-Agent Logics, Languages, and Organisations Federated Workshops (MALLOW'009)}, Type = BOOK, Url = {http://CEUR-WS.org/Vol-494/}, Volume = {494}, Year = 2009, Bdsk-Url-1 = {http://CEUR-WS.org/Vol-494/}}
@inproceedings{zimm-etal-2009, Abstract = {The need for semantics preserving integration of complex data has been widely recognized in the healthcare domain. While standards such as Health Level Seven (HL7) have been developed in this direction, they have mostly been applied in limited, controlled environments, still being used incoherently across countries, organizations, or hospitals. In a more mobile and global society, data and knowledge are going to be commonly exchanged between various systems at Web scale. Specialists in this domain have increasingly argued in favor of using Semantic Web technologies for modeling healthcare data in a well formalized way. This paper provides a reality check on how far current Semantic Web standards can tackle interoperability issues arising in such systems, driven by the modeling of concrete use cases on exchanging clinical data and practices. Recognizing the insufficiency of standard OWL to model our scenario, we survey theoretical approaches to extend OWL by modularity and context towards handling heterogeneity in Semantic-Web-enabled health care and life sciences (HCLS) systems. We come to the conclusion that none of these approaches addresses all of our use case heterogeneity aspects in its entirety. We finally sketch paths on how better approaches could be devised by combining several existing techniques.}, Address = {Vilamoura, Algarve, Portugal}, Author = {Antoine Zimmermann and Ratnesh Sahay and Ronan Fox and Axel Polleres}, Booktitle = {OTM 2009, Part II: Proceedings of the 8th International Conference on Ontologies, DataBases, and Applications of Semantics (ODBASE 2009)}, Day = {2--4}, Editor = {Robert Meersman and Tharam S. Dillon and Pilar Herrero}, Month = nov, Pages = {1165--1182}, Project = {lion}, Publisher = {Springer}, Series = LNCS, Title = {Heterogeneity and Context in Semantic-Web-Enabled {HCLS} Systems}, Type = CONF, Url = {http://www.polleres.net/publications/zimm-etal-2009.pdf}, Volume = {5871}, Year = 2009, Bdsk-Url-1 = {http://www.polleres.net/publications/zimm-etal-2009.pdf}}
@inproceedings{iann-etal-2009iswc, Abstract = {RDF Schema (RDFS) as a lightweight ontology language is gaining popularity and, consequently, tools for scalable RDFS inference and querying are needed. SPARQL has recently become a W3C standard for querying RDF data, but it mostly provides means for querying simple RDF graphs only, whereas querying with respect to RDFS or other entailment regimes is left outside the current specification. In this paper, we show that SPARQL faces certain unwanted ramifications when querying ontologies in conjunction with RDF datasets that comprise multiple named graphs, and we provide an extension for SPARQL that remedies these effects. Moreover, since RDFS inference has a close relationship with logic rules, we generalize our approach to select a custom ruleset for specifying inferences to be taken into account in a SPARQL query. We show that our extensions are technically feasible by providing benchmark results for RDFS querying in our prototype system GiaBATA, which uses Datalog coupled with a persistent Relational Database as a back-end for implementing SPARQL with dynamic rule-based inference. By employing different optimization techniques like magic set rewriting our system remains competitive with state-of-the-art RDFS querying systems.}, Address = {Washington DC, USA}, Author = {Giovambattista Ianni and Thomas Krennwallner and Alessandra Martello and Axel Polleres}, Booktitle = {Proceedings of the 8th International Semantic Web Conference (ISWC 2009)}, Day = {25--29}, Editor = {Abraham Bernstein and David R. Karger and Tom Heath and Lee Feigenbaum and Diana Maynard and Enrico Motta and Krishnaprasad Thirunarayan}, Month = oct, Pages = {310--327}, Project = {lion}, Publisher = {Springer}, Series = LNCS, Title = {Dynamic Querying of Mass-Storage RDF Data with Rule-Based Entailment Regimes}, Type = CONF, Url = {http://www.polleres.net/publications/iann-etal-2009iswc.pdf}, Volume = {5823}, Year = 2009, Bdsk-Url-1 = {http://www.polleres.net/publications/iann-etal-2009iswc.pdf}}
@inproceedings{corl-etal-2009iswc, Abstract = {Currently a large number of Web sites are driven by Content Management Systems (CMS) which manage textual and multimedia content but also - inherently - carry valuable information about a site's structure and content model. Exposing this structured information to the Web of Data has so far required considerable expertise in RDF and OWL modelling and additional programming effort. In this paper we tackle one of the most popular CMS: Drupal. We enable site administrators to export their site content model and data to the Web of Data without requiring extensive knowledge on Semantic Web technologies. Our modules create RDFa annotations and -- optionally -- a SPARQL endpoint for any Drupal site out of the box. Likewise, we add the means to map the site data to existing ontologies on the Web with a search interface to find commonly used ontology terms. We also allow a Drupal site administrator to include existing RDF data from remote SPARQL endpoints on the Web in the site. When brought together, these features allow networked RDF Drupal sites that reuse and enrich Linked Data. We finally discuss the adoption of our modules and report on a use case in the biomedical field and the current status of its deployment.}, Address = {Washington DC, USA}, Author = {St{\'e}phane Corlosquet and Renaud Delbru and Tim Clark and Axel Polleres and Stefan Decker}, Booktitle = {Proceedings of the 8th International Semantic Web Conference (ISWC 2009)}, Day = {25--29}, Editor = {Abraham Bernstein and David R. Karger and Tom Heath and Lee Feigenbaum and Diana Maynard and Enrico Motta and Krishnaprasad Thirunarayan}, Month = oct, Note = {\textbf{Best paper award In-Use track}}, Pages = {763--778}, Project = {lion}, Publisher = {Springer}, Series = LNCS, Title = {Produce and Consume Linked Data with Drupal!}, Type = CONF, Url = {http://www.polleres.net/publications/corl-etal-2009iswc.pdf}, Volume = {5823}, Year = 2009, Bdsk-Url-1 = {http://www.polleres.net/publications/corl-etal-2009iswc.pdf}}
@inproceedings{pass-etal-2009, Abstract = {Based on our recent observations at the 7th International Semantic Web Conference and related workshops such as ``Social Data on the Web'', as well as other frequent discussion threads on the Web, trust and privacy on the Social Web remains a hot, yet unresolved topic. Indeed, while Web 2.0 helped people to easily produce data, it led to various issues regarding how to protect and trust this data, especially when it comes to personal data. On the one hand, we are wondering how to protect our private information online, above all when this information is re-used to our disadvantage. On the other hand, information should not only be protected when being published by its owners, but tools should also help users to assess the trustworthiness of third-party information online. Based on our recent research, from both a theoretical and practical point of view, we think that Semantic Web technologies can provide at least partial solutions to enable a ``trust and privacy layer'' on top of the Social Web. Hence, this position paper will present our work on the topic, which is, in our opinion, also particularly relevant to the mobile Web community, given the advances of ubiquitous Social Networking with, e.g., microblogging from mobile devices.}, Address = {Barcelona, Spain}, Author = {Alexandre Passant and Philipp K{\"a}rger and Michael Hausenblas and Daniel Olmedilla and Axel Polleres and Stefan Decker}, Booktitle = {W3C Workshop on the Future of Social Networking}, Day = {15--16}, Month = JAN, Title = {Enabling Trust and Privacy on the Social Web}, Type = WS, Url = {http://www.w3.org/2008/09/msnws/papers/trustprivacy.html}, Year = 2009, Bdsk-Url-1 = {http://www.w3.org/2008/09/msnws/papers/trustprivacy.html}}
@inproceedings{bres-etal-2009, Abstract = {Sensors have begun to infiltrate people's everyday lives. They can provide information about a car's condition, can enable smart buildings, and are being used in various mobile applications, to name a few. Generally, sensors provide information about various aspects of the real world. Online social networks, another emerging trend over the past six or seven years, can provide insights into the communication links and patterns between people. They have enabled novel developments in communications as well as transforming the Web from a technical infrastructure to a social platform, very much along the lines of the original Web as proposed by Tim Berners-Lee, which is now often referred to as the Social Web. In this position paper, we highlight some of the interesting research areas where sensors and social networks can fruitfully interface, from sensors providing contextual information in context-aware and personalized social applications, to using social networks as ``storage infrastructures'' for sensor information.}, Address = {Barcelona, Spain}, Author = {John Breslin and Stefan Decker and Manfred Hauswirth and Gearoid Hynes and Danh Le Phuoc and Alexandre Passant and Axel Polleres and Cornelius Rabsch and Vinny Reynolds}, Booktitle = {W3C Workshop on the Future of Social Networking}, Day = {15--16}, Month = JAN, Title = {Integrating Social Networks and Sensor Networks}, Type = WS, Url = {http://www.w3.org/2008/09/msnws/papers/sensors.html}, Year = 2009, Bdsk-Url-1 = {http://www.w3.org/2008/09/msnws/papers/sensors.html}}
@misc{poll-etal-2009-xsparql-language-specification, Abstract = {XSPARQL is a query language combining XQuery and SPARQL for transformations between RDF and XML. XSPARQL subsumes XQuery and most of SPARQL (excluding ASK and DESCRIBE). This document defines the XSPARQL language.}, Author = {Axel Polleres and Thomas Krennwallner and Nuno Lopes and Jacek Kopeck{\'y} and Stefan Decker}, Day = 20, Month = JAN, Note = {W3C member submission}, Title = {{XSPARQL Language Specification}}, Url = {http://www.w3.org/Submission/xsparql-language-specification/}, Year = 2009, Bdsk-Url-1 = {http://www.w3.org/Submission/xsparql-language-specification/}}
@misc{kren-etal-2009-xsparql-semantics, Abstract = {XSPARQL is a query language combining XQuery and SPARQL for transformations between RDF and XML. This document defines the semantics of XSPARQL.}, Author = {Thomas Krennwallner and Nuno Lopes and Axel Polleres}, Day = 20, Month = JAN, Note = {W3C member submission}, Title = {{XSPARQL: Semantics}}, Url = {http://www.w3.org/Submission/xsparql-semantics/}, Year = 2009, Bdsk-Url-1 = {http://www.w3.org/Submission/xsparql-semantics/}}
@misc{lope-etal-2009-xsparql-implementation, Abstract = {XSPARQL is a query language combining XQuery and SPARQL for transformations between RDF and XML. This document provides a description of a prototype implementation of the language based on off-the-shelf XQuery and SPARQL engines. Along with a high-level description of the prototype the document presents a set of test queries and their expected output which are to be understood as illustrative help for possible other implementers.}, Author = {Nuno Lopes and Thomas Krennwallner and Axel Polleres and Waseem Akhtar and St{\'e}phane Corlosquet}, Day = 20, Month = JAN, Note = {W3C member submission}, Title = {{XSPARQL: Implementation and Test-cases}}, Url = {http://www.w3.org/Submission/xsparql-implementation/}, Year = 2009, Bdsk-Url-1 = {http://www.w3.org/Submission/xsparql-implementation/}}
@misc{pass-etal-2009-xsparql-use-cases, Abstract = {XSPARQL is a query language combining XQuery and SPARQL for transformations between RDF and XML. This document contains an overview of XSPARQL use cases within various scenarios.}, Author = {Alexandre Passant and Jacek Kopeck{\'y} and St{\'e}phane Corlosquet and Diego Berrueta and Davide Palmisano and Axel Polleres}, Day = 20, Month = JAN, Note = {W3C member submission}, Title = {{XSPARQL: Use cases}}, Url = {http://www.w3.org/Submission/xsparql-use-cases/}, Year = 2009, Bdsk-Url-1 = {http://www.w3.org/Submission/xsparql-use-cases/}}
@proceedings{haus-etal-2009-spot, Abstract = {Workshop Proceedings. This workshop was co-located with ESWC 2009.}, Address = {Heraklion, Greece}, Booktitle = {SPOT2009}, Day = 1, Editor = {Michael Hausenblas and Philipp K{\"a}rger and Daniel Olmedilla and Alexandre Passant and Axel Polleres}, Month = JUN, Publisher = {CEUR-WS.org}, Series = {CEUR Workshop Proceedings}, Title = {Proceedings of the First Workshop on Trust and Privacy on the Social and Semantic Web (SPOT2009)}, Type = BOOK, Url = {http://CEUR-WS.org/Vol-447/}, Volume = {447}, Year = 2009, Bdsk-Url-1 = {http://CEUR-WS.org/Vol-447/}}
@inproceedings{corl-etal-2009, Abstract = {A large number of web sites are driven by content management systems (CMS), which manage not only textual content but also structured data related to the site's topic. Exposing this information to the Web of Data has so far required considerable expertise in RDF modelling and programming. We present a plugin for the popular CMS Drupal that enables high-quality RDF output with minimal effort from site administrators. This has the potential of greatly increasing the amount and topical range of information available on the Web of Data.}, Address = {Heraklion, Greece}, Author = {St{\'e}phane Corlosquet and Richard Cyganiak and Axel Polleres and Stefan Decker}, Booktitle = {5th Workshop on Scripting and Development for the Semantic Web}, Day = 31, Month = may, Title = {RDFa in Drupal: Bringing Cheese to the Web of Data}, Url = {http://www.polleres.net/publications/corl-etal-2009.pdf}, Year = 2009, Bdsk-Url-1 = {http://www.polleres.net/publications/corl-etal-2009.pdf}}
@article{hoga-etal-2009-ijswis, Abstract = {In this paper we discuss the challenges of performing reasoning on large scale RDF datasets from the Web. Using ter-Horst's pD* fragment of OWL as a base, we compose a rule-based framework for application to web data: we argue our decisions using observations of undesirable examples taken directly from the Web. We further temper our OWL fragment through consideration of ``authoritative sources'' which counteracts an observed behaviour which we term ``ontology hijacking'': new ontologies published on the Web re-defining the semantics of existing entities resident in other ontologies. We then present our system for performing rule-based forward-chaining reasoning which we call SAOR: Scalable Authoritative OWL Reasoner. Based upon observed characteristics of web data and reasoning in general, we design our system to scale: our system is based upon a separation of terminological data from assertional data and comprises a lightweight in-memory index, on-disk sorts and file-scans. We evaluate our methods on a dataset in the order of a hundred million statements collected from real-world Web sources and present scale-up experiments on a dataset in the order of a billion statements collected from the Web.}, Author = {Aidan Hogan and Andreas Harth and Axel Polleres}, Journal = {International Journal on Semantic Web and Information Systems (IJSWIS)}, Number = 2, Pages = {49--90}, Publisher = {IGI Global}, Title = {Scalable Authoritative OWL Reasoning for the Web}, Type = JOURNAL, Url = {https://aran.library.nuigalway.ie/bitstream/handle/10379/4891/DERI-TR-2009-04-21.pdf}, Volume = 5, Year = 2009, Bdsk-Url-1 = {https://aran.library.nuigalway.ie/bitstream/handle/10379/4891/DERI-TR-2009-04-21.pdf}}
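As an aside for readers unfamiliar with the rule-based forward chaining used in the SAOR work above: the following is a minimal sketch in Python, not SAOR's actual implementation, of a fixpoint computation for a single RDFS-style rule, keeping the terminological part (subclass axioms) separate from the assertional part in the spirit of the separation the abstract describes. All identifiers and sample data are invented.

# Toy forward-chaining sketch (illustration only, not SAOR): propagate
# rdf:type statements along rdfs:subClassOf until a fixpoint is reached.
RDF_TYPE = "rdf:type"
SUBCLASS = "rdfs:subClassOf"

def saturate(triples):
    """Apply (?s rdf:type ?C), (?C rdfs:subClassOf ?D) => (?s rdf:type ?D)."""
    facts = set(triples)
    changed = True
    while changed:
        changed = False
        # Terminological part, kept separate from the assertional part:
        subclass_axioms = [(c, d) for (c, p, d) in facts if p == SUBCLASS]
        for (s, p, c) in list(facts):
            if p != RDF_TYPE:
                continue
            for (c2, d) in subclass_axioms:
                if c == c2 and (s, RDF_TYPE, d) not in facts:
                    facts.add((s, RDF_TYPE, d))
                    changed = True
    return facts

example = {("ex:alice", RDF_TYPE, "ex:Student"),
           ("ex:Student", SUBCLASS, "ex:Person")}
assert ("ex:alice", RDF_TYPE, "ex:Person") in saturate(example)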
@inproceedings{iann-etal-2009-GiaBATA, Abstract = {In this demo we present GiaBATA, a system for storing, aggregating, and querying Semantic Web data, based on declarative logic programming technology, namely on the dlvhex system, which allows us to implement a fully SPARQL compliant semantics, and on DLVDB, which extends the DLV system with persistent storage capabilities. Compared with off-the-shelf RDF stores and SPARQL engines, we offer more flexible support for rule-based RDFS and other higher entailment regimes by enabling custom reasoning via rules, and the possibility to choose the reference ontology on a per-query basis. Due to the declarative approach, GiaBATA gains the possibility to apply well-known logic-level optimization features of logic programming (LP) and deductive database systems. Moreover, our architecture allows for extensions of SPARQL by non-standard features such as aggregates, custom built-ins, or arbitrary rulesets. With the resulting system we provide a flexible toolbox that embeds Semantic Web data and ontologies in a fully declarative LP environment.}, Address = {Heraklion, Greece}, Author = {Giovambattista Ianni and Thomas Krennwallner and Alessandra Martello and Axel Polleres}, Booktitle = {Proceedings of the 6th European Semantic Web Conference (ESWC2009)}, Day = {31--4}, Month = MAY, Note = {Demo Paper}, Projects = {incontext,lion2}, Publisher = {Springer}, Title = {A Rule System for Querying Persistent RDFS Data}, Type = OTHER, Url = {http://www.polleres.net/publications/iann-etal-2009-GiaBATA.pdf}, Year = 2009, Bdsk-Url-1 = {http://www.polleres.net/publications/iann-etal-2009-GiaBATA.pdf}}
@inproceedings{leph-etal-2009, Abstract = {The use of RDF data published on the Web for applications is still a cumbersome and resource-intensive task due to the limited software support and the lack of standard programming paradigms to deal with everyday problems such as the combination of RDF data from different sources, object identifier consolidation, ontology alignment and mediation, or plain querying and processing tasks. While in many other areas such tasks are supported by excellent libraries and component-oriented toolboxes of basic processing functionalities, RDF-based Web applications are still largely customized programs for a specific purpose, with little potential for reuse. This increases development costs and leads to a more error-prone development process. Speaking in software engineering terms, this means that a standard architectural style with good support for rapid application development is still missing. In this paper we present a framework based on the classical abstraction of pipes which tries to remedy this problem and support the fast implementation of software, while preserving desirable properties such as abstraction, encapsulation, component-orientation, code re-usability and maintainability, which are common and well supported in other application areas.}, Address = {Madrid, Spain}, Author = {Danh Le Phuoc and Axel Polleres and Giovanni Tummarello and Christian Morbidoni and Manfred Hauswirth}, Booktitle = {Proceedings of the 18th World Wide Web Conference (WWW2009)}, Day = {20--24}, Month = apr, Pages = {581--590}, Projects = {lion2}, Publisher = {ACM Press}, Title = {Rapid Semantic Web Mashup Development through Semantic Web Pipes}, Type = CONF, Url = {http://www2009.org/proceedings/pdf/p581.pdf}, Year = 2009, Bdsk-Url-1 = {http://www2009.org/proceedings/pdf/p581.pdf}}
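The pipes abstraction described in this paper amounts to composing operators that consume and produce RDF graphs. Purely as an illustration of that idea (the operator names below are invented, not taken from the paper), a minimal Python sketch using rdflib:

from rdflib import Graph

# Toy pipe operators: each one consumes and produces RDF graphs.
def fetch(ntriples):
    g = Graph()
    g.parse(data=ntriples, format="nt")
    return g

def merge(*graphs):
    out = Graph()
    for g in graphs:
        for triple in g:
            out.add(triple)
    return out

def filter_predicate(g, predicate_uri):
    out = Graph()
    for (s, p, o) in g:
        if str(p) == predicate_uri:
            out.add((s, p, o))
    return out

# A pipe is plain composition; its output could in turn be republished.
src1 = fetch('<http://example.org/a> <http://example.org/p> "x" .')
src2 = fetch('<http://example.org/b> <http://example.org/q> "y" .')
print(len(filter_predicate(merge(src1, src2), "http://example.org/p")))  # 1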
@incollection{poll-moch-2009, Abstract = {In this contribution we discuss a framework for combining, reusing and extending existing RDF vocabularies in the Social Semantic Web. We concentrate on the application scenario of finding and advertising experts on the Web or in an intranet. We present how RDF vocabularies that enjoy growing adoption in the Semantic Web on the one hand, and de facto standard formats used by everyday applications (e.g., vCard, iCal or Dublin Core) on the other hand, can be combined to solve concrete use cases of expert search and expertise management. Our focus is on showing that practical application scenarios do not necessarily require the development of new ontologies; rather, the key lies in integrating existing, widely used and mutually complementary formats into a coherent network of ontologies. This approach guarantees both direct applicability and low entry barriers for Semantic Web technologies, as well as easy integration into existing applications. The RDF formats available and used on the Web cover a large share of the aspects needed to describe persons and expertise, but also show significant overlaps. So far, there are few systematic approaches for connecting these vocabularies, be it in the form of generally accepted practices defining when to use which format, or in the form of rules formalizing the overlaps between individual formats. This article analyses how existing formats for describing persons, organizations and their expertise can be combined and, where necessary, extended. In addition, we discuss rule languages for describing format overlaps, as well as their practical usability for creating an ontology network for describing experts.}, Author = {Axel Polleres and Malgorzata Mochol}, Booktitle = {Social Semantic Web}, Editor = {Andreas Blumauer and Tassilo Pellegrini}, Isbn = {978-3-540-72215-1}, Note = {in German}, Pages = {175--206}, Project = {incontext,lion}, Publisher = {Springer}, Title = {{Expertise bewerben und finden im Social Semantic Web}}, Type = BC, Url = {http://www.polleres.net/publications/poll-moch-2008.pdf}, Year = 2009, Bdsk-Url-1 = {http://www.polleres.net/publications/poll-moch-2008.pdf}}
@proceedings{debr-etal-2008-alpsws, Abstract = {Workshop Proceedings. This workshop was co-located with ICLP 2008.}, Address = {Udine, Italy}, Booktitle = {ALPSWS2008}, Day = 12, Editor = {Jos de Bruijn and Stijn Heymans and David Pearce and Axel Polleres and Edna Ruckhaus}, Month = DEC, Publisher = {CEUR-WS.org}, Series = {CEUR Workshop Proceedings}, Title = {Proceedings of the 3rd International Workshop on Applications of Logic Programming to the (Semantic) Web and Web Services (ALPSWS2008)}, Type = BOOK, Url = {http://CEUR-WS.org/Vol-434/}, Volume = {434}, Year = 2008, Bdsk-Url-1 = {http://CEUR-WS.org/Vol-434/}}
@article{euze-etal-2008b, Abstract = {We propose to extend the SPARQL query language to express mappings between ontologies. We use SPARQL queries as a mechanism for translating RDF data of one ontology to another. Such functionality lets users exploit instance data described in one ontology while they work with an application that's been designed for another. An example translation of FOAF (friend-of-a-friend) files into vCards shows how to use queries to extract data from the source ontology and generate new data for the target ontology.}, Author = {J{\'e}r{\^o}me Euzenat and Axel Polleres and Fran\c{c}ois Scharffe}, Journal = {IEEE Intelligent Systems}, Month = nov, Note = {Appeared as part of the article ``Making Ontologies Talk: Knowledge Interoperability in the Semantic Web'', Monika Lanzenberger and Jennifer Sampson (eds.)}, Number = 6, Pages = {82--84}, Publisher = {IEEE Computer Society}, Title = {{SPARQL} Extensions for Processing Alignments}, Type = MAGAZINE, Doi = {10.1109/MIS.2008.108}, Url = {http://doi.ieeecomputersociety.org/10.1109/MIS.2008.108}, Volume = {23}, Year = 2008, Bdsk-Url-1 = {http://doi.ieeecomputersociety.org/10.1109/MIS.2008.108}}
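The FOAF-to-vCard translation this abstract refers to can already be approximated with a plain SPARQL CONSTRUCT query (the extensions proposed in the article go beyond this). Below is a minimal sketch using Python's rdflib; the vCard property and sample data are assumptions for illustration, not taken from the article:

from rdflib import Graph

g = Graph()
g.parse(data='<http://example.org/alice> '
             '<http://xmlns.com/foaf/0.1/name> "Alice" .', format="nt")

# CONSTRUCT extracts data described in the source ontology (FOAF)
# and generates new data for the target ontology (vCard).
query = """
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX vc:   <http://www.w3.org/2006/vcard/ns#>
CONSTRUCT { ?person vc:fn ?name }
WHERE     { ?person foaf:name ?name }
"""
for triple in g.query(query):  # a CONSTRUCT result iterates as triples
    print(triple)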
@inproceedings{kaer-etal-2008, Abstract = {Logic Programming paradigms that allow for expressing preferences have drawn a lot of research interest over the last few years. Among them, the principle of ordered disjunction was developed to express totally ordered preferences for the alternatives in rule heads. In this paper we introduce an extension of this approach called Disjunctive Logic Programs with Ordered Disjunction (DLPOD) that combines ordered disjunction with common disjunction in rule heads. By this extension, we enhance the preference notions expressible with totally ordered disjunctions to partially ordered preferences. Furthermore, we show that computing optimal stable models for DLPODs still stays in $\Sigma_2^p$ for head-cycle free programs and establish $\Sigma_3^p$ upper bounds for the general case.}, Author = {Philipp K{\"a}rger and Nuno Lopes and Daniel Olmedilla and Axel Polleres}, Booktitle = {Workshop on Answer Set Programming and Other Computing Paradigms (ASPOCP 2008)}, Month = dec, Title = {Towards Logic Programs with Ordered and Unordered Disjunction}, Type = WS, Url = {http://www.polleres.net/publications/kaer-etal-2008.pdf}, Year = 2008, Bdsk-Url-1 = {http://www.polleres.net/publications/kaer-etal-2008.pdf}}
@inproceedings{hoga-etal-2008, Abstract = {In this paper we discuss the challenges of performing reasoning on large scale RDF datasets from the Web. We discuss issues and practical solutions relating to reasoning over web data using a rule-based approach to forward-chaining; in particular, we identify the problem of ontology hijacking: new ontologies published on the Web re-defining the semantics of existing concepts resident in other ontologies. Our solution introduces consideration of authoritative sources. Our system is designed to scale, comprising file-scans and selected lightweight on-disk indices. We evaluate our methods on a dataset in the order of a hundred million statements collected from real-world Web sources.}, Address = {Bangkok, Thailand}, Author = {Aidan Hogan and Andreas Harth and Axel Polleres}, Booktitle = {Proceedings of the 3rd Asian Semantic Web Conference (ASWC 2008)}, Day = {8--11}, Editor = {John Domingue and Chutiporn Anutariya}, Month = DEC, Pages = {76--90}, Project = {incontext,lion}, Series = LNCS, Title = {{SAOR: Authoritative Reasoning for the Web}}, Type = CONF, Url = {http://www.polleres.net/publications/hoga-etal-2008.pdf}, Volume = 5367, Year = 2008, Bdsk-Url-1 = {http://www.polleres.net/publications/hoga-etal-2008.pdf}}
@inproceedings{hoga-etal-2008SWC, Abstract = {In this paper we present a scalable algorithm for performing a subset of OWL reasoning over web data using a rule-based approach to forward-chaining; in particular, we identify the problem of ontology hijacking: new ontologies published on the Web re-defining the semantics of existing concepts resident in other ontologies. Our solution introduces consideration of authoritative sources. We present the results of applying our methods on a re-crawl of the billion triple challenge dataset.}, Address = {Karlsruhe, Germany}, Author = {Aidan Hogan and Andreas Harth and Axel Polleres}, Booktitle = {ISWC2008 Semantic Web Challenge 2008 -- Billion Triples Track}, Day = {26--30}, Month = oct, Title = {Scalable Authoritative {OWL} Reasoning on a Billion Triples}, Type = DEMO, Url = {http://www.cs.vu.nl/~pmika/swc-2008/SAOR-Scalable%20Authoritative%20OWL%20Reasoning-billiontc.pdf}, Year = 2008, Bdsk-Url-1 = {http://www.cs.vu.nl/~pmika/swc-2008/SAOR-Scalable%20Authoritative%20OWL%20Reasoning-billiontc.pdf}}
@proceedings{moch-etal-2008, Abstract = {Workshop Proceedings. This workshop was co-located with ISWC 2008. The Semantic Web, Social Networks and other emerging technology streams promise to enable finding experts more efficiently on a Web scale across boundaries. To leverage synergies among these streams, the ExpertFinder Initiative started in 2006 with the aim of devising vocabularies, rule extensions (e.g., for FOAF and SIOC) and best practices to annotate and extract expertise-relevant information from personal and organizational web pages, blogs, wikis, conferences, publication indexes, etc. Following two previous workshops, EFW and FEWS, PICKME 2008 solicited new research contributions from the Semantic Web community towards the tasks of formally representing and reusing knowledge of skills and collaborations on the Web and consequently finding people according to their expertise.}, Address = {Karlsruhe, Germany}, Booktitle = {PICKME 2008}, Day = 27, Editor = {Malgorzata Mochol and Anna V. Zhdanova and Lyndon Nixon and John Breslin and Axel Polleres}, Month = OCT, Publisher = {CEUR-WS.org}, Series = {CEUR Workshop Proceedings}, Title = {Proceedings of the 3rd Expert Finder Workshop on Personal Identification and Collaborations: Knowledge Mediation and Extraction (PICKME 2008)}, Type = BOOK, Url = {http://CEUR-WS.org/Vol-403/}, Volume = {403}, Year = 2008, Bdsk-Url-1 = {http://CEUR-WS.org/Vol-403/}}
@inproceedings{delb-etal-2008, Abstract = {The Sindice Semantic Web index currently provides search capabilities over more than 30 million documents. A scalable reasoning mechanism for real-world web data is important in order to increase the precision and recall of the Sindice index by inferring useful information (e.g. RDF Schema features, equality, property characteristics such as inverse functional properties, or annotation properties from OWL). In this paper, we introduce our notion of context dependent reasoning for RDF documents published on the Web according to the linked data principle. We then illustrate an efficient methodology to perform context dependent RDFS and partial OWL inference based on a persistent TBox composed of a network of web ontologies. Finally we report preliminary evaluation results of our implementation underlying the Sindice web data index.}, Address = {Karlsruhe, Germany}, Author = {Renaud Delbru and Axel Polleres and Giovanni Tummarello and Stefan Decker}, Booktitle = {Proceedings of the 4th International Workshop on Scalable Semantic Web Knowledge Base Systems (SSWS 2008)}, Day = {26--30}, Month = OCT, Project = {lion}, Talk = {Axel Polleres}, Title = {Context Dependent Reasoning for Semantic Documents in Sindice}, Type = WS, Url = {http://www.polleres.net/publications/delb-etal-2008.pdf}, Year = 2008, Bdsk-Url-1 = {http://www.polleres.net/publications/delb-etal-2008.pdf}}
@inproceedings{fern-etal-2008, Abstract = {Whereas the number of services that are provided online is growing rapidly, current service discovery approaches seem to have problems fulfilling their objectives. These existing approaches are hampered by the complexity of underlying semantic service models and by the fact that they try to impose a technical vocabulary on users. This leads to what we call the service discovery gap. In this paper we envision an approach that allows users first to query or browse services using free text tags, thus providing an interface in terms of the users' vocabulary instead of the service's vocabulary. Unlike simple keyword search, we envision tag clouds associated with services themselves as semantic descriptions carrying collaborative knowledge about the service that can be clustered hierarchically, forming lightweight ``ontologies''. Besides tag-based discovery only describing the service on a global view, we envision refined tags and refined search/discovery in terms of the concepts that are common to all current semantic service description models, i.e. input, output, and operation. We argue that service matching can be achieved by applying tag-cloud-based service similarity on the one hand and by clustering services using case-based indexing and retrieval techniques on the other hand.}, Address = {Karlsruhe, Germany}, Author = {Alberto Fernandez and Conor Hayes and Nikos Loutas and Vassilios Peristeras and Axel Polleres and Konstantinos Tarabanis}, Booktitle = {Proceedings of the 2nd International Workshop on Service Matchmaking and Resource Retrieval in the Semantic Web (SMR$^\textrm{2}$ 2008)}, Day = 27, Month = OCT, Project = {incontext,lion,swos}, Talk = {Alberto Fernandez}, Title = {{Closing the Service Discovery Gap by Collaborative Tagging and Clustering Techniques}}, Type = WS, Url = {http://www.polleres.net/publications/fern-etal-2008.pdf}, Year = 2008, Bdsk-Url-1 = {http://www.polleres.net/publications/fern-etal-2008.pdf}}
@book{baro-etal-2008, Abstract = {The Reasoning Web summer school series is a well-established event, attracting experts from academia and industry as well as PhD students interested in foundational and applied aspects of the Semantic Web. This volume contains the lecture notes of the fourth edition, which took place in Venice, Italy, in September 2008. This year, the school focused on some important application domains where Semantic Web techniques have proved particularly effective or promising in tackling application needs.}, Address = {San Servolo Island, Venice, Italy}, Day = {7--11}, Editor = {Cristina Baroglio and Piero A. Bonatti and Jan Maluszynski and Massimo Marchiori and Axel Polleres and Sebastian Schaffert}, Month = SEP, Publisher = {Springer}, Series = LNCS, Title = {Reasoning Web 2008}, Type = BOOK, Url = {http://www.springer.com/computer/database+management+%26+information+retrieval/book/978-3-540-85656-6}, Volume = 5224, Year = 2008, Bdsk-Url-1 = {http://www.springer.com/computer/database+management+%26+information+retrieval/book/978-3-540-85656-6}}
@proceedings{klusch-etal-2008, Abstract = {The objective of the international workshop series on cooperative information agents (CIA), since its establishment in 1997, is to provide a distinguished, interdisciplinary forum for researchers, programmers, and managers to get informed about, present, and discuss the latest high-quality results in research and development of agent-based intelligent and cooperative information systems, and applications for the Internet, Web and Semantic Web. Each event of the series offers regular and invited talks of excellence given by renowned experts in the field, a selected set of system demonstrations, and honors innovative research and development of information agents by means of a best paper award and a system innovation award, respectively. The proceedings of the series are regularly published as volumes of the Lecture Notes in Artificial Intelligence (LNAI) series of Springer Verlag. In keeping with its tradition, this year's workshop featured a sequence of regular and invited talks of excellence given by leading researchers covering a broad range of topics of interest. In particular, CIA 2008 featured five invited and nineteen regular papers selected from thirty-eight submissions. The peer-reviewed contributions included in this volume are, as we think, again rich in interesting, inspiring, and advanced work on research and development of intelligent information agents worldwide.}, Address = {Prague, Czech Republic}, Booktitle = {CIA 2008}, Day = {10--12}, Editor = {Matthias Klusch and Michal Pechoucek and Axel Polleres}, Month = SEP, Publisher = {Springer}, Series = LNCS, Title = {Cooperative Information Agents XII}, Type = BOOK, Url = {http://www.springer.com/computer/artificial/book/978-3-540-85833-1}, Volume = {5180}, Year = 2008, Bdsk-Url-1 = {http://www.springer.com/computer/artificial/book/978-3-540-85833-1}}
@incollection{eite-etal-2008, Abstract = {Rules and ontologies play a key role in the layered architecture of the Semantic Web, as they are used to ascribe meaning to, and to reason about, data on the Web. While the Ontology Layer of the Semantic Web is quite developed, and the Web Ontology Language (OWL) has been a W3C recommendation for several years already, the rules layer is far less developed and an active area of research; a number of initiatives and proposals have been made so far, but no standard has been released yet. Many rule engine implementations exist which deal with Semantic Web data in one way or another. This article gives a comprehensive, although not exhaustive, overview of such systems, describes their supported languages, and sets them in relation to theoretical approaches for combining rules and ontologies as foreseen in the Semantic Web architecture. In the course of this, we identify desired properties and common features of rule languages and evaluate existing systems against their support. Furthermore, we review technical problems underlying the integration of rules and ontologies, and classify representative proposals for theoretical integration approaches into different categories.}, Address = {San Servolo Island, Venice, Italy}, Author = {Thomas Eiter and Giovambattista Ianni and Thomas Krennwallner and Axel Polleres}, Booktitle = {Reasoning Web 2008}, Day = {7--11}, Editor = {Cristina Baroglio and Piero A. Bonatti and Jan Maluszynski and Massimo Marchiori and Axel Polleres and Sebastian Schaffert}, Month = SEP, Pages = {1--53}, Project = {incontext,lion}, Publisher = {Springer}, Series = LNCS, Title = {Rules and Ontologies for the Semantic Web}, Type = BC, Url = {http://www.polleres.net/publications/eite-etal-2008.pdf}, Volume = 5224, Year = 2008, Bdsk-Url-1 = {http://www.polleres.net/publications/eite-etal-2008.pdf}}
@inproceedings{akht-etal-2008, Abstract = {With currently available tools and languages, translating between an existing XML format and RDF is a tedious and error-prone task. The importance of this problem is acknowledged by the W3C GRDDL working group, which faces the issue of extracting RDF data out of existing HTML or XML files, as well as by the Web service community around SAWSDL, which needs to perform lowering and lifting between RDF data from a semantic client and XML messages for a Web service. However, at the moment, both these groups rely solely on XSLT transformations between RDF/XML and the respective other XML format at hand. In this paper, we propose a more natural approach for such transformations based on merging XQuery and SPARQL into the novel language XSPARQL. We demonstrate that XSPARQL provides concise and intuitive solutions for mapping between XML and RDF in either direction, addressing both the use cases of GRDDL and SAWSDL. We also provide and describe an initial implementation of an XSPARQL engine, available for user evaluation.}, Address = {Tenerife, Spain}, Author = {Waseem Akhtar and Jacek Kopeck{\'y} and Thomas Krennwallner and Axel Polleres}, Booktitle = {Proceedings of the 5th European Semantic Web Conference (ESWC2008)}, Day = {1--5}, Month = JUN, Note = {\textbf{Nominated for best paper award}}, Pages = {432--447}, Projects = {incontext,lion}, Publisher = {Springer}, Series = LNCS, Talk = {Thomas Krennwallner}, Title = {{XSPARQL}: Traveling between the {XML} and {RDF} worlds -- and avoiding the {XSLT} pilgrimage}, Type = CONF, Url = {http://www.polleres.net/publications/akht-etal-2008.pdf}, Volume = 5021, Year = 2008, Bdsk-Url-1 = {http://www.polleres.net/publications/akht-etal-2008.pdf}}
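XSPARQL has its own surface syntax, interleaving XQuery FLWOR expressions with SPARQL graph patterns; no XSPARQL engine is assumed here. The Python sketch below merely mimics the "lowering" direction (RDF to XML) that a single XSPARQL query would express, using rdflib and ElementTree; data and element names are invented:

import xml.etree.ElementTree as ET
from rdflib import Graph

g = Graph()
g.parse(data='<http://example.org/alice> '
             '<http://xmlns.com/foaf/0.1/name> "Alice" .', format="nt")

# "Lowering": select RDF data via SPARQL, then serialize it as XML.
# XSPARQL expresses both steps in a single FLWOR-style query.
rows = g.query("""
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT ?name WHERE { ?person foaf:name ?name }
""")
root = ET.Element("people")
for row in rows:
    ET.SubElement(root, "person", name=str(row.name))
print(ET.tostring(root, encoding="unicode"))  # <people><person name="Alice" /></people>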
@inproceedings{pich-etal-2008, Abstract = {We introduce domain-restricted RDF (dRDF) which allows one to associate an RDF graph with a fixed, finite domain that interpretations for it may range over. We show that dRDF is a real extension of RDF and discuss impacts on the complexity of entailment in dRDF. The entailment problem represents the key reasoning task for RDF and is well known to be NP-complete. Remarkably, we show that the restriction of domains in dRDF raises the complexity of entailment from NP- to $\Pi^P_2$-completeness. In order to lower the complexity of entailment for both domain-restricted and unrestricted graphs, we take a closer look at the graph structure. For cases where the structure of RDF graphs is restricted via the concept of bounded treewidth, we prove that entailment is tractable for unrestricted graphs and coNP-complete for domain-restricted graphs.}, Address = {Tenerife, Spain}, Author = {Reinhard Pichler and Axel Polleres and Fang Wei and Stefan Woltran}, Booktitle = {Proceedings of the 5th European Semantic Web Conference (ESWC2008)}, Day = {1--5}, Month = JUN, Pages = {200--214}, Projects = {incontext,lion}, Publisher = {Springer}, Series = LNCS, Talk = {Axel Polleres}, Title = {Entailment for Domain-restricted {RDF}}, Type = CONF, Url = {http://www.polleres.net/publications/pich-etal-2008.pdf}, Volume = 5021, Year = 2008, Bdsk-Url-1 = {http://www.polleres.net/publications/pich-etal-2008.pdf}}
@inproceedings{morb-etal-2008, Abstract = {In this demo we present a first implementation of Semantic Web Pipes, a powerful tool to build RDF-based mashups. Semantic Web pipes are defined in XML and when executed they fetch RDF graphs on the Web, operate on them, and produce an RDF output which is itself accessible via a stable URL. Humans can also use pipes directly thanks to HTML wrapping of the pipe parameters and outputs. The implementation we will demo includes an online AJAX pipe editor and execution engine. Pipes can be published and combined thus fostering collaborative editing and reuse of data mashups.}, Address = {Tenerife, Spain}, Author = {Christian Morbidoni and Danh Le Phuoc and Axel Polleres and Matthias Samwald and Giovanni Tummarello}, Booktitle = {Proceedings of the 5th European Semantic Web Conference (ESWC2008)}, Day = {1-5}, Month = JUN, Note = {Demo Paper}, Pages = {843--848}, Projects = {incontext,lion}, Publisher = {Springer}, Title = {Previewing Semantic Web Pipes}, Type = OTHER, Url = {http://www.polleres.net/publications/morb-etal-2008.pdf}, Year = 2008, Bdsk-Url-1 = {http://www.polleres.net/publications/morb-etal-2008.pdf}}
@inproceedings{truo-etal-2008, Abstract = {At present, team members have difficulties keeping track of the relations between their various concurrent activities due to the lack of suitable tools supporting context coupling and sharing. Furthermore, collaboration services are hardly aware of the related context of team members and their activities. Such awareness is required to adapt to the dynamics of collaborative teams. In this paper, we discuss the context coupling techniques provided by the inContext project. Utilizing the concept of activity-based context and Web services techniques, we can couple individual and team contexts at runtime, thus improving the context-awareness and adaptation of collaboration services such as email, shared calendars, instant messaging and document management.}, Address = {Lisboa, Portugal}, Author = {Hong-Linh Truong and Christoph Dorn and Giovanni Casella and Axel Polleres and Stephan Reiff-Marganiec and Schahram Dustdar}, Booktitle = {14th International Conference on Concurrent Enterprising (ICE 2008)}, Day = {23--25}, Month = jun, Pages = {225--232}, Title = {inContext: On Coupling and Sharing Context for Collaborative Teams}, Type = CONF, Url = {http://www.cs.le.ac.uk/people/srm13/publications/ice08.pdf}, Year = 2008, Bdsk-Url-1 = {http://www.cs.le.ac.uk/people/srm13/publications/ice08.pdf}}
@inproceedings{truo-etal-2008b, Abstract = {Participants in current team collaborations belong to different organizations, work on multiple objectives at the same time, and frequently change locations. They use different devices and infrastructures in collaboration processes that can last from a few hours to several years. All these factors pose new challenges to the development of collaborative working environments (CWEs). Existing CWEs are unable to support emerging teams because diverse collaboration services are not well integrated or do not adapt to the team context. We present the inContext approach to providing a novel pervasive CWE infrastructure for emerging team forms. inContext aggregates disparate collaboration services using Web services and Semantic Web technologies and provides a platform that captures diverse dynamic aspects of team collaborations. By utilizing runtime and historical context and interaction information, adaptation techniques can be deployed to cope with the changes of emerging teams.}, Address = {Turku, Finland}, Author = {Hong-Linh Truong and Schahram Dustdar and Dino Baggio and St{\'e}phane Corlosquet and Christoph Dorn and Giovanni Giuliani and Robert Gombotz and Yi Hong and Pete Kendal and Christian Melchiorre and Sarit Moretzky and S{\'e}bastien Peray and Axel Polleres and Stephan Reiff-Marganiec and Daniel Schall and Simona Stringa and Marcel Tilly and Hong Qing Yu}, Booktitle = {International Symposium on Applications and the Internet (SAINT 2008)}, Day = {28-1}, Month = jul, Pages = {118--125}, Project = {incontext}, Publisher = {IEEE Computer Society}, Title = {inContext: a Pervasive and Collaborative Working Environment for Emerging Team Forms}, Type = CONF, Url = {http://www.polleres.net/publications/truo-etal-2008.pdf}, Year = 2008, Bdsk-Url-1 = {http://www.polleres.net/publications/truo-etal-2008.pdf}}
@inproceedings{euze-etal-2008, Abstract = {Solving problems raised by heterogeneous ontologies can be achieved by matching the ontologies and processing the resulting alignments. This is typical of data mediation in which the data must be translated from one knowledge source to another. In this position paper we propose to solve the data translation problem, i.e. the processing part, using the SPARQL query language. Indeed, such a language is particularly adequate for extracting data from one ontology and, through its CONSTRUCT statement, for generating new data. We present examples of such transformations, but we also present a set of example correspondences illustrating the need for particular representation constructs, such as aggregates, value-generating built-in functions and paths, which are missing from SPARQL. Hence, we advocate the use of two SPARQL extensions providing these missing features.}, Address = {Barcelona, Spain}, Author = {J{\'e}r{\^o}me Euzenat and Axel Polleres and Fran\c{c}ois Scharffe}, Booktitle = {International Workshop on Ontology Alignment and Visualization - OnAV'08, Proceedings of the Second International Conference on Complex, Intelligent and Software Intensive Systems}, Day = {4--7}, Month = mar, Pages = {913--917}, Publisher = {IEEE Computer Society}, Title = {Processing ontology alignments with {SPARQL}}, Type = WS, Url = {http://www.polleres.net/publications/euze-etal-2008.pdf}, Year = 2008, Bdsk-Url-1 = {http://www.polleres.net/publications/euze-etal-2008.pdf}}
@inproceedings{morb-etal-2007b, Abstract = {In this paper we take a bottom-up view of RDF(S) reasoning. We discuss some issues and requirements on reasoning towards effectively building Semantic Web Pipes, aggregating and patching RDF data from various distributed sources. Even if we leave out complex description logics reasoning and restrict ourselves to the RDF world, it turns out that some problems, in particular how to deal with contradicting RDF statements and patching RDF graphs, do not yet find their proper solutions within the current Semantic Web Stack. Besides theoretical solutions which involve full DL reasoning, we believe that more practical and probably more scalable solutions are conceivable, one of which we discuss in this paper. Namely, we provide means to express revocations in RDF and resolve such revocations by means of a specialized RDF merge procedure. We have implemented this conflict-resolving merge procedure in the DBin 2.0 system.}, Address = {Bari, Italy}, Author = {Christian Morbidoni and Axel Polleres and Giovanni Tummarello}, Booktitle = {{4th Italian Semantic Web Workshop SEMANTIC WEB APPLICATIONS AND PERSPECTIVES (SWAP)}}, Day = {18--20}, Month = dec, Project = {incontext,lion}, Title = {{Who the FOAF knows Alice? RDF Revocation in DBin 2.0}}, Type = WS, Url = {http://www.polleres.net/publications/morb-etal-2007b.pdf}, Year = 2007, Bdsk-Url-1 = {http://www.polleres.net/publications/morb-etal-2007b.pdf}}
@inproceedings{harth-etal-2007, Abstract = {In this position paper we firstly present the established notion of provenance on the Semantic Web (also referred to as named graphs or contexts), and secondly argue for the benefit of adding a social dimension to the purely technical notion of provenance, associating provenance with the originator (typically a person) of a given piece of information.}, Address = {Edinburgh, Scotland}, Author = {Andreas Harth and Axel Polleres and Stefan Decker}, Booktitle = {Workshop on Principles of Provenance (PrOPr)}, Day = {19--20}, Month = nov, Title = {Towards A Social Provenance Model for the Web}, Type = WS, Url = {http://www.polleres.net/publications/harth-etal-2007.pdf}, Year = 2007, Bdsk-Url-1 = {http://www.polleres.net/publications/harth-etal-2007.pdf}}
@inproceedings{poll-etal-2007, Abstract = {Lightweight ontologies in the form of RDF vocabularies such as SIOC, FOAF, vCard, etc. are increasingly being used and exported by ``serious'' applications. Such vocabularies, together with query languages like SPARQL, also allow one to syndicate resulting RDF data from arbitrary Web sources and open the path to finally bringing the Semantic Web to operation mode. Considering, however, that many of the promoted lightweight ontologies overlap, the lack of suitable standards to describe these overlaps in a declarative fashion becomes evident. In this paper we argue that one does not necessarily need to delve into the huge body of research on ontology mapping for a solution, but SPARQL itself might --- with extensions such as external functions and aggregates --- serve as a basis for declaratively describing ontology mappings. We provide the semantic foundations and a path towards implementation for such a mapping language by means of a translation to Datalog with external predicates.}, Address = {Vilamoura, Algarve, Portugal}, Author = {Axel Polleres and Fran\c{c}ois Scharffe and Roman Schindlauer}, Booktitle = {OTM 2007, Part I : Proceedings of the 6th International Conference on Ontologies, DataBases, and Applications of Semantics (ODBASE 2007)}, Day = {27--29}, Month = nov, Pages = {878--896}, Project = {incontext,lion}, Publisher = {Springer}, Series = LNCS, Talk = {Axel Polleres}, Title = {{SPARQL++} for Mapping between {RDF} Vocabularies}, Type = CONF, Url = {http://www.polleres.net/publications/poll-etal-2007.pdf}, Volume = 4803, Year = 2007, Bdsk-Url-1 = {http://www.polleres.net/publications/poll-etal-2007.pdf}}
@inproceedings{morb-etal-2007, Abstract = {In this paper we take a bottom-up view of RDF(S) reasoning. We discuss some issues and requirements on reasoning towards effectively building Semantic Web Pipes, aggregating RDF data from various distributed sources. If we leave out complex description logics reasoning and restrict ourselves to the RDF world, it turns out that some problems, in particular how to deal with contradicting RDF statements, do not yet find their proper solutions within the current Semantic Web Stack. Besides theoretical solutions which involve full DL reasoning, we believe that more practical and probably more scalable solutions are conceivable, one of which we discuss in this paper, namely, expressing and resolving conflicting RDF statements by means of a specialized RDF merge procedure. We implemented this conflict-resolving merge procedure in the DBin system.}, Address = {Busan, Korea}, Author = {Christian Morbidoni and Axel Polleres and Giovanni Tummarello}, Booktitle = {ISWC 2007 Workshop on New forms of Reasoning for the Semantic Web: Scaleable, Tolerant and Dynamic}, Day = 11, Month = nov, Project = {incontext,lion}, Title = {{Who the FOAF knows Alice? A needed step towards Semantic Web Pipes}}, Type = WS, Url = {http://www.polleres.net/publications/morb-etal-2007.pdf}, Year = 2007, Bdsk-Url-1 = {http://www.polleres.net/publications/morb-etal-2007.pdf}}
@proceedings{dino-etal-2007, Abstract = {Workshop Proceedings. This workshop was co-located with ISWC 2007 + ASWC 2007.}, Address = {Busan, Korea}, Booktitle = {SMR$^\textrm{2}$ 2007}, Day = 11, Editor = {Tommaso di Noia and Rub{\'e}n Lara and Axel Polleres and Ioan Toma and Takahiro Kawamura and Matthias Klusch and Abraham Bernstein and Massimo Paolucci and Alain Leger and David Martin}, Month = NOV, Publisher = {CEUR-WS.org}, Series = {CEUR Workshop Proceedings}, Title = {Proceedings of the SMR2 2007 Workshop on Service Matchmaking and Resource Retrieval in the Semantic Web (SMR$^\textrm{2}$ 2007)}, Type = BOOK, Url = {http://CEUR-WS.org/Vol-243/}, Volume = {243}, Year = 2007, Bdsk-Url-1 = {http://CEUR-WS.org/Vol-243/}}
@inproceedings{fern-etal-2007, Abstract = {Several description frameworks to semantically describe and match services on the one hand and service requests on the other have been presented in the literature. Many of the current proposals for defining notions of match between service advertisements and requests are based on subsumption checking in more or less expressive Description Logics, thus providing boolean match functions, rather than a fine-grained, numerical degree of match. By contrast, concept similarity measures investigated in the DL literature explicitly include such a quantitative notion. In this paper we try to take a step forward in this area by means of an analysis of existing approaches from both semantic web service matching and concept similarity, and provide preliminary ideas on how to combine these two building blocks in a unified service selection framework.}, Address = {Busan, Korea}, Author = {Alberto Fernandez and Axel Polleres and Sascha Ossowski}, Booktitle = {Proceedings of the SMR2 2007 Workshop on Service Matchmaking and Resource Retrieval in the Semantic Web (SMR$^\textrm{2}$ 2007)}, Day = 11, Month = NOV, Pages = {31--45}, Project = {incontext,lion,swos}, Publisher = {CEUR-WS.org}, Series = {CEUR Workshop Proceedings}, Title = {{Towards Fine-grained Service Matchmaking by Using Concept Similarity}}, Type = WS, Url = {http://www.polleres.net/publications/fern-etal-2007.pdf}, Volume = {243}, Year = 2007, Bdsk-Url-1 = {http://www.polleres.net/publications/fern-etal-2007.pdf}}
@misc{boja-etal-2007, Abstract = {The SIOC (Semantically-Interlinked Online Communities) Core Ontology provides the main concepts and properties required to describe information from online communities (e.g., message boards, wikis, weblogs, etc.) on the Semantic Web. This document contains a detailed description of the SIOC Core Ontology.}, Author = {Uldis Boj{\={a}}rs and John G.~Breslin and Diego Berrueta and Dan Brickley and Stefan Decker and Sergio Fern{\'a}ndez and Christoph G{\"o}rn and Andreas Harth and Tom Heath and Kingsley Idehen and Kjetil Kjernsmo and Alistair Miles and Alexandre Passant and Axel Polleres and Luis Polo and Michael Sintek}, Day = 12, Month = JUN, Note = {W3C member submission}, Project = {sioc,lion}, Title = {{SIOC Core Ontology Specification}}, Type = W3C, Url = {http://www.w3.org/Submission/sioc-spec/}, Year = 2007, Bdsk-Url-1 = {http://www.w3.org/Submission/sioc-spec/}}
@proceedings{poll-etal-2007-alpsws, Abstract = {Workshop Proceedings. This workshop was co-located with ICLP 2007.}, Address = {Porto, Portugal}, Booktitle = {ALPSWS2007}, Day = 13, Editor = {Stijn Heymans and David Pearce and Axel Polleres and Edna Ruckhaus and Gopal Gupta}, Month = SEP, Publisher = {CEUR-WS.org}, Series = {CEUR Workshop Proceedings}, Title = {ALPSWS2007: 2nd International Workshop on Applications of Logic Programming in the Semantic Web and Semantic Web Services. Proceedings}, Type = BOOK, Url = {http://CEUR-WS.org/Vol-287/}, Volume = {287}, Year = 2007, Bdsk-Url-1 = {http://CEUR-WS.org/Vol-287/}}
@inproceedings{poll-schi-2007, Abstract = {This paper describes the dlvhex SPARQL plugin, a query processor for the upcoming Semantic Web query language standard by W3C. We report on the implementation of this language using dlvhex, a flexible plugin system on top of the DLV solver. This work advances our earlier translation based on the semantics by Perez et al. towards an engine which is fully compliant with the official SPARQL specification. As it turns out, the differences between these two definitions of SPARQL, which might seem moderate at first glance, need some extra machinery. We also briefly report on the status of the implementation, and on extensions currently being implemented, such as handling of aggregates, nested CONSTRUCT queries in the spirit of networked RDF graphs, or partial support of RDFS entailment. For such extensions a tight integration of SPARQL query processing and Answer-Set Programming, the underlying logic programming formalism of our engine, turns out to be particularly useful, as the resulting programs can actually involve unstratified negation.}, Address = {Porto, Portugal}, Author = {Axel Polleres and Roman Schindlauer}, Booktitle = {2nd International Workshop on Applications of Logic Programming to the Web, Semantic Web and Semantic Web Services (ALPSWS2007)}, Day = 13, Month = sep, Pages = {3--12}, Project = {incontext,rewerse,lion,aeci}, Publisher = {CEUR-WS.org}, Series = {CEUR Workshop Proceedings}, Talk = {Axel Polleres}, Title = {dlvhex-sparql: A {SPARQL}-compliant Query Engine based on dlvhex}, Type = WS, Url = {http://www.polleres.net/publications/poll-schi-2007.pdf}, Volume = {287}, Year = 2007, Bdsk-Url-1 = {http://www.polleres.net/publications/poll-schi-2007.pdf}}
@incollection{bole-etal-2007, Abstract = {Rules play an increasingly important role in a variety of Semantic Web applications as well as in traditional IT systems. As a universal medium for publishing information, the Web is envisioned to become the place for publishing, distributing, and exchanging rule-based knowledge. Realizing the importance and the promise of this vision, the W3C has created the Rule Interchange Format Working Group (RIF WG) and chartered it to develop an interchange format for rules in alignment with the existing standards in the Semantic Web architecture stack. However, creating a generally accepted interchange format is by no means a trivial task. First, there are different understandings of what a ``rule'' is. Researchers and practitioners distinguish between deduction rules, normative rules, production rules, reactive rules, etc. Second, even within the same category of rules, systems use different (often incompatible) semantics and syntaxes. Third, existing Semantic Web standards, such as RDF and OWL, show incompatibilities with many kinds of rule languages at a conceptual level. This article discusses the role that different kinds of rule languages and systems play on the Web, illustrates the problems and opportunities in exchanging rules through a standardized format, and provides a snapshot of the current work of the W3C RIF WG.}, Author = {Harold Boley and Michael Kifer and Paula-Lavinia P\u{a}tr\^{a}njan and Axel Polleres}, Booktitle = {Reasoning Web 2007}, Day = {3--7}, Month = SEP, Pages = {269--309}, Project = {incontext,swos}, Publisher = {Springer}, Series = LNCS, Talk = {Paula-Lavinia P\u{a}tr\^{a}njan and Axel Polleres}, Title = {Rule Interchange on the Web}, Type = BC, Url = {http://www.polleres.net/publications/bole-etal-2007.pdf}, Volume = 4636, Year = 2007, Bdsk-Url-1 = {http://www.polleres.net/publications/bole-etal-2007.pdf}}
@inproceedings{debr-etal-2007b, Abstract = {In the ongoing discussion about combining rules and Ontologies on the Semantic Web a recurring issue is how to combine first-order classical logic with nonmonotonic rule languages. Whereas several modular approaches to define a combined semantics for such hybrid knowledge bases focus mainly on decidability issues, we tackle the matter from a more general point of view. In this paper we show how Quantified Equilibrium Logic (QEL) can function as a unified framework which embraces classical logic as well as disjunctive logic programs under the (open) answer set semantics. In the proposed variant of QEL we relax the unique names assumption, which was present in earlier versions of QEL. Moreover, we show that this framework elegantly captures the existing modular approaches for hybrid knowledge bases in a unified way.}, Address = {Innsbruck, Austria}, Author = {Jos de Bruijn and David Pearce and Axel Polleres and Agust{\'\i}n Valverde}, Booktitle = {First International Conference on Web Reasoning and Rule Systems (RR2007)}, Day = {7--8}, Editor = {Massimo Marchiori and Jeff Z. Pan and Christian de Sainte Marie}, Month = jun, Pages = {58--72}, Publisher = {Springer}, Series = LNCS, Talk = {David Pearce}, Title = {Quantified Equilibrium Logic and Hybrid Rules}, Type = CONF, Url = {http://www.polleres.net/publications/debr-etal-2007b.pdf}, Volume = {4524}, Year = 2007, Bdsk-Url-1 = {http://www.polleres.net/publications/debr-etal-2007b.pdf}}
@inproceedings{alem-etal-2007, Abstract = {This paper presents a framework for the reuse and extension of existing, established vocabularies in the Semantic Web. Driven by the primary application of expert finding, we will explore the reuse of vocabularies that have attracted a considerable user community already (FOAF, SIOC, etc.) or are derived from de facto standards used in tools or industrial practice (such as vCard, iCal and Dublin Core). This focus guarantees direct applicability and low entry barriers, unlike when devising a new ontology from scratch. The Web is already populated with several vocabularies which complement each other (but also have considerable overlap) in that they cover a wide range of necessary features to adequately describe the expert finding domain. Little effort has been made so far to identify and compare existing approaches, and to devise best practices on how to use and extend various vocabularies conjointly. It is the goal of the recently started ExpertFinder initiative to fill this gap. In this paper we present the ExpertFinder framework for reuse and extension of existing vocabularies in the Semantic Web. We provide a practical analysis of overlaps and options for combined use and extensions of several existing vocabularies, as well as a proposal for applying rules and other enabling technologies to the expert finding task.}, Address = {Innsbruck, Austria}, Author = {Boanerges Aleman-Meza and Uldis Boj{\={a}}rs and Harold Boley and John G.~Breslin and Malgorzata Mochol and Lyndon J.B.~Nixon and Axel Polleres and Anna V.~Zhdanova}, Booktitle = {Proceedings of the 4th European Semantic Web Conference (ESWC2007)}, Day = {3--6}, Editor = {Enrico Franconi and Michael Kifer and Wolfgang May}, Month = JUN, Note = {Slides available at \url{http://www.polleres.net/publications/alem-etal-2007eswc-slides.pdf}}, Pages = {235--250}, Publisher = {Springer}, Series = LNCS, Talk = {Axel Polleres}, Title = {Combining {RDF} Vocabularies for Expert Finding}, Type = CONF, Url = {http://www.polleres.net/publications/alem-etal-2007.pdf}, Volume = 4519, Year = 2007, Bdsk-Url-1 = {http://www.polleres.net/publications/alem-etal-2007.pdf}}
@misc{aren-etal-2007-eswc07tut, Abstract = {Tutorial at the 4th European Semantic Web Conference (ESWC2007)}, Address = {Innsbruck, Austria}, Author = {Marcelo Arenas and Claudio Gutierrez and Bijan Parsia and Jorge P{\'e}rez and Axel Polleres and Andy Seaborne}, Day = 2, Month = JUN, Note = {Slides available at \url{http://www.polleres.net/sparqltutorial/}}, Talk = {Claudio Gutierrez and Bijan Parsia and Jorge P{\'e}rez and Axel Polleres and Andy Seaborne}, Title = {{SPARQL} -- Where are we? Current state, theory and practice}, Type = OTHER, Url = {http://www.eswc2007.org/tutorials.cfm#sparql}, Year = 2007, Bdsk-Url-1 = {http://www.eswc2007.org/tutorials.cfm#sparql}}
@incollection{laus-etal-2007, Abstract = {Web Services have added a new level of functionality to the current Web, making the first step to achieve seamless integration of distributed components. Nevertheless, current Web Service technologies only address the syntactical aspects of a Web Service and, therefore, only provide a set of rigid services that cannot adapt to a changing environment without human intervention. The human programmer has to be kept in the loop and scalability as well as economy of Web Services are limited. The description of Web Services in a machine-understandable fashion is expected to have a great impact in areas of e-Commerce and Enterprise Application Integration, as it can enable dynamic and scalable cooperation between different systems and organisations. These great potential benefits have led to the establishment of an important research activity, both in industry and in academia, which aims at realising Semantic Web Services. This chapter outlines aspects of the description of semantic Web Services.}, Author = {Holger Lausen and Rub{\'e}n Lara and Axel Polleres and Jos de Bruijn and Dumitru Roman}, Booktitle = {Semantic Web Services}, Editor = {Rudi Studer and Stephan Grimm and Andreas Abecker}, Isbn = {978-3-540-70893-3}, Pages = {179--209}, Publisher = {Springer}, Title = {Chapter 7: Description -- Semantic Annotation for Web Services}, Type = BC, Url = {http://www.springerlink.com/content/j2uvh275065p0217/}, Year = 2007, Bdsk-Url-1 = {http://www.springerlink.com/content/j2uvh275065p0217/}}
@inproceedings{brai-etal-2007, Abstract = {This position paper raises some issues regarding the output of solvers for Answer Set Programming and discusses experiences made in several different settings. The first set of issues was raised in the context of the first ASP system competition, which led to a first suggestion for a standardised yet miniature output format. We then turn to experiences made in related fields, like Satisfiability Checking, and finally adopt an application point of view by investigating interface issues both with simple tools and in the context of the Semantic Web and query answering.}, Address = {Tempe, AZ}, Author = {Martin Brain and Wolfgang Faber and Marco Maratea and Axel Polleres and Torsten Schaub and Roman Schindlauer}, Booktitle = {First International Workshop on Software Engineering for Answer Set Programming 2007 (SEA'07)}, Day = 14, Editor = {Marina De Vos and Torsten Schaub}, Month = may, Pages = {26--37}, Title = {What should an {ASP} Solver output? A Multiple Position Paper}, Type = WS, Url = {http://sea07.cs.bath.ac.uk/downloads/sea07-proceedings.pdf}, Year = 2007, Bdsk-Url-1 = {http://sea07.cs.bath.ac.uk/downloads/sea07-proceedings.pdf}}
@proceedings{pear-etal-2007-cent, Abstract = {Workshop Proceedings. This workshop was co-located with LPNMR 2007.}, Address = {Tempe, AZ}, Booktitle = {CENT 2007}, Day = 14, Editor = {David Pearce and Axel Polleres and Agust{\'\i}n Valverde and Stefan Woltran}, Month = MAY, Publisher = {CEUR-WS.org}, Series = {CEUR Workshop Proceedings}, Title = {Workshop on Correspondence and Equivalence for Nonmonotonic Theories (CENT 2007) Working Notes}, Type = BOOK, Url = {http://CEUR-WS.org/Vol-265/}, Volume = {265}, Year = 2007, Bdsk-Url-1 = {http://CEUR-WS.org/Vol-265/}}
@inproceedings{poll-2007, Abstract = {As the data and ontology layers of the Semantic Web stack have achieved a certain level of maturity in standard recommendations such as RDF and OWL, the current focus lies on two related aspects. On the one hand, the definition of a suitable query language for RDF, SPARQL, is close to recommendation status within the W3C. The establishment of the rules layer on top of the existing stack on the other hand marks the next step to be taken, where languages with their roots in Logic Programming and Deductive Databases are receiving considerable attention. The purpose of this paper is threefold. First, we discuss the formal semantics of SPARQL extending recent results in several ways. Second, we provide translations from SPARQL to Datalog with negation as failure. Third, we propose some useful and easy to implement extensions of SPARQL, based on this translation. As it turns out, the combination serves for direct implementations of SPARQL on top of existing rules engines as well as a basis for more general rules and query languages on top of RDF.}, Address = {Banff, Canada}, Author = {Axel Polleres}, Booktitle = {Proceedings of the 16th World Wide Web Conference (WWW2007)}, Day = {8--12}, Month = may, Note = {Extended technical report version available at \url{http://www.polleres.net/TRs/GIA-TR-2006-11-28.pdf}, slides available at \url{http://www.polleres.net/publications/poll-2007www-slides.pdf}}, Pages = {787--796}, Publisher = {ACM Press}, Talk = {Axel Polleres}, Title = {From {SPARQL} to Rules (and back)}, Type = CONF, Url = {http://www2007.org/paper435.php}, doi = {https://doi.org/10.1145/1242572.1242679}, Year = 2007, Bdsk-Url-1 = {http://www2007.org/paper435.php}}
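To convey the flavour of the SPARQL-to-rules translation in a greatly simplified form (the paper's actual encoding also covers OPTIONAL, UNION, and dataset handling, none of which appear here): a basic graph pattern can be read as the body of a Datalog rule over a triple predicate. A toy Python sketch with invented naming conventions:

# Toy translation of a SPARQL basic graph pattern into a Datalog rule string.
# Roughly: SELECT ?x ?y WHERE { ?x foaf:knows ?y . ?y foaf:name ?n }
# becomes  answer(X, Y) :- triple(X, foaf_knows, Y), triple(Y, foaf_name, N).

def bgp_to_datalog(select_vars, patterns):
    def term(t):
        # SPARQL variables (?x) become Datalog variables (X); IRIs become constants.
        return t[1:].upper() if t.startswith("?") else t.replace(":", "_")
    head = "answer(" + ", ".join(term(v) for v in select_vars) + ")"
    body = ", ".join(
        "triple({}, {}, {})".format(term(s), term(p), term(o))
        for (s, p, o) in patterns)
    return head + " :- " + body + "."

print(bgp_to_datalog(
    ["?x", "?y"],
    [("?x", "foaf:knows", "?y"), ("?y", "foaf:name", "?n")]))
# answer(X, Y) :- triple(X, foaf_knows, Y), triple(Y, foaf_name, N).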
@inproceedings{bres-etal-2007, Abstract = {This position paper on expert finding presents a conceptual framework for the reuse and interlinking of existing, well-established vocabularies in the Semantic Web. Such a framework can be used to connect people with people, based on joint or complementing interests (e.g. the need to develop specific new or existing skills for upcoming projects). Driven by a requirement to find experts using the profiles of people in social networks and using the content they create in online communities, we are exploring the usage of vocabularies in these domains that have already gained considerable momentum and that have suitable concepts for this application area. We will present the relevant properties of the FOAF ontology for matching people and their skills in social networks, then detail the SIOC project and methods for identifying relevant discussion topics/individuals, and finally we will outline a combinatory scenario that will allow people to find individuals with the desired expertise in a particular domain of interest.}, Author = {John G. Breslin and Uldis Boj{\={a}}rs and Boanerges Aleman-Meza and Harold Boley and Malgorzata Mochol and Lyndon J.B. Nixon and Axel Polleres and Anna V. Zhdanova}, Booktitle = {1st International ExpertFinder Workshop}, Day = 16, Month = jan, Title = {Finding experts using Internet-based discussions in online communities and associated social networks}, Type = WS, Url = {http://lsdis.cs.uga.edu/~aleman/efw2007/breslin__efw2007.pdf}, Year = 2007, Bdsk-Url-1 = {http://lsdis.cs.uga.edu/~aleman/efw2007/breslin__efw2007.pdf}}
@inproceedings{debr-etal-2007, Abstract = {In the context of the Semantic Web, several approaches to the combination of ontologies, given in terms of theories of classical first-order logic, and rule bases have been proposed. They either cast rules into classical logic or limit the interaction between rules and ontologies. Autoepistemic logic (AEL) is an attractive formalism which allows these limitations to be overcome by serving as a uniform host language into which ontologies and nonmonotonic logic programs can be embedded. For the latter, so far only the propositional setting has been considered. In this paper, we present several embeddings of normal and disjunctive non-ground logic programs under the stable-model semantics into first-order AEL, and compare them in combination with classical theories, with respect to stable expansions and autoepistemic consequences. Our results reveal differences and correspondences of the embeddings and provide a useful guidance in the choice of a particular embedding for knowledge combination.}, Address = {Hyderabad, India}, Author = {Jos de Bruijn and Thomas Eiter and Axel Polleres and Hans Tompits}, Booktitle = {Twentieth International Joint Conference on Artificial Intelligence (IJCAI'07)}, Day = {6--12}, Month = jan, Pages = {304--309}, Publisher = {AAAI}, Talk = {Jos de Bruijn}, Title = {Embedding Non-Ground Logic Programs into Autoepistemic Logic for Knowledge-Base Combination}, Type = CONF, Url = {http://www.polleres.net/publications/fo-ael-ijcai07.pdf}, Year = 2007, Bdsk-Url-1 = {http://www.polleres.net/publications/fo-ael-ijcai07.pdf}}
@inproceedings{debr-etal-2006c, Abstract = {In the ongoing discussion about rule extensions for Ontology languages on the Semantic Web a recurring issue is how to combine first-order classical logic with nonmonotonic rule languages. Whereas several modular approaches to define a combined semantics for such hybrid knowledge bases focus mainly on decidability issues, we tackle the matter from a more general point of view. In this paper we show how Quantified Equilibrium Logic (QEL) can function as a unified framework that embraces classical logic as well as disjunctive logic programs under the (open) answer set semantics. In the proposed variant of QEL we relax the unique names assumption from earlier versions. Moreover, we show that this framework elegantly captures several modular approaches to nonmonotonic semantics for hybrid knowledge bases.}, Author = {Jos de Bruijn and David Pearce and Axel Polleres and Agust{\'\i}n Valverde}, Booktitle = {RuleML 2006 Workshop: Ontology and Rule Integration}, Month = nov, Talk = {Axel Polleres}, Title = {A Logic for Hybrid Rules}, Type = WS, Url = {http://www.polleres.net/publications/debr-etal-2006c.pdf}, Year = 2006, Bdsk-Url-1 = {http://www.polleres.net/publications/debr-etal-2006c.pdf}}
@inproceedings{poll-schi-2006, Abstract = {As the data and ontology layers of the Semantic Web stack have achieved a certain level of maturity in standard recommendations such as RDF and OWL, the current focus lies on two related aspects. On the one hand, the definition of a suitable query language for RDF, SPARQL, has just reached candidate recommendation status within the W3C. The establishment of the rules layer on top of the existing stack on the other hand marks the next step to be tackled, where especially languages with their roots in Logic Programming and Deductive Databases are receiving considerable attention. In this work we try to bridge the gap between these two efforts by providing translations between SPARQL and Datalog extended with negation and external built-in predicates. It appears that such a combination serves both as an underpinning for a more general rules and query language on top of RDF and SPARQL as well as for direct implementations of SPARQL on top of existing rules engines. Our prototype implementation is based on the datalog engine DLV. As it turns out, features of the language of this system can be fruitfully combined with SPARQL.}, Address = {Athens, GA, USA}, Author = {Axel Polleres and Roman Schindlauer}, Booktitle = {International Semantic Web Conference ({ISWC2006} -- Posters Track)}, Month = nov, Note = {Abstract, the full poster is available at \url{http://www.polleres.net/publications/poll-schi-2006-poster.pdf}}, Talk = {Axel Polleres}, Title = {{SPAR$^2$QL}: From {SPARQL} to Rules}, Type = OTHER, Url = {http://www.polleres.net/publications/poll-schi-2006.pdf}, Year = 2006, Bdsk-Url-1 = {http://www.polleres.net/publications/poll-schi-2006.pdf}}
@proceedings{poll-etal-2006-alpsws, Abstract = {Workshop Proceedings. This workshop was co-located with ICLP 2006.}, Address = {Seattle, WA}, Booktitle = {ALPSWS2006}, Day = 16, Editor = {Axel Polleres and Stefan Decker and Gopal Gupta and Jos de Bruijn}, Month = AUG, Publisher = {CEUR-WS.org}, Series = {CEUR Workshop Proceedings}, Title = {ALPSWS2006: Applications of Logic Programming in the Semantic Web and Semantic Web Services. Proceedings}, Type = BOOK, Url = {http://CEUR-WS.org/Vol-196/}, Volume = {196}, Year = 2006, Bdsk-Url-1 = {http://CEUR-WS.org/Vol-196/}}
@incollection{eite-etal-2006, Abstract = {For realizing the Semantic Web vision, extensive work is underway for getting the layers of its conceived architecture ready. Given that the Ontology Layer has reached a certain level of maturity with W3C recommendations such as RDF and the OWL Web Ontology Language, current interest focuses on the Rules Layer and its integration with the Ontology Layer. Several proposals have been made for solving this problem, which does not have a straightforward solution due to various obstacles. One of them is the fact that evaluation principles like the closed-world assumption, which is common in rule languages, are usually not adopted in ontologies. Furthermore, naively adding rules to ontologies raises undecidability issues. In this paper, after giving a brief overview about the current state of the Semantic-Web stack and its components, we will discuss nonmonotonic logic programs under the answer-set semantics as a possible formalism of choice for realizing the Rules Layer. We will briefly discuss open issues in combining rules and ontologies, and survey some existing proposals to facilitate reasoning with rules and ontologies. We will then focus on description-logic programs (or dl-programs, for short), which realize a transparent integration of rules and ontologies supported by existing reasoning engines, based on the answer-set semantics. We will further discuss a generalization of dl-programs, viz. HEX-programs, which offer access to different ontologies as well as higher-order language constructs.}, Author = {Thomas Eiter and Giovambattista Ianni and Axel Polleres and Roman Schindlauer and Hans Tompits}, Booktitle = {Reasoning Web 2006}, Day = {4--8}, Editor = {P. Barahona et al.}, Month = SEP, Pages = {93--127}, Publisher = {Springer}, Series = LNCS, Talk = {Thomas Eiter}, Title = {Reasoning with Rules and Ontologies}, Type = BC, Url = {http://www.polleres.net/publications/eit-etal-2006_rowSchool.pdf}, Volume = 4126, Year = 2006, Bdsk-Url-1 = {http://www.polleres.net/publications/eit-etal-2006_rowSchool.pdf}}
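The dl-programs surveyed in the chapter above couple a rule engine with a description-logic reasoner through "dl-atoms" in rule bodies, which may also push facts into the ontology before querying it. A minimal sketch of that interface, with the DL side reduced to set membership; all names are made up for illustration, and real engines such as dlvhex work quite differently:

def dl_atom(ontology, additions, concept, individual):
    # DL[C += A; C](x): query the ontology, temporarily extended by
    # `additions`, for membership of `individual` in `concept`.
    extended = ontology | additions           # stand-in for ontology extension
    return (concept, individual) in extended  # stand-in for DL entailment

ontology = {("Wine", "merlot"), ("Wine", "riesling")}
extra = {("Wine", "cider")}                   # facts contributed by the rule layer

# Rule sketch: buy(X) <- candidate(X), DL[Wine += extra; Wine](X)
candidates = ["merlot", "cider", "cola"]
buy = [x for x in candidates if dl_atom(ontology, extra, "Wine", x)]
print(buy)  # ['merlot', 'cider']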
@book{fens-etal-2006, Abstract = {The goal of this book is to provide an insight into and an understanding of the problems faced by Web services and service-oriented architectures, as well as the promises and solutions of the Semantic Web. We focus particularly on the Web Service Modeling Ontology (WSMO), which provides a comprehensive conceptual framework for the fruitful combination of Semantic Web technologies and Web services. With the present book we want to give an overall understanding of the WSMO framework and show how it can be applied to the problems of service-oriented architectures. It is not a ready-to-install ``user manual'' for Semantic Web services that is provided with this book, but rather an in-depth introduction. While many of the related technologies and standards are still under development, we nevertheless think it is not too early for such a book: it is important to create an awareness of this technology and think about it today rather than tomorrow. The technology might not be at industrial-strength maturity yet, but the problems are already.}, Author = {Dieter Fensel and Holger Lausen and Axel Polleres and Jos de Bruijn and Michael Stollberg and Dumitru Roman and John Domingue}, Isbn = {3-540-34519-1}, Publisher = {Springer}, Title = {Enabling Semantic Web Services: The Web Service Modeling Ontology}, Type = BOOK, Url = {http://www.springer.com/west/home/business/business+information+systems?SGWID=4-170-22-173663112-0}, Year = 2006, Bdsk-Url-1 = {http://www.springer.com/west/home/business/business+information+systems?SGWID=4-170-22-173663112-0}}
@inproceedings{debr-etal-2006b, Abstract = {In the context of current efforts around Semantic-Web languages, the combination of classical theories in classical first-order logic (and in particular of ontologies in various description logics) with rule languages rooted in logic programming is receiving considerable attention. Existing approaches such as SWRL, dl-programs, and $\mathcal{DL}$+log differ significantly in the way ontologies interact with (nonmonotonic) rule bases. In this paper, we identify fundamental representational issues which need to be addressed by such combinations and formulate a number of formal principles which help to characterize and classify existing and possible future approaches to the combination of rules and classical theories. We use the formal principles to explicate the underlying assumptions of current approaches. Finally, we propose a number of settings, based on our analysis of the representational issues and the fundamental principles underlying current approaches.}, Address = {Guilin, China}, Author = {Jos de Bruijn and Thomas Eiter and Axel Polleres and Hans Tompits}, Booktitle = {Proceedings of the 1st International Conference on Knowledge Science, Engineering and Management (KSEM'06)}, Day = {5--8}, Month = AUG, Note = {Invited paper}, Pages = {1--22}, Publisher = {Springer}, Series = LNCS, Talk = {Thomas Eiter}, Title = {On Representational Issues about Combinations of Classical Theories with Nonmonotonic Rules}, Type = CONF, Url = {http://polleres.net/publications/deri-tr-2006-05-29.pdf}, Volume = 4092, Year = 2006, Bdsk-Url-1 = {http://polleres.net/publications/deri-tr-2006-05-29.pdf}}
@inproceedings{debr-etal-2006, Abstract = {The Web Service Modeling Language (WSML) is a language for the specification of different aspects of Semantic Web Services. It provides a formal language for the Web Service Modeling Ontology WSMO which is based on well-known logical formalisms, specifying one coherent language framework for the description of Semantic Web Services, starting from the intersection of Datalog and the Description Logic $\mathcal{SHIQ}$. This core language is extended in the directions of Description Logics and Logic Programming in a principled manner with strict layering. WSML distinguishes between conceptual and logical modeling in order to facilitate users who are not familiar with formal logic, while not restricting the expressive power of the language for the expert user. IRIs play a central role in WSML as identifiers. Furthermore, WSML defines XML and RDF serializations for inter-operation over the Semantic Web.}, Address = {Budva, Montenegro}, Author = {Jos de Bruijn and Holger Lausen and Axel Polleres and Dieter Fensel}, Booktitle = {Proceedings of the 3rd European Semantic Web Conference (ESWC2006)}, Day = {11--14}, Month = JUN, Note = {\textbf{Nominated for the 7 Years Most Influential ESWC Paper award at ESWC2013}}, Publisher = {Springer}, Series = LNCS, Talk = {Jos de Bruijn}, Title = {The Web Service Modeling Language: An Overview}, Type = CONF, Url = {http://www.polleres.net/publications/debr-etal-2006.pdf}, doi = {https://doi.org/10.1007/11762256_43}, Volume = 4011, Year = 2006, }
@inproceedings{poll-etal-2006b, Abstract = {Knowledge representation formalisms used on the Semantic Web adhere to a strict open world assumption. Therefore, nonmonotonic reasoning techniques are often viewed with scepticism. Especially negation as failure, which intuitively adopts a closed world view, is often claimed to be unsuitable for the Web where knowledge is notoriously incomplete. Nonetheless, it was suggested in the ongoing discussions around rules extensions for languages like RDF(S) or OWL to allow at least restricted forms of negation as failure, as long as negation has an explicitly defined, finite scope. Yet clear definitions of such ``scoped negation'' as well as formal semantics thereof are missing. We propose logic programs with contexts and scoped negation and discuss two possible semantics with desirable properties. We also argue that this class of logic programs can be viewed as a rule extension to a subset of RDF(S).}, Address = {Budva, Montenegro}, Author = {Axel Polleres and Cristina Feier and Andreas Harth}, Booktitle = {Proceedings of the 3rd European Semantic Web Conference (ESWC2006)}, Day = {11--14}, Month = JUN, Pages = {332--347}, Publisher = {Springer}, Series = LNCS, Talk = {Axel Polleres}, Title = {Rules with Contextually Scoped Negation}, Type = CONF, Url = {http://www.polleres.net/publications/poll-etal-2006b.pdf}, Volume = 4011, Year = 2006, Bdsk-Url-1 = {http://www.polleres.net/publications/poll-etal-2006b.pdf}}
@misc{eite-etal-2006-eswc06tut, Abstract = {Tutorial at the 3rd European Semantic Web Conference (ESWC2006)}, Address = {Budva, Montenegro}, Author = {Thomas Eiter and Giovambattista Ianni and Axel Polleres and Roman Schindlauer}, Day = 11, Month = JUN, Note = {Slides available at \url{http://asptut.gibbi.com/}}, Talk = {Giovambattista Ianni and Axel Polleres and Roman Schindlauer}, Title = {Answer Set Programming for the Semantic Web}, Type = OTHER, Url = {http://www.eswc2006.org/tutorials.html#tutorial1}, Year = 2006, Bdsk-Url-1 = {http://www.eswc2006.org/tutorials.html#tutorial1}}
@inproceedings{poll-2006, Abstract = {The Semantic Web community is currently dominated by knowledge representation formalisms adhering to a strict open world assumption. Nonmonotonic reasoning formalisms are viewed with partial scepticism and it is often argued that nonmonotonic reasoning techniques which adopt a closed world assumption are invalid in an open environment such as the Web where knowledge is notoriously incomplete. Nonetheless, in the ongoing discussion about rule extensions for Semantic Web Languages like RDF(S) or OWL several proposals have been made to partly break with this view and to allow a restricted form of negation as failure. Recently, the term ``scoped negation'' emerged in discussions around this topic, yet a clear definition about the meaning of ``scope'' and ``scoped negation'' and a formal semantics are still missing. In this paper we provide preliminary results towards these missing definitions and define two possible semantics for logic programs with contextually scoped negation, which we propose as an extension of RDFS.}, Address = {Vienna, Austria}, Author = {Axel Polleres}, Booktitle = {20th Workshop on Logic Programming ({WLP 2006})}, Day = {22--24}, Month = FEB, Talk = {Axel Polleres}, Title = {Logic Programs with Contextually Scoped Negation}, Type = WS, Url = {http://www.polleres.net/publications/poll-2006.pdf}, Year = 2006, Bdsk-Url-1 = {http://www.polleres.net/publications/poll-2006.pdf}}
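The scoped-negation papers above restrict negation as failure to an explicitly named, finite context, so that the closed-world check is never made against the open Web as a whole. A toy reading of that idea; the context IRIs and facts below are made up for illustration:

contexts = {
    "http://example.org/airlineA": {("flight", "VIE", "DUB")},
    "http://example.org/airlineB": set(),
}

def scoped_not(context_iri, fact):
    """Closed-world check, restricted to a single named context."""
    return fact not in contexts[context_iri]

# Rule sketch: needsPartner(X, Y) <- not flight(X, Y) @ airlineB
print(scoped_not("http://example.org/airlineB", ("flight", "VIE", "DUB")))  # True
print(scoped_not("http://example.org/airlineA", ("flight", "VIE", "DUB")))  # False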
@article{eite-poll-2006, Abstract = {Answer set programming (ASP) with disjunction offers a powerful tool for declaratively representing and solving hard problems. Many NP-complete problems can be encoded in the answer set semantics of logic programs in a very concise and intuitive way, where the encoding reflects the typical ``guess and check'' nature of NP problems: The property is encoded in a way such that polynomial size certificates for it correspond to stable models of a program. However, the problem-solving capacity of full disjunctive logic programs (DLPs) is beyond NP, and captures a class of problems at the second level of the polynomial hierarchy. While these problems also have a clear ``guess and check'' structure, finding an encoding in a DLP reflecting this structure may sometimes be a non-obvious task, in particular if the ``check'' itself is a coNP-complete problem; usually, such problems are solved by interleaving separate guess and check programs, where the check is expressed by inconsistency of the check program. In this paper, we present general transformations of head-cycle free (extended) disjunctive logic programs into stratified and positive (extended) disjunctive logic programs based on meta-interpretation techniques. The answer sets of the original and the transformed program are in simple correspondence, and, moreover, inconsistency of the original program is indicated by a designated answer set of the transformed program. Our transformations facilitate the integration of separate ``guess'' and ``check'' programs, which are often easy to obtain, automatically into a single disjunctive logic program. Our results complement recent results on meta-interpretation in ASP, and extend methods and techniques for a declarative ``guess and check'' problem solving paradigm through ASP.}, Author = {Thomas Eiter and Axel Polleres}, Journal = TPLP, Number = {1-2}, Pages = {23--60}, Title = {Towards Automated Integration of Guess and Check Programs in Answer Set Programming: A Meta-Interpreter and Applications}, Type = JOURNAL, Url = {http://arxiv.org/pdf/cs/0501084}, Volume = 6, Year = 2006, Bdsk-Url-1 = {http://arxiv.org/pdf/cs/0501084}}
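The "guess and check" shape of NP problems that the article above integrates inside answer set programs can be sketched in plain Python (rather than ASP itself): guess a polynomial-size candidate, then check it in polynomial time. The graph below is an arbitrary example:

from itertools import product

edges = [(0, 1), (1, 2), (2, 0), (2, 3)]  # a small example graph
n_vertices = 4

def check(coloring):
    """The polynomial-time check: no edge connects equal colors."""
    return all(coloring[u] != coloring[v] for u, v in edges)

# The guess: enumerate all 3-colorings (an ASP solver guesses via disjunction).
solutions = [c for c in product(range(3), repeat=n_vertices) if check(c)]
print(len(solutions), solutions[0])  # 12 (0, 1, 2, 0)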
@incollection{poll-etal-2006, Abstract = {This chapter covers application areas and approaches for the semantic description of Web services. Existing Web service technologies make a decisive contribution to the development of distributed applications by providing widely accepted standards that govern the communication between applications and enable their combination into more complex units. Automated mechanisms for discovering suitable Web services and composing them, by contrast, are supported by existing technologies only to a comparatively small degree. Similarly to the annotation of static data in the ``Semantic Web'', research and industry place great hopes in the semantic description of Web services for the far-reaching automation of these tasks.}, Author = {Axel Polleres and Holger Lausen and Rub{\'e}n Lara}, Booktitle = {Semantic Web -- Wege zur vernetzten Wissensgesellschaft}, Editor = {Tassilo Pellegrini and Andreas Blumauer}, Month = JUN, Note = {(in German)}, Publisher = {Springer}, Title = {{Semantische Beschreibung von Web Services}}, Type = BC, Url = {http://www.polleres.net/publications/poll-etal-2006.pdf}, Year = 2006, Bdsk-Url-1 = {http://www.polleres.net/publications/poll-etal-2006.pdf}}
@proceedings{hepp-etal-2005, Abstract = {Workshop Proceedings. This workshop was co-located with ICSOC 2005.}, Address = {Amsterdam, The Netherlands}, Booktitle = {MEDIATE2005}, Day = 12, Editor = {Martin Hepp and Axel Polleres and Frank van Harmelen and Michael R. Genesereth}, Month = DEC, Publisher = {CEUR-WS.org}, Series = {CEUR Workshop Proceedings}, Title = {Proceedings of the First International Workshop on Mediation in Semantic Web Services (MEDIATE 2005)}, Url = {http://CEUR-WS.org/Vol-168/}, Volume = {168}, Year = 2005, Bdsk-Url-1 = {http://CEUR-WS.org/Vol-168/}}
@inproceedings{krum-etal-2005, Abstract = {A core paradigm of the Web is information exchange via persistent publication, i.e., one party publishes a piece of information on the Web, and any other party who knows the location of the resource can retrieve and process the information at any later point in time and without the need for synchronization with the original publisher. This functionality significantly contributed to the scalability of the Web, since it reduced the amount of interaction between the sender and receiver. Current approaches of extending the World Wide Web from a collection of human-readable information, connecting humans, into a network that connects computing devices based on machine-processable semantics of data lack this feature and are instead based on tightly-coupled message exchange. In this paper, we (1) show that Web services based on the message-exchange paradigm are not fully compliant with core paradigms of the Web itself, (2) outline how the idea of persistent publication as a communication paradigm can be beneficially applied to Web services, and (3) propose a minimal architecture for fully Web-enabled Semantic Web services based on publication in shared information spaces, which we call Triple Space Computing.}, Address = {V{\"a}xj{\"o}, Sweden}, Author = {Reto Krummenacher and Martin Hepp and Axel Polleres and Christoph Bussler and Dieter Fensel}, Booktitle = {Proceedings of the 3rd European Conference on Web Services (ECOWS 2005)}, Day = {14--16}, Editor = {Welf L{\"o}we and Jean-Philippe Martin-Flatin}, Month = NOV, Pages = {235--243}, Publisher = {IEEE Computer Society}, Talk = {Reto Krummenacher}, Title = {{WWW or What is Wrong with Web Services}}, Url = {http://www.polleres.net/publications/krum-etal-2005.pdf}, Year = 2005, Bdsk-Url-1 = {http://www.polleres.net/publications/krum-etal-2005.pdf}}
@misc{ange-etal-2005, Abstract = {The Web Rule Language WRL is a rule-based ontology language for the Semantic Web. The language is located in the Semantic Web stack next to the Description Logic based Ontology language OWL. WRL defines three variants, namely Core, Flight and Full. The Core variant marks the common fragment between WRL and OWL. WRL-Flight is a Datalog-based rule language. WRL-Full is a full-fledged rule language with function symbols and negation under the Well-Founded Semantics.}, Author = {J\"urgen Angele and Harold Boley and Jos de Bruijn and Dieter Fensel and Pascal Hitzler and Michael Kifer and Reto Krummenacher and Holger Lausen and Axel Polleres and Rudi Studer}, Day = 9, Month = SEP, Note = {W3C member submission}, Title = {{Web Rule Language (WRL)}}, Url = {http://www.w3.org/Submission/WRL/}, Year = 2005, Bdsk-Url-1 = {http://www.w3.org/Submission/WRL/}}
@inproceedings{feie-etal-2005, Abstract = {The Semantic Web and Semantic Web Services form a natural application area for Intelligent Agents, namely querying and reasoning about structured knowledge and semantic descriptions of services and their interfaces on the Web. This paper provides an overview of the Web Service Modeling Ontology, a conceptual framework for the semantic description of Web services.}, Address = {Hefei, China}, Author = {Cristina Feier and Dumitru Roman and Axel Polleres and John Domingue and Michael Stollberg and Dieter Fensel}, Booktitle = {Proceedings of the 2005 International Conference on Intelligent Computing (ICIC'05)}, Day = {23--26}, Month = AUG, Talk = {Cristina Feier}, Title = {Towards Intelligent Web Services: The Web Service Modeling Ontology {(WSMO)}}, Url = {http://www.polleres.net/publications/feie-etal-2005.pdf}, Year = 2005, Bdsk-Url-1 = {http://www.polleres.net/publications/feie-etal-2005.pdf}}
@misc{poll-etal-2005, Abstract = {The potential to achieve dynamic, scalable and cost-effective infrastructure for electronic transactions in business and public administration has driven recent research efforts towards so-called Semantic Web services, that is enriching Web services with machine-processable semantics. Supporting this goal, the Web Service Modeling Ontology (WSMO) provides a conceptual framework and a formal language for semantically describing all relevant aspects of Web services in order to facilitate the automation of discovering, combining and invoking electronic services over the Web. This document describes the overall structure of WSMO by its four main elements: ontologies, which provide the terminology used by other WSMO elements, Web service descriptions, which describe the functional and behavioral aspects of a Web service, goals that represent user desires, and mediators, which aim at automatically handling interoperability problems between different WSMO elements. Along with introducing the main elements of WSMO, the syntax of the formal logic language used in WSMO is provided. The semantics and computationally tractable subsets of this logical language are defined and discussed in a separate document of the submission, the Web Service Modeling Language (WSML) document.}, Author = {Jos de Bruijn and Christoph Bussler and John Domingue and Dieter Fensel and Martin Hepp and Uwe Keller and Michael Kifer and Birgitta K{\"o}nig-Ries and Jacek Kopecky and Rub{\'e}n Lara and Holger Lausen and Eyal Oren and Axel Polleres and Dumitru Roman and James Scicluna and Michael Stollberg}, Day = 3, Month = JUN, Note = {W3C member submission}, Title = {{Web Service Modeling Ontology (WSMO)}}, Url = {http://www.w3.org/Submission/WSMO/}, Year = 2005, Bdsk-Url-1 = {http://www.w3.org/Submission/WSMO/}}
@misc{poll-etal-2005b, Abstract = {In this document, we introduce the Web Service Modeling Language WSML which provides a formal syntax and semantics for the Web Service Modeling Ontology WSMO. WSML is based on different logical formalisms, namely, Description Logics, First-Order Logic and Logic Programming, which are useful for the modeling of Semantic Web services. WSML consists of a number of variants based on these different logical formalisms, namely WSML-Core, WSML-DL, WSML-Flight, WSML-Rule and WSML-Full. WSML-Core corresponds with the intersection of Description Logic and Horn Logic. The other WSML variants provide increasing expressiveness in the direction of Description Logics and Logic Programming. Finally, both paradigms are unified in WSML-Full, the most expressive WSML variant. WSML is specified in terms of a normative human-readable syntax. Besides the human-readable syntax, WSML has an XML and an RDF syntax for exchange over the Web and for interoperation with RDF-based applications. Furthermore, we provide a mapping between WSML ontologies and OWL ontologies for interoperation with OWL ontologies through a common semantic subset of OWL and WSML.}, Author = {Jos de Bruijn and Dieter Fensel and Uwe Keller and Michael Kifer and Reto Krummenacher and Holger Lausen and Axel Polleres and Livia Predoiu}, Day = 3, Month = JUN, Note = {W3C member submission}, Title = {{Web Service Modeling Language (WSML)}}, Url = {http://www.w3.org/Submission/WSML/}, Year = 2005, Bdsk-Url-1 = {http://www.w3.org/Submission/WSML/}}
@inproceedings{poll-etal-2005c, Abstract = {The Grid has emerged as a new distributed computing infrastructure for advanced science and engineering aiming at enabling sharing of resources and information towards coordinated problem solving in dynamic environments. Research in Grid Computing and Web Services has recently converged in what is known as the Web Service Resource Framework. While Web Service technologies and standards such as SOAP and WSDL provide the syntactical basis for communication in this framework, a service oriented grid architecture for communication has been defined in the Open Grid Service architecture. Wide agreement that a flexible service Grid is not possible without support by Semantic technologies has led to the term ``Semantic Grid'', which is at the moment only vaguely defined. In our ongoing work on the Web Service Modeling Ontology (WSMO) we have so far concentrated on the semantic description of Web services with respect to applications in Enterprise Application Integration and B2B integration scenarios. Although the typical application areas of Semantic Web services have slightly different requirements than the typical application scenarios in the Grid, a big overlap justifies the assumption that most research results in the Semantic Web Services area can be similarly applied in the Semantic Grid. The present abstract summarizes the authors' view on how to fruitfully integrate Semantic Web service technologies around WSMO/WSML and WSMX and Grid technologies in a Semantic Service Grid and gives an outlook on further possible directions and research. The remainder of this abstract is structured as follows. After giving a short overview of the current Grid Service architecture and its particular requirements, we briefly review the basic usage tasks for Semantic Web services. We then point out how these crucial tasks of Semantic Web services are to be addressed by WSMO. In turn, we try to analyze which special requirements for Semantic Web Services arise with respect to the Grid. We conclude by giving an outlook on the limitations of current Semantic Web services technologies and how we plan to address these in the future in a common Framework for Semantic Grid services.}, Author = {Axel Polleres and Ioan Toma and Dieter Fensel}, Booktitle = {{The Dagstuhl Seminar 05271 -- Semantic Grid: The Convergence of Technologies}}, Month = MAY, Note = {Extended Abstract}, Talk = {Axel Polleres}, Title = {Modeling Services for the Semantic Grid}, Url = {http://drops.dagstuhl.de/opus/volltexte/2005/394/}, Year = 2005, Bdsk-Url-1 = {http://drops.dagstuhl.de/opus/volltexte/2005/394/}}
@article{roma-etal-2005, Abstract = {The potential to achieve dynamic, scalable and cost-effective marketplaces and eCommerce solutions has driven recent research efforts towards so-called Semantic Web Services that are enriching Web services with machine-processable semantics. To this end, the Web Service Modeling Ontology (WSMO) provides the conceptual underpinning and a formal language for semantically describing all relevant aspects of Web services in order to facilitate the automation of discovering, combining and invoking electronic services over the Web. In this paper we describe the overall structure of WSMO by its four main elements: ontologies, which provide the terminology used by other WSMO elements, Web services, which provide access to services that, in turn, provide some value in some domain, goals that represent user desires, and mediators, which deal with interoperability problems between different WSMO elements. Along with introducing the main elements of WSMO, we provide a logical language for defining formal statements in WSMO together with some motivating examples from practical use cases which shall demonstrate the benefits of Semantic Web Services.}, Author = {Dumitru Roman and Uwe Keller and Holger Lausen and Jos de Bruijn and Rub{\'e}n Lara and Michael Stollberg and Axel Polleres and Cristina Feier and Christoph Bussler and Dieter Fensel}, Journal = {Applied Ontology}, Title = {Web Service Modeling Ontology}, Type = JOURNAL, Url = {http://iospress.metapress.com/openurl.asp?genre=article&issn=1570-5838&volume=1&issue=1&spage=77}, Year = 2005, Bdsk-Url-1 = {http://iospress.metapress.com/openurl.asp?genre=article&issn=1570-5838&volume=1&issue=1&spage=77}}
@inproceedings{kell-etal-2005, Abstract = {The automatic location of services that fulfill a given need is seen as a key step towards dynamic and scalable integration. In this paper we present a model for the automatic location of services that considers the static and dynamic aspects of service descriptions and identifies what notions of match and techniques are useful for the matching of both. Our model presents three important features: ease of use for the requester, efficient pre-filtering of relevant services, and accurate contracting of services that fulfill a given requester goal. We further elaborate previous work and results on Web service discovery by analyzing what steps and what kind of descriptions are necessary for an efficient and usable automatic service location. Furthermore, we analyze the intuitive and formal notions of match that are of interest for locating services that fulfill a given goal. Although having a formal underpinning, the proposed model does not impose any restrictions on how to implement it for specific applications, but proposes some useful formalisms for providing such implementation.}, Author = {Uwe Keller and Rub{\'e}n Lara and Holger Lausen and Axel Polleres and Dieter Fensel}, Booktitle = {Proceedings of the 2nd European Semantic Web Conference (ESWC2005)}, Month = MAY, Talk = {Holger Lausen}, Title = {Automatic Location of Services}, Url = {http://www.polleres.net/publications/kell-etal-2005.pdf}, Year = 2005, Bdsk-Url-1 = {http://www.polleres.net/publications/kell-etal-2005.pdf}}
@inproceedings{debr-etal-2005, Abstract = {The Semantic Web languages RDFS and OWL have been around for some time now. However, the presence of these languages has not brought the breakthrough of the Semantic Web the creators of the languages had hoped for. OWL has a number of problems in the area of interoperability and usability in the context of many practical application scenarios which impede the connection to the Software Engineering and Database communities. In this paper we present OWL Flight, which is loosely based on OWL, but the semantics is grounded in Logic Programming rather than Description Logics, and it borrows the constraint-based modeling style common in databases. This results in different types of modeling primitives and enforces a different style of ontology modeling. We analyze the modeling paradigms of OWL DL and OWL Flight, as well as reasoning tasks supported by both languages. We argue that different applications on the Semantic Web require different styles of modeling and thus both types of languages are required for the Semantic Web.}, Address = {Chiba, Japan}, Author = {Jos de Bruijn and Axel Polleres and Rub{\'e}n Lara and Dieter Fensel}, Booktitle = {Proceedings of the 14th World Wide Web Conference (WWW2005)}, Month = MAY, Pages = {623--632}, Publisher = {ACM Press}, Talk = {Jos de Bruijn}, Title = {{OWL DL vs.\ OWL Flight}: Conceptual Modeling and Reasoning for the Semantic Web}, Url = {http://www.polleres.net/publications/debr-etal-2005.pdf}, Year = 2005, Bdsk-Url-1 = {http://www.polleres.net/publications/debr-etal-2005.pdf}}
@inproceedings{poll-2005, Abstract = {In the Semantic Web and Semantic Web Services areas there are still unclear issues concerning an appropriate language. Answer Set Programming and ASP engines can be particularly interesting for Ontological Reasoning, especially in the light of ongoing discussions of non-monotonic extensions for Ontology Languages. Previously, the main concern of discussions was around OWL and Description Logics. Recently, many extensions and suggestions for Rule Languages and Semantic Web Languages have popped up, particularly in the context of Semantic Web Services, which involve the meta-data description of Services instead of static data on the Web only. These languages include SWRL, WSML, SWSL-Rules, etc. I want to give an outline of languages, challenges and initiatives in this area and where I think Answer Set Programming research can hook in.}, Author = {Axel Polleres}, Booktitle = {{The Dagstuhl Seminar 05171 -- Nonmonotonic Reasoning, Answer Set Programming and Constraints}}, Month = MAY, Note = {Extended Abstract}, Talk = {Axel Polleres}, Title = {Semantic Web Languages and Semantic Web Services as Application Areas for Answer Set Programming}, Url = {http://drops.dagstuhl.de/opus/volltexte/2005/263}, Year = 2005, Bdsk-Url-1 = {http://drops.dagstuhl.de/opus/volltexte/2005/263}}
@inproceedings{laus-etal-2005-W3Crules, Abstract = {The Web Service Modeling Language (WSML) provides a framework of different language variants to describe semantic Web services. This paper presents the design rationale and relation with existing language recommendations. WSML is a frame based language with an intuitive human readable syntax and XML and RDF exchange syntaxes, as well as a mapping to OWL. It provides different variants, allowing for open and closed world modeling; it is a fully-fledged ontology and rule language with defined variants grounded in well known formalisms, namely Datalog, Description Logic and Frame Logic. Taking the key aspects of WSML as a starting point, we rationalize the design decisions which we consider relevant in designing a proper layering of ontology and rule languages for the Semantic Web and semantic Web services.}, Address = {Washington, D.C., USA}, Author = {Holger Lausen and Jos de Bruijn and Axel Polleres and Dieter Fensel}, Booktitle = {{W3C} Workshop on Rule Languages for Interoperability}, Day = {27--28}, Month = APR, Title = {{WSML} - a Language Framework for Semantic Web Services}, Url = {http://www.w3.org/2004/12/rules-ws/paper/44}, Year = 2005, Bdsk-Url-1 = {http://www.w3.org/2004/12/rules-ws/paper/44}}
@inproceedings{debr-etal-2005-W3Crules, Abstract = {The Web Service Modeling Language WSML provides a framework for the modeling of ontologies and semantic Web services based on the conceptual model of the Web Service Modeling Ontology. In this paper we describe the two rule-based WSML-variants and outline our position with respect to a rule language for the Semantic Web. The first rule-based WSML variant, WSML-Flight, semantically corresponds to the Datalog fragment of F-Logic, extended with inequality in the body and locally stratified negation under the Perfect model semantics. The second, WSML-Rule, is an extension of WSML-Flight to the logic programming subset of F-Logic which allows the use of function symbols and unsafe rules (i.e., there may be variables in rule heads which do not occur in the body).}, Address = {Washington, D.C., USA}, Author = {Jos de Bruijn and Holger Lausen and Axel Polleres and Dieter Fensel}, Booktitle = {{W3C} Workshop on Rule Languages for Interoperability}, Day = {27--28}, Month = APR, Title = {The {WSML} Rule Languages for the Semantic Web}, Url = {http://www.w3.org/2004/12/rules-ws/paper/128}, Year = 2005, Bdsk-Url-1 = {http://www.w3.org/2004/12/rules-ws/paper/128}}
@inproceedings{scic-poll-2005, Abstract = {The Semantic Web is slowly gathering more importance as both academic and industrial organizations are realizing the potential benefit that might be obtained from it. This is especially true in the area of tourism, in which Semantic Web Services can provide a drastically new way to find and book related services such as hotels, flights and taxi transfers. However, many aspects of Semantic Web Services are still under development. This short paper presents issues related to choreography and orchestration representation in the Web Service Modelling Ontology (WSMO) and also how such ideas can be applied to an e-tourism use case.}, Address = {Toulouse, France}, Author = {James Scicluna and Axel Polleres}, Booktitle = {Workshop on Semantic Web Applications at the 11th EUROMEDIA Conference}, Month = APR, Talk = {James Scicluna}, Title = {Semantic Web Service Execution for {WSMO} based Choreographies}, Url = {http://www.polleres.net/publications/scic-poll-2005.pdf}, Year = 2005, Bdsk-Url-1 = {http://www.polleres.net/publications/scic-poll-2005.pdf}}
@inproceedings{fens-etal-2005, Address = {Innsbruck, Austria}, Author = {Dieter Fensel and Uwe Keller and Holger Lausen and Axel Polleres and Ioan Toma}, Booktitle = {{W3C} Workshop on Frameworks for Semantics in Web Services}, Day = {9--10}, Month = JUN, Title = {{What is wrong with Web service discovery}}, Url = {http://www.w3.org/2005/04/FSWS/Submissions/50/WWW_or_What_is_Wrong_with_Web_service_Discovery.pdf}, Year = 2005, Bdsk-Url-1 = {http://www.w3.org/2005/04/FSWS/Submissions/50/WWW_or_What_is_Wrong_with_Web_service_Discovery.pdf}}
@techreport{debr-etal-2004, Author = {Jos de Bruijn and Axel Polleres and Rub{\'e}n Lara and Dieter Fensel}, Institution = DERI, Month = NOV, Number = {DERI-TR-2004-11-10}, Published = {debr-etal-2005}, Title = {{OWL DL vs.\ OWL Flight}: Conceptual Modeling and Reasoning for the Semantic Web}, Url = {http://www.polleres.net/publications/DERI-TR-2004-11-10.pdf}, Year = 2004, Bdsk-Url-1 = {http://www.polleres.net/publications/DERI-TR-2004-11-10.pdf}}
@inproceedings{kife-etal-2004, Address = {Hiroshima, Japan}, Author = {Michael Kifer and Rub{\'e}n Lara and Axel Polleres and Chang Zhao and Uwe Keller and Holger Lausen and Dieter Fensel}, Booktitle = {ISWC 2004 Workshop on Semantic Web Services: Preparing to Meet the World of Business Applications}, Day = 8, Month = NOV, Talk = {Rub{\'e}n Lara}, Title = {A Logical Framework for Web Service Discovery}, Url = {http://www.polleres.net/publications/kife-etal-2004.pdf}, Year = 2004, Bdsk-Url-1 = {http://www.polleres.net/publications/kife-etal-2004.pdf}}
@inproceedings{arro-etal-2004, Address = {Oracle Conference Center, Redwood Shores, CA, USA}, Author = {Sinuh{\'e} Arroyo and Christoph Bussler and Jacek Kopeck{\'y} and Rub{\'e}n Lara and Axel Polleres and Micha{\l} Zaremba}, Booktitle = {{W3C} Workshop on Constraints and Capabilities for Web Services}, Day = {12--13}, Month = OCT, Title = {Web Service Capabilities and Constraints in {WSMO}}, Url = {http://www.w3.org/2004/09/ws-cc-program.html}, Year = 2004, Bdsk-Url-1 = {http://www.w3.org/2004/09/ws-cc-program.html}}
@inproceedings{lara-etal-2004, Address = {Erfurt, Germany}, Author = {Rub{\'e}n Lara and Dumitru Roman and Axel Polleres and Dieter Fensel}, Booktitle = {Proceedings of the European Conference on Web Services (ECOWS 2004)}, Day = {27--30}, Month = SEP, Pages = {254--269}, Series = LNCS, Talk = {Dumitru Roman}, Title = {A Conceptual Comparison of {WSMO} and {OWL-S}}, Url = {http://springerlink.metapress.com/content/p8358uyre5kw3h7h/}, Volume = 3250, Year = 2004, Bdsk-Url-1 = {http://springerlink.metapress.com/content/p8358uyre5kw3h7h/}}
@inproceedings{olme-etal-2004, Address = {San Diego, California, USA}, Author = {Daniel Olmedilla and Rub{\'e}n Lara and Axel Polleres and Holger Lausen}, Booktitle = {Proceedings of the First International Workshop on Semantic Web Services and Web Process Composition (SWSWPC 2004)}, Day = 6, Month = JUL, Talk = {Daniel Olmedilla}, Title = {Trust Negotiation for Semantic Web Services}, Url = {http://www.polleres.net/publications/olme-etal-2004.pdf}, Year = 2004, Bdsk-Url-1 = {http://www.polleres.net/publications/olme-etal-2004.pdf}}
@techreport{debr-poll-2004, Author = {Jos de Bruijn and Axel Polleres}, Institution = DERI, Month = JUN, Number = {DERI-TR-2004-06-30}, Title = {Towards an Ontology Mapping Specification Language for the Semantic Web}, Url = {http://polleres.net/publications/DERI-TR-2004-06-30.pdf}, Year = 2004, Bdsk-Url-1 = {http://polleres.net/publications/DERI-TR-2004-06-30.pdf}}
@incollection{eite-etal-2004-planningbook, Abstract = {This chapter introduces planning and knowledge representation in the declarative action language K. Rooted in the area of Knowledge Representation & Reasoning, action languages like K allow the formalization of complex planning problems involving non-determinism and incomplete knowledge in a very flexible manner. By giving an overview of existing planning languages and comparing these against our language, we aim at further promoting the applicability and usefulness of high-level action languages in the area of planning. As opposed to previously existing languages for modeling actions and change, K adopts a logic programming view where fluents representing the epistemic state of an agent might be true, false or undefined in each state. We will show that this view of knowledge states can be fruitfully applied to several well-known planning domains from the literature as well as novel planning domains. Remarkably, K often allows one to model problems more concisely than previous action languages. All the examples given can be tested in an available implementation, the DLV$^K$ planning system.}, Author = {Thomas Eiter and Wolfgang Faber and Gerald Pfeifer and Axel Polleres}, Booktitle = {Intelligent Techniques for Planning}, Editor = {Ioannis Vlahavas and Dimitris Vrakas}, Publisher = {{IDEA} Group Publishing}, Title = {Declarative Planning and Knowledge Representation in an Action Language}, Url = {http://www.idea-group.com/books/details.asp?id=4496}, Year = 2004, Bdsk-Url-1 = {http://www.idea-group.com/books/details.asp?id=4496}}
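The knowledge-state view described in the chapter above (a fluent may be true, false, or undefined in a state) can be sketched with three-valued fluents in Python. The toy domain and action names below are invented; this is not DLV$^K$ input syntax:

from enum import Enum

class V(Enum):
    TRUE = 1
    FALSE = 2
    UNDEFINED = 3  # the agent has no knowledge about the fluent

state = {"door_locked": V.UNDEFINED, "door_open": V.FALSE}

def check_lock(s):
    """A sensing action: afterwards door_locked is known (here it senses FALSE)."""
    return {**s, "door_locked": V.FALSE}

def open_door(s):
    """Executable only once door_locked is *known* to be false."""
    if s["door_locked"] is not V.FALSE:
        raise ValueError("precondition not known to hold")
    return {**s, "door_open": V.TRUE}

for action in (check_lock, open_door):  # a two-step plan
    state = action(state)
print(state)  # both fluents are now known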
@inproceedings{eite-poll-2004, Address = {Fort Lauderdale, Florida, USA}, Author = {Thomas Eiter and Axel Polleres}, Booktitle = {{Proceedings of the Seventh International Conference on Logic Programming and Nonmonotonic Reasoning (LPNMR-7)}}, Editor = {Vladimir Lifschitz and Ilkka Niemel{\"a}}, Month = JAN, Number = 2923, Pages = {100--113}, Project = {dlv}, Publisher = {Springer}, Series = LNAI, Talk = {Axel Polleres}, Title = {Towards Automated Integration of Guess and Check Programs in Answer Set Programming}, Url = {http://www.springerlink.com/content/fh3lja3lf1lu4qqd/}, Year = 2004, Bdsk-Url-1 = {http://www.springerlink.com/content/fh3lja3lf1lu4qqd/}}
@article{eite-etal-2001d, Author = {Thomas Eiter and Wolfgang Faber and Nicola Leone and Gerald Pfeifer and Axel Polleres}, Journal = TOCL, Month = APR, Number = 2, Pages = {206--263}, Project = {dlv}, Title = {{A Logic Programming Approach to Knowledge-State Planning: Semantics and Complexity}}, Type = JOURNAL, Url = {http://portal.acm.org/citation.cfm?doid=976706.976708}, doi={10.1145/976706.976708}, Volume = 5, Year = 2004, Bdsk-Url-1 = {http://portal.acm.org/citation.cfm?doid=976706.976708}}
@inproceedings{eite-poll-2003, Address = {Reggio Calabria, Italy}, Author = {Thomas Eiter and Axel Polleres}, Booktitle = {{Proceedings of the 2003 Joint Conference on Declarative Programming APPIA-GULP-PRODE 2003}}, Month = SEP, Project = {dlv}, Talk = {Axel Polleres}, Title = {Transforming {coNP} Checks to Answer Set Computation by Meta-Interpretation}, Url = {http://www.kr.tuwien.ac.at/staff/eiter/et-archive/agp03.pdf}, Year = 2003, Bdsk-Url-1 = {http://www.kr.tuwien.ac.at/staff/eiter/et-archive/agp03.pdf}}
@phdthesis{poll-2003, Address = {{Wien, {\"O}sterreich}}, Author = {Axel Polleres}, Month = SEP, School = {{Institut f{\"u}r Informationssysteme, Technische Universit{\"a}t Wien}}, Title = {{Advances in Answer Set Planning}}, Type = THESIS, Url = {http://www.polleres.net/publications/poll-2003-thesis.pdf}, Year = 2003, Bdsk-Url-1 = {http://www.polleres.net/publications/poll-2003-thesis.pdf}}
@inproceedings{dix-etal-2003, Author = {J{\"u}rgen Dix and Thomas Eiter and Michael Fink and Axel Polleres and Yingqian Zhang}, Booktitle = {{Proceedings of the 26th German Conference on Artificial Intelligence (KI2003)}}, Day = {15--18}, Month = SEP, Pages = {646--660}, Publisher = {Springer}, Series = LNCS, Talk = {Yingqian Zhang}, Title = {{Monitoring Agents using Declarative Planning}}, Url = {http://www.kr.tuwien.ac.at/staff/eiter/et-archive/KI03.pdf}, Volume = 2821, Year = 2003, Bdsk-Url-1 = {http://www.kr.tuwien.ac.at/staff/eiter/et-archive/KI03.pdf}}
@article{dix-etal-2003-FI, Author = {J{\"u}rgen Dix and Thomas Eiter and Michael Fink and Axel Polleres and Yingqian Zhang}, Journal = FI, Month = NOV, Number = 2, Pages = {345--370}, Title = {{Monitoring Agents using Declarative Planning}}, Type = JOURNAL, Url = {http://www.kr.tuwien.ac.at/research/reports/rr0310.ps.gz}, Volume = 57, Year = 2003, Bdsk-Url-1 = {http://www.kr.tuwien.ac.at/research/reports/rr0310.ps.gz}}
@inproceedings{poll-2003-abstract, Author = {Axel Polleres}, Booktitle = {Printed Notes of the ICAPS-03 Doctoral Consortium}, Day = 11, Editor = {Jeremy Frank and Susanne Biundo}, Month = JUN, Pages = {94--98}, Talk = {Axel Polleres}, Title = {The Declarative Planning System {DLV$^K$}: Progress and Extensions}, Url = {http://icaps03.itc.it/satellite_events/documents/dc/21/Polleres.pdf}, Year = 2003, Bdsk-Url-1 = {http://icaps03.itc.it/satellite_events/documents/dc/21/Polleres.pdf}}
@article{eite-etal-2002costs, Author = {Thomas Eiter and Wolfgang Faber and Nicola Leone and Gerald Pfeifer and Axel Polleres}, Journal = JAIR, Pages = {25--71}, Project = {dlv}, Title = {{Answer Set Planning under Action Costs}}, Type = JOURNAL, Url = {https://www.aaai.org/Papers/JAIR/Vol19/JAIR-1903.pdf}, Volume = 19, Year = 2003, Bdsk-Url-1 = {http://www.jair.org/media/1148/live-1148-2166-jair.ps}}
@article{eite-etal-2001e, Author = {Thomas Eiter and Wolfgang Faber and Nicola Leone and Gerald Pfeifer and Axel Polleres}, Journal = AI, Month = MAR, Number = {1--2}, Pages = {157--211}, Project = {dlv}, Title = {{A Logic Programming Approach to Knowledge-State Planning, II: the {\small DLV}$^{\cal K}$ System}}, Type = JOURNAL, Url = {http://www.kr.tuwien.ac.at/staff/eiter/et-archive/dlvk.ps.gz}, Volume = 144, Year = 2003, Bdsk-Url-1 = {http://www.kr.tuwien.ac.at/staff/eiter/et-archive/dlvk.ps.gz}}
@inproceedings{eite-etal-2002c, Address = {Cosenza, Italy}, Author = {Thomas Eiter and Wolfgang Faber and Nicola Leone and Gerald Pfeifer and Axel Polleres}, Booktitle = {{Proceedings of the 8th European Conference on Logics in Artificial Intelligence (JELIA)}}, Editor = {Sergio Flesca and Sergio Greco and Giovambattista Ianni and Nicola Leone}, Month = SEP, Pages = {186--197}, Project = {dlv}, Series = LNCS, Talk = {Wolfgang Faber}, Title = {{Answer Set Planning under Action Costs}}, Url = {http://www.springerlink.com/content/4n09qgkbc8kyuy32/}, Volume = 2424, Year = 2002, Bdsk-Url-1 = {http://www.springerlink.com/content/4n09qgkbc8kyuy32/}}
@inproceedings{eite-etal-2002d, Address = {Cosenza, Italy}, Author = {Thomas Eiter and Wolfgang Faber and Nicola Leone and Gerald Pfeifer and Axel Polleres}, Booktitle = {{Proceedings of the 8th European Conference on Logics in Artificial Intelligence (JELIA)}}, Editor = {Sergio Flesca and Sergio Greco and Giovambattista Ianni and Nicola Leone}, Month = SEP, Note = {(System Description)}, Pages = {541--544}, Project = {dlv}, Series = LNCS, Talk = {Axel Polleres}, Title = {{The DLV$^{\cal K}$ Planning System: Progress Report}}, Url = {http://www.springerlink.com/content/u41r7dgqllh9t3fh/}, Volume = 2424, Year = 2002, Bdsk-Url-1 = {http://www.springerlink.com/content/u41r7dgqllh9t3fh/}}
@inproceedings{leon-etal-2002, Address = {Cosenza, Italy}, Author = {Nicola Leone and Gerald Pfeifer and Wolfgang Faber and Francesco Calimeri and Tina Dell'Armi and Thomas Eiter and Georg Gottlob and Giovambattista Ianni and Giuseppe Ielpa and Christoph Koch and Simona Perri and Axel Polleres}, Booktitle = {{Proceedings of the 8th European Conference on Logics in Artificial Intelligence (JELIA)}}, Editor = {Sergio Flesca and Sergio Greco and Giovambattista Ianni and Nicola Leone}, Month = SEP, Note = {(System Description)}, Pages = {537--540}, Project = {dlv}, Series = LNCS, Talk = {Gerald Pfeifer}, Title = {{The DLV System}}, Url = {http://www.springerlink.com/content/b4fd88dgfx3pl3pl/}, Volume = 2424, Year = 2002, Bdsk-Url-1 = {http://www.springerlink.com/content/b4fd88dgfx3pl3pl/}}
@techreport{eite-etal-2002costs-TR, Author = {Thomas Eiter and Wolfgang Faber and Nicola Leone and Gerald Pfeifer and Axel Polleres}, Institution = {Institut f{\"u}r Informationssysteme, Technische Universit{\"a}t Wien}, Month = OCT, Note = {Published in {Journal of Artificial Intelligence Research}}, Number = {INFSYS RR-1843-02-13}, Postscript = {http://www.kr.tuwien.ac.at/research/reports/rr0213.ps.gz}, Project = {dlv}, Published = {eite-etal-2002costs}, Title = {{Answer Set Planning under Action Costs}}, Year = 2002}
@article{poll-2002-abstract, Author = {Axel Polleres}, Journal = {{The PLANET Newsletter}}, Pages = {36--37}, Title = {{Answer Set Planning with DLV$^K$}}, Type = MAGAZINE, Url = {http://planet.dfki.de/service/Resources/Newsletter/Planet-News-5.pdf}, Volume = 5, Year = 2002, Bdsk-Url-1 = {http://planet.dfki.de/service/Resources/Newsletter/Planet-News-5.pdf}}
@misc{poll-2002, Author = {Axel Polleres}, Booktitle = {{PLANET'02 International Summer School on AI Planning 2002}}, Day = {16--22}, Month = SEP, Note = {{Poster presented at the PLANET'02 International Summer School on AI Planning 2002}}, Pdf = {http://www.polleres.net/publications/poll-2002-PLANET_Poster.pdf}, Title = {{Answer Set Planning with DLV$^K$: Planning with Action Costs}}, Year = 2002}
@article{OCG2002a, Author = {Axel Polleres}, Journal = {COMPUTER kommunikativ}, Note = {Conference report}, Pages = {28--29}, Title = {{JELIA 2002}}, Type = MAGAZINE, Volume = {5/2002}, Year = 2002}
@article{OCG2002b, Author = {Axel Polleres}, Journal = {COMPUTER kommunikativ}, Note = {Conference report}, Pages = {30--31}, Title = {{Planen in der AI}}, Type = MAGAZINE, Volume = {5/2002}, Year = 2002}
@article{OEGAI2002a, Author = {Axel Polleres}, Journal = {{\"OGAI Journal}}, Note = {Conference report}, Number = 4, Pages = {23--25}, Title = {{JELIA 2002}}, Volume = 21, Year = 2002}
@article{OEGAI2002b, Author = {Axel Polleres}, Journal = {{\"OGAI Journal}}, Note = {Conference report}, Number = 4, Pages = {26--29}, Title = {{PLANET International Summer School on AI Planning 2002, Chalkidiki, Griechenland}}, Volume = 21, Year = 2002}
@mastersthesis{poll-2001, Address = {{Wien, {\"O}sterreich}}, Author = {Axel Polleres}, Day = 1, Month = feb, School = {{Institut f{\"u}r Informationssysteme, Technische Universit{\"a}t Wien}}, Title = {{The DLV$^{\cal K}$ System for Planning with Incomplete Knowledge}}, Type = THESIS, Url = {http://www.polleres.net/publications/poll-2001-masterthesis.pdf}, Year = 2001, Bdsk-Url-1 = {http://www.polleres.net/publications/poll-2001-masterthesis.pdf}}
@inproceedings{eite-etal-2001b, Author = {Thomas Eiter and Wolfgang Faber and Nicola Leone and Gerald Pfeifer and Axel Polleres}, Booktitle = {{IJCAI-01 Workshop on Planning under Uncertainty and Incomplete Information}}, Editor = {Alessandro Cimatti and H{\'e}ctor Geffner and Enrico Giunchiglia and Jussi Rintanen}, Month = AUG, Pages = {76--81}, Project = {dlv}, Talk = {Wolfgang Faber}, Title = {{The DLV$^{\cal K}$ Planning System}}, Year = 2001}
@inproceedings{eite-etal-2001c, Author = {Thomas Eiter and Wolfgang Faber and Nicola Leone and Gerald Pfeifer and Axel Polleres}, Booktitle = {{Logic Programming and Nonmonotonic Reasoning --- 6th International Conference, LPNMR'01, Vienna, Austria, September 2001, Proceedings}}, Day = {17--19}, Editor = {Thomas Eiter and Wolfgang Faber and Miros{\l}aw Truszczy{\'n}ski}, Month = SEP, Number = {2173}, Pages = {413--416}, Project = {dlv}, Publisher = {Springer Verlag}, Series = LNAI, Talk = {Gerald Pfeifer}, Title = {{System Description: The DLV$^{\cal K}$ Planning System}}, Url = {http://www.dbai.tuwien.ac.at/proj/dlv/papers/fabe-etal-2001c.ps.gz}, Year = 2001, Bdsk-Url-1 = {http://www.dbai.tuwien.ac.at/proj/dlv/papers/fabe-etal-2001c.ps.gz}}
@techreport{eite-etal-2001d-TR, Author = {Thomas Eiter and Wolfgang Faber and Nicola Leone and Gerald Pfeifer and Axel Polleres}, Institution = {Institut f{\"u}r Informationssysteme, Technische Universit{\"a}t Wien}, Month = DEC, Number = {INFSYS RR-1843-01-11}, Project = {dlv}, Published = {eite-etal-2001d}, Title = {{A Logic Programming Approach to Knowledge-State Planning: Semantics and Complexity}}, Url = {http://www.kr.tuwien.ac.at/research/reports/rr0111.ps.gz}, Year = 2001, Bdsk-Url-1 = {http://www.kr.tuwien.ac.at/research/reports/rr0111.ps.gz}}
@techreport{eite-etal-2001e-TR, Author = {Thomas Eiter and Wolfgang Faber and Nicola Leone and Gerald Pfeifer and Axel Polleres}, Institution = {Institut f{\"u}r Informationssysteme, Technische Universit{\"a}t Wien}, Month = DEC, Number = {INFSYS RR-1843-01-12}, Project = {dlv}, Published = {eite-etal-2001e}, Title = {{A Logic Programming Approach to Knowledge-State Planning, II: the {\small DLV}$^{\cal K}$ System}}, Url = {http://www.kr.tuwien.ac.at/research/reports/rr0112.ps.gz}, Year = 2001, Bdsk-Url-1 = {http://www.kr.tuwien.ac.at/research/reports/rr0112.ps.gz}}
@inproceedings{eite-etal-00a, Author = {Thomas Eiter and Wolfgang Faber and Nicola Leone and Gerald Pfeifer and Axel Polleres}, Booktitle = {Proceedings of the 14th Workshop on Logic Programming (WLP'99)}, Editor = {Fran\c{c}ois Bry and Ulrich Geske and Dietmar Seipel}, Month = JAN, Note = {ISSN 1435-2702}, Pages = {125--134}, Project = {dlv}, Publisher = {GMD -- Forschungszentrum Informationstechnik GmbH, Berlin}, Talk = {Wolfgang Faber}, Title = {{Using the {\tt dlv} System for Planning and Diagnostic Reasoning}}, Url = {http://www.polleres.net/publications/eite-etal-WLP99.pdf}, Year = 2000, Bdsk-Url-1 = {http://www.polleres.net/publications/eite-etal-WLP99.pdf}}
@inproceedings{eite-etal-2000d, Address = {London, UK}, Author = {Thomas Eiter and Wolfgang Faber and Nicola Leone and Gerald Pfeifer and Axel Polleres}, Booktitle = {Computational Logic - CL 2000, First International Conference, Proceedings}, Editor = {John Lloyd and Veronica Dahl and Ulrich Furbach and Manfred Kerber and Kung-Kiu Lau and Catuscia Palamidessi and Lu{\'\i}s Moniz Pereira and Yehoshua Sagiv and Peter J. Stuckey}, Month = JUL, Number = 1861, Pages = {807--821}, Project = {dlv}, Publisher = {Springer Verlag}, Series = LNAI, Talk = {Nicola Leone}, Title = {{Planning under Incomplete Knowledge}}, Url = {http://www.dbai.tuwien.ac.at/proj/dlv/K/dlvk.ps.gz}, Year = 2000, Bdsk-Url-1 = {http://www.dbai.tuwien.ac.at/proj/dlv/K/dlvk.ps.gz}}
@inproceedings{egly-etal-1999, Author = {Uwe Egly and Michael Fink and Axel Polleres and Hans Tompits}, Booktitle = {Proceedings of the World Conference on the WWW and Internet (WEBNET'99)}, Publisher = {AACE}, Title = {A Web-Based Tutoring Tool for Calculating Default Logic Extensions}, Url = {http://www.kr.tuwien.ac.at/staff/michael/research/webnet99.pdf}, Year = 1999, Bdsk-Url-1 = {http://www.kr.tuwien.ac.at/staff/michael/research/webnet99.pdf}}