2024 (1)

Preparing multi-layered visualisations of Old Babylonian cuneiform tablets for a machine learning OCR training model towards automated sign recognition. Hameeuw, H.; De Graef, K.; Ryberg Smidt, G.; Goddeeris, A.; Homburg, T.; and Kumar Thirukokaranam Chandrasekar, K. it - Information Technology, January 2024.

@article{hameeuw2024preparing,
  title   = {Preparing multi-layered visualisations of Old Babylonian cuneiform tablets for a machine learning OCR training model towards automated sign recognition},
  author  = {Hameeuw, Hendrik and De Graef, Katrien and Ryberg Smidt, Gustav and Goddeeris, Anne and Homburg, Timo and Kumar Thirukokaranam Chandrasekar, Krishna},
  year    = {2024},
  month   = {jan},
  day     = {2},
  doi     = {10.1515/itit-2023-0063},
  url     = {https://doi.org/10.1515/itit-2023-0063},
  journal = {it - Information Technology}
}

2023 (7)

From an Analog to a Digital Workflow: An Introductory Approach to Digital Editions in Assyriology. Homburg, T.; Brandes, T.; Huber, E.; and Hedderich, M. CDLI Journal, December 2023.

@article{homburg2023fromanalog,
  title     = {From an Analog to a Digital Workflow: An Introductory Approach to Digital Editions in Assyriology},
  author    = {Homburg, Timo and Brandes, Tim and Huber, Eva-Maria and Hedderich, Michael},
  year      = {2023},
  month     = {dec},
  day       = {24},
  publisher = {CDLI Journal},
  url       = {https://cdli.mpiwg-berlin.mpg.de/articles/cdlb/2023-4}
}

CNN Based Cuneiform Sign Detection Learned from Annotated 3D Renderings and Mapped Photographs with Illumination Augmentation. Stötzner, E.; Homburg, T.; and Mara, H. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV) Workshops, pages 1680-1688, October 2023.

@inproceedings{Stotzner_2023_ICCV,
  author    = {St\"otzner, Ernst and Homburg, Timo and Mara, Hubert},
  title     = {CNN Based Cuneiform Sign Detection Learned from Annotated 3D Renderings and Mapped Photographs with Illumination Augmentation},
  booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV) Workshops},
  month     = {October},
  year      = {2023},
  doi       = {10.1109/ICCVW60793.2023.00183},
  pages     = {1680-1688},
  url       = {https://openaccess.thecvf.com/content/ICCV2023W/e-Heritage/papers/Stotzner_CNN_Based_Cuneiform_Sign_Detection_Learned_from_Annotated_3D_Renderings_ICCVW_2023_paper.pdf}
}

Spatial Data on the Web Best Practices: W3C Working Group Note. Tandy, J.; van den Brink, L.; Barnaghi, P.; and Homburg, T. W3C, September 2023.

@article{homburg2023sdw,
  title  = {Spatial Data on the Web Best Practices: W3C Working Group Note},
  author = {Tandy, Jeremy and van den Brink, Linda and Barnaghi, Payam and Homburg, Timo},
  day    = {19},
  month  = {sep},
  year   = {2023},
  url    = {https://www.w3.org/TR/sdw-bp/}
}

MaiCuBeDa Hilprecht - Mainz Cuneiform Benchmark Dataset for the Hilprecht Collection. Mara, H.; and Homburg, T. heiDATA, 2023.

@data{QSNIQ2_2023,
  author    = {Mara, Hubert and Homburg, Timo},
  publisher = {heiDATA},
  title     = {{MaiCuBeDa Hilprecht - Mainz Cuneiform Benchmark Dataset for the Hilprecht Collection}},
  unf       = {UNF:6:NXlfO+rwTQYYtmBeze9QUw==},
  year      = {2023},
  version   = {V1},
  doi       = {10.11588/data/QSNIQ2},
  url       = {https://doi.org/10.11588/data/QSNIQ2}
}

R-CNN based PolygonalWedge Detection Learned from Annotated 3D Renderings and Mapped Photographs of Open Data Cuneiform Tablets. Stötzner, E.; Homburg, T.; Bullenkamp, J. P.; and Mara, H. In Bucciero, A.; Fanini, B.; Graf, H.; Pescarin, S.; and Rizvic, S., editors, Eurographics Workshop on Graphics and Cultural Heritage, September 2023. The Eurographics Association. Best Paper Award.

@inproceedings{10.2312:gch.20231157,
  booktitle = {Eurographics Workshop on Graphics and Cultural Heritage},
  editor    = {Bucciero, Alberto and Fanini, Bruno and Graf, Holger and Pescarin, Sofia and Rizvic, Selma},
  title     = {{R-CNN based PolygonalWedge Detection Learned from Annotated 3D Renderings and Mapped Photographs of Open Data Cuneiform Tablets}},
  author    = {Stötzner, Ernst and Homburg, Timo and Bullenkamp, Jan Philipp and Mara, Hubert},
  year      = {2023},
  month     = {sep},
  note      = {Best Paper Award},
  publisher = {The Eurographics Association},
  issn      = {2312-6124},
  isbn      = {978-3-03868-217-2},
  doi       = {10.2312/gch.20231157}
}

CNN based Cuneiform Sign Detection Learned from Annotated 3D Renderings and Mapped Photographs with Illumination Augmentation (Preprint). Stötzner, E.; Homburg, T.; and Mara, H. arXiv, August 2023.

@article{stoetzner2023cnn,
  title         = {CNN based Cuneiform Sign Detection Learned from Annotated 3D Renderings and Mapped Photographs with Illumination Augmentation (Preprint)},
  author        = {Ernst Stötzner and Timo Homburg and Hubert Mara},
  year          = {2023},
  month         = {aug},
  url           = {https://arxiv.org/abs/2308.11277},
  eprint        = {2308.11277},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CV}
}

Forschungssoftware rezensieren: Konzeption, Durchführung und Umsetzung. Homburg, T.; Klammt, A.; Offert, F.; and Thiery, F. Zenodo, March 2023.

@inbook{homburgworkshop,
  author    = {Homburg, Timo and Klammt, Anne and Offert, Fabian and Thiery, Florian},
  title     = {Forschungssoftware rezensieren: Konzeption, Durchführung und Umsetzung},
  publisher = {Zenodo},
  year      = {2023},
  month     = {mar},
  doi       = {10.5281/zenodo.7688632},
  url       = {https://doi.org/10.5281/zenodo.7688632}
}

2022 (7)

Software Review for the Software PointSamplingTool. Thiery, F.; Homburg, T.; Klammt, A.; and Schmidt, S. Archäologische Informationen, 44, November 2022.

@article{homburgpointsampling,
  title   = {Software Review for the Software PointSamplingTool},
  author  = {Thiery, Florian and Homburg, Timo and Klammt, Anne and Schmidt, Sophie-Charlotte},
  journal = {Archäologische Informationen},
  volume  = {44},
  month   = {nov},
  day     = {18},
  year    = {2022},
  url     = {https://dguf.de/fileadmin/AI/archinf-ev_thiery-etal.pdf}
}

3D Data Derivatives of the Haft Tappeh Processing Pipeline. Homburg, T.; Zwick, R.; Bruhn, K.; and Mara, H. CDLI Journal, October 2022.

@article{homburg3dderivatives,
  title   = {3D Data Derivatives of the Haft Tappeh Processing Pipeline},
  author  = {Homburg, Timo and Zwick, Robert and Bruhn, Kai-Christian and Mara, Hubert},
  journal = {CDLI Journal},
  month   = {oct},
  day     = {17},
  year    = {2022},
  url     = {https://cdli.mpiwg-berlin.mpg.de/articles/cdlj/2022-1}
}

Towards the Integration of Cuneiform in the OntoLex-Lemon Framework. Homburg, T.; and Declerck, T. 2022.

@misc{lemoncuneiform,
  author = {Homburg, Timo and Declerck, Thierry},
  title  = {Towards the Integration of Cuneiform in the OntoLex-Lemon Framework},
  year   = {2022}
}

Annotated 3D-Models of Cuneiform Tablets. Homburg, T.; Zwick, R.; Mara, H.; and Bruhn, K. Journal of Open Archaeology Data, 10(4), May 2022.

@article{bestpractices3d,
  title   = {Annotated 3D-Models of Cuneiform Tablets},
  author  = {Homburg, Timo and Zwick, Robert and Mara, Hubert and Bruhn, Kai-Christian},
  journal = {Journal of Open Archaeology Data},
  issn    = {2049-1565},
  volume  = {10},
  number  = {4},
  day     = {20},
  month   = {may},
  year    = {2022},
  url     = {https://openarchaeologydata.metajnl.com/articles/10.5334/joad.92/},
  doi     = {10.5334/joad.92}
}

Our dataset consists of 3D scans of one cuneiform tablet from Haft Tappeh, Iran, and one cuneiform tablet of the Hilprecht Collection, as well as 3D annotations on these 3D meshes, including metadata. The 3D annotations were created with the annotation software Annotorious2 on 2D renderings and reprojected to the original 3D model. The respective 2D renderings and annotations in 2D are also part of this data publication. The annotations might be used in machine learning tasks for character recognition, linguistic studies, or visualization in Assyriology. We publish these data in different formats and give guidance on how to use them in different usage scenarios and with several software applications. The data serve as the basis for a detailed description, reasoning, and elaboration of a recommendation for the state-of-the-art handling of 3D data in cuneiform research. The data is stored as an archive on Zenodo and may serve as an example for replication by similar cuneiform projects.

Proceedings of the 5th International Workshop on Geospatial Linked Data 2022. Homburg, T.; Yaman, B.; Sherif, M. A.; and Haller, A., editors. Volume Vol-3157, CEUR-WS, June 2022.

@proceedings{geold2022,
  title     = {Proceedings of the 5th International Workshop on Geospatial Linked Data 2022},
  booktitle = {Proceedings of the 5th International Workshop on Geospatial Linked Data 2022},
  editor    = {Homburg, Timo and Yaman, Beyza and Sherif, Mohamed Ahmed and Haller, Armin},
  venue     = {Hersonissos, Greece},
  day       = {21},
  month     = {jun},
  year      = {2022},
  publisher = {CEUR-WS},
  issn      = {1613-0073},
  urn       = {urn:nbn:de:0074-3157-1},
  url       = {https://CEUR-WS.org/Vol-3157/},
  volume    = {Vol-3157}
}

Implementation and Compliance Benchmarking of a DGGS-enabled, GeoSPARQL-aware Triplestore. Habgood, D.; Homburg, T.; Car, N. J.; and Jovanovik, M. In Homburg, T.; Yaman, B.; Sherif, M. A.; and Haller, A., editors, Geospatial Linked Data Workshop 2022, volume Vol-3157, 2022. CEUR-WS.

@inproceedings{habgooddggs,
  title     = {Implementation and Compliance Benchmarking of a DGGS-enabled, GeoSPARQL-aware Triplestore},
  author    = {Habgood, David and Homburg, Timo and Car, Nicholas J. and Jovanovik, Milos},
  booktitle = {Geospatial Linked Data Workshop 2022},
  editor    = {Homburg, Timo and Yaman, Beyza and Sherif, Mohamed Ahmed and Haller, Armin},
  year      = {2022},
  publisher = {CEUR-WS},
  issn      = {1613-0073},
  urn       = {urn:nbn:de:0074-3157-1},
  url       = {https://CEUR-WS.org/Vol-3157/paper7.pdf},
  volume    = {Vol-3157},
  keywords  = {Geospatial Data, DGGS, GeoSPARQL, Apache Jena Fuseki, Compliance Benchmarking}
}

We set out to determine the feasibility of implementing Discrete Global Grid System (DGGS) representations of geometry support in a GeoSPARQL-enabled triplestore, and test the GeoSPARQL compliance for it. The implementation is a variant of Apache Jena's existing GeoSPARQL support. Compliance is tested using an adapted implementation of the GeoSPARQL Compliance Benchmark testing system developed previously to test for GeoSPARQL 1.0 compliance. The benchmark results confirm that a majority of the functions which were set out to be implemented in the course of this paper were implemented correctly and point out possible future work for full compliance.

GeoSPARQL 1.1: Motivations, Details and Applications of the Decadal Update to the Most Important Geospatial LOD Standard. Car, N. J.; and Homburg, T. ISPRS International Journal of Geo-Information, 11(2), February 2022.

@article{homburggeosparql11isprs,
  title          = {GeoSPARQL 1.1: Motivations, Details and Applications of the Decadal Update to the Most Important Geospatial LOD Standard},
  author         = {Car, Nicholas J. and Homburg, Timo},
  journal        = {ISPRS International Journal of Geo-Information},
  month          = {feb},
  year           = {2022},
  publisher      = {MDPI},
  issn           = {2220-9964},
  volume         = {11},
  number         = {2},
  article-number = {117},
  doi            = {10.3390/ijgi11020117},
  url            = {https://www.mdpi.com/2220-9964/11/2/117},
  keywords       = {GeoSPARQL, SPARQL, geospatial, OGC, Open Geospatial Consortium, spatial, ontology, spatial functions}
}

In 2012 the Open Geospatial Consortium published GeoSPARQL, defining "an RDF/OWL ontology for [spatial] information", "SPARQL extension functions" for performing spatial operations on RDF data, and "RIF rules" defining entailments to be drawn from graph pattern matching. In the 8+ years since its publication, GeoSPARQL has become the most important spatial Semantic Web standard, as judged by references to it in other Semantic Web standards and its wide use for Semantic Web data. An update to GeoSPARQL was proposed in 2019 to deliver a version 1.1 with a charter to handle outstanding change requests, to source new ones from the user community, and to "better present" the standard, that is, to better link all the standard's parts and to better document and exemplify elements. Expected updates included new geometry representations, alignments to other ontologies, handling of new spatial referencing systems, and new artifact presentation. This paper describes motivating change requests and actual resultant updates in the candidate version 1.1 of the standard alongside reference implementations and usage examples. We also describe the theory behind particular updates, initial implementations of many parts of the standard, and our expectations for GeoSPARQL 1.1's use.

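As a hedged illustration of one class of additions discussed in this update, geometry format conversion: assuming a triplestore that implements the proposed geof:asGeoJSON function, a conversion query could be issued from Python as sketched below. The endpoint URL and the coordinates are invented placeholders, not taken from the paper.

from SPARQLWrapper import SPARQLWrapper, JSON

# Hypothetical local endpoint of a GeoSPARQL-1.1-aware triplestore.
sparql = SPARQLWrapper("http://localhost:3030/ds/sparql")
sparql.setQuery("""
PREFIX geo:  <http://www.opengis.net/ont/geosparql#>
PREFIX geof: <http://www.opengis.net/def/function/geosparql/>
SELECT ?geojson WHERE {
  BIND("POINT(8.27 50.00)"^^geo:wktLiteral AS ?wkt)
  # Proposed in GeoSPARQL 1.1: serialise a geometry literal as GeoJSON.
  BIND(geof:asGeoJSON(?wkt) AS ?geojson)
}
""")
sparql.setReturnFormat(JSON)
for row in sparql.query().convert()["results"]["bindings"]:
    print(row["geojson"]["value"])

A store without 1.1 support would simply fail to evaluate the function, which is itself a quick way to probe what a given endpoint implements.
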
2021 (11)

Cuneiform in the LOD cloud: Connecting 2D and 3D representations of philological objects with linguistic concepts. Homburg, T.; Mara, H.; and Bruhn, K. Zenodo, November 2021. Poster at Linked Pasts Conference.

@misc{homburg_timo_2021_5749763,
  author    = {Homburg, Timo and Mara, Hubert and Bruhn, Kai-Christian},
  title     = {{Cuneiform in the LOD cloud: Connecting 2D and 3D representations of philological objects with linguistic concepts}},
  month     = {nov},
  year      = {2021},
  publisher = {Zenodo},
  doi       = {10.5281/zenodo.5749763},
  url       = {https://doi.org/10.5281/zenodo.5749763},
  note      = {Poster at Linked Pasts Conference}
}

SPARQLing Publication of Irish ᚑᚌᚆᚐᚋ – Ogham Stones as LOD. Thiery, F.; Schmidt, S.; and Homburg, T. In ArcheoFOSS XIV 2020: Open Software, Hardware, Processes, Data and Formats in Archaeological Research, November 2021. Archaeopress Publishing Ltd.

@inproceedings{archeofoss2020_2,
  author    = {Thiery, Florian and Schmidt, Sophie-Charlotte and Homburg, Timo},
  title     = {SPARQLing Publication of Irish ᚑᚌᚆᚐᚋ – Ogham Stones as LOD},
  booktitle = {ArcheoFOSS XIV 2020: Open Software, Hardware, Processes, Data and Formats in Archaeological Research},
  journal   = {Archaeopress Archaeology},
  year      = {2021},
  month     = {nov},
  day       = {23},
  publisher = {Archaeopress Publishing Ltd},
  url       = {https://www.archaeopress.com/Archaeopress/Products/9781803271248},
  isbn      = {978-1-80327-124-8}
}

Linked Open Data (LOD) is used to interlink data within the WWW semantically, enables citizen science and improves data sharing practices for academia. We discuss the semantic modelling and publication strategies in Wikidata and within our own ontology. Using a bespoke ontology gives more freedom for modelling, but requires high technical specialisation. Wikidata facilitates citizen science, but the data author loses data sovereignty. We will exemplify this using Ogham data, which is the main subject of the Ogi Ogham Project as well as the Irish ᚑᚌᚆᚐᚋ Stones in the Wikimedia Universe project.

Little Minions and SPARQL Unicorns as tools for archaeology. Homburg, T.; and Thiery, F. In ArcheoFOSS XIV 2020: Open Software, Hardware, Processes, Data and Formats in Archaeological Research, November 2021. Archaeopress Publishing Ltd.

@inproceedings{archeofoss2020_1,
  author    = {Homburg, Timo and Thiery, Florian},
  title     = {Little Minions and SPARQL Unicorns as tools for archaeology},
  booktitle = {ArcheoFOSS XIV 2020: Open Software, Hardware, Processes, Data and Formats in Archaeological Research},
  journal   = {Archaeopress Archaeology},
  year      = {2021},
  month     = {nov},
  day       = {23},
  publisher = {Archaeopress Publishing Ltd},
  url       = {https://www.archaeopress.com/Archaeopress/Products/9781803271248},
  isbn      = {978-1-80327-124-8}
}

We introduce the SPARQLing Unicorn QGIS Plugin as a tool for querying and converting Linked Open Data (LOD) resources and making them accessible using QGIS. The plugin enables QGIS to access LOD resources for the first time, and can be used to include LOD in QGIS projects to enrich geospatial data sets with information gained from the Semantic Web and to prepare geospatial data sets for the publication as LOD resources. We illustrate these functions using examples from an archaeological context, and show how spatial LOD can be published and made accessible using LOD browser implementations.

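To illustrate the core idea behind such a tool (a hedged sketch, not the plugin's actual code): query a Linked Open Data endpoint for entities with coordinates and turn the bindings into GeoJSON features a GIS can consume. The Wikidata endpoint and property P625 used below are real; the query itself is a generic example rather than one taken from the paper.

import json
import urllib.parse
import urllib.request

ENDPOINT = "https://query.wikidata.org/sparql"

# wdt:P625 is Wikidata's "coordinate location" property; the query simply
# fetches a handful of items that carry a WKT point geometry.
QUERY = """
SELECT ?item ?itemLabel ?coord WHERE {
  ?item wdt:P625 ?coord .
  SERVICE wikibase:label { bd:serviceParam wikibase:language "en". }
} LIMIT 5
"""

url = ENDPOINT + "?" + urllib.parse.urlencode({"query": QUERY, "format": "json"})
request = urllib.request.Request(url, headers={"User-Agent": "lod-geojson-sketch/0.1"})
with urllib.request.urlopen(request) as response:
    bindings = json.load(response)["results"]["bindings"]

features = []
for b in bindings:
    # Wikidata serialises coordinates as WKT, e.g. "Point(12.48 41.89)" (lon lat).
    lon, lat = b["coord"]["value"].strip("Point()").split()
    features.append({
        "type": "Feature",
        "geometry": {"type": "Point", "coordinates": [float(lon), float(lat)]},
        "properties": {"uri": b["item"]["value"], "label": b["itemLabel"]["value"]},
    })

print(json.dumps({"type": "FeatureCollection", "features": features}, indent=2))

In a QGIS plugin the resulting feature collection would be loaded as a vector layer rather than printed.
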
A GeoSPARQL Compliance Benchmark. Jovanovik, M.; Homburg, T.; and Spasić, M. ISPRS International Journal of Geo-Information, 10(7), July 2021.

@article{ijgi10070487,
  author         = {Jovanovik, Milos and Homburg, Timo and Spasić, Mirko},
  title          = {A GeoSPARQL Compliance Benchmark},
  journal        = {ISPRS International Journal of Geo-Information},
  volume         = {10},
  number         = {7},
  article-number = {487},
  month          = {jul},
  year           = {2021},
  issn           = {2220-9964},
  url            = {https://www.mdpi.com/2220-9964/10/7/487},
  doi            = {10.3390/ijgi10070487}
}

GeoSPARQL is an important standard for the geospatial linked data community, given that it defines a vocabulary for representing geospatial data in RDF, defines an extension to SPARQL for processing geospatial data, and provides support for both qualitative and quantitative spatial reasoning. However, what the community is missing is a comprehensive and objective way to measure the extent of GeoSPARQL support in GeoSPARQL-enabled RDF triplestores. To fill this gap, we developed the GeoSPARQL compliance benchmark. We propose a series of tests that check for the compliance of RDF triplestores with the GeoSPARQL standard, in order to test how many of the requirements outlined in the standard a tested system supports. This topic is of concern because the support of GeoSPARQL varies greatly between different triplestore implementations, and the extent of support is of great importance for different users. In order to showcase the benchmark and its applicability, we present a comparison of the benchmark results of several triplestores, providing an insight into their current GeoSPARQL support and the overall GeoSPARQL support in the geospatial linked data domain.

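To make the benchmark's approach concrete: it issues GeoSPARQL queries against a triplestore and compares the answers with what the standard requires. The Python sketch below shows one such probe; the endpoint URL and test geometries are invented placeholders, and the snippet illustrates the idea rather than reproducing the benchmark's actual code (which runs inside the HOBBIT platform).

from SPARQLWrapper import SPARQLWrapper, JSON

ENDPOINT = "http://localhost:3030/ds/sparql"  # hypothetical endpoint

QUERY = """
PREFIX geo:  <http://www.opengis.net/ont/geosparql#>
PREFIX geof: <http://www.opengis.net/def/function/geosparql/>
ASK {
  # geof:sfContains is a GeoSPARQL "simple features" filter function;
  # a compliant store must evaluate it over WKT literals.
  BIND("POLYGON((0 0, 0 2, 2 2, 2 0, 0 0))"^^geo:wktLiteral AS ?area)
  BIND("POINT(1 1)"^^geo:wktLiteral AS ?point)
  FILTER(geof:sfContains(?area, ?point))
}
"""

sparql = SPARQLWrapper(ENDPOINT)
sparql.setQuery(QUERY)
sparql.setReturnFormat(JSON)
answer = sparql.query().convert()

# The point lies inside the polygon, so a store implementing the function
# should answer True; an error or False suggests a compliance gap.
print("geof:sfContains supported:", answer.get("boolean"))
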
Metadata Schema and Ontology for Capturing and Processing of 3D Cultural Heritage Objects. Homburg, T.; Cramer, A.; Raddatz, L.; and Mara, H. Heritage Science, July 2021.

@article{homburgheritagescience2021,
  title     = {Metadata Schema and Ontology for Capturing and Processing of 3D Cultural Heritage Objects},
  author    = {Homburg, Timo and Cramer, Anja and Raddatz, Laura and Mara, Hubert},
  year      = {2021},
  month     = {jul},
  day       = {12},
  journal   = {Heritage Science},
  publisher = {Springer Nature},
  address   = {New York, USA},
  language  = {english},
  issn      = {2050-7445},
  doi       = {10.1186/s40494-021-00561-w},
  url       = {https://heritagesciencejournal.springeropen.com/track/pdf/10.1186/s40494-021-00561-w.pdf}
}

Motivated by the increased use of 3D acquisition of objects by Cultural Heritage institutions, we investigated ontologies and metadata schemes for the acquisition process to provide details about the 3D capturing, which can be combined with preexisting ontologies describing an object. Therefore we divided the 3D capturing workflow into common steps, starting with the object being placed in front of a 3D scanner, through to preparation and publication of the 3D datasets and/or derived images. While the proposed ontology is well defined on a coarse level of detail for very different techniques, e.g. Structure from Motion and LiDAR, we elaborated the metadata scheme in very fine detail for 3D scanners available at our institutions. This includes practical experiments with measurement data from past and current projects, including datasets published at Zenodo as guiding examples and the source code for their computation. Additionally, the free and Open Source GigaMesh Software Framework's analysis and processing methods have been extended to provide metadata about the 3D processing steps like mesh cleaning as well as 2D image generation. Finally, we discuss the current limitations and give an outlook about future extensions.

SPARQLing Geodesy for Cultural Heritage – New Opportunities for Publishing and Analysing Volunteered Linked (Geo-)Data. Thiery, F.; Homburg, T.; Schmidt, S. C.; Voß, J.; and Trognitz, M. FIG Journal, May 2021. Update paper of the publication of the previous year.

@article{homburgfigjournal2021,
  title     = {SPARQLing Geodesy for Cultural Heritage – New Opportunities for Publishing and Analysing Volunteered Linked (Geo-)Data},
  author    = {Thiery, Florian and Homburg, Timo and Schmidt, Sophie Charlotte and Voß, Jakob and Trognitz, Martina},
  year      = {2021},
  month     = {may},
  day       = {12},
  journal   = {FIG Journal},
  publisher = {FIG},
  address   = {Amsterdam, Netherlands},
  url       = {https://www.fig.net/resources/publications/prj/showpeerreviewpaper.asp?pubid=11032},
  language  = {english},
  note      = {Update paper of the publication of the previous year},
  issn      = {2307-4086},
  isbn      = {978-87-92853-65-3}
}

Geodesists work in Industry 4.0 and Spatial Information Management by using cross linked machines, people and data. Yet, one of the most popular technologies for interlinking data - Semantic Web technologies - have been largely absent from the geodesy community, because of the slow development of standards, a mandatory non-trivial conversion between geospatial features and graph data, and a lack of commonly available GIS tools to achieve this. This is slowly changing due to an increased awareness of the advantages of Linked Data technology in the GIS community and an improvement of standards in the Semantic Web community. Hence, the importance of open source software, open geodata and open access increases. A fundamental requirement for data sharing is the use of standardised data models. In this paper we compare two different modelling approaches for Irish Ogham Stones as a best practice example for linked open data management: one approach uses Wikidata, and the other a custom ontology. While Wikidata offers direct integration into the Linked Open Data Cloud and needs less technological expertise, using a custom ontology enables the creation of best-fitting data models. Both approaches facilitate the use of new information sources for the geodesy community. We aim to demonstrate how Linked Open Geodata can be re-used and further enriched with information from other open sources such as spatial data from OpenStreetMap. For this purpose, we also present a QGIS plugin and a modified geospatial web service, as well as a geo-optimised linked data browser, as solutions for bridging the gap between geospatial features and Linked Open Data triples.

GeoSPARQL 1.1: an almost decadal update to the most important geospatial LOD standard. Car, N. J.; and Homburg, T. In Yaman, B.; Sherif, M. A.; Ngonga Ngomo, A.; and Haller, A., editors, Geospatial Linked Data Workshop 2021, volume Vol-2977, pages 26-33, Hersonissos, Greece, October 2021. CEUR-WS.

@inproceedings{homburggeosparql11,
  title     = {GeoSPARQL 1.1: an almost decadal update to the most important geospatial LOD standard},
  author    = {Car, Nicholas J. and Homburg, Timo},
  booktitle = {Geospatial Linked Data Workshop 2021},
  editor    = {Yaman, Beyza and Sherif, Mohamed Ahmed and Ngonga Ngomo, Axel-Cyrille and Haller, Armin},
  month     = {oct},
  year      = {2021},
  publisher = {CEUR-WS},
  issn      = {1613-0073},
  volume    = {Vol-2977},
  pages     = {26-33},
  address   = {Hersonissos, Greece},
  url       = {http://ceur-ws.org/Vol-2977/paper4.pdf},
  keywords  = {GeoSPARQL, SPARQL, geospatial, OGC, Open Geospatial Consortium, spatial, ontology, spatial functions}
}

In 2012 the Open Geospatial Consortium published GeoSPARQL, defining "SPARQL extension functions", "RIF rules", "an RDF/OWL ontology for [spatial] information", and supporting vocabularies. In the 8+ years since its publication, GeoSPARQL has become the most important spatial Semantic Web standard, as judged by references to it in other Semantic Web standards and its wide use in Semantic Web data. An update to the standard was proposed in 2019 to deliver GeoSPARQL 1.1 in 2021 with a charter to handle outstanding change requests, to source new ones from the user community, and to "better present" the standard, that is, to better link all the standard's parts and to better document and exemplify elements. Expected updates included alignments to other ontologies, handling of new spatial referencing systems, new geometry representations, and new artifact presentation. In this paper, we will discuss the submitted change requests and resulting updates to the standard. We will also discuss the theory behind updates and our expectations for GeoSPARQL 1.1's use.

PaleoCodage - Enhancing machine-readable cuneiform descriptions using a machine-readable paleographic encoding. Homburg, T. Digital Scholarship in the Humanities, 36(Supplement_2): ii127-ii154, November 2021.

@article{homburgdsh2021,
  title     = {PaleoCodage - Enhancing machine-readable cuneiform descriptions using a machine-readable paleographic encoding},
  author    = {Timo Homburg},
  journal   = {Digital Scholarship in the Humanities},
  volume    = {36},
  number    = {Supplement_2},
  pages     = {ii127-ii154},
  year      = {2021},
  month     = {nov},
  issn      = {2055-7671},
  doi       = {10.1093/llc/fqab038},
  url       = {https://academic.oup.com/dsh/article/36/Supplement_2/ii127/6421811},
  publisher = {Oxford University Press},
  keywords  = {Cuneiform, Paleography, Linked data, Machine Learning}
}

This publication introduces PaleoCodage, a new machine-readable way to capture cuneiform paleographic shapes. Many different systems of listing and describing cuneiform signs exist, but none of the systems can be used to describe the shape, i.e. the paleographic features, of a cuneiform sign. PaleoCodage aims to fill this missing link of description, which can benefit the Semantic Web community of linked open data dictionaries, the Assyriologist community in documenting signs and their variants, and the cuneiform image recognition community in providing a gold standard encoding to match against. The publication evaluates the encoding on more than 200 Unicode cuneiform signs, describes already available application cases and a linked data vocabulary for paleography, and concludes by describing future work on further validating PaleoCodage and applying the paleographic vocabulary to more languages.

Software for the GeoSPARQL compliance benchmark. Jovanovik, M.; Homburg, T.; and Spasić, M. Software Impacts, 8: 100071, March 2021.

@article{JOVANOVIK2021100071,
  title     = {Software for the GeoSPARQL compliance benchmark},
  author    = {Jovanovik, Milos and Homburg, Timo and Spasić, Mirko},
  journal   = {Software Impacts},
  volume    = {8},
  pages     = {100071},
  day       = {31},
  month     = {mar},
  year      = {2021},
  issn      = {2665-9638},
  doi       = {10.1016/j.simpa.2021.100071},
  publisher = {Elsevier},
  url       = {https://www.sciencedirect.com/science/article/pii/S2665963821000191},
  keywords  = {GeoSPARQL, Benchmarking, Compliance, RDF triplestores}
}

Checking the compliance of geospatial triplestores with the GeoSPARQL standard represents a crucial step for many users when selecting the appropriate storage solution. This publication presents the software which comprises the GeoSPARQL compliance benchmark — a benchmark which checks RDF triplestores for compliance with the requirements of the GeoSPARQL standard. Users can execute this benchmark within the HOBBIT benchmarking platform to quantify the extent to which the GeoSPARQL standard is implemented within the triplestore of interest. This enables users to make an informed decision when choosing an RDF storage solution and helps assess the general state of adoption of geospatial technologies on the Semantic Web.

A GeoSPARQL Compliance Benchmark - Preprint. Jovanovik, M.; Homburg, T.; and Spasić, M. arXiv, February 2021.

@misc{jovanovik2021geosparql,
  title         = {A GeoSPARQL Compliance Benchmark - Preprint},
  author        = {Jovanovik, Milos and Homburg, Timo and Spasić, Mirko},
  year          = {2021},
  month         = {feb},
  day           = {12},
  eprint        = {2102.06139},
  archivePrefix = {arXiv},
  primaryClass  = {cs.DB},
  url           = {https://arxiv.org/pdf/2102.06139},
  language      = {english}
}

We propose a series of tests that check for the compliance of RDF triplestores with the GeoSPARQL standard. The purpose of the benchmark is to test how many of the requirements outlined in the standard a tested system supports and to push triplestores forward in achieving a full GeoSPARQL compliance. This topic is of concern because the support of GeoSPARQL varies greatly between different triplestore implementations, and such support is of great importance for the domain of geospatial RDF data. Additionally, we present a comprehensive comparison of triplestores, providing an insight into their current GeoSPARQL support.

Diskussionsbeitrag - Handreichung zur Rezension von Forschungssoftware in den Altertumswissenschaften / Impulse - Recommendations for the review of archaeological research software. Homburg, T.; Klammt, A.; Mara, H.; Schmid, C.; Schmidt, S. C.; Thiery, F.; and Trognitz, M. Archäologische Informationen, January 2021.

@article{homburg2020dguf,
  title     = {Diskussionsbeitrag - Handreichung zur Rezension von Forschungssoftware in den Altertumswissenschaften / Impulse - Recommendations for the review of archaeological research software},
  author    = {Homburg, Timo and Klammt, Anne and Mara, Hubert and Schmid, Clemens and Schmidt, Sophie Charlotte and Thiery, Florian and Trognitz, Martina},
  year      = {2021},
  month     = {jan},
  day       = {19},
  publisher = {Deutsche Gesellschaft für Ur- und Frühgeschichte (DGUF)},
  journal   = {Archäologische Informationen},
  issn      = {0341-2873},
  doi       = {10.11588/ai.2020.1.81422},
  url       = {https://www.dguf.de/fileadmin/AI/archinf-ev_homburg-etal.pdf},
  language  = {german}
}

Motivated by numerous discussions around the increasing use of research software in the field of archaeology, this article outlines the aspects for its review. The evaluation of software is a complex topic, since its field of application and development context has a considerable influence. In addition, there are many very different use cases, ranging from students wanting a quick solution for an exercise to project developers who have to integrate a software package into an existing infrastructure for continuous operation. Although this first version of an impulse is based on equal contributions from archaeology and applied computer science, the focus is on evaluation criteria of software in the field of archaeology. A major goal of this impulse is to sensitize future reviewers to the complexity of software evaluation. A software review should enable the archaeological professional audience to make a quick, critical and – also towards the developers – fair assessment of software. Priority recommendations include a description of the context in which the review was written and the requirements for specific use cases. In addition, a short tabular overview should enable a quick assessment of the technical, financial and legal aspects. The need for future adaptations of this guideline was already identified during its development, since both software development and its evaluation in the digital age are expected to remain very dynamic.

2020 (11)

Connecting Semantic Situation Descriptions with Data Quality Evaluations—Towards a Framework of Automatic Thematic Map Evaluation. Homburg, T. Information, November 2020.

@article{homburg2020information,
  author    = {Homburg, Timo},
  title     = {Connecting Semantic Situation Descriptions with Data Quality Evaluations—Towards a Framework of Automatic Thematic Map Evaluation},
  journal   = {Information},
  year      = {2020},
  month     = {nov},
  doi       = {10.3390/info11110532},
  publisher = {MDPI},
  issn      = {2078-2489},
  url       = {https://doi.org/10.3390/info11110532},
  language  = {english}
}

A continuing question in the geospatial community is the evaluation of fitness for use of map data for a variety of usecases. While data quality metrics and dimensions have been discussed broadly in the geospatial community and have been modeled in Semantic Web vocabularies, an ontological connection between usecases and data quality expressions allowing reasoning approaches to determine the fitness for use of semantic web map data has not yet been approached. This publication introduces such an ontological model to represent and link situations with geospatial data quality metrics for the purpose of evaluating thematic map contents. The ontology model constitutes the data storage element of a framework for usecase based data quality assurance which creates suggestions for data quality evaluations which are verified and improved upon by end users. So-created requirement profiles are associated and shared to Semantic Web concepts and therefore contribute to a pool of linked data described situation-based data quality assessments, which may be used by a variety of applications. The framework is tested using two test scenarios which are evaluated and discussed in a wider context.

GeoSPARQL+: Syntax, Semantics and System for Integrated Querying of Graph, Raster and Vector Data. Homburg, T.; Staab, S.; and Janke, D. In ISWC, November 2020. Springer. Best Student Paper Award.

@inproceedings{homburg2020iswc,
  author    = {Homburg, Timo and Staab, Steffen and Janke, Daniel},
  title     = {GeoSPARQL+: Syntax, Semantics and System for Integrated Querying of Graph, Raster and Vector Data},
  booktitle = {ISWC},
  year      = {2020},
  month     = nov,
  note      = {Best Student Paper Award},
  doi       = {10.1007/978-3-030-62419-4_15},
  publisher = {Springer},
  url       = {https://link.springer.com/chapter/10.1007/978-3-030-62419-4_15},
  language  = {english},
  abstract  = {We introduce an approach to semantically represent and query raster data in a Semantic Web graph. We extend the GeoSPARQL vocabulary and query language to support raster data as a new type of geospatial data. We define new filter functions and illustrate our approach using several use cases on real-world data sets. Finally, we describe a prototypical implementation and validate the feasibility of our approach.}
}

We introduce an approach to semantically represent and query raster data in a Semantic Web graph. We extend the GeoSPARQL vocabulary and query language to support raster data as a new type of geospatial data. We define new filter functions and illustrate our approach using several use cases on real-world data sets. Finally, we describe a prototypical implementation and validate the feasibility of our approach.
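A minimal sketch of how such queries are posed in practice, assuming a local SPARQL endpoint (the URL below is a placeholder): the query uses standard GeoSPARQL terms (geo:asWKT, geof:sfIntersects). The raster-specific filter functions that GeoSPARQL+ adds are analogous but not reproduced here.

# Illustrative sketch only: a standard GeoSPARQL topology query via SPARQLWrapper.
from SPARQLWrapper import SPARQLWrapper, JSON

endpoint = SPARQLWrapper("http://localhost:3030/ds/sparql")  # placeholder endpoint
endpoint.setQuery("""
PREFIX geo:  <http://www.opengis.net/ont/geosparql#>
PREFIX geof: <http://www.opengis.net/def/function/geosparql/>
SELECT ?feature ?wkt WHERE {
  ?feature geo:hasGeometry ?geom .
  ?geom geo:asWKT ?wkt .
  FILTER(geof:sfIntersects(?wkt,
    "POLYGON((8.2 49.9, 8.3 49.9, 8.3 50.0, 8.2 50.0, 8.2 49.9))"^^geo:wktLiteral))
}
""")
endpoint.setReturnFormat(JSON)
for row in endpoint.query().convert()["results"]["bindings"]:
    print(row["feature"]["value"], row["wkt"]["value"])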
White paper: OGC Benefits of Representing Spatial Data Using Semantic and Graph Technologies. Abhayaratna, J.; van den Brink, L.; Car, N.; Atkinson, R.; Homburg, T.; Knibbe, F.; McGlinn, K.; Wagner, A.; Bonduel, M.; Holten Rasmussen, M.; and Thiery, F. OGC White Paper, October 2020.

@techreport{homburg2020ogc,
  title     = {White paper: OGC Benefits of Representing Spatial Data Using Semantic and Graph Technologies},
  author    = {Abhayaratna, Joseph and van den Brink, Linda and Car, Nicholas and Atkinson, Rob and Homburg, Timo and Knibbe, Frans and McGlinn, Kris and Wagner, Anna and Bonduel, Mathias and Holten Rasmussen, Mads and Thiery, Florian},
  year      = {2020},
  month     = oct,
  type      = {OGC White Paper},
  publisher = {OGC},
  url       = {http://docs.ogc.org/wp/19-078r1/19-078r1.html},
  language  = {english},
  abstract  = {The purpose of this document is to outline the benefits of representing geospatial data using semantic and graph technologies. It aims to provide motivation for OGC members to consider the publication of geospatial data using these technologies.}
}

The purpose of this document is to outline the benefits of representing geospatial data using semantic and graph technologies. It aims to provide motivation for OGC members to consider the publication of geospatial data using these technologies.
Book of Abstracts. ArcheoFOSS International Conference 2020. Bogdani, J.; Battistin, F.; de Angeli, S.; Moresi, F. V.; Pastura, G.; Serpetti, M.; Brandolini, F.; Carrer, F.; Ciccone, G.; D'Erasmo, D.; Ago, R.; Diara, F.; Rinaudo, F.; Fornaciari, L.; Brienza, E.; Caratelli, G.; Giorgi, C.; Malatesta, S. G.; Pellegrino, M.; Coppola, D.; Price, B.; Rosati, P.; Caravale, A.; Piergrossi, A.; Rossi, I.; D'Andrea, A.; Forte, F.; Ducke, B.; Gattiglia, G.; Anichini, F.; Grossi, P.; Ciurcina, M.; Laneri, N.; Brancato, R.; Cristofaro, S.; Figuera, M.; Nicolosi Asmundo, M.; Santamaria, D. F.; Spampinato, D.; Roesler, K.; Auth, F.; Domscheit, W.; Hofmann, K. P.; Schmidt, S. C.; Thiery, F.; Serlorenzi, M.; Montalbano, R.; D'Andrea, A.; Cifarelli, C.; Berto, S.; Demetrescu, E.; Carpanese, I.; Fanini, B.; Homburg, T.; Lewis, J.; and Palombini, A. Zenodo, August 2020.

@book{bogdani_julian_2020_4002961,
  title     = {{Book of Abstracts. ArcheoFOSS International Conference 2020}},
  author    = {Bogdani, Julian and Battistin, Fabiana and de Angeli, Stefano and Moresi, Federico Valerio and Pastura, Giancarlo and Serpetti, Matteo and Brandolini, Filippo and Carrer, Francesco and Ciccone, Gabriele and D'Erasmo, Domizia and Ago, Renata and Diara, Filippo and Rinaudo, Fulvio and Fornaciari, Lorenzo and Brienza, Emanuele and Caratelli, Giovanni and Giorgi, Cecilia and Malatesta, Saverio Giulio and Pellegrino, Michele and Coppola, Donato and Price, Ben and Rosati, Paolo and Caravale, Alessandra and Piergrossi, Alessandra and Rossi, Irene and D'Andrea, Andrea and Forte, Francesca and Ducke, Benjamin and Gattiglia, Gabriele and Anichini, Francesca and Grossi, Piergiovanna and Ciurcina, Marco and Laneri, Nicola and Brancato, Rodolfo and Cristofaro, Salvatore and Figuera, Marianna and Nicolosi Asmundo, Marianna and Santamaria, Daniele Francesco and Spampinato, Daria and Roesler, Katja and Auth, Frederic and Domscheit, Wenke and Hofmann, Kerstin P. and Schmidt, Sophie C. and Thiery, Florian and Serlorenzi, Mirella and Montalbano, Riccardo and D'Andrea, Ascanio and Cifarelli, Carlo and Berto, Simone and Demetrescu, Emanuel and Carpanese, Irene and Fanini, Bruno and Homburg, Timo and Lewis, Joseph and Palombini, Augusto},
  year      = {2020},
  month     = aug,
  publisher = {Zenodo},
  doi       = {10.5281/zenodo.4002961},
  url       = {https://doi.org/10.5281/zenodo.4002961}
}
OGC GeoSPARQL 2.0 SWG Charter. Abhayaratna, J.; van den Brink, L.; Car, N.; Homburg, T.; and Knibbe, F. OGC Working Group Charter, August 2020.

@misc{homburg2020ogc2,
  title        = {OGC GeoSPARQL 2.0 SWG Charter},
  author       = {Abhayaratna, Joseph and van den Brink, Linda and Car, Nicholas and Homburg, Timo and Knibbe, Frans},
  year         = {2020},
  month        = aug,
  howpublished = {OGC Working Group Charter},
  publisher    = {OGC},
  url          = {https://portal.ogc.org/files/?artifact_id=94480},
  language     = {english}
}
Towards Paleographic Linked Open Data (PLOD): A general vocabulary to describe paleographic features. Homburg, T. In Estill, L.; and Guiliano, J., editors, 15th Annual International Conference of the Alliance of Digital Humanities Organizations, DH 2020, Ottawa, Canada, July 20-25, 2020, Conference Abstracts, July 2020.

@inproceedings{confdihuHomburg20a,
  title     = {Towards Paleographic Linked Open Data {(PLOD):} {A} general vocabulary to describe paleographic features},
  author    = {Timo Homburg},
  year      = {2020},
  month     = jul,
  booktitle = {15th Annual International Conference of the Alliance of Digital Humanities Organizations, {DH} 2020, Ottawa, Canada, July 20-25, 2020, Conference Abstracts},
  url       = {https://dh2020.adho.org/wp-content/uploads/2020/07/369_TowardsPaleographicLinkedOpenDataPLODAgeneralvocabularytodescribepaleographicfeatures.html},
  editor    = {Laura Estill and Jennifer Guiliano},
  timestamp = {Tue, 21 Jul 2020 15:06:22 +0200},
  biburl    = {https://dblp.org/rec/conf/dihu/Homburg20a.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
Mind the gap: Filling gaps in cuneiform tablets using Machine Learning Algorithms. Homburg, T. In Estill, L.; and Guiliano, J., editors, 15th Annual International Conference of the Alliance of Digital Humanities Organizations, DH 2020, Ottawa, Canada, July 20-25, 2020, Conference Abstracts, July 2020.

@inproceedings{confdihuHomburg20,
  title     = {Mind the gap: Filling gaps in cuneiform tablets using Machine Learning Algorithms},
  author    = {Timo Homburg},
  year      = {2020},
  month     = jul,
  booktitle = {15th Annual International Conference of the Alliance of Digital Humanities Organizations, {DH} 2020, Ottawa, Canada, July 20-25, 2020, Conference Abstracts},
  url       = {https://dh2020.adho.org/wp-content/uploads/2020/07/151_MindthegapFillinggapsincuneiformtabletsusingMachineLearningAlgorithms.html},
  editor    = {Laura Estill and Jennifer Guiliano},
  timestamp = {Tue, 21 Jul 2020 15:06:22 +0200},
  biburl    = {https://dblp.org/rec/conf/dihu/Homburg20.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
Linked Open Geodata in GIS? Ein Überblick über Linked Geodata Open Source Software. Homburg, T.; and Thiery, F. Presentation at AGIT 2020, July 2020.

@misc{timo_homburg_florian_thiery_2020,
  title        = {Linked Open Geodata in GIS? Ein Überblick über Linked Geodata Open Source Software},
  author       = {Timo Homburg and Florian Thiery},
  year         = {2020},
  month        = jul,
  type         = {Presentation},
  howpublished = {AGIT 2020},
  publisher    = {Zenodo},
  doi          = {10.5281/zenodo.3931262},
  language     = {german},
  abstract     = {Die Stärke von Linked Open Data (LOD) ist die Verknüpfung von Informationen aus unterschiedlichsten dezentral gehosteten Wissensdomänen. Für die Geoinformatik haben sich beispielsweise community-basierende Datenrepositorien wie Wikidata, LinkedGeoData oder DBpedia gebildet. Leider haben all diese Linked Data Ressourcen in der Geo-Community bisher nur niederrangige Bedeutung erlangt. Die Gründe dafür sehen wir in nicht vorhandenen, leicht bedienbaren Tools für: Datenintegration in das Semantic Web (Semantic Uplift), Datenzugriff (Semantic Downlift), fehlende Query Capabilities in GeoSPARQL und die Anreicherung von GIS Daten mit Semantic Web Daten. In unserem Vortrag möchten wir Alternativen und Tools für diese Anwendungsfälle aufzeigen: (1) Das SPARQLing Unicorn QGIS Plugin zum vereinfachten Querying von SPARQL Endpoints, Anreicherung von Geodaten mit Semantic Web Inhalten, Konvertierung von Geodaten in RDF zur Integration in RDF Stores und Bearbeitung/Reprojektion von semantischen Geodaten. (2) postgis-jena und rdf4j-postgis, die TripleStore Implementierungen um Query Capabilities erweitern, die aktuell noch nicht im GeoSPARQL Standard, aber in PostGIS enthalten sind. (3) SemanticWFS erlaubt es, die Ergebnisse von SPARQL Queries als FeatureTypes eines WFS oder auch über OGC API Features bereitzustellen und somit die beiden Welten Geospatial Semantic Web und GIS Umgebungen zu verbinden.}
}

Abstract (translated from German): The strength of Linked Open Data (LOD) lies in linking information from a wide range of decentrally hosted knowledge domains. For geoinformatics, community-based data repositories such as Wikidata, LinkedGeoData or DBpedia have emerged. Unfortunately, all of these Linked Data resources have so far gained only minor importance in the geo community. We see the reasons for this in the lack of easy-to-use tools for data integration into the Semantic Web (semantic uplift), data access (semantic downlift), missing query capabilities in GeoSPARQL, and the enrichment of GIS data with Semantic Web data. In our talk we present alternatives and tools for these use cases: (1) the SPARQLing Unicorn QGIS plugin for simplified querying of SPARQL endpoints, enrichment of geodata with Semantic Web content, conversion of geodata to RDF for integration into RDF stores, and editing/reprojection of semantic geodata; (2) postgis-jena and rdf4j-postgis, triple store implementations that add query capabilities which are available in PostGIS but not yet part of the GeoSPARQL standard; (3) SemanticWFS, which serves the results of SPARQL queries as feature types of a WFS or via OGC API Features, thus connecting the two worlds of the geospatial Semantic Web and GIS environments.
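The core idea of the tools listed above, converting SPARQL results with WKT literals into GeoJSON usable by a GIS, can be sketched in a few lines of Python. This is not the plugin's actual code: SPARQLWrapper and shapely are assumed as dependencies, the Wikidata query service URL is real, and the lighthouse class Q39715 is just an arbitrary example.

# Illustrative sketch only: SPARQL results with WKT points -> GeoJSON FeatureCollection.
import json
from SPARQLWrapper import SPARQLWrapper, JSON
from shapely import wkt
from shapely.geometry import mapping

sparql = SPARQLWrapper("https://query.wikidata.org/sparql", agent="example-sketch/0.1")
sparql.setQuery("""
SELECT ?item ?itemLabel ?coord WHERE {
  ?item wdt:P31 wd:Q39715 ;   # instance of: lighthouse (arbitrary example class)
        wdt:P625 ?coord .     # coordinate location, returned as 'Point(lon lat)'
  SERVICE wikibase:label { bd:serviceParam wikibase:language "en". }
} LIMIT 10
""")
sparql.setReturnFormat(JSON)

features = []
for row in sparql.query().convert()["results"]["bindings"]:
    geom = wkt.loads(row["coord"]["value"])  # WKT keywords are case-insensitive
    features.append({
        "type": "Feature",
        "geometry": mapping(geom),
        "properties": {"uri": row["item"]["value"],
                       "label": row["itemLabel"]["value"]},
    })
print(json.dumps({"type": "FeatureCollection", "features": features}, indent=2))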
SPARQLing Geodesy for Cultural Heritage – New Opportunities for Publishing and Analysing Volunteered Linked (Geo-)Data. Thiery, F.; Homburg, T.; Schmidt, S. C.; Trognitz, M.; and Przybilla, M. In FIG Working Week, Amsterdam, Netherlands, May 2020. FIG Article of the Month October 2020.

@inproceedings{homburg2020figweek,
  title     = {SPARQLing Geodesy for Cultural Heritage – New Opportunities for Publishing and Analysing Volunteered Linked (Geo-)Data},
  author    = {Thiery, Florian and Homburg, Timo and Schmidt, Sophie Charlotte and Trognitz, Martina and Przybilla, Monika},
  year      = {2020},
  month     = may,
  day       = {13},
  booktitle = {FIG Working Week},
  address   = {Amsterdam, Netherlands},
  doi       = {10.5281/zenodo.3766154},
  url       = {https://fig.net/resources/monthly_articles/2020/Thiery_etal_October_2020.asp},
  note      = {FIG Article of the Month October 2020},
  language  = {english},
  isbn      = {978-87-92853-93-6},
  issn      = {2307-4086},
  abstract  = {Geodesists work in Industry 4.0 and Spatial Information Management with cross-linked machines, people and data. Moreover, open source software, open geodata and open access are becoming increasingly important. As part of the Semantic Web, Linked Open Data (LOD) must be created and published in order to provide free open geodata in interoperable formats. With this semantically structured and standardised data it is easy to implement tools for GIS applications, e.g. QGIS. These days, the world's Cultural Heritage (CH) is being destroyed as a result of wars, sea-level rise, floods and other natural disasters caused by climate change. Several transnational initiatives try to preserve our CH via digitisation initiatives. The Ogi Ogam project serves as a best practice for preserving CH data, aiming to show an easy volunteered approach to modelling Irish `Ogam Stones` containing Ogham inscriptions in Wikidata and interlinking them with spatial information in OpenStreetMap and (geo)resources on the web.}
}

Geodesists work in Industry 4.0 and Spatial Information Management with cross-linked machines, people and data. Moreover, open source software, open geodata and open access are becoming increasingly important. As part of the Semantic Web, Linked Open Data (LOD) must be created and published in order to provide free open geodata in interoperable formats. With this semantically structured and standardised data it is easy to implement tools for GIS applications, e.g. QGIS. These days, the world's Cultural Heritage (CH) is being destroyed as a result of wars, sea-level rise, floods and other natural disasters caused by climate change. Several transnational initiatives try to preserve our CH via digitisation initiatives. The Ogi Ogam project serves as a best practice for preserving CH data, aiming to show an easy volunteered approach to modelling Irish `Ogam Stones` containing Ogham inscriptions in Wikidata and interlinking them with spatial information in OpenStreetMap and (geo)resources on the web.
QGIS - A SPARQLing Unicorn? Eine Einführung in Linked Open Geodata zur Integration von RDF in QGIS Plugins. Thiery, F.; and Homburg, T. Presentation at FOSSGIS 2020, Freiburg, Germany, March 2020.

@inproceedings{homburg2020sparqling,
  title     = {QGIS - A SPARQLing Unicorn? Eine Einführung in Linked Open Geodata zur Integration von RDF in QGIS Plugins},
  author    = {Thiery, Florian and Homburg, Timo},
  year      = {2020},
  month     = mar,
  day       = {12},
  type      = {Presentation},
  booktitle = {FOSSGIS 2020},
  address   = {Freiburg, Germany},
  doi       = {10.5281/zenodo.3706962},
  language  = {german},
  abstract  = {Die Linked Open Data Cloud bietet seit vielen Jahren große Datenrepositorien im WWW an, die für verschiedene Zwecke von unterschiedlichen Communities genutzt werden können. Die Stärke von Linked Open Data (LOD) ist hierbei die Verknüpfung von Informationen aus unterschiedlichsten dezentral gehosteten Wissensdomänen. Für die Geoinformatik haben sich beispielsweise community-basierende Datenrepositorien wie Wikidata, LinkedGeoData oder DBpedia gebildet. Zudem bieten Gazetteer-Repositorien wie Geonames oder Pleiades für den zeitgeschichtlichen Raum, aber auch administrative Provider wie der Ordnance Survey UK und Ordnance Survey Ireland, ihre Geodaten als LOD an. Leider haben all diese Ressourcen in der Geo-Community bisher nur niederrangige Bedeutung erlangt. Den Grund dafür sehen wir in einem nicht vorhandenen Support von GIS Applikationen für die Verarbeitung von LOD. Triplestores und SPARQL werden zur Zeit weder von GIS Software, GeoServer Implementierungen oder OGC-Services unterstützt. Die Linked Data Serialisierung GeoJSON-LD birgt aufgrund einiger noch offener Issues Herausforderungen und wird nicht so oft wie ihre 'unsemantische Schwester' GeoJSON in Applikationen genutzt. Genau hier setzt das SPARQLing Unicorn QGIS Plugin an, welches die Ausführung von Linked Data Anfragen in (Geo)SPARQL an ausgewählte Triplestores und geofähige SPARQL Endpoints ermöglicht. Die Ergebnisse werden in GeoJSON Layer konvertiert, sodass sie direkt in QGIS nutzbar sind. Für die Zukunft soll das SPARQLing Unicorn Plugin mit extrahierten Konzepten ausgewählter Ontologien Benutzern die Möglichkeit bieten, einfache Abfragen wie "Gib mir alle Flughäfen in BOUNDINGBOX mit direkt verbundenen Relationen" oder "Gib mir alle Flughäfen in LAND_X" automatisch zu generieren und somit das Laden dynamischer Inhalte der Datenrepositorien zu erleichtern. Es ist erwünscht, dass die Geocommunity aktiv an der (Weiter-)Entwicklung des Plugins teilnimmt und so die Welt der LOD im Geokontext weiter bekannt macht. Dazu steht der Quellcode frei auf Github zur Erweiterung zur Verfügung. Der Vortrag gibt einen Einstieg in die Modellierung von Linked Open (Geo-)Data, in die Abfragesprache SPARQL, deren Erweiterung GeoSPARQL, bestehende (Geo-)Ressourcen in der Linked Open Data Cloud, sowie die Funktionsweise des SPARQLing Unicorn QGIS Plugins, dessen zukünftige Erweiterungen und aktuelle Entwicklungen in der Erweiterung der jeweiligen Standards. Das Unicorn freut sich auf Mithilfe aus der Geo-Community!}
}

Abstract (translated from German): For many years, the Linked Open Data Cloud has offered large data repositories on the WWW that can be used for various purposes by different communities. The strength of Linked Open Data (LOD) is the interlinking of information from a wide range of decentrally hosted knowledge domains. For geoinformatics, community-based data repositories such as Wikidata, LinkedGeoData or DBpedia have emerged. In addition, gazetteer repositories such as Geonames or, for the historical domain, Pleiades, as well as administrative providers such as the Ordnance Survey UK and Ordnance Survey Ireland, offer their geodata as LOD. Unfortunately, all of these resources have so far gained only minor importance in the geo community. We see the reason for this in the lack of support in GIS applications for processing LOD. Triple stores and SPARQL are currently supported neither by GIS software, nor by GeoServer implementations or OGC services. The Linked Data serialisation GeoJSON-LD still poses challenges due to a number of open issues and is not used in applications as often as its 'non-semantic sister' GeoJSON. This is exactly where the SPARQLing Unicorn QGIS plugin comes in: it executes Linked Data queries in (Geo)SPARQL against selected triple stores and geo-capable SPARQL endpoints. The results are converted into GeoJSON layers, so they can be used directly in QGIS. In the future, the SPARQLing Unicorn plugin is intended to use concepts extracted from selected ontologies to let users automatically generate simple queries such as "give me all airports in BOUNDINGBOX with directly connected relations" or "give me all airports in COUNTRY_X", thereby simplifying the loading of dynamic content from the data repositories. The geo community is invited to participate actively in the (further) development of the plugin and thus spread the word about LOD in the geo context; the source code is freely available on GitHub for extension. The talk gives an introduction to modelling Linked Open (Geo-)Data, to the query language SPARQL and its extension GeoSPARQL, to existing (geo) resources in the Linked Open Data Cloud, and to how the SPARQLing Unicorn QGIS plugin works, its planned extensions, and current developments in extending the respective standards. The Unicorn looks forward to help from the geo community!
ᚑᚌᚔ Linked Ogham Stones - Semantische Modellierung und prototypische Analyse irischer Ogham-Inschriften. Thiery, F.; and Homburg, T. Poster at DHd 2020, Paderborn, Germany, March 2020.

@inproceedings{homburg2020ogham,
  title     = {ᚑᚌᚔ Linked Ogham Stones - Semantische Modellierung und prototypische Analyse irischer Ogham-Inschriften},
  author    = {Thiery, Florian and Homburg, Timo},
  year      = {2020},
  month     = mar,
  day       = {5},
  booktitle = {DHd2020},
  type      = {Poster},
  address   = {Paderborn, Germany},
  doi       = {10.5281/zenodo.3697060},
  language  = {german},
  abstract  = {Wir stellen die Ogham-Steine, deren Inhalte, die Beziehungen der auf Steinen vermerkten Personen, ihre Stammeszugehörigkeiten und weitere Metadaten als Linked Data bereit und ermöglichen somit deren Verarbeitung durch eine Reihe von Wissenschafts-Communities. Durch die Verwendung von Vokabularen wie Wikidata (Vrandečić et al. 2014), FOAF (Brickley 2007) und Lemon (McCrae 2012) gewährleisten wir die Erstellung eines semantischen Wörterbuchs für Ogham, welches wir dynamisch aus Textquellen mittels Natural Language Processing Verfahren der Keyword-Extraktion extrahieren. Die für uns relevanten Keywords haben wir aus der Literatur gesammelt und in unserem Repository veröffentlicht. Die Erfassung der Ogham-Steine als Linked Data Ressourcen erlaubt es, durch Verknüpfung von Wissen und dessen Anreicherung folgende Forschungsfragen anzugehen: Klassifikation von Steinen (Familienhierarchie, Namensbeschreibung etc.); Visualisierung von Zusammenhängen (Verwandtschaftsbeziehungen, Stammesgrenzen) auf aus Linked Data generierten Karten; formale Erfassung und maschinenlesbare Kodierung von Ogham-Zeichen nach dem Vorbild von PaleoCodage (Homburg 2019). Als Datenbasis für die Analysen stützen wir uns auf eine Wikidata-Retrodigitalisierung des CIIC Corpus von Macalister (1945, 1949), EpiDoc-Daten des Ogham in 3D Projekts, sowie auf die Celtic Inscribed Stones Project (CISP) Datenbank, die uns dankenswerterweise von Dr. Kris Lockyear zur Verfügung gestellt wurde. Des Weiteren pflegen wir aktiv fehlende und passende Elemente in Wikidata ein, um so später die Daten der Research Community im Sinne des SPARQL Unicorn (Thiery and Trognitz 2019a, 2019b) bereitzustellen. Der Sourcecode unserer App steht quelloffen auf GitHub zur Verfügung (Homburg & Thiery 2019).}
}

Abstract (translated from German): We publish the Ogham stones, their contents, the relationships between the persons recorded on the stones, their tribal affiliations and further metadata as Linked Data, enabling their processing by a range of research communities. By using vocabularies such as Wikidata (Vrandečić et al. 2014), FOAF (Brickley 2007) and Lemon (McCrae 2012), we ensure the creation of a semantic dictionary for Ogham, which we extract dynamically from text sources using natural language processing methods for keyword extraction. We collected the keywords relevant to us from the literature and published them in our repository. Capturing the Ogham stones as Linked Data resources makes it possible, by linking and enriching knowledge, to address the following research questions: classification of stones (family hierarchy, name description, etc.); visualisation of relationships (kinship relations, tribal boundaries) on maps generated from Linked Data; formal capture and machine-readable encoding of Ogham characters following the model of PaleoCodage (Homburg 2019). As the data basis for the analyses we rely on a Wikidata retro-digitisation of the CIIC corpus by Macalister (1945, 1949), EpiDoc data of the Ogham in 3D project, and the Celtic Inscribed Stones Project (CISP) database, kindly provided to us by Dr. Kris Lockyear. Furthermore, we actively add missing and matching items to Wikidata in order to later provide the data to the research community in the spirit of the SPARQL Unicorn (Thiery and Trognitz 2019a, 2019b). The source code of our app is openly available on GitHub (Homburg & Thiery 2019).
2019 (10)
Evaluating linked data location based services using the example of Stolpersteine. Homburg, T.; Böhm, K.; Bruhn, N.; and Hubrich, G. In Advances in Cartography and GIScience of the ICA, Vienna, Austria, November 2019.

@inproceedings{homburgstolpersteine2019,
  title     = {Evaluating linked data location based services using the example of Stolpersteine},
  author    = {Homburg, Timo and Böhm, Klaus and Bruhn, Nicole and Hubrich, Gregor},
  year      = {2019},
  month     = nov,
  day       = {11},
  booktitle = {Advances in Cartography and GIScience of the International Cartographic Association (Advances of the ICA)},
  address   = {Vienna, Austria},
  doi       = {10.5194/ica-adv-2-7-2019},
  isbn      = {978-3-030-36690-2},
  url       = {https://www.adv-cartogr-giscience-int-cartogr-assoc.net/2/7/2019/ica-adv-2-7-2019.pdf},
  language  = {english},
  abstract  = {In this publication we introduce a linked data powered application which assists users in finding so-called Stolpersteine, stones commemorating Jewish victims of the Second World War. We show the feasibility of a progressive web app using linked data resources and evaluate this app against local data sources to find out whether the current linked data environment can equally and/or sufficiently support an application in this knowledge domain.}
}

In this publication we introduce a linked data powered application which assists users in finding so-called Stolpersteine, stones commemorating Jewish victims of the Second World War. We show the feasibility of a progressive web app using linked data resources and evaluate this app against local data sources to find out whether the current linked data environment can equally and/or sufficiently support an application in this knowledge domain.
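A minimal sketch of the nearest-neighbour core of such a location-based service, with made-up coordinates: given Stolperstein positions (for example retrieved from a linked data source), find the one closest to the user. A haversine great-circle distance suffices at city scale.

# Illustrative sketch only: nearest Stolperstein by great-circle distance.
from math import asin, cos, radians, sin, sqrt

def haversine_m(lat1, lon1, lat2, lon2):
    """Great-circle distance between two WGS84 points in metres."""
    dlat, dlon = radians(lat2 - lat1), radians(lon2 - lon1)
    a = sin(dlat / 2) ** 2 + cos(radians(lat1)) * cos(radians(lat2)) * sin(dlon / 2) ** 2
    return 2 * 6371000 * asin(sqrt(a))

stolpersteine = [  # (label, lat, lon) - fictitious sample data
    ("Stolperstein A", 50.0001, 8.2711),
    ("Stolperstein B", 49.9982, 8.2745),
]
user = (49.9990, 8.2730)
label, dist = min(
    ((name, haversine_m(user[0], user[1], lat, lon)) for name, lat, lon in stolpersteine),
    key=lambda t: t[1],
)
print(f"nearest: {label} at {dist:.0f} m")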
Towards Creating A Best Practice Digital Processing Pipeline For Cuneiform Languages. Homburg, T. In DH 2019, Utrecht, Netherlands, July 2019.

@inproceedings{homburgbestpracticecunei2019,
  title     = {Towards Creating A Best Practice Digital Processing Pipeline For Cuneiform Languages},
  author    = {Homburg, Timo},
  year      = {2019},
  month     = jul,
  day       = {12},
  booktitle = {DH 2019},
  address   = {Utrecht, Netherlands},
  url       = {https://dev.clariah.nl/files/dh2019/boa/1204.html},
  language  = {english},
  abstract  = {This publication proposes a best practice digital processing pipeline for cuneiform languages. The pipeline includes the following steps: 1. Annotation of cuneiform tablet 3D scans, 2. Creation of transliterations in ATF and using PaleoCodage to capture cuneiform character variants, 3. Conversion and then annotation of transliterations using TEI (structurally, semantically and linguistically), 4. Creation of semantic dictionaries, 5. Export of the results in various formats to support the needs of many research communities. This poster shows how such a pipeline can be realized using a traditional Git versioning system and a variety of web-based tools assisting in the annotation and export.}
}

This publication proposes a best practice digital processing pipeline for cuneiform languages. The pipeline includes the following steps: 1. Annotation of cuneiform tablet 3D scans, 2. Creation of transliterations in ATF and using PaleoCodage to capture cuneiform character variants, 3. Conversion and then annotation of transliterations using TEI (structurally, semantically and linguistically), 4. Creation of semantic dictionaries, 5. Export of the results in various formats to support the needs of many research communities. This poster shows how such a pipeline can be realized using a traditional Git versioning system and a variety of web-based tools assisting in the annotation and export.
Paleo Codage - A machine-readable way to describe cuneiform characters paleographically. Homburg, T. In DH 2019, Utrecht, Netherlands, July 2019.

@inproceedings{homburgpaleo2019,
  title     = {Paleo Codage - A machine-readable way to describe cuneiform characters paleographically},
  author    = {Homburg, Timo},
  year      = {2019},
  month     = jul,
  day       = {12},
  booktitle = {DH 2019},
  doi       = {10.34894/QAVLOY},
  address   = {Utrecht, Netherlands},
  url       = {https://dev.clariah.nl/files/dh2019/boa/0259.html},
  language  = {english},
  abstract  = {Cuneiform characters have been described using various systems in the past, and the variety of systems used in the literature as well as in daily work varies from language to discipline. Commonly, sign lists (Borger 1971, 2004; Deimel & Gössmann 1947; Rüster & Neu 1989) are created and published in the form of dictionaries in a non-machine-readable form. Similarly, for computers, the only way to distinguish cuneiform characters is currently to assign them different numbers in a list (e.g. Unicode, Unicode Staff 1991) and consider a distinction on this level. We are therefore left with many systems and numbers to describe the same cuneiform sign. Contrary to listing cuneiform signs, Gottstein (2012) took another approach in creating a searchable cuneiform character encoding based on wedge types, which was implemented in applications such as CuneiPainter (Homburg et al. 2015). Character image recognition has also been performed in the past (Mara et al. 2010), but never yielded a machine-readable representation of a cuneiform character's paleographic information, which could have been useful as a means of validation for machine learning recognitions. This publication therefore introduces Paleo Codage, a paleographically distinct machine-readable description inspired by the Manuel de Codage encoding (van den Berg 1997) for Egyptian Hieroglyphs.}
}

Cuneiform characters have been described using various systems in the past, and the variety of systems used in the literature as well as in daily work varies from language to discipline. Commonly, sign lists (Borger 1971, 2004; Deimel & Gössmann 1947; Rüster & Neu 1989) are created and published in the form of dictionaries in a non-machine-readable form. Similarly, for computers, the only way to distinguish cuneiform characters is currently to assign them different numbers in a list (e.g. Unicode, Unicode Staff 1991) and consider a distinction on this level. We are therefore left with many systems and numbers to describe the same cuneiform sign. Contrary to listing cuneiform signs, Gottstein (2012) took another approach in creating a searchable cuneiform character encoding based on wedge types, which was implemented in applications such as CuneiPainter (Homburg et al. 2015). Character image recognition has also been performed in the past (Mara et al. 2010), but never yielded a machine-readable representation of a cuneiform character's paleographic information, which could have been useful as a means of validation for machine learning recognitions. This publication therefore introduces Paleo Codage, a paleographically distinct machine-readable description inspired by the Manuel de Codage encoding (van den Berg 1997) for Egyptian Hieroglyphs.
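Paleo Codage itself is not reproduced here, but the following deliberately simplified, hypothetical wedge-count encoding (loosely in the spirit of the wedge-type encodings the abstract cites, e.g. Gottstein 2012) illustrates why a machine-readable wedge description makes paleographic variants comparable. The sign data is invented for illustration.

# Illustrative sketch only: a toy wedge-count signature, NOT the Paleo Codage syntax.
from collections import Counter

def wedge_signature(wedges: str) -> Counter:
    """Count wedge types in a simplified encoding:
    h = horizontal, v = vertical, d = diagonal, w = Winkelhaken."""
    return Counter(wedges)

# Two (fictitious) paleographic variants of the same sign:
variant_a = wedge_signature("hhvvd")
variant_b = wedge_signature("hhvvw")

# Signatures make the variants comparable wedge type by wedge type:
diff = (variant_a - variant_b) + (variant_b - variant_a)
print("differing wedges:", dict(diff))  # {'d': 1, 'w': 1}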
Linked Data & VGI - Eine komparative Qualitätsanalyse für Deutschland, Österreich und die Schweiz auf Basis von Wikidata und OpenStreetMap. Homburg, T.; and Neis, P. AGIT Journal. July 2019.

@article{homburgagit2019,
  title     = {Linked Data \& VGI - Eine komparative Qualitätsanalyse für Deutschland, Österreich und die Schweiz auf Basis von Wikidata und OpenStreetMap},
  author    = {Homburg, Timo and Neis, Pascal},
  year      = {2019},
  month     = jul,
  day       = {5},
  journal   = {AGIT Journal},
  publisher = {Wichmann Verlag},
  address   = {Salzburg, Austria},
  doi       = {10.14627/537669013},
  isbn      = {978-3-87907-669-7},
  issn      = {2364-9283},
  language  = {english},
  abstract  = {In this publication we present results of a comparative study of Wikidata and OpenStreetMap (OSM) in the area of Germany, Austria and Switzerland. We include metadata of OSM and Wikidata, and compare the two datasets on an object-by-object basis and on equivalent properties as defined by the respective communities. Our results give an indication of the tag coverage of the respective countries, which objects are typically associated with a wikidata tag, which mistakes are commonly made when annotating OSM objects with wikidata tags, and the equality and equivalence of the respective Wikidata and OSM objects.}
}

In this publication we present results of a comparative study of Wikidata and OpenStreetMap (OSM) in the area of Germany, Austria and Switzerland. We include metadata of OSM and Wikidata, and compare the two datasets on an object-by-object basis and on equivalent properties as defined by the respective communities. Our results give an indication of the tag coverage of the respective countries, which objects are typically associated with a wikidata tag, which mistakes are commonly made when annotating OSM objects with wikidata tags, and the equality and equivalence of the respective Wikidata and OSM objects.
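The raw input of such an OSM/Wikidata comparison can be sketched as follows: fetch OSM objects carrying a wikidata=* tag from the Overpass API (a real public service). The bounding box is an arbitrary example around Mainz; a full study would page through far larger extracts and then resolve each tagged Wikidata item.

# Illustrative sketch only: listing OSM nodes with a wikidata tag via Overpass.
import requests

query = """
[out:json][timeout:25];
node["wikidata"](49.98,8.24,50.01,8.30);
out body 20;
"""
resp = requests.post("https://overpass-api.de/api/interpreter", data={"data": query})
resp.raise_for_status()
for element in resp.json()["elements"]:
    tags = element.get("tags", {})
    print(element["id"], tags.get("wikidata"), tags.get("name", "<unnamed>"))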
Semantic Data integration and quality assurance of thematic maps in the German geographic authority. Homburg, T.; Steppan, S.; and Wuerriehausen, F. Lecture Notes in Business Information Processing. June 2019.

@article{homburgbkg2019,
  title    = {Semantic Data integration and quality assurance of thematic maps in the German geographic authority},
  author   = {Homburg, Timo and Steppan, Sebastian and Wuerriehausen, Falk},
  year     = {2019},
  month    = jun,
  day      = {27},
  journal  = {Lecture Notes in Business Information Processing},
  address  = {Sevilla, Spain},
  doi      = {10.1007/978-3-030-36691-9_46},
  isbn     = {978-3-030-36690-2},
  url      = {https://link.springer.com/chapter/10.1007/978-3-030-36691-9_46},
  language = {english},
  abstract = {In this paper we present a new concept of geospatial quality assurance that is currently planned to be implemented in the German Federal Agency of Cartography and Geodesy. Linked open data is being enriched with Semantic Web data in order to create thematic maps relevant to the population. We evaluate the quality of such enriched maps using a standardized process and look at the possible impacts of enriching Semantic Web data with open data sets of the Federal Agency of Cartography and Geodesy.}
}

In this paper we present a new concept of geospatial quality assurance that is currently planned to be implemented in the German Federal Agency of Cartography and Geodesy. Linked open data is being enriched with Semantic Web data in order to create thematic maps relevant to the population. We evaluate the quality of such enriched maps using a standardized process and look at the possible impacts of enriching Semantic Web data with open data sets of the Federal Agency of Cartography and Geodesy.
Querying spatial data in the SemanticGIS project - Towards a new version of GeoSPARQL?. Homburg, T. Presentation at the 111th OGC Technical Committee, Leuven, Belgium, June 2019.

@misc{homburgogc2019,
  title        = {Querying spatial data in the SemanticGIS project - Towards a new version of GeoSPARQL?},
  author       = {Homburg, Timo},
  year         = {2019},
  month        = jun,
  day          = {26},
  type         = {Presentation},
  address      = {Leuven, Belgium},
  doi          = {10.13140/RG.2.2.25171.32807},
  url          = {https://www.researchgate.net/publication/333674385_Querying_spatial_data_in_the_SemanticGIS_project_-_Towards_a_new_version_of_GeoSPARQL},
  howpublished = {111th OGC Technical Committee},
  language     = {english},
  abstract     = {This presentation, meant for a meeting of the W3C Spatial Data On The Web Interest Group, discusses shortcomings in the GeoSPARQL query language which have been encountered in the SemanticGIS project. We give suggestions on how to improve the status quo by adding more query functions, literals and datatype definitions to the language.}
}

This presentation, meant for a meeting of the W3C Spatial Data On The Web Interest Group, discusses shortcomings in the GeoSPARQL query language which have been encountered in the SemanticGIS project. We give suggestions on how to improve the status quo by adding more query functions, literals and datatype definitions to the language.
Die Keilschrifttexte aus Haft Tappeh – Ein Werkstattbericht. Brandes, T.; Homburg, T.; and Zalaghi, A. Presentation at ICDOG 2019, Mainz, Germany, April 2019.

@misc{homburgzalaghibrandes2019,
  title        = {Die Keilschrifttexte aus Haft Tappeh – Ein Werkstattbericht},
  author       = {Brandes, Tim and Homburg, Timo and Zalaghi, Ali},
  year         = {2019},
  month        = apr,
  day          = {9},
  type         = {Presentation},
  address      = {Mainz, Germany},
  url          = {https://converia.uni-mainz.de/frontend/index.php?folder_id=488},
  howpublished = {Presentation at ICDOG 2019},
  language     = {german},
  abstract     = {This presentation covers recent efforts to transliterate, 3D-scan and annotate cuneiform texts recently excavated at Haft Tappeh in Iran. The history of the texts and artifacts is described, and new digital methods for annotating and registering character variants are introduced to support the creation of linked data results for cuneiform annotation projects.}
}

This presentation covers recent efforts to transliterate, 3D-scan and annotate cuneiform texts recently excavated at Haft Tappeh in Iran. The history of the texts and artifacts is described, and new digital methods for annotating and registering character variants are introduced to support the creation of linked data results for cuneiform annotation projects.
Ein unscharfer Suchalgorithmus für Transkriptionen von arabischen Ortsnamen. Scherl, M.; Unold, M.; and Homburg, T. In DHd 2019, Frankfurt, Germany, March 2019.

@inproceedings{scherlunoldhomburg2019,
  title     = {Ein unscharfer Suchalgorithmus für Transkriptionen von arabischen Ortsnamen},
  author    = {Scherl, Magdalena and Unold, Martin and Homburg, Timo},
  year      = {2019},
  month     = mar,
  day       = {29},
  booktitle = {DHd 2019},
  address   = {Frankfurt, Germany},
  doi       = {10.5281/zenodo.2596095},
  isbn      = {978-3-00-062166-6},
  url       = {https://dhd2019.org/programm/do/1400-1530/session-bestimmen-und-identifizieren/vortrag-182/},
  language  = {german},
  abstract  = {Digitale Ortsverzeichnisse (Gazetteers) beinhalten Informationen über Orte sowie deren geographische Lage. Eine der grundlegendsten Aufgaben im Umgang mit solchen Ortsverzeichnissen ist die Suche nach Ortsnamen. Diese Suche kann sehr schwierig sein für Ortsnamen, die in verschiedenen Transliterations- oder Transkriptionsvarianten vorliegen, wie es oft bei arabischen Ortsnamen der Fall ist. In diesen Fällen reicht eine reine Volltextsuche nicht aus. Hier können unscharfe String-Matching-Algorithmen eine bessere Trefferquote für Suchen erreichen. Unser Ziel war es, einen Suchalgorithmus zu entwickeln, der in der Lage ist, arabische Ortsnamen in verschiedenen Transliterationen und Transkriptionen zu identifizieren. Einerseits sollte der Algorithmus fehlertolerant sein, sodass er einen Suchbegriff findet, selbst wenn er etwas anders geschrieben wurde als im Ortsverzeichnis hinterlegt. Andererseits sollte er genau genug sein, um nur tatsächliche Transliterations- und Transkriptionsvarianten einzuschließen. Zum Beispiel sollte die Suche nach "Agaga" den Ort "Ajaja" finden, da es sich um verschiedene Transliterationen desselben arabischen Wortes handelt, aber nicht "Dagaga", da dies ein ganz anderer Ort ist. Um diese beiden Ziele zu erreichen, haben wir einen Algorithmus mit einer modifizierten gewichteten Levenshtein-Distanz (Levenshtein 1965) entwickelt. Eine weitere Eigenschaft unseres Suchalgorithmus ist, dass er für andere Anwendungsfälle als arabische Schrift leicht angepasst werden kann. Wir haben daher auch eine Version für Keilschriftsprachen implementiert und auf einem sumerischen Wörterbuch getestet.}
}

Abstract (translated from German): Digital gazetteers contain information about places and their geographic location. One of the most fundamental tasks when working with such gazetteers is searching for place names. This search can be very difficult for place names that exist in different transliteration or transcription variants, as is often the case for Arabic place names. In these cases a plain full-text search is not sufficient; fuzzy string-matching algorithms can achieve a better hit rate. Our goal was to develop a search algorithm capable of identifying Arabic place names across different transliterations and transcriptions. On the one hand, the algorithm should be error-tolerant, so that it finds a search term even if it was spelled somewhat differently than recorded in the gazetteer. On the other hand, it should be precise enough to include only actual transliteration and transcription variants. For example, a search for "Agaga" should find the place "Ajaja", since these are different transliterations of the same Arabic word, but not "Dagaga", since that is an entirely different place. To achieve both goals, we developed an algorithm based on a modified weighted Levenshtein distance (Levenshtein 1965). A further property of our search algorithm is that it can easily be adapted to use cases other than Arabic script. We therefore also implemented a version for cuneiform languages and tested it on a Sumerian dictionary.
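A minimal sketch of the technique (not the authors' implementation): a weighted Levenshtein distance in which substitutions between letters that often transcribe the same Arabic sound are cheap, so that "Agaga" matches "Ajaja" while "Dagaga" stays distant. The cost table is a toy example covering only the g/j pair from the abstract.

# Illustrative sketch only: weighted Levenshtein distance with cheap substitutions.
CHEAP = {frozenset("gj")}  # letter pairs treated as near-equivalent transcriptions

def sub_cost(a: str, b: str) -> float:
    if a == b:
        return 0.0
    return 0.1 if frozenset((a, b)) in CHEAP else 1.0

def weighted_levenshtein(s: str, t: str) -> float:
    m, n = len(s), len(t)
    d = [[0.0] * (n + 1) for _ in range(m + 1)]
    for i in range(1, m + 1):
        d[i][0] = float(i)
    for j in range(1, n + 1):
        d[0][j] = float(j)
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            d[i][j] = min(d[i - 1][j] + 1,                              # deletion
                          d[i][j - 1] + 1,                              # insertion
                          d[i - 1][j - 1] + sub_cost(s[i - 1], t[j - 1]))
    return d[m][n]

print(weighted_levenshtein("agaga", "ajaja"))   # 0.2 -> near match
print(weighted_levenshtein("agaga", "dagaga"))  # 1.0 -> a different place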
Interpretation and automatic integration of geospatial data into the Semantic Web. Prudhomme, C.; Homburg, T.; Ponciano, J.; Boochs, F.; Cruz, C.; and Roxin, A. Computing, 1–27. February 2019.

@article{Prudhomme2019,
  title     = {Interpretation and automatic integration of geospatial data into the Semantic Web},
  author    = {Prudhomme, Claire and Homburg, Timo and Ponciano, Jean-Jacques and Boochs, Frank and Cruz, Christophe and Roxin, Ana-Maria},
  year      = {2019},
  month     = feb,
  day       = {13},
  journal   = {Computing},
  publisher = {Springer International Publishing},
  address   = {Cham, Switzerland},
  pages     = {1--27},
  doi       = {10.1007/s00607-019-00701-y},
  issn      = {1436-5057},
  url       = {https://doi.org/10.1007/s00607-019-00701-y},
  language  = {english},
  abstract  = {In the context of disaster management, geospatial information plays a crucial role in the decision-making process to protect and save the population. Gathering a maximum of information from different sources to oversee the current situation is a complex task due to the diversity of data formats and structures. Although several approaches have been designed to integrate data from different sources into an ontology, they mainly require background knowledge of the data. However, non-standard data set schemas (NSDS) of relational geospatial data retrieved from e.g. web feature services are not always documented. This lack of background knowledge is a major challenge for automatic semantic data integration. Focusing on this problem, this article presents an automatic approach for geospatial data integration in NSDS. This approach performs a schema mapping according to the result of an ontology matching that corresponds to a semantic interpretation process based on geocoding and natural language processing. This article extends work done in a previous publication by an improved unit detection algorithm, data quality and provenance enrichments, and the detection of feature clusters. It also presents an improved evaluation process to better assess the performance of this approach compared to a manually created ontology. These experiments have shown that the automatic approach obtains a semantic interpretation error of around 10\% relative to the manual approach.}
}

In the context of disaster management, geospatial information plays a crucial role in the decision-making process to protect and save the population. Gathering a maximum of information from different sources to oversee the current situation is a complex task due to the diversity of data formats and structures. Although several approaches have been designed to integrate data from different sources into an ontology, they mainly require background knowledge of the data. However, non-standard data set schemas (NSDS) of relational geospatial data retrieved from e.g. web feature services are not always documented. This lack of background knowledge is a major challenge for automatic semantic data integration. Focusing on this problem, this article presents an automatic approach for geospatial data integration in NSDS. This approach performs a schema mapping according to the result of an ontology matching that corresponds to a semantic interpretation process based on geocoding and natural language processing. This article extends work done in a previous publication by an improved unit detection algorithm, data quality and provenance enrichments, and the detection of feature clusters. It also presents an improved evaluation process to better assess the performance of this approach compared to a manually created ontology. These experiments have shown that the automatic approach obtains a semantic interpretation error of around 10% relative to the manual approach.
Situation-Dependent Data Quality Analysis for Geospatial Data Using Semantic Technologies. Homburg, T.; and Boochs, F. Lecture Notes in Business Information Processing, 566–578. January 2019.

@article{homburg2018situation,
  title     = {Situation-Dependent Data Quality Analysis for Geospatial Data Using Semantic Technologies},
  author    = {Homburg, Timo and Boochs, Frank},
  year      = {2019},
  month     = jan,
  day       = {3},
  journal   = {Lecture Notes in Business Information Processing},
  booktitle = {Business Information Systems Workshops},
  publisher = {Springer International Publishing},
  address   = {Cham, Switzerland},
  pages     = {566--578},
  doi       = {10.1007/978-3-030-04849-5_49},
  isbn      = {978-3-030-04849-5},
  url       = {https://link.springer.com/chapter/10.1007/978-3-030-04849-5_49},
  editor    = {Abramowicz, Witold and Paschke, Adrian},
  language  = {english},
  abstract  = {In this paper we present a new way to evaluate geospatial data quality using Semantic technologies. In contrast to non-semantic approaches to evaluate data quality, Semantic technologies allow us to model situations in which geospatial data may be used and to apply customized geospatial data quality models using reasoning algorithms on a broad scale. We explain how to model data quality using common vocabularies of ontologies in various contexts, apply data quality results using reasoning in a real-world application case using OpenStreetMap as our data source, and highlight the results of our findings on the example of disaster management planning for rescue forces. We contribute to the Semantic Web community and the OpenStreetMap community by proposing a semantic framework to combine use-case-dependent data quality assignments which can be used as reasoning rules and as data quality assurance tools for both communities respectively.}
}

In this paper we present a new way to evaluate geospatial data quality using Semantic technologies. In contrast to non-semantic approaches to evaluate data quality, Semantic technologies allow us to model situations in which geospatial data may be used and to apply customized geospatial data quality models using reasoning algorithms on a broad scale. We explain how to model data quality using common vocabularies of ontologies in various contexts, apply data quality results using reasoning in a real-world application case using OpenStreetMap as our data source, and highlight the results of our findings on the example of disaster management planning for rescue forces. We contribute to the Semantic Web community and the OpenStreetMap community by proposing a semantic framework to combine use-case-dependent data quality assignments which can be used as reasoning rules and as data quality assurance tools for both communities respectively.
2018 (3)
Semantic Geographic Information System: Integration and management of heterogeneous geodata. Homburg, T.; Prudhomme, C.; and Boochs, F. Presentation at Fachaustausch Geoinformation 2018, November 2018.

@inproceedings{homburg2018semgis,
  title     = {Semantic Geographic Information System: Integration and management of heterogeneous geodata},
  author    = {Homburg, Timo and Prudhomme, Claire and Boochs, Frank},
  year      = {2018},
  month     = nov,
  day       = {29},
  type      = {Presentation},
  booktitle = {Fachaustausch Geoinformation 2018},
  doi       = {10.13140/RG.2.2.35246.56645},
  url       = {https://i3mainz.hs-mainz.de/sites/default/files/public/data/conference_poster_3.pdf},
  language  = {english}
}
\n \n\n \n \n \n \n \n \n Semantische Extraktion auf antiken Schriften am Beispiel von Keilschriftsprachen mithilfe semantischer Wörterbücher.\n \n \n \n \n\n\n \n Homburg, T.\n\n\n \n\n\n\n In Extended Abstract Digital Humanities im deutschsprachigen Raum (DHd 2018), February 2018. \n \n\n\n\n
\n\n\n\n \n \n \"SemantischePaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{homburg2018semantische,\n\ttitle        = {Semantische Extraktion auf antiken Schriften am Beispiel von Keilschriftsprachen mithilfe semantischer Wörterbücher},\n\tauthor       = {Homburg, Timo},\n\tyear         = 2018,\n\tmonth        = feb,\n\tday          = 27,\n\tbooktitle    = {Extended Abstract Digital Humanities im deutschsprachigen Raum (DHd 2018)},\n\tisbn         = {978-3-946275-02-2},\n\turl          = {http://dhd2018.uni-koeln.de/wp-content/uploads/boa-DHd2018-web-ISBN.pdf},\n\tlanguage     = {german},\n\tabstract     = {Einleitung und Motivation Semantische Extraktionsmechanismen (z.B. Topic Modelling) werden seit vielen Jahren im Bereich des Semantic Web und Natural Language Processings sowie in den Digital Humanities als Verfahren zur Visualisierung und automatischen Kategorisierung von Dokumenten eingesetzt. Oft ergeben sich durch den Einsatz neue Aspekte der Interpretation von Dokumentensammlungen, die vorher noch nicht ersichtlich waren. Als Beispiele solcher Verfahren kommen häufig Machine Learning Algorithmen zum Einsatz, welche eine Grobeinordnung von Texten vornehmen können. Gepaart mit Metadaten von Texten können anschließend beispielsweise thematische Übersichten von Dokumenten mit geographischem Bezug auf Kartenmaterialien in GIS Systemen oder mittels historischer Gazetteers zeitliche Zusammenhänge automatisiert dargestellt werden. In dieser Publikation möchten wir die Möglichkeiten der semantischen Extraktion nutzen und diese auf einer Sammlung von Texten in Keilschriftsprachen anwenden. Keilschriftsprachen Keilschriftsprachen haben in den letzten Jahren ein größeres Interesse in der Digital Humanities und Linguistik Community erfahren (Inglese 2015, Homburg et al. 2016, Homburg 2017, Sukhareva et al. 2017). Neben der andauernden Standardisierung in Unicode werden unter anderem Part Of Speech Tagger und Mechanismen der automatisierten Übersetzung erprobt, um Keilschrifttexte besser mit dem Computer zu erfassen und zu interpretieren. Des Weiteren wurde die Erlernbarkeit der Keilschriftsprachen durch digitale Tools wie Eingabemethoden oder Karteikartenlernprogramme verbessert (Homburg 2015). Trotz all der erreichten Fortschritte verbleiben jedoch zahlreiche Probleme bei der maschinellen Verarbeitung von Keilschriftsprachen, die unter anderem mit der geringen Verfügbarkeit annotierter Ressourcen und der fehlenden Verfügbarkeit maschinenlesbarer und semantisch sowie linguistisch annotierter Wörterbücher zusammenhängen. Diese Limitierungen hindern viele Natural Language Processing und semantische Extraktionsalgorithmen daran, ein besseres Ergebnis zu erzielen. Wir möchten mit dieser Publikation einen Beitrag leisten, diese Situation zu verbessern, und stellen das "Semantic Dictionary for Ancient Languages" vor, welches ein Versuch ist, durch Annotierung vorhandener, in der Forschungscommunity anerkannter Wörterbuchressourcen mit Unicode Characters, Semantic Web Konzepten, etymologischen Daten, gemeinsamen Vokabularen und POSTags eine semantische Ressource in RDF für die Optimierung solcher Algorithmen auf Basis der Sprachen Hethitisch, Sumerisch und Akkadisch zu schaffen. Das Wörterbuch basiert auf dem Lemon-Standard, einem W3C-Standard, der es erlaubt, ebenfalls multilinguale Ressourcen abzubilden. So können Entwicklungen der Sprache und gemeinsame Vokabulare wie zum Beispiel Akkadogramme und Sumerogramme in Hethitisch mit erfasst werden. 
Semantisches Wörterbuch und Semantische Extraktion Wir testen die Performance des Wörterbuchs auf einer der größten Sammlungen von digitalen Keilschrifttexten, der CDLI, aus der wir repräsentative Texte in hethitischer, sumerischer und akkadischer Keilschrift aus verschiedenen Epochen extrahieren und mittels Machine Learning klassifizieren sowie verschlagworten. Das Ergebnis der semantischen Extraktion ist eine Sammlung von Themen pro Keilschrifttafel, die sich wiederum in Überkategorien gruppieren lassen und in einen zeitlichen, sprachlichen, dialektalen sowie örtlichen Kontext gestellt werden können. Anhand der verschiedenen Metadaten der CDLI war es uns möglich, eine thematische Karte der Fundorte der Keilschrifttafeln sowie deren Inhalt pro Epoche darzustellen, aus der das relevante Fachpublikum schließen kann, welche Themen zu welcher Zeit an welchem Fundort relevant für die Schreiber der jeweiligen Epoche waren. Im Zuge einer Weiterentwicklung möchten wir diese Informationen mit weiteren Metadaten wie beispielsweise der Jurisdiktion, den Daten der jeweiligen Herrscher sowie rekonstruierten Orten aus der antiken Zeit vervollständigen, um Rückschlüsse auf interessante historische Ereignisse zu ziehen. Aufbau des Posters Auf unserem Poster möchten wir gerne den Prozess des Aufbaus sowie die Struktur des semantischen Wörterbuchs sowie die Karte, die durch unsere semantische Extraktion entstanden ist, präsentieren, um die jeweiligen Fachwissenschaftler zur Diskussion über die Entwicklung eines Semantic Web von Keilschriftsprachen und Keilschriftartefakten einzuladen. Des Weiteren soll unser Poster eine Reihe von Anwendungen demonstrieren, die sich in Zukunft mit unserer semantischen Ressource entwickeln lassen, um einen Beitrag zu einem hoffentlich zukünftig existierenden Linked-Data-Datensatz für Keilschriftartefakte zur Dokumentation von Keilschrift zu leisten.}\n}\n
\n
\n\n\n
\n Introduction and Motivation: Semantic extraction mechanisms (e.g. topic modelling) have been used for many years in the Semantic Web and Natural Language Processing fields, as well as in the Digital Humanities, as techniques for visualising and automatically categorising documents. Their use often reveals new aspects of the interpretation of document collections that were not apparent before. Machine learning algorithms, which can perform a coarse classification of texts, are frequently employed as examples of such techniques. Combined with text metadata, thematic overviews of documents with a geographic reference can then be displayed automatically on maps in GIS systems, or temporal relationships can be shown using historical gazetteers. In this publication we want to use the possibilities of semantic extraction and apply them to a collection of texts in cuneiform languages. Cuneiform languages: Cuneiform languages have attracted growing interest in the Digital Humanities and linguistics communities in recent years (Inglese 2015, Homburg et al. 2016, Homburg 2017, Sukhareva et al. 2017). Alongside the ongoing standardisation in Unicode, part-of-speech taggers and automated translation mechanisms, among others, are being tested in order to better capture and interpret cuneiform texts with the computer. Furthermore, digital tools such as input methods and flash card learning programs have improved the learnability of cuneiform languages (Homburg 2015). Despite all the progress achieved, numerous problems remain in the machine processing of cuneiform languages, which are related, among other things, to the low availability of annotated resources and the lack of machine-readable, semantically and linguistically annotated dictionaries. These limitations prevent many natural language processing and semantic extraction algorithms from achieving better results. With this publication we want to contribute to improving this situation and present the \"Semantic Dictionary for Ancient Languages\", an attempt to create a semantic resource in RDF for optimising such algorithms for the languages Hittite, Sumerian and Akkadian, by annotating dictionary resources recognised in the research community with Unicode characters, Semantic Web concepts, etymological data, shared vocabularies and POS tags. The dictionary is based on the Lemon model, a W3C standard that also allows multilingual resources to be represented. In this way, developments of the languages and shared vocabularies, such as Akkadograms and Sumerograms in Hittite, can be captured as well. 
Semantic Dictionary and Semantic Extraction: We test the performance of the dictionary on one of the largest collections of digital cuneiform texts, the CDLI, from which we extract representative Hittite, Sumerian and Akkadian cuneiform texts from different epochs and classify and tag them using machine learning. The result of the semantic extraction is a collection of topics per cuneiform tablet, which can in turn be grouped into super-categories and placed in a temporal, linguistic, dialectal and spatial context. Using the various CDLI metadata, we were able to display a thematic map of the find spots of the cuneiform tablets and of their content per epoch, from which the relevant expert audience can infer which topics were relevant for the scribes of each epoch at which time and place. In a further development we want to complete this information with additional metadata, such as jurisdictions, the dates of the respective rulers and reconstructed ancient places, in order to draw conclusions about interesting historical events. Poster structure: On our poster we would like to present the construction process and the structure of the semantic dictionary, as well as the map produced by our semantic extraction, in order to invite the respective domain experts to a discussion on the development of a Semantic Web of cuneiform languages and cuneiform artefacts. Furthermore, our poster is intended to demonstrate a range of applications that can be built on our semantic resource in the future, as a contribution to a hopefully forthcoming Linked Data dataset for cuneiform artefacts for the documentation of cuneiform.\n
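To make the dictionary structure described above concrete, here is a minimal sketch, assuming Python with rdflib and the OntoLex-Lemon vocabulary, of how a single entry with a transliterated and a Unicode cuneiform written form, a POS tag and a Semantic Web reference could be modelled. The entry URIs, the sample word and the DBpedia link are hypothetical illustrations, not data from the actual resource.

# Minimal sketch of a Lemon/OntoLex-style dictionary entry in RDF, assuming
# rdflib; the word, its sign sequence and the DBpedia link are illustrative.
from rdflib import Graph, Namespace, Literal, URIRef
from rdflib.namespace import RDF

ONTOLEX = Namespace("http://www.w3.org/ns/lemon/ontolex#")
LEXINFO = Namespace("http://www.lexinfo.net/ontology/2.0/lexinfo#")
EX = Namespace("http://example.org/dict/")  # hypothetical base URI

g = Graph()
g.bind("ontolex", ONTOLEX)
g.bind("lexinfo", LEXINFO)

entry = EX["watar"]        # hypothetical entry for Hittite "watar" (water)
form = EX["watar_form"]
sense = EX["watar_sense"]

g.add((entry, RDF.type, ONTOLEX.LexicalEntry))
g.add((entry, ONTOLEX.canonicalForm, form))
g.add((form, ONTOLEX.writtenRep, Literal("wa-a-tar", lang="hit")))  # transliteration
g.add((form, ONTOLEX.writtenRep, Literal("𒉿𒀀𒋻", lang="hit")))      # Unicode cuneiform (illustrative signs)
g.add((entry, LEXINFO.partOfSpeech, LEXINFO.noun))                  # POS tag
g.add((entry, ONTOLEX.sense, sense))
g.add((sense, ONTOLEX.reference, URIRef("http://dbpedia.org/resource/Water")))  # Semantic Web concept

print(g.serialize(format="turtle"))

Multiple writtenRep values on one form are what lets a single entry carry the transliteration and the cuneiform spelling side by side, which is the multilingual/multi-script capability of Lemon the abstract refers to.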
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Map Change Prediction for Quality Assurance.\n \n \n \n \n\n\n \n Homburg, T.; Boochs, F.; Cruz, C.; and Roxin, A.\n\n\n \n\n\n\n In Kiefer, P.; Huang, H.; Van de Weghe, N.; and Raubal, M., editor(s), LBS 2018, pages 194–200, January 2018. ETH Zurich\n \n\n\n\n
\n\n\n\n \n \n \"MapPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{homburg2018integration,\n\ttitle        = {Map Change Prediction for Quality Assurance},\n\tauthor       = {Homburg, Timo and Boochs, Frank and Cruz, Christophe and Roxin, Ana},\n\tyear         = 2018,\n\tmonth        = jan,\n\tbooktitle    = {LBS 2018},\n\tpublisher    = {ETH Zurich},\n\tpages        = {194--200},\n\tdoi          = {10.3929/ethz-b-000225617},\n\turl          = {https://doi.org/10.3929/ethz-b-000225617},\n\tabstract     = {Open geospatial data sources like OpenStreetMap are created by a community of mappers with different levels of experience and different equipment available. It is therefore important to assess the quality of OpenStreetMap-like maps to give users recommendations on the situations in which a map is suitable for their needs. In this work we want to use already defined ways to assess the quality of geospatial data and apply them as features to various machine learning algorithms to classify which areas are likely to change in future revisions of the map. In a next step we intend to qualify the changes detected by the algorithm and try to find the causes of the changes being tracked.},\n\teditor       = {Kiefer, Peter and Huang, Haosheng and Van de Weghe, Nico and Raubal, Martin},\n\tlanguage     = {english}\n}\n
\n
\n\n\n
\n Open geospatial data sources like OpenStreetMap are created by a community of mappers with different levels of experience and different equipment available. It is therefore important to assess the quality of OpenStreetMap-like maps to give users recommendations on the situations in which a map is suitable for their needs. In this work we want to use already defined ways to assess the quality of geospatial data and apply them as features to various machine learning algorithms to classify which areas are likely to change in future revisions of the map. In a next step we intend to qualify the changes detected by the algorithm and try to find the causes of the changes being tracked.\n
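As an illustration of the classification step just described, here is a minimal sketch assuming scikit-learn; the per-cell quality features and the toy data are hypothetical placeholders for the quality measures the paper derives from OpenStreetMap.

# Sketch of change prediction over map cells, assuming scikit-learn; the
# feature columns (edit count, distinct mappers, mapper experience, tag
# completeness) are hypothetical stand-ins for the paper's quality measures.
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

# One row per map cell: [edits_last_year, distinct_mappers, avg_experience, tag_completeness]
X = np.array([
    [12, 5, 0.8, 0.90],
    [ 0, 1, 0.2, 0.40],
    [ 3, 2, 0.5, 0.70],
    [25, 9, 0.9, 0.95],
])
y = np.array([1, 0, 0, 1])  # 1 = cell changed in the next map revision

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)
clf = RandomForestClassifier(n_estimators=100, random_state=0).fit(X_train, y_train)
print(clf.predict(X_test))  # predicted change/no-change per held-out cell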
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2017\n \n \n (3)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Integration, Quality Assurance and Usage of Geospatial Data with Semantic Tools.\n \n \n \n \n\n\n \n Homburg, T.; Prudhomme, C.; Boochs, F.; Cruz, C.; and Roxin, A.\n\n\n \n\n\n\n gis.Science - Die Zeitschrift für Geoinformatik, 3: 91–96. September 2017.\n \n\n\n\n
\n\n\n\n \n \n \"Integration,Paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{homburg2017integration,\n\ttitle        = {Integration, Quality Assurance and Usage of Geospatial Data with Semantic Tools},\n\tauthor       = {Homburg, Timo and Prudhomme, Claire and Boochs, Frank and Cruz, Christophe and Roxin, Ana},\n\tyear         = 2017,\n\tmonth        = sep,\n\tjournal      = {gis.Science - Die Zeitschrift f{\"u}r Geoinformatik},\n\tvolume       = 3,\n\tpages        = {91--96},\n\tissn         = {1869-9391},\n\turl          = {https://gispoint.de/artikelarchiv/gis/2017/gisscience-ausgabe-32017/4201-integration-quality-assurance-and-usage-of-geospatial-data-with-semantic-tools-i-integration-bewertung-und-nutzung-heterogener-datenquellen-mittels-semantischer-werkzeuge-i.html},\n\tabstract     = {In diesem Artikel stellen wir unsere Forschung in der Integration von Geodaten in einen Semantic Web Kontext in unserem Projekt Semantic GIS vor. Zunächst möchten wir den Zweck und die Vorteile einer Integration und Interpretation von Daten in das Semantic Web beleuchten und anschließend unseren Integrationsprozess bestehend aus Datengewinnung, automatischer Interpretation, Qualitätssicherung und Provenance sowie den Datenzugriff erklären. Um die Anwendung unserer Forschung zu demonstrieren, gehen wir auf zwei Anwendungsfälle in unserem Projekt ein: die Bewertung von OpenStreetMap-Daten und die Verbesserung des Katastrophenschutzes mittels semantischem Reasoning. Wir schließen den Artikel mit einem Fazit sowie einem kurzen Ausblick auf zukünftige Forschung.},\n\tlanguage     = {german}\n}\n
\n
\n\n\n
\n In this article we present our research on the integration of geospatial data into a Semantic Web context within our project Semantic GIS. We first discuss the purpose and the benefits of integrating and interpreting data in the Semantic Web, and then explain our integration process, consisting of data acquisition, automatic interpretation, quality assurance and provenance, as well as data access. To demonstrate the application of our research, we discuss two use cases from our project: the assessment of OpenStreetMap data and the improvement of disaster management by means of semantic reasoning. We close the article with a conclusion and a brief outlook on future research.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n POSTagging and Semantic Dictionary Creation for Hittite Cuneiform.\n \n \n \n \n\n\n \n Homburg, T.\n\n\n \n\n\n\n In Lewis, R.; Raynor, C.; Forest, D.; Sinatra, M.; and Sinclair, S., editor(s), Digital Humanities 2017, DH 2017, Conference Abstracts, McGill University & Université de Montréal, Montréal, Canada, August 8-11, 2017, Montréal, Canada, August 2017. Alliance of Digital Humanities Organizations, Alliance of Digital Humanities Organizations (ADHO)\n \n\n\n\n
\n\n\n\n \n \n \"POSTaggingPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{homburg2017postagging,\n\ttitle        = {POSTagging and Semantic Dictionary Creation for Hittite Cuneiform},\n\tauthor       = {Homburg, Timo},\n\tyear         = 2017,\n\tmonth        = aug,\n\tday          = 9,\n\tbooktitle    = {Digital Humanities 2017, {DH} 2017, Conference Abstracts, McGill University {\&} Universit{\'{e}} de Montr{\'{e}}al, Montr{\'{e}}al, Canada, August 8-11, 2017},\n\tpublisher    = {Alliance of Digital Humanities Organizations {(ADHO)}},\n\taddress      = {Montréal, Canada},\n\turl          = {https://dh2017.adho.org/abstracts/139/139.pdf},\n\tabstract     = {Presentation Topic and State Of The Art On our poster we want to present ongoing work to create an automatic natural language processing tool for Hittite cuneiform. Hittite cuneiform texts are to this day manually transcribed by the respective experts and then published in a transliteration format (commonly ATF). Pictures of the original cuneiform tablet may be provided, and more rarely, cuneiform representations in Unicode are present. Due to recent advancements in the field (such as Cuneify), an automatic translation of many Hittite cuneiform transliterations to their respective cuneiform representation is possible. Research Contributions We build upon this work by creating tools that aim to automatically translate Hittite cuneiform texts to English from either a Unicode cuneiform representation or their transliteration representation. POSTagger We have created a morphological analyzer to detect nouns, verbs, several kinds of pronouns, their respective declinations and appendices as well as structural particles. On a sample set of annotated Hittite texts from different epochs in cuneiform and transliteration representation we have evaluated the morphological analyzer, its advantages, problems and possible solutions, and we intend to present the results as well as some POSTagging examples in section one of our poster. Dictionary Creation Dictionaries for Hittite cuneiform often exist in non-machine-readable formats and without a connection to Semantic Web concepts. We intend to change this situation by parsing digitally available non-semantic dictionaries and using matching algorithms to find concepts of the English translations of such dictionaries in the Semantic Web, e.g. DBpedia or Wikidata. Dictionaries of this kind are stored using the Lexical Model for Ontologies (Lemon). In addition to freely available dictionaries we intend to use expert resources developed by the academy of sciences in Mainz/Germany to verify and extend our generated dictionaries. We intend to present the dictionary creation process, statistics about the content of generated dictionaries and their impact in section two of our poster. Machine Translation Using the newly created dictionaries as well as the POSTagging information we intend to test several automated machine translation approaches, of which we will outline the process and possible approaches in poster section three. 
Contributions for the Communities With our approaches we intend to contribute to the archaeological community in Germany by analysing Hittite cuneiform tablets. Together with work from the University of Heidelberg on image recognition of cuneiform tablets, we want to focus on creating a natural language processing pipeline from scanning cuneiform tablets to an available translation in English.},\n\tlanguage     = {english},\n\teditor       = {Lewis, Rhian and Raynor, Cecily and Forest, Dominic and Sinatra, Michael and Sinclair, St{\'{e}}fan},\n\torganization = {Alliance of Digital Humanities Organizations},\n\tkeywords     = {Hittite, Cuneiform, Dictionary, POSTagging, Semantic Web}\n}\n
\n
\n\n\n
\n Presentation Topic and State Of The Art On our poster we want to present ongoing work to create an automatic natural language processing tool for Hittite cuneiform. Hittite cuneiform texts are to this day manually transcribed by the respective experts and then published in a transliteration format (commonly ATF). Pictures of the original cuneiform tablet may be provided, and more rarely, cuneiform representations in Unicode are present. Due to recent advancements in the field (such as Cuneify), an automatic translation of many Hittite cuneiform transliterations to their respective cuneiform representation is possible. Research Contributions We build upon this work by creating tools that aim to automatically translate Hittite cuneiform texts to English from either a Unicode cuneiform representation or their transliteration representation. POSTagger We have created a morphological analyzer to detect nouns, verbs, several kinds of pronouns, their respective declinations and appendices as well as structural particles. On a sample set of annotated Hittite texts from different epochs in cuneiform and transliteration representation we have evaluated the morphological analyzer, its advantages, problems and possible solutions, and we intend to present the results as well as some POSTagging examples in section one of our poster. Dictionary Creation Dictionaries for Hittite cuneiform often exist in non-machine-readable formats and without a connection to Semantic Web concepts. We intend to change this situation by parsing digitally available non-semantic dictionaries and using matching algorithms to find concepts of the English translations of such dictionaries in the Semantic Web, e.g. DBpedia or Wikidata. Dictionaries of this kind are stored using the Lexical Model for Ontologies (Lemon). In addition to freely available dictionaries we intend to use expert resources developed by the academy of sciences in Mainz/Germany to verify and extend our generated dictionaries. We intend to present the dictionary creation process, statistics about the content of generated dictionaries and their impact in section two of our poster. Machine Translation Using the newly created dictionaries as well as the POSTagging information we intend to test several automated machine translation approaches, of which we will outline the process and possible approaches in poster section three. Contributions for the Communities With our approaches we intend to contribute to the archaeological community in Germany by analysing Hittite cuneiform tablets. Together with work from the University of Heidelberg on image recognition of cuneiform tablets, we want to focus on creating a natural language processing pipeline from scanning cuneiform tablets to an available translation in English.\n
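To illustrate the kind of suffix-driven analysis such a morphological analyzer performs, here is a toy sketch in Python; the ending tables are deliberately simplified, illustrative stand-ins rather than the analyzer's actual rules.

# Toy sketch of suffix-based POS guessing for transliterated Hittite, in the
# spirit of the morphological analyzer described above; the ending tables
# are hypothetical simplifications, not real Hittite paradigms.
NOUN_ENDINGS = ("as", "an", "us")        # simplified common-gender case endings
VERB_ENDINGS = ("mi", "si", "zi", "anzi")  # simplified present-tense endings

def guess_pos(token: str) -> str:
    """Return a coarse POS tag for one transliterated token."""
    for ending in VERB_ENDINGS:          # check verbal endings first
        if token.endswith(ending):
            return "VERB"
    for ending in NOUN_ENDINGS:
        if token.endswith(ending):
            return "NOUN"
    return "UNKNOWN"

for token in ["e-es-zi", "an-tu-uh-sa-as"]:  # hypothetical sample tokens
    print(token, guess_pos(token))           # -> VERB, NOUN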
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Automatic Integration of Spatial Data into the Semantic Web.\n \n \n \n \n\n\n \n Prudhomme, C.; Homburg, T.; Ponciano, J.; Boochs, F.; Roxin, A.; and Cruz, C.\n\n\n \n\n\n\n In Proceedings of the 13th International Conference on Web Information Systems and Technologies - Volume 1: WEBIST, pages 107–115, April 2017. INSTICC, SciTePress\n Best Student Paper Award\n\n\n\n
\n\n\n\n \n \n \"AutomaticPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{prudhomme2017automatic,\n\ttitle        = {Automatic Integration of Spatial Data into the Semantic Web},\n\tauthor       = {Prudhomme, Claire and Homburg, Timo and Ponciano, Jean-Jacques and Boochs, Frank and Roxin, Ana and Cruz, Christophe},\n\tyear         = 2017,\n\tmonth        = apr,\n\tday          = 26,\n\tbooktitle    = {Proceedings of the 13th International Conference on Web Information Systems and Technologies - Volume 1: WEBIST},\n\tlocation     = {Porto, Portugal},\n\tpublisher    = {SciTePress},\n\tpages        = {107--115},\n\tdoi          = {10.5220/0006306601070115},\n\tisbn         = {978-989-758-246-2},\n\turl          = {http://www.scitepress.org/digitalLibrary/PublicationsDetail.aspx?ID=9PVXQr5fDjQ=&t=1},\n\tnote         = {Best Student Paper Award},\n\tabstract     = {For several years, many researchers have tried to semantically integrate geospatial datasets into the Semantic Web. Although there are many general means of integrating interconnected relational datasets (e.g. R2RML), importing schema-less relational geospatial data remains a major challenge in the Semantic Web community. In our project SemGIS we face significant importation challenges of schema-less geodatasets, in various data formats, without relations to the Semantic Web. We therefore developed an automatic process of semantification for the aforementioned data using, among others, the geometry of spatial objects. We combine natural language processing with geographic and semantic tools in order to extract semantic information from spatial data into a local ontology linked to existing Semantic Web resources. For our experiments, we used the LinkedGeoData and GeoNames ontologies to link semantic spatial information and compared links with DBpedia and Wikidata for other types of information. The aim of our experiments presented in this paper is to examine the feasibility and limits of an automated integration of spatial data into a semantic knowledge base and to assess its correctness against different open datasets. Other ways to link these open datasets have been applied and we used the different results to evaluate our automatic approach.},\n\tkeywords     = {Geospatial Data, Linked Data, Natural Language Processing, Ontology, R2RML, SDI, Semantic Web, Semantification},\n\tlanguage     = {english},\n\torganization = {INSTICC}\n}\n
\n
\n\n\n
\n For several years, many researchers have tried to semantically integrate geospatial datasets into the Semantic Web. Although there are many general means of integrating interconnected relational datasets (e.g. R2RML), importing schema-less relational geospatial data remains a major challenge in the Semantic Web community. In our project SemGIS we face significant importation challenges of schema-less geodatasets, in various data formats, without relations to the Semantic Web. We therefore developed an automatic process of semantification for the aforementioned data using, among others, the geometry of spatial objects. We combine natural language processing with geographic and semantic tools in order to extract semantic information from spatial data into a local ontology linked to existing Semantic Web resources. For our experiments, we used the LinkedGeoData and GeoNames ontologies to link semantic spatial information and compared links with DBpedia and Wikidata for other types of information. The aim of our experiments presented in this paper is to examine the feasibility and limits of an automated integration of spatial data into a semantic knowledge base and to assess its correctness against different open datasets. Other ways to link these open datasets have been applied and we used the different results to evaluate our automatic approach.\n
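A minimal sketch of the semantification idea under stated assumptions: a schema-less feature is assigned an ontology class by naively matching its attribute tokens against known class labels, and its geometry is emitted as GeoSPARQL WKT using rdflib. The class URIs, the label index and the input feature are hypothetical stand-ins for the LinkedGeoData/GeoNames lookups used in the paper.

# Sketch of geometry- and attribute-driven semantification with rdflib; the
# label index and the schema-less input feature are hypothetical examples.
from rdflib import Graph, Namespace, Literal
from rdflib.namespace import RDF

GEO = Namespace("http://www.opengis.net/ont/geosparql#")
LGDO = Namespace("http://linkedgeodata.org/ontology/")
EX = Namespace("http://example.org/feature/")  # hypothetical base URI

CLASS_LABELS = {"school": LGDO.School, "hospital": LGDO.Hospital}  # tiny label index

feature = {"id": "42", "attributes": {"name": "Goethe School"},
           "wkt": "POINT(8.24 50.08)"}  # hypothetical schema-less input

g = Graph()
subj = EX[feature["id"]]
tokens = " ".join(feature["attributes"].values()).lower().split()
for token in tokens:                        # naive label matching
    if token in CLASS_LABELS:
        g.add((subj, RDF.type, CLASS_LABELS[token]))
geom = EX[feature["id"] + "_geom"]
g.add((subj, GEO.hasGeometry, geom))
g.add((geom, GEO.asWKT, Literal(feature["wkt"], datatype=GEO.wktLiteral)))
print(g.serialize(format="turtle"))

A real pipeline would of course use fuzzier matching and disambiguation against LinkedGeoData, GeoNames, DBpedia or Wikidata; the sketch only shows the shape of the output triples.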
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2016\n \n \n (3)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Using an INSPIRE Ontology to Support Spatial Data Interoperability.\n \n \n \n \n\n\n \n Würriehausen, F.; Homburg, T.; and Müller, H.\n\n\n \n\n\n\n In Barcelona, Spain, September 2016. \n \n\n\n\n
\n\n\n\n \n \n \"UsingPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{wurriehausen2016using,\n\ttitle        = {Using an INSPIRE Ontology to Support Spatial Data Interoperability},\n\tauthor       = {W{\\"u}rriehausen, Falk and Homburg, Timo and M{\\"u}ller, Hartmut},\n\tyear         = 2016,\n\tmonth        = sep,\n\tday          = 28,\n\tjournal      = {INSPIRE Conference},\n\taddress      = {Barcelona, Spain},\n\turl          = {https://inspire.ec.europa.eu/events/conferences/inspire_2016/pdfs/2016_psessions/28%20WEDNESDAY_PSESSIONS_H3_14.00-15.30______28_H3_14.15_188_Presentation.pdf},\n\tlanguage     = {english}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Interpreting Heterogeneous Geospatial Data Using Semantic Web Technologies.\n \n \n \n \n\n\n \n Homburg, T.; Prudhomme, C.; Würriehausen, F.; Karmacharya, A.; Boochs, F.; Roxin, A.; and Cruz, C.\n\n\n \n\n\n\n In Gervasi, O.; Murgante, B.; Misra, S.; Rocha, A. M. A.; Torre, C. M.; Taniar, D.; Apduhan, B. O.; Stankova, E.; and Wang, S., editor(s), International Conference on Computational Science and Its Applications, pages 240–255, Beijing, China, July 2016. Springer International Publishing\n \n\n\n\n
\n\n\n\n \n \n \"InterpretingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{homburg2016interpreting,\n\ttitle        = {Interpreting Heterogeneous Geospatial Data Using Semantic Web Technologies},\n\tauthor       = {Homburg, Timo and Prudhomme, Claire and W{\\"u}rriehausen, Falk and Karmacharya, Ashish and Boochs, Frank and Roxin, Ana and Cruz, Christophe},\n\tyear         = 2016,\n\tmonth        = jul,\n\tday          = 6,\n\tbooktitle    = {International Conference on Computational Science and Its Applications},\n\taddress      = {Beijing, China},\n\tpages        = {240--255},\n\tdoi          = {10.1007/978-3-319-42111-7},\n\tisbn         = {978-3-319-42110-0},\n\tissn         = {0302-9743},\n\turl          = {https://link.springer.com/chapter/10.1007/978-3-319-42111-7_19},\n\tchapter      = 19,\n\tlccn         = 2016944355,\n\tabstract     = {The paper presents work on implementation of semantic technologies within a geospatial environment to provide a common base for further semantic interpretation. The work adds on the current works in similar areas where priorities are more on spatial data integration. We assert that having a common unified semantic view on heterogeneous datasets provides a dimension that allows us to extend beyond conventional concepts of searchability, reusability, composability and interoperability of digital geospatial data. It provides contextual understanding on geodata that will enhance effective interpretations through possible reasoning capabilities. We highlight this through use cases in disaster management and planned land use that are significantly different. This paper illustrates the work that firstly follows existing Semantic Web standards when dealing with vector geodata and secondly extends current standards when dealing with raster geodata and more advanced geospatial operations.},\n\teditor       = {Gervasi, Osvaldo and Murgante, Beniamino and Misra, Sanjay and Rocha, Ana Maria A.C.  and Torre, Carmelo M. and Taniar, David and Apduhan, Bernady O. and Stankova, Elena and Wang, Shangguang},\n\tlanguage     = {english},\n\torganization = {Springer International Publishing}\n}\n
\n
\n\n\n
\n The paper presents work on the implementation of semantic technologies within a geospatial environment to provide a common base for further semantic interpretation. The work builds on current work in similar areas whose priorities lie more on spatial data integration. We assert that having a common unified semantic view on heterogeneous datasets provides a dimension that allows us to extend beyond conventional concepts of searchability, reusability, composability and interoperability of digital geospatial data. It provides a contextual understanding of geodata that will enhance effective interpretations through possible reasoning capabilities. We highlight this through use cases in disaster management and planned land use that are significantly different. This paper illustrates work that firstly follows existing Semantic Web standards when dealing with vector geodata and secondly extends current standards when dealing with raster geodata and more advanced geospatial operations.\n
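The kind of geospatial operation such a unified semantic view enables can be illustrated with a GeoSPARQL query; a sketch assuming SPARQLWrapper and a GeoSPARQL-capable triple store, with a hypothetical endpoint URL and polygon.

# Illustrative GeoSPARQL query, assuming SPARQLWrapper and a store that
# evaluates geof: functions; endpoint and graph contents are hypothetical.
from SPARQLWrapper import SPARQLWrapper, JSON

sparql = SPARQLWrapper("http://example.org/sparql")  # hypothetical endpoint
sparql.setQuery("""
PREFIX geo:  <http://www.opengis.net/ont/geosparql#>
PREFIX geof: <http://www.opengis.net/def/function/geosparql/>
SELECT ?feature WHERE {
  ?feature geo:hasGeometry/geo:asWKT ?wkt .
  # features intersecting a hypothetical flood polygon
  FILTER(geof:sfIntersects(?wkt,
    "POLYGON((8.2 50.0, 8.3 50.0, 8.3 50.1, 8.2 50.1, 8.2 50.0))"^^geo:wktLiteral))
}
""")
sparql.setReturnFormat(JSON)
for row in sparql.query().convert()["results"]["bindings"]:
    print(row["feature"]["value"])  # URIs of intersecting features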
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Word Segmentation for Akkadian Cuneiform.\n \n \n \n \n\n\n \n Homburg, T.; and Chiarcos, C.\n\n\n \n\n\n\n In Calzolari, N.; Choukri, K.; Declerck, T.; Goggi, S.; Grobelnik, M.; Maegaard, B.; Mariani, J.; Mazo, H.; Moreno, A.; Odijk, J.; and Piperidis, S., editor(s), Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016), Paris, France, May 2016. European Language Resources Association (ELRA)\n \n\n\n\n
\n\n\n\n \n \n \"WordPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{homburg2016word,\n\ttitle        = {Word Segmentation for Akkadian Cuneiform},\n\tauthor       = {Homburg, Timo and Chiarcos, Christian},\n\tyear         = 2016,\n\tmonth        = may,\n\tday          = 27,\n\tbooktitle    = {Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016)},\n\tlocation     = {Portorož, Slovenia},\n\tpublisher    = {European Language Resources Association (ELRA)},\n\taddress      = {Paris, France},\n\tisbn         = {978-2-9517408-9-1},\n\turl          = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/816_Paper.pdf},\n\tabstract     = {We present experiments on word segmentation for Akkadian cuneiform, an ancient writing system and a language used for about three millennia in the ancient Near East. To the best of our knowledge, this is the first study of this kind applied to either the Akkadian language or the cuneiform writing system. As a logosyllabic writing system, cuneiform structurally resembles East Asian writing systems, so we employ word segmentation algorithms originally developed for Chinese and Japanese. We describe results of rule-based algorithms, dictionary-based algorithms, statistical and machine learning approaches. Our results may indicate possible promising steps in cuneiform word segmentation that can create and improve natural language processing in this area.},\n\teditor       = {Calzolari, Nicoletta and Choukri, Khalid and Declerck, Thierry and Goggi, Sara and Grobelnik, Marko and Maegaard, Bente and Mariani, Joseph and Mazo, Hélène and Moreno, Asunción and Odijk, Jan and Piperidis, Stelios},\n\tlanguage     = {english},\n\tdate         = {23-28}\n}\n
\n
\n\n\n
\n We present experiments on word segmentation for Akkadian cuneiform, an ancient writing system and a language used for about three millennia in the ancient Near East. To the best of our knowledge, this is the first study of this kind applied to either the Akkadian language or the cuneiform writing system. As a logosyllabic writing system, cuneiform structurally resembles East Asian writing systems, so we employ word segmentation algorithms originally developed for Chinese and Japanese. We describe results of rule-based algorithms, dictionary-based algorithms, statistical and machine learning approaches. Our results may indicate possible promising steps in cuneiform word segmentation that can create and improve natural language processing in this area.\n
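One of the dictionary-based algorithms transferred from Chinese segmentation is forward maximum matching; a minimal sketch follows, with a hypothetical toy lexicon in place of real Akkadian sign sequences.

# Minimal sketch of dictionary-based forward maximum matching, a classic
# Chinese-segmentation baseline; the toy lexicon is hypothetical.
def max_match(signs: str, dictionary: set[str], max_len: int = 4) -> list[str]:
    """Greedily split a sign sequence into the longest known words."""
    words, i = [], 0
    while i < len(signs):
        for length in range(min(max_len, len(signs) - i), 0, -1):
            candidate = signs[i:i + length]
            if length == 1 or candidate in dictionary:
                words.append(candidate)   # single signs act as the fallback
                i += length
                break
    return words

toy_dictionary = {"AB", "ABC", "CD"}        # hypothetical sign-sequence lexicon
print(max_match("ABCDAB", toy_dictionary))  # -> ['ABC', 'D', 'AB']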
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2015\n \n \n (3)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Verfahren zur Wortsegmentierung nichtalphabetischer Schriften.\n \n \n \n \n\n\n \n Homburg, T.\n\n\n \n\n\n\n Master's thesis, Institut für Informatik, Goethe Universität Frankfurt, Frankfurt, Germany, March 2015.\n Valedictorian Award Computer Science Master of Science\n\n\n\n
\n\n\n\n \n \n \"VerfahrenPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@mastersthesis{homburg2015verfahren,\n\ttitle        = {Verfahren zur Wortsegmentierung nichtalphabetischer Schriften},\n\tauthor       = {Homburg, Timo},\n\tyear         = 2015,\n\tmonth        = mar,\n\tday          = 18,\n\taddress      = {Frankfurt, Germany},\n\turl          = {https://www.researchgate.net/publication/312605727_Verfahren_zur_Wortsegmentierung_nichtalphabetischer_Schriften},\n\tnote         = {Valedictorian Award Computer Science Master of Science},\n\tlanguage     = {german},\n\tabstract     = {Digital Humanities nowadays gain more and more importance in analyzing texts in new and interesting ways. At the same time, Digital Humanities are becoming recognized by more and more faculties. One example of the application of the Digital Humanities and of computational linguistics can be found in the field of Archaeology. For archaeologists, a decent way of language processing of ancient texts is essential to their work. Many artifacts from ancient civilizations are in need of a textual analysis. Yet many of them have not been analyzed because of a lack of time and human resources or a lack of an automated assisting process for textual analysis. Optimizing the natural language processing chain for an archaeologist can therefore benefit this whole area of expertise. However, in order to analyze texts using a computer, texts have to be broken down to the smallest analyzable units, so-called tokens, e.g. words. While this has proven to be an easy task in European languages like German, French or English (words are usually separated by whitespaces), in non-alphabetic languages like Chinese, Japanese or Korean, word segmentation is a big initial obstacle in analyzing text using a computer. Words in those languages are not clearly separated by distinctive stop chars such as whitespaces. To deal with this obstacle, rule-based, dictionary-based and statistical approaches have been developed for the Chinese and Japanese language. However, to this date no such approaches are known for other non-alphabetic languages like cuneiform or Egyptian hieroglyphs. This Master Thesis applies Chinese and Japanese word segmentation algorithms to the Akkadian language, an ancient language from Mesopotamia written in cuneiform, and provides fundamental research in this area. At first, fundamental differences and similarities between Chinese/Japanese and the Akkadian language will be discussed and a model of classification will be introduced. Subsequently, a selection of up to 20 suitable segmentation algorithms adapted from Chinese and Japanese is presented and applied using data from three different epochs of Akkadian history to guarantee representative results. The performance of the algorithms is evaluated afterwards using several evaluation metrics and interpreted to propose further improvements for the segmentation of Akkadian and other related languages.},\n\tschool       = {Institut f{\"u}r Informatik, Goethe Universit{\"a}t Frankfurt}\n}\n
\n
\n\n\n
\n Digital Humanities nowadays gain more and more importance in analyzing texts in new and interesting ways. At the same time, Digital Humanities are becoming recognized by more and more faculties. One example of the application of the Digital Humanities and of computational linguistics can be found in the field of Archaeology. For archaeologists, a decent way of language processing of ancient texts is essential to their work. Many artifacts from ancient civilizations are in need of a textual analysis. Yet many of them have not been analyzed because of a lack of time and human resources or a lack of an automated assisting process for textual analysis. Optimizing the natural language processing chain for an archaeologist can therefore benefit this whole area of expertise. However, in order to analyze texts using a computer, texts have to be broken down to the smallest analyzable units, so-called tokens, e.g. words. While this has proven to be an easy task in European languages like German, French or English (words are usually separated by whitespaces), in non-alphabetic languages like Chinese, Japanese or Korean, word segmentation is a big initial obstacle in analyzing text using a computer. Words in those languages are not clearly separated by distinctive stop chars such as whitespaces. To deal with this obstacle, rule-based, dictionary-based and statistical approaches have been developed for the Chinese and Japanese language. However, to this date no such approaches are known for other non-alphabetic languages like cuneiform or Egyptian hieroglyphs. This Master Thesis applies Chinese and Japanese word segmentation algorithms to the Akkadian language, an ancient language from Mesopotamia written in cuneiform, and provides fundamental research in this area. At first, fundamental differences and similarities between Chinese/Japanese and the Akkadian language will be discussed and a model of classification will be introduced. Subsequently, a selection of up to 20 suitable segmentation algorithms adapted from Chinese and Japanese is presented and applied using data from three different epochs of Akkadian history to guarantee representative results. The performance of the algorithms is evaluated afterwards using several evaluation metrics and interpreted to propose further improvements for the segmentation of Akkadian and other related languages.\n
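Segmentation output is typically scored against a gold standard with boundary-based precision, recall and F1; a short sketch of such a metric follows (the thesis's exact evaluation metrics may differ).

# Sketch of boundary-based precision/recall/F1 for word segmentation; the
# gold and predicted segmentations must cover the same character sequence.
def boundaries(words: list[str]) -> set[int]:
    """Character offsets at which word boundaries occur."""
    out, pos = set(), 0
    for w in words[:-1]:
        pos += len(w)
        out.add(pos)
    return out

def prf(gold: list[str], predicted: list[str]) -> tuple[float, float, float]:
    g, p = boundaries(gold), boundaries(predicted)
    tp = len(g & p)                              # correctly placed boundaries
    precision = tp / len(p) if p else 1.0
    recall = tp / len(g) if g else 1.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    return precision, recall, f1

print(prf(gold=["AB", "CD", "E"], predicted=["AB", "C", "DE"]))  # -> (0.5, 0.5, 0.5)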
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Verfahren zur Wortsegmentierung nicht-alphabetischer Schriften.\n \n \n \n \n\n\n \n Homburg, T.\n\n\n \n\n\n\n March 2015.\n DARIAH-DE Digital Humanities Award 2015\n\n\n\n
\n\n\n\n \n \n \"VerfahrenPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{homburg2015segmentierung,\n\ttitle        = {Verfahren zur Wortsegmentierung nicht-alphabetischer Schriften},\n\tauthor       = {Homburg, Timo},\n\tyear         = 2015,\n\tmonth        = mar,\n\tday          = 3,\n\tpublisher    = {DH Summit 2015},\n\taddress      = {Berlin, Germany},\n\turl          = {https://de.dariah.eu/documents/61689/82910/46_dhaward.pdf/82a466e7-436c-4637-b076-d05171ebd90f},\n\tnote         = {DARIAH-DE Digital Humanities Award 2015},\n\tlanguage     = {german}\n}\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Learning Cuneiform The Modern Way.\n \n \n \n \n\n\n \n Homburg, T.; Chiarcos, C.; Richter, T.; and Wicke, D.\n\n\n \n\n\n\n February 2015.\n \n\n\n\n
\n\n\n\n \n \n \"LearningPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{homburg2015learning,\n\ttitle        = {Learning Cuneiform The Modern Way},\n\tauthor       = {Homburg, Timo and Chiarcos, Christian and Richter, Thomas and Wicke, Dirk},\n\tyear         = 2015,\n\tmonth        = feb,\n\tday          = 25,\n\tbooktitle    = {Extended Abstract Digital Humanities im deutschsprachigen Raum (DHd 2015)},\n\tpublisher    = {DHd 2015},\n\taddress      = {Graz, Austria},\n\turl          = {http://gams.uni-graz.at/o:dhd2015.p.55},\n\tabstract     = {Using a poster we want to propose methods of conveniently typing and learning cuneiform characters, words and phrases for the Akkadian, Sumerian and Hittite language, using input method engines common in Asian languages and utilising Anki, a common tool for flash card learning. Up until this date there is no free and convenient way of typing Unicode cuneiform characters other than utilizing the Unicode code tables directly. Online dictionaries often provide images of cuneiform characters, refer to specific font specifications or do not provide a cuneiform representation at all. More often, just a transliteration or transcription is provided as a valid representation, and, contrary to common scientific needs, this is often the only aspect taught in archaeology studies at universities. Clearly, none of those practices are satisfying or easily adaptable for text processing and therefore not useful for computer-aided teaching methods. However, an input method engine can act as a suitable tool for solving the mentioned input and compatibility problems while at the same time being useful for education and language learning purposes. The input method we developed is based on the concept of transliteration input known from Chinese as Pinyin input, the most common way of typing non-alphabetical languages on a computer. To achieve an equivalent input for the aforementioned languages we utilised a given character-transliteration-to-cuneiform table to create transliteration-to-cuneiform mappings of the Akkadian, Sumerian and Hittite CDLI corpora respectively. Organised as a tree, thus minimising latency, word- and character-based input method engines were created for Java (JIME), JQuery, SCIM and Ibus, thereby covering the most important input method engines on Linux, Web and Java environments. We furthermore utilized the given data to create flash card sets consisting of more than 50,000 words for the Anki and AnkiDroid flash card learning programs. Anki schedules learning content according to a spaced repetition learning method which has proven its positive learning effect over a longer period of time to maximize learning success. Given the two provided concepts, teachers can now easily create their own flash cards according to the pace and content of their lectures. Students may enjoy a convenient and scientifically proven way of learning cuneiform vocabulary, as well as a way to prove their learning by utilizing the input method engine to create their own cuneiform texts. In conclusion, a notable improvement in writing and in learning the concerned languages has been realised and is in general perceived well.},\n\tlanguage     = {german}\n}\n
\n
\n\n\n
\n Using a poster we want to propose methods of conveniently typing and learning cuneiform characters, words and phrases for the Akkadian, Sumerian and Hittite language, using input method engines common in Asian languages and utilising Anki, a common tool for flash card learning. Up until this date there is no free and convenient way of typing Unicode cuneiform characters other than utilizing the Unicode code tables directly. Online dictionaries often provide images of cuneiform characters, refer to specific font specifications or do not provide a cuneiform representation at all. More often, just a transliteration or transcription is provided as a valid representation, and, contrary to common scientific needs, this is often the only aspect taught in archaeology studies at universities. Clearly, none of those practices are satisfying or easily adaptable for text processing and therefore not useful for computer-aided teaching methods. However, an input method engine can act as a suitable tool for solving the mentioned input and compatibility problems while at the same time being useful for education and language learning purposes. The input method we developed is based on the concept of transliteration input known from Chinese as Pinyin input, the most common way of typing non-alphabetical languages on a computer. To achieve an equivalent input for the aforementioned languages we utilised a given character-transliteration-to-cuneiform table to create transliteration-to-cuneiform mappings of the Akkadian, Sumerian and Hittite CDLI corpora respectively. Organised as a tree, thus minimising latency, word- and character-based input method engines were created for Java (JIME), JQuery, SCIM and Ibus, thereby covering the most important input method engines on Linux, Web and Java environments. We furthermore utilized the given data to create flash card sets consisting of more than 50,000 words for the Anki and AnkiDroid flash card learning programs. Anki schedules learning content according to a spaced repetition learning method which has proven its positive learning effect over a longer period of time to maximize learning success. Given the two provided concepts, teachers can now easily create their own flash cards according to the pace and content of their lectures. Students may enjoy a convenient and scientifically proven way of learning cuneiform vocabulary, as well as a way to prove their learning by utilizing the input method engine to create their own cuneiform texts. In conclusion, a notable improvement in writing and in learning the concerned languages has been realised and is in general perceived well.\n
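A sketch of the tree-organised lookup behind such an input method: typed transliteration prefixes are walked through a trie, and all cuneiform candidates below the reached node are offered for selection. The three sign mappings are illustrative examples only.

# Sketch of a trie-based transliteration input method lookup; the
# transliteration-to-cuneiform mappings are illustrative examples.
class TrieNode:
    def __init__(self):
        self.children: dict[str, "TrieNode"] = {}
        self.candidates: list[str] = []   # cuneiform strings for this key

class IMETrie:
    def __init__(self):
        self.root = TrieNode()

    def insert(self, translit: str, cuneiform: str) -> None:
        node = self.root
        for ch in translit:
            node = node.children.setdefault(ch, TrieNode())
        node.candidates.append(cuneiform)

    def lookup(self, prefix: str) -> list[str]:
        """All candidates whose transliteration starts with the typed prefix."""
        node = self.root
        for ch in prefix:
            if ch not in node.children:
                return []
            node = node.children[ch]
        out, stack = [], [node]
        while stack:                      # collect the whole subtree
            n = stack.pop()
            out.extend(n.candidates)
            stack.extend(n.children.values())
        return out

ime = IMETrie()
ime.insert("an", "𒀭")    # illustrative mappings
ime.insert("ka", "𒅗")
ime.insert("kal", "𒆗")
print(ime.lookup("ka"))   # -> ['𒅗', '𒆗']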
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2014\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Towards workflow planning based on semantic eligibility.\n \n \n \n \n\n\n \n Homburg, T.; Schumacher, P.; and Minor, M.\n\n\n \n\n\n\n In The 37th German Conference on Artificial Intelligence, Stuttgart, Germany, September 2014. \n \n\n\n\n
\n\n\n\n \n \n \"TowardsPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 15 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{homburg2014towards,\n\ttitle        = {Towards workflow planning based on semantic eligibility},\n\tauthor       = {Homburg, Timo and Schumacher, Pol and Minor, Mirjam},\n\tyear         = 2014,\n\tmonth        = sep,\n\tday          = 23,\n\tbooktitle    = {The 37th German Conference on Artificial Intelligence},\n\taddress      = {Stuttgart, Germany},\n\turl          = {http://wi.cs.uni-frankfurt.de/webdav/publications/puk2014.pdf},\n\tabstract     = {A major problem in the research for new artificial intelligence methods for workflows is the evaluation. There is a lack of large evaluation corpora. Existing methods manually model workflows or use workflow extraction to automatically extract workflows from text. Both existing approaches have limitations. The manual modeling of workflows requires a lot of human effort and it would be expensive to create a large test corpus. Workflow extraction is limited by the number of existing textual process descriptions and it is not guaranteed that the workflows are semantically correct. In this paper we suggest to set up a planning domain and apply a planner to create a large number of valid plans. Workflows can be derived from plans. The planner uses a semantic eligibility function to determine whether an operator can be applied to a resource or not. We present a first concept and a prototype implementation in the cooking workflow domain.},\n\tlanguage     = {english}\n}\n
\n
\n\n\n
\n A major problem in research on new artificial intelligence methods for workflows is evaluation: there is a lack of large evaluation corpora. Existing methods either model workflows manually or use workflow extraction to automatically extract workflows from text. Both existing approaches have limitations. The manual modeling of workflows requires a lot of human effort, and it would be expensive to create a large test corpus. Workflow extraction is limited by the number of existing textual process descriptions, and it is not guaranteed that the extracted workflows are semantically correct. In this paper we suggest setting up a planning domain and applying a planner to create a large number of valid plans. Workflows can be derived from plans. The planner uses a semantic eligibility function to determine whether an operator can be applied to a resource or not. We present a first concept and a prototype implementation in the cooking workflow domain.\n
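A minimal sketch of a semantic eligibility test in the spirit of the cooking-domain prototype: an operator is applicable to a resource if the resource's class is subsumed by the class the operator expects. The tiny taxonomy and operator table are hypothetical.

# Sketch of a semantic eligibility function over a toy class taxonomy; the
# taxonomy and the operator table are hypothetical cooking-domain examples.
SUBCLASS_OF = {            # child -> parent
    "carrot": "vegetable",
    "vegetable": "ingredient",
    "flour": "ingredient",
}

def is_a(resource: str, concept: str) -> bool:
    """Walk up the taxonomy to test class membership."""
    while resource is not None:
        if resource == concept:
            return True
        resource = SUBCLASS_OF.get(resource)
    return False

OPERATORS = {"peel": "vegetable", "sift": "flour"}  # operator -> eligible class

def eligible(operator: str, resource: str) -> bool:
    return is_a(resource, OPERATORS[operator])

print(eligible("peel", "carrot"))  # True: a carrot is a vegetable
print(eligible("sift", "carrot"))  # False: a carrot is not flour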
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2013\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n WikiNect: Gestisches Schreiben für kinetische Museumswikis.\n \n \n \n \n\n\n \n Asir, A; Creech, B; Homburg, T.; Hoxha, N; Röhrl, B; Stender, N; Uslu, T; Wiegand, T; Kastrati, L; Valipour, S; and others\n\n\n \n\n\n\n 2013.\n \n\n\n\n
\n\n\n\n \n \n \"WikiNect:Paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{asirwikinect2013,\n\ttitle        = {WikiNect: Gestisches Schreiben f{\\"u}r kinetische Museumswikis},\n\tauthor       = {Asir, A and Creech, B and Homburg, Timo and Hoxha, N and R{\\"o}hrl, B and Stender, N and Uslu, T and Wiegand, T and Kastrati, L and Valipour, S and others},\n\tyear         = 2013,\n\taddress      = {Frankfurt, Germany},\n\turl          = {https://hucompute.org/applications/wikinect/},\n\tschool       = {Institut f{\\"u}r Informatik, Goethe Universit{\\"a}t Frankfurt}\n}\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2012\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Entwicklung einer Androidanwendung zur Zustandsanzeige von Messstationen des Deutschen Wetterdienstes.\n \n \n \n \n\n\n \n Homburg, T.\n\n\n \n\n\n\n February 2012.\n Valedictorian Award Computer Science Bachelor of Science\n\n\n\n
\n\n\n\n \n \n \"EntwicklungPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@bachelorsthesis{homburg2012entwicklung,\n\ttitle        = {Entwicklung einer Androidanwendung zur Zustandsanzeige von Messstationen des Deutschen Wetterdienstes},\n\tauthor       = {Homburg, Timo},\n\tyear         = 2012,\n\tmonth        = feb,\n\taddress      = {Wiesbaden, Germany},\n\turl          = {https://hds.hebis.de/hsrm/Record/HEB305977989},\n\tnote         = {Valedictorian Award Computer Science Bachelor of Science},\n\tlanguage     = {german},\n\tabstract     = {This Bachelor thesis describes the development of an Android-based mobile application to monitor the states of weather monitoring stations used for radioactivity measurements at the «German National Meteorological Service» (DWD). The use case of the application is the scenario of supporting on-call services via mobile phone, which are on duty 24 hours. In case of occurring status anomalies, the application should notify the user about the cause of the anomaly and the overall situation in the data collection network.},\n\tschool       = {Hochschule RheinMain}\n}\n
\n
\n\n\n
\n This Bachelor thesis describes the development of an Android-based mobile application to monitor the states of weather monitoring stations used for radioactivity measurements at the «German National Meteorological Service» (DWD). The use case of the application is the scenario of supporting on-call services via mobile phone, which are on duty 24 hours. In case of occurring status anomalies, the application should notify the user about the cause of the anomaly and the overall situation in the data collection network.\n
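A language-neutral sketch (in Python rather than Android Java) of the monitoring logic described: poll station states, flag anomalies, and notify; the station names and states are hypothetical.

# Sketch of the poll-and-notify loop of such a monitoring app; station data,
# state vocabulary and the notification channel are hypothetical stand-ins.
STATES_OK = {"OK"}

def poll_stations() -> dict[str, str]:
    # Stand-in for fetching live station states from the measurement network.
    return {"station_a": "OK", "station_b": "SENSOR_FAILURE", "station_c": "OK"}

def notify(station: str, state: str) -> None:
    print(f"ALERT: {station} reports {state}")  # stand-in for a push notification

def check_once() -> None:
    states = poll_stations()
    anomalies = {s: st for s, st in states.items() if st not in STATES_OK}
    for station, state in anomalies.items():
        notify(station, state)
    print(f"{len(states) - len(anomalies)}/{len(states)} stations healthy")

check_once()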
\n\n\n
\n\n\n\n\n\n
\n
\n\n\n\n\n
\n\n\n \n\n \n \n \n \n\n
\n"}; document.write(bibbase_data.data);