Neudecker, C.; Baierer, K.; Gerber, M.; Clausner, C.; Antonacopoulos, A.; and Pletschacher, S. A survey of OCR evaluation tools and metrics. In The 6th International Workshop on Historical Document Imaging and Processing, pages 13–18, Lausanne, Switzerland, September 2021. ACM. doi:10.1145/3476887.3476888

Abstract: The millions of pages of historical documents that are digitized in libraries are increasingly used in contexts that have more specific requirements for OCR quality than keyword search. How to comprehensively, efficiently and reliably assess the quality of OCR results against the background of mass digitization, when ground truth can only ever be produced for very small numbers? Due to gaps in specifications, results from OCR evaluation tools can return different results, and due to differences in implementation, even commonly used error rates are often not directly comparable. OCR evaluation metrics and sampling methods are also not sufficient where they do not take into account the accuracy of layout analysis, since for advanced use cases like Natural Language Processing or the Digital Humanities, accurate layout analysis and detection of the reading order are crucial. We provide an overview of OCR evaluation metrics and tools, describe two advanced use cases for OCR results, and perform an OCR evaluation experiment with multiple evaluation tools and different metrics for two distinct datasets. We analyze the differences and commonalities in light of the presented use cases and suggest areas for future work.

BibTeX:

@inproceedings{neudecker_survey_2021,
  address = {Lausanne Switzerland},
  title = {A survey of {OCR} evaluation tools and metrics},
  isbn = {978-1-4503-8690-6},
  url = {https://dl.acm.org/doi/10.1145/3476887.3476888},
  doi = {10.1145/3476887.3476888},
  abstract = {The millions of pages of historical documents that are digitized in libraries are increasingly used in contexts that have more specific requirements for OCR quality than keyword search. How to comprehensively, efficiently and reliably assess the quality of OCR results against the background of mass digitization, when ground truth can only ever be produced for very small numbers? Due to gaps in specifications, results from OCR evaluation tools can return different results, and due to differences in implementation, even commonly used error rates are often not directly comparable. OCR evaluation metrics and sampling methods are also not sufficient where they do not take into account the accuracy of layout analysis, since for advanced use cases like Natural Language Processing or the Digital Humanities, accurate layout analysis and detection of the reading order are crucial. We provide an overview of OCR evaluation metrics and tools, describe two advanced use cases for OCR results, and perform an OCR evaluation experiment with multiple evaluation tools and different metrics for two distinct datasets. We analyze the differences and commonalities in light of the presented use cases and suggest areas for future work.},
  language = {en},
  urldate = {2023-06-21},
  booktitle = {The 6th {International} {Workshop} on {Historical} {Document} {Imaging} and {Processing}},
  publisher = {ACM},
  author = {Neudecker, Clemens and Baierer, Konstantin and Gerber, Mike and Clausner, Christian and Antonacopoulos, Apostolos and Pletschacher, Stefan},
  month = sep,
  year = {2021},
  pages = {13--18},
}
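The survey above notes that even commonly used error rates are often not directly comparable because evaluation tools differ in implementation details. As a purely illustrative sketch (not code from the paper or from any of the surveyed tools), the character error rate (CER) is commonly computed as the edit distance between the OCR output and the ground truth, normalised by the ground-truth length; choices such as Unicode normalisation and whitespace handling are exactly the kind of detail on which implementations diverge.

# Illustrative CER sketch (one common definition, assumed here; not the
# paper's tooling): edit distance between OCR output and ground truth,
# divided by the length of the ground truth.
import unicodedata

def levenshtein(a: str, b: str) -> int:
    """Edit distance with unit costs for insertions, deletions and substitutions."""
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, start=1):
        cur = [i]
        for j, cb in enumerate(b, start=1):
            cur.append(min(prev[j] + 1,                # delete ca
                           cur[j - 1] + 1,             # insert cb
                           prev[j - 1] + (ca != cb)))  # substitute ca -> cb
        prev = cur
    return prev[-1]

def cer(ocr_text: str, ground_truth: str) -> float:
    """Character error rate after NFC normalisation of both strings."""
    ocr_text = unicodedata.normalize("NFC", ocr_text)
    ground_truth = unicodedata.normalize("NFC", ground_truth)
    return levenshtein(ocr_text, ground_truth) / max(len(ground_truth), 1)

print(cer("Thc quick brown f0x", "The quick brown fox"))  # 2 edits / 19 chars ≈ 0.105

Two tools that disagree only on, say, whether to apply Unicode normalisation or how to treat runs of whitespace will already report different CERs for the same output, which is the comparability problem the survey describes.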
Pletschacher, S.; and Antonacopoulos, A. The PAGE (Page Analysis and Ground-Truth Elements) Format Framework. Pages 257–260, August 2010. IEEE. doi:10.1109/ICPR.2010.72

BibTeX:

@inproceedings{pletschacher_page_2010,
  title = {The {PAGE} ({Page} {Analysis} and {Ground}-{Truth} {Elements}) {Format} {Framework}},
  isbn = {978-1-4244-7542-1},
  url = {http://ieeexplore.ieee.org/document/5597587/},
  doi = {10.1109/ICPR.2010.72},
  urldate = {2018-06-22},
  publisher = {IEEE},
  author = {Pletschacher, Stefan and Antonacopoulos, Apostolos},
  month = aug,
  year = {2010},
  pages = {257--260},
}
Gruning, T.; Labahn, R.; Diem, M.; Kleber, F.; and Fiel, S. READ-BAD: A New Dataset and Evaluation Scheme for Baseline Detection in Archival Documents. In 13th IAPR International Workshop on Document Analysis Systems, DAS 2018, Vienna, Austria, April 24-27, 2018, pages 351–356, 2018. IEEE Computer Society. doi:10.1109/DAS.2018.38

BibTeX:

@inproceedings{gruning_read-bad:_2018,
  title = {{READ}-{BAD}: {A} {New} {Dataset} and {Evaluation} {Scheme} for {Baseline} {Detection} in {Archival} {Documents}},
  isbn = {978-1-5386-3346-5},
  shorttitle = {{READ}-{BAD}},
  url = {http://doi.ieeecomputersociety.org/10.1109/DAS.2018.38},
  doi = {10.1109/DAS.2018.38},
  urldate = {2018-06-29},
  booktitle = {13th {IAPR} {International} {Workshop} on {Document} {Analysis} {Systems}, {DAS} 2018, {Vienna}, {Austria}, {April} 24-27, 2018},
  publisher = {IEEE Computer Society},
  author = {Gruning, Tobias and Labahn, Roger and Diem, Markus and Kleber, Florian and Fiel, Stefan},
  year = {2018},
  pages = {351--356},
}
Dixon, S.; and Smith, B. Digital performance: a history of new media in theater, dance, performance art, and installation. Leonardo series. The MIT Press, Cambridge, Massachusetts; London, England, [Paperback edition], 2015.

BibTeX:

@book{dixon_digital_2015,
  address = {Cambridge, Massachusetts, London, England},
  edition = {[Paperback edition]},
  series = {Leonardo},
  title = {Digital performance : a history of new media in theater, dance, performance art, and installation},
  isbn = {978-0-262-52752-1},
  shorttitle = {Digital performance},
  publisher = {The MIT Press},
  author = {Dixon, Steve and Smith, Barry},
  year = {2015},
}
Déjean, H.; Meunier, J.; Gao, L.; Huang, Y.; Fang, Y.; Kleber, F.; and Lang, E. ICDAR 2019 Competition on Table Detection and Recognition (cTDaR). Zenodo, April 2019. doi:10.5281/zenodo.3239032

Abstract: The aim of this competition is to evaluate the performance of state of the art methods for table detection (TRACK A) and table recognition (TRACK B). For the first track, document images containing one or several tables are provided. For TRACK B two subtracks exist: the first subtrack (B.1) provides the table region. Thus, only the table structure recognition must be performed. The second subtrack (B.2) provides no a-priori information. This means, the table region and table structure detection has to be done. The Ground Truth is provided in a similar format as for the ICDAR 2013 competition (see [2]):

<?xml version="1.0" encoding="UTF-8"?>
<document filename='filename.jpg'>
    <table id='Table_1540517170416_3'>
        <Coords points="180,160 4354,160 4354,3287 180,3287"/>
        <cell id='TableCell_1540517477147_58' start-row='0' start-col='0' end-row='1' end-col='2'>
            <Coords points="180,160 177,456 614,456 615,163"/>
        </cell>
        ...
    </table>
    ...
</document>

The difference to Gobel et al. [2] is the Coords tag which defines a table/cell as a polygon specified by a list of coordinates. For B.1 the table and its coordinates is given together with the input image. Important Note: For the modern dataset, the convex hull of the content describes a cell region. For the historical dataset, it is requested that the output region of a cell is the cell boundary. This is necessary due to the characteristics of handwritten text, which is often overlapping with different cells. See also: http://sac.founderit.com/tasks.html The evaluation tool is available at github: https://github.com/cndplab-founder/ctdar_measurement_tool

BibTeX:

@misc{dejean_icdar_2019,
  title = {{ICDAR} 2019 {Competition} on {Table} {Detection} and {Recognition} ({cTDaR})},
  url = {https://zenodo.org/record/3239032#.X1IyZdbgqrI},
  doi = {10.5281/zenodo.3239032},
  abstract = {The aim of this competition is to evaluate the performance of state of the art methods for table detection (TRACK A) and table recognition (TRACK B). For the first track, document images containing one or several tables are provided. For TRACK B two subtracks exist: the first subtrack (B.1) provides the table region. Thus, only the table structure recognition must be performed. The second subtrack (B.2) provides no a-priori information. This means, the table region and table structure detection has to be done. The Ground Truth is provided in a similar format as for the ICDAR 2013 competition (see [2]): {\textless}?xml version="1.0" encoding="UTF-8"?{\textgreater} {\textless}document filename='filename.jpg'{\textgreater} {\textless}table id='Table\_1540517170416\_3'{\textgreater} {\textless}Coords points="180,160 4354,160 4354,3287 180,3287"/{\textgreater} {\textless}cell id='TableCell\_1540517477147\_58' start-row='0' start-col='0' end-row='1' end-col='2'{\textgreater} {\textless}Coords points="180,160 177,456 614,456 615,163"/{\textgreater} {\textless}/cell{\textgreater} ... {\textless}/table{\textgreater} ... {\textless}/document{\textgreater} The difference to Gobel et al. [2] is the Coords tag which defines a table/cell as a polygon specified by a list of coordinates. For B.1 the table and its coordinates is given together with the input image. Important Note: For the modern dataset, the convex hull of the content describes a cell region. For the historical dataset, it is requested that the output region of a cell is the cell boundary. This is necessary due to the characteristics of handwritten text, which is often overlapping with different cells. See also: http://sac.founderit.com/tasks.html The evaluation tool is available at github: https://github.com/cndplab-founder/ctdar\_measurement\_tool},
  urldate = {2020-09-04},
  publisher = {Zenodo},
  author = {Déjean, Hervé and Meunier, Jean-Luc and Gao, Liangcai and Huang, Yilun and Fang, Yu and Kleber, Florian and Lang, Eva-Maria},
  month = apr,
  year = {2019},
}
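The abstract above shows the ground-truth format used in the competition: each table and each cell carries a Coords element whose points attribute is a space-separated list of x,y pairs describing a polygon. The following sketch is only an illustration of how such a file could be read; the element and attribute names are taken from the single example in the abstract, not from the official ctdar_measurement_tool, so treat them as assumptions.

# Minimal sketch for reading a cTDaR-style ground-truth file (assumed layout,
# based only on the example quoted in the abstract above).
import xml.etree.ElementTree as ET

def parse_points(points: str) -> list[tuple[int, int]]:
    """Turn '180,160 4354,160 ...' into [(180, 160), (4354, 160), ...]."""
    pairs = []
    for pair in points.split():
        x, y = pair.split(",")
        pairs.append((int(x), int(y)))
    return pairs

def read_ground_truth(path: str) -> list[dict]:
    """Return one record per table: its polygon and the polygons of its cells."""
    tables = []
    root = ET.parse(path).getroot()            # <document filename='...'>
    for table in root.iter("table"):
        record = {
            "table_id": table.get("id"),
            "polygon": parse_points(table.find("Coords").get("points")),
            "cells": [],
        }
        for cell in table.iter("cell"):
            record["cells"].append({
                "cell_id": cell.get("id"),
                "start_row": int(cell.get("start-row")),
                "start_col": int(cell.get("start-col")),
                "end_row": int(cell.get("end-row")),
                "end_col": int(cell.get("end-col")),
                "polygon": parse_points(cell.find("Coords").get("points")),
            })
        tables.append(record)
    return tables

The official evaluation tool linked in the abstract is the reference implementation for scoring; the sketch stops at reading the ground truth.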
Stamatopoulos, N.; and Gatos, B. Goal-oriented performance evaluation methodology for page segmentation techniques. In Proceedings of the 13th International Conference on Document Analysis and Recognition (ICDAR), pages 281–285, 2015.

BibTeX:

@incollection{stamatopoulos_nikolaos_goal-oriented_2015,
  title = {Goal-oriented performance evaluation methodology for page segmentation techniques},
  booktitle = {Proceedings of the 13th international conference on document analysis and recognition ({ICDAR})},
  author = {Stamatopoulos, Nikolaos and Gatos, Basilis},
  year = {2015},
  pages = {281--285},
}
Strauss, T.; Weidemann, M.; Michael, J.; Leifert, G.; Grüning, T.; and Labahn, R. System Description of CITlab's Recognition & Retrieval Engine for ICDAR2017 Competition on Information Extraction in Historical Handwritten Records. CoRR, abs/1804.09943, 2018.

BibTeX:

@article{strauss_system_2018,
  title = {System {Description} of {CITlab}'s {Recognition} \& {Retrieval} {Engine} for {ICDAR2017} {Competition} on {Information} {Extraction} in {Historical} {Handwritten} {Records}},
  volume = {abs/1804.09943},
  url = {http://arxiv.org/abs/1804.09943},
  urldate = {2018-06-29},
  journal = {CoRR},
  author = {Strauss, Tobias and Weidemann, Max and Michael, Johannes and Leifert, Gundram and Grüning, Tobias and Labahn, Roger},
  year = {2018},
}
Strauß, T.; Leifert, G.; Labahn, R.; Hodel, T.; and Mühlberger, G. ICFHR2018 Competition on Automated Text Recognition on a READ Dataset. In 2018 16th International Conference on Frontiers in Handwriting Recognition (ICFHR), pages 477–482, August 2018. doi:10.1109/ICFHR-2018.2018.00089

Abstract: We summarize the results of a competition on Automated Text Recognition targeting the effective adaptation of recognition engines to essentially new data. The task consists in achieving a minimum character error rate on a previously unknown text corpus from which only a few pages are available for adjusting an already pre-trained recognition engine. This issue addresses a frequent application scenario where only a small amount of task-specific training data is available, because producing this data usually requires much effort. We present the results of five submission. They show that the task is a challenging issue but for certain documents 16 pages of transcription are sufficient to adapt a pre-trained recognition system.

BibTeX:

@inproceedings{straus_icfhr2018_2018,
  title = {{ICFHR2018} {Competition} on {Automated} {Text} {Recognition} on a {READ} {Dataset}},
  doi = {10.1109/ICFHR-2018.2018.00089},
  abstract = {We summarize the results of a competition on Automated Text Recognition targeting the effective adaptation of recognition engines to essentially new data. The task consists in achieving a minimum character error rate on a previously unknown text corpus from which only a few pages are available for adjusting an already pre-trained recognition engine. This issue addresses a frequent application scenario where only a small amount of task-specific training data is available, because producing this data usually requires much effort. We present the results of five submission. They show that the task is a challenging issue but for certain documents 16 pages of transcription are sufficient to adapt a pre-trained recognition system.},
  booktitle = {2018 16th {International} {Conference} on {Frontiers} in {Handwriting} {Recognition} ({ICFHR})},
  author = {Strauß, Tobias and Leifert, Gundram and Labahn, Roger and Hodel, Tobias and Mühlberger, Günter},
  month = aug,
  year = {2018},
  keywords = {Computational modeling, Data models, Optical imaging, Task analysis, Text recognition, Training, Training data, automated text recognition, fast adaptation, few shot learning, historical documents},
  pages = {477--482},
}
\n
\n\n
\n
\n  \n Huang, Y.\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n ICDAR 2019 Competition on Table Detection and Recognition (cTDaR).\n \n \n \n \n\n\n \n Déjean, H.; Meunier, J.; Gao, L.; Huang, Y.; Fang, Y.; Kleber, F.; and Lang, E.\n\n\n \n\n\n\n April 2019.\n \n\n\n\n
\n\n\n\n \n \n \"ICDARPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{dejean_icdar_2019,\n\ttitle = {{ICDAR} 2019 {Competition} on {Table} {Detection} and {Recognition} ({cTDaR})},\n\turl = {https://zenodo.org/record/3239032#.X1IyZdbgqrI},\n\tdoi = {10.5281/zenodo.3239032},\n\tabstract = {The aim of this competition is to evaluate the performance of state of the art methods for table detection (TRACK A) and table recognition (TRACK B). For the first track, document images containing one or several tables are provided. For TRACK B two subtracks exist: the first subtrack (B.1) provides the table region. Thus, only the table structure recognition must be performed. The second subtrack (B.2) provides no a-priori information. This means, the table region and table structure detection has to be done. The Ground Truth is provided in a similar format as for the ICDAR 2013 competition (see [2]): {\\textless}?xml version="1.0" encoding="UTF-8"?{\\textgreater} {\\textless}document filename='filename.jpg'{\\textgreater}     {\\textless}table id='Table\\_1540517170416\\_3'{\\textgreater}          {\\textless}Coords points="180,160 4354,160 4354,3287 180,3287"/{\\textgreater}        {\\textless}cell id='TableCell\\_1540517477147\\_58' start-row='0' start-col='0' end-row='1' end-col='2'{\\textgreater}            {\\textless}Coords points="180,160 177,456 614,456 615,163"/{\\textgreater}        {\\textless}/cell{\\textgreater}         ...     {\\textless}/table{\\textgreater}     ... {\\textless}/document{\\textgreater}   The difference to Gobel et al. [2] is the Coords tag which defines a table/cell as a polygon specified by a list of coordinates. For B.1 the table and its coordinates is given together with the input image. Important Note: For the modern dataset, the convex hull of the content describes a cell region. For the historical dataset, it is requested that the output region of a cell is the cell boundary. This is necessary due to the characteristics of handwritten text, which is often overlapping with different cells. See also: http://sac.founderit.com/tasks.html The evaluation tool is available at github: https://github.com/cndplab-founder/ctdar\\_measurement\\_tool},\n\turldate = {2020-09-04},\n\tpublisher = {Zenodo},\n\tauthor = {Déjean, Hervé and Meunier, Jean-Luc and Gao, Liangcai and Huang, Yilun and Fang, Yu and Kleber, Florian and Lang, Eva-Maria},\n\tmonth = apr,\n\tyear = {2019},\n}\n\n
\n
\n\n\n
\n The aim of this competition is to evaluate the performance of state of the art methods for table detection (TRACK A) and table recognition (TRACK B). For the first track, document images containing one or several tables are provided. For TRACK B two subtracks exist: the first subtrack (B.1) provides the table region. Thus, only the table structure recognition must be performed. The second subtrack (B.2) provides no a-priori information. This means, the table region and table structure detection has to be done. The Ground Truth is provided in a similar format as for the ICDAR 2013 competition (see [2]): \\textless?xml version=\"1.0\" encoding=\"UTF-8\"?\\textgreater \\textlessdocument filename='filename.jpg'\\textgreater     \\textlesstable id='Table_1540517170416_3'\\textgreater          \\textlessCoords points=\"180,160 4354,160 4354,3287 180,3287\"/\\textgreater        \\textlesscell id='TableCell_1540517477147_58' start-row='0' start-col='0' end-row='1' end-col='2'\\textgreater            \\textlessCoords points=\"180,160 177,456 614,456 615,163\"/\\textgreater        \\textless/cell\\textgreater         ...     \\textless/table\\textgreater     ... \\textless/document\\textgreater   The difference to Gobel et al. [2] is the Coords tag which defines a table/cell as a polygon specified by a list of coordinates. For B.1 the table and its coordinates is given together with the input image. Important Note: For the modern dataset, the convex hull of the content describes a cell region. For the historical dataset, it is requested that the output region of a cell is the cell boundary. This is necessary due to the characteristics of handwritten text, which is often overlapping with different cells. See also: http://sac.founderit.com/tasks.html The evaluation tool is available at github: https://github.com/cndplab-founder/ctdar_measurement_tool\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n Kleber, F.\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n ICDAR 2019 Competition on Table Detection and Recognition (cTDaR).\n \n \n \n \n\n\n \n Déjean, H.; Meunier, J.; Gao, L.; Huang, Y.; Fang, Y.; Kleber, F.; and Lang, E.\n\n\n \n\n\n\n April 2019.\n \n\n\n\n
\n\n\n\n \n \n \"ICDARPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{dejean_icdar_2019,\n\ttitle = {{ICDAR} 2019 {Competition} on {Table} {Detection} and {Recognition} ({cTDaR})},\n\turl = {https://zenodo.org/record/3239032#.X1IyZdbgqrI},\n\tdoi = {10.5281/zenodo.3239032},\n\tabstract = {The aim of this competition is to evaluate the performance of state of the art methods for table detection (TRACK A) and table recognition (TRACK B). For the first track, document images containing one or several tables are provided. For TRACK B two subtracks exist: the first subtrack (B.1) provides the table region. Thus, only the table structure recognition must be performed. The second subtrack (B.2) provides no a-priori information. This means, the table region and table structure detection has to be done. The Ground Truth is provided in a similar format as for the ICDAR 2013 competition (see [2]): {\\textless}?xml version="1.0" encoding="UTF-8"?{\\textgreater} {\\textless}document filename='filename.jpg'{\\textgreater}     {\\textless}table id='Table\\_1540517170416\\_3'{\\textgreater}          {\\textless}Coords points="180,160 4354,160 4354,3287 180,3287"/{\\textgreater}        {\\textless}cell id='TableCell\\_1540517477147\\_58' start-row='0' start-col='0' end-row='1' end-col='2'{\\textgreater}            {\\textless}Coords points="180,160 177,456 614,456 615,163"/{\\textgreater}        {\\textless}/cell{\\textgreater}         ...     {\\textless}/table{\\textgreater}     ... {\\textless}/document{\\textgreater}   The difference to Gobel et al. [2] is the Coords tag which defines a table/cell as a polygon specified by a list of coordinates. For B.1 the table and its coordinates is given together with the input image. Important Note: For the modern dataset, the convex hull of the content describes a cell region. For the historical dataset, it is requested that the output region of a cell is the cell boundary. This is necessary due to the characteristics of handwritten text, which is often overlapping with different cells. See also: http://sac.founderit.com/tasks.html The evaluation tool is available at github: https://github.com/cndplab-founder/ctdar\\_measurement\\_tool},\n\turldate = {2020-09-04},\n\tpublisher = {Zenodo},\n\tauthor = {Déjean, Hervé and Meunier, Jean-Luc and Gao, Liangcai and Huang, Yilun and Fang, Yu and Kleber, Florian and Lang, Eva-Maria},\n\tmonth = apr,\n\tyear = {2019},\n}\n\n
\n
\n\n\n
\n The aim of this competition is to evaluate the performance of state of the art methods for table detection (TRACK A) and table recognition (TRACK B). For the first track, document images containing one or several tables are provided. For TRACK B two subtracks exist: the first subtrack (B.1) provides the table region. Thus, only the table structure recognition must be performed. The second subtrack (B.2) provides no a-priori information. This means, the table region and table structure detection has to be done. The Ground Truth is provided in a similar format as for the ICDAR 2013 competition (see [2]): \\textless?xml version=\"1.0\" encoding=\"UTF-8\"?\\textgreater \\textlessdocument filename='filename.jpg'\\textgreater     \\textlesstable id='Table_1540517170416_3'\\textgreater          \\textlessCoords points=\"180,160 4354,160 4354,3287 180,3287\"/\\textgreater        \\textlesscell id='TableCell_1540517477147_58' start-row='0' start-col='0' end-row='1' end-col='2'\\textgreater            \\textlessCoords points=\"180,160 177,456 614,456 615,163\"/\\textgreater        \\textless/cell\\textgreater         ...     \\textless/table\\textgreater     ... \\textless/document\\textgreater   The difference to Gobel et al. [2] is the Coords tag which defines a table/cell as a polygon specified by a list of coordinates. For B.1 the table and its coordinates is given together with the input image. Important Note: For the modern dataset, the convex hull of the content describes a cell region. For the historical dataset, it is requested that the output region of a cell is the cell boundary. This is necessary due to the characteristics of handwritten text, which is often overlapping with different cells. See also: http://sac.founderit.com/tasks.html The evaluation tool is available at github: https://github.com/cndplab-founder/ctdar_measurement_tool\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n READ-BAD: A New Dataset and Evaluation Scheme for Baseline Detection in Archival Documents.\n \n \n \n \n\n\n \n Gruning, T.; Labahn, R.; Diem, M.; Kleber, F.; and Fiel, S.\n\n\n \n\n\n\n In 13th IAPR International Workshop on Document Analysis Systems, DAS 2018, Vienna, Austria, April 24-27, 2018, pages 351–356, 2018. IEEE Computer Society\n \n\n\n\n
\n\n\n\n \n \n \"READ-BAD:Paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{gruning_read-bad:_2018,\n\ttitle = {{READ}-{BAD}: {A} {New} {Dataset} and {Evaluation} {Scheme} for {Baseline} {Detection} in {Archival} {Documents}},\n\tisbn = {978-1-5386-3346-5},\n\tshorttitle = {{READ}-{BAD}},\n\turl = {http://doi.ieeecomputersociety.org/10.1109/DAS.2018.38},\n\tdoi = {10.1109/DAS.2018.38},\n\turldate = {2018-06-29},\n\tbooktitle = {13th {IAPR} {International} {Workshop} on {Document} {Analysis} {Systems}, {DAS} 2018, {Vienna}, {Austria}, {April} 24-27, 2018},\n\tpublisher = {IEEE Computer Society},\n\tauthor = {Gruning, Tobias and Labahn, Roger and Diem, Markus and Kleber, Florian and Fiel, Stefan},\n\tyear = {2018},\n\tpages = {351--356},\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n Labahn, R.\n \n \n (3)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n ICFHR2018 Competition on Automated Text Recognition on a READ Dataset.\n \n \n \n\n\n \n Strauß, T.; Leifert, G.; Labahn, R.; Hodel, T.; and Mühlberger, G.\n\n\n \n\n\n\n In 2018 16th International Conference on Frontiers in Handwriting Recognition (ICFHR), pages 477–482, August 2018. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{straus_icfhr2018_2018,\n\ttitle = {{ICFHR2018} {Competition} on {Automated} {Text} {Recognition} on a {READ} {Dataset}},\n\tdoi = {10.1109/ICFHR-2018.2018.00089},\n\tabstract = {We summarize the results of a competition on Automated Text Recognition targeting the effective adaptation of recognition engines to essentially new data. The task consists in achieving a minimum character error rate on a previously unknown text corpus from which only a few pages are available for adjusting an already pre-trained recognition engine. This issue addresses a frequent application scenario where only a small amount of task-specific training data is available, because producing this data usually requires much effort. We present the results of five submission. They show that the task is a challenging issue but for certain documents 16 pages of transcription are sufficient to adapt a pre-trained recognition system.},\n\tbooktitle = {2018 16th {International} {Conference} on {Frontiers} in {Handwriting} {Recognition} ({ICFHR})},\n\tauthor = {Strauß, Tobias and Leifert, Gundram and Labahn, Roger and Hodel, Tobias and Mühlberger, Günter},\n\tmonth = aug,\n\tyear = {2018},\n\tkeywords = {Computational modeling, Data models, Optical imaging, Task analysis, Text recognition, Training, Training data, automated text recognition, fast adaptation, few shot learning, historical documents},\n\tpages = {477--482},\n}\n\n
\n
\n\n\n
\n We summarize the results of a competition on Automated Text Recognition targeting the effective adaptation of recognition engines to essentially new data. The task consists in achieving a minimum character error rate on a previously unknown text corpus from which only a few pages are available for adjusting an already pre-trained recognition engine. This issue addresses a frequent application scenario where only a small amount of task-specific training data is available, because producing this data usually requires much effort. We present the results of five submission. They show that the task is a challenging issue but for certain documents 16 pages of transcription are sufficient to adapt a pre-trained recognition system.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n System Description of CITlab's Recognition & Retrieval Engine for ICDAR2017 Competition on Information Extraction in Historical Handwritten Records.\n \n \n \n \n\n\n \n Strauss, T.; Weidemann, M.; Michael, J.; Leifert, G.; Grüning, T.; and Labahn, R.\n\n\n \n\n\n\n CoRR, abs/1804.09943. 2018.\n \n\n\n\n
\n\n\n\n \n \n \"SystemPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{strauss_system_2018,\n\ttitle = {System {Description} of {CITlab}'s {Recognition} \\& {Retrieval} {Engine} for {ICDAR2017} {Competition} on {Information} {Extraction} in {Historical} {Handwritten} {Records}},\n\tvolume = {abs/1804.09943},\n\turl = {http://arxiv.org/abs/1804.09943},\n\turldate = {2018-06-29},\n\tjournal = {CoRR},\n\tauthor = {Strauss, Tobias and Weidemann, Max and Michael, Johannes and Leifert, Gundram and Grüning, Tobias and Labahn, Roger},\n\tyear = {2018},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n READ-BAD: A New Dataset and Evaluation Scheme for Baseline Detection in Archival Documents.\n \n \n \n \n\n\n \n Gruning, T.; Labahn, R.; Diem, M.; Kleber, F.; and Fiel, S.\n\n\n \n\n\n\n In 13th IAPR International Workshop on Document Analysis Systems, DAS 2018, Vienna, Austria, April 24-27, 2018, pages 351–356, 2018. IEEE Computer Society\n \n\n\n\n
\n\n\n\n \n \n \"READ-BAD:Paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{gruning_read-bad:_2018,\n\ttitle = {{READ}-{BAD}: {A} {New} {Dataset} and {Evaluation} {Scheme} for {Baseline} {Detection} in {Archival} {Documents}},\n\tisbn = {978-1-5386-3346-5},\n\tshorttitle = {{READ}-{BAD}},\n\turl = {http://doi.ieeecomputersociety.org/10.1109/DAS.2018.38},\n\tdoi = {10.1109/DAS.2018.38},\n\turldate = {2018-06-29},\n\tbooktitle = {13th {IAPR} {International} {Workshop} on {Document} {Analysis} {Systems}, {DAS} 2018, {Vienna}, {Austria}, {April} 24-27, 2018},\n\tpublisher = {IEEE Computer Society},\n\tauthor = {Gruning, Tobias and Labahn, Roger and Diem, Markus and Kleber, Florian and Fiel, Stefan},\n\tyear = {2018},\n\tpages = {351--356},\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n Lang, E.\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n ICDAR 2019 Competition on Table Detection and Recognition (cTDaR).\n \n \n \n \n\n\n \n Déjean, H.; Meunier, J.; Gao, L.; Huang, Y.; Fang, Y.; Kleber, F.; and Lang, E.\n\n\n \n\n\n\n April 2019.\n \n\n\n\n
\n\n\n\n \n \n \"ICDARPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{dejean_icdar_2019,\n\ttitle = {{ICDAR} 2019 {Competition} on {Table} {Detection} and {Recognition} ({cTDaR})},\n\turl = {https://zenodo.org/record/3239032#.X1IyZdbgqrI},\n\tdoi = {10.5281/zenodo.3239032},\n\tabstract = {The aim of this competition is to evaluate the performance of state of the art methods for table detection (TRACK A) and table recognition (TRACK B). For the first track, document images containing one or several tables are provided. For TRACK B two subtracks exist: the first subtrack (B.1) provides the table region. Thus, only the table structure recognition must be performed. The second subtrack (B.2) provides no a-priori information. This means, the table region and table structure detection has to be done. The Ground Truth is provided in a similar format as for the ICDAR 2013 competition (see [2]): {\\textless}?xml version="1.0" encoding="UTF-8"?{\\textgreater} {\\textless}document filename='filename.jpg'{\\textgreater}     {\\textless}table id='Table\\_1540517170416\\_3'{\\textgreater}          {\\textless}Coords points="180,160 4354,160 4354,3287 180,3287"/{\\textgreater}        {\\textless}cell id='TableCell\\_1540517477147\\_58' start-row='0' start-col='0' end-row='1' end-col='2'{\\textgreater}            {\\textless}Coords points="180,160 177,456 614,456 615,163"/{\\textgreater}        {\\textless}/cell{\\textgreater}         ...     {\\textless}/table{\\textgreater}     ... {\\textless}/document{\\textgreater}   The difference to Gobel et al. [2] is the Coords tag which defines a table/cell as a polygon specified by a list of coordinates. For B.1 the table and its coordinates is given together with the input image. Important Note: For the modern dataset, the convex hull of the content describes a cell region. For the historical dataset, it is requested that the output region of a cell is the cell boundary. This is necessary due to the characteristics of handwritten text, which is often overlapping with different cells. See also: http://sac.founderit.com/tasks.html The evaluation tool is available at github: https://github.com/cndplab-founder/ctdar\\_measurement\\_tool},\n\turldate = {2020-09-04},\n\tpublisher = {Zenodo},\n\tauthor = {Déjean, Hervé and Meunier, Jean-Luc and Gao, Liangcai and Huang, Yilun and Fang, Yu and Kleber, Florian and Lang, Eva-Maria},\n\tmonth = apr,\n\tyear = {2019},\n}\n\n
\n
\n\n\n
\n The aim of this competition is to evaluate the performance of state-of-the-art methods for table detection (TRACK A) and table recognition (TRACK B). For the first track, document images containing one or several tables are provided. For TRACK B two subtracks exist: the first subtrack (B.1) provides the table region. Thus, only the table structure recognition must be performed. The second subtrack (B.2) provides no a-priori information. This means, the table region and table structure detection has to be done. The Ground Truth is provided in a similar format as for the ICDAR 2013 competition (see [2]): <?xml version=\"1.0\" encoding=\"UTF-8\"?> <document filename='filename.jpg'>     <table id='Table_1540517170416_3'>          <Coords points=\"180,160 4354,160 4354,3287 180,3287\"/>        <cell id='TableCell_1540517477147_58' start-row='0' start-col='0' end-row='1' end-col='2'>            <Coords points=\"180,160 177,456 614,456 615,163\"/>        </cell>         ...     </table>     ... </document>   The difference to Gobel et al. [2] is the Coords tag, which defines a table/cell as a polygon specified by a list of coordinates. For B.1 the table and its coordinates are given together with the input image. Important Note: For the modern dataset, the convex hull of the content describes a cell region. For the historical dataset, it is requested that the output region of a cell is the cell boundary. This is necessary due to the characteristics of handwritten text, which often overlaps with different cells. See also: http://sac.founderit.com/tasks.html The evaluation tool is available on GitHub: https://github.com/cndplab-founder/ctdar_measurement_tool\n
\n\n\n
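The ground-truth snippet quoted in the abstract above is plain XML once the escaped angle brackets are restored. For readers who want to work with cTDaR-style ground truth, a minimal sketch of reading the table and cell polygons with Python's standard library follows; the element and attribute names are taken from the snippet above, the file path is a placeholder, and exact schema details should be checked against the competition's evaluation tool linked in the abstract.

# Minimal sketch: read table/cell polygons from a cTDaR-style ground-truth file.
# Assumes the document/table/cell/Coords structure shown in the abstract above.
import xml.etree.ElementTree as ET

def parse_points(points: str):
    """Turn '180,160 4354,160 ...' into a list of (x, y) integer tuples."""
    return [tuple(int(v) for v in p.split(",")) for p in points.split()]

def read_ground_truth(path: str):
    root = ET.parse(path).getroot()            # <document filename='...'>
    tables = []
    for table in root.findall("table"):
        table_poly = parse_points(table.find("Coords").get("points"))
        cells = []
        for cell in table.findall("cell"):
            cells.append({
                "start_row": int(cell.get("start-row")),
                "start_col": int(cell.get("start-col")),
                "end_row": int(cell.get("end-row")),
                "end_col": int(cell.get("end-col")),
                "polygon": parse_points(cell.find("Coords").get("points")),
            })
        tables.append({"id": table.get("id"), "polygon": table_poly, "cells": cells})
    return tables

# Placeholder path, for illustration only:
# tables = read_ground_truth("ground_truth/sample.xml")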
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n Leifert, G.\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n ICFHR2018 Competition on Automated Text Recognition on a READ Dataset.\n \n \n \n\n\n \n Strauß, T.; Leifert, G.; Labahn, R.; Hodel, T.; and Mühlberger, G.\n\n\n \n\n\n\n In 2018 16th International Conference on Frontiers in Handwriting Recognition (ICFHR), pages 477–482, August 2018. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{straus_icfhr2018_2018,\n\ttitle = {{ICFHR2018} {Competition} on {Automated} {Text} {Recognition} on a {READ} {Dataset}},\n\tdoi = {10.1109/ICFHR-2018.2018.00089},\n\tabstract = {We summarize the results of a competition on Automated Text Recognition targeting the effective adaptation of recognition engines to essentially new data. The task consists in achieving a minimum character error rate on a previously unknown text corpus from which only a few pages are available for adjusting an already pre-trained recognition engine. This issue addresses a frequent application scenario where only a small amount of task-specific training data is available, because producing this data usually requires much effort. We present the results of five submission. They show that the task is a challenging issue but for certain documents 16 pages of transcription are sufficient to adapt a pre-trained recognition system.},\n\tbooktitle = {2018 16th {International} {Conference} on {Frontiers} in {Handwriting} {Recognition} ({ICFHR})},\n\tauthor = {Strauß, Tobias and Leifert, Gundram and Labahn, Roger and Hodel, Tobias and Mühlberger, Günter},\n\tmonth = aug,\n\tyear = {2018},\n\tkeywords = {Computational modeling, Data models, Optical imaging, Task analysis, Text recognition, Training, Training data, automated text recognition, fast adaptation, few shot learning, historical documents},\n\tpages = {477--482},\n}\n\n
\n
\n\n\n
\n We summarize the results of a competition on Automated Text Recognition targeting the effective adaptation of recognition engines to essentially new data. The task consists in achieving a minimum character error rate on a previously unknown text corpus from which only a few pages are available for adjusting an already pre-trained recognition engine. This issue addresses a frequent application scenario where only a small amount of task-specific training data is available, because producing this data usually requires much effort. We present the results of five submissions. They show that the task is a challenging one, but that for certain documents 16 pages of transcription are sufficient to adapt a pre-trained recognition system.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n System Description of CITlab's Recognition & Retrieval Engine for ICDAR2017 Competition on Information Extraction in Historical Handwritten Records.\n \n \n \n \n\n\n \n Strauss, T.; Weidemann, M.; Michael, J.; Leifert, G.; Grüning, T.; and Labahn, R.\n\n\n \n\n\n\n CoRR, abs/1804.09943. 2018.\n \n\n\n\n
\n\n\n\n \n \n \"SystemPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{strauss_system_2018,\n\ttitle = {System {Description} of {CITlab}'s {Recognition} \\& {Retrieval} {Engine} for {ICDAR2017} {Competition} on {Information} {Extraction} in {Historical} {Handwritten} {Records}},\n\tvolume = {abs/1804.09943},\n\turl = {http://arxiv.org/abs/1804.09943},\n\turldate = {2018-06-29},\n\tjournal = {CoRR},\n\tauthor = {Strauss, Tobias and Weidemann, Max and Michael, Johannes and Leifert, Gundram and Grüning, Tobias and Labahn, Roger},\n\tyear = {2018},\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n Meunier, J.\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n ICDAR 2019 Competition on Table Detection and Recognition (cTDaR).\n \n \n \n \n\n\n \n Déjean, H.; Meunier, J.; Gao, L.; Huang, Y.; Fang, Y.; Kleber, F.; and Lang, E.\n\n\n \n\n\n\n April 2019.\n \n\n\n\n
\n\n\n\n \n \n \"ICDARPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{dejean_icdar_2019,\n\ttitle = {{ICDAR} 2019 {Competition} on {Table} {Detection} and {Recognition} ({cTDaR})},\n\turl = {https://zenodo.org/record/3239032#.X1IyZdbgqrI},\n\tdoi = {10.5281/zenodo.3239032},\n\tabstract = {The aim of this competition is to evaluate the performance of state of the art methods for table detection (TRACK A) and table recognition (TRACK B). For the first track, document images containing one or several tables are provided. For TRACK B two subtracks exist: the first subtrack (B.1) provides the table region. Thus, only the table structure recognition must be performed. The second subtrack (B.2) provides no a-priori information. This means, the table region and table structure detection has to be done. The Ground Truth is provided in a similar format as for the ICDAR 2013 competition (see [2]): {\\textless}?xml version="1.0" encoding="UTF-8"?{\\textgreater} {\\textless}document filename='filename.jpg'{\\textgreater}     {\\textless}table id='Table\\_1540517170416\\_3'{\\textgreater}          {\\textless}Coords points="180,160 4354,160 4354,3287 180,3287"/{\\textgreater}        {\\textless}cell id='TableCell\\_1540517477147\\_58' start-row='0' start-col='0' end-row='1' end-col='2'{\\textgreater}            {\\textless}Coords points="180,160 177,456 614,456 615,163"/{\\textgreater}        {\\textless}/cell{\\textgreater}         ...     {\\textless}/table{\\textgreater}     ... {\\textless}/document{\\textgreater}   The difference to Gobel et al. [2] is the Coords tag which defines a table/cell as a polygon specified by a list of coordinates. For B.1 the table and its coordinates is given together with the input image. Important Note: For the modern dataset, the convex hull of the content describes a cell region. For the historical dataset, it is requested that the output region of a cell is the cell boundary. This is necessary due to the characteristics of handwritten text, which is often overlapping with different cells. See also: http://sac.founderit.com/tasks.html The evaluation tool is available at github: https://github.com/cndplab-founder/ctdar\\_measurement\\_tool},\n\turldate = {2020-09-04},\n\tpublisher = {Zenodo},\n\tauthor = {Déjean, Hervé and Meunier, Jean-Luc and Gao, Liangcai and Huang, Yilun and Fang, Yu and Kleber, Florian and Lang, Eva-Maria},\n\tmonth = apr,\n\tyear = {2019},\n}\n\n
\n
\n\n\n
\n The aim of this competition is to evaluate the performance of state-of-the-art methods for table detection (TRACK A) and table recognition (TRACK B). For the first track, document images containing one or several tables are provided. For TRACK B two subtracks exist: the first subtrack (B.1) provides the table region. Thus, only the table structure recognition must be performed. The second subtrack (B.2) provides no a-priori information. This means, the table region and table structure detection has to be done. The Ground Truth is provided in a similar format as for the ICDAR 2013 competition (see [2]): <?xml version=\"1.0\" encoding=\"UTF-8\"?> <document filename='filename.jpg'>     <table id='Table_1540517170416_3'>          <Coords points=\"180,160 4354,160 4354,3287 180,3287\"/>        <cell id='TableCell_1540517477147_58' start-row='0' start-col='0' end-row='1' end-col='2'>            <Coords points=\"180,160 177,456 614,456 615,163\"/>        </cell>         ...     </table>     ... </document>   The difference to Gobel et al. [2] is the Coords tag, which defines a table/cell as a polygon specified by a list of coordinates. For B.1 the table and its coordinates are given together with the input image. Important Note: For the modern dataset, the convex hull of the content describes a cell region. For the historical dataset, it is requested that the output region of a cell is the cell boundary. This is necessary due to the characteristics of handwritten text, which often overlaps with different cells. See also: http://sac.founderit.com/tasks.html The evaluation tool is available on GitHub: https://github.com/cndplab-founder/ctdar_measurement_tool\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n Michael, J.\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n System Description of CITlab's Recognition & Retrieval Engine for ICDAR2017 Competition on Information Extraction in Historical Handwritten Records.\n \n \n \n \n\n\n \n Strauss, T.; Weidemann, M.; Michael, J.; Leifert, G.; Grüning, T.; and Labahn, R.\n\n\n \n\n\n\n CoRR, abs/1804.09943. 2018.\n \n\n\n\n
\n\n\n\n \n \n \"SystemPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{strauss_system_2018,\n\ttitle = {System {Description} of {CITlab}'s {Recognition} \\& {Retrieval} {Engine} for {ICDAR2017} {Competition} on {Information} {Extraction} in {Historical} {Handwritten} {Records}},\n\tvolume = {abs/1804.09943},\n\turl = {http://arxiv.org/abs/1804.09943},\n\turldate = {2018-06-29},\n\tjournal = {CoRR},\n\tauthor = {Strauss, Tobias and Weidemann, Max and Michael, Johannes and Leifert, Gundram and Grüning, Tobias and Labahn, Roger},\n\tyear = {2018},\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n Mühlberger, G.\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n ICFHR2018 Competition on Automated Text Recognition on a READ Dataset.\n \n \n \n\n\n \n Strauß, T.; Leifert, G.; Labahn, R.; Hodel, T.; and Mühlberger, G.\n\n\n \n\n\n\n In 2018 16th International Conference on Frontiers in Handwriting Recognition (ICFHR), pages 477–482, August 2018. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{straus_icfhr2018_2018,\n\ttitle = {{ICFHR2018} {Competition} on {Automated} {Text} {Recognition} on a {READ} {Dataset}},\n\tdoi = {10.1109/ICFHR-2018.2018.00089},\n\tabstract = {We summarize the results of a competition on Automated Text Recognition targeting the effective adaptation of recognition engines to essentially new data. The task consists in achieving a minimum character error rate on a previously unknown text corpus from which only a few pages are available for adjusting an already pre-trained recognition engine. This issue addresses a frequent application scenario where only a small amount of task-specific training data is available, because producing this data usually requires much effort. We present the results of five submission. They show that the task is a challenging issue but for certain documents 16 pages of transcription are sufficient to adapt a pre-trained recognition system.},\n\tbooktitle = {2018 16th {International} {Conference} on {Frontiers} in {Handwriting} {Recognition} ({ICFHR})},\n\tauthor = {Strauß, Tobias and Leifert, Gundram and Labahn, Roger and Hodel, Tobias and Mühlberger, Günter},\n\tmonth = aug,\n\tyear = {2018},\n\tkeywords = {Computational modeling, Data models, Optical imaging, Task analysis, Text recognition, Training, Training data, automated text recognition, fast adaptation, few shot learning, historical documents},\n\tpages = {477--482},\n}\n\n
\n
\n\n\n
\n We summarize the results of a competition on Automated Text Recognition targeting the effective adaptation of recognition engines to essentially new data. The task consists in achieving a minimum character error rate on a previously unknown text corpus from which only a few pages are available for adjusting an already pre-trained recognition engine. This issue addresses a frequent application scenario where only a small amount of task-specific training data is available, because producing this data usually requires much effort. We present the results of five submissions. They show that the task is a challenging one, but that for certain documents 16 pages of transcription are sufficient to adapt a pre-trained recognition system.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n Neudecker, C.\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n A survey of OCR evaluation tools and metrics.\n \n \n \n \n\n\n \n Neudecker, C.; Baierer, K.; Gerber, M.; Clausner, C.; Antonacopoulos, A.; and Pletschacher, S.\n\n\n \n\n\n\n In The 6th International Workshop on Historical Document Imaging and Processing, pages 13–18, Lausanne Switzerland, September 2021. ACM\n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{neudecker_survey_2021,\n\taddress = {Lausanne Switzerland},\n\ttitle = {A survey of {OCR} evaluation tools and metrics},\n\tisbn = {978-1-4503-8690-6},\n\turl = {https://dl.acm.org/doi/10.1145/3476887.3476888},\n\tdoi = {10.1145/3476887.3476888},\n\tabstract = {The millions of pages of historical documents that are digitized in libraries are increasingly used in contexts that have more specific requirements for OCR quality than keyword search. How to comprehensively, efficiently and reliably assess the quality of OCR results against the background of mass digitization, when ground truth can only ever be produced for very small numbers? Due to gaps in specifications, results from OCR evaluation tools can return different results, and due to differences in implementation, even commonly used error rates are often not directly comparable. OCR evaluation metrics and sampling methods are also not sufficient where they do not take into account the accuracy of layout analysis, since for advanced use cases like Natural Language Processing or the Digital Humanities, accurate layout analysis and detection of the reading order are crucial. We provide an overview of OCR evaluation metrics and tools, describe two advanced use cases for OCR results, and perform an OCR evaluation experiment with multiple evaluation tools and different metrics for two distinct datasets. We analyze the differences and commonalities in light of the presented use cases and suggest areas for future work.},\n\tlanguage = {en},\n\turldate = {2023-06-21},\n\tbooktitle = {The 6th {International} {Workshop} on {Historical} {Document} {Imaging} and {Processing}},\n\tpublisher = {ACM},\n\tauthor = {Neudecker, Clemens and Baierer, Konstantin and Gerber, Mike and Clausner, Christian and Antonacopoulos, Apostolos and Pletschacher, Stefan},\n\tmonth = sep,\n\tyear = {2021},\n\tpages = {13--18},\n}\n\n
\n
\n\n\n
\n The millions of pages of historical documents that are digitized in libraries are increasingly used in contexts that have more specific requirements for OCR quality than keyword search. How to comprehensively, efficiently and reliably assess the quality of OCR results against the background of mass digitization, when ground truth can only ever be produced for very small numbers? Due to gaps in specifications, results from OCR evaluation tools can return different results, and due to differences in implementation, even commonly used error rates are often not directly comparable. OCR evaluation metrics and sampling methods are also not sufficient where they do not take into account the accuracy of layout analysis, since for advanced use cases like Natural Language Processing or the Digital Humanities, accurate layout analysis and detection of the reading order are crucial. We provide an overview of OCR evaluation metrics and tools, describe two advanced use cases for OCR results, and perform an OCR evaluation experiment with multiple evaluation tools and different metrics for two distinct datasets. We analyze the differences and commonalities in light of the presented use cases and suggest areas for future work.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n Pletschacher, S.\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n A survey of OCR evaluation tools and metrics.\n \n \n \n \n\n\n \n Neudecker, C.; Baierer, K.; Gerber, M.; Clausner, C.; Antonacopoulos, A.; and Pletschacher, S.\n\n\n \n\n\n\n In The 6th International Workshop on Historical Document Imaging and Processing, pages 13–18, Lausanne Switzerland, September 2021. ACM\n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{neudecker_survey_2021,\n\taddress = {Lausanne Switzerland},\n\ttitle = {A survey of {OCR} evaluation tools and metrics},\n\tisbn = {978-1-4503-8690-6},\n\turl = {https://dl.acm.org/doi/10.1145/3476887.3476888},\n\tdoi = {10.1145/3476887.3476888},\n\tabstract = {The millions of pages of historical documents that are digitized in libraries are increasingly used in contexts that have more specific requirements for OCR quality than keyword search. How to comprehensively, efficiently and reliably assess the quality of OCR results against the background of mass digitization, when ground truth can only ever be produced for very small numbers? Due to gaps in specifications, results from OCR evaluation tools can return different results, and due to differences in implementation, even commonly used error rates are often not directly comparable. OCR evaluation metrics and sampling methods are also not sufficient where they do not take into account the accuracy of layout analysis, since for advanced use cases like Natural Language Processing or the Digital Humanities, accurate layout analysis and detection of the reading order are crucial. We provide an overview of OCR evaluation metrics and tools, describe two advanced use cases for OCR results, and perform an OCR evaluation experiment with multiple evaluation tools and different metrics for two distinct datasets. We analyze the differences and commonalities in light of the presented use cases and suggest areas for future work.},\n\tlanguage = {en},\n\turldate = {2023-06-21},\n\tbooktitle = {The 6th {International} {Workshop} on {Historical} {Document} {Imaging} and {Processing}},\n\tpublisher = {ACM},\n\tauthor = {Neudecker, Clemens and Baierer, Konstantin and Gerber, Mike and Clausner, Christian and Antonacopoulos, Apostolos and Pletschacher, Stefan},\n\tmonth = sep,\n\tyear = {2021},\n\tpages = {13--18},\n}\n\n
\n
\n\n\n
\n The millions of pages of historical documents that are digitized in libraries are increasingly used in contexts that have more specific requirements for OCR quality than keyword search. How to comprehensively, efficiently and reliably assess the quality of OCR results against the background of mass digitization, when ground truth can only ever be produced for very small numbers? Due to gaps in specifications, results from OCR evaluation tools can return different results, and due to differences in implementation, even commonly used error rates are often not directly comparable. OCR evaluation metrics and sampling methods are also not sufficient where they do not take into account the accuracy of layout analysis, since for advanced use cases like Natural Language Processing or the Digital Humanities, accurate layout analysis and detection of the reading order are crucial. We provide an overview of OCR evaluation metrics and tools, describe two advanced use cases for OCR results, and perform an OCR evaluation experiment with multiple evaluation tools and different metrics for two distinct datasets. We analyze the differences and commonalities in light of the presented use cases and suggest areas for future work.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The PAGE (Page Analysis and Ground-Truth Elements) Format Framework.\n \n \n \n \n\n\n \n Pletschacher, S.; and Antonacopoulos, A.\n\n\n \n\n\n\n In pages 257–260, August 2010. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{pletschacher_page_2010,\n\ttitle = {The {PAGE} ({Page} {Analysis} and {Ground}-{Truth} {Elements}) {Format} {Framework}},\n\tisbn = {978-1-4244-7542-1},\n\turl = {http://ieeexplore.ieee.org/document/5597587/},\n\tdoi = {10.1109/ICPR.2010.72},\n\turldate = {2018-06-22},\n\tpublisher = {IEEE},\n\tauthor = {Pletschacher, Stefan and Antonacopoulos, Apostolos},\n\tmonth = aug,\n\tyear = {2010},\n\tpages = {257--260},\n}\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n Smith, B.\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Digital performance: a history of new media in theater, dance, performance art, and installation.\n \n \n \n\n\n \n Dixon, S.; and Smith, B.\n\n\n \n\n\n\n Leonardo series, The MIT Press, Cambridge, Massachusetts; London, England, [Paperback edition], 2015.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@book{dixon_digital_2015,\n\taddress = {Cambridge, Massachusetts; London, England},\n\tedition = {[Paperback edition]},\n\tseries = {Leonardo},\n\ttitle = {Digital performance: a history of new media in theater, dance, performance art, and installation},\n\tisbn = {978-0-262-52752-1},\n\tshorttitle = {Digital performance},\n\tpublisher = {The MIT Press},\n\tauthor = {Dixon, Steve and Smith, Barry},\n\tyear = {2015},\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n Stamatopoulos, N.\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Goal-oriented performance evaluation methodology for page segmentation techniques.\n \n \n \n\n\n \n Stamatopoulos, N.; and Gatos, B.\n\n\n \n\n\n\n In Proceedings of the 13th international conference on document analysis and recognition (ICDAR), pages 281–285. 2015.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{stamatopoulos_nikolaos_goal-oriented_2015,\n\ttitle = {Goal-oriented performance evaluation methodology for page segmentation techniques},\n\tbooktitle = {Proceedings of the 13th international conference on document analysis and recognition ({ICDAR})},\n\tauthor = {Stamatopoulos, Nikolaos and Gatos, Basilis},\n\tyear = {2015},\n\tpages = {281--285},\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n Strauss, T.\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n System Description of CITlab's Recognition & Retrieval Engine for ICDAR2017 Competition on Information Extraction in Historical Handwritten Records.\n \n \n \n \n\n\n \n Strauss, T.; Weidemann, M.; Michael, J.; Leifert, G.; Grüning, T.; and Labahn, R.\n\n\n \n\n\n\n CoRR, abs/1804.09943. 2018.\n \n\n\n\n
\n\n\n\n \n \n \"SystemPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{strauss_system_2018,\n\ttitle = {System {Description} of {CITlab}'s {Recognition} \\& {Retrieval} {Engine} for {ICDAR2017} {Competition} on {Information} {Extraction} in {Historical} {Handwritten} {Records}},\n\tvolume = {abs/1804.09943},\n\turl = {http://arxiv.org/abs/1804.09943},\n\turldate = {2018-06-29},\n\tjournal = {CoRR},\n\tauthor = {Strauss, Tobias and Weidemann, Max and Michael, Johannes and Leifert, Gundram and Grüning, Tobias and Labahn, Roger},\n\tyear = {2018},\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n Strauß, T.\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n ICFHR2018 Competition on Automated Text Recognition on a READ Dataset.\n \n \n \n\n\n \n Strauß, T.; Leifert, G.; Labahn, R.; Hodel, T.; and Mühlberger, G.\n\n\n \n\n\n\n In 2018 16th International Conference on Frontiers in Handwriting Recognition (ICFHR), pages 477–482, August 2018. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{straus_icfhr2018_2018,\n\ttitle = {{ICFHR2018} {Competition} on {Automated} {Text} {Recognition} on a {READ} {Dataset}},\n\tdoi = {10.1109/ICFHR-2018.2018.00089},\n\tabstract = {We summarize the results of a competition on Automated Text Recognition targeting the effective adaptation of recognition engines to essentially new data. The task consists in achieving a minimum character error rate on a previously unknown text corpus from which only a few pages are available for adjusting an already pre-trained recognition engine. This issue addresses a frequent application scenario where only a small amount of task-specific training data is available, because producing this data usually requires much effort. We present the results of five submission. They show that the task is a challenging issue but for certain documents 16 pages of transcription are sufficient to adapt a pre-trained recognition system.},\n\tbooktitle = {2018 16th {International} {Conference} on {Frontiers} in {Handwriting} {Recognition} ({ICFHR})},\n\tauthor = {Strauß, Tobias and Leifert, Gundram and Labahn, Roger and Hodel, Tobias and Mühlberger, Günter},\n\tmonth = aug,\n\tyear = {2018},\n\tkeywords = {Computational modeling, Data models, Optical imaging, Task analysis, Text recognition, Training, Training data, automated text recognition, fast adaptation, few shot learning, historical documents},\n\tpages = {477--482},\n}\n\n
\n
\n\n\n
\n We summarize the results of a competition on Automated Text Recognition targeting the effective adaptation of recognition engines to essentially new data. The task consists in achieving a minimum character error rate on a previously unknown text corpus from which only a few pages are available for adjusting an already pre-trained recognition engine. This issue addresses a frequent application scenario where only a small amount of task-specific training data is available, because producing this data usually requires much effort. We present the results of five submissions. They show that the task is a challenging one, but that for certain documents 16 pages of transcription are sufficient to adapt a pre-trained recognition system.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n Tomarchio, L.\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Mapping Human Landscapes in Muscat, Oman, with Social Media Data.\n \n \n \n \n\n\n \n Tomarchio, L.\n\n\n \n\n\n\n 38 p. 2019.\n Artwork Size: 38 p. Medium: application/pdf Publisher: ETH Zurich\n\n\n\n
\n\n\n\n \n \n \"MappingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{tomarchio_mapping_2019,\n\ttitle = {Mapping {Human} {Landscapes} in {Muscat}, {Oman}, with {Social} {Media} {Data}},\n\tcopyright = {Creative Commons Attribution Share Alike 4.0 International, info:eu-repo/semantics/openAccess},\n\turl = {http://hdl.handle.net/20.500.11850/339868},\n\tdoi = {10.3929/ETHZ-B-000339868},\n\tabstract = {The paper presents a mapping process to define activity patterns and reveal the localisation of different city usersin Muscat, Oman, using social media data. The paper has two aims: to present a methodology to map activity patterns in the city in the Omani context, using social media data; to interpret the data and extract valuable narratives for the case study of Muscat. As various social media have penetrated into the daily life of people, these become one important and effective data source to understand how people use the spaces of the city. There is a series of questions related to big data and urban space that emerge such as: can social media data be “mined” in Muscat, Oman, to create design-relevant spatial information? What information about the use of urban space in the context of an Arab city can be extracted from social media data? The case study deals with Muscat, the capital of Oman, a city with peculiar socio-demographic, cultural aspects, influencing the use of the space, particularly when relating to open and public spaces. The proposed study uses data extracted from Twitter and Instagram to perform an analysis of the city of Muscat: The analysis looks at three scales and presents four thematic layers: one layer of generally finding hotspots of activities; two layers of investigating different patterns of activities during the day-night, weekdays-weekends and one layer of looking into the languages spoken in different areas of the city. This results in the mapping of how different sociallinguistic groups possibly move and interact in Muscat. The first part of the paper will present the methodology, from data collection to visualisation. The second part will look in detail at some selected areas and exemplify the narrative so that planners and designers can extract data from this approach and methodology.},\n\tlanguage = {en},\n\turldate = {2021-03-03},\n\tauthor = {Tomarchio, Ludovica},\n\tyear = {2019},\n\tnote = {Artwork Size: 38 p.\nMedium: application/pdf\nPublisher: ETH Zurich},\n\tkeywords = {Digital Humanities, HUMAN GEOGRAPHY, Oman (South West Asia). Sultanat of Oman, social media},\n\tpages = {38 p.},\n}\n\n
\n
\n\n\n
\n The paper presents a mapping process to define activity patterns and reveal the localisation of different city users in Muscat, Oman, using social media data. The paper has two aims: to present a methodology to map activity patterns in the city in the Omani context, using social media data; to interpret the data and extract valuable narratives for the case study of Muscat. As various social media have penetrated into the daily life of people, these become one important and effective data source to understand how people use the spaces of the city. There is a series of questions related to big data and urban space that emerge, such as: can social media data be “mined” in Muscat, Oman, to create design-relevant spatial information? What information about the use of urban space in the context of an Arab city can be extracted from social media data? The case study deals with Muscat, the capital of Oman, a city with peculiar socio-demographic, cultural aspects, influencing the use of the space, particularly when relating to open and public spaces. The proposed study uses data extracted from Twitter and Instagram to perform an analysis of the city of Muscat: The analysis looks at three scales and presents four thematic layers: one layer of generally finding hotspots of activities; two layers of investigating different patterns of activities during the day-night, weekdays-weekends and one layer of looking into the languages spoken in different areas of the city. This results in the mapping of how different socio-linguistic groups possibly move and interact in Muscat. The first part of the paper will present the methodology, from data collection to visualisation. The second part will look in detail at some selected areas and exemplify the narrative so that planners and designers can extract data from this approach and methodology.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n Tompkins, J.\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Editorial Comment: Theatre, the Digital, and the Analysis and Documentation of Performance.\n \n \n \n \n\n\n \n Tompkins, J.\n\n\n \n\n\n\n Theatre Journal, 68(4): xi–xiv. 2016.\n \n\n\n\n
\n\n\n\n \n \n \"EditorialPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{tompkins_editorial_2016,\n\ttitle = {Editorial {Comment}: {Theatre}, the {Digital}, and the {Analysis} and {Documentation} of {Performance}},\n\tvolume = {68},\n\tissn = {1086-332X},\n\tshorttitle = {Editorial {Comment}},\n\turl = {https://muse.jhu.edu/article/645393},\n\tdoi = {10.1353/tj.2016.0103},\n\tabstract = {Throughout its history, theatre has capitalized on advances in technology, from shifts in lighting practices, to the development of machinery for creating special effects, to the advent of multimedia in contemporary performance, and beyond. The authors demystify digital humanities methodologies for theatre and performance research/researchers by demonstrating that theatre history has always employed quantitative, bigger picture approaches in addition to close readings of performances, and that digital technologies facilitate the analysis of larger datasets more effectively.},\n\tlanguage = {en},\n\tnumber = {4},\n\turldate = {2020-02-24},\n\tjournal = {Theatre Journal},\n\tauthor = {Tompkins, Joanne},\n\tyear = {2016},\n\tpages = {xi--xiv},\n}\n\n
\n
\n\n\n
\n Throughout its history, theatre has capitalized on advances in technology, from shifts in lighting practices, to the development of machinery for creating special effects, to the advent of multimedia in contemporary performance, and beyond. The authors demystify digital humanities methodologies for theatre and performance research/researchers by demonstrating that theatre history has always employed quantitative, bigger picture approaches in addition to close readings of performances, and that digital technologies facilitate the analysis of larger datasets more effectively.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n Weidemann, M.\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n System Description of CITlab's Recognition & Retrieval Engine for ICDAR2017 Competition on Information Extraction in Historical Handwritten Records.\n \n \n \n \n\n\n \n Strauss, T.; Weidemann, M.; Michael, J.; Leifert, G.; Grüning, T.; and Labahn, R.\n\n\n \n\n\n\n CoRR, abs/1804.09943. 2018.\n \n\n\n\n
\n\n\n\n \n \n \"SystemPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{strauss_system_2018,\n\ttitle = {System {Description} of {CITlab}'s {Recognition} \\& {Retrieval} {Engine} for {ICDAR2017} {Competition} on {Information} {Extraction} in {Historical} {Handwritten} {Records}},\n\tvolume = {abs/1804.09943},\n\turl = {http://arxiv.org/abs/1804.09943},\n\turldate = {2018-06-29},\n\tjournal = {CoRR},\n\tauthor = {Strauss, Tobias and Weidemann, Max and Michael, Johannes and Leifert, Gundram and Grüning, Tobias and Labahn, Roger},\n\tyear = {2018},\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n (no author)\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n The Shape of Data in the Digital Humanities: Modeling Texts and Text-based Resources.\n \n \n \n \n\n\n \n Flanders, J.; and Jannidis, F.,\n editors.\n \n\n\n \n\n\n\n Routledge, Abingdon, Oxon; New York, NY, 1st edition, November 2018. Series: Digital research in the arts and humanities.\n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@book{flanders_shape_2018,\n\taddress = {Abingdon, Oxon; New York, NY},\n\tseries = {Digital research in the arts and humanities},\n\tedition = {1},\n\ttitle = {The {Shape} of {Data} in the {Digital} {Humanities}: {Modeling} {Texts} and {Text}-based {Resources}},\n\tisbn = {978-1-315-55294-1},\n\tshorttitle = {The {Shape} of {Data} in the {Digital} {Humanities}},\n\turl = {https://www.taylorfrancis.com/books/9781317016151},\n\tlanguage = {en},\n\turldate = {2020-01-14},\n\tpublisher = {Routledge},\n\teditor = {Flanders, Julia and Jannidis, Fotis},\n\tmonth = nov,\n\tyear = {2018},\n\tdoi = {10.4324/9781315552941},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n STTS Tag Table. Institut für Maschinelle Sprachverarbeitung. Universität Stuttgart.\n \n \n \n \n\n\n \n \n\n\n \n\n\n\n 1999.\n \n\n\n\n
\n\n\n\n \n \n \"STTSPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{noauthor_stts_1999,\n\ttitle = {{STTS} {Tag} {Table}. {Institut} für {Maschinelle} {Sprachverarbeitung}. {Universität} {Stuttgart}},\n\tshorttitle = {{STTS}},\n\turl = {http://www.ims.uni-stuttgart.de/forschung/ressourcen/lexika/TagSets/stts-table.html},\n\turldate = {2014-07-29},\n\tjournal = {STTS Tag Table (1995/1999)},\n\tyear = {1999},\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n\n\n\n
\n\n\n \n\n \n \n \n \n\n
\n"}; document.write(bibbase_data.data);