generated by bibbase.org
Excellent! Next you can create a new website with this list, or embed it in an existing web page by copying & pasting any of the following snippets.
JavaScript (easiest)
<script src="https://bibbase.org/show?bib=AnneBeyer.github.io/assets/bib/mine.bib&jsonp=1&authorFirst=1&filter=authors:Beyer"></script>
PHP
<?php
$contents = file_get_contents("https://bibbase.org/show?bib=AnneBeyer.github.io/assets/bib/mine.bib&jsonp=1&authorFirst=1&filter=authors:Beyer");
print_r($contents);
?>
iFrame (not recommended)
<iframe src="https://bibbase.org/show?bib=AnneBeyer.github.io/assets/bib/mine.bib&jsonp=1&authorFirst=1&filter=authors:Beyer"></iframe>
For more details see the documentation.
2021 (1)
Beyer, A.; Loáiciga, S.; and Schlangen, D. Is Incoherence Surprising? Targeted Evaluation of Coherence Prediction from Language Models. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Online, 2021. Association for Computational Linguistics.
\n\n\n\n \n \n \"Is paper\n  \n \n \n \"Is video\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{beyer_incoherence_2021,
    author = {Beyer, Anne and Lo\'aiciga, Sharid and Schlangen, David},
    year = {2021},
    title = {Is {Incoherence} {Surprising}? {Targeted} {Evaluation} of {Coherence} {Prediction} from {Language} {Models}},
    abstract = {Coherent discourse is distinguished from a mere collection of utterances by the satisfaction of a diverse set of constraints, for example choice of expression, logical relation between denoted events, and implicit compatibility with world-knowledge. Do neural language models encode such constraints? We design an extendable set of test suites addressing different aspects of discourse and dialogue coherence. Unlike most previous coherence evaluation studies, we address specific linguistic devices beyond sentence order perturbations, allowing for a more fine-grained analysis of what constitutes coherence and what neural models trained on a language modelling objective do encode. Extending the targeted evaluation paradigm for neural language models (Marvin and Linzen, 2018) to phenomena beyond syntax, we show that this paradigm is equally suited to evaluate linguistic qualities that contribute to the notion of coherence.},
    booktitle = {Proceedings of the 2021 {Conference} of the {North} {American} {Chapter} of the {Association} for {Computational} {Linguistics}: {Human} {Language} {Technologies}},
    publisher = {Association for {Computational} {Linguistics}},
    url_Paper = {https://arxiv.org/pdf/2105.03495.pdf},
    url_Video = {https://screencast-o-matic.com/watch/crhXrYVfqo7},
    address = {Online}
}
Coherent discourse is distinguished from a mere collection of utterances by the satisfaction of a diverse set of constraints, for example choice of expression, logical relation between denoted events, and implicit compatibility with world-knowledge. Do neural language models encode such constraints? We design an extendable set of test suites addressing different aspects of discourse and dialogue coherence. Unlike most previous coherence evaluation studies, we address specific linguistic devices beyond sentence order perturbations, allowing for a more fine-grained analysis of what constitutes coherence and what neural models trained on a language modelling objective do encode. Extending the targeted evaluation paradigm for neural language models (Marvin and Linzen, 2018) to phenomena beyond syntax, we show that this paradigm is equally suited to evaluate linguistic qualities that contribute to the notion of coherence.
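The targeted-evaluation idea in this abstract can be illustrated with a small probe: give a language model the same context followed by a coherent and by an incoherent continuation, and compare the surprisal it assigns to each. The sketch below is only an illustration of that idea, not the paper's released test suites or code; the GPT-2 checkpoint, the helper continuation_surprisal, and the example sentences are assumptions made for the example.

# Minimal sketch (not the authors' code): a language model is expected to assign
# lower mean surprisal to a coherent continuation than to an incoherent one.
import torch
from transformers import GPT2LMHeadModel, GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2")
model.eval()

def continuation_surprisal(context: str, continuation: str) -> float:
    """Mean negative log-likelihood (nats per token) of the continuation given the context."""
    # Assumes the tokenization of the context is a prefix of the full tokenization.
    ctx_len = tokenizer(context, return_tensors="pt").input_ids.shape[1]
    full_ids = tokenizer(context + continuation, return_tensors="pt").input_ids
    labels = full_ids.clone()
    labels[:, :ctx_len] = -100  # ignore context tokens; score only the continuation
    with torch.no_grad():
        out = model(full_ids, labels=labels)
    return out.loss.item()

# Hypothetical test item: one context, a coherent and an incoherent continuation.
context = "Anna dropped the vase on the kitchen floor."
coherent = " It shattered into a hundred pieces."
incoherent = " The recipe calls for two eggs and a cup of flour."

print("coherent:  ", continuation_surprisal(context, coherent))    # expected: lower
print("incoherent:", continuation_surprisal(context, incoherent))  # expected: higher

A test suite in the spirit of the paper would pair many such minimally different continuations per linguistic phenomenon and report how often the model prefers the coherent variant.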
2020 (1)
Beyer, A.; Kauermann, G.; and Schütze, H. Embedding Space Correlation as a Measure of Domain Similarity. In Proceedings of The 12th Language Resources and Evaluation Conference, pages 2431–2439, Marseille, France, 2020.
\n\n\n\n \n \n \"Embedding paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{beyer_embedding_2020,
  author    = {Beyer, Anne and Kauermann, Göran and Schütze, Hinrich},
  title     = {Embedding Space Correlation as a Measure of Domain Similarity},
  booktitle = {Proceedings of The 12th Language Resources and Evaluation Conference},
  year      = {2020},
  address   = {Marseille, France},
  pages     = {2431--2439},
  abstract  = {Prior work has determined domain similarity using text-based features of a corpus. However, when using pre-trained word embeddings, the underlying text corpus might not be accessible anymore. Therefore, we propose the CCA measure, a new measure of domain similarity based directly on the dimension-wise correlations between corresponding embedding spaces. Our results suggest that an inherent notion of domain can be captured this way, as we are able to reproduce our findings for different domain comparisons for English, German, Spanish and Czech as well as in cross-lingual comparisons. We further find a threshold at which the CCA measure indicates that two corpora come from the same domain in a monolingual setting by applying permutation tests. By evaluating the usability of the CCA measure in a domain adaptation application, we also show that it can be used to determine which corpora are more similar to each other in a cross-domain sentiment detection task.},
  url_Paper = {https://www.aclweb.org/anthology/2020.lrec-1.296.pdf}
}
Prior work has determined domain similarity using text-based features of a corpus. However, when using pre-trained word embeddings, the underlying text corpus might not be accessible anymore. Therefore, we propose the CCA measure, a new measure of domain similarity based directly on the dimension-wise correlations between corresponding embedding spaces. Our results suggest that an inherent notion of domain can be captured this way, as we are able to reproduce our findings for different domain comparisons for English, German, Spanish and Czech as well as in cross-lingual comparisons. We further find a threshold at which the CCA measure indicates that two corpora come from the same domain in a monolingual setting by applying permutation tests. By evaluating the usability of the CCA measure in a domain adaptation application, we also show that it can be used to determine which corpora are more similar to each other in a cross-domain sentiment detection task.
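The notion of "dimension-wise correlations between corresponding embedding spaces" can be sketched as below. This is a simplified stand-in, not the CCA measure as defined in the paper: the toy vocabulary and random vectors are invented, the aggregation (mean absolute per-dimension Pearson correlation over the shared vocabulary) is an assumption, and the sketch presumes the two spaces' dimensions already correspond, which for independently trained embeddings requires an alignment step such as the paper's CCA.

# Simplified sketch: score two embedding spaces by the mean absolute
# per-dimension Pearson correlation over their shared vocabulary.
# Not the paper's exact CCA measure; toy data and aggregation are assumptions.
import numpy as np

def dimensionwise_correlation(emb_a: dict, emb_b: dict) -> float:
    """Mean absolute per-dimension correlation over the shared vocabulary."""
    shared = sorted(set(emb_a) & set(emb_b))
    A = np.stack([emb_a[w] for w in shared])  # shape: (n_words, dim)
    B = np.stack([emb_b[w] for w in shared])
    corrs = [np.corrcoef(A[:, d], B[:, d])[0, 1] for d in range(A.shape[1])]
    return float(np.mean(np.abs(corrs)))

# Toy "embeddings": space_b is a noisy copy of space_a (similar domain),
# space_c is unrelated (different domain).
rng = np.random.default_rng(0)
vocab = [f"word{i}" for i in range(500)]
space_a = {w: rng.normal(size=50) for w in vocab}
space_b = {w: space_a[w] + rng.normal(scale=0.1, size=50) for w in vocab}
space_c = {w: rng.normal(size=50) for w in vocab}

print(dimensionwise_correlation(space_a, space_b))  # expected: close to 1
print(dimensionwise_correlation(space_a, space_c))  # expected: close to 0

As the abstract notes, the paper then applies permutation tests to such scores to decide whether two corpora plausibly come from the same domain.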
2017 (1)
Beyer, A.; Macketanz, V.; Burchardt, A.; and Williams, P. Can Out-of-the-box NMT Beat a Domain-trained Moses on Technical Data? In Proceedings for EAMT 2017 User Studies and Project/Product Descriptions, pages 41–46, Prague, Czech Republic, 2017.
\n\n\n\n \n \n \"Can paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 4 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{beyer_can_2017,
  address = {Prague, Czech Republic},
  title = {Can {Out}-of-the-box {NMT} {Beat} a {Domain}-trained {Moses} on {Technical} {Data}?},
  url_Paper = {https://ufal.mff.cuni.cz/eamt2017/user-project-product-papers/papers/user/EAMT2017_paper_32.pdf},
  abstract = {In the last year, we have seen a lot of evidence about the superiority of neural machine translation approaches (NMT) over phrase-based statistical approaches (PBMT). This trend has shown for the general domain at public competitions such as the WMT challenges as well as in the obvious quality increase in online translation services that have changed their technology. In this paper, we take the perspective of an LSP. The question we want to answer with this study is whether now is already the time to invest in the new technology. To answer this question, we have collected evidence as to whether an existing state-of-the-art NMT system for the general domain can already compete with a domain-trained and optimised Moses (PBMT) system or if it is maybe already better. As it is well known that automatic quality measures are not reliable for comparing the performance of different system types, we have performed a detailed manual evaluation based on a test suite of domain segments.},
  language = {en},
  booktitle = {Proceedings for {EAMT} 2017 {User} {Studies} and {Project}/{Product} {Descriptions}},
  author = {Beyer, Anne and Macketanz, Vivien and Burchardt, Aljoscha and Williams, Philip},
  year = {2017},
  pages = {41--46}
}
In the last year, we have seen a lot of evidence about the superiority of neural machine translation approaches (NMT) over phrase-based statistical approaches (PBMT). This trend has shown for the general domain at public competitions such as the WMT challenges as well as in the obvious quality increase in online translation services that have changed their technology. In this paper, we take the perspective of an LSP. The question we want to answer with this study is whether now is already the time to invest in the new technology. To answer this question, we have collected evidence as to whether an existing state-of-the-art NMT system for the general domain can already compete with a domain-trained and optimised Moses (PBMT) system or if it is maybe already better. As it is well known that automatic quality measures are not reliable for comparing the performance of different system types, we have performed a detailed manual evaluation based on a test suite of domain segments.