Analysing Neural Language Models: Contextual Decomposition Reveals Default Reasoning in Number and Gender Assignment. Jumelet, J., Zuidema, W., & Hupkes, D. In Proceedings of the Conference on Computational Natural Language Learning (CoNLL), 2019. [Paper] [abstract] [bibtex] Extensive research has recently shown that recurrent neural language models are able to process a wide range of grammatical phenomena. How these models are able to perform these remarkable feats so well, however, is still an open question. To gain more insight into what information LSTMs base their decisions on, we propose a generalisation of Contextual Decomposition (GCD). In particular, this setup enables us to accurately distil which part of a prediction stems from semantic heuristics, which part truly emanates from syntactic cues and which part arise from the model biases themselves instead. We investigate this technique on tasks pertaining to syntactic agreement and co-reference resolution and discover that the model strongly relies on a default reasoning effect to perform these tasks.
@inproceedings{Jumelet2019,
  author        = {Jumelet, Jaap and Zuidema, Willem and Hupkes, Dieuwke},
  title         = {Analysing Neural Language Models: {Contextual Decomposition} Reveals Default Reasoning in Number and Gender Assignment},
  booktitle     = {Proceedings of the Conference on Computational Natural Language Learning ({CoNLL})},
  year          = {2019},
  eprint        = {1909.08975},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CL},
  url           = {https://arxiv.org/abs/1909.08975},
  abstract      = {Extensive research has recently shown that recurrent neural language models are able to process a wide range of grammatical phenomena. How these models are able to perform these remarkable feats so well, however, is still an open question. To gain more insight into what information LSTMs base their decisions on, we propose a generalisation of Contextual Decomposition (GCD). In particular, this setup enables us to accurately distil which part of a prediction stems from semantic heuristics, which part truly emanates from syntactic cues and which part arise from the model biases themselves instead. We investigate this technique on tasks pertaining to syntactic agreement and co-reference resolution and discover that the model strongly relies on a default reasoning effect to perform these tasks.},
  keywords      = {method: contextual decomposition,phenomenon: gender agreement,phenomenon: number agreement},
  file          = {:Users/shanest/Documents/Library/Jumelet, Zuidema, Hupkes/Proceedings of the Conference on Computational Natural Language Learning (CoNLL)/Jumelet, Zuidema, Hupkes - 2019 - Analysing Neural Language Models Contextual Decomposition Reveals Default Reasoning in Number and Gend.pdf:pdf}
}
Downloads: 0
{"_id":"zipqxNoahq5KDccte","bibbaseid":"jumelet-zuidema-hupkes-analysingneurallanguagemodelscontextualdecompositionrevealsdefaultreasoninginnumberandgenderassignment-2019","authorIDs":[],"author_short":["Jumelet, J.","Zuidema, W.","Hupkes, D."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","abstract":"Extensive research has recently shown that recurrent neural language models are able to process a wide range of grammatical phenomena. How these models are able to perform these remarkable feats so well, however, is still an open question. To gain more insight into what information LSTMs base their decisions on, we propose a generalisation of Contextual Decomposition (GCD). In particular, this setup enables us to accurately distil which part of a prediction stems from semantic heuristics, which part truly emanates from syntactic cues and which part arise from the model biases themselves instead. We investigate this technique on tasks pertaining to syntactic agreement and co-reference resolution and discover that the model strongly relies on a default reasoning effect to perform these tasks.","archiveprefix":"arXiv","arxivid":"1909.08975","author":[{"propositions":[],"lastnames":["Jumelet"],"firstnames":["Jaap"],"suffixes":[]},{"propositions":[],"lastnames":["Zuidema"],"firstnames":["Willem"],"suffixes":[]},{"propositions":[],"lastnames":["Hupkes"],"firstnames":["Dieuwke"],"suffixes":[]}],"booktitle":"Proceedings of the Conference on Computational Natural Language Learning (CoNLL)","eprint":"1909.08975","file":":Users/shanest/Documents/Library/Jumelet, Zuidema, Hupkes/Proceedings of the Conference on Computational Natural Language Learning (CoNLL)/Jumelet, Zuidema, Hupkes - 2019 - Analysing Neural Language Models Contextual Decomposition Reveals Default Reasoning in Number and Gend.pdf:pdf","keywords":"method: contextual decomposition,phenomenon: gender agreement,phenomenon: number agreement","title":"Analysing Neural Language Models: Contextual 
Decomposition Reveals Default Reasoning in Number and Gender Assignment","url":"http://arxiv.org/abs/1909.08975","year":"2019","bibtex":"@inproceedings{Jumelet2019,\nabstract = {Extensive research has recently shown that recurrent neural language models are able to process a wide range of grammatical phenomena. How these models are able to perform these remarkable feats so well, however, is still an open question. To gain more insight into what information LSTMs base their decisions on, we propose a generalisation of Contextual Decomposition (GCD). In particular, this setup enables us to accurately distil which part of a prediction stems from semantic heuristics, which part truly emanates from syntactic cues and which part arise from the model biases themselves instead. We investigate this technique on tasks pertaining to syntactic agreement and co-reference resolution and discover that the model strongly relies on a default reasoning effect to perform these tasks.},\narchivePrefix = {arXiv},\narxivId = {1909.08975},\nauthor = {Jumelet, Jaap and Zuidema, Willem and Hupkes, Dieuwke},\nbooktitle = {Proceedings of the Conference on Computational Natural Language Learning (CoNLL)},\neprint = {1909.08975},\nfile = {:Users/shanest/Documents/Library/Jumelet, Zuidema, Hupkes/Proceedings of the Conference on Computational Natural Language Learning (CoNLL)/Jumelet, Zuidema, Hupkes - 2019 - Analysing Neural Language Models Contextual Decomposition Reveals Default Reasoning in Number and Gend.pdf:pdf},\nkeywords = {method: contextual decomposition,phenomenon: gender agreement,phenomenon: number agreement},\ntitle = {{Analysing Neural Language Models: Contextual Decomposition Reveals Default Reasoning in Number and Gender Assignment}},\nurl = {http://arxiv.org/abs/1909.08975},\nyear = {2019}\n}\n","author_short":["Jumelet, J.","Zuidema, W.","Hupkes, 
D."],"key":"Jumelet2019","id":"Jumelet2019","bibbaseid":"jumelet-zuidema-hupkes-analysingneurallanguagemodelscontextualdecompositionrevealsdefaultreasoninginnumberandgenderassignment-2019","role":"author","urls":{"Paper":"http://arxiv.org/abs/1909.08975"},"keyword":["method: contextual decomposition","phenomenon: gender agreement","phenomenon: number agreement"],"metadata":{"authorlinks":{}},"downloads":0},"bibtype":"inproceedings","biburl":"https://www.shane.st/teaching/575/win20/MachineLearning-interpretability.bib","creationDate":"2020-01-05T04:04:02.868Z","downloads":0,"keywords":["method: contextual decomposition","phenomenon: gender agreement","phenomenon: number agreement"],"search_terms":["analysing","neural","language","models","contextual","decomposition","reveals","default","reasoning","number","gender","assignment","jumelet","zuidema","hupkes"],"title":"Analysing Neural Language Models: Contextual Decomposition Reveals Default Reasoning in Number and Gender Assignment","year":2019,"dataSources":["okYcdTpf4JJ2zkj7A","znj7izS5PeehdLR3G"]}