Visualisation and `Diagnostic Classifiers' Reveal how Recurrent and Recursive Neural Networks Process Hierarchical Structure. Hupkes, D., Veldhoen, S., & Zuidema, W. Journal of Artificial Intelligence Research, 61:907–926, 2018. Paper doi abstract bibtex We investigate how neural networks can learn and process languages with hierarchical, compositional semantics. To this end, we define the artificial task of processing nested arithmetic expressions, and study whether different types of neural networks can learn to compute their meaning. We find that recursive neural networks can find a generalising solution to this problem, and we visualise this solution by breaking it up in three steps: project, sum and squash. As a next step, we investigate recurrent neural networks, and show that a gated recurrent unit, that processes its input incrementally, also performs very well on this task. To develop an understanding of what the recurrent network encodes, visualisation techniques alone do not suffice. Therefore, we develop an approach where we formulate and test multiple hypotheses on the information encoded and processed by the network. For each hypothesis, we derive predictions about features of the hidden state representations at each time step, and train 'diagnostic classifiers' to test those predictions. Our results indicate that the networks follow a strategy similar to our hypothesised 'cumulative strategy', which explains the high accuracy of the network on novel expressions, the generalisation to longer expressions than seen in training, and the mild deterioration with increasing length. This in turn shows that diagnostic classifiers can be a useful technique for opening up the black box of neural networks. 
We argue that diagnostic classification, unlike most visualisation techniques, does scale up from small networks in a toy domain, to larger and deeper recurrent networks dealing with real-life data, and may therefore contribute to a better understanding of the internal dynamics of current state-of-the-art models in natural language processing.
@article{Hupkes2018,
  abstract      = {We investigate how neural networks can learn and process languages with hierarchical, compositional semantics. To this end, we define the artificial task of processing nested arithmetic expressions, and study whether different types of neural networks can learn to compute their meaning. We find that recursive neural networks can find a generalising solution to this problem, and we visualise this solution by breaking it up in three steps: project, sum and squash. As a next step, we investigate recurrent neural networks, and show that a gated recurrent unit, that processes its input incrementally, also performs very well on this task. To develop an understanding of what the recurrent network encodes, visualisation techniques alone do not suffice. Therefore, we develop an approach where we formulate and test multiple hypotheses on the information encoded and processed by the network. For each hypothesis, we derive predictions about features of the hidden state representations at each time step, and train 'diagnostic classifiers' to test those predictions. Our results indicate that the networks follow a strategy similar to our hypothesised 'cumulative strategy', which explains the high accuracy of the network on novel expressions, the generalisation to longer expressions than seen in training, and the mild deterioration with increasing length. This in turn shows that diagnostic classifiers can be a useful technique for opening up the black box of neural networks. We argue that diagnostic classification, unlike most visualisation techniques, does scale up from small networks in a toy domain, to larger and deeper recurrent networks dealing with real-life data, and may therefore contribute to a better understanding of the internal dynamics of current state-of-the-art models in natural language processing.},
  archivePrefix = {arXiv},
  author        = {Hupkes, Dieuwke and Veldhoen, Sara and Zuidema, Willem},
  doi           = {10.1613/jair.1.11196},
  eprint        = {1711.10203},
  file          = {:Users/shanest/Documents/Library/Hupkes, Veldhoen, Zuidema/Journal of Artificial Intelligence Research/Hupkes, Veldhoen, Zuidema - 2018 - Visualisation and `Diagnostic Classifiers' Reveal how Recurrent and Recursive Neural Networks Process.pdf:pdf},
  journal       = {Journal of Artificial Intelligence Research},
  keywords      = {method: diagnostic classifier},
  pages         = {907--926},
  title         = {Visualisation and `Diagnostic Classifiers' Reveal How Recurrent and Recursive Neural Networks Process Hierarchical Structure},
  volume        = {61},
  year          = {2018}
}
Downloads: 0
{"_id":"uLZ924EpzfaFvTaBm","bibbaseid":"hupkes-veldhoen-zuidema-visualisationanddiagnosticclassifiersrevealhowrecurrentandrecursiveneuralnetworksprocesshierarchicalstructure-2018","authorIDs":[],"author_short":["Hupkes, D.","Veldhoen, S.","Zuidema, W."],"bibdata":{"bibtype":"article","type":"article","abstract":"We investigate how neural networks can learn and process languages with hierarchical, compositional semantics. To this end, we define the artificial task of processing nested arithmetic expressions, and study whether different types of neural networks can learn to compute their meaning. We find that recursive neural networks can find a generalising solution to this problem, and we visualise this solution by breaking it up in three steps: project, sum and squash. As a next step, we investigate recurrent neural networks, and show that a gated recurrent unit, that processes its input incrementally, also performs very well on this task. To develop an understanding of what the recurrent network encodes, visualisation techniques alone do not suffice. Therefore, we develop an approach where we formulate and test multiple hypotheses on the information encoded and processed by the network. For each hypothesis, we derive predictions about features of the hidden state representations at each time step, and train 'diagnostic classifiers' to test those predictions. Our results indicate that the networks follow a strategy similar to our hypothesised 'cumulative strategy', which explains the high accuracy of the network on novel expressions, the generalisation to longer expressions than seen in training, and the mild deterioration with increasing length. This is turn shows that diagnostic classifiers can be a useful technique for opening up the black box of neural networks. 
We argue that diagnostic classification, unlike most visualisation techniques, does scale up from small networks in a toy domain, to larger and deeper recurrent networks dealing with real-life data, and may therefore contribute to a better understanding of the internal dynamics of current state-of-the-art models in natural language processing.","archiveprefix":"arXiv","arxivid":"arXiv:1711.10203v2","author":[{"propositions":[],"lastnames":["Hupkes"],"firstnames":["Dieuwke"],"suffixes":[]},{"propositions":[],"lastnames":["Veldhoen"],"firstnames":["Sara"],"suffixes":[]},{"propositions":[],"lastnames":["Zuidema"],"firstnames":["Willem"],"suffixes":[]}],"doi":"10.1613/jair.1.11196","eprint":"arXiv:1711.10203v2","file":":Users/shanest/Documents/Library/Hupkes, Veldhoen, Zuidema/Journal of Artificial Intelligence Research/Hupkes, Veldhoen, Zuidema - 2018 - Visualisation and `Diagnostic Classifiers' Reveal how Recurrent and Recursive Neural Networks Process.pdf:pdf","journal":"Journal of Artificial Intelligence Research","keywords":"method: diagnostic classifier","pages":"907–926","title":"Visualisation and `Diagnostic Classifiers' Reveal how Recurrent and Recursive Neural Networks Process Hierarchical Structure","url":"https://doi.org/10.1613/jair.1.11196","volume":"61","year":"2018","bibtex":"@article{Hupkes2018,\nabstract = {We investigate how neural networks can learn and process languages with hierarchical, compositional semantics. To this end, we define the artificial task of processing nested arithmetic expressions, and study whether different types of neural networks can learn to compute their meaning. We find that recursive neural networks can find a generalising solution to this problem, and we visualise this solution by breaking it up in three steps: project, sum and squash. As a next step, we investigate recurrent neural networks, and show that a gated recurrent unit, that processes its input incrementally, also performs very well on this task. 
To develop an understanding of what the recurrent network encodes, visualisation techniques alone do not suffice. Therefore, we develop an approach where we formulate and test multiple hypotheses on the information encoded and processed by the network. For each hypothesis, we derive predictions about features of the hidden state representations at each time step, and train 'diagnostic classifiers' to test those predictions. Our results indicate that the networks follow a strategy similar to our hypothesised 'cumulative strategy', which explains the high accuracy of the network on novel expressions, the generalisation to longer expressions than seen in training, and the mild deterioration with increasing length. This is turn shows that diagnostic classifiers can be a useful technique for opening up the black box of neural networks. We argue that diagnostic classification, unlike most visualisation techniques, does scale up from small networks in a toy domain, to larger and deeper recurrent networks dealing with real-life data, and may therefore contribute to a better understanding of the internal dynamics of current state-of-the-art models in natural language processing.},\narchivePrefix = {arXiv},\narxivId = {arXiv:1711.10203v2},\nauthor = {Hupkes, Dieuwke and Veldhoen, Sara and Zuidema, Willem},\ndoi = {10.1613/jair.1.11196},\neprint = {arXiv:1711.10203v2},\nfile = {:Users/shanest/Documents/Library/Hupkes, Veldhoen, Zuidema/Journal of Artificial Intelligence Research/Hupkes, Veldhoen, Zuidema - 2018 - Visualisation and `Diagnostic Classifiers' Reveal how Recurrent and Recursive Neural Networks Process.pdf:pdf},\njournal = {Journal of Artificial Intelligence Research},\nkeywords = {method: diagnostic classifier},\npages = {907--926},\ntitle = {{Visualisation and `Diagnostic Classifiers' Reveal how Recurrent and Recursive Neural Networks Process Hierarchical Structure}},\nurl = {https://doi.org/10.1613/jair.1.11196},\nvolume = {61},\nyear = 
{2018}\n}\n","author_short":["Hupkes, D.","Veldhoen, S.","Zuidema, W."],"key":"Hupkes2018","id":"Hupkes2018","bibbaseid":"hupkes-veldhoen-zuidema-visualisationanddiagnosticclassifiersrevealhowrecurrentandrecursiveneuralnetworksprocesshierarchicalstructure-2018","role":"author","urls":{"Paper":"https://doi.org/10.1613/jair.1.11196"},"keyword":["method: diagnostic classifier"],"metadata":{"authorlinks":{}},"downloads":0},"bibtype":"article","biburl":"https://www.shane.st/teaching/575/win20/MachineLearning-interpretability.bib","creationDate":"2020-12-30T01:26:34.628Z","downloads":0,"keywords":["method: diagnostic classifier"],"search_terms":["visualisation","diagnostic","classifiers","reveal","recurrent","recursive","neural","networks","process","hierarchical","structure","hupkes","veldhoen","zuidema"],"title":"Visualisation and `Diagnostic Classifiers' Reveal how Recurrent and Recursive Neural Networks Process Hierarchical Structure","year":2018,"dataSources":["okYcdTpf4JJ2zkj7A","znj7izS5PeehdLR3G"]}