Language Modeling Teaches You More than Translation Does: Lessons Learned Through Auxiliary Syntactic Task Analysis. Zhang, K. & Bowman, S. In Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 359–361, Stroudsburg, PA, USA, 2018. Association for Computational Linguistics.
Abstract: Recent work using auxiliary prediction task classifiers to investigate the properties of LSTM representations has begun to shed light on why pretrained representations, like ELMo (Peters et al., 2018) and CoVe (McCann et al., 2017), are so beneficial for neural language understanding models. We still, though, do not yet have a clear understanding of how the choice of pretraining objective affects the type of linguistic information that models learn. With this in mind, we compare four objectives—language modeling, translation, skip-thought, and autoencoding—on their ability to induce syntactic and part-of-speech information. We make a fair comparison between the tasks by holding constant the quantity and genre of the training data, as well as the LSTM architecture. We find that representations from language models consistently perform best on our syntactic auxiliary prediction tasks, even when trained on relatively small amounts of data. These results suggest that language modeling may be the best data-rich pretraining task for transfer learning applications requiring syntactic information. We also find that the representations from randomly-initialized, frozen LSTMs perform strikingly well on our syntactic auxiliary tasks, but this effect disappears when the amount of training data for the auxiliary tasks is reduced.
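The paper's central tool is a diagnostic (auxiliary prediction task) classifier: the pretrained LSTM encoder is frozen, and a small classifier is trained on its hidden states to predict syntactic labels such as POS tags. The following is a minimal PyTorch sketch of that setup; the dimensions, optimizer settings, and toy data are illustrative assumptions, not the authors' actual configuration.

# Sketch of a probing (diagnostic-classifier) setup: a frozen LSTM encoder
# produces token representations, and only a linear probe is trained on top
# to predict a per-token syntactic label (e.g. a POS tag).
# All sizes and the toy batch below are assumed for illustration.
import torch
import torch.nn as nn

VOCAB_SIZE, EMB_DIM, HID_DIM, NUM_TAGS = 1000, 64, 128, 17

# Frozen encoder: in the paper this would be pretrained with language
# modeling, translation, skip-thought, or autoencoding (or left random).
embedding = nn.Embedding(VOCAB_SIZE, EMB_DIM)
encoder = nn.LSTM(EMB_DIM, HID_DIM, batch_first=True)
for module in (embedding, encoder):
    for p in module.parameters():
        p.requires_grad = False

# Auxiliary-task probe: the only trained parameters.
probe = nn.Linear(HID_DIM, NUM_TAGS)
optimizer = torch.optim.Adam(probe.parameters(), lr=1e-3)
loss_fn = nn.CrossEntropyLoss()

# Toy batch: token ids and per-token tag labels (batch=2, seq_len=5).
tokens = torch.randint(0, VOCAB_SIZE, (2, 5))
tags = torch.randint(0, NUM_TAGS, (2, 5))

for _ in range(10):
    with torch.no_grad():                       # encoder stays frozen
        states, _ = encoder(embedding(tokens))  # (2, 5, HID_DIM)
    logits = probe(states)                      # (2, 5, NUM_TAGS)
    loss = loss_fn(logits.reshape(-1, NUM_TAGS), tags.reshape(-1))
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

Probe accuracy on held-out data is then read as a measure of how much syntactic information the frozen representations expose, which is how the paper compares the four pretraining objectives.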
@inproceedings{Zhang2018b,
abstract = {Recent work using auxiliary prediction task classifiers to investigate the properties of LSTM representations has begun to shed light on why pretrained representations, like ELMo (Peters et al., 2018) and CoVe (McCann et al., 2017), are so beneficial for neural language understanding models. We still, though, do not yet have a clear understanding of how the choice of pretraining objective affects the type of linguistic information that models learn. With this in mind, we compare four objectives---language modeling, translation, skip-thought, and autoencoding---on their ability to induce syntactic and part-of-speech information. We make a fair comparison between the tasks by holding constant the quantity and genre of the training data, as well as the LSTM architecture. We find that representations from language models consistently perform best on our syntactic auxiliary prediction tasks, even when trained on relatively small amounts of data. These results suggest that language modeling may be the best data-rich pretraining task for transfer learning applications requiring syntactic information. We also find that the representations from randomly-initialized, frozen LSTMs perform strikingly well on our syntactic auxiliary tasks, but this effect disappears when the amount of training data for the auxiliary tasks is reduced.},
address = {Stroudsburg, PA, USA},
archivePrefix = {arXiv},
arxivId = {1809.10040},
author = {Zhang, Kelly and Bowman, Samuel},
booktitle = {Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP},
doi = {10.18653/v1/W18-5448},
eprint = {1809.10040},
keywords = {method: diagnostic classifier,method: layer-wise analysis,method: pre-training task comparison},
pages = {359--361},
publisher = {Association for Computational Linguistics},
title = {{Language Modeling Teaches You More than Translation Does: Lessons Learned Through Auxiliary Syntactic Task Analysis}},
url = {http://aclweb.org/anthology/W18-5448},
year = {2018}
}