Language Models are Unsupervised Multitask Learners. Radford, A., Wu, J., Child, R., Luan, D., Amodei, D., & Sutskever, I. 2019.
Abstract: Natural language processing tasks, such as question answering, machine translation, reading comprehension, and summarization, are typically approached with supervised learning on task-specific datasets. We demonstrate that language models begin to learn these tasks without any explicit supervision when trained on a new dataset of millions of webpages called WebText. When conditioned on a document plus questions, the answers generated by the language model reach 55 F1 on the CoQA dataset, matching or exceeding the performance of 3 out of 4 baseline systems without using the 127,000+ training examples. The capacity of the language model is essential to the success of zero-shot task transfer and increasing it improves performance in a log-linear fashion across tasks. Our largest model, GPT-2, is a 1.5B parameter Transformer that achieves state of the art results on 7 out of 8 tested language modeling datasets in a zero-shot setting but still underfits WebText. Samples from the model reflect these improvements and contain coherent paragraphs of text. These findings suggest a promising path towards building language processing systems which learn to perform tasks from their naturally occurring demonstrations.
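The zero-shot setup the abstract describes amounts to conditioning a trained language model on a task framed as plain text (for example, a document followed by a question) and reading the task output off the model's continuation, with no task-specific fine-tuning. As a rough illustration only, not the paper's own code, the sketch below runs a publicly released GPT-2 checkpoint through the Hugging Face transformers library (an assumption on my part; the paper predates that API) to answer a question in this conditioned, zero-shot way:

from transformers import GPT2LMHeadModel, GPT2Tokenizer

# Small public GPT-2 checkpoint as a stand-in for the paper's 1.5B-parameter model.
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2")

# Zero-shot conditioning: a document plus a question, formatted as plain text.
document = "The Amazon rainforest spans nine countries, with about 60% lying in Brazil."
prompt = document + "\nQ: Which country contains most of the Amazon rainforest?\nA:"

inputs = tokenizer(prompt, return_tensors="pt")
# Greedy continuation; whatever the model writes after "A:" is read off as the answer.
outputs = model.generate(**inputs, max_new_tokens=10, pad_token_id=tokenizer.eos_token_id)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))

The key design point, per the abstract, is that the "supervision" comes entirely from how the task is phrased in the prompt; the quality of the continuation is what scales log-linearly with model capacity.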
@article{Radford2019,
abstract = {Natural language processing tasks, such as question answering, machine translation, reading comprehension, and summarization, are typically approached with supervised learning on task-specific datasets. We demonstrate that language models begin to learn these tasks without any explicit supervision when trained on a new dataset of millions of webpages called WebText. When conditioned on a document plus questions, the answers generated by the language model reach 55 F1 on the CoQA dataset, matching or exceeding the performance of 3 out of 4 baseline systems without using the 127,000+ training examples. The capacity of the language model is essential to the success of zero-shot task transfer and increasing it improves performance in a log-linear fashion across tasks. Our largest model, GPT-2, is a 1.5B parameter Transformer that achieves state of the art results on 7 out of 8 tested language modeling datasets in a zero-shot setting but still underfits WebText. Samples from the model reflect these improvements and contain coherent paragraphs of text. These findings suggest a promising path towards building language processing systems which learn to perform tasks from their naturally occurring demonstrations.},
author = {Radford, Alec and Wu, Jeffrey and Child, Rewon and Luan, David and Amodei, Dario and Sutskever, Ilya},
keywords = {model},
title = {{Language Models are Unsupervised Multitask Learners}},
url = {https://openai.com/blog/better-language-models/},
year = {2019}
}
{"_id":"AM8kKCLoKt8CogCJ3","bibbaseid":"radford-wu-child-luan-amodei-sutskever-languagemodelsareunsupervisedmultitasklearners-2019","authorIDs":[],"author_short":["Radford, A.","Wu, J.","Child, R.","Luan, D.","Amodei, D.","Sutskever, I."],"bibdata":{"bibtype":"article","type":"article","abstract":"Natural language processing tasks, such as question answering, machine translation, reading comprehension , and summarization, are typically approached with supervised learning on task-specific datasets. We demonstrate that language models begin to learn these tasks without any explicit supervision when trained on a new dataset of millions of webpages called WebText. When conditioned on a document plus questions, the answers generated by the language model reach 55 F1 on the CoQA dataset-matching or exceeding the performance of 3 out of 4 baseline systems without using the 127,000+ training examples. The capacity of the language model is essential to the success of zero-shot task transfer and increasing it improves performance in a log-linear fashion across tasks. Our largest model, GPT-2, is a 1.5B parameter Transformer that achieves state of the art results on 7 out of 8 tested language modeling datasets in a zero-shot setting but still underfits WebText. Samples from the model reflect these improvements and contain coherent paragraphs of text. These findings suggest a promising path towards building language processing systems which learn to perform tasks from their naturally occurring demonstrations.","author":[{"propositions":[],"lastnames":["Radford"],"firstnames":["Alec"],"suffixes":[]},{"propositions":[],"lastnames":["Wu"],"firstnames":["Jeffrey"],"suffixes":[]},{"propositions":[],"lastnames":["Child"],"firstnames":["Rewon"],"suffixes":[]},{"propositions":[],"lastnames":["Luan"],"firstnames":["David"],"suffixes":[]},{"propositions":[],"lastnames":["Amodei"],"firstnames":["Dario"],"suffixes":[]},{"propositions":[],"lastnames":["Sutskever"],"firstnames":["Ilya"],"suffixes":[]}],"file":":Users/shanest/Documents/Library/Radford et al/Unknown/Radford et al. - 2019 - Language Models are Unsupervised Multitask Learners.pdf:pdf","keywords":"model","title":"Language Models are Unsupervised Multitask Learners","url":"https://openai.com/blog/better-language-models/","year":"2019","bibtex":"@article{Radford2019,\nabstract = {Natural language processing tasks, such as question answering, machine translation, reading comprehension , and summarization, are typically approached with supervised learning on task-specific datasets. We demonstrate that language models begin to learn these tasks without any explicit supervision when trained on a new dataset of millions of webpages called WebText. When conditioned on a document plus questions, the answers generated by the language model reach 55 F1 on the CoQA dataset-matching or exceeding the performance of 3 out of 4 baseline systems without using the 127,000+ training examples. The capacity of the language model is essential to the success of zero-shot task transfer and increasing it improves performance in a log-linear fashion across tasks. Our largest model, GPT-2, is a 1.5B parameter Transformer that achieves state of the art results on 7 out of 8 tested language modeling datasets in a zero-shot setting but still underfits WebText. Samples from the model reflect these improvements and contain coherent paragraphs of text. 
These findings suggest a promising path towards building language processing systems which learn to perform tasks from their naturally occurring demonstrations.},\nauthor = {Radford, Alec and Wu, Jeffrey and Child, Rewon and Luan, David and Amodei, Dario and Sutskever, Ilya},\nfile = {:Users/shanest/Documents/Library/Radford et al/Unknown/Radford et al. - 2019 - Language Models are Unsupervised Multitask Learners.pdf:pdf},\nkeywords = {model},\ntitle = {{Language Models are Unsupervised Multitask Learners}},\nurl = {https://openai.com/blog/better-language-models/},\nyear = {2019}\n}\n","author_short":["Radford, A.","Wu, J.","Child, R.","Luan, D.","Amodei, D.","Sutskever, I."],"key":"Radford2019","id":"Radford2019","bibbaseid":"radford-wu-child-luan-amodei-sutskever-languagemodelsareunsupervisedmultitasklearners-2019","role":"author","urls":{"Paper":"https://openai.com/blog/better-language-models/"},"keyword":["model"],"metadata":{"authorlinks":{}},"downloads":0},"bibtype":"article","biburl":"https://www.shane.st/teaching/575/win20/MachineLearning-interpretability.bib","creationDate":"2020-01-06T20:16:55.588Z","downloads":0,"keywords":["model"],"search_terms":["language","models","unsupervised","multitask","learners","radford","wu","child","luan","amodei","sutskever"],"title":"Language Models are Unsupervised Multitask Learners","year":2019,"dataSources":["okYcdTpf4JJ2zkj7A","Wsv2bQ4jPuc7qme8R","znj7izS5PeehdLR3G","cx4WvnDhXJhiLqdQo","3gTBYW5YxtNcnhN2g"]}