BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. Devlin, J., Chang, M., Lee, K., & Toutanova, K. arXiv:1810.04805 [cs], May 2019. http://arxiv.org/abs/1810.04805. Abstract: We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).
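The abstract's central claim is that the pre-trained encoder needs only one additional output layer to be fine-tuned for a downstream task. As a rough illustration, not taken from the paper itself, the sketch below assumes the Hugging Face transformers and PyTorch packages and the public bert-base-uncased checkpoint; the sentence pair and label are made up.

import torch
from transformers import BertTokenizer, BertForSequenceClassification

# Load the pre-trained bidirectional encoder and attach a single
# classification head (the "one additional output layer") on top of [CLS].
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=3)

# Toy sentence pair in the style of an NLI task such as MultiNLI (hypothetical example).
inputs = tokenizer(
    "A man inspects the uniform of a figure.",  # premise
    "The man is sleeping.",                     # hypothesis
    return_tensors="pt",
)
labels = torch.tensor([2])  # hypothetical label id, e.g. "contradiction"

# All parameters receive gradients, not just the new head: fine-tuning
# updates the full bidirectional stack end to end.
outputs = model(**inputs, labels=labels)
outputs.loss.backward()

The same pattern with a different head (for example a span-prediction head for SQuAD-style question answering) is what the abstract means by avoiding substantial task-specific architecture modifications.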
@article{devlinBERTPretrainingDeep2019,
  title = {BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding},
  shorttitle = {BERT},
  author = {Devlin, Jacob and Chang, Ming-Wei and Lee, Kenton and Toutanova, Kristina},
  year = {2019},
  month = {5},
  journal = {arXiv:1810.04805 [cs]},
  url = {http://arxiv.org/abs/1810.04805},
  urldate = {2022-03-27},
  note = {arXiv: 1810.04805},
  keywords = {Computer Science - Computation and Language},
  abstract = {We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5\% (7.7\% point absolute improvement), MultiNLI accuracy to 86.7\% (4.6\% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).}
}
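The deep bidirectional conditioning described in the abstract, where a token is predicted from both its left and right context, can also be probed directly through the masked-language-model objective. Again a rough sketch rather than anything from the paper: it assumes the Hugging Face transformers package and the bert-base-uncased checkpoint, and the example sentence is made up.

import torch
from transformers import BertTokenizer, BertForMaskedLM

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertForMaskedLM.from_pretrained("bert-base-uncased")

# The [MASK] token is predicted from both its left and right context,
# which is the bidirectional conditioning the abstract refers to.
text = "The model conditions on both [MASK] and right context in all layers."
inputs = tokenizer(text, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

# Locate the masked position and read off the most likely filler token.
mask_pos = (inputs["input_ids"][0] == tokenizer.mask_token_id).nonzero(as_tuple=True)[0]
top_id = logits[0, mask_pos].argmax(dim=-1)
print(tokenizer.decode(top_id))  # a completion like "left" is plausible here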
{"_id":"wozhgKuesEmbqT2vE","bibbaseid":"devlin-chang-lee-toutanova-bertpretrainingofdeepbidirectionaltransformersforlanguageunderstanding-2019","authorIDs":[],"author_short":["Devlin, J.","Chang, M.","Lee, K.","Toutanova, K."],"bibdata":{"title":"BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding","type":"article","year":"2019","keywords":"Computer Science - Computation and Language","websites":"http://arxiv.org/abs/1810.04805","month":"5","id":"a0eb3207-90f4-357d-be04-93b4cc4481d4","created":"2022-03-28T09:45:01.966Z","accessed":"2022-03-27","file_attached":"true","profile_id":"235249c2-3ed4-314a-b309-b1ea0330f5d9","group_id":"1ff583c0-be37-34fa-9c04-73c69437d354","last_modified":"2022-03-29T08:03:04.316Z","read":false,"starred":false,"authored":false,"confirmed":"true","hidden":false,"citation_key":"devlinBERTPretrainingDeep2019","source_type":"article","short_title":"BERT","notes":"arXiv: 1810.04805","private_publication":false,"abstract":"We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5\\% (7.7\\% point absolute improvement), MultiNLI accuracy to 86.7\\% (4.6\\% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).","bibtype":"article","author":"Devlin, Jacob and Chang, Ming-Wei and Lee, Kenton and Toutanova, Kristina","journal":"arXiv:1810.04805 [cs]","bibtex":"@article{\n title = {BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding},\n type = {article},\n year = {2019},\n keywords = {Computer Science - Computation and Language},\n websites = {http://arxiv.org/abs/1810.04805},\n month = {5},\n id = {a0eb3207-90f4-357d-be04-93b4cc4481d4},\n created = {2022-03-28T09:45:01.966Z},\n accessed = {2022-03-27},\n file_attached = {true},\n profile_id = {235249c2-3ed4-314a-b309-b1ea0330f5d9},\n group_id = {1ff583c0-be37-34fa-9c04-73c69437d354},\n last_modified = {2022-03-29T08:03:04.316Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {true},\n hidden = {false},\n citation_key = {devlinBERTPretrainingDeep2019},\n source_type = {article},\n short_title = {BERT},\n notes = {arXiv: 1810.04805},\n private_publication = {false},\n abstract = {We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. 
As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5\\% (7.7\\% point absolute improvement), MultiNLI accuracy to 86.7\\% (4.6\\% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).},\n bibtype = {article},\n author = {Devlin, Jacob and Chang, Ming-Wei and Lee, Kenton and Toutanova, Kristina},\n journal = {arXiv:1810.04805 [cs]}\n}","author_short":["Devlin, J.","Chang, M.","Lee, K.","Toutanova, K."],"urls":{"Paper":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c/file/6375d223-e085-74b3-392f-f3fed829cd72/Devlin_et_al___2019___BERT_Pre_training_of_Deep_Bidirectional_Transform.pdf.pdf","Website":"http://arxiv.org/abs/1810.04805"},"biburl":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c","bibbaseid":"devlin-chang-lee-toutanova-bertpretrainingofdeepbidirectionaltransformersforlanguageunderstanding-2019","role":"author","keyword":["Computer Science - Computation and Language"],"metadata":{"authorlinks":{}},"downloads":0},"bibtype":"article","biburl":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c","creationDate":"2020-01-05T22:03:38.499Z","downloads":0,"keywords":["computer science - computation and language"],"search_terms":["bert","pre","training","deep","bidirectional","transformers","language","understanding","devlin","chang","lee","toutanova"],"title":"BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding","year":2019,"dataSources":["okYcdTpf4JJ2zkj7A","QGwcHf7xnb5mCCQi7","ya2CyA73rpZseyrZ8","g9KCRaSxC6wqS4oGp","aXmRAq63YsH7a3ufx","CmHEoydhafhbkXXt5","SzwP89Pgwvcu5euzJ","N4kJAiLiJ7kxfNsoh","pzyFFGWvxG2bs63zP","Wsv2bQ4jPuc7qme8R","WKomETnBq4AHwocGn","2252seNhipfTmjEBQ","znj7izS5PeehdLR3G","JFDnASMkoQCjjGL8E","h7kKWXpJh2iaX92T5"]}