Evaluating Commonsense in Pre-trained Language Models. Zhou, X., Zhang, Y., Cui, L., & Huang, D. In Association for the Advancement of Artificial Intelligence (AAAI), 2020. Paper abstract bibtex 1 download Contextualized representations trained over large raw text data have given remarkable improvements for NLP tasks including question answering and reading comprehension. There have been works showing that syntactic, semantic and word sense knowledge are contained in such representations, which explains why they benefit such tasks. However, relatively little work has been done investigating commonsense knowledge contained in contextualized representations, which is crucial for human question answering and reading comprehension. We study the commonsense ability of GPT, BERT, XLNet, and RoBERTa by testing them on seven challenging benchmarks, finding that language modeling and its variants are effective objectives for promoting models' commonsense ability while bi-directional context and larger training set are bonuses. We additionally find that current models do poorly on tasks require more necessary inference steps. Finally, we test the robustness of models by making dual test cases, which are correlated so that the correct prediction of one sample should lead to correct prediction of the other. Interestingly, the models show confusion on these test cases, which suggests that they learn commonsense at the surface rather than the deep level. We release a test set, named CATs publicly, for future research.
@inproceedings{Zhou2020,
  abstract      = {Contextualized representations trained over large raw text data have given remarkable improvements for NLP tasks including question answering and reading comprehension. There have been works showing that syntactic, semantic and word sense knowledge are contained in such representations, which explains why they benefit such tasks. However, relatively little work has been done investigating commonsense knowledge contained in contextualized representations, which is crucial for human question answering and reading comprehension. We study the commonsense ability of GPT, BERT, XLNet, and RoBERTa by testing them on seven challenging benchmarks, finding that language modeling and its variants are effective objectives for promoting models' commonsense ability while bi-directional context and larger training set are bonuses. We additionally find that current models do poorly on tasks require more necessary inference steps. Finally, we test the robustness of models by making dual test cases, which are correlated so that the correct prediction of one sample should lead to correct prediction of the other. Interestingly, the models show confusion on these test cases, which suggests that they learn commonsense at the surface rather than the deep level. We release a test set, named CATs publicly, for future research.},
  archivePrefix = {arXiv},
  arxivId       = {1911.11931},
  author        = {Zhou, Xuhui and Zhang, Yue and Cui, Leyang and Huang, Dandan},
  booktitle     = {Association for the Advancement of Artificial Intelligence (AAAI)},
  eprint        = {1911.11931},
  file          = {:Users/shanest/Documents/Library/Zhou et al/Association for the Advancement of Artificial Intelligence (AAAI)/Zhou et al. - 2020 - Evaluating Commonsense in Pre-trained Language Models.pdf:pdf},
  keywords      = {dataset,method: new data,phenomenon: commonsense},
  title         = {Evaluating Commonsense in Pre-trained Language Models},
  url           = {http://arxiv.org/abs/1911.11931},
  year          = {2020}
}
Downloads: 1
{"_id":"MCzgDB7ZijJLq9Smc","bibbaseid":"zhou-zhang-cui-huang-evaluatingcommonsenseinpretrainedlanguagemodels-2020","authorIDs":[],"author_short":["Zhou, X.","Zhang, Y.","Cui, L.","Huang, D."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","abstract":"Contextualized representations trained over large raw text data have given remarkable improvements for NLP tasks including question answering and reading comprehension. There have been works showing that syntactic, semantic and word sense knowledge are contained in such representations, which explains why they benefit such tasks. However, relatively little work has been done investigating commonsense knowledge contained in contextualized representations, which is crucial for human question answering and reading comprehension. We study the commonsense ability of GPT, BERT, XLNet, and RoBERTa by testing them on seven challenging benchmarks, finding that language modeling and its variants are effective objectives for promoting models' commonsense ability while bi-directional context and larger training set are bonuses. We additionally find that current models do poorly on tasks require more necessary inference steps. Finally, we test the robustness of models by making dual test cases, which are correlated so that the correct prediction of one sample should lead to correct prediction of the other. Interestingly, the models show confusion on these test cases, which suggests that they learn commonsense at the surface rather than the deep level. 
We release a test set, named CATs publicly, for future research.","archiveprefix":"arXiv","arxivid":"1911.11931","author":[{"propositions":[],"lastnames":["Zhou"],"firstnames":["Xuhui"],"suffixes":[]},{"propositions":[],"lastnames":["Zhang"],"firstnames":["Yue"],"suffixes":[]},{"propositions":[],"lastnames":["Cui"],"firstnames":["Leyang"],"suffixes":[]},{"propositions":[],"lastnames":["Huang"],"firstnames":["Dandan"],"suffixes":[]}],"booktitle":"Association for the Advancement of Artificial Intelligence (AAAI)","eprint":"1911.11931","file":":Users/shanest/Documents/Library/Zhou et al/Association for the Advancement of Artificial Intelligence (AAAI)/Zhou et al. - 2020 - Evaluating Commonsense in Pre-trained Language Models.pdf:pdf","keywords":"dataset,method: new data,phenomenon: commonsense","title":"Evaluating Commonsense in Pre-trained Language Models","url":"http://arxiv.org/abs/1911.11931","year":"2020","bibtex":"@inproceedings{Zhou2020,\nabstract = {Contextualized representations trained over large raw text data have given remarkable improvements for NLP tasks including question answering and reading comprehension. There have been works showing that syntactic, semantic and word sense knowledge are contained in such representations, which explains why they benefit such tasks. However, relatively little work has been done investigating commonsense knowledge contained in contextualized representations, which is crucial for human question answering and reading comprehension. We study the commonsense ability of GPT, BERT, XLNet, and RoBERTa by testing them on seven challenging benchmarks, finding that language modeling and its variants are effective objectives for promoting models' commonsense ability while bi-directional context and larger training set are bonuses. We additionally find that current models do poorly on tasks require more necessary inference steps. 
Finally, we test the robustness of models by making dual test cases, which are correlated so that the correct prediction of one sample should lead to correct prediction of the other. Interestingly, the models show confusion on these test cases, which suggests that they learn commonsense at the surface rather than the deep level. We release a test set, named CATs publicly, for future research.},\narchivePrefix = {arXiv},\narxivId = {1911.11931},\nauthor = {Zhou, Xuhui and Zhang, Yue and Cui, Leyang and Huang, Dandan},\nbooktitle = {Association for the Advancement of Artificial Intelligence (AAAI)},\neprint = {1911.11931},\nfile = {:Users/shanest/Documents/Library/Zhou et al/Association for the Advancement of Artificial Intelligence (AAAI)/Zhou et al. - 2020 - Evaluating Commonsense in Pre-trained Language Models.pdf:pdf},\nkeywords = {dataset,method: new data,phenomenon: commonsense},\ntitle = {{Evaluating Commonsense in Pre-trained Language Models}},\nurl = {http://arxiv.org/abs/1911.11931},\nyear = {2020}\n}\n","author_short":["Zhou, X.","Zhang, Y.","Cui, L.","Huang, D."],"key":"Zhou2020","id":"Zhou2020","bibbaseid":"zhou-zhang-cui-huang-evaluatingcommonsenseinpretrainedlanguagemodels-2020","role":"author","urls":{"Paper":"http://arxiv.org/abs/1911.11931"},"keyword":["dataset","method: new data","phenomenon: commonsense"],"metadata":{"authorlinks":{}},"downloads":1},"bibtype":"inproceedings","biburl":"https://www.shane.st/teaching/575/win20/MachineLearning-interpretability.bib","creationDate":"2020-01-09T18:15:21.663Z","downloads":1,"keywords":["dataset","method: new data","phenomenon: commonsense"],"search_terms":["evaluating","commonsense","pre","trained","language","models","zhou","zhang","cui","huang"],"title":"Evaluating Commonsense in Pre-trained Language Models","year":2020,"dataSources":["okYcdTpf4JJ2zkj7A","mkMPR3Lfz5vmkF7ur","znj7izS5PeehdLR3G"]}