Explain Yourself! Leveraging Language Models for Commonsense Reasoning. Rajani, N. F., McCann, B., Xiong, C., & Socher, R., 2019.

Abstract: Deep learning models perform poorly on tasks that require commonsense reasoning, which often necessitates some form of world-knowledge or reasoning over information not immediately present in the input. We collect human explanations for commonsense reasoning in the form of natural language sequences and highlighted annotations in a new dataset called Common Sense Explanations (CoS-E). We use CoS-E to train language models to automatically generate explanations that can be used during training and inference in a novel Commonsense Auto-Generated Explanation (CAGE) framework. CAGE improves the state-of-the-art by 10% on the challenging CommonsenseQA task. We further study commonsense reasoning in DNNs using both human and auto-generated explanations including transfer to out-of-domain tasks. Empirical results indicate that we can effectively leverage language models for commonsense reasoning.
@Article{Rajani2019,
author = {Rajani, Nazneen Fatema and McCann, Bryan and Xiong, Caiming and Socher, Richard},
title = {Explain Yourself! Leveraging Language Models for Commonsense Reasoning},
year = {2019},
abstract = {Deep learning models perform poorly on tasks that require commonsense reasoning, which often necessitates some form of world-knowledge or reasoning over information not immediately present in the input. We collect human explanations for commonsense reasoning in the form of natural language sequences and highlighted annotations in a new dataset called Common Sense Explanations (CoS-E). We use CoS-E to train language models to automatically generate explanations that can be used during training and inference in a novel Commonsense Auto-Generated Explanation (CAGE) framework. CAGE improves the state-of-the-art by 10\% on the challenging CommonsenseQA task. We further study commonsense reasoning in DNNs using both human and auto-generated explanations including transfer to out-of-domain tasks. Empirical results indicate that we can effectively leverage language models for commonsense reasoning.}}
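
The abstract describes a two-stage CAGE pipeline: a language model fine-tuned on CoS-E first generates a natural-language explanation for a question, and a classifier then conditions on the question together with that explanation when choosing an answer. Below is a minimal sketch of how such an explain-then-predict pipeline might be wired up, assuming the Hugging Face transformers library, GPT-2 standing in for the (already fine-tuned) explanation generator, and BERT as the multiple-choice classifier; the model choices, the prompt format, and the generate_explanation/predict helpers are illustrative assumptions, not the authors' exact implementation.

```python
import torch
from transformers import (
    BertForMultipleChoice,
    BertTokenizer,
    GPT2LMHeadModel,
    GPT2Tokenizer,
)

# Stage 1: explanation generator. In the CAGE setting this LM would be
# fine-tuned on CoS-E; here an off-the-shelf GPT-2 stands in for it.
gen_tok = GPT2Tokenizer.from_pretrained("gpt2")
gen_lm = GPT2LMHeadModel.from_pretrained("gpt2")

def generate_explanation(question, choices):
    # Prompt format is an assumption for illustration only.
    prompt = (f"{question} The choices are {', '.join(choices)}. "
              "My commonsense tells me that")
    inputs = gen_tok(prompt, return_tensors="pt")
    out = gen_lm.generate(
        **inputs,
        max_new_tokens=30,
        do_sample=False,
        pad_token_id=gen_tok.eos_token_id,
    )
    text = gen_tok.decode(out[0], skip_special_tokens=True)
    return text[len(prompt):].strip()  # keep only the generated continuation

# Stage 2: answer classifier that reads question + explanation + each choice.
clf_tok = BertTokenizer.from_pretrained("bert-base-uncased")
clf = BertForMultipleChoice.from_pretrained("bert-base-uncased")

def predict(question, choices):
    explanation = generate_explanation(question, choices)
    context = [f"{question} {explanation}"] * len(choices)  # one copy per choice
    enc = clf_tok(context, choices, return_tensors="pt",
                  padding=True, truncation=True)
    # BertForMultipleChoice expects tensors of shape (batch, num_choices, seq_len).
    enc = {k: v.unsqueeze(0) for k, v in enc.items()}
    with torch.no_grad():
        logits = clf(**enc).logits  # shape: (1, num_choices)
    return choices[logits.argmax(dim=-1).item()]

print(predict("Where would you put a pillow before going to sleep?",
              ["bed", "oven", "river"]))
```

Because the explanation is generated before the answer is chosen, the same generated text can be fed to the classifier both at training time and at inference time, which is the property the abstract highlights.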