Teaching Machine Comprehension with Compositional Explanations. Ye, Q., Huang, X., Boschee, E., & Ren, X. In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 1599–1615, Online, November 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.findings-emnlp.145

Abstract: Advances in machine reading comprehension (MRC) rely heavily on the collection of large scale human-annotated examples in the form of (question, paragraph, answer) triples. In contrast, humans are typically able to generalize with only a few examples, relying on deeper underlying world knowledge, linguistic sophistication, and/or simply superior deductive powers. In this paper, we focus on "teaching" machines reading comprehension, using a small number of semi-structured explanations that explicitly inform machines why answer spans are correct. We extract structured variables and rules from explanations and compose neural module teachers that annotate instances for training downstream MRC models. We use learnable neural modules and soft logic to handle linguistic variation and overcome sparse coverage; the modules are jointly optimized with the MRC model to improve final performance. On the SQuAD dataset, our proposed method achieves 70.14% F1 score with supervision from 26 explanations, comparable to plain supervised learning using 1,100 labeled instances, yielding a 12x speed up.
@inproceedings{ye-etal-2020-teaching,
title = "Teaching Machine Comprehension with Compositional Explanations",
author = "Ye, Qinyuan and
Huang, Xiao and
Boschee, Elizabeth and
Ren, Xiang",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2020",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.findings-emnlp.145",
doi = "10.18653/v1/2020.findings-emnlp.145",
pages = "1599--1615",
abstract = "Advances in machine reading comprehension (MRC) rely heavily on the collection of large scale human-annotated examples in the form of (question, paragraph, answer) triples. In contrast, humans are typically able to generalize with only a few examples, relying on deeper underlying world knowledge, linguistic sophistication, and/or simply superior deductive powers. In this paper, we focus on {``}teaching{''} machines reading comprehension, using a small number of semi-structured explanations that explicitly inform machines why answer spans are correct. We extract structured variables and rules from explanations and compose neural module teachers that annotate instances for training downstream MRC models. We use learnable neural modules and soft logic to handle linguistic variation and overcome sparse coverage; the modules are jointly optimized with the MRC model to improve final performance. On the SQuAD dataset, our proposed method achieves 70.14{\%} F1 score with supervision from 26 explanations, comparable to plain supervised learning using 1,100 labeled instances, yielding a 12x speed up.",
}
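
Note: the abstract's central mechanism, replacing a rule's exact string matching with soft, embedding-based scores that are combined by soft logic, can be illustrated with a minimal sketch. This is not the authors' implementation; the embeddings, clause scores, and thresholds below are hypothetical stand-ins for illustration only.

# Minimal sketch (not the paper's code) of soft logic for rule matching:
# a rule clause is scored in [0, 1] instead of matched exactly, and clause
# scores are combined with a soft AND, so paraphrases can still fire a rule.
import numpy as np

def soft_fill(keyword_vec: np.ndarray, token_vecs: np.ndarray) -> float:
    """Score how well any paragraph token matches a rule keyword.

    keyword_vec: embedding of a keyword from an explanation (hypothetical).
    token_vecs:  (n_tokens, dim) embeddings of the paragraph tokens.
    Returns the max cosine similarity, clipped to [0, 1].
    """
    sims = token_vecs @ keyword_vec / (
        np.linalg.norm(token_vecs, axis=1) * np.linalg.norm(keyword_vec) + 1e-9
    )
    return float(np.clip(sims.max(), 0.0, 1.0))

def soft_and(*clause_scores: float) -> float:
    """Soft conjunction (product t-norm): all clauses must hold, softly."""
    return float(np.prod(clause_scores))

# Usage: a "teacher" fires on an unlabeled paragraph only if the combined
# soft score of its clauses clears a confidence threshold, in which case it
# emits a pseudo-labeled instance for training the downstream MRC model.
rng = np.random.default_rng(0)
keyword = rng.normal(size=50)          # stand-in embedding for a rule keyword
paragraph = rng.normal(size=(20, 50))  # stand-in embeddings for 20 tokens
score = soft_and(soft_fill(keyword, paragraph), 0.9)  # 0.9: another clause's score
if score > 0.5:                        # threshold is illustrative only
    print("rule fires; annotate this span as a pseudo-label")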