Can Neural Networks Understand Monotonicity Reasoning? Yanaka, H., Mineshima, K., Bekki, D., Inui, K., Sekine, S., Abzianidze, L., & Bos, J. In Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 31–40, Stroudsburg, PA, USA, 2019. Association for Computational Linguistics.
Paper: https://www.aclweb.org/anthology/W19-4804 · doi: 10.18653/v1/W19-4804
Abstract: Monotonicity reasoning is one of the important reasoning skills for any intelligent natural language inference (NLI) model in that it requires the ability to capture the interaction between lexical and syntactic structures. Since no test set has been developed for monotonicity reasoning with wide coverage, it is still unclear whether neural models can perform monotonicity reasoning in a proper way. To investigate this issue, we introduce the Monotonicity Entailment Dataset (MED). Performance by state-of-the-art NLI models on the new test set is substantially worse, under 55%, especially on downward reasoning. In addition, analysis using a monotonicity-driven data augmentation method showed that these models might be limited in their generalization ability in upward and downward reasoning.
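For readers unfamiliar with the distinction the abstract draws: in an upward-monotone context (e.g., under "some"), replacing a word with a more general one preserves entailment ("Some dogs ran" entails "Some animals ran"), while in a downward-monotone context (e.g., under "no") the inference runs toward more specific terms ("No animals ran" entails "No dogs ran"). The sketch below illustrates the idea behind monotonicity-driven data augmentation using a toy hypernym lexicon; it is not the authors' implementation, and the names (HYPERNYMS, augment) are hypothetical.

# A minimal sketch of monotonicity-driven augmentation, assuming a toy
# hypernym lexicon. Not the paper's method: real dataset construction
# involves full syntactic polarity marking, not first-token quantifier checks.
HYPERNYMS = {"dog": "animal", "ran": "moved"}  # word -> a more general term

# "some" is upward monotone in both arguments; "no" is downward in both.
# Quantifiers like "every" (downward in the restrictor, upward in the
# predicate) would need position-aware handling and are omitted here.
UPWARD = {"some"}
DOWNWARD = {"no"}

def augment(sentence):
    """Yield (premise, hypothesis, label) pairs by hypernym substitution."""
    tokens = sentence.lower().split()
    quantifier = tokens[0]
    for i, tok in enumerate(tokens):
        if tok in HYPERNYMS:
            general = " ".join(tokens[:i] + [HYPERNYMS[tok]] + tokens[i + 1:])
            if quantifier in UPWARD:
                # specific -> general is entailment in an upward context
                yield (sentence, general, "entailment")
            elif quantifier in DOWNWARD:
                # the direction flips under a downward operator
                yield (general, sentence, "entailment")

if __name__ == "__main__":
    print(list(augment("some dog ran")))
    # [('some dog ran', 'some animal ran', 'entailment'),
    #  ('some dog ran', 'some dog moved', 'entailment')]
    print(list(augment("no dog ran")))
    # [('no animal ran', 'no dog ran', 'entailment'),
    #  ('no dog moved', 'no dog ran', 'entailment')]

Pairs like these probe exactly the asymmetry the abstract reports: models trained mostly on upward-monotone examples tend to fail when the entailment direction flips.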
@inproceedings{Yanaka2019,
abstract = {Monotonicity reasoning is one of the important reasoning skills for any intelligent natural language inference (NLI) model in that it requires the ability to capture the interaction between lexical and syntactic structures. Since no test set has been developed for monotonicity reasoning with wide coverage, it is still unclear whether neural models can perform monotonicity reasoning in a proper way. To investigate this issue, we introduce the Monotonicity Entailment Dataset (MED). Performance by state-of-the-art NLI models on the new test set is substantially worse, under 55%, especially on downward reasoning. In addition, analysis using a monotonicity-driven data augmentation method showed that these models might be limited in their generalization ability in upward and downward reasoning.},
address = {Stroudsburg, PA, USA},
author = {Yanaka, Hitomi and Mineshima, Koji and Bekki, Daisuke and Inui, Kentaro and Sekine, Satoshi and Abzianidze, Lasha and Bos, Johan},
booktitle = {Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP},
doi = {10.18653/v1/W19-4804},
keywords = {dataset,method: new data},
pages = {31--40},
publisher = {Association for Computational Linguistics},
title = {{Can Neural Networks Understand Monotonicity Reasoning?}},
url = {https://www.aclweb.org/anthology/W19-4804},
year = {2019}
}
{"_id":"75v2TFWpg4pW77Pd8","bibbaseid":"yanaka-mineshima-bekki-inui-sekine-abzianidze-bos-canneuralnetworksunderstandmonotonicityreasoning-2019","authorIDs":[],"author_short":["Yanaka, H.","Mineshima, K.","Bekki, D.","Inui, K.","Sekine, S.","Abzianidze, L.","Bos, J."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","abstract":"Monotonicity reasoning is one of the important reasoning skills for any intelligent natural language inference (NLI) model in that it requires the ability to capture the interaction between lexical and syntactic structures. Since no test set has been developed for monotonicity reasoning with wide coverage, it is still unclear whether neural models can perform monotonicity reasoning in a proper way. To investigate this issue, we introduce the Monotonicity Entailment Dataset (MED). Performance by state-of-the-art NLI models on the new test set is substantially worse, under 55%, especially on downward reasoning. In addition, analysis using a monotonicity-driven data augmentation method showed that these models might be limited in their generalization ability in upward and downward reasoning.","address":"Stroudsburg, PA, USA","author":[{"propositions":[],"lastnames":["Yanaka"],"firstnames":["Hitomi"],"suffixes":[]},{"propositions":[],"lastnames":["Mineshima"],"firstnames":["Koji"],"suffixes":[]},{"propositions":[],"lastnames":["Bekki"],"firstnames":["Daisuke"],"suffixes":[]},{"propositions":[],"lastnames":["Inui"],"firstnames":["Kentaro"],"suffixes":[]},{"propositions":[],"lastnames":["Sekine"],"firstnames":["Satoshi"],"suffixes":[]},{"propositions":[],"lastnames":["Abzianidze"],"firstnames":["Lasha"],"suffixes":[]},{"propositions":[],"lastnames":["Bos"],"firstnames":["Johan"],"suffixes":[]}],"booktitle":"Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP","doi":"10.18653/v1/W19-4804","file":":Users/shanest/Documents/Library/Yanaka et al/Proceedings of the 2019 ACL Workshop BlackboxNLP Analyzing and Interpreting Neural Networks for NLP/Yanaka et al. - 2019 - Can Neural Networks Understand Monotonicity Reasoning.pdf:pdf","keywords":"dataset,method: new data","pages":"31–40","publisher":"Association for Computational Linguistics","title":"Can Neural Networks Understand Monotonicity Reasoning?","url":"https://www.aclweb.org/anthology/W19-4804","year":"2019","bibtex":"@inproceedings{Yanaka2019,\nabstract = {Monotonicity reasoning is one of the important reasoning skills for any intelligent natural language inference (NLI) model in that it requires the ability to capture the interaction between lexical and syntactic structures. Since no test set has been developed for monotonicity reasoning with wide coverage, it is still unclear whether neural models can perform monotonicity reasoning in a proper way. To investigate this issue, we introduce the Monotonicity Entailment Dataset (MED). Performance by state-of-the-art NLI models on the new test set is substantially worse, under 55%, especially on downward reasoning. 
In addition, analysis using a monotonicity-driven data augmentation method showed that these models might be limited in their generalization ability in upward and downward reasoning.},\naddress = {Stroudsburg, PA, USA},\nauthor = {Yanaka, Hitomi and Mineshima, Koji and Bekki, Daisuke and Inui, Kentaro and Sekine, Satoshi and Abzianidze, Lasha and Bos, Johan},\nbooktitle = {Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP},\ndoi = {10.18653/v1/W19-4804},\nfile = {:Users/shanest/Documents/Library/Yanaka et al/Proceedings of the 2019 ACL Workshop BlackboxNLP Analyzing and Interpreting Neural Networks for NLP/Yanaka et al. - 2019 - Can Neural Networks Understand Monotonicity Reasoning.pdf:pdf},\nkeywords = {dataset,method: new data},\npages = {31--40},\npublisher = {Association for Computational Linguistics},\ntitle = {{Can Neural Networks Understand Monotonicity Reasoning?}},\nurl = {https://www.aclweb.org/anthology/W19-4804},\nyear = {2019}\n}\n","author_short":["Yanaka, H.","Mineshima, K.","Bekki, D.","Inui, K.","Sekine, S.","Abzianidze, L.","Bos, J."],"key":"Yanaka2019","id":"Yanaka2019","bibbaseid":"yanaka-mineshima-bekki-inui-sekine-abzianidze-bos-canneuralnetworksunderstandmonotonicityreasoning-2019","role":"author","urls":{"Paper":"https://www.aclweb.org/anthology/W19-4804"},"keyword":["dataset","method: new data"],"metadata":{"authorlinks":{}},"downloads":0},"bibtype":"inproceedings","biburl":"https://www.shane.st/teaching/575/win20/MachineLearning-interpretability.bib","creationDate":"2020-01-05T04:04:02.880Z","downloads":0,"keywords":["dataset","method: new data"],"search_terms":["neural","networks","understand","monotonicity","reasoning","yanaka","mineshima","bekki","inui","sekine","abzianidze","bos"],"title":"Can Neural Networks Understand Monotonicity Reasoning?","year":2019,"dataSources":["okYcdTpf4JJ2zkj7A","znj7izS5PeehdLR3G"]}