On Compositional Generalization of Neural Machine Translation. Li, Y., Yin, Y., Chen, Y., & Zhang, Y. In Zong, C., Xia, F., Li, W., & Navigli, R., editors, Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 4767–4780, Online, August, 2021. Association for Computational Linguistics.
Paper doi abstract bibtex Modern neural machine translation (NMT) models have achieved competitive performance in standard benchmarks such as WMT. However, there still exist significant issues such as robustness, domain generalization, etc. In this paper, we study NMT models from the perspective of compositional generalization by building a benchmark dataset, CoGnition, consisting of 216k clean and consistent sentence pairs. We quantitatively analyze effects of various factors using compound translation error rate, then demonstrate that the NMT model fails badly on compositional generalization, although it performs remarkably well under traditional metrics.
@inproceedings{liCompositionalGeneralizationNeural2021,
  title     = {On Compositional Generalization of Neural Machine Translation},
  booktitle = {Proceedings of the 59th Annual Meeting of the {Association for Computational Linguistics} and the 11th {International Joint Conference on Natural Language Processing} ({Volume} 1: Long Papers)},
  author    = {Li, Yafu and Yin, Yongjing and Chen, Yulong and Zhang, Yue},
  editor    = {Zong, Chengqing and Xia, Fei and Li, Wenjie and Navigli, Roberto},
  year      = {2021},
  month     = aug,
  pages     = {4767--4780},
  publisher = {Association for Computational Linguistics},
  address   = {Online},
  doi       = {10.18653/v1/2021.acl-long.368},
  url       = {https://aclanthology.org/2021.acl-long.368},
  urldate   = {2024-03-19},
  abstract  = {Modern neural machine translation (NMT) models have achieved competitive performance in standard benchmarks such as WMT. However, there still exist significant issues such as robustness, domain generalization, etc. In this paper, we study NMT models from the perspective of compositional generalization by building a benchmark dataset, CoGnition, consisting of 216k clean and consistent sentence pairs. We quantitatively analyze effects of various factors using compound translation error rate, then demonstrate that the NMT model fails badly on compositional generalization, although it performs remarkably well under traditional metrics.},
  file      = {/Users/shanest/sync/library/Li et al/2021/Li et al. - 2021 - On Compositional Generalization of Neural Machine .pdf}
}
Downloads: 0
{"_id":"CPekTEtokCdowygtz","bibbaseid":"li-yin-chen-zhang-oncompositionalgeneralizationofneuralmachinetranslation-2021","author_short":["Li, Y.","Yin, Y.","Chen, Y.","Zhang, Y."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","title":"On Compositional Generalization of Neural Machine Translation","booktitle":"Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)","author":[{"propositions":[],"lastnames":["Li"],"firstnames":["Yafu"],"suffixes":[]},{"propositions":[],"lastnames":["Yin"],"firstnames":["Yongjing"],"suffixes":[]},{"propositions":[],"lastnames":["Chen"],"firstnames":["Yulong"],"suffixes":[]},{"propositions":[],"lastnames":["Zhang"],"firstnames":["Yue"],"suffixes":[]}],"editor":[{"propositions":[],"lastnames":["Zong"],"firstnames":["Chengqing"],"suffixes":[]},{"propositions":[],"lastnames":["Xia"],"firstnames":["Fei"],"suffixes":[]},{"propositions":[],"lastnames":["Li"],"firstnames":["Wenjie"],"suffixes":[]},{"propositions":[],"lastnames":["Navigli"],"firstnames":["Roberto"],"suffixes":[]}],"year":"2021","month":"August","pages":"4767–4780","publisher":"Association for Computational Linguistics","address":"Online","doi":"10.18653/v1/2021.acl-long.368","url":"https://aclanthology.org/2021.acl-long.368","urldate":"2024-03-19","abstract":"Modern neural machine translation (NMT) models have achieved competitive performance in standard benchmarks such as WMT. However, there still exist significant issues such as robustness, domain generalization, etc. In this paper, we study NMT models from the perspective of compositional generalization by building a benchmark dataset, CoGnition, consisting of 216k clean and consistent sentence pairs. 
We quantitatively analyze effects of various factors using compound translation error rate, then demonstrate that the NMT model fails badly on compositional generalization, although it performs remarkably well under traditional metrics.","file":"/Users/shanest/sync/library/Li et al/2021/Li et al. - 2021 - On Compositional Generalization of Neural Machine .pdf","bibtex":"@inproceedings{liCompositionalGeneralizationNeural2021,\n title = {On {{Compositional Generalization}} of {{Neural Machine Translation}}},\n booktitle = {Proceedings of the 59th {{Annual Meeting}} of the {{Association}} for {{Computational Linguistics}} and the 11th {{International Joint Conference}} on {{Natural Language Processing}} ({{Volume}} 1: {{Long Papers}})},\n author = {Li, Yafu and Yin, Yongjing and Chen, Yulong and Zhang, Yue},\n editor = {Zong, Chengqing and Xia, Fei and Li, Wenjie and Navigli, Roberto},\n year = {2021},\n month = aug,\n pages = {4767--4780},\n publisher = {Association for Computational Linguistics},\n address = {Online},\n doi = {10.18653/v1/2021.acl-long.368},\n url = {https://aclanthology.org/2021.acl-long.368},\n urldate = {2024-03-19},\n abstract = {Modern neural machine translation (NMT) models have achieved competitive performance in standard benchmarks such as WMT. However, there still exist significant issues such as robustness, domain generalization, etc. In this paper, we study NMT models from the perspective of compositional generalization by building a benchmark dataset, CoGnition, consisting of 216k clean and consistent sentence pairs. We quantitatively analyze effects of various factors using compound translation error rate, then demonstrate that the NMT model fails badly on compositional generalization, although it performs remarkably well under traditional metrics.},\n file = {/Users/shanest/sync/library/Li et al/2021/Li et al. 
- 2021 - On Compositional Generalization of Neural Machine .pdf}\n}\n\n","author_short":["Li, Y.","Yin, Y.","Chen, Y.","Zhang, Y."],"editor_short":["Zong, C.","Xia, F.","Li, W.","Navigli, R."],"key":"liCompositionalGeneralizationNeural2021","id":"liCompositionalGeneralizationNeural2021","bibbaseid":"li-yin-chen-zhang-oncompositionalgeneralizationofneuralmachinetranslation-2021","role":"author","urls":{"Paper":"https://aclanthology.org/2021.acl-long.368"},"metadata":{"authorlinks":{}}},"bibtype":"inproceedings","biburl":"https://www.shane.st/teaching/575/spr24/575_Compositionality.bib","dataSources":["TPZs4iPqAgE5a8mjq"],"keywords":[],"search_terms":["compositional","generalization","neural","machine","translation","li","yin","chen","zhang"],"title":"On Compositional Generalization of Neural Machine Translation","year":2021}