Can Transformers Jump Around Right in Natural Language? Assessing Performance Transfer from SCAN. Chaabouni, R., Dessì, R., & Kharitonov, E. In Bastings, J., Belinkov, Y., Dupoux, E., Giulianelli, M., Hupkes, D., Pinter, Y., & Sajjad, H., editors, Proceedings of the Fourth BlackboxNLP Workshop on Analyzing and Interpreting Neural Networks for NLP, pages 136–148, Punta Cana, Dominican Republic, November 2021. Association for Computational Linguistics. doi: 10.18653/v1/2021.blackboxnlp-1.9. Paper: https://aclanthology.org/2021.blackboxnlp-1.9

Abstract: Despite their failure to solve the compositional SCAN dataset, seq2seq architectures still achieve astonishing success on more practical tasks. This observation pushes us to question the usefulness of SCAN-style compositional generalization in realistic NLP tasks. In this work, we study the benefit that such compositionality brings about to several machine translation tasks. We present several focused modifications of Transformer that greatly improve generalization capabilities on SCAN and select one that remains on par with a vanilla Transformer on a standard machine translation (MT) task. Next, we study its performance in low-resource settings and on a newly introduced distribution-shifted English-French translation task. Overall, we find that improvements of a SCAN-capable model do not directly transfer to the resource-rich MT setup. In contrast, in the low-resource setup, general modifications lead to an improvement of up to 13.1% BLEU score w.r.t. a vanilla Transformer. Similarly, an improvement of 14% in an accuracy-based metric is achieved in the introduced compositional English-French translation task. This provides experimental evidence that the compositional generalization assessed in SCAN is particularly useful in resource-starved and domain-shifted scenarios.
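For context on the title: SCAN (Lake and Baroni, 2018) maps synthetic navigation commands to action sequences, and its compositional splits hold out specific combinations, e.g. "jump" is seen only in isolation during training, or "around right" is never seen even though "around left" is. The following minimal Python sketch (illustrative only, not the paper's code) interprets the small command fragment named in the title:

PRIMITIVES = {"jump": "I_JUMP", "walk": "I_WALK", "run": "I_RUN", "look": "I_LOOK"}

def interpret(command: str) -> list[str]:
    # Translate a SCAN-style command into its action sequence.
    tokens = command.split()
    verb = PRIMITIVES[tokens[0]]
    if tokens[1:] == ["around", "right"]:
        # "V around right": turn right then do V, repeated four times (a full circle).
        return ["I_TURN_RIGHT", verb] * 4
    if tokens[1:] == ["twice"]:
        return [verb] * 2
    if not tokens[1:]:
        return [verb]
    raise ValueError(f"fragment does not cover: {command}")

# "jump around right" -> I_TURN_RIGHT I_JUMP, four times over
assert interpret("jump around right") == ["I_TURN_RIGHT", "I_JUMP"] * 4

The compositional challenge the paper builds on is that a model trained on such a split must apply modifiers like "around right" to "jump" zero-shot, despite never having seen that combination.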
@inproceedings{chaabouniCanTransformersJump2021,
title = {Can {{Transformers Jump Around Right}} in {{Natural Language}}? {{Assessing Performance Transfer}} from {{SCAN}}},
shorttitle = {Can {{Transformers Jump Around Right}} in {{Natural Language}}?},
booktitle = {Proceedings of the {{Fourth BlackboxNLP Workshop}} on {{Analyzing}} and {{Interpreting Neural Networks}} for {{NLP}}},
author = {Chaabouni, Rahma and Dess{\`i}, Roberto and Kharitonov, Eugene},
editor = {Bastings, Jasmijn and Belinkov, Yonatan and Dupoux, Emmanuel and Giulianelli, Mario and Hupkes, Dieuwke and Pinter, Yuval and Sajjad, Hassan},
year = {2021},
month = nov,
pages = {136--148},
publisher = {Association for Computational Linguistics},
address = {Punta Cana, Dominican Republic},
doi = {10.18653/v1/2021.blackboxnlp-1.9},
url = {https://aclanthology.org/2021.blackboxnlp-1.9},
urldate = {2024-03-19},
abstract = {Despite their failure to solve the compositional SCAN dataset, seq2seq architectures still achieve astonishing success on more practical tasks. This observation pushes us to question the usefulness of SCAN-style compositional generalization in realistic NLP tasks. In this work, we study the benefit that such compositionality brings about to several machine translation tasks. We present several focused modifications of Transformer that greatly improve generalization capabilities on SCAN and select one that remains on par with a vanilla Transformer on a standard machine translation (MT) task. Next, we study its performance in low-resource settings and on a newly introduced distribution-shifted English-French translation task. Overall, we find that improvements of a SCAN-capable model do not directly transfer to the resource-rich MT setup. In contrast, in the low-resource setup, general modifications lead to an improvement of up to 13.1\% BLEU score w.r.t. a vanilla Transformer. Similarly, an improvement of 14\% in an accuracy-based metric is achieved in the introduced compositional English-French translation task. This provides experimental evidence that the compositional generalization assessed in SCAN is particularly useful in resource-starved and domain-shifted scenarios.},
}
{"_id":"4WSADEgt4smAgaPEx","bibbaseid":"chaabouni-dessi-kharitonov-cantransformersjumparoundrightinnaturallanguageassessingperformancetransferfromscan-2021","author_short":["Chaabouni, R.","Dessì, Roberto","Kharitonov, E."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","title":"Can Transformers Jump Around Right in Natural Language? Assessing Performance Transfer from SCAN","shorttitle":"Can Transformers Jump Around Right in Natural Language?","booktitle":"Proceedings of the Fourth BlackboxNLP Workshop on Analyzing and Interpreting Neural Networks for NLP","author":[{"propositions":[],"lastnames":["Chaabouni"],"firstnames":["Rahma"],"suffixes":[]},{"firstnames":[],"propositions":[],"lastnames":["Dessì, Roberto"],"suffixes":[]},{"propositions":[],"lastnames":["Kharitonov"],"firstnames":["Eugene"],"suffixes":[]}],"editor":[{"propositions":[],"lastnames":["Bastings"],"firstnames":["Jasmijn"],"suffixes":[]},{"propositions":[],"lastnames":["Belinkov"],"firstnames":["Yonatan"],"suffixes":[]},{"propositions":[],"lastnames":["Dupoux"],"firstnames":["Emmanuel"],"suffixes":[]},{"propositions":[],"lastnames":["Giulianelli"],"firstnames":["Mario"],"suffixes":[]},{"propositions":[],"lastnames":["Hupkes"],"firstnames":["Dieuwke"],"suffixes":[]},{"propositions":[],"lastnames":["Pinter"],"firstnames":["Yuval"],"suffixes":[]},{"propositions":[],"lastnames":["Sajjad"],"firstnames":["Hassan"],"suffixes":[]}],"year":"2021","month":"November","pages":"136–148","publisher":"Association for Computational Linguistics","address":"Punta Cana, Dominican Republic","doi":"10.18653/v1/2021.blackboxnlp-1.9","url":"https://aclanthology.org/2021.blackboxnlp-1.9","urldate":"2024-03-19","abstract":"Despite their failure to solve the compositional SCAN dataset, seq2seq architectures still achieve astonishing success on more practical tasks. This observation pushes us to question the usefulness of SCAN-style compositional generalization in realistic NLP tasks. In this work, we study the benefit that such compositionality brings about to several machine translation tasks. We present several focused modifications of Transformer that greatly improve generalization capabilities on SCAN and select one that remains on par with a vanilla Transformer on a standard machine translation (MT) task. Next, we study its performance in low-resource settings and on a newly introduced distribution-shifted English-French translation task. Overall, we find that improvements of a SCAN-capable model do not directly transfer to the resource-rich MT setup. In contrast, in the low-resource setup, general modifications lead to an improvement of up to 13.1% BLEU score w.r.t. a vanilla Transformer. Similarly, an improvement of 14% in an accuracy-based metric is achieved in the introduced compositional English-French translation task. This provides experimental evidence that the compositional generalization assessed in SCAN is particularly useful in resource-starved and domain-shifted scenarios.","file":"/Users/shanest/sync/library/Chaabouni et al/2021/Chaabouni et al. - 2021 - Can Transformers Jump Around Right in Natural Lang.pdf","bibtex":"@inproceedings{chaabouniCanTransformersJump2021,\n title = {Can {{Transformers Jump Around Right}} in {{Natural Language}}? 
{{Assessing Performance Transfer}} from {{SCAN}}},\n shorttitle = {Can {{Transformers Jump Around Right}} in {{Natural Language}}?},\n booktitle = {Proceedings of the {{Fourth BlackboxNLP Workshop}} on {{Analyzing}} and {{Interpreting Neural Networks}} for {{NLP}}},\n author = {Chaabouni, Rahma and Dess{\\`i}, Roberto and Kharitonov, Eugene},\n editor = {Bastings, Jasmijn and Belinkov, Yonatan and Dupoux, Emmanuel and Giulianelli, Mario and Hupkes, Dieuwke and Pinter, Yuval and Sajjad, Hassan},\n year = {2021},\n month = nov,\n pages = {136--148},\n publisher = {Association for Computational Linguistics},\n address = {Punta Cana, Dominican Republic},\n doi = {10.18653/v1/2021.blackboxnlp-1.9},\n url = {https://aclanthology.org/2021.blackboxnlp-1.9},\n urldate = {2024-03-19},\n abstract = {Despite their failure to solve the compositional SCAN dataset, seq2seq architectures still achieve astonishing success on more practical tasks. This observation pushes us to question the usefulness of SCAN-style compositional generalization in realistic NLP tasks. In this work, we study the benefit that such compositionality brings about to several machine translation tasks. We present several focused modifications of Transformer that greatly improve generalization capabilities on SCAN and select one that remains on par with a vanilla Transformer on a standard machine translation (MT) task. Next, we study its performance in low-resource settings and on a newly introduced distribution-shifted English-French translation task. Overall, we find that improvements of a SCAN-capable model do not directly transfer to the resource-rich MT setup. In contrast, in the low-resource setup, general modifications lead to an improvement of up to 13.1\\% BLEU score w.r.t. a vanilla Transformer. Similarly, an improvement of 14\\% in an accuracy-based metric is achieved in the introduced compositional English-French translation task. This provides experimental evidence that the compositional generalization assessed in SCAN is particularly useful in resource-starved and domain-shifted scenarios.},\n file = {/Users/shanest/sync/library/Chaabouni et al/2021/Chaabouni et al. - 2021 - Can Transformers Jump Around Right in Natural Lang.pdf}\n}\n\n","author_short":["Chaabouni, R.","Dessì, Roberto","Kharitonov, E."],"editor_short":["Bastings, J.","Belinkov, Y.","Dupoux, E.","Giulianelli, M.","Hupkes, D.","Pinter, Y.","Sajjad, H."],"key":"chaabouniCanTransformersJump2021","id":"chaabouniCanTransformersJump2021","bibbaseid":"chaabouni-dessi-kharitonov-cantransformersjumparoundrightinnaturallanguageassessingperformancetransferfromscan-2021","role":"author","urls":{"Paper":"https://aclanthology.org/2021.blackboxnlp-1.9"},"metadata":{"authorlinks":{}}},"bibtype":"inproceedings","biburl":"https://www.shane.st/teaching/575/spr24/575_Compositionality.bib","dataSources":["TPZs4iPqAgE5a8mjq"],"keywords":[],"search_terms":["transformers","jump","around","right","natural","language","assessing","performance","transfer","scan","chaabouni","dessì","kharitonov"],"title":"Can Transformers Jump Around Right in Natural Language? Assessing Performance Transfer from SCAN","year":2021}