The Devil Is in the Detail: Simple Tricks Improve Systematic Generalization of Transformers. Csordás, R., Irie, K., & Schmidhuber, J. In Moens, M., Huang, X., Specia, L., & Yih, S. W., editors, Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 619–634, Online and Punta Cana, Dominican Republic, November 2021. Association for Computational Linguistics.

Abstract: Recently, many datasets have been proposed to test the systematic generalization ability of neural networks. The companion baseline Transformers, typically trained with default hyper-parameters from standard tasks, are shown to fail dramatically. Here we demonstrate that by revisiting model configurations as basic as scaling of embeddings, early stopping, relative positional embedding, and Universal Transformer variants, we can drastically improve the performance of Transformers on systematic generalization. We report improvements on five popular datasets: SCAN, CFQ, PCFG, COGS, and Mathematics dataset. Our models improve accuracy from 50% to 85% on the PCFG productivity split, and from 35% to 81% on COGS. On SCAN, relative positional embedding largely mitigates the EOS decision problem (Newman et al., 2020), yielding 100% accuracy on the length split with a cutoff at 26. Importantly, performance differences between these models are typically invisible on the IID data split. This calls for proper generalization validation sets for developing neural networks that generalize systematically. We publicly release the code to reproduce our results.
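Note: the "simple tricks" the abstract refers to are configuration choices rather than new mechanisms. As a rough illustration only (not the authors' implementation; the class and argument names below are made up for this sketch), the embedding-scaling choice amounts to whether token embeddings are multiplied by sqrt(d_model), the factor from the original Transformer, before entering the encoder:

import math
import torch
import torch.nn as nn

class ScaledEmbedding(nn.Module):
    """Token embedding with optional sqrt(d_model) scaling.

    Minimal sketch of the kind of configuration detail the paper revisits;
    `scale` toggles the factor from Vaswani et al. (2017). Hypothetical
    example, not the released code.
    """
    def __init__(self, vocab_size: int, d_model: int, scale: bool = True):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, d_model)
        self.factor = math.sqrt(d_model) if scale else 1.0

    def forward(self, token_ids: torch.Tensor) -> torch.Tensor:
        # (batch, seq_len) -> (batch, seq_len, d_model)
        return self.embed(token_ids) * self.factor

Whether this factor is applied (and analogous choices about positional embeddings and early stopping) is exactly the kind of detail the paper shows can be invisible on IID splits yet decisive on systematic-generalization splits.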
@inproceedings{csordasDevilDetailSimple2021,
title = {The {{Devil}} Is in the {{Detail}}: {{Simple Tricks Improve Systematic Generalization}} of {{Transformers}}},
shorttitle = {The {{Devil}} Is in the {{Detail}}},
booktitle = {Proceedings of the 2021 {{Conference}} on {{Empirical Methods}} in {{Natural Language Processing}}},
author = {Csord{\'a}s, R{\'o}bert and Irie, Kazuki and Schmidhuber, Juergen},
editor = {Moens, Marie-Francine and Huang, Xuanjing and Specia, Lucia and Yih, Scott Wen-tau},
year = {2021},
month = nov,
pages = {619--634},
publisher = {Association for Computational Linguistics},
address = {Online and Punta Cana, Dominican Republic},
doi = {10.18653/v1/2021.emnlp-main.49},
url = {https://aclanthology.org/2021.emnlp-main.49},
urldate = {2024-03-19},
abstract = {Recently, many datasets have been proposed to test the systematic generalization ability of neural networks. The companion baseline Transformers, typically trained with default hyper-parameters from standard tasks, are shown to fail dramatically. Here we demonstrate that by revisiting model configurations as basic as scaling of embeddings, early stopping, relative positional embedding, and Universal Transformer variants, we can drastically improve the performance of Transformers on systematic generalization. We report improvements on five popular datasets: SCAN, CFQ, PCFG, COGS, and Mathematics dataset. Our models improve accuracy from 50\% to 85\% on the PCFG productivity split, and from 35\% to 81\% on COGS. On SCAN, relative positional embedding largely mitigates the EOS decision problem (Newman et al., 2020), yielding 100\% accuracy on the length split with a cutoff at 26. Importantly, performance differences between these models are typically invisible on the IID data split. This calls for proper generalization validation sets for developing neural networks that generalize systematically. We publicly release the code to reproduce our results.},
}
{"_id":"tktocZdJYfkJAvZDF","bibbaseid":"csords-irie-schmidhuber-thedevilisinthedetailsimpletricksimprovesystematicgeneralizationoftransformers-2021","author_short":["Csordás, R.","Irie, K.","Schmidhuber, J."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","title":"The Devil Is in the Detail: Simple Tricks Improve Systematic Generalization of Transformers","shorttitle":"The Devil Is in the Detail","booktitle":"Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing","author":[{"propositions":[],"lastnames":["Csordás"],"firstnames":["Róbert"],"suffixes":[]},{"propositions":[],"lastnames":["Irie"],"firstnames":["Kazuki"],"suffixes":[]},{"propositions":[],"lastnames":["Schmidhuber"],"firstnames":["Juergen"],"suffixes":[]}],"editor":[{"propositions":[],"lastnames":["Moens"],"firstnames":["Marie-Francine"],"suffixes":[]},{"propositions":[],"lastnames":["Huang"],"firstnames":["Xuanjing"],"suffixes":[]},{"propositions":[],"lastnames":["Specia"],"firstnames":["Lucia"],"suffixes":[]},{"propositions":[],"lastnames":["Yih"],"firstnames":["Scott","Wen-tau"],"suffixes":[]}],"year":"2021","month":"November","pages":"619–634","publisher":"Association for Computational Linguistics","address":"Online and Punta Cana, Dominican Republic","doi":"10.18653/v1/2021.emnlp-main.49","url":"https://aclanthology.org/2021.emnlp-main.49","urldate":"2024-03-19","abstract":"Recently, many datasets have been proposed to test the systematic generalization ability of neural networks. The companion baseline Transformers, typically trained with default hyper-parameters from standard tasks, are shown to fail dramatically. Here we demonstrate that by revisiting model configurations as basic as scaling of embeddings, early stopping, relative positional embedding, and Universal Transformer variants, we can drastically improve the performance of Transformers on systematic generalization. We report improvements on five popular datasets: SCAN, CFQ, PCFG, COGS, and Mathematics dataset. Our models improve accuracy from 50% to 85% on the PCFG productivity split, and from 35% to 81% on COGS. On SCAN, relative positional embedding largely mitigates the EOS decision problem (Newman et al., 2020), yielding 100% accuracy on the length split with a cutoff at 26. Importantly, performance differences between these models are typically invisible on the IID data split. This calls for proper generalization validation sets for developing neural networks that generalize systematically. We publicly release the code to reproduce our results.","file":"/Users/shanest/sync/library/Csordás et al/2021/Csordás et al. 
- 2021 - The Devil is in the Detail Simple Tricks Improve .pdf","bibtex":"@inproceedings{csordasDevilDetailSimple2021,\n title = {The {{Devil}} Is in the {{Detail}}: {{Simple Tricks Improve Systematic Generalization}} of {{Transformers}}},\n shorttitle = {The {{Devil}} Is in the {{Detail}}},\n booktitle = {Proceedings of the 2021 {{Conference}} on {{Empirical Methods}} in {{Natural Language Processing}}},\n author = {Csord{\\'a}s, R{\\'o}bert and Irie, Kazuki and Schmidhuber, Juergen},\n editor = {Moens, Marie-Francine and Huang, Xuanjing and Specia, Lucia and Yih, Scott Wen-tau},\n year = {2021},\n month = nov,\n pages = {619--634},\n publisher = {Association for Computational Linguistics},\n address = {Online and Punta Cana, Dominican Republic},\n doi = {10.18653/v1/2021.emnlp-main.49},\n url = {https://aclanthology.org/2021.emnlp-main.49},\n urldate = {2024-03-19},\n abstract = {Recently, many datasets have been proposed to test the systematic generalization ability of neural networks. The companion baseline Transformers, typically trained with default hyper-parameters from standard tasks, are shown to fail dramatically. Here we demonstrate that by revisiting model configurations as basic as scaling of embeddings, early stopping, relative positional embedding, and Universal Transformer variants, we can drastically improve the performance of Transformers on systematic generalization. We report improvements on five popular datasets: SCAN, CFQ, PCFG, COGS, and Mathematics dataset. Our models improve accuracy from 50\\% to 85\\% on the PCFG productivity split, and from 35\\% to 81\\% on COGS. On SCAN, relative positional embedding largely mitigates the EOS decision problem (Newman et al., 2020), yielding 100\\% accuracy on the length split with a cutoff at 26. Importantly, performance differences between these models are typically invisible on the IID data split. This calls for proper generalization validation sets for developing neural networks that generalize systematically. We publicly release the code to reproduce our results.},\n file = {/Users/shanest/sync/library/Csordás et al/2021/Csordás et al. - 2021 - The Devil is in the Detail Simple Tricks Improve .pdf}\n}\n\n","author_short":["Csordás, R.","Irie, K.","Schmidhuber, J."],"editor_short":["Moens, M.","Huang, X.","Specia, L.","Yih, S. W."],"key":"csordasDevilDetailSimple2021","id":"csordasDevilDetailSimple2021","bibbaseid":"csords-irie-schmidhuber-thedevilisinthedetailsimpletricksimprovesystematicgeneralizationoftransformers-2021","role":"author","urls":{"Paper":"https://aclanthology.org/2021.emnlp-main.49"},"metadata":{"authorlinks":{}}},"bibtype":"inproceedings","biburl":"https://www.shane.st/teaching/575/spr24/575_Compositionality.bib","dataSources":["TPZs4iPqAgE5a8mjq"],"keywords":[],"search_terms":["devil","detail","simple","tricks","improve","systematic","generalization","transformers","csordás","irie","schmidhuber"],"title":"The Devil Is in the Detail: Simple Tricks Improve Systematic Generalization of Transformers","year":2021}