How Do In-Context Examples Affect Compositional Generalization?. An, S., Lin, Z., Fu, Q., Chen, B., Zheng, N., Lou, J., & Zhang, D. In Rogers, A., Boyd-Graber, J., & Okazaki, N., editors, Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 11027–11052, Toronto, Canada, July, 2023. Association for Computational Linguistics.
Abstract: Compositional generalization–understanding unseen combinations of seen primitives–is an essential reasoning capability in human intelligence. The AI community mainly studies this capability by fine-tuning neural networks on lots of training samples, while it is still unclear whether and how in-context learning–the prevailing few-shot paradigm based on large language models–exhibits compositional generalization. In this paper, we present CoFe, a test suite to investigate in-context compositional generalization. We find that the compositional generalization performance can be easily affected by the selection of in-context examples, thus raising the research question what the key factors are to make good in-context examples for compositional generalization. We study three potential factors: similarity, diversity and complexity. Our systematic experiments indicate that in-context examples should be structurally similar to the test case, diverse from each other, and individually simple. Furthermore, two strong limitations are observed: in-context compositional generalization on fictional words is much weaker than that on commonly used ones; it is still critical that the in-context examples should cover required linguistic structures, even though the backbone model has been pre-trained on large corpus. We hope our analysis would facilitate the understanding and utilization of in-context learning paradigm.
@inproceedings{anHowInContextExamples2023,
  title     = {How Do {In-Context} Examples Affect Compositional Generalization?},
  booktitle = {Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  author    = {An, Shengnan and Lin, Zeqi and Fu, Qiang and Chen, Bei and Zheng, Nanning and Lou, Jian-Guang and Zhang, Dongmei},
  editor    = {Rogers, Anna and Boyd-Graber, Jordan and Okazaki, Naoaki},
  year      = {2023},
  month     = jul,
  pages     = {11027--11052},
  publisher = {Association for Computational Linguistics},
  address   = {Toronto, Canada},
  doi       = {10.18653/v1/2023.acl-long.618},
  url       = {https://aclanthology.org/2023.acl-long.618},
  urldate   = {2024-03-19},
  abstract  = {Compositional generalization--understanding unseen combinations of seen primitives--is an essential reasoning capability in human intelligence. The AI community mainly studies this capability by fine-tuning neural networks on lots of training samples, while it is still unclear whether and how in-context learning--the prevailing few-shot paradigm based on large language models--exhibits compositional generalization. In this paper, we present CoFe, a test suite to investigate in-context compositional generalization. We find that the compositional generalization performance can be easily affected by the selection of in-context examples, thus raising the research question what the key factors are to make good in-context examples for compositional generalization. We study three potential factors: similarity, diversity and complexity. Our systematic experiments indicate that in-context examples should be structurally similar to the test case, diverse from each other, and individually simple. Furthermore, two strong limitations are observed: in-context compositional generalization on fictional words is much weaker than that on commonly used ones; it is still critical that the in-context examples should cover required linguistic structures, even though the backbone model has been pre-trained on large corpus. We hope our analysis would facilitate the understanding and utilization of in-context learning paradigm.},
  file      = {/Users/shanest/sync/library/An et al/2023/An et al. - 2023 - How Do In-Context Examples Affect Compositional Ge.pdf}
}
Downloads: 2
{"_id":"XiXF8zAt2zieSx84s","bibbaseid":"an-lin-fu-chen-zheng-lou-zhang-howdoincontextexamplesaffectcompositionalgeneralization-2023","author_short":["An, S.","Lin, Z.","Fu, Q.","Chen, B.","Zheng, N.","Lou, J.","Zhang, D."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","title":"How Do In-Context Examples Affect Compositional Generalization?","booktitle":"Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)","author":[{"propositions":[],"lastnames":["An"],"firstnames":["Shengnan"],"suffixes":[]},{"propositions":[],"lastnames":["Lin"],"firstnames":["Zeqi"],"suffixes":[]},{"propositions":[],"lastnames":["Fu"],"firstnames":["Qiang"],"suffixes":[]},{"propositions":[],"lastnames":["Chen"],"firstnames":["Bei"],"suffixes":[]},{"propositions":[],"lastnames":["Zheng"],"firstnames":["Nanning"],"suffixes":[]},{"propositions":[],"lastnames":["Lou"],"firstnames":["Jian-Guang"],"suffixes":[]},{"propositions":[],"lastnames":["Zhang"],"firstnames":["Dongmei"],"suffixes":[]}],"editor":[{"propositions":[],"lastnames":["Rogers"],"firstnames":["Anna"],"suffixes":[]},{"propositions":[],"lastnames":["Boyd-Graber"],"firstnames":["Jordan"],"suffixes":[]},{"propositions":[],"lastnames":["Okazaki"],"firstnames":["Naoaki"],"suffixes":[]}],"year":"2023","month":"July","pages":"11027–11052","publisher":"Association for Computational Linguistics","address":"Toronto, Canada","doi":"10.18653/v1/2023.acl-long.618","url":"https://aclanthology.org/2023.acl-long.618","urldate":"2024-03-19","abstract":"Compositional generalization–understanding unseen combinations of seen primitives–is an essential reasoning capability in human intelligence. The AI community mainly studies this capability by fine-tuning neural networks on lots of training samples, while it is still unclear whether and how in-context learning–the prevailing few-shot paradigm based on large language models–exhibits compositional generalization. 
In this paper, we present CoFe, a test suite to investigate in-context compositional generalization. We find that the compositional generalization performance can be easily affected by the selection of in-context examples, thus raising the research question what the key factors are to make good in-context examples for compositional generalization. We study three potential factors: similarity, diversity and complexity. Our systematic experiments indicate that in-context examples should be structurally similar to the test case, diverse from each other, and individually simple. Furthermore, two strong limitations are observed: in-context compositional generalization on fictional words is much weaker than that on commonly used ones; it is still critical that the in-context examples should cover required linguistic structures, even though the backbone model has been pre-trained on large corpus. We hope our analysis would facilitate the understanding and utilization of in-context learning paradigm.","file":"/Users/shanest/sync/library/An et al/2023/An et al. 
- 2023 - How Do In-Context Examples Affect Compositional Ge.pdf","bibtex":"@inproceedings{anHowInContextExamples2023,\n title = {How {{Do In-Context Examples Affect Compositional Generalization}}?},\n booktitle = {Proceedings of the 61st {{Annual Meeting}} of the {{Association}} for {{Computational Linguistics}} ({{Volume}} 1: {{Long Papers}})},\n author = {An, Shengnan and Lin, Zeqi and Fu, Qiang and Chen, Bei and Zheng, Nanning and Lou, Jian-Guang and Zhang, Dongmei},\n editor = {Rogers, Anna and {Boyd-Graber}, Jordan and Okazaki, Naoaki},\n year = {2023},\n month = jul,\n pages = {11027--11052},\n publisher = {Association for Computational Linguistics},\n address = {Toronto, Canada},\n doi = {10.18653/v1/2023.acl-long.618},\n url = {https://aclanthology.org/2023.acl-long.618},\n urldate = {2024-03-19},\n abstract = {Compositional generalization--understanding unseen combinations of seen primitives--is an essential reasoning capability in human intelligence. The AI community mainly studies this capability by fine-tuning neural networks on lots of training samples, while it is still unclear whether and how in-context learning--the prevailing few-shot paradigm based on large language models--exhibits compositional generalization. In this paper, we present CoFe, a test suite to investigate in-context compositional generalization. We find that the compositional generalization performance can be easily affected by the selection of in-context examples, thus raising the research question what the key factors are to make good in-context examples for compositional generalization. We study three potential factors: similarity, diversity and complexity. Our systematic experiments indicate that in-context examples should be structurally similar to the test case, diverse from each other, and individually simple. 
Furthermore, two strong limitations are observed: in-context compositional generalization on fictional words is much weaker than that on commonly used ones; it is still critical that the in-context examples should cover required linguistic structures, even though the backbone model has been pre-trained on large corpus. We hope our analysis would facilitate the understanding and utilization of in-context learning paradigm.},\n file = {/Users/shanest/sync/library/An et al/2023/An et al. - 2023 - How Do In-Context Examples Affect Compositional Ge.pdf}\n}\n\n","author_short":["An, S.","Lin, Z.","Fu, Q.","Chen, B.","Zheng, N.","Lou, J.","Zhang, D."],"editor_short":["Rogers, A.","Boyd-Graber, J.","Okazaki, N."],"key":"anHowInContextExamples2023","id":"anHowInContextExamples2023","bibbaseid":"an-lin-fu-chen-zheng-lou-zhang-howdoincontextexamplesaffectcompositionalgeneralization-2023","role":"author","urls":{"Paper":"https://aclanthology.org/2023.acl-long.618"},"metadata":{"authorlinks":{}},"downloads":2},"bibtype":"inproceedings","biburl":"https://www.shane.st/teaching/575/spr24/575_Compositionality.bib","dataSources":["TPZs4iPqAgE5a8mjq"],"keywords":[],"search_terms":["context","examples","affect","compositional","generalization","an","lin","fu","chen","zheng","lou","zhang"],"title":"How Do In-Context Examples Affect Compositional Generalization?","year":2023,"downloads":2}