ChatAug: Leveraging ChatGPT for Text Data Augmentation. Dai, H., Liu, Z., Liao, W., Huang, X., Wu, Z., Zhao, L., Liu, W., Liu, N., Li, S., Zhu, D., Cai, H., Li, Q., Shen, D., Liu, T., & Li, X. arXiv.org, February, 2023. Place: Ithaca Publisher: Cornell University Library, arXiv.org
Paper abstract bibtex Text data augmentation is an effective strategy for overcoming the challenge of limited sample sizes in many natural language processing (NLP) tasks. This challenge is especially prominent in the few-shot learning scenario, where the data in the target domain is generally much scarcer and of lowered quality. A natural and widely-used strategy to mitigate such challenges is to perform data augmentation on the training data to better capture the data invariance and increase the sample size. However, current text data augmentation methods either can not ensure the correct labeling of the generated data (lacking faithfulness) or can not ensure sufficient diversity in the generated data (lacking completeness), or both. Inspired by the recent success of large language models, especially the development of ChatGPT, which demonstrated improved language comprehension abilities, in this work, we propose a text data augmentation approach based on ChatGPT (named ChatAug). ChatGPT is trained on data with unparalleled linguistic richness and employs a reinforcement training process with large-scale human feedback, which endows the model with affinity to the naturalness of human language. Our text data augmentation approach ChatAug rephrases each sentence in the training samples into multiple conceptually similar but semantically different samples. The augmented samples can then be used in downstream model training. Experiment results on few-shot learning text classification tasks show the superior performance of the proposed ChatAug approach over state-of-the-art text data augmentation methods in terms of testing accuracy and distribution of the augmented samples.
@article{dai_chataug_2023,
	title = {{ChatAug}: {Leveraging} {ChatGPT} for {Text} {Data} {Augmentation}},
	url = {https://www.proquest.com/working-papers/chataug-leveraging-chatgpt-text-data-augmentation/docview/2781020946/se-2},
	abstract = {Text data augmentation is an effective strategy for overcoming the challenge of limited sample sizes in many natural language processing (NLP) tasks. This challenge is especially prominent in the few-shot learning scenario, where the data in the target domain is generally much scarcer and of lowered quality. A natural and widely-used strategy to mitigate such challenges is to perform data augmentation on the training data to better capture the data invariance and increase the sample size. However, current text data augmentation methods either can not ensure the correct labeling of the generated data (lacking faithfulness) or can not ensure sufficient diversity in the generated data (lacking completeness), or both. Inspired by the recent success of large language models, especially the development of ChatGPT, which demonstrated improved language comprehension abilities, in this work, we propose a text data augmentation approach based on ChatGPT (named ChatAug). ChatGPT is trained on data with unparalleled linguistic richness and employs a reinforcement training process with large-scale human feedback, which endows the model with affinity to the naturalness of human language. Our text data augmentation approach ChatAug rephrases each sentence in the training samples into multiple conceptually similar but semantically different samples. The augmented samples can then be used in downstream model training. Experiment results on few-shot learning text classification tasks show the superior performance of the proposed ChatAug approach over state-of-the-art text data augmentation methods in terms of testing accuracy and distribution of the augmented samples.},
	language = {English},
	journal = {arXiv.org},
	author = {Dai, Haixing and Liu, Zhengliang and Liao, Wenxiong and Huang, Xiaoke and Wu, Zihao and Zhao, Lin and Liu, Wei and Liu, Ninghao and Li, Sheng and Zhu, Dajiang and Cai, Hongmin and Li, Quanzheng and Shen, Dinggang and Liu, Tianming and Li, Xiang},
	month = feb,
	year = {2023},
	note = {Place: Ithaca
Publisher: Cornell University Library, arXiv.org},
	keywords = {Artificial intelligence, Chatbots, Machine Learning, Learning, Business And Economics--Banking And Finance, Training, Computation and Language, Natural language processing, Data augmentation},
	annote = {Copyright - © 2023. This work is published under http://creativecommons.org/licenses/by/4.0/ (the “License”). Notwithstanding the ProQuest Terms and Conditions, you may use this content in accordance with the terms of the License.
Última actualización - 2023-03-09},
}
Downloads: 0
{"_id":"QNAH6J9p7E9dCQ4PM","bibbaseid":"dai-liu-liao-huang-wu-zhao-liu-liu-etal-chataugleveragingchatgptfortextdataaugmentation-2023","author_short":["Dai, H.","Liu, Z.","Liao, W.","Huang, X.","Wu, Z.","Zhao, L.","Liu, W.","Liu, N.","Li, S.","Zhu, D.","Cai, H.","Li, Q.","Shen, D.","Liu, T.","Li, X."],"bibdata":{"bibtype":"article","type":"article","title":"ChatAug: Leveraging ChatGPT for Text Data Augmentation","url":"https://www.proquest.com/working-papers/chataug-leveraging-chatgpt-text-data-augmentation/docview/2781020946/se-2","abstract":"Text data augmentation is an effective strategy for overcoming the challenge of limited sample sizes in many natural language processing (NLP) tasks. This challenge is especially prominent in the few-shot learning scenario, where the data in the target domain is generally much scarcer and of lowered quality. A natural and widely-used strategy to mitigate such challenges is to perform data augmentation on the training data to better capture the data invariance and increase the sample size. However, current text data augmentation methods either can not ensure the correct labeling of the generated data (lacking faithfulness) or can not ensure sufficient diversity in the generated data (lacking completeness), or both. Inspired by the recent success of large language models, especially the development of ChatGPT, which demonstrated improved language comprehension abilities, in this work, we propose a text data augmentation approach based on ChatGPT (named ChatAug). ChatGPT is trained on data with unparalleled linguistic richness and employs a reinforcement training process with large-scale human feedback, which endows the model with affinity to the naturalness of human language. Our text data augmentation approach ChatAug rephrases each sentence in the training samples into multiple conceptually similar but semantically different samples. The augmented samples can then be used in downstream model training. 
Experiment results on few-shot learning text classification tasks show the superior performance of the proposed ChatAug approach over state-of-the-art text data augmentation methods in terms of testing accuracy and distribution of the augmented samples.","language":"English","journal":"arXiv.org","author":[{"propositions":[],"lastnames":["Dai"],"firstnames":["Haixing"],"suffixes":[]},{"propositions":[],"lastnames":["Liu"],"firstnames":["Zhengliang"],"suffixes":[]},{"propositions":[],"lastnames":["Liao"],"firstnames":["Wenxiong"],"suffixes":[]},{"propositions":[],"lastnames":["Huang"],"firstnames":["Xiaoke"],"suffixes":[]},{"propositions":[],"lastnames":["Wu"],"firstnames":["Zihao"],"suffixes":[]},{"propositions":[],"lastnames":["Zhao"],"firstnames":["Lin"],"suffixes":[]},{"propositions":[],"lastnames":["Liu"],"firstnames":["Wei"],"suffixes":[]},{"propositions":[],"lastnames":["Liu"],"firstnames":["Ninghao"],"suffixes":[]},{"propositions":[],"lastnames":["Li"],"firstnames":["Sheng"],"suffixes":[]},{"propositions":[],"lastnames":["Zhu"],"firstnames":["Dajiang"],"suffixes":[]},{"propositions":[],"lastnames":["Cai"],"firstnames":["Hongmin"],"suffixes":[]},{"propositions":[],"lastnames":["Li"],"firstnames":["Quanzheng"],"suffixes":[]},{"propositions":[],"lastnames":["Shen"],"firstnames":["Dinggang"],"suffixes":[]},{"propositions":[],"lastnames":["Liu"],"firstnames":["Tianming"],"suffixes":[]},{"propositions":[],"lastnames":["Li"],"firstnames":["Xiang"],"suffixes":[]}],"month":"February","year":"2023","note":"Place: Ithaca Publisher: Cornell University Library, arXiv.org","keywords":"Artificial intelligence, Chatbots, Artificial Intelligence, Machine Learning, Learning, Business And Economics–Banking And Finance, Training, Computation and Language, Natural language processing, Data augmentation","annote":"Última actualización - 2023-03-09","bibtex":"@article{dai_chataug_2023,\n\ttitle = {{ChatAug}: {Leveraging} {ChatGPT} for {Text} {Data} {Augmentation}},\n\turl = 
{https://www.proquest.com/working-papers/chataug-leveraging-chatgpt-text-data-augmentation/docview/2781020946/se-2},\n\tabstract = {Text data augmentation is an effective strategy for overcoming the challenge of limited sample sizes in many natural language processing (NLP) tasks. This challenge is especially prominent in the few-shot learning scenario, where the data in the target domain is generally much scarcer and of lowered quality. A natural and widely-used strategy to mitigate such challenges is to perform data augmentation on the training data to better capture the data invariance and increase the sample size. However, current text data augmentation methods either can not ensure the correct labeling of the generated data (lacking faithfulness) or can not ensure sufficient diversity in the generated data (lacking completeness), or both. Inspired by the recent success of large language models, especially the development of ChatGPT, which demonstrated improved language comprehension abilities, in this work, we propose a text data augmentation approach based on ChatGPT (named ChatAug). ChatGPT is trained on data with unparalleled linguistic richness and employs a reinforcement training process with large-scale human feedback, which endows the model with affinity to the naturalness of human language. Our text data augmentation approach ChatAug rephrases each sentence in the training samples into multiple conceptually similar but semantically different samples. The augmented samples can then be used in downstream model training. 
Experiment results on few-shot learning text classification tasks show the superior performance of the proposed ChatAug approach over state-of-the-art text data augmentation methods in terms of testing accuracy and distribution of the augmented samples.},\n\tlanguage = {English},\n\tjournal = {arXiv.org},\n\tauthor = {Dai, Haixing and Liu, Zhengliang and Liao, Wenxiong and Huang, Xiaoke and Wu, Zihao and Zhao, Lin and Liu, Wei and Liu, Ninghao and Li, Sheng and Zhu, Dajiang and Cai, Hongmin and Li, Quanzheng and Shen, Dinggang and Liu, Tianming and Li, Xiang},\n\tmonth = feb,\n\tyear = {2023},\n\tnote = {Place: Ithaca\nPublisher: Cornell University Library, arXiv.org},\n\tkeywords = {Artificial intelligence, Chatbots, Artificial Intelligence, Machine Learning, Learning, Business And Economics--Banking And Finance, Training, Computation and Language, Natural language processing, Data augmentation},\n\tannote = {Copyright - © 2023. This work is published under http://creativecommons.org/licenses/by/4.0/ (the “License”). 
Notwithstanding the ProQuest Terms and Conditions, you may use this content in accordance with the terms of the License.},\n\tannote = {Última actualización - 2023-03-09},\n}\n\n","author_short":["Dai, H.","Liu, Z.","Liao, W.","Huang, X.","Wu, Z.","Zhao, L.","Liu, W.","Liu, N.","Li, S.","Zhu, D.","Cai, H.","Li, Q.","Shen, D.","Liu, T.","Li, X."],"key":"dai_chataug_2023","id":"dai_chataug_2023","bibbaseid":"dai-liu-liao-huang-wu-zhao-liu-liu-etal-chataugleveragingchatgptfortextdataaugmentation-2023","role":"author","urls":{"Paper":"https://www.proquest.com/working-papers/chataug-leveraging-chatgpt-text-data-augmentation/docview/2781020946/se-2"},"keyword":["Artificial intelligence","Chatbots","Artificial Intelligence","Machine Learning","Learning","Business And Economics–Banking And Finance","Training","Computation and Language","Natural language processing","Data augmentation"],"metadata":{"authorlinks":{}}},"bibtype":"article","biburl":"https://bibbase.org/network/files/22WYpzbBvi3hDHX7Y","dataSources":["cYu6uhMkeFHgRrEty","hLMh7bwHyFsPNWAEL","LKW3iRvnztCpLNTW7","TLD9JxqHfSQQ4r268","X9BvByJrC3kGJexn8","iovNvcnNYDGJcuMq2","NjZJ5ZmWhTtMZBfje"],"keywords":["artificial intelligence","chatbots","artificial intelligence","machine learning","learning","business and economics–banking and finance","training","computation and language","natural language processing","data augmentation"],"search_terms":["chataug","leveraging","chatgpt","text","data","augmentation","dai","liu","liao","huang","wu","zhao","liu","liu","li","zhu","cai","li","shen","liu","li"],"title":"ChatAug: Leveraging ChatGPT for Text Data Augmentation","year":2023}