{"_id":"S7aigvR8b3xrwqKmS","bibbaseid":"xiang-cui-cheng-wang-zhang-huang-xie-xu-etal-zeroshotinformationextractionviachattingwithchatgpt-2023","author_short":["Xiang, W.","Cui, X.","Cheng, N.","Wang, X.","Zhang, X.","Huang, S.","Xie, P.","Xu, J.","Chen, Y.","Zhang, M.","Jiang, Y.","Han, W."],"bibdata":{"bibtype":"article","type":"article","title":"Zero-Shot Information Extraction via Chatting with ChatGPT","url":"https://www.proquest.com/working-papers/zero-shot-information-extraction-via-chatting/docview/2778775259/se-2","abstract":"Zero-shot information extraction (IE) aims to build IE systems from the unannotated text. It is challenging due to involving little human intervention. Challenging but worthwhile, zero-shot IE reduces the time and effort that data labeling takes. Recent efforts on large language models (LLMs, e.g., GPT-3, ChatGPT) show promising performance on zero-shot settings, thus inspiring us to explore prompt-based methods. In this work, we ask whether strong IE models can be constructed by directly prompting LLMs. Specifically, we transform the zero-shot IE task into a multi-turn question-answering problem with a two-stage framework (ChatIE). With the power of ChatGPT, we extensively evaluate our framework on three IE tasks: entity-relation triple extract, named entity recognition, and event extraction. Empirical results on six datasets across two languages show that ChatIE achieves impressive performance and even surpasses some full-shot models on several datasets (e.g., NYT11-HRL). We believe that our work could shed light on building IE models with limited resources.","language":"English","journal":"arXiv.org","author":[{"propositions":[],"lastnames":["Xiang"],"firstnames":["Wei"],"suffixes":[]},{"propositions":[],"lastnames":["Cui"],"firstnames":["Xingyu"],"suffixes":[]},{"propositions":[],"lastnames":["Cheng"],"firstnames":["Ning"],"suffixes":[]},{"propositions":[],"lastnames":["Wang"],"firstnames":["Xiaobin"],"suffixes":[]},{"propositions":[],"lastnames":["Zhang"],"firstnames":["Xin"],"suffixes":[]},{"propositions":[],"lastnames":["Huang"],"firstnames":["Shen"],"suffixes":[]},{"propositions":[],"lastnames":["Xie"],"firstnames":["Pengjun"],"suffixes":[]},{"propositions":[],"lastnames":["Xu"],"firstnames":["Jinan"],"suffixes":[]},{"propositions":[],"lastnames":["Chen"],"firstnames":["Yufeng"],"suffixes":[]},{"propositions":[],"lastnames":["Zhang"],"firstnames":["Meishan"],"suffixes":[]},{"propositions":[],"lastnames":["Jiang"],"firstnames":["Yong"],"suffixes":[]},{"propositions":[],"lastnames":["Han"],"firstnames":["Wenjuan"],"suffixes":[]}],"month":"February","year":"2023","note":"Place: Ithaca Publisher: Cornell University Library, arXiv.org","keywords":"Chatbots, Business And Economics–Banking And Finance, Datasets, Computation and Language, Information retrieval","bibtex":"@article{xiang_zero-shot_2023,\n\ttitle = {Zero-{Shot} {Information} {Extraction} via {Chatting} with {ChatGPT}},\n\turl = {https://www.proquest.com/working-papers/zero-shot-information-extraction-via-chatting/docview/2778775259/se-2},\n\tabstract = {Zero-shot information extraction (IE) aims to build IE systems from the unannotated text. It is challenging due to involving little human intervention. Challenging but worthwhile, zero-shot IE reduces the time and effort that data labeling takes. Recent efforts on large language models (LLMs, e.g., GPT-3, ChatGPT) show promising performance on zero-shot settings, thus inspiring us to explore prompt-based methods. 
In this work, we ask whether strong IE models can be constructed by directly prompting LLMs. Specifically, we transform the zero-shot IE task into a multi-turn question-answering problem with a two-stage framework (ChatIE). With the power of ChatGPT, we extensively evaluate our framework on three IE tasks: entity-relation triple extract, named entity recognition, and event extraction. Empirical results on six datasets across two languages show that ChatIE achieves impressive performance and even surpasses some full-shot models on several datasets (e.g., NYT11-HRL). We believe that our work could shed light on building IE models with limited resources.},\n\tlanguage = {English},\n\tjournal = {arXiv.org},\n\tauthor = {Xiang, Wei and Cui, Xingyu and Cheng, Ning and Wang, Xiaobin and Zhang, Xin and Huang, Shen and Xie, Pengjun and Xu, Jinan and Chen, Yufeng and Zhang, Meishan and Jiang, Yong and Han, Wenjuan},\n\tmonth = feb,\n\tyear = {2023},\n\tnote = {Place: Ithaca\nPublisher: Cornell University Library, arXiv.org},\n\tkeywords = {Chatbots, Business And Economics--Banking And Finance, Datasets, Computation and Language, Information retrieval},\n}\n\n","author_short":["Xiang, W.","Cui, X.","Cheng, N.","Wang, X.","Zhang, X.","Huang, S.","Xie, P.","Xu, J.","Chen, Y.","Zhang, M.","Jiang, Y.","Han, W."],"key":"xiang_zero-shot_2023","id":"xiang_zero-shot_2023","bibbaseid":"xiang-cui-cheng-wang-zhang-huang-xie-xu-etal-zeroshotinformationextractionviachattingwithchatgpt-2023","role":"author","urls":{"Paper":"https://www.proquest.com/working-papers/zero-shot-information-extraction-via-chatting/docview/2778775259/se-2"},"keyword":["Chatbots","Business And Economics–Banking And Finance","Datasets","Computation and Language","Information retrieval"],"metadata":{"authorlinks":{}}},"bibtype":"article","biburl":"https://bibbase.org/network/files/srCiZydJWRzXD39Ko","dataSources":["cYu6uhMkeFHgRrEty","hLMh7bwHyFsPNWAEL","LKW3iRvnztCpLNTW7","TLD9JxqHfSQQ4r268","X9BvByJrC3kGJexn8","iovNvcnNYDGJcuMq2","NjZJ5ZmWhTtMZBfje"],"keywords":["chatbots","business and economics–banking and finance","datasets","computation and language","information retrieval"],"search_terms":["zero","shot","information","extraction","via","chatting","chatgpt","xiang","cui","cheng","wang","zhang","huang","xie","xu","chen","zhang","jiang","han"],"title":"Zero-Shot Information Extraction via Chatting with ChatGPT","year":2023}
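Annotation: the abstract describes ChatIE as transforming zero-shot IE into a multi-turn question-answering problem with a two-stage framework. The sketch below is one plausible reading of that idea for entity-relation triple extraction: a first turn that asks which relation types appear in the text, then follow-up turns that extract argument pairs for each detected type. The relation schema, prompt wording, output format, and the chat stub are illustrative assumptions, not the authors' templates or code.

from typing import Callable, Dict, List

Message = Dict[str, str]  # one chat turn: {"role": ..., "content": ...}


def chat(messages: List[Message]) -> str:
    """Placeholder for a chat-LLM call (e.g., ChatGPT); swap in a real client."""
    raise NotImplementedError("plug a chat-model client in here")


RELATION_TYPES = ["founded_by", "located_in", "works_for"]  # hypothetical schema


def chatie_triples(sentence: str, llm: Callable[[List[Message]], str] = chat):
    """Two-stage, multi-turn extraction of (head, relation, tail) triples."""
    history: List[Message] = [
        {"role": "system", "content": "You extract structured facts from text."},
        # Stage 1: a single turn asking which schema relation types the sentence expresses.
        {"role": "user", "content": (
            f"Sentence: {sentence}\n"
            f"Which of these relation types does it express? {RELATION_TYPES}\n"
            "Reply with a comma-separated list, or 'none'."
        )},
    ]
    stage1 = llm(history)
    history.append({"role": "assistant", "content": stage1})

    # Stage 2: one follow-up turn per detected relation type, reusing the chat
    # history, asking for the argument pairs of that relation.
    triples = []
    for rel in (r for r in RELATION_TYPES if r in stage1):
        history.append({"role": "user", "content": (
            f"For the relation '{rel}', list every (head, tail) entity pair "
            "in the sentence, one per line, formatted as 'head | tail'."
        )})
        stage2 = llm(history)
        history.append({"role": "assistant", "content": stage2})
        for line in stage2.splitlines():
            if "|" in line:
                head, tail = (part.strip() for part in line.split("|", 1))
                triples.append((head, rel, tail))
    return triples

In an actual run, chat would be backed by ChatGPT (or another chat LLM) and the schema would come from the evaluation dataset (for example, the relation inventory of NYT11-HRL), matching the zero-shot setup the abstract describes: prompting only, with no labeled training data.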