TRIGO: Benchmarking Formal Mathematical Proof Reduction for Generative Language Models. Xiong, J., Shen, J., Yuan, Y., Wang, H., Yin, Y., Liu, Z., Li, L., Guo, Z., Cao, Q., Huang, Y., Zheng, C., Liang, X., Zhang, M., & Liu, Q. October, 2023. arXiv:2310.10180 [cs]. Paper | doi | abstract | bibtex. Automated theorem proving (ATP) has become an appealing domain for exploring the reasoning ability of the recent successful generative language models. However, current ATP benchmarks mainly focus on symbolic inference, but rarely involve the understanding of complex number combination reasoning. In this work, we propose TRIGO, an ATP benchmark that not only requires a model to reduce a trigonometric expression with step-by-step proofs but also evaluates a generative LM's reasoning ability on formulas and its capability to manipulate, group, and factor number terms. We gather trigonometric expressions and their reduced forms from the web, annotate the simplification process manually, and translate it into the Lean formal language system. We then automatically generate additional examples from the annotated samples to expand the dataset. Furthermore, we develop an automatic generator based on Lean-Gym to create dataset splits of varying difficulties and distributions in order to thoroughly analyze the model's generalization ability. Our extensive experiments show our proposed TRIGO poses a new challenge for advanced generative LM's including GPT-4 which is pre-trained on a considerable amount of open-source formal theorem-proving language data, and provide a new tool to study the generative LM's ability on both formal and mathematical reasoning.
@misc{xiong_trigo_2023,
	title = {{TRIGO}: Benchmarking Formal Mathematical Proof Reduction for Generative Language Models},
	shorttitle = {{TRIGO}},
	url = {http://arxiv.org/abs/2310.10180},
	doi = {10.48550/arXiv.2310.10180},
	abstract = {Automated theorem proving (ATP) has become an appealing domain for exploring the reasoning ability of the recent successful generative language models. However, current ATP benchmarks mainly focus on symbolic inference, but rarely involve the understanding of complex number combination reasoning. In this work, we propose TRIGO, an ATP benchmark that not only requires a model to reduce a trigonometric expression with step-by-step proofs but also evaluates a generative LM's reasoning ability on formulas and its capability to manipulate, group, and factor number terms. We gather trigonometric expressions and their reduced forms from the web, annotate the simplification process manually, and translate it into the Lean formal language system. We then automatically generate additional examples from the annotated samples to expand the dataset. Furthermore, we develop an automatic generator based on Lean-Gym to create dataset splits of varying difficulties and distributions in order to thoroughly analyze the model's generalization ability. Our extensive experiments show our proposed TRIGO poses a new challenge for advanced generative LM's including GPT-4 which is pre-trained on a considerable amount of open-source formal theorem-proving language data, and provide a new tool to study the generative LM's ability on both formal and mathematical reasoning.},
	urldate = {2024-01-16},
	publisher = {arXiv},
	author = {Xiong, Jing and Shen, Jianhao and Yuan, Ye and Wang, Haiming and Yin, Yichun and Liu, Zhengying and Li, Lin and Guo, Zhijiang and Cao, Qingxing and Huang, Yinya and Zheng, Chuanyang and Liang, Xiaodan and Zhang, Ming and Liu, Qun},
	month = oct,
	year = {2023},
	eprint = {2310.10180},
	eprinttype = {arXiv},
	eprintclass = {cs},
	keywords = {artificial intelligence, computation and language, generative language model, uses sympy},
}
Downloads: 0
{"_id":"5gJHd7RjeKXKcGy6D","bibbaseid":"xiong-shen-yuan-wang-yin-liu-li-guo-etal-trigobenchmarkingformalmathematicalproofreductionforgenerativelanguagemodels-2023","author_short":["Xiong, J.","Shen, J.","Yuan, Y.","Wang, H.","Yin, Y.","Liu, Z.","Li, L.","Guo, Z.","Cao, Q.","Huang, Y.","Zheng, C.","Liang, X.","Zhang, M.","Liu, Q."],"bibdata":{"bibtype":"misc","type":"misc","title":"TRIGO: Benchmarking Formal Mathematical Proof Reduction for Generative Language Models","shorttitle":"TRIGO","url":"http://arxiv.org/abs/2310.10180","doi":"10.48550/arXiv.2310.10180","abstract":"Automated theorem proving (ATP) has become an appealing domain for exploring the reasoning ability of the recent successful generative language models. However, current ATP benchmarks mainly focus on symbolic inference, but rarely involve the understanding of complex number combination reasoning. In this work, we propose TRIGO, an ATP benchmark that not only requires a model to reduce a trigonometric expression with step-by-step proofs but also evaluates a generative LM's reasoning ability on formulas and its capability to manipulate, group, and factor number terms. We gather trigonometric expressions and their reduced forms from the web, annotate the simplification process manually, and translate it into the Lean formal language system. We then automatically generate additional examples from the annotated samples to expand the dataset. Furthermore, we develop an automatic generator based on Lean-Gym to create dataset splits of varying difficulties and distributions in order to thoroughly analyze the model's generalization ability. 
Our extensive experiments show our proposed TRIGO poses a new challenge for advanced generative LM's including GPT-4 which is pre-trained on a considerable amount of open-source formal theorem-proving language data, and provide a new tool to study the generative LM's ability on both formal and mathematical reasoning.","urldate":"2024-01-16","publisher":"arXiv","author":[{"propositions":[],"lastnames":["Xiong"],"firstnames":["Jing"],"suffixes":[]},{"propositions":[],"lastnames":["Shen"],"firstnames":["Jianhao"],"suffixes":[]},{"propositions":[],"lastnames":["Yuan"],"firstnames":["Ye"],"suffixes":[]},{"propositions":[],"lastnames":["Wang"],"firstnames":["Haiming"],"suffixes":[]},{"propositions":[],"lastnames":["Yin"],"firstnames":["Yichun"],"suffixes":[]},{"propositions":[],"lastnames":["Liu"],"firstnames":["Zhengying"],"suffixes":[]},{"propositions":[],"lastnames":["Li"],"firstnames":["Lin"],"suffixes":[]},{"propositions":[],"lastnames":["Guo"],"firstnames":["Zhijiang"],"suffixes":[]},{"propositions":[],"lastnames":["Cao"],"firstnames":["Qingxing"],"suffixes":[]},{"propositions":[],"lastnames":["Huang"],"firstnames":["Yinya"],"suffixes":[]},{"propositions":[],"lastnames":["Zheng"],"firstnames":["Chuanyang"],"suffixes":[]},{"propositions":[],"lastnames":["Liang"],"firstnames":["Xiaodan"],"suffixes":[]},{"propositions":[],"lastnames":["Zhang"],"firstnames":["Ming"],"suffixes":[]},{"propositions":[],"lastnames":["Liu"],"firstnames":["Qun"],"suffixes":[]}],"month":"October","year":"2023","note":"arXiv:2310.10180 [cs]","keywords":"artificial intelligence, computation and language, generative language model, uses sympy","bibtex":"@misc{xiong_trigo_2023,\n\ttitle = {{TRIGO}: {Benchmarking} {Formal} {Mathematical} {Proof} {Reduction} for {Generative} {Language} {Models}},\n\tshorttitle = {{TRIGO}},\n\turl = {http://arxiv.org/abs/2310.10180},\n\tdoi = {10.48550/arXiv.2310.10180},\n\tabstract = {Automated theorem proving (ATP) has become an appealing domain for exploring the 
reasoning ability of the recent successful generative language models. However, current ATP benchmarks mainly focus on symbolic inference, but rarely involve the understanding of complex number combination reasoning. In this work, we propose TRIGO, an ATP benchmark that not only requires a model to reduce a trigonometric expression with step-by-step proofs but also evaluates a generative LM's reasoning ability on formulas and its capability to manipulate, group, and factor number terms. We gather trigonometric expressions and their reduced forms from the web, annotate the simplification process manually, and translate it into the Lean formal language system. We then automatically generate additional examples from the annotated samples to expand the dataset. Furthermore, we develop an automatic generator based on Lean-Gym to create dataset splits of varying difficulties and distributions in order to thoroughly analyze the model's generalization ability. Our extensive experiments show our proposed TRIGO poses a new challenge for advanced generative LM's including GPT-4 which is pre-trained on a considerable amount of open-source formal theorem-proving language data, and provide a new tool to study the generative LM's ability on both formal and mathematical reasoning.},\n\turldate = {2024-01-16},\n\tpublisher = {arXiv},\n\tauthor = {Xiong, Jing and Shen, Jianhao and Yuan, Ye and Wang, Haiming and Yin, Yichun and Liu, Zhengying and Li, Lin and Guo, Zhijiang and Cao, Qingxing and Huang, Yinya and Zheng, Chuanyang and Liang, Xiaodan and Zhang, Ming and Liu, Qun},\n\tmonth = oct,\n\tyear = {2023},\n\tnote = {arXiv:2310.10180 [cs]},\n\tkeywords = {artificial intelligence, computation and language, generative language model, uses sympy},\n}\n\n\n\n\n\n\n\n\n\n\n\n","author_short":["Xiong, J.","Shen, J.","Yuan, Y.","Wang, H.","Yin, Y.","Liu, Z.","Li, L.","Guo, Z.","Cao, Q.","Huang, Y.","Zheng, C.","Liang, X.","Zhang, M.","Liu, 
Q."],"key":"xiong_trigo_2023","id":"xiong_trigo_2023","bibbaseid":"xiong-shen-yuan-wang-yin-liu-li-guo-etal-trigobenchmarkingformalmathematicalproofreductionforgenerativelanguagemodels-2023","role":"author","urls":{"Paper":"http://arxiv.org/abs/2310.10180"},"keyword":["artificial intelligence","computation and language","generative language model","uses sympy"],"metadata":{"authorlinks":{}}},"bibtype":"misc","biburl":"https://bibbase.org/zotero-group/nicoguaro/525293","dataSources":["YtBDXPDiQEyhyEDZC"],"keywords":["artificial intelligence","computation and language","generative language model","uses sympy"],"search_terms":["trigo","benchmarking","formal","mathematical","proof","reduction","generative","language","models","xiong","shen","yuan","wang","yin","liu","li","guo","cao","huang","zheng","liang","zhang","liu"],"title":"TRIGO: Benchmarking Formal Mathematical Proof Reduction for Generative Language Models","year":2023}