Enhancing Zero-Shot Chain-of-Thought Reasoning in Large Language Models through Logic. Zhao, X., Li, M., Lu, W., Weber, C., Lee, J. H., Chu, K., & Wermter, S. arXiv preprint arXiv:2309.13339, 2023.

Abstract: Recent advancements in large language models have showcased their remarkable generalizability across various domains. However, their reasoning abilities still have significant room for improvement, especially when confronted with scenarios requiring multi-step reasoning. Although large language models possess extensive knowledge, their behavior, particularly in terms of reasoning, often fails to effectively utilize this knowledge to establish a coherent thinking paradigm. Generative language models sometimes show hallucinations as their reasoning procedures are unconstrained by logical principles. Aiming to improve the zero-shot chain-of-thought reasoning ability of large language models, we propose Logical Chain-of-Thought (LogiCoT), a neurosymbolic framework that leverages principles from symbolic logic to verify and revise the reasoning processes accordingly. Experimental evaluations conducted on language tasks in diverse domains, including arithmetic, commonsense, symbolic, causal inference, and social problems, demonstrate the efficacy of the enhanced reasoning paradigm by logic.
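The abstract describes LogiCoT as verifying each reasoning step and revising the ones that fail. The following is a minimal sketch of that verify-and-revise idea, assuming a generic llm(prompt) -> text callable; the function name logicot_sketch, the prompt wording, and the loop structure are illustrative assumptions, not the authors' implementation.

from typing import Callable, List

def logicot_sketch(question: str, llm: Callable[[str], str], max_rounds: int = 2) -> str:
    """Illustrative verify-and-revise loop over a zero-shot chain of thought."""
    # Elicit an initial zero-shot chain of thought, one step per line.
    chain: List[str] = llm(f"Q: {question}\nA: Let's think step by step.").splitlines()

    for _ in range(max_rounds):
        revised = False
        for i, step in enumerate(chain):
            premises = "\n".join(chain[:i]) or "(none)"
            # Verify: ask the model whether this step follows from the steps before it.
            verdict = llm(
                f"Premises:\n{premises}\nStep: {step}\n"
                "Does the step logically follow from the premises? Answer yes or no."
            )
            if verdict.strip().lower().startswith("no"):
                # Revise: regenerate only the offending step, keeping the earlier steps.
                chain[i] = llm(
                    f"Premises:\n{premises}\n"
                    f"The step '{step}' was judged invalid. Write a corrected step."
                )
                revised = True
        if not revised:
            break  # every step passed verification
    return "\n".join(chain)

# Toy demo with a canned "model" that approves every step:
toy_llm = lambda p: "yes" if "Answer yes or no" in p else "2 + 3 = 5"
print(logicot_sketch("What is 2 + 3?", toy_llm))

In the paper the verification component is grounded in principles from symbolic logic rather than the plain yes/no query above, which merely stands in for that piece.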
@article{zhao_enhancing_2023,
title = {Enhancing {{Zero-Shot Chain-of-Thought Reasoning}} in {{Large Language Models}} through {{Logic}}},
author = {Zhao, Xufeng and Li, Mengdi and Lu, Wenhao and Weber, Cornelius and Lee, Jae Hee and Chu, Kun and Wermter, Stefan},
year = {2023},
eprint = {2309.13339},
primaryclass = {cs},
publisher = {arXiv},
urldate = {2023-10-05},
abstract = {Recent advancements in large language models have showcased their remarkable generalizability across various domains. However, their reasoning abilities still have significant room for improvement, especially when confronted with scenarios requiring multi-step reasoning. Although large language models possess extensive knowledge, their behavior, particularly in terms of reasoning, often fails to effectively utilize this knowledge to establish a coherent thinking paradigm. Generative language models sometimes show hallucinations as their reasoning procedures are unconstrained by logical principles. Aiming to improve the zero-shot chain-of-thought reasoning ability of large language models, we propose Logical Chain-of-Thought (LogiCoT), a neurosymbolic framework that leverages principles from symbolic logic to verify and revise the reasoning processes accordingly. Experimental evaluations conducted on language tasks in diverse domains, including arithmetic, commonsense, symbolic, causal inference, and social problems, demonstrate the efficacy of the enhanced reasoning paradigm by logic.},
archiveprefix = {arXiv},
copyright = {All rights reserved},
journal = {arXiv preprint arXiv:2309.13339}
}
{"_id":"K7EipRRH9gbz3EKnb","bibbaseid":"zhao-li-lu-weber-lee-chu-wermter-enhancingzeroshotchainofthoughtreasoninginlargelanguagemodelsthroughlogic-2023","author_short":["Zhao, X.","Li, M.","Lu, W.","Weber, C.","Lee, J. H.","Chu, K.","Wermter, S."],"bibdata":{"bibtype":"article","type":"article","title":"Enhancing Zero-Shot Chain-of-Thought Reasoning in Large Language Models through Logic","author":[{"propositions":[],"lastnames":["Zhao"],"firstnames":["Xufeng"],"suffixes":[]},{"propositions":[],"lastnames":["Li"],"firstnames":["Mengdi"],"suffixes":[]},{"propositions":[],"lastnames":["Lu"],"firstnames":["Wenhao"],"suffixes":[]},{"propositions":[],"lastnames":["Weber"],"firstnames":["Cornelius"],"suffixes":[]},{"propositions":[],"lastnames":["Lee"],"firstnames":["Jae","Hee"],"suffixes":[]},{"propositions":[],"lastnames":["Chu"],"firstnames":["Kun"],"suffixes":[]},{"propositions":[],"lastnames":["Wermter"],"firstnames":["Stefan"],"suffixes":[]}],"year":"2023","eprint":"2309.13339","primaryclass":"cs","publisher":"arXiv","urldate":"2023-10-05","abstract":"Recent advancements in large language models have showcased their remarkable generalizability across various domains. However, their reasoning abilities still have significant room for improvement, especially when confronted with scenarios requiring multi-step reasoning. Although large language models possess extensive knowledge, their behavior, particularly in terms of reasoning, often fails to effectively utilize this knowledge to establish a coherent thinking paradigm. Generative language models sometimes show hallucinations as their reasoning procedures are unconstrained by logical principles. Aiming to improve the zero-shot chain-of-thought reasoning ability of large language models, we propose Logical Chain-of-Thought (LogiCoT), a neurosymbolic framework that leverages principles from symbolic logic to verify and revise the reasoning processes accordingly. Experimental evaluations conducted on language tasks in diverse domains, including arithmetic, commonsense, symbolic, causal inference, and social problems, demonstrate the efficacy of the enhanced reasoning paradigm by logic.","archiveprefix":"arxiv","copyright":"All rights reserved","file":"/Users/jae/Zotero/storage/MSI8BZEY/Zhao et al. - 2023 - Enhancing Zero-Shot Chain-of-Thought Reasoning in .pdf","journal":"arXiv preprint arXiv:2309.13339","bibtex":"@article{zhao_enhancing_2023,\n title = {Enhancing {{Zero-Shot Chain-of-Thought Reasoning}} in {{Large Language Models}} through {{Logic}}},\n author = {Zhao, Xufeng and Li, Mengdi and Lu, Wenhao and Weber, Cornelius and Lee, Jae Hee and Chu, Kun and Wermter, Stefan},\n year = {2023},\n eprint = {2309.13339},\n primaryclass = {cs},\n publisher = {arXiv},\n urldate = {2023-10-05},\n abstract = {Recent advancements in large language models have showcased their remarkable generalizability across various domains. However, their reasoning abilities still have significant room for improvement, especially when confronted with scenarios requiring multi-step reasoning. Although large language models possess extensive knowledge, their behavior, particularly in terms of reasoning, often fails to effectively utilize this knowledge to establish a coherent thinking paradigm. Generative language models sometimes show hallucinations as their reasoning procedures are unconstrained by logical principles. 
Aiming to improve the zero-shot chain-of-thought reasoning ability of large language models, we propose Logical Chain-of-Thought (LogiCoT), a neurosymbolic framework that leverages principles from symbolic logic to verify and revise the reasoning processes accordingly. Experimental evaluations conducted on language tasks in diverse domains, including arithmetic, commonsense, symbolic, causal inference, and social problems, demonstrate the efficacy of the enhanced reasoning paradigm by logic.},\n archiveprefix = {arxiv},\n copyright = {All rights reserved},\n file = {/Users/jae/Zotero/storage/MSI8BZEY/Zhao et al. - 2023 - Enhancing Zero-Shot Chain-of-Thought Reasoning in .pdf},\n journal = {arXiv preprint arXiv:2309.13339}\n}\n","author_short":["Zhao, X.","Li, M.","Lu, W.","Weber, C.","Lee, J. H.","Chu, K.","Wermter, S."],"bibbaseid":"zhao-li-lu-weber-lee-chu-wermter-enhancingzeroshotchainofthoughtreasoninginlargelanguagemodelsthroughlogic-2023","role":"author","urls":{},"metadata":{"authorlinks":{}}},"bibtype":"article","biburl":"https://bibbase.org/f/LH9Dpt57nicKPPtLv/Exported Items.bib","dataSources":["cwGdKRqJNaMgaAhQN","cEc4uKmbByTGnEFSd","QLKcoK33WeKznwHFz","3aNkZpB96uqzFFxLW"],"keywords":[],"search_terms":["enhancing","zero","shot","chain","thought","reasoning","large","language","models","through","logic","zhao","li","lu","weber","lee","chu","wermter"],"title":"Enhancing Zero-Shot Chain-of-Thought Reasoning in Large Language Models through Logic","year":2023}