SELF-EXPLAIN: Teaching Large Language Models to Reason Complex Questions by Themselves. Zhao, J., Yao, Z., Yang, Z., & Yu, H. November 2023. arXiv:2311.06985 [cs]. R0-FoMo: Workshop on Robustness of Few-shot and Zero-shot Learning in Foundation Models at NeurIPS 2023.

Abstract: Large language models (LLMs) can generate intermediate reasoning steps. To elicit reliable reasoning, the common practice is to employ few-shot chain-of-thought (CoT) prompting, where several in-context demonstrations of reasoning are prepended to the question. However, such chain-of-thought examples are expensive to craft, especially for professional domains, and can have high variance depending on the human annotators. Therefore, this work investigates whether LLMs can teach themselves to reason without human-crafted demonstrations. We propose SELF-EXPLAIN, which has LLMs generate their own CoT examples, inspired by "encoding specificity" in human memory retrieval. We find that using self-explanations makes LLMs more confident, better calibrated, and less biased when answering complex questions. Moreover, we find that prompting with self-explanations can even significantly outperform using human-crafted CoTs on several complex question-answering datasets.
@misc{zhao_self-explain_2023,
title = {{SELF}-{EXPLAIN}: {Teaching} {Large} {Language} {Models} to {Reason} {Complex} {Questions} by {Themselves}},
shorttitle = {{SELF}-{EXPLAIN}},
url = {http://arxiv.org/abs/2311.06985},
abstract = {Large language models (LLMs) can generate intermediate reasoning steps. To elicit reliable reasoning, the common practice is to employ few-shot chain-of-thought (CoT) prompting, where several in-context demonstrations of reasoning are prepended to the question. However, such chain-of-thought examples are expensive to craft, especially for professional domains, and can have high variance depending on the human annotators. Therefore, this work investigates whether LLMs can teach themselves to reason without human-crafted demonstrations. We propose SELF-EXPLAIN, which has LLMs generate their own CoT examples, inspired by "encoding specificity" in human memory retrieval. We find that using self-explanations makes LLMs more confident, better calibrated, and less biased when answering complex questions. Moreover, we find that prompting with self-explanations can even significantly outperform using human-crafted CoTs on several complex question-answering datasets.},
urldate = {2023-11-14},
publisher = {arXiv},
author = {Zhao, Jiachen and Yao, Zonghai and Yang, Zhichao and Yu, Hong},
month = nov,
year = {2023},
note = {arXiv:2311.06985 [cs]. R0-FoMo: Workshop on Robustness of Few-shot and Zero-shot Learning in Foundation Models at NeurIPS 2023.},
keywords = {Computer Science - Computation and Language},
}
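
As a rough illustration of the idea described in the abstract, the Python sketch below shows one way self-generated explanations could stand in for human-written CoT demonstrations: the model first explains a handful of questions itself, and those explanations are then prepended as in-context demonstrations for the target question. This is not the authors' code; the complete() helper, the prompt wording, and the two-step structure are all assumptions made for the example, and the paper's actual procedure (including its use of encoding specificity) is described in the text linked above.

# Illustrative sketch only: one way self-generated CoT prompting could look.
# The `complete` helper, prompt wording, and two-step structure are
# assumptions for this example, not the authors' implementation.

def complete(prompt: str) -> str:
    """Placeholder for any LLM text-completion call; wire in a real client."""
    raise NotImplementedError

def self_explain_answer(demo_questions: list[str], target_question: str) -> str:
    # Step 1: let the model write its own explanation for each demonstration
    # question, replacing human-crafted chain-of-thought annotations.
    demos = []
    for q in demo_questions:
        explanation = complete(
            f"Question: {q}\nExplain your reasoning step by step, then answer.\n"
        )
        demos.append(f"Question: {q}\n{explanation}")

    # Step 2: prepend the self-generated explanations as in-context
    # demonstrations and ask the target question as usual.
    prompt = "\n\n".join(demos) + (
        f"\n\nQuestion: {target_question}\n"
        "Explain your reasoning step by step, then answer.\n"
    )
    return complete(prompt)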
{"_id":"aYxLB6YRnW2ifqjzB","bibbaseid":"zhao-yao-yang-yu-selfexplainteachinglargelanguagemodelstoreasoncomplexquestionsbythemselves-2023","author_short":["Zhao, J.","Yao, Z.","Yang, Z.","Yu, H."],"bibdata":{"bibtype":"misc","type":"misc","title":"SELF-EXPLAIN: Teaching Large Language Models to Reason Complex Questions by Themselves","shorttitle":"SELF-EXPLAIN","url":"http://arxiv.org/abs/2311.06985","abstract":"Large language models (LLMs) can generate intermediate reasoning steps. To elicit the reliable reasoning, the common practice is to employ few-shot chain-of-thought prompting, where several in-context demonstrations for reasoning are prepended to the question. However, such chain-of-thought examples are expensive to craft, especially for professional domains, and can have high variance depending on human annotators. Therefore, this work investigates whether LLMs can teach themselves to reason without human-crafted demonstrations. We propose SELF-EXPLAIN to generate CoT examples by LLMs inspired by \"encoding specificity\" in human memory retrieval. We find using self-explanations makes LLMs more confident, more calibrated and less biased when answering complex questions. Moreover, we find prompting with self-explanations can even significantly outperform using human-crafted CoTs on several complex question answering dataset.","urldate":"2023-11-14","publisher":"arXiv","author":[{"propositions":[],"lastnames":["Zhao"],"firstnames":["Jiachen"],"suffixes":[]},{"propositions":[],"lastnames":["Yao"],"firstnames":["Zonghai"],"suffixes":[]},{"propositions":[],"lastnames":["Yang"],"firstnames":["Zhichao"],"suffixes":[]},{"propositions":[],"lastnames":["Yu"],"firstnames":["Hong"],"suffixes":[]}],"month":"November","year":"2023","note":"Number: arXiv:2311.06985 arXiv:2311.06985 [cs] R0-FoMo: Workshop on Robustness of Few-shot and Zero-shot Learning in Foundation Models at NeurIPS 2023.","keywords":"Computer Science - Computation and Language","bibtex":"@misc{zhao_self-explain_2023,\n\ttitle = {{SELF}-{EXPLAIN}: {Teaching} {Large} {Language} {Models} to {Reason} {Complex} {Questions} by {Themselves}},\n\tshorttitle = {{SELF}-{EXPLAIN}},\n\turl = {http://arxiv.org/abs/2311.06985},\n\tabstract = {Large language models (LLMs) can generate intermediate reasoning steps. To elicit the reliable reasoning, the common practice is to employ few-shot chain-of-thought prompting, where several in-context demonstrations for reasoning are prepended to the question. However, such chain-of-thought examples are expensive to craft, especially for professional domains, and can have high variance depending on human annotators. Therefore, this work investigates whether LLMs can teach themselves to reason without human-crafted demonstrations. We propose SELF-EXPLAIN to generate CoT examples by LLMs inspired by \"encoding specificity\" in human memory retrieval. We find using self-explanations makes LLMs more confident, more calibrated and less biased when answering complex questions. 
Moreover, we find prompting with self-explanations can even significantly outperform using human-crafted CoTs on several complex question answering dataset.},\n\turldate = {2023-11-14},\n\tpublisher = {arXiv},\n\tauthor = {Zhao, Jiachen and Yao, Zonghai and Yang, Zhichao and Yu, Hong},\n\tmonth = nov,\n\tyear = {2023},\n\tnote = {Number: arXiv:2311.06985\narXiv:2311.06985 [cs]\nR0-FoMo: Workshop on Robustness of Few-shot and Zero-shot Learning in Foundation Models at NeurIPS 2023.},\n\tkeywords = {Computer Science - Computation and Language},\n}\n\n","author_short":["Zhao, J.","Yao, Z.","Yang, Z.","Yu, H."],"key":"zhao_self-explain_2023","id":"zhao_self-explain_2023","bibbaseid":"zhao-yao-yang-yu-selfexplainteachinglargelanguagemodelstoreasoncomplexquestionsbythemselves-2023","role":"author","urls":{"Paper":"http://arxiv.org/abs/2311.06985"},"keyword":["Computer Science - Computation and Language"],"metadata":{"authorlinks":{}},"html":""},"bibtype":"misc","biburl":"http://fenway.cs.uml.edu/papers/pubs-all.bib","dataSources":["TqaA9miSB65nRfS5H"],"keywords":["computer science - computation and language"],"search_terms":["self","explain","teaching","large","language","models","reason","complex","questions","themselves","zhao","yao","yang","yu"],"title":"SELF-EXPLAIN: Teaching Large Language Models to Reason Complex Questions by Themselves","year":2023}