Working Memory Capacity of ChatGPT: An Empirical Study. Gong, D., Wan, X., & Wang, D. February, 2024. arXiv:2305.03731 [cs, q-bio]
Abstract: Working memory is a critical aspect of both human intelligence and artificial intelligence, serving as a workspace for the temporary storage and manipulation of information. In this paper, we systematically assess the working memory capacity of ChatGPT, a large language model developed by OpenAI, by examining its performance in verbal and spatial n-back tasks under various conditions. Our experiments reveal that ChatGPT has a working memory capacity limit strikingly similar to that of humans. Furthermore, we investigate the impact of different instruction strategies on ChatGPT's performance and observe that the fundamental patterns of a capacity limit persist. From our empirical findings, we propose that n-back tasks may serve as tools for benchmarking the working memory capacity of large language models and hold potential for informing future efforts aimed at enhancing AI working memory.
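The abstract's central method is the n-back paradigm: the model sees a stream of stimuli one at a time and must say whether the current item matches the one shown n positions earlier, so accuracy at increasing n indexes working memory load. As a reading aid, here is a minimal Python sketch of a verbal n-back sequence generator and scorer; the consonant pool, sequence length, and 33% match rate are illustrative assumptions, not the paper's actual parameters or prompts.

# Minimal sketch of a verbal n-back task, assuming illustrative
# parameters; this is not the authors' experiment code.
import random

LETTERS = "bcdfghjklmnpqrstvwxz"  # consonant pool (assumption)

def make_nback_sequence(n, length=24, match_rate=0.33, seed=None):
    """Generate a letter stream where roughly match_rate of trials
    after the first n repeat the letter shown n positions earlier."""
    rng = random.Random(seed)
    seq = [rng.choice(LETTERS) for _ in range(n)]
    for _ in range(n, length):
        if rng.random() < match_rate:
            seq.append(seq[-n])  # planted n-back match
        else:
            # draw a letter that does NOT match n trials back
            seq.append(rng.choice([c for c in LETTERS if c != seq[-n]]))
    return seq

def score_responses(seq, responses, n):
    """Compare per-trial 'm' (match) / '-' (non-match) responses to
    ground truth; return hit and false-alarm rates."""
    hits = misses = fas = crs = 0
    for i in range(n, len(seq)):
        is_match = seq[i] == seq[i - n]
        said_match = responses[i] == "m"
        if is_match and said_match:
            hits += 1
        elif is_match:
            misses += 1
        elif said_match:
            fas += 1
        else:
            crs += 1
    return {"hit_rate": hits / max(hits + misses, 1),
            "fa_rate": fas / max(fas + crs, 1)}

seq = make_nback_sequence(n=2, seed=7)
# In the paper's setting, each letter would be shown to the model one
# turn at a time with an instruction to answer "m" on a 2-back match;
# here we score a dummy all-non-match response just to show the API.
print(score_responses(seq, ["-"] * len(seq), n=2))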
@misc{gong_working_2024,
title = {Working {Memory} {Capacity} of {ChatGPT}: {An} {Empirical} {Study}},
shorttitle = {Working {Memory} {Capacity} of {ChatGPT}},
url = {http://arxiv.org/abs/2305.03731},
doi = {10.48550/arXiv.2305.03731},
abstract = {Working memory is a critical aspect of both human intelligence and artificial intelligence, serving as a workspace for the temporary storage and manipulation of information. In this paper, we systematically assess the working memory capacity of ChatGPT, a large language model developed by OpenAI, by examining its performance in verbal and spatial n-back tasks under various conditions. Our experiments reveal that ChatGPT has a working memory capacity limit strikingly similar to that of humans. Furthermore, we investigate the impact of different instruction strategies on ChatGPT's performance and observe that the fundamental patterns of a capacity limit persist. From our empirical findings, we propose that n-back tasks may serve as tools for benchmarking the working memory capacity of large language models and hold potential for informing future efforts aimed at enhancing AI working memory.},
urldate = {2024-02-16},
publisher = {arXiv},
author = {Gong, Dongyu and Wan, Xingchen and Wang, Dingmin},
month = feb,
year = {2024},
note = {arXiv:2305.03731 [cs, q-bio]},
keywords = {Computer Science - Artificial Intelligence, Computer Science - Computation and Language, Quantitative Biology - Neurons and Cognition},
}
{"_id":"eMMBiJxDTmShFg7XW","bibbaseid":"gong-wan-wang-workingmemorycapacityofchatgptanempiricalstudy-2024","author_short":["Gong, D.","Wan, X.","Wang, D."],"bibdata":{"bibtype":"misc","type":"misc","title":"Working Memory Capacity of ChatGPT: An Empirical Study","shorttitle":"Working Memory Capacity of ChatGPT","url":"http://arxiv.org/abs/2305.03731","doi":"10.48550/arXiv.2305.03731","abstract":"Working memory is a critical aspect of both human intelligence and artificial intelligence, serving as a workspace for the temporary storage and manipulation of information. In this paper, we systematically assess the working memory capacity of ChatGPT, a large language model developed by OpenAI, by examining its performance in verbal and spatial n-back tasks under various conditions. Our experiments reveal that ChatGPT has a working memory capacity limit strikingly similar to that of humans. Furthermore, we investigate the impact of different instruction strategies on ChatGPT's performance and observe that the fundamental patterns of a capacity limit persist. From our empirical findings, we propose that n-back tasks may serve as tools for benchmarking the working memory capacity of large language models and hold potential for informing future efforts aimed at enhancing AI working memory.","urldate":"2024-02-16","publisher":"arXiv","author":[{"propositions":[],"lastnames":["Gong"],"firstnames":["Dongyu"],"suffixes":[]},{"propositions":[],"lastnames":["Wan"],"firstnames":["Xingchen"],"suffixes":[]},{"propositions":[],"lastnames":["Wang"],"firstnames":["Dingmin"],"suffixes":[]}],"month":"February","year":"2024","note":"arXiv:2305.03731 [cs, q-bio]","keywords":"Computer Science - Artificial Intelligence, Computer Science - Computation and Language, Quantitative Biology - Neurons and Cognition","bibtex":"@misc{gong_working_2024,\n\ttitle = {Working {Memory} {Capacity} of {ChatGPT}: {An} {Empirical} {Study}},\n\tshorttitle = {Working {Memory} {Capacity} of {ChatGPT}},\n\turl = {http://arxiv.org/abs/2305.03731},\n\tdoi = {10.48550/arXiv.2305.03731},\n\tabstract = {Working memory is a critical aspect of both human intelligence and artificial intelligence, serving as a workspace for the temporary storage and manipulation of information. In this paper, we systematically assess the working memory capacity of ChatGPT, a large language model developed by OpenAI, by examining its performance in verbal and spatial n-back tasks under various conditions. Our experiments reveal that ChatGPT has a working memory capacity limit strikingly similar to that of humans. Furthermore, we investigate the impact of different instruction strategies on ChatGPT's performance and observe that the fundamental patterns of a capacity limit persist. 
From our empirical findings, we propose that n-back tasks may serve as tools for benchmarking the working memory capacity of large language models and hold potential for informing future efforts aimed at enhancing AI working memory.},\n\turldate = {2024-02-16},\n\tpublisher = {arXiv},\n\tauthor = {Gong, Dongyu and Wan, Xingchen and Wang, Dingmin},\n\tmonth = feb,\n\tyear = {2024},\n\tnote = {arXiv:2305.03731 [cs, q-bio]},\n\tkeywords = {Computer Science - Artificial Intelligence, Computer Science - Computation and Language, Quantitative Biology - Neurons and Cognition},\n}\n\n\n\n\n\n\n\n","author_short":["Gong, D.","Wan, X.","Wang, D."],"key":"gong_working_2024-1","id":"gong_working_2024-1","bibbaseid":"gong-wan-wang-workingmemorycapacityofchatgptanempiricalstudy-2024","role":"author","urls":{"Paper":"http://arxiv.org/abs/2305.03731"},"keyword":["Computer Science - Artificial Intelligence","Computer Science - Computation and Language","Quantitative Biology - Neurons and Cognition"],"metadata":{"authorlinks":{}},"html":""},"bibtype":"misc","biburl":"https://bibbase.org/zotero/saurabhr","dataSources":["nxjWwW7fWbb5tfpKz"],"keywords":["computer science - artificial intelligence","computer science - computation and language","quantitative biology - neurons and cognition"],"search_terms":["working","memory","capacity","chatgpt","empirical","study","gong","wan","wang"],"title":"Working Memory Capacity of ChatGPT: An Empirical Study","year":2024}