Case-Based Reasoning Meets Large Language Models: A Research Manifesto For Open Challenges and Research Directions. Bach, K., Bergmann, R., Brand, F., Caro-Martínez, M., Eisenstadt, V., Floyd, M. W., Jayawardena, L., Leake, D., Lenz, M., Malburg, L., Ménager, D. H., Minor, M., Schack, B., Watson, I., Wilkerson, K., & Wiratunga, N. March 2025. Working paper or preprint.

Abstract: In recent years, the surge of Generative Artificial Intelligence (GenAI), particularly Large Language Models (LLMs), has led to a significant increase in the use of hybrid systems, which combine the strengths of different Artificial Intelligence (AI) paradigms to achieve better performance and efficiency. Although LLMs demonstrate remarkable effectiveness across numerous tasks due to their flexibility and general knowledge, they often face challenges related to accuracy, explainability, and their limited memory. Case-Based Reasoning (CBR), on the other hand, excels by recalling past experiences and using them to solve new problems, making it particularly well suited for tasks that require contextual understanding and decision-making. However, CBR systems suffer from issues such as the acquisition of various kinds of knowledge and the application of methods during the 4R cycle. In this paper, we identify several challenges plaguing LLMs and CBR systems and propose opportunities to combine the strengths of both methodologies to address these challenges. In addition, we outline future research directions for the community to explore.
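As a rough illustration of the retrieve-then-reuse hybrid pattern the abstract alludes to (CBR recalling past experiences, an LLM adapting them to a new problem), here is a minimal sketch. It is not taken from the paper; all names (Case, retrieve, llm_adapt) are hypothetical, the similarity measure is a toy, and the LLM adaptation step is a stub where a real model call would go.

```python
# Hedged sketch (not from the paper): a toy "retrieve then reuse" loop in which a
# CBR case base supplies the nearest past experience and an LLM-style adaptation
# step reuses it for the new problem. All names are hypothetical placeholders.
from dataclasses import dataclass


@dataclass
class Case:
    problem: str
    solution: str


def similarity(a: str, b: str) -> float:
    # Naive token-overlap similarity; stands in for the knowledge-intensive
    # similarity measures CBR systems normally use.
    ta, tb = set(a.lower().split()), set(b.lower().split())
    return len(ta & tb) / max(len(ta | tb), 1)


def retrieve(case_base: list[Case], query: str) -> Case:
    # Retrieve: recall the most similar past experience (first "R" of the 4R cycle).
    return max(case_base, key=lambda c: similarity(c.problem, query))


def llm_adapt(retrieved: Case, query: str) -> str:
    # Reuse: in a hybrid system an LLM would adapt the retrieved solution to the
    # new problem here; this stub only builds the prompt such a call might see.
    return (f"Adapt the solution '{retrieved.solution}' "
            f"(for problem '{retrieved.problem}') to the new problem '{query}'.")


if __name__ == "__main__":
    case_base = [
        Case("printer does not turn on", "check the power cable and fuse"),
        Case("printer prints blank pages", "replace the toner cartridge"),
    ]
    query = "printer will not power up"
    nearest = retrieve(case_base, query)
    print(llm_adapt(nearest, query))  # Revise and Retain steps omitted in this toy sketch
```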
@misc{BachEtAl_CBRMeetsLLMs_2025,
abstract = {In recent years, the surge of Generative Artificial Intelligence (GenAI), particularly Large Language Models (LLMs), has led to a significant increase in the use of hybrid systems, which combine the strengths of different Artificial Intelligence (AI) paradigms to achieve better performance and efficiency. Although LLMs demonstrate remarkable effectiveness across numerous tasks due to their flexibility and general knowledge, they often face challenges related to accuracy, explainability, and their limited memory. Case-Based Reasoning (CBR), on the other hand, excels by recalling past experiences and using them to solve new problems, making it particularly well suited for tasks that require contextual understanding and decision-making. However, CBR systems suffer from issues such as the acquisition of various kinds of knowledge and the application of methods during the 4R cycle. In this paper, we identify several challenges plaguing LLMs and CBR systems and propose opportunities to combine the strengths of both methodologies to address these challenges. In addition, we outline future research directions for the community to explore.},
month = {3},
year = {2025},
title = {{Case-Based Reasoning Meets Large Language Models: A Research Manifesto For Open Challenges and Research Directions}},
note = {working paper or preprint},
author = {Bach, Kerstin and Bergmann, Ralph and Brand, Florian and Caro-Mart{\'i}nez, Marta and Eisenstadt, Viktor and Floyd, Michael W. and Jayawardena, Lasal and Leake, David and Lenz, Mirko and Malburg, Lukas and M{\'e}nager, David H. and Minor, Mirjam and Schack, Brian and Watson, Ian and Wilkerson, Kaitlynne and Wiratunga, Nirmalie},
keywords = {Large Language Models, Case-Based Reasoning, Large Reasoning Models, Retrieval Augmented Generation, Challenges and Opportunities}
}
{"_id":"k86GjPcYsmcJRPQjx","bibbaseid":"bach-bergmann-brand-caromartinez-eisenstadt-wfloyd-jayawardena-leake-etal-casebasedreasoningmeetslargelanguagemodelsaresearchmanifestoforopenchallengesandresearchdirections-2025","author_short":["Bach, K.","Bergmann, R.","Brand, F.","Caro-Martínez, Marta","Eisenstadt, V.","W. Floyd, M.","Jayawardena, L.","Leake, D.","Lenz, M.","Malburg, L.","H. Ménager, D.","Minor, M.","Schack, B.","Watson, I.","Wilkerson, K.","Wiratunga, N."],"bibdata":{"bibtype":"misc","type":"misc","abstract":"In recent years, the surge of Generative Artificial Intelligence (GenAI), particularly Large Language Models (LLMs), has led to a significant increase in the use of hybrid systems, which combine the strengths of different Artificial Intelligence (AI) paradigms to achieve better performance and efficiency. Although LLMs demonstrate remarkable effectiveness across numerous tasks due to their flexibility and general knowledge, they often face challenges related to accuracy, explainability, and their limited memory. CaseBased Reasoning (CBR), on the other hand, excels by recalling past experiences and using them to solve new problems, making it particularly well suited for tasks that require contextual understanding and decisionmaking. However, CBR systems suffer from issues such as the acquisition of various kinds of knowledge and the application of methods during the 4R cycle. In this paper, we identify several challenges plaguing LLMs and CBR systems and propose opportunities to combine the strengths of both methodologies to address these challenges. In addition, we outline future research directions for the community to explore.","month":"3","year":"2025","title":"Case-Based Reasoning Meets Large Language Models: A Research Manifesto For Open Challenges and Research Directions","note":"working paper or preprint","author":[{"propositions":[],"lastnames":["Bach"],"firstnames":["Kerstin"],"suffixes":[]},{"propositions":[],"lastnames":["Bergmann"],"firstnames":["Ralph"],"suffixes":[]},{"propositions":[],"lastnames":["Brand"],"firstnames":["Florian"],"suffixes":[]},{"firstnames":[],"propositions":[],"lastnames":["Caro-Martínez, Marta"],"suffixes":[]},{"propositions":[],"lastnames":["Eisenstadt"],"firstnames":["Viktor"],"suffixes":[]},{"propositions":[],"lastnames":["W.","Floyd"],"firstnames":["Michael"],"suffixes":[]},{"propositions":[],"lastnames":["Jayawardena"],"firstnames":["Lasal"],"suffixes":[]},{"propositions":[],"lastnames":["Leake"],"firstnames":["David"],"suffixes":[]},{"propositions":[],"lastnames":["Lenz"],"firstnames":["Mirko"],"suffixes":[]},{"propositions":[],"lastnames":["Malburg"],"firstnames":["Lukas"],"suffixes":[]},{"propositions":[],"lastnames":["H.","Ménager"],"firstnames":["David"],"suffixes":[]},{"propositions":[],"lastnames":["Minor"],"firstnames":["Mirjam"],"suffixes":[]},{"propositions":[],"lastnames":["Schack"],"firstnames":["Brian"],"suffixes":[]},{"propositions":[],"lastnames":["Watson"],"firstnames":["Ian"],"suffixes":[]},{"propositions":[],"lastnames":["Wilkerson"],"firstnames":["Kaitlynne"],"suffixes":[]},{"propositions":[],"lastnames":["Wiratunga"],"firstnames":["Nirmalie"],"suffixes":[]}],"keywords":"Large Language Models, Case-Based Reasoning, Large Reasoning Models, Retrieval Augmented Generation, Challenges and Opportunities","bibtex":"@misc{BachEtAl_CBRMeetsLLMs_2025,\n abstract = {In recent years, the surge of Generative Artificial Intelligence (GenAI), particularly Large Language Models (LLMs), has led to a significant increase in the use 
of hybrid systems, which combine the strengths of different Artificial Intelligence (AI) paradigms to achieve better performance and efficiency. Although LLMs demonstrate remarkable effectiveness across numerous tasks due to their flexibility and general knowledge, they often face challenges related to accuracy, explainability, and their limited memory. CaseBased Reasoning (CBR), on the other hand, excels by recalling past experiences and using them to solve new problems, making it particularly well suited for tasks that require contextual understanding and decisionmaking. However, CBR systems suffer from issues such as the acquisition of various kinds of knowledge and the application of methods during the 4R cycle. In this paper, we identify several challenges plaguing LLMs and CBR systems and propose opportunities to combine the strengths of both methodologies to address these challenges. In addition, we outline future research directions for the community to explore.},\n month = {3},\n year = {2025},\n title = {{Case-Based Reasoning Meets Large Language Models: A Research Manifesto For Open Challenges and Research Directions}},\n note = {working paper or preprint},\n author = {Bach, Kerstin and Bergmann, Ralph and Brand, Florian and Caro-Mart{\\'i}nez, Marta and Eisenstadt, Viktor and W. Floyd, Michael and Jayawardena, Lasal and Leake, David and Lenz, Mirko and Malburg, Lukas and H. M{\\'e}nager, David and Minor, Mirjam and Schack, Brian and Watson, Ian and Wilkerson, Kaitlynne and Wiratunga, Nirmalie},\n\tkeywords = {Large Language Models, Case-Based Reasoning, Large Reasoning Models, Retrieval Augmented Generation, Challenges and Opportunities}\n}\n\n","author_short":["Bach, K.","Bergmann, R.","Brand, F.","Caro-Martínez, Marta","Eisenstadt, V.","W. Floyd, M.","Jayawardena, L.","Leake, D.","Lenz, M.","Malburg, L.","H. Ménager, D.","Minor, M.","Schack, B.","Watson, I.","Wilkerson, K.","Wiratunga, N."],"key":"BachEtAl_CBRMeetsLLMs_2025","id":"BachEtAl_CBRMeetsLLMs_2025","bibbaseid":"bach-bergmann-brand-caromartinez-eisenstadt-wfloyd-jayawardena-leake-etal-casebasedreasoningmeetslargelanguagemodelsaresearchmanifestoforopenchallengesandresearchdirections-2025","role":"author","urls":{},"keyword":["Large Language Models","Case-Based Reasoning","Large Reasoning Models","Retrieval Augmented Generation","Challenges and Opportunities"],"metadata":{"authorlinks":{}}},"bibtype":"misc","biburl":"https://web.wi2.uni-trier.de/publications/PublicationsMalburg.bib","dataSources":["MSp3DzP4ToPojqkFy","J3orK6zvpR7d8vDmC"],"keywords":["large language models","case-based reasoning","large reasoning models","retrieval augmented generation","challenges and opportunities"],"search_terms":["case","based","reasoning","meets","large","language","models","research","manifesto","open","challenges","research","directions","bach","bergmann","brand","caro-martínez","eisenstadt","w. floyd","jayawardena","leake","lenz","malburg","h. ménager","minor","schack","watson","wilkerson","wiratunga"],"title":"Case-Based Reasoning Meets Large Language Models: A Research Manifesto For Open Challenges and Research Directions","year":2025}