LLsiM: Large Language Models for Similarity Assessment in Case-Based Reasoning. Lenz, M., Hoffmann, M., & Bergmann, R. In Bichindaritz, I. & Lopez, B., editors, Case-Based Reasoning Research and Development - 33rd International Conference, ICCBR 2025, Biarritz, France, June 30 - July 3, 2025, Proceedings, Lecture Notes in Computer Science, 2025. Springer. Accepted for Publication. abstract bibtex In Case-Based Reasoning (CBR), past experience is used to solve new problems. Determining the most relevant cases is a crucial aspect of this process and is typically based on one or multiple manually defined similarity measures, requiring deep domain knowledge. To overcome the knowledge-acquisition bottleneck, we propose the use of Large Language Models (LLMs) to automatically assess similarities between cases. We present three distinct approaches where the model is used for different tasks: (i) to predict similarity scores, (ii) to assess pairwise preferences, and (iii) to automatically configure similarity measures. Our conceptual work is accompanied by an open-source Python implementation that we use to evaluate the approaches on three different domains by comparing them to manually crafted similarity measures. Our results show that directly using LLM-based scores does not align well with the baseline rankings, but letting the LLM automatically configure the measures yields rankings that closely resemble the expert-defined ones.
@inproceedings{Lenz2025LLsiMLargeLanguage,
title = {{{LLsiM}}: {{Large Language Models}} for {{Similarity Assessment}} in {{Case-Based Reasoning}}},
shorttitle = {{{LLsiM}}},
booktitle = {{Case-Based Reasoning Research and Development - 33rd International Conference, {ICCBR} 2025, Biarritz, France, June 30 - July 3, 2025, Proceedings}},
author = {Lenz, Mirko and Hoffmann, Maximilian and Bergmann, Ralph},
editor = {Bichindaritz, Isabelle and Lopez, Beatriz},
year = {2025},
series = {Lecture {{Notes}} in {{Computer Science}}},
publisher = {Springer.},
note = {{Accepted for Publication.}},
abstract = {In Case-Based Reasoning (CBR), past experience is used to solve new problems. Determining the most relevant cases is a crucial aspect of this process and is typically based on one or multiple manually defined similarity measures, requiring deep domain knowledge. To overcome the knowledge-acquisition bottleneck, we propose the use of Large Language Models (LLMs) to automatically assess similarities between cases. We present three distinct approaches where the model is used for different tasks: (i) to predict similarity scores, (ii) to assess pairwise preferences, and (iii) to automatically configure similarity measures. Our conceptual work is accompanied by an open-source Python implementation that we use to evaluate the approaches on three different domains by comparing them to manually crafted similarity measures. Our results show that directly using LLM-based scores does not align well with the baseline rankings, but letting the LLM automatically configure the measures yields rankings that closely resemble the expert-defined ones.},
langid = {english}
}
Downloads: 0
{"_id":"kZCH9Kj398YHQGvTY","bibbaseid":"lenz-hoffmann-bergmann-llsimlargelanguagemodelsforsimilarityassessmentincasebasedreasoning-2025","author_short":["Lenz, M.","Hoffmann, M.","Bergmann, R."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","title":"LLsiM: Large Language Models for Similarity Assessment in Case-Based Reasoning","shorttitle":"LLsiM","booktitle":"Case-Based Reasoning Research and Development - 33rd International Conference, ICCBR 2025, Biarritz, France, June 30 - July 3, 2025, Proceedings","author":[{"propositions":[],"lastnames":["Lenz"],"firstnames":["Mirko"],"suffixes":[]},{"propositions":[],"lastnames":["Hoffmann"],"firstnames":["Maximilian"],"suffixes":[]},{"propositions":[],"lastnames":["Bergmann"],"firstnames":["Ralph"],"suffixes":[]}],"editor":[{"propositions":[],"lastnames":["Bichindaritz"],"firstnames":["Isabelle"],"suffixes":[]},{"propositions":[],"lastnames":["Lopez"],"firstnames":["Beatriz"],"suffixes":[]}],"year":"2025","series":"Lecture Notes in Computer Science","publisher":"Springer.","note":"Accepted for Publication.","abstract":"In Case-Based Reasoning (CBR), past experience is used to solve new problems. Determining the most relevant cases is a crucial aspect of this process and is typically based on one or multiple manuallydefined similarity measures, requiring deep domain knowledge. To overcome the knowledge-acquisition bottleneck, we propose the use of Large Language Models (LLMs) to automatically assess similarities between cases. We present three distinct approaches where the model is used for different tasks: (i) to predict similarity scores, (ii) to assess pairwise preferences, and (iii) to automatically configure similarity measures. Our conceptual work is accompanied by an open-source Python implementation that we use to evaluate the approaches on three different domains by comparing them to manually crafted similarity measures. 
Our results show that directly using LLM-based scores does not align well with the baseline rankings, but letting the LLM automatically configure the measures yields rankings that closely resemble the expert-defined ones.","langid":"english","bibtex":"@inproceedings{Lenz2025LLsiMLargeLanguage,\n title = {{{LLsiM}}: {{Large Language Models}} for {{Similarity Assessment}} in {{Case-Based Reasoning}}},\n shorttitle = {{{LLsiM}}},\n booktitle = {{Case-Based Reasoning Research and Development - 33rd International Conference, {ICCBR} 2025, Biarritz, France, June 30 - July 3, 2025, Proceedings}},\n author = {Lenz, Mirko and Hoffmann, Maximilian and Bergmann, Ralph},\n editor = {Bichindaritz, Isabelle and Lopez, Beatriz},\n year = {2025},\n series = {Lecture {{Notes}} in {{Computer Science}}},\n publisher = {Springer.},\n note \t\t = {{Accepted for Publication.}},\n abstract = {In Case-Based Reasoning (CBR), past experience is used to solve new problems. Determining the most relevant cases is a crucial aspect of this process and is typically based on one or multiple manuallydefined similarity measures, requiring deep domain knowledge. To overcome the knowledge-acquisition bottleneck, we propose the use of Large Language Models (LLMs) to automatically assess similarities between cases. We present three distinct approaches where the model is used for different tasks: (i) to predict similarity scores, (ii) to assess pairwise preferences, and (iii) to automatically configure similarity measures. Our conceptual work is accompanied by an open-source Python implementation that we use to evaluate the approaches on three different domains by comparing them to manually crafted similarity measures. 
Our results show that directly using LLM-based scores does not align well with the baseline rankings, but letting the LLM automatically configure the measures yields rankings that closely resemble the expert-defined ones.},\n langid = {english}\n}\n\n\n","author_short":["Lenz, M.","Hoffmann, M.","Bergmann, R."],"editor_short":["Bichindaritz, I.","Lopez, B."],"key":"Lenz2025LLsiMLargeLanguage","id":"Lenz2025LLsiMLargeLanguage","bibbaseid":"lenz-hoffmann-bergmann-llsimlargelanguagemodelsforsimilarityassessmentincasebasedreasoning-2025","role":"author","urls":{},"metadata":{"authorlinks":{}}},"bibtype":"inproceedings","biburl":"https://web.wi2.uni-trier.de/publications/WI2Publikationen.bib","dataSources":["MSp3DzP4ToPojqkFy"],"keywords":[],"search_terms":["llsim","large","language","models","similarity","assessment","case","based","reasoning","lenz","hoffmann","bergmann"],"title":"LLsiM: Large Language Models for Similarity Assessment in Case-Based Reasoning","year":2025}