AstroVisBench: A Code Benchmark for Scientific Computing and Visualization in Astronomy. Joseph, S. A., Husain, S. M., Offner, S. S. R., Juneau, S., Torrey, P., Bolton, A. S., Farias, J. P., Gaffney, N., Durrett, G., & Li, J. J. October, 2025. arXiv:2505.20538 [cs]
Paper doi abstract bibtex Large Language Models (LLMs) are being explored for applications in scientific research, including their capabilities to synthesize literature, answer research questions, generate research ideas, and even conduct computational experiments. Ultimately, our goal is for these to help scientists derive novel scientific insights. In many areas of science, such insights often arise from processing and visualizing data to understand its patterns. However, evaluating whether an LLM-mediated scientific workflow produces outputs conveying the correct scientific insights is challenging to evaluate and has not been addressed in past work. We introduce ASTROVISBENCH, the first benchmark for both scientific computing and visualization in the astronomy domain. ASTROVISBENCH judges a language model’s ability to both (1) create astronomy-specific workflows to process and analyze data and (2) visualize the results of these workflows through complex plots. Our evaluation of visualizations uses a novel LLM-as-a-judge workflow, which is validated against annotation by five professional astronomers. Using ASTROVISBENCH we present an evaluation of state-of-the-art language models, showing a significant gap in their ability to engage in astronomy research as useful assistants. This evaluation provides a strong end-to-end evaluation for AI scientists that offers a path forward for the development of visualization-based workflows, which are central to a broad range of domains from physics to biology. We release the code and data for ASTROVISBENCH at astrovisbench.github.io.
@misc{joseph_astrovisbench_2025,
  title        = {{AstroVisBench}: A Code Benchmark for Scientific Computing and Visualization in Astronomy},
  shorttitle   = {{AstroVisBench}},
  author       = {Joseph, Sebastian Antony and Husain, Syed Murtaza and Offner, Stella S. R. and Juneau, Stéphanie and Torrey, Paul and Bolton, Adam S. and Farias, Juan P. and Gaffney, Niall and Durrett, Greg and Li, Junyi Jessy},
  year         = {2025},
  month        = oct,
  publisher    = {arXiv},
  eprint       = {2505.20538},
  eprinttype   = {arXiv},
  eprintclass  = {cs},
  doi          = {10.48550/arXiv.2505.20538},
  url          = {http://arxiv.org/abs/2505.20538},
  urldate      = {2026-01-16},
  language     = {en},
  abstract     = {Large Language Models (LLMs) are being explored for applications in scientific research, including their capabilities to synthesize literature, answer research questions, generate research ideas, and even conduct computational experiments. Ultimately, our goal is for these to help scientists derive novel scientific insights. In many areas of science, such insights often arise from processing and visualizing data to understand its patterns. However, evaluating whether an LLM-mediated scientific workflow produces outputs conveying the correct scientific insights is challenging to evaluate and has not been addressed in past work. We introduce ASTROVISBENCH, the first benchmark for both scientific computing and visualization in the astronomy domain. ASTROVISBENCH judges a language model’s ability to both (1) create astronomy-specific workflows to process and analyze data and (2) visualize the results of these workflows through complex plots. Our evaluation of visualizations uses a novel LLM-as-a-judge workflow, which is validated against annotation by five professional astronomers. Using ASTROVISBENCH we present an evaluation of state-of-the-art language models, showing a significant gap in their ability to engage in astronomy research as useful assistants. This evaluation provides a strong end-to-end evaluation for AI scientists that offers a path forward for the development of visualization-based workflows, which are central to a broad range of domains from physics to biology. We release the code and data for ASTROVISBENCH at astrovisbench.github.io.},
  keywords     = {Accelerated, Astrophysics - Instrumentation and Methods for Astrophysics, Computer Science - Computation and Language, Computer Science - Machine Learning, Explorable},
}
Downloads: 0
{"_id":"ojvsAqYwmrkhGSLyw","bibbaseid":"joseph-husain-offner-juneau-torrey-bolton-farias-gaffney-etal-astrovisbenchacodebenchmarkforscientificcomputingandvisualizationinastronomy-2025","author_short":["Joseph, S. A.","Husain, S. M.","Offner, S. S. R.","Juneau, S.","Torrey, P.","Bolton, A. S.","Farias, J. P.","Gaffney, N.","Durrett, G.","Li, J. J."],"bibdata":{"bibtype":"misc","type":"misc","title":"AstroVisBench: A Code Benchmark for Scientific Computing and Visualization in Astronomy","shorttitle":"AstroVisBench","url":"http://arxiv.org/abs/2505.20538","doi":"10.48550/arXiv.2505.20538","abstract":"Large Language Models (LLMs) are being explored for applications in scientific research, including their capabilities to synthesize literature, answer research questions, generate research ideas, and even conduct computational experiments. Ultimately, our goal is for these to help scientists derive novel scientific insights. In many areas of science, such insights often arise from processing and visualizing data to understand its patterns. However, evaluating whether an LLM-mediated scientific workflow produces outputs conveying the correct scientific insights is challenging to evaluate and has not been addressed in past work. We introduce ASTROVISBENCH, the first benchmark for both scientific computing and visualization in the astronomy domain. ASTROVISBENCH judges a language model’s ability to both (1) create astronomy-specific workflows to process and analyze data and (2) visualize the results of these workflows through complex plots. Our evaluation of visualizations uses a novel LLM-as-a-judge workflow, which is validated against annotation by five professional astronomers. Using ASTROVISBENCH we present an evaluation of state-of-the-art language models, showing a significant gap in their ability to engage in astronomy research as useful assistants. 
This evaluation provides a strong end-to-end evaluation for AI scientists that offers a path forward for the development of visualization-based workflows, which are central to a broad range of domains from physics to biology. We release the code and data for ASTROVISBENCH at astrovisbench.github.io.","language":"en","urldate":"2026-01-16","publisher":"arXiv","author":[{"propositions":[],"lastnames":["Joseph"],"firstnames":["Sebastian","Antony"],"suffixes":[]},{"propositions":[],"lastnames":["Husain"],"firstnames":["Syed","Murtaza"],"suffixes":[]},{"propositions":[],"lastnames":["Offner"],"firstnames":["Stella","S.","R."],"suffixes":[]},{"propositions":[],"lastnames":["Juneau"],"firstnames":["Stéphanie"],"suffixes":[]},{"propositions":[],"lastnames":["Torrey"],"firstnames":["Paul"],"suffixes":[]},{"propositions":[],"lastnames":["Bolton"],"firstnames":["Adam","S."],"suffixes":[]},{"propositions":[],"lastnames":["Farias"],"firstnames":["Juan","P."],"suffixes":[]},{"propositions":[],"lastnames":["Gaffney"],"firstnames":["Niall"],"suffixes":[]},{"propositions":[],"lastnames":["Durrett"],"firstnames":["Greg"],"suffixes":[]},{"propositions":[],"lastnames":["Li"],"firstnames":["Junyi","Jessy"],"suffixes":[]}],"month":"October","year":"2025","note":"arXiv:2505.20538 [cs]","keywords":"Accelerated, Astrophysics - Instrumentation and Methods for Astrophysics, Computer Science - Computation and Language, Computer Science - Machine Learning, Explorable","bibtex":"@misc{joseph_astrovisbench_2025,\n\ttitle = {{AstroVisBench}: {A} {Code} {Benchmark} for {Scientific} {Computing} and {Visualization} in {Astronomy}},\n\tshorttitle = {{AstroVisBench}},\n\turl = {http://arxiv.org/abs/2505.20538},\n\tdoi = {10.48550/arXiv.2505.20538},\n\tabstract = {Large Language Models (LLMs) are being explored for applications in scientific research, including their capabilities to synthesize literature, answer research questions, generate research ideas, and even conduct computational experiments. 
Ultimately, our goal is for these to help scientists derive novel scientific insights. In many areas of science, such insights often arise from processing and visualizing data to understand its patterns. However, evaluating whether an LLM-mediated scientific workflow produces outputs conveying the correct scientific insights is challenging to evaluate and has not been addressed in past work. We introduce ASTROVISBENCH, the first benchmark for both scientific computing and visualization in the astronomy domain. ASTROVISBENCH judges a language model’s ability to both (1) create astronomy-specific workflows to process and analyze data and (2) visualize the results of these workflows through complex plots. Our evaluation of visualizations uses a novel LLM-as-a-judge workflow, which is validated against annotation by five professional astronomers. Using ASTROVISBENCH we present an evaluation of state-of-the-art language models, showing a significant gap in their ability to engage in astronomy research as useful assistants. This evaluation provides a strong end-to-end evaluation for AI scientists that offers a path forward for the development of visualization-based workflows, which are central to a broad range of domains from physics to biology. We release the code and data for ASTROVISBENCH at astrovisbench.github.io.},\n\tlanguage = {en},\n\turldate = {2026-01-16},\n\tpublisher = {arXiv},\n\tauthor = {Joseph, Sebastian Antony and Husain, Syed Murtaza and Offner, Stella S. R. and Juneau, Stéphanie and Torrey, Paul and Bolton, Adam S. and Farias, Juan P. and Gaffney, Niall and Durrett, Greg and Li, Junyi Jessy},\n\tmonth = oct,\n\tyear = {2025},\n\tnote = {arXiv:2505.20538 [cs]},\n\tkeywords = {Accelerated, Astrophysics - Instrumentation and Methods for Astrophysics, Computer Science - Computation and Language, Computer Science - Machine Learning, Explorable},\n}\n\n\n\n\n\n\n\n","author_short":["Joseph, S. A.","Husain, S. M.","Offner, S. S. 
R.","Juneau, S.","Torrey, P.","Bolton, A. S.","Farias, J. P.","Gaffney, N.","Durrett, G.","Li, J. J."],"key":"joseph_astrovisbench_2025","id":"joseph_astrovisbench_2025","bibbaseid":"joseph-husain-offner-juneau-torrey-bolton-farias-gaffney-etal-astrovisbenchacodebenchmarkforscientificcomputingandvisualizationinastronomy-2025","role":"author","urls":{"Paper":"http://arxiv.org/abs/2505.20538"},"keyword":["Accelerated","Astrophysics - Instrumentation and Methods for Astrophysics","Computer Science - Computation and Language","Computer Science - Machine Learning","Explorable"],"metadata":{"authorlinks":{}}},"bibtype":"misc","biburl":"https://bibbase.org/zotero-group/pratikmhatre/5933976","dataSources":["yJr5AAtJ5Sz3Q4WT4"],"keywords":["accelerated","astrophysics - instrumentation and methods for astrophysics","computer science - computation and language","computer science - machine learning","explorable"],"search_terms":["astrovisbench","code","benchmark","scientific","computing","visualization","astronomy","joseph","husain","offner","juneau","torrey","bolton","farias","gaffney","durrett","li"],"title":"AstroVisBench: A Code Benchmark for Scientific Computing and Visualization in Astronomy","year":2025}