Etalumis: Bringing Probabilistic Programming to Scientific Simulators at Scale. Baydin, A. G., Shao, L., Bhimji, W., Heinrich, L., Meadows, L., Liu, J., Munk, A., Naderiparizi, S., Gram-Hansen, B., Louppe, G., & others In the International Conference for High Performance Computing, Networking, Storage and Analysis (SC ’19), 2019. Paper Arxiv doi abstract bibtex 1 download Probabilistic programming languages (PPLs) are receiving widespread attention for performing Bayesian inference in complex generative models. However, applications to science remain limited because of the impracticability of rewriting complex scientific simulators in a PPL, the computational cost of inference, and the lack of scalable implementations. To address these, we present a novel PPL framework that couples directly to existing scientific simulators through a cross-platform probabilistic execution protocol and provides Markov chain Monte Carlo (MCMC) and deep-learning-based inference compilation (IC) engines for tractable inference. To guide IC inference, we perform distributed training of a dynamic 3DCNN–LSTM architecture with a PyTorch-MPI-based framework on 1,024 32-core CPU nodes of the Cori supercomputer with a global minibatch size of 128k: achieving a performance of 450 Tflop/s through enhancements to PyTorch. We demonstrate a Large Hadron Collider (LHC) use-case with the C++ Sherpa simulator and achieve the largest-scale posterior inference in a Turing-complete PPL.
@inproceedings{BAY-19,
  title         = {{Etalumis}: Bringing Probabilistic Programming to Scientific Simulators at Scale},
  author        = {Baydin, At{\i}l{\i}m G{\"u}ne{\c{s}} and Shao, Lei and Bhimji, Wahid and Heinrich, Lukas and Meadows, Lawrence and Liu, Jialin and Munk, Andreas and Naderiparizi, Saeid and Gram-Hansen, Bradley and Louppe, Gilles and others},
  booktitle     = {Proceedings of the International Conference for High Performance Computing, Networking, Storage and Analysis ({SC} '19)},
  archiveprefix = {arXiv},
  eprint        = {1907.03382},
  support       = {D3M,ETALUMIS},
  url_Paper     = {https://arxiv.org/pdf/1907.03382.pdf},
  url_ArXiv     = {https://arxiv.org/abs/1907.03382},
  abstract      = {Probabilistic programming languages (PPLs) are receiving widespread attention for performing Bayesian inference in complex generative models. However, applications to science remain limited because of the impracticability of rewriting complex scientific simulators in a PPL, the computational cost of inference, and the lack of scalable implementations. To address these, we present a novel PPL framework that couples directly to existing scientific simulators through a cross-platform probabilistic execution protocol and provides Markov chain Monte Carlo (MCMC) and deep-learning-based inference compilation (IC) engines for tractable inference. To guide IC inference, we perform distributed training of a dynamic 3DCNN--LSTM architecture with a PyTorch-MPI-based framework on 1,024 32-core CPU nodes of the Cori supercomputer with a global minibatch size of 128k: achieving a performance of 450 Tflop/s through enhancements to PyTorch. We demonstrate a Large Hadron Collider (LHC) use-case with the C++ Sherpa simulator and achieve the largest-scale posterior inference in a Turing-complete PPL.},
  year          = {2019},
  doi           = {10.1145/3295500.3356180},
}
Downloads: 1
{"_id":"jXcGXi6Ddvj8dphFm","bibbaseid":"baydin-shao-bhimji-heinrich-meadows-liu-munk-naderiparizi-etal-etalumisbringingprobabilisticprogrammingtoscientificsimulatorsatscale-2019","authorIDs":["2MhrdqhNCwABiAH3E","5e309447cb949bdf01000179","5e30abb4c99510de0100012e","5e30afefc99510de0100016e","5e30b7262a1e4bde01000036","5e30c2c22a1e4bde01000108","5e5d975d5726ecdf010000b6","7vkTiDFWqjzv59gw8","HZZ9Y5BbvYTYdSyTR","JDgF3AcAX7jqZdYJD","MdPkpujkGCiQF3etm","fRCPkcuRyGqzAreMH","qoKuX5u8wxEGQGNjH"],"author_short":["Baydin, A. G.","Shao, L.","Bhimji, W.","Heinrich, L.","Meadows, L.","Liu, J.","Munk, A.","Naderiparizi, S.","Gram-Hansen, B.","Louppe, G.","others"],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","title":"Etalumis: Bringing Probabilistic Programming to Scientific Simulators at Scale","author":[{"propositions":[],"lastnames":["Baydin"],"firstnames":["Atılım","Güneş"],"suffixes":[]},{"propositions":[],"lastnames":["Shao"],"firstnames":["Lei"],"suffixes":[]},{"propositions":[],"lastnames":["Bhimji"],"firstnames":["Wahid"],"suffixes":[]},{"propositions":[],"lastnames":["Heinrich"],"firstnames":["Lukas"],"suffixes":[]},{"propositions":[],"lastnames":["Meadows"],"firstnames":["Lawrence"],"suffixes":[]},{"propositions":[],"lastnames":["Liu"],"firstnames":["Jialin"],"suffixes":[]},{"propositions":[],"lastnames":["Munk"],"firstnames":["Andreas"],"suffixes":[]},{"propositions":[],"lastnames":["Naderiparizi"],"firstnames":["Saeid"],"suffixes":[]},{"propositions":[],"lastnames":["Gram-Hansen"],"firstnames":["Bradley"],"suffixes":[]},{"propositions":[],"lastnames":["Louppe"],"firstnames":["Gilles"],"suffixes":[]},{"firstnames":[],"propositions":[],"lastnames":["others"],"suffixes":[]}],"booktitle":"the International Conference for High Performance Computing, Networking, Storage and Analysis (SC 
’19)","archiveprefix":"arXiv","eprint":"1907.03382","support":"D3M,ETALUMIS","url_paper":"https://arxiv.org/pdf/1907.03382.pdf","url_arxiv":"https://arxiv.org/abs/1907.03382","abstract":"Probabilistic programming languages (PPLs) are receiving widespread attention for performing Bayesian inference in complex generative models. However, applications to science remain limited because of the impracticability of rewriting complex scientific simulators in a PPL, the computational cost of inference, and the lack of scalable implementations. To address these, we present a novel PPL framework that couples directly to existing scientific simulators through a cross-platform probabilistic execution protocol and provides Markov chain Monte Carlo (MCMC) and deep-learning-based inference compilation (IC) engines for tractable inference. To guide IC inference, we perform distributed training of a dynamic 3DCNN–LSTM architecture with a PyTorch-MPI-based framework on 1,024 32-core CPU nodes of the Cori supercomputer with a global minibatch size of 128k: achieving a performance of 450 Tflop/s through enhancements to PyTorch. 
We demonstrate a Large Hadron Collider (LHC) use-case with the C++ Sherpa simulator and achieve the largest-scale posterior inference in a Turing-complete PPL.","year":"2019","doi":"10.1145/3295500.3356180","bibtex":"@inproceedings{BAY-19,\n title={Etalumis: Bringing Probabilistic Programming to Scientific Simulators at Scale},\n author={Baydin, At{\\i}l{\\i}m G{\\\"u}ne{\\c{s}} and Shao, Lei and Bhimji, Wahid and Heinrich, Lukas and Meadows, Lawrence and Liu, Jialin and Munk, Andreas and Naderiparizi, Saeid and Gram-Hansen, Bradley and Louppe, Gilles and others},\n booktitle={the International Conference for High Performance Computing, Networking, Storage and Analysis (SC ’19)},\n archiveprefix = {arXiv},\n eprint = {1907.03382},\n support = {D3M,ETALUMIS},\n url_Paper={https://arxiv.org/pdf/1907.03382.pdf},\n url_ArXiv={https://arxiv.org/abs/1907.03382},\n abstract={Probabilistic programming languages (PPLs) are receiving widespread attention for performing Bayesian inference in complex generative models. However, applications to science remain limited because of the impracticability of rewriting complex scientific simulators in a PPL, the computational cost of inference, and the lack of scalable implementations. To address these, we present a novel PPL framework that couples directly to existing scientific simulators through a cross-platform probabilistic execution protocol and provides Markov chain Monte Carlo (MCMC) and deep-learning-based inference compilation (IC) engines for tractable inference. To guide IC inference, we perform distributed training of a dynamic 3DCNN--LSTM architecture with a PyTorch-MPI-based framework on 1,024 32-core CPU nodes of the Cori supercomputer with a global minibatch size of 128k: achieving a performance of 450 Tflop/s through enhancements to PyTorch. 
We demonstrate a Large Hadron Collider (LHC) use-case with the C++ Sherpa simulator and achieve the largest-scale posterior inference in a Turing-complete PPL.},\n year={2019},\n doi={10.1145/3295500.3356180}\n}\n\n","author_short":["Baydin, A. G.","Shao, L.","Bhimji, W.","Heinrich, L.","Meadows, L.","Liu, J.","Munk, A.","Naderiparizi, S.","Gram-Hansen, B.","Louppe, G.","others"],"key":"BAY-19","id":"BAY-19","bibbaseid":"baydin-shao-bhimji-heinrich-meadows-liu-munk-naderiparizi-etal-etalumisbringingprobabilisticprogrammingtoscientificsimulatorsatscale-2019","role":"author","urls":{" paper":"https://arxiv.org/pdf/1907.03382.pdf"," arxiv":"https://arxiv.org/abs/1907.03382"},"metadata":{"authorlinks":{}},"downloads":1},"bibtype":"inproceedings","biburl":"https://raw.githubusercontent.com/plai-group/bibliography/master/group_publications.bib","creationDate":"2020-01-28T20:18:01.861Z","downloads":1,"keywords":[],"search_terms":["etalumis","bringing","probabilistic","programming","scientific","simulators","scale","baydin","shao","bhimji","heinrich","meadows","liu","munk","naderiparizi","gram-hansen","louppe","others"],"title":"Etalumis: Bringing Probabilistic Programming to Scientific Simulators at Scale","year":2019,"dataSources":["7avRLRrz2ifJGMKcD","BKH7YtW7K7WNMA3cj","wyN5DxtoT6AQuiXnm"]}