The Thermodynamic Variational Objective. Masrani, V., Le, T. A., & Wood, F. In Thirty-third Conference on Neural Information Processing Systems (NeurIPS), 2019. Paper Arxiv Poster abstract bibtex 2 downloads We introduce the thermodynamic variational objective (TVO) for learning in both continuous and discrete deep generative models. The TVO arises from a key connection between variational inference and thermodynamic integration that results in a tighter lower bound to the log marginal likelihood than the standard variational evidence lower bound (ELBO) while remaining as broadly applicable. We provide a computationally efficient gradient estimator for the TVO that applies to continuous, discrete, and non-reparameterizable distributions and show that the objective functions used in variational inference, variational autoencoders, wake sleep, and inference compilation are all special cases of the TVO. We use the TVO to learn both discrete and continuous deep generative models and empirically demonstrate state of the art model and inference network learning.
@inproceedings{MAS-19,
title={The Thermodynamic Variational Objective},
author={Masrani, Vaden and Le, Tuan Anh and Wood, Frank},
booktitle={Thirty-third Conference on Neural Information Processing Systems (NeurIPS)},
archiveprefix = {arXiv},
eprint = {1907.00031},
url_Paper={https://arxiv.org/pdf/1907.00031.pdf},
url_ArXiv={https://arxiv.org/abs/1907.00031},
url_Poster={https://github.com/plai-group/bibliography/blob/master/presentations_posters/neurips_tvo_poster.pdf},
support = {D3M},
abstract={We introduce the thermodynamic variational objective (TVO) for learning in both continuous and discrete deep generative models. The TVO arises from a key connection between variational inference and thermodynamic integration that results in a tighter lower bound to the log marginal likelihood than the standard variational evidence lower bound (ELBO) while remaining as broadly applicable. We provide a computationally efficient gradient estimator for the TVO that applies to continuous, discrete, and non-reparameterizable distributions and show that the objective functions used in variational inference, variational autoencoders, wake sleep, and inference compilation are all special cases of the TVO. We use the TVO to learn both discrete and continuous deep generative models and empirically demonstrate state of the art model and inference network learning.},
year={2019}
}
Downloads: 2
{"_id":"XcSagRhNfwuknQxKh","bibbaseid":"masrani-le-wood-thethermodynamicvariationalobjective-2019","authorIDs":["5e309447cb949bdf01000179","5e30abb4c99510de0100012e","5e30afefc99510de0100016e","5e30b7262a1e4bde01000036","5e30c2c22a1e4bde01000108"],"author_short":["Masrani, V.","Le, T. A.","Wood, F."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","title":"The Thermodynamic Variational Objective","author":[{"propositions":[],"lastnames":["Masrani"],"firstnames":["Vaden"],"suffixes":[]},{"propositions":[],"lastnames":["Le"],"firstnames":["Tuan","Anh"],"suffixes":[]},{"propositions":[],"lastnames":["Wood"],"firstnames":["Frank"],"suffixes":[]}],"booktitle":"Thirty-third Conference on Neural Information Processing Systems (NeurIPS)","archiveprefix":"arXiv","eprint":"1907.00031","url_paper":"https://arxiv.org/pdf/1907.00031.pdf","url_arxiv":"https://arxiv.org/abs/1907.00031","url_poster":"https://github.com/plai-group/bibliography/blob/master/presentations_posters/neurips_tvo_poster.pdf","support":"D3M","abstract":"We introduce the thermodynamic variational objective (TVO) for learning in both continuous and discrete deep generative models. The TVO arises from a key connection between variational inference and thermodynamic integration that results in a tighter lower bound to the log marginal likelihood than the standard variational evidence lower bound (ELBO) while remaining as broadly applicable. We provide a computationally efficient gradient estimator for the TVO that applies to continuous, discrete, and non-reparameterizable distributions and show that the objective functions used in variational inference, variational autoencoders, wake sleep, and inference compilation are all special cases of the TVO. We use the TVO to learn both discrete and continuous deep generative models and empirically demonstrate state of the art model and inference network learning.","year":"2019","bibtex":"@inproceedings{MAS-19,\n title={The Thermodynamic Variational Objective},\n author={Masrani, Vaden and Le, Tuan Anh and Wood, Frank},\n booktitle={Thirty-third Conference on Neural Information Processing Systems (NeurIPS)},\n archiveprefix = {arXiv},\n eprint = {1907.00031},\n url_Paper={https://arxiv.org/pdf/1907.00031.pdf},\n url_ArXiv={https://arxiv.org/abs/1907.00031},\n url_Poster={https://github.com/plai-group/bibliography/blob/master/presentations_posters/neurips_tvo_poster.pdf},\n support = {D3M},\n abstract={We introduce the thermodynamic variational objective (TVO) for learning in both continuous and discrete deep generative models. The TVO arises from a key connection between variational inference and thermodynamic integration that results in a tighter lower bound to the log marginal likelihood than the standard variational evidence lower bound (ELBO) while remaining as broadly applicable. We provide a computationally efficient gradient estimator for the TVO that applies to continuous, discrete, and non-reparameterizable distributions and show that the objective functions used in variational inference, variational autoencoders, wake sleep, and inference compilation are all special cases of the TVO. We use the TVO to learn both discrete and continuous deep generative models and empirically demonstrate state of the art model and inference network learning.},\n year={2019}\n}\n\n\n","author_short":["Masrani, V.","Le, T. A.","Wood, F."],"key":"MAS-19","id":"MAS-19","bibbaseid":"masrani-le-wood-thethermodynamicvariationalobjective-2019","role":"author","urls":{" paper":"https://arxiv.org/pdf/1907.00031.pdf"," arxiv":"https://arxiv.org/abs/1907.00031"," poster":"https://github.com/plai-group/bibliography/blob/master/presentations_posters/neurips_tvo_poster.pdf"},"metadata":{"authorlinks":{}},"downloads":2},"bibtype":"inproceedings","biburl":"https://raw.githubusercontent.com/plai-group/bibliography/master/group_publications.bib","creationDate":"2019-10-14T03:16:44.530Z","downloads":2,"keywords":[],"search_terms":["thermodynamic","variational","objective","masrani","le","wood"],"title":"The Thermodynamic Variational Objective","year":2019,"dataSources":["7avRLRrz2ifJGMKcD","BKH7YtW7K7WNMA3cj","wyN5DxtoT6AQuiXnm"]}