Learning Approximately Objective Priors. Nalisnick, E. & Smyth, P. arXiv:1704.01168 [stat], August, 2017. arXiv: 1704.01168. Paper · abstract · bibtex. Informative Bayesian priors are often difficult to elicit, and when this is the case, modelers usually turn to noninformative or objective priors. However, objective priors such as the Jeffreys and reference priors are not tractable to derive for many models of interest. We address this issue by proposing techniques for learning reference prior approximations: we select a parametric family and optimize a black-box lower bound on the reference prior objective to find the member of the family that serves as a good approximation. We experimentally demonstrate the method’s effectiveness by recovering Jeffreys priors and learning the Variational Autoencoder’s reference prior.
@article{nalisnick_learning_2017,
title = {Learning {Approximately} {Objective} {Priors}},
url = {http://arxiv.org/abs/1704.01168},
abstract = {Informative Bayesian priors are often difficult to elicit, and when this is the case, modelers usually turn to noninformative or objective priors. However, objective priors such as the Jeffreys and reference priors are not tractable to derive for many models of interest. We address this issue by proposing techniques for learning reference prior approximations: we select a parametric family and optimize a black-box lower bound on the reference prior objective to find the member of the family that serves as a good approximation. We experimentally demonstrate the method’s effectiveness by recovering Jeffreys priors and learning the Variational Autoencoder’s reference prior.},
language = {en},
urldate = {2022-01-19},
journal = {arXiv:1704.01168 [stat]},
author = {Nalisnick, Eric and Smyth, Padhraic},
month = aug,
year = {2017},
note = {arXiv: 1704.01168},
keywords = {/unread, Statistics - Computation, Statistics - Machine Learning, ⛔ No DOI found},
}
Downloads: 0
{"_id":"Xby9dwtEBcFWNsYp7","bibbaseid":"nalisnick-smyth-learningapproximatelyobjectivepriors-2017","author_short":["Nalisnick, E.","Smyth, P."],"bibdata":{"bibtype":"article","type":"article","title":"Learning Approximately Objective Priors","url":"http://arxiv.org/abs/1704.01168","abstract":"Informative Bayesian priors are often difficult to elicit, and when this is the case, modelers usually turn to noninformative or objective priors. However, objective priors such as the Jeffreys and reference priors are not tractable to derive for many models of interest. We address this issue by proposing techniques for learning reference prior approximations: we select a parametric family and optimize a black-box lower bound on the reference prior objective to find the member of the family that serves as a good approximation. We experimentally demonstrate the method’s effectiveness by recovering Jeffreys priors and learning the Variational Autoencoder’s reference prior.","language":"en","urldate":"2022-01-19","journal":"arXiv:1704.01168 [stat]","author":[{"propositions":[],"lastnames":["Nalisnick"],"firstnames":["Eric"],"suffixes":[]},{"propositions":[],"lastnames":["Smyth"],"firstnames":["Padhraic"],"suffixes":[]}],"month":"August","year":"2017","note":"arXiv: 1704.01168","keywords":"/unread, Statistics - Computation, Statistics - Machine Learning, ⛔ No DOI found","bibtex":"@article{nalisnick_learning_2017,\n\ttitle = {Learning {Approximately} {Objective} {Priors}},\n\turl = {http://arxiv.org/abs/1704.01168},\n\tabstract = {Informative Bayesian priors are often difficult to elicit, and when this is the case, modelers usually turn to noninformative or objective priors. However, objective priors such as the Jeffreys and reference priors are not tractable to derive for many models of interest. We address this issue by proposing techniques for learning reference prior approximations: we select a parametric family and optimize a black-box lower bound on the reference prior objective to find the member of the family that serves as a good approximation. We experimentally demonstrate the method’s effectiveness by recovering Jeffreys priors and learning the Variational Autoencoder’s reference prior.},\n\tlanguage = {en},\n\turldate = {2022-01-19},\n\tjournal = {arXiv:1704.01168 [stat]},\n\tauthor = {Nalisnick, Eric and Smyth, Padhraic},\n\tmonth = aug,\n\tyear = {2017},\n\tnote = {arXiv: 1704.01168},\n\tkeywords = {/unread, Statistics - Computation, Statistics - Machine Learning, ⛔ No DOI found},\n}\n\n","author_short":["Nalisnick, E.","Smyth, P."],"key":"nalisnick_learning_2017","id":"nalisnick_learning_2017","bibbaseid":"nalisnick-smyth-learningapproximatelyobjectivepriors-2017","role":"author","urls":{"Paper":"http://arxiv.org/abs/1704.01168"},"keyword":["/unread","Statistics - Computation","Statistics - Machine Learning","⛔ No DOI found"],"metadata":{"authorlinks":{}},"html":""},"bibtype":"article","biburl":"https://bibbase.org/zotero/victorjhu","dataSources":["CmHEoydhafhbkXXt5"],"keywords":["/unread","statistics - computation","statistics - machine learning","⛔ no doi found"],"search_terms":["learning","approximately","objective","priors","nalisnick","smyth"],"title":"Learning Approximately Objective Priors","year":2017}