Revisiting Reweighted Wake-Sleep for Models with Stochastic Control Flow. Le, T. A., Kosiorek, A. R., Siddharth, N., Teh, Y. W., & Wood, F. In Adams, R. P. & Gogate, V., editors, volume 115 of Proceedings of the 35th Conference on Uncertainty in Artificial Intelligence (UAI), pages 1039–1049, Tel Aviv, Israel, 22–25 Jul, 2020. PMLR.
Link
Paper
Arxiv abstract bibtex 3 downloads Stochastic control-flow models (SCFMs) are a class of generative models that involve branching on choices from discrete random variables. Amortized gradient-based learning of SCFMs is challenging as most approaches targeting discrete variables rely on their continuous relaxations—which can be intractable in SCFMs, as branching on relaxations requires evaluating all (exponentially many) branching paths. Tractable alternatives mainly combine REINFORCE with complex control-variate schemes to improve the variance of naive estimators. Here, we revisit the reweighted wake-sleep (RWS) [5] algorithm, and through extensive evaluations, show that it outperforms current state-of-the-art methods in learning SCFMs. Further, in contrast to the importance weighted autoencoder, we observe that RWS learns better models and inference networks with increasing numbers of particles. Our results suggest that RWS is a competitive, often preferable, alternative for learning SCFMs.
@inproceedings{Le-20,
  title     = {Revisiting Reweighted Wake-Sleep for Models with Stochastic Control Flow},
  author    = {Le, Tuan Anh and Kosiorek, Adam R. and Siddharth, N. and Teh, Yee Whye and Wood, Frank},
  booktitle = {Proceedings of the 35th Conference on Uncertainty in Artificial Intelligence (UAI)},
  pages     = {1039--1049},
  year      = {2020},
  editor    = {Adams, Ryan P. and Gogate, Vibhav},
  volume    = {115},
  series    = {Proceedings of Machine Learning Research},
  address   = {Tel Aviv, Israel},
  month     = jul,
  publisher = {PMLR},
  url_Link  = {http://proceedings.mlr.press/v115/le20a.html},
  url_Paper = {http://proceedings.mlr.press/v115/le20a/le20a.pdf},
  url_ArXiv = {https://arxiv.org/abs/1805.10469},
  support   = {D3M},
  internal-note = {Conference held 22--25 Jul per original entry; moved proceedings title from series to the required booktitle field, series now carries the PMLR series name.},
  abstract  = {Stochastic control-flow models (SCFMs) are a class of generative models that involve branching on choices from discrete random variables. Amortized gradient-based learning of SCFMs is challenging as most approaches targeting discrete variables rely on their continuous relaxations—which can be intractable in SCFMs, as branching on relaxations requires evaluating all (exponentially many) branching paths. Tractable alternatives mainly combine REINFORCE with complex control-variate schemes to improve the variance of naive estimators. Here, we revisit the reweighted wake-sleep (RWS) [5] algorithm, and through extensive evaluations, show that it outperforms current state-of-the-art methods in learning SCFMs. Further, in contrast to the importance weighted autoencoder, we observe that RWS learns better models and inference networks with increasing numbers of particles. Our results suggest that RWS is a competitive, often preferable, alternative for learning SCFMs.},
}
Downloads: 3
{"_id":"RSQsK86E9TubT8aje","bibbaseid":"le-kosiorek-siddharth-teh-wood-revisitingreweightedwakesleepformodelswithstochasticcontrolflow-2020","authorIDs":[],"author_short":["Le, T. A.","Kosiorek, A. R.","Siddharth, N.","Teh, Y. W.","Wood, F."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","title":"Revisiting Reweighted Wake-Sleep for Models with Stochastic Control Flow","author":[{"propositions":[],"lastnames":["Le"],"firstnames":["Tuan","Anh"],"suffixes":[]},{"propositions":[],"lastnames":["Kosiorek"],"firstnames":["Adam","R."],"suffixes":[]},{"propositions":[],"lastnames":["Siddharth"],"firstnames":["N."],"suffixes":[]},{"propositions":[],"lastnames":["Teh"],"firstnames":["Yee","Whye"],"suffixes":[]},{"propositions":[],"lastnames":["Wood"],"firstnames":["Frank"],"suffixes":[]}],"pages":"1039–1049","year":"2020","editor":[{"firstnames":["Ryan","P."],"propositions":[],"lastnames":["Adams"],"suffixes":[]},{"firstnames":["Vibhav"],"propositions":[],"lastnames":["Gogate"],"suffixes":[]}],"volume":"115","series":"Proceedings of the 35th conference on Uncertainty in Artificial Intelligence (UAI)","address":"Tel Aviv, Israel","month":"22–25 Jul","publisher":"PMLR","url_link":"http://proceedings.mlr.press/v115/le20a.html","url_paper":"http://proceedings.mlr.press/v115/le20a/le20a.pdf","url_arxiv":"https://arxiv.org/abs/1805.10469","support":"D3M","abstract":"Stochastic control-flow models (SCFMs) are a class of generative models that involve branching on choices from discrete random variables. Amortized gradient-based learning of SCFMs is challenging as most approaches targeting discrete variables rely on their continuous relaxations—which can be intractable in SCFMs, as branching on relaxations requires evaluating all (exponentially many) branching paths. Tractable alternatives mainly combine REINFORCE with complex control-variate schemes to improve the variance of naive estimators. 
Here, we revisit the reweighted wake-sleep (RWS) [5] algorithm, and through extensive evaluations, show that it outperforms current state-of-the-art methods in learning SCFMs. Further, in contrast to the importance weighted autoencoder, we observe that RWS learns better models and inference networks with increasing numbers of particles. Our results suggest that RWS is a competitive, often preferable, alternative for learning SCFMs.","bibtex":"@InProceedings{Le-20, \n title = {Revisiting Reweighted Wake-Sleep for Models with Stochastic Control Flow}, \n author = {Le, Tuan Anh and Kosiorek, Adam R. and Siddharth, N. and Teh, Yee Whye and Wood, Frank}, \n pages = {1039--1049}, \n year = {2020}, \n editor = {Ryan P. Adams and Vibhav Gogate}, \n volume = {115}, \n series = {Proceedings of the 35th conference on Uncertainty in Artificial Intelligence (UAI)}, \n address = {Tel Aviv, Israel}, \n month = {22--25 Jul}, \n publisher = {PMLR}, \n url_Link = {http://proceedings.mlr.press/v115/le20a.html}, \n url_Paper = {http://proceedings.mlr.press/v115/le20a/le20a.pdf}, \n url_ArXiv={https://arxiv.org/abs/1805.10469},\n support = {D3M},\n abstract = {Stochastic control-flow models (SCFMs) are a class of generative models that involve branching on choices from discrete random variables. Amortized gradient-based learning of SCFMs is challenging as most approaches targeting discrete variables rely on their continuous relaxations—which can be intractable in SCFMs, as branching on relaxations requires evaluating all (exponentially many) branching paths. Tractable alternatives mainly combine REINFORCE with complex control-variate schemes to improve the variance of naive estimators. Here, we revisit the reweighted wake-sleep (RWS) [5] algorithm, and through extensive evaluations, show that it outperforms current state-of-the-art methods in learning SCFMs. 
Further, in contrast to the importance weighted autoencoder, we observe that RWS learns better models and inference networks with increasing numbers of particles. Our results suggest that RWS is a competitive, often preferable, alternative for learning SCFMs.} \n }\n\n","author_short":["Le, T. A.","Kosiorek, A. R.","Siddharth, N.","Teh, Y. W.","Wood, F."],"editor_short":["Adams, R. P.","Gogate, V."],"key":"Le-20","id":"Le-20","bibbaseid":"le-kosiorek-siddharth-teh-wood-revisitingreweightedwakesleepformodelswithstochasticcontrolflow-2020","role":"author","urls":{" link":"http://proceedings.mlr.press/v115/le20a.html"," paper":"http://proceedings.mlr.press/v115/le20a/le20a.pdf"," arxiv":"https://arxiv.org/abs/1805.10469"},"metadata":{"authorlinks":{}},"downloads":3},"bibtype":"inproceedings","biburl":"https://raw.githubusercontent.com/plai-group/bibliography/master/group_publications.bib","creationDate":"2020-10-21T21:22:06.507Z","downloads":3,"keywords":[],"search_terms":["revisiting","reweighted","wake","sleep","models","stochastic","control","flow","le","kosiorek","siddharth","teh","wood"],"title":"Revisiting Reweighted Wake-Sleep for Models with Stochastic Control Flow","year":2020,"dataSources":["7avRLRrz2ifJGMKcD","BKH7YtW7K7WNMA3cj","wyN5DxtoT6AQuiXnm"]}