Adversarial Autoencoders. Makhzani, A., Shlens, J., Jaitly, N., Goodfellow, I., & Frey, B. 2015. Paper Website abstract bibtex In this paper, we propose the "adversarial autoencoder" (AAE), which is a probabilistic autoencoder that uses the recently proposed generative adversarial networks (GAN) to perform variational inference by matching the aggregated posterior of the hidden code vector of the autoencoder with an arbitrary prior distribution. Matching the aggregated posterior to the prior ensures that generating from any part of prior space results in meaningful samples. As a result, the decoder of the adversarial autoencoder learns a deep generative model that maps the imposed prior to the data distribution. We show how the adversarial autoencoder can be used in applications such as semi-supervised classification, disentangling style and content of images, unsupervised clustering, dimensionality reduction and data visualization. We performed experiments on MNIST, Street View House Numbers and Toronto Face datasets and show that adversarial autoencoders achieve competitive results in generative modeling and semi-supervised classification tasks.
@comment{Mendeley/BibBase export, cleaned: the original entry was missing its
citation key (invalid BibTeX); key restored from the citation_key field.
Standard url/eprint fields added for the arXiv preprint; Mendeley-internal
fields (id, profile_id, group_id, ...) are retained below — BibTeX silently
ignores unknown field names, so they are harmless and preserve the export's
provenance data.}
@article{Makhzani2015,
  title               = {Adversarial Autoencoders},
  author              = {Makhzani, Alireza and Shlens, Jonathon and Jaitly, Navdeep and Goodfellow, Ian and Frey, Brendan},
  year                = {2015},
  eprint              = {1511.05644},
  eprinttype          = {arXiv},
  url                 = {http://arxiv.org/abs/1511.05644},
  abstract            = {In this paper, we propose the "adversarial autoencoder" (AAE), which is a probabilistic autoencoder that uses the recently proposed generative adversarial networks (GAN) to perform variational inference by matching the aggregated posterior of the hidden code vector of the autoencoder with an arbitrary prior distribution. Matching the aggregated posterior to the prior ensures that generating from any part of prior space results in meaningful samples. As a result, the decoder of the adversarial autoencoder learns a deep generative model that maps the imposed prior to the data distribution. We show how the adversarial autoencoder can be used in applications such as semi-supervised classification, disentangling style and content of images, unsupervised clustering, dimensionality reduction and data visualization. We performed experiments on MNIST, Street View House Numbers and Toronto Face datasets and show that adversarial autoencoders achieve competitive results in generative modeling and semi-supervised classification tasks.},
  id                  = {ddca915a-2df3-317f-9c40-ebbddcefef55},
  created             = {2021-10-01T11:39:50.424Z},
  file_attached       = {true},
  profile_id          = {235249c2-3ed4-314a-b309-b1ea0330f5d9},
  group_id            = {1ff583c0-be37-34fa-9c04-73c69437d354},
  last_modified       = {2022-03-28T09:45:09.684Z},
  read                = {true},
  starred             = {false},
  authored            = {false},
  confirmed           = {true},
  hidden              = {false},
  folder_uuids        = {1853f94b-7af1-40fa-b068-4758e9a02bc4},
  private_publication = {false},
}
Downloads: 0
{"_id":"cyBMftavjgN4CwXMm","bibbaseid":"makhzani-shlens-jaitly-goodfellow-frey-adversarialautoencoders-2015","author_short":["Makhzani, A.","Shlens, J.","Jaitly, N.","Goodfellow, I.","Frey, B."],"bibdata":{"title":"Adversarial Autoencoders","type":"article","year":"2015","websites":"http://arxiv.org/abs/1511.05644","id":"ddca915a-2df3-317f-9c40-ebbddcefef55","created":"2021-10-01T11:39:50.424Z","file_attached":"true","profile_id":"235249c2-3ed4-314a-b309-b1ea0330f5d9","group_id":"1ff583c0-be37-34fa-9c04-73c69437d354","last_modified":"2022-03-28T09:45:09.684Z","read":"true","starred":false,"authored":false,"confirmed":"true","hidden":false,"citation_key":"Makhzani2015","folder_uuids":"1853f94b-7af1-40fa-b068-4758e9a02bc4","private_publication":false,"abstract":"In this paper, we propose the \"adversarial autoencoder\" (AAE), which is a probabilistic autoencoder that uses the recently proposed generative adversarial networks (GAN) to perform variational inference by matching the aggregated posterior of the hidden code vector of the autoencoder with an arbitrary prior distribution. Matching the aggregated posterior to the prior ensures that generating from any part of prior space results in meaningful samples. As a result, the decoder of the adversarial autoencoder learns a deep generative model that maps the imposed prior to the data distribution. We show how the adversarial autoencoder can be used in applications such as semi-supervised classification, disentangling style and content of images, unsupervised clustering, dimensionality reduction and data visualization. 
We performed experiments on MNIST, Street View House Numbers and Toronto Face datasets and show that adversarial autoencoders achieve competitive results in generative modeling and semi-supervised classification tasks.","bibtype":"article","author":"Makhzani, Alireza and Shlens, Jonathon and Jaitly, Navdeep and Goodfellow, Ian and Frey, Brendan","bibtex":"@article{\n title = {Adversarial Autoencoders},\n type = {article},\n year = {2015},\n websites = {http://arxiv.org/abs/1511.05644},\n id = {ddca915a-2df3-317f-9c40-ebbddcefef55},\n created = {2021-10-01T11:39:50.424Z},\n file_attached = {true},\n profile_id = {235249c2-3ed4-314a-b309-b1ea0330f5d9},\n group_id = {1ff583c0-be37-34fa-9c04-73c69437d354},\n last_modified = {2022-03-28T09:45:09.684Z},\n read = {true},\n starred = {false},\n authored = {false},\n confirmed = {true},\n hidden = {false},\n citation_key = {Makhzani2015},\n folder_uuids = {1853f94b-7af1-40fa-b068-4758e9a02bc4},\n private_publication = {false},\n abstract = {In this paper, we propose the \"adversarial autoencoder\" (AAE), which is a probabilistic autoencoder that uses the recently proposed generative adversarial networks (GAN) to perform variational inference by matching the aggregated posterior of the hidden code vector of the autoencoder with an arbitrary prior distribution. Matching the aggregated posterior to the prior ensures that generating from any part of prior space results in meaningful samples. As a result, the decoder of the adversarial autoencoder learns a deep generative model that maps the imposed prior to the data distribution. We show how the adversarial autoencoder can be used in applications such as semi-supervised classification, disentangling style and content of images, unsupervised clustering, dimensionality reduction and data visualization. 
We performed experiments on MNIST, Street View House Numbers and Toronto Face datasets and show that adversarial autoencoders achieve competitive results in generative modeling and semi-supervised classification tasks.},\n bibtype = {article},\n author = {Makhzani, Alireza and Shlens, Jonathon and Jaitly, Navdeep and Goodfellow, Ian and Frey, Brendan}\n}","author_short":["Makhzani, A.","Shlens, J.","Jaitly, N.","Goodfellow, I.","Frey, B."],"urls":{"Paper":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c/file/3f706015-6bf8-0131-44d0-f93e563adb2a/Makhzani_et_al___2016___Adversarial_Autoencoders.pdf.pdf","Website":"http://arxiv.org/abs/1511.05644"},"biburl":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c","bibbaseid":"makhzani-shlens-jaitly-goodfellow-frey-adversarialautoencoders-2015","role":"author","metadata":{"authorlinks":{}}},"bibtype":"article","biburl":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c","dataSources":["TJkbwzD8s2wCxBy6Y","ya2CyA73rpZseyrZ8","2252seNhipfTmjEBQ"],"keywords":[],"search_terms":["adversarial","autoencoders","makhzani","shlens","jaitly","goodfellow","frey"],"title":"Adversarial Autoencoders","year":2015}