Representation learning: A review and new perspectives. Bengio, Y., Courville, A., & Vincent, P. IEEE Transactions on Pattern Analysis and Machine Intelligence, 35(8):1798-1828, 2013. Paper doi abstract bibtex The success of machine learning algorithms generally depends on data representation, and we hypothesize that this is because different representations can entangle and hide more or less the different explanatory factors of variation behind the data. Although specific domain knowledge can be used to help design representations, learning with generic priors can also be used, and the quest for AI is motivating the design of more powerful representation-learning algorithms implementing such priors. This paper reviews recent work in the area of unsupervised feature learning and deep learning, covering advances in probabilistic models, autoencoders, manifold learning, and deep networks. This motivates longer term unanswered questions about the appropriate objectives for learning good representations, for computing representations (i.e., inference), and the geometrical connections between representation learning, density estimation, and manifold learning. © 1979-2012 IEEE.
@article{Bengio2013,
title = {Representation learning: A review and new perspectives},
type = {article},
year = {2013},
keywords = {Boltzmann machine,Deep learning,autoencoder,feature learning,neural nets,representation learning,unsupervised learning},
pages = {1798--1828},
volume = {35},
id = {7c6ec643-6263-3d29-a460-33bbdada9f53},
created = {2022-03-23T06:17:59.434Z},
file_attached = {true},
profile_id = {235249c2-3ed4-314a-b309-b1ea0330f5d9},
group_id = {1ff583c0-be37-34fa-9c04-73c69437d354},
last_modified = {2022-03-28T09:45:10.554Z},
read = {true},
starred = {false},
authored = {false},
confirmed = {true},
hidden = {false},
citation_key = {Bengio2013},
folder_uuids = {1853f94b-7af1-40fa-b068-4758e9a02bc4},
private_publication = {false},
abstract = {The success of machine learning algorithms generally depends on data representation, and we hypothesize that this is because different representations can entangle and hide more or less the different explanatory factors of variation behind the data. Although specific domain knowledge can be used to help design representations, learning with generic priors can also be used, and the quest for AI is motivating the design of more powerful representation-learning algorithms implementing such priors. This paper reviews recent work in the area of unsupervised feature learning and deep learning, covering advances in probabilistic models, autoencoders, manifold learning, and deep networks. This motivates longer term unanswered questions about the appropriate objectives for learning good representations, for computing representations (i.e., inference), and the geometrical connections between representation learning, density estimation, and manifold learning. © 1979-2012 IEEE.},
bibtype = {article},
author = {Bengio, Yoshua and Courville, Aaron and Vincent, Pascal},
doi = {10.1109/TPAMI.2013.50},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
number = {8}
}
Downloads: 0
{"_id":"nFMSJzs7SgimbrmDe","bibbaseid":"bengio-courville-vincent-representationlearningareviewandnewperspectives-2013","author_short":["Bengio, Y.","Courville, A.","Vincent, P."],"bibdata":{"title":"Representation learning: A review and new perspectives","type":"article","year":"2013","keywords":"Boltzmann machine,Deep learning,autoencoder,feature learning,neural nets,representation learning,unsupervised learning","pages":"1798-1828","volume":"35","id":"7c6ec643-6263-3d29-a460-33bbdada9f53","created":"2022-03-23T06:17:59.434Z","file_attached":"true","profile_id":"235249c2-3ed4-314a-b309-b1ea0330f5d9","group_id":"1ff583c0-be37-34fa-9c04-73c69437d354","last_modified":"2022-03-28T09:45:10.554Z","read":"true","starred":false,"authored":false,"confirmed":"true","hidden":false,"citation_key":"Bengio2013","folder_uuids":"1853f94b-7af1-40fa-b068-4758e9a02bc4","private_publication":false,"abstract":"The success of machine learning algorithms generally depends on data representation, and we hypothesize that this is because different representations can entangle and hide more or less the different explanatory factors of variation behind the data. Although specific domain knowledge can be used to help design representations, learning with generic priors can also be used, and the quest for AI is motivating the design of more powerful representation-learning algorithms implementing such priors. This paper reviews recent work in the area of unsupervised feature learning and deep learning, covering advances in probabilistic models, autoencoders, manifold learning, and deep networks. This motivates longer term unanswered questions about the appropriate objectives for learning good representations, for computing representations (i.e., inference), and the geometrical connections between representation learning, density estimation, and manifold learning. 
© 1979-2012 IEEE.","bibtype":"article","author":"Bengio, Yoshua and Courville, Aaron and Vincent, Pascal","doi":"10.1109/TPAMI.2013.50","journal":"IEEE Transactions on Pattern Analysis and Machine Intelligence","number":"8","bibtex":"@article{\n title = {Representation learning: A review and new perspectives},\n type = {article},\n year = {2013},\n keywords = {Boltzmann machine,Deep learning,autoencoder,feature learning,neural nets,representation learning,unsupervised learning},\n pages = {1798-1828},\n volume = {35},\n id = {7c6ec643-6263-3d29-a460-33bbdada9f53},\n created = {2022-03-23T06:17:59.434Z},\n file_attached = {true},\n profile_id = {235249c2-3ed4-314a-b309-b1ea0330f5d9},\n group_id = {1ff583c0-be37-34fa-9c04-73c69437d354},\n last_modified = {2022-03-28T09:45:10.554Z},\n read = {true},\n starred = {false},\n authored = {false},\n confirmed = {true},\n hidden = {false},\n citation_key = {Bengio2013},\n folder_uuids = {1853f94b-7af1-40fa-b068-4758e9a02bc4},\n private_publication = {false},\n abstract = {The success of machine learning algorithms generally depends on data representation, and we hypothesize that this is because different representations can entangle and hide more or less the different explanatory factors of variation behind the data. Although specific domain knowledge can be used to help design representations, learning with generic priors can also be used, and the quest for AI is motivating the design of more powerful representation-learning algorithms implementing such priors. This paper reviews recent work in the area of unsupervised feature learning and deep learning, covering advances in probabilistic models, autoencoders, manifold learning, and deep networks. This motivates longer term unanswered questions about the appropriate objectives for learning good representations, for computing representations (i.e., inference), and the geometrical connections between representation learning, density estimation, and manifold learning. 
© 1979-2012 IEEE.},\n bibtype = {article},\n author = {Bengio, Yoshua and Courville, Aaron and Vincent, Pascal},\n doi = {10.1109/TPAMI.2013.50},\n journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},\n number = {8}\n}","author_short":["Bengio, Y.","Courville, A.","Vincent, P."],"urls":{"Paper":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c/file/37ef0671-3b8a-462d-4ec9-0012c55dc27a/Bengio_et_al___2014___Representation_Learning_A_Review_and_New_Perspect.pdf.pdf"},"biburl":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c","bibbaseid":"bengio-courville-vincent-representationlearningareviewandnewperspectives-2013","role":"author","keyword":["Boltzmann machine","Deep learning","autoencoder","feature learning","neural nets","representation learning","unsupervised learning"],"metadata":{"authorlinks":{}}},"bibtype":"article","biburl":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c","dataSources":["iwKepCrWBps7ojhDx","pQYee5oovEhJF75n6","7jg7aEafvXsPnP9Wf","pzyFFGWvxG2bs63zP"],"keywords":["boltzmann machine","deep learning","autoencoder","feature learning","neural nets","representation learning","unsupervised learning"],"search_terms":["representation","learning","review","new","perspectives","bengio","courville","vincent"],"title":"Representation learning: A review and new perspectives","year":2013}