The Supervised IBP: Neighbourhood Preserving Infinite Latent Feature Models. Quadrianto, N., Sharmanska, V., Knowles, D. A., & Ghahramani, Z. In *Conference on Uncertainty in Artificial Intelligence*, volume 29, 2013. Paper abstract bibtex We propose a probabilistic model to infer supervised latent variables in the Hamming space from observed data. Our model allows simultaneous inference of the number of binary latent variables, and their values. The latent variables preserve neighbourhood structure of the data in a sense that objects in the same semantic concept have similar latent values, and objects in different concepts have dissimilar latent values. We formulate the supervised infinite latent variable problem based on an intuitive principle of pulling objects together if they are of the same type, and pushing them apart if they are not. We then combine this principle with a flexible Indian Buffet Process prior on the latent variables. We show that the inferred supervised latent variables can be directly used to perform a nearest neighbour search for the purpose of retrieval. We introduce a new application of dynamically extending hash codes, and show how to effectively couple the structure of the hash codes with continuously growing structure of the neighbourhood preserving infinite latent feature space.

@inproceedings{quadrianto2013supervised,
  author        = {Quadrianto, Novi and Sharmanska, Viktoriia and Knowles, David A. and Ghahramani, Zoubin},
  title         = {The Supervised {IBP}: Neighbourhood Preserving Infinite Latent Feature Models},
  booktitle     = {Conference on Uncertainty in Artificial Intelligence},
  volume        = {29},
  year          = {2013},
  archivePrefix = {arXiv},
  eprint        = {1309.6858},
  url           = {http://mlg.eng.cam.ac.uk/pub/pdf/QuaShaKnoGha13.pdf},
  keywords      = {Machine Learning/Statistics},
  abstract      = {We propose a probabilistic model to infer supervised latent variables in the Hamming space from observed data. Our model allows simultaneous inference of the number of binary latent variables, and their values. The latent variables preserve neighbourhood structure of the data in a sense that objects in the same semantic concept have similar latent values, and objects in different concepts have dissimilar latent values. We formulate the supervised infinite latent variable problem based on an intuitive principle of pulling objects together if they are of the same type, and pushing them apart if they are not. We then combine this principle with a flexible Indian Buffet Process prior on the latent variables. We show that the inferred supervised latent variables can be directly used to perform a nearest neighbour search for the purpose of retrieval. We introduce a new application of dynamically extending hash codes, and show how to effectively couple the structure of the hash codes with continuously growing structure of the neighbourhood preserving infinite latent feature space.},
}

Downloads: 0

{"_id":"o9bxR4QJRAxnTZjHH","bibbaseid":"quadrianto-sharmanska-bknowlesb-ghahramani-thesupervisedibpneighbourhoodpreservinginfinitelatentfeaturemodels-2013","downloads":0,"creationDate":"2017-05-18T03:55:31.758Z","title":"The Supervised IBP: Neighbourhood Preserving Infinite Latent Feature Models","author_short":["Quadrianto, N.","Sharmanska, V.","<b>Knowles</b>, D. A","Ghahramani, Z."],"year":2013,"bibtype":"inproceedings","biburl":"http://cs.stanford.edu/people/davidknowles/my_publications.bib","bibdata":{"bibtype":"inproceedings","type":"inproceedings","abstract":"We propose a probabilistic model to infer supervised latent variables in the Hamming space from observed data. Our model allows simultaneous inference of the number of binary latent variables, and their values. The latent variables preserve neighbourhood structure of the data in a sense that objects in the same semantic concept have similar latent values, and objects in different concepts have dissimilar latent values. We formulate the supervised infinite latent variable problem based on an intuitive principle of pulling objects together if they are of the same type, and pushing them apart if they are not. We then combine this principle with a flexible Indian Buffet Process prior on the latent variables. We show that the inferred supervised latent variables can be directly used to perform a nearest neighbour search for the purpose of retrieval. 
We introduce a new application of dynamically extending hash codes, and show how to effectively couple the structure of the hash codes with continuously growing structure of the neighbourhood preserving infinite latent feature space.","archiveprefix":"arXiv","arxivid":"1309.6858","author":[{"propositions":[],"lastnames":["Quadrianto"],"firstnames":["Novi"],"suffixes":[]},{"propositions":[],"lastnames":["Sharmanska"],"firstnames":["Viktoriia"],"suffixes":[]},{"propositions":[],"lastnames":["<b>Knowles</b>"],"firstnames":["David","A"],"suffixes":[]},{"propositions":[],"lastnames":["Ghahramani"],"firstnames":["Zoubin"],"suffixes":[]}],"eprint":"1309.6858","booktitle":"Conference on Uncertainty in Artificial Intelligence","volume":"29","title":"The Supervised IBP: Neighbourhood Preserving Infinite Latent Feature Models","url":"http://mlg.eng.cam.ac.uk/pub/pdf/QuaShaKnoGha13.pdf","keywords":"Machine Learning/Statistics","year":"2013","bibtex":"@inproceedings{quadrianto2013supervised,\nabstract = {We propose a probabilistic model to infer supervised latent variables in the Hamming space from observed data. Our model allows simultaneous inference of the number of binary latent variables, and their values. The latent variables preserve neighbourhood structure of the data in a sense that objects in the same semantic concept have similar latent values, and objects in different concepts have dissimilar latent values. We formulate the supervised infinite latent variable problem based on an intuitive principle of pulling objects together if they are of the same type, and pushing them apart if they are not. We then combine this principle with a flexible Indian Buffet Process prior on the latent variables. We show that the inferred supervised latent variables can be directly used to perform a nearest neighbour search for the purpose of retrieval. 
We introduce a new application of dynamically extending hash codes, and show how to effectively couple the structure of the hash codes with continuously growing structure of the neighbourhood preserving infinite latent feature space.},\narchivePrefix = {arXiv},\narxivId = {1309.6858},\nauthor = {Quadrianto, Novi and Sharmanska, Viktoriia and <b>Knowles</b>, David A and Ghahramani, Zoubin},\neprint = {1309.6858},\nbooktitle = {Conference on Uncertainty in Artificial Intelligence},\nvolume={29},\ntitle = {{The Supervised IBP: Neighbourhood Preserving Infinite Latent Feature Models}},\nurl = {http://mlg.eng.cam.ac.uk/pub/pdf/QuaShaKnoGha13.pdf},\nkeywords = {Machine Learning/Statistics},\nyear = {2013}\n}\n","author_short":["Quadrianto, N.","Sharmanska, V.","<b>Knowles</b>, D. A","Ghahramani, Z."],"key":"quadrianto2013supervised","id":"quadrianto2013supervised","bibbaseid":"quadrianto-sharmanska-bknowlesb-ghahramani-thesupervisedibpneighbourhoodpreservinginfinitelatentfeaturemodels-2013","role":"author","urls":{"Paper":"http://mlg.eng.cam.ac.uk/pub/pdf/QuaShaKnoGha13.pdf"},"keyword":["Machine Learning/Statistics"],"downloads":0},"search_terms":["supervised","ibp","neighbourhood","preserving","infinite","latent","feature","models","quadrianto","sharmanska","<b>knowles</b>","ghahramani"],"keywords":["machine learning/statistics"],"authorIDs":["591d2ef1f23861d07500001a"],"dataSources":["hxiRh73hmfi3m787q"]}