Learning Shared, Discriminative, and Compact Representations for Visual Recognition. Lobel, H., Vidal, R., & Soto, A. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2015.
Paper abstract bibtex Dictionary-based and part-based methods are among the most popular approaches to visual recognition. In both methods, a mid-level representation is built on top of low-level image descriptors and high-level classifiers are trained on top of the mid-level representation. While earlier methods built the mid-level representation without supervision, there is currently great interest in learning both representations jointly to make the mid-level representation more discriminative. In this work we propose a new approach to visual recognition that jointly learns a shared, discriminative, and compact mid-level representation and a compact high-level representation. By using a structured output learning framework, our approach directly handles the multiclass case at both levels of abstraction. Moreover, by using a group-sparse prior in the structured output learning framework, our approach encourages sharing of visual words and thus reduces the number of words used to represent each class. We test our proposed method on several popular benchmarks. Our results show that, by jointly learning mid- and high-level representations, and fostering the sharing of discriminative visual words among target classes, we are able to achieve state-of-the-art recognition performance using far less visual words than previous approaches.
@comment{Journal paper: Lobel, Vidal & Soto, IEEE TPAMI 37(11), 2015.
         Title repaired: the scraped source had "earning ..." with the
         leading "L" of "Learning" dropped.}
@article{Lobel:EtAl:2015,
  author   = {Lobel, H. and Vidal, R. and Soto, A.},
  title    = {Learning Shared, Discriminative, and Compact Representations for
              Visual Recognition},
  journal  = {{IEEE} Transactions on Pattern Analysis and Machine Intelligence},
  volume   = {37},
  number   = {11},
  year     = {2015},
  abstract = {Dictionary-based and part-based methods are among the most popular
              approaches to visual recognition. In both methods, a mid-level
              representation is built on top of low-level image descriptors and
              high-level classifiers are trained on top of the mid-level
              representation. While earlier methods built the mid-level
              representation without supervision, there is currently great
              interest in learning both representations jointly to make the
              mid-level representation more discriminative. In this work we
              propose a new approach to visual recognition that jointly learns a
              shared, discriminative, and compact mid-level representation and a
              compact high-level representation. By using a structured output
              learning framework, our approach directly handles the multiclass
              case at both levels of abstraction. Moreover, by using a
              group-sparse prior in the structured output learning framework,
              our approach encourages sharing of visual words and thus reduces
              the number of words used to represent each class. We test our
              proposed method on several popular benchmarks. Our results show
              that, by jointly learning mid- and high-level representations, and
              fostering the sharing of discriminative visual words among target
              classes, we are able to achieve state-of-the-art recognition
              performance using far less visual words than previous approaches.},
  url      = {http://saturno.ing.puc.cl/media/papers_alvaro/Hans-FINAL-PAMI-2015.pdf}
}
Downloads: 0
{"_id":"2ugH8Lfuw3ovLGtJW","bibbaseid":"lobel-vidal-soto-earningshareddiscriminativeandcompactrepresentationsforvisualrecognition-2015","downloads":0,"creationDate":"2016-04-26T18:37:41.672Z","title":"earning Shared, Discriminative, and Compact Representations for Visual Recognition","author_short":["Lobel, H.","Vidal, R.","Soto, A."],"year":2015,"bibtype":"article","biburl":"http://saturno.ing.puc.cl/static/papersalvaro.bib","bibdata":{"bibtype":"article","type":"article","author":[{"firstnames":["H."],"propositions":[],"lastnames":["Lobel"],"suffixes":[]},{"firstnames":["R."],"propositions":[],"lastnames":["Vidal"],"suffixes":[]},{"firstnames":["A."],"propositions":[],"lastnames":["Soto"],"suffixes":[]}],"title":"earning Shared, Discriminative, and Compact Representations for Visual Recognition","journal":"IEEE Transactions on Pattern Analysis and Machine Intelligence","volume":"37","number":"11","year":"2015","abstract":"Dictionary-based and part-based methods are among the most popular approaches to visual recognition. In both methods, a mid-level representation is built on top of low-level image descriptors and high-level classifiers are trained on top of the mid-level representation. While earlier methods built the mid-level representation without supervision, there is currently great interest in learning both representations jointly to make the mid-level representation more discriminative. In this work we propose a new approach to visual recognition that jointly learns a shared, discriminative, and compact mid-level representation and a compact high-level representation. By using a structured output learning framework, our approach directly handles the multiclass case at both levels of abstraction. Moreover, by using a group-sparse prior in the structured output learning framework, our approach encourages sharing of visual words and thus reduces the number of words used to represent each class. We test our proposed method on several popular benchmarks. 
Our results show that, by jointly learning mid- and high-level representations, and fostering the sharing of discriminative visual words among target classes, we are able to achieve state-of-the-art recognition performance using far less visual words than previous approaches.","url":"http://saturno.ing.puc.cl/media/papers_alvaro/Hans-FINAL-PAMI-2015.pdf","bibtex":"@article{Lobel:EtAl:2015,\n Author = {H. Lobel and R. Vidal and A. Soto},\n Title = {earning Shared, Discriminative, and Compact Representations for \nVisual Recognition},\n Journal = {{IEEE} Transactions on Pattern Analysis and Machine Intelligence},\n Volume = {37},\n Number = {11},\n Year = {2015},\n abstract = {Dictionary-based and part-based methods are among the most popular approaches to\nvisual recognition. In both methods, a mid-level representation is built on top\nof low-level image descriptors and high-level classifiers are trained on top of\nthe mid-level representation. While earlier methods built the mid-level\nrepresentation without supervision, there is currently great interest in\nlearning both representations jointly to make the mid-level representation more\ndiscriminative. In this work we propose a new approach to visual recognition that\njointly learns a shared, discriminative, and compact mid-level representation\nand a compact high-level representation. By using a structured output learning framework, our\napproach directly handles the multiclass case at both levels of abstraction.\nMoreover, by using a group-sparse prior in the structured output learning\nframework, our approach encourages sharing of visual words and thus reduces the\nnumber of words used to represent each class. We test our proposed method on\nseveral popular benchmarks. 
Our results show that, by jointly learning mid- and\nhigh-level representations, and fostering the sharing of discriminative visual\nwords among target classes, we are able to achieve state-of-the-art recognition\nperformance using far less visual words than previous approaches.},\nurl = {http://saturno.ing.puc.cl/media/papers_alvaro/Hans-FINAL-PAMI-2015.pdf}\n}\n\n\n","author_short":["Lobel, H.","Vidal, R.","Soto, A."],"key":"Lobel:EtAl:2015","id":"Lobel:EtAl:2015","bibbaseid":"lobel-vidal-soto-earningshareddiscriminativeandcompactrepresentationsforvisualrecognition-2015","role":"author","urls":{"Paper":"http://saturno.ing.puc.cl/media/papers_alvaro/Hans-FINAL-PAMI-2015.pdf"},"downloads":0},"search_terms":["earning","shared","discriminative","compact","representations","visual","recognition","lobel","vidal","soto"],"keywords":[],"authorIDs":[],"dataSources":["Hgw6JLCLakSBs88au"]}