Learning visual saliency by combining feature maps in a nonlinear manner using AdaBoost. Zhao, Q. & Koch, C. Journal of Vision, 12(6):22, June, 2012. 00039
Paper doi abstract bibtex To predict where subjects look under natural viewing conditions, biologically inspired saliency models decompose visual input into a set of feature maps across spatial scales. The output of these feature maps are summed to yield the final saliency map. We studied the integration of bottom-up feature maps across multiple spatial scales by using eye movement data from four recent eye tracking datasets. We use AdaBoost as the central computational module that takes into account feature selection, thresholding, weight assignment, and integration in a principled and nonlinear learning framework. By combining the output of feature maps via a series of nonlinear classifiers, the new model consistently predicts eye movements better than any of its competitors.
@article{zhao_learning_2012,
  title    = {Learning visual saliency by combining feature maps in a nonlinear manner using {AdaBoost}},
  author   = {Zhao, Qi and Koch, Christof},
  journal  = {Journal of Vision},
  volume   = {12},
  number   = {6},
  pages    = {22},
  month    = jun,
  year     = {2012},
  doi      = {10.1167/12.6.22},
  issn     = {1534-7362},
  url      = {http://www.journalofvision.org/content/12/6/22},
  urldate  = {2015-03-10},
  pmid     = {22707429},
  language = {en},
  abstract = {To predict where subjects look under natural viewing conditions, biologically inspired saliency models decompose visual input into a set of feature maps across spatial scales. The output of these feature maps are summed to yield the final saliency map. We studied the integration of bottom-up feature maps across multiple spatial scales by using eye movement data from four recent eye tracking datasets. We use AdaBoost as the central computational module that takes into account feature selection, thresholding, weight assignment, and integration in a principled and nonlinear learning framework. By combining the output of feature maps via a series of nonlinear classifiers, the new model consistently predicts eye movements better than any of its competitors.},
  note     = {00039},
}
Downloads: 0
{"_id":"oRnvphzE7R9kBbkTD","bibbaseid":"zhao-koch-learningvisualsaliencybycombiningfeaturemapsinanonlinearmannerusingadaboost-2012","downloads":0,"creationDate":"2015-09-03T07:18:47.948Z","title":"Learning visual saliency by combining feature maps in a nonlinear manner using AdaBoost","author_short":["Zhao, Q.","Koch, C."],"year":2012,"bibtype":"article","biburl":"http://bibbase.org/zotero/fred.qi","bibdata":{"abstract":"To predict where subjects look under natural viewing conditions, biologically inspired saliency models decompose visual input into a set of feature maps across spatial scales. The output of these feature maps are summed to yield the final saliency map. We studied the integration of bottom-up feature maps across multiple spatial scales by using eye movement data from four recent eye tracking datasets. We use AdaBoost as the central computational module that takes into account feature selection, thresholding, weight assignment, and integration in a principled and nonlinear learning framework. By combining the output of feature maps via a series of nonlinear classifiers, the new model consistently predicts eye movements better than any of its competitors.","author":["Zhao, Qi","Koch, Christof"],"author_short":["Zhao, Q.","Koch, C."],"bibtex":"@article{ zhao_learning_2012,\n title = {Learning visual saliency by combining feature maps in a nonlinear manner using {AdaBoost}},\n volume = {12},\n issn = {, 1534-7362},\n url = {http://www.journalofvision.org/content/12/6/22},\n doi = {10.1167/12.6.22},\n abstract = {To predict where subjects look under natural viewing conditions, biologically inspired saliency models decompose visual input into a set of feature maps across spatial scales. The output of these feature maps are summed to yield the final saliency map. We studied the integration of bottom-up feature maps across multiple spatial scales by using eye movement data from four recent eye tracking datasets. 
We use AdaBoost as the central computational module that takes into account feature selection, thresholding, weight assignment, and integration in a principled and nonlinear learning framework. By combining the output of feature maps via a series of nonlinear classifiers, the new model consistently predicts eye movements better than any of its competitors.},\n language = {en},\n number = {6},\n urldate = {2015-03-10TZ},\n journal = {Journal of Vision},\n author = {Zhao, Qi and Koch, Christof},\n month = {June},\n year = {2012},\n pmid = {22707429},\n note = {00039 },\n pages = {22}\n}","bibtype":"article","doi":"10.1167/12.6.22","id":"zhao_learning_2012","issn":", 1534-7362","journal":"Journal of Vision","key":"zhao_learning_2012","language":"en","month":"June","note":"00039","number":"6","pages":"22","pmid":"22707429","title":"Learning visual saliency by combining feature maps in a nonlinear manner using AdaBoost","type":"article","url":"http://www.journalofvision.org/content/12/6/22","urldate":"2015-03-10TZ","volume":"12","year":"2012","bibbaseid":"zhao-koch-learningvisualsaliencybycombiningfeaturemapsinanonlinearmannerusingadaboost-2012","role":"author","urls":{"Paper":"http://www.journalofvision.org/content/12/6/22"},"downloads":0},"search_terms":["learning","visual","saliency","combining","feature","maps","nonlinear","manner","using","adaboost","zhao","koch"],"keywords":[],"authorIDs":[],"dataSources":["y8en6y5RHukfeLuPH"]}