A learning-based visual saliency fusion model for High Dynamic Range video (LBVS-HDR). Banitalebi-Dehkordi, A., Dong, Y., Pourazad, M. T., & Nasiopoulos, P. In 2015 23rd European Signal Processing Conference (EUSIPCO), pages 1541-1545, Aug, 2015.
Paper doi abstract bibtex Saliency prediction for Standard Dynamic Range (SDR) videos has been well explored in the last decade. However, limited studies are available on High Dynamic Range (HDR) Visual Attention Models (VAMs). Considering that the characteristic of HDR content in terms of dynamic range and color gamut is quite different than those of SDR content, it is essential to identify the importance of different saliency attributes of HDR videos for designing a VAM and understand how to combine these features. To this end we propose a learning-based visual saliency fusion method for HDR content (LBVS-HDR) to combine various visual saliency features. In our approach various conspicuity maps are extracted from HDR data, and then for fusing conspicuity maps, a Random Forests algorithm is used to train a model based on the collected data from an eye-tracking experiment. Performance evaluations demonstrate the superiority of the proposed fusion method against other existing fusion methods.
@inproceedings{7362642,
  author    = {Banitalebi-Dehkordi, A. and Dong, Y. and Pourazad, M. T. and Nasiopoulos, P.},
  booktitle = {2015 23rd European Signal Processing Conference ({EUSIPCO})},
  title     = {A learning-based visual saliency fusion model for {High Dynamic Range} video ({LBVS-HDR})},
  year      = {2015},
  month     = aug,
  pages     = {1541--1545},
  abstract  = {Saliency prediction for Standard Dynamic Range (SDR) videos has been well explored in the last decade. However, limited studies are available on High Dynamic Range (HDR) Visual Attention Models (VAMs). Considering that the characteristic of HDR content in terms of dynamic range and color gamut is quite different than those of SDR content, it is essential to identify the importance of different saliency attributes of HDR videos for designing a VAM and understand how to combine these features. To this end we propose a learning-based visual saliency fusion method for HDR content (LBVS-HDR) to combine various visual saliency features. In our approach various conspicuity maps are extracted from HDR data, and then for fusing conspicuity maps, a Random Forests algorithm is used to train a model based on the collected data from an eye-tracking experiment. Performance evaluations demonstrate the superiority of the proposed fusion method against other existing fusion methods.},
  keywords  = {gaze tracking;image colour analysis;eye-tracking;random forests algorithm;conspicuity maps;color gamut;VAM;visual attention models;SDR videos;standard dynamic range videos;saliency prediction;HDR;high dynamic range video;LBVS;learning-based visual saliency fusion;Visualization;Feature extraction;Image color analysis;Training;Databases;Dynamic range;Radio frequency;High Dynamic Range video;HDR;visual attention model;saliency prediction},
  doi       = {10.1109/EUSIPCO.2015.7362642},
  issn      = {2076-1465},
  url       = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105157.pdf},
}
Downloads: 0
{"_id":"ZsaN6ydkqvB4QM2T5","bibbaseid":"banitalebidehkordi-dong-pourazad-nasiopoulos-alearningbasedvisualsaliencyfusionmodelforhighdynamicrangevideolbvshdr-2015","authorIDs":[],"author_short":["Banitalebi-Dehkordi, A.","Dong, Y.","Pourazad, M. T.","Nasiopoulos, P."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","author":[{"firstnames":["A."],"propositions":[],"lastnames":["Banitalebi-Dehkordi"],"suffixes":[]},{"firstnames":["Y."],"propositions":[],"lastnames":["Dong"],"suffixes":[]},{"firstnames":["M.","T."],"propositions":[],"lastnames":["Pourazad"],"suffixes":[]},{"firstnames":["P."],"propositions":[],"lastnames":["Nasiopoulos"],"suffixes":[]}],"booktitle":"2015 23rd European Signal Processing Conference (EUSIPCO)","title":"A learning-based visual saliency fusion model for High Dynamic Range video (LBVS-HDR)","year":"2015","pages":"1541-1545","abstract":"Saliency prediction for Standard Dynamic Range (SDR) videos has been well explored in the last decade. However, limited studies are available on High Dynamic Range (HDR) Visual Attention Models (VAMs). Considering that the characteristic of HDR content in terms of dynamic range and color gamut is quite different than those of SDR content, it is essential to identify the importance of different saliency attributes of HDR videos for designing a VAM and understand how to combine these features. To this end we propose a learning-based visual saliency fusion method for HDR content (LVBS-HDR) to combine various visual saliency features. In our approach various conspicuity maps are extracted from HDR data, and then for fusing conspicuity maps, a Random Forests algorithm is used to train a model based on the collected data from an eye-tracking experiment. 
Performance evaluations demonstrate the superiority of the proposed fusion method against other existing fusion methods.","keywords":"gaze tracking;image colour analysis;eye-tracking;random forests algorithm;conspicuity maps;color gamut;VAM;visual attention models;SDR videos;standard dynamic range videos;saliency prediction;HDR;high dynamic range video;LBVS;learning-based visual saliency fusion;Visualization;Feature extraction;Image color analysis;Training;Databases;Dynamic range;Radio frequency;High Dynamic Range video;HDR;visual attention model;saliency prediction","doi":"10.1109/EUSIPCO.2015.7362642","issn":"2076-1465","month":"Aug","url":"https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105157.pdf","bibtex":"@InProceedings{7362642,\n author = {A. Banitalebi-Dehkordi and Y. Dong and M. T. Pourazad and P. Nasiopoulos},\n booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)},\n title = {A learning-based visual saliency fusion model for High Dynamic Range video (LBVS-HDR)},\n year = {2015},\n pages = {1541-1545},\n abstract = {Saliency prediction for Standard Dynamic Range (SDR) videos has been well explored in the last decade. However, limited studies are available on High Dynamic Range (HDR) Visual Attention Models (VAMs). Considering that the characteristic of HDR content in terms of dynamic range and color gamut is quite different than those of SDR content, it is essential to identify the importance of different saliency attributes of HDR videos for designing a VAM and understand how to combine these features. To this end we propose a learning-based visual saliency fusion method for HDR content (LVBS-HDR) to combine various visual saliency features. In our approach various conspicuity maps are extracted from HDR data, and then for fusing conspicuity maps, a Random Forests algorithm is used to train a model based on the collected data from an eye-tracking experiment. 
Performance evaluations demonstrate the superiority of the proposed fusion method against other existing fusion methods.},\n keywords = {gaze tracking;image colour analysis;eye-tracking;random forests algorithm;conspicuity maps;color gamut;VAM;visual attention models;SDR videos;standard dynamic range videos;saliency prediction;HDR;high dynamic range video;LBVS;learning-based visual saliency fusion;Visualization;Feature extraction;Image color analysis;Training;Databases;Dynamic range;Radio frequency;High Dynamic Range video;HDR;visual attention model;saliency prediction},\n doi = {10.1109/EUSIPCO.2015.7362642},\n issn = {2076-1465},\n month = {Aug},\n url = {https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105157.pdf},\n}\n\n","author_short":["Banitalebi-Dehkordi, A.","Dong, Y.","Pourazad, M. T.","Nasiopoulos, P."],"key":"7362642","id":"7362642","bibbaseid":"banitalebidehkordi-dong-pourazad-nasiopoulos-alearningbasedvisualsaliencyfusionmodelforhighdynamicrangevideolbvshdr-2015","role":"author","urls":{"Paper":"https://www.eurasip.org/proceedings/eusipco/eusipco2015/papers/1570105157.pdf"},"keyword":["gaze tracking;image colour analysis;eye-tracking;random forests algorithm;conspicuity maps;color gamut;VAM;visual attention models;SDR videos;standard dynamic range videos;saliency prediction;HDR;high dynamic range video;LBVS;learning-based visual saliency fusion;Visualization;Feature extraction;Image color analysis;Training;Databases;Dynamic range;Radio frequency;High Dynamic Range video;HDR;visual attention model;saliency prediction"],"metadata":{"authorlinks":{}},"downloads":0},"bibtype":"inproceedings","biburl":"https://raw.githubusercontent.com/Roznn/EUSIPCO/main/eusipco2015url.bib","creationDate":"2021-02-13T17:31:52.453Z","downloads":0,"keywords":["gaze tracking;image colour analysis;eye-tracking;random forests algorithm;conspicuity maps;color gamut;vam;visual attention models;sdr videos;standard dynamic range videos;saliency prediction;hdr;high 
dynamic range video;lbvs;learning-based visual saliency fusion;visualization;feature extraction;image color analysis;training;databases;dynamic range;radio frequency;high dynamic range video;hdr;visual attention model;saliency prediction"],"search_terms":["learning","based","visual","saliency","fusion","model","high","dynamic","range","video","lbvs","hdr","banitalebi-dehkordi","dong","pourazad","nasiopoulos"],"title":"A learning-based visual saliency fusion model for High Dynamic Range video (LBVS-HDR)","year":2015,"dataSources":["eov4vbT6mnAiTpKji","knrZsDjSNHWtA9WNT"]}