Automatic Foveation for Video Compression Using a Neurobiological Model of Visual Attention. Itti, L. IEEE Transactions on Image Processing, 13(10):1304-1318, Oct, 2004. abstract bibtex We evaluate the applicability of a biologically-motivated algorithm to select visually-salient regions of interest in video streams for multiply-foveated video compression. Regions are selected based on a nonlinear integration of low-level visual cues, mimicking processing in primate occipital and posterior parietal cortex. A dynamic foveation filter then blurs every frame, increasingly with distance from salient locations. Sixty-three variants of the algorithm (varying number and shape of virtual foveas, maximum blur, and saliency competition) are evaluated against an outdoor video scene, using MPEG-1 and constant-quality MPEG-4 (DivX) encoding. Additional compression ratios of 1.1 to 8.5 are achieved by foveation. Two variants of the algorithm are validated against eye fixations recorded from 4-6 human observers on a heterogeneous collection of 50 video clips (over 45,000 frames in total). Significantly higher overlap than expected by chance is found between human and algorithmic foveations. With both variants, foveated clips are on average approximately half the size of unfoveated clips, for both MPEG-1 and MPEG-4. These results suggest a general-purpose usefulness of the algorithm in improving compression ratios of unconstrained video.
@article{Itti04tip,
  author   = {Itti, L.},
  title    = {Automatic Foveation for Video Compression Using a
              Neurobiological Model of Visual Attention},
  journal  = {IEEE Transactions on Image Processing},
  volume   = {13},
  number   = {10},
  pages    = {1304--1318},
  month    = oct,
  year     = {2004},
  abstract = {We evaluate the applicability of a biologically-motivated
algorithm to select visually-salient regions of interest in video
streams for multiply-foveated video compression. Regions are selected
based on a nonlinear integration of low-level visual cues, mimicking
processing in primate occipital and posterior parietal cortex. A
dynamic foveation filter then blurs every frame, increasingly with
distance from salient locations. Sixty-three variants of the
algorithm (varying number and shape of virtual foveas, maximum blur,
and saliency competition) are evaluated against an outdoor video
scene, using MPEG-1 and constant-quality MPEG-4 (DivX) encoding.
Additional compression ratios of 1.1 to 8.5 are achieved by foveation.
Two variants of the algorithm are validated against eye fixations
recorded from 4-6 human observers on a heterogeneous collection of 50
video clips (over 45,000 frames in total). Significantly higher
overlap than expected by chance is found between human and algorithmic
foveations. With both variants, foveated clips are on average
approximately half the size of unfoveated clips, for both MPEG-1 and
MPEG-4. These results suggest a general-purpose usefulness of the
algorithm in improving compression ratios of unconstrained video.},
  keywords = {Visual attention ; video compression ; saliency ; bottom-up ; eye
movements ; foveated},
  type     = { bu ; mod ; cv ; eye },
  file     = { http://iLab.usc.edu/publications/doc/Itti04tip.pdf },
  if       = {2003 impact factor: 2.642}
}
Downloads: 0
{"_id":{"_str":"5298a19f9eb585cc260007f5"},"__v":0,"authorIDs":[],"author_short":["Itti, L."],"bibbaseid":"itti-automaticfoveationforvideocompressionusinganeurobiologicalmodelofvisualattention-2004","bibdata":{"html":"<div class=\"bibbase_paper\"> \n\n\n<span class=\"bibbase_paper_titleauthoryear\">\n\t<span class=\"bibbase_paper_title\"><a name=\"Itti04tip\"> </a>Automatic Foveation for Video Compression Using a Neurobiological Model of Visual Attention.</span>\n\t<span class=\"bibbase_paper_author\">\nItti, L.</span>\n\t<!-- <span class=\"bibbase_paper_year\">2004</span>. -->\n</span>\n\n\n\n<i>IEEE Transactions on Image Processing</i>,\n\n13(10):1304-1318.\n\nOct 2004.\n\n\n\n\n<br class=\"bibbase_paper_content\"/>\n\n<span class=\"bibbase_paper_content\">\n \n \n \n <a href=\"javascript:showBib('Itti04tip')\"\n class=\"bibbase link\">\n <!-- <img src=\"http://www.bibbase.org/img/filetypes/bib.png\" -->\n\t<!-- alt=\"Automatic Foveation for Video Compression Using a Neurobiological Model of Visual Attention [bib]\" -->\n\t<!-- class=\"bibbase_icon\" -->\n\t<!-- style=\"width: 24px; height: 24px; border: 0px; vertical-align: text-top\"><span class=\"bibbase_icon_text\">Bibtex</span> -->\n BibTeX\n <i class=\"fa fa-caret-down\"></i></a>\n \n \n \n <a class=\"bibbase_abstract_link bibbase link\"\n href=\"javascript:showAbstract('Itti04tip')\">\n Abstract\n <i class=\"fa fa-caret-down\"></i></a>\n \n \n \n\n \n \n \n</span>\n\n<div class=\"well well-small bibbase\" id=\"bib_Itti04tip\"\n style=\"display:none\">\n <pre>@article{ Itti04tip,\n author = {L. 
Itti},\n title = {Automatic Foveation for Video Compression Using a\n Neurobiological Model of Visual Attention},\n journal = {IEEE Transactions on Image Processing},\n volume = {13},\n number = {10},\n pages = {1304-1318},\n month = {Oct},\n year = {2004},\n abstract = {We evaluate the applicability of a biologically-motivated\nalgorithm to select visually-salient regions of interest in video\nstreams for multiply-foveated video compression. Regions are selected\nbased on a nonlinear integration of low-level visual cues, mimicking\nprocessing in primate occipital and posterior parietal cortex. A\ndynamic foveation filter then blurs every frame, increasingly with\ndistance from salient locations. Sixty-three variants of the\nalgorithm (varying number and shape of virtual foveas, maximum blur,\nand saliency competition) are evaluated against an outdoor video\nscene, using MPEG-1 and constant-quality MPEG-4 (DivX) encoding.\nAdditional compression radios of 1.1 to 8.5 are achieved by foveation.\nTwo variants of the algorithm are validated against eye fixations\nrecorded from 4-6 human observers on a heterogeneous collection of 50\nvideo clips (over 45,000 frames in total). Significantly higher\noverlap than expected by chance is found between human and algorithmic\nfoveations. With both variants, foveated clips are on average\napproximately half the size of unfoveated clips, for both MPEG-1 and\nMPEG-4. 
These results suggest a general-purpose usefulness of the\nalgorithm in improving compression ratios of unconstrained video.},\n keywords = {Visual attention ; video compression ; saliency ; bottom-up ; eye\nmovements ; foveated},\n type = { bu ; mod ; cv ; eye },\n file = { http://iLab.usc.edu/publications/doc/Itti04tip.pdf },\n if = {2003 impact factor: 2.642}\n}</pre>\n</div>\n\n\n<div class=\"well well-small bibbase\" id=\"abstract_Itti04tip\"\n style=\"display:none\">\n We evaluate the applicability of a biologically-motivated algorithm to select visually-salient regions of interest in video streams for multiply-foveated video compression. Regions are selected based on a nonlinear integration of low-level visual cues, mimicking processing in primate occipital and posterior parietal cortex. A dynamic foveation filter then blurs every frame, increasingly with distance from salient locations. Sixty-three variants of the algorithm (varying number and shape of virtual foveas, maximum blur, and saliency competition) are evaluated against an outdoor video scene, using MPEG-1 and constant-quality MPEG-4 (DivX) encoding. Additional compression radios of 1.1 to 8.5 are achieved by foveation. Two variants of the algorithm are validated against eye fixations recorded from 4-6 human observers on a heterogeneous collection of 50 video clips (over 45,000 frames in total). Significantly higher overlap than expected by chance is found between human and algorithmic foveations. With both variants, foveated clips are on average approximately half the size of unfoveated clips, for both MPEG-1 and MPEG-4. 
These results suggest a general-purpose usefulness of the algorithm in improving compression ratios of unconstrained video.\n</div>\n\n\n</div>\n","downloads":0,"keyword":["Visual attention ; video compression ; saliency ; bottom-up ; eye movements ; foveated"],"bibbaseid":"itti-automaticfoveationforvideocompressionusinganeurobiologicalmodelofvisualattention-2004","role":"author","year":"2004","volume":"13","type":"bu ; mod ; cv ; eye","title":"Automatic Foveation for Video Compression Using a Neurobiological Model of Visual Attention","pages":"1304-1318","number":"10","month":"Oct","keywords":"Visual attention ; video compression ; saliency ; bottom-up ; eye movements ; foveated","key":"Itti04tip","journal":"IEEE Transactions on Image Processing","if":"2003 impact factor: 2.642","id":"Itti04tip","file":"http://iLab.usc.edu/publications/doc/Itti04tip.pdf","bibtype":"article","bibtex":"@article{ Itti04tip,\n author = {L. Itti},\n title = {Automatic Foveation for Video Compression Using a\n Neurobiological Model of Visual Attention},\n journal = {IEEE Transactions on Image Processing},\n volume = {13},\n number = {10},\n pages = {1304-1318},\n month = {Oct},\n year = {2004},\n abstract = {We evaluate the applicability of a biologically-motivated\nalgorithm to select visually-salient regions of interest in video\nstreams for multiply-foveated video compression. Regions are selected\nbased on a nonlinear integration of low-level visual cues, mimicking\nprocessing in primate occipital and posterior parietal cortex. A\ndynamic foveation filter then blurs every frame, increasingly with\ndistance from salient locations. 
Sixty-three variants of the\nalgorithm (varying number and shape of virtual foveas, maximum blur,\nand saliency competition) are evaluated against an outdoor video\nscene, using MPEG-1 and constant-quality MPEG-4 (DivX) encoding.\nAdditional compression radios of 1.1 to 8.5 are achieved by foveation.\nTwo variants of the algorithm are validated against eye fixations\nrecorded from 4-6 human observers on a heterogeneous collection of 50\nvideo clips (over 45,000 frames in total). Significantly higher\noverlap than expected by chance is found between human and algorithmic\nfoveations. With both variants, foveated clips are on average\napproximately half the size of unfoveated clips, for both MPEG-1 and\nMPEG-4. These results suggest a general-purpose usefulness of the\nalgorithm in improving compression ratios of unconstrained video.},\n keywords = {Visual attention ; video compression ; saliency ; bottom-up ; eye\nmovements ; foveated},\n type = { bu ; mod ; cv ; eye },\n file = { http://iLab.usc.edu/publications/doc/Itti04tip.pdf },\n if = {2003 impact factor: 2.642}\n}","author_short":["Itti, L."],"author":["Itti, L."],"abstract":"We evaluate the applicability of a biologically-motivated algorithm to select visually-salient regions of interest in video streams for multiply-foveated video compression. Regions are selected based on a nonlinear integration of low-level visual cues, mimicking processing in primate occipital and posterior parietal cortex. A dynamic foveation filter then blurs every frame, increasingly with distance from salient locations. Sixty-three variants of the algorithm (varying number and shape of virtual foveas, maximum blur, and saliency competition) are evaluated against an outdoor video scene, using MPEG-1 and constant-quality MPEG-4 (DivX) encoding. Additional compression radios of 1.1 to 8.5 are achieved by foveation. 
Two variants of the algorithm are validated against eye fixations recorded from 4-6 human observers on a heterogeneous collection of 50 video clips (over 45,000 frames in total). Significantly higher overlap than expected by chance is found between human and algorithmic foveations. With both variants, foveated clips are on average approximately half the size of unfoveated clips, for both MPEG-1 and MPEG-4. These results suggest a general-purpose usefulness of the algorithm in improving compression ratios of unconstrained video."},"bibtype":"article","biburl":"http://ilab.usc.edu/publications/src/ilab.bib","downloads":0,"search_terms":["automatic","foveation","video","compression","using","neurobiological","model","visual","attention","itti"],"title":"Automatic Foveation for Video Compression Using a Neurobiological Model of Visual Attention","year":2004,"dataSources":["wedBDxEpNXNCLZ2sZ"]}