Predicting Where We Look from Spatiotemporal Gaps. Yonetani, R., Kawashima, H., & Matsuyama, T. In Proceedings of the 15th ACM on International Conference on Multimodal Interaction, of ICMI '13, pages 421--428, New York, NY, USA, 2013. ACM.
Paper doi abstract bibtex When we are watching videos, there exist spatiotemporal gaps between where we look and what we focus on, which result from temporally delayed responses and anticipation in eye movements. We focus on the underlying structures of those gaps and propose a novel method to predict points of gaze from video data. In the proposed methods, we model the spatiotemporal patterns of salient regions that tend to be focused on and statistically learn which types of the patterns strongly appear around the points of gaze with respect to each type of eye movements. It allows us to exploit the structures of gaps affected by eye movements and salient motions for the gaze-point prediction. The effectiveness of the proposed method is confirmed with several public datasets.
@inproceedings{yonetani_predicting_2013,
address = {New York, NY, USA},
series = {{ICMI} '13},
title = {Predicting {Where} {We} {Look} from {Spatiotemporal} {Gaps}},
isbn = {978-1-4503-2129-7},
url = {http://doi.acm.org/10.1145/2522848.2522853},
doi = {10.1145/2522848.2522853},
abstract = {When we are watching videos, there exist spatiotemporal gaps between where we look and what we focus on, which result from temporally delayed responses and anticipation in eye movements. We focus on the underlying structures of those gaps and propose a novel method to predict points of gaze from video data. In the proposed methods, we model the spatiotemporal patterns of salient regions that tend to be focused on and statistically learn which types of the patterns strongly appear around the points of gaze with respect to each type of eye movements. It allows us to exploit the structures of gaps affected by eye movements and salient motions for the gaze-point prediction. The effectiveness of the proposed method is confirmed with several public datasets.},
urldate = {2014-06-05},
booktitle = {Proceedings of the 15th {ACM} on {International} {Conference} on {Multimodal} {Interaction}},
publisher = {ACM},
author = {Yonetani, Ryo and Kawashima, Hiroaki and Matsuyama, Takashi},
year = {2013},
pages = {421--428}
}
Downloads: 0
{"_id":"fY3xsMFpiFgwsk8qy","bibbaseid":"yonetani-kawashima-matsuyama-predictingwherewelookfromspatiotemporalgaps-2013","downloads":0,"creationDate":"2016-10-05T13:48:42.973Z","title":"Predicting Where We Look from Spatiotemporal Gaps","author_short":["Yonetani, R.","Kawashima, H.","Matsuyama, T."],"year":2013,"bibtype":"inproceedings","biburl":"http://bibbase.org/zotero/alanlivio","bibdata":{"bibtype":"inproceedings","type":"inproceedings","address":"New York, NY, USA","series":"ICMI '13","title":"Predicting Where We Look from Spatiotemporal Gaps","isbn":"978-1-4503-2129-7","url":"http://doi.acm.org/10.1145/2522848.2522853","doi":"10.1145/2522848.2522853","abstract":"When we are watching videos, there exist spatiotemporal gaps between where we look and what we focus on, which result from temporally delayed responses and anticipation in eye movements. We focus on the underlying structures of those gaps and propose a novel method to predict points of gaze from video data. In the proposed methods, we model the spatiotemporal patterns of salient regions that tend to be focused on and statistically learn which types of the patterns strongly appear around the points of gaze with respect to each type of eye movements. It allows us to exploit the structures of gaps affected by eye movements and salient motions for the gaze-point prediction. The effectiveness of the proposed method is confirmed with several public datasets.","urldate":"2014-06-05","booktitle":"Proceedings of the 15th ACM on International Conference on Multimodal Interaction","publisher":"ACM","author":[{"propositions":[],"lastnames":["Yonetani"],"firstnames":["Ryo"],"suffixes":[]},{"propositions":[],"lastnames":["Kawashima"],"firstnames":["Hiroaki"],"suffixes":[]},{"propositions":[],"lastnames":["Matsuyama"],"firstnames":["Takashi"],"suffixes":[]}],"year":"2013","pages":"421--428","bibtex":"@inproceedings{yonetani_predicting_2013,\n\taddress = {New York, NY, USA},\n\tseries = {{ICMI} '13},\n\ttitle = {Predicting {Where} {We} {Look} from {Spatiotemporal} {Gaps}},\n\tisbn = {978-1-4503-2129-7},\n\turl = {http://doi.acm.org/10.1145/2522848.2522853},\n\tdoi = {10.1145/2522848.2522853},\n\tabstract = {When we are watching videos, there exist spatiotemporal gaps between where we look and what we focus on, which result from temporally delayed responses and anticipation in eye movements. We focus on the underlying structures of those gaps and propose a novel method to predict points of gaze from video data. In the proposed methods, we model the spatiotemporal patterns of salient regions that tend to be focused on and statistically learn which types of the patterns strongly appear around the points of gaze with respect to each type of eye movements. It allows us to exploit the structures of gaps affected by eye movements and salient motions for the gaze-point prediction. The effectiveness of the proposed method is confirmed with several public datasets.},\n\turldate = {2014-06-05},\n\tbooktitle = {Proceedings of the 15th {ACM} on {International} {Conference} on {Multimodal} {Interaction}},\n\tpublisher = {ACM},\n\tauthor = {Yonetani, Ryo and Kawashima, Hiroaki and Matsuyama, Takashi},\n\tyear = {2013},\n\tpages = {421--428}\n}\n\n","author_short":["Yonetani, R.","Kawashima, H.","Matsuyama, T."],"key":"yonetani_predicting_2013","id":"yonetani_predicting_2013","bibbaseid":"yonetani-kawashima-matsuyama-predictingwherewelookfromspatiotemporalgaps-2013","role":"author","urls":{"Paper":"http://doi.acm.org/10.1145/2522848.2522853"},"downloads":0},"search_terms":["predicting","look","spatiotemporal","gaps","yonetani","kawashima","matsuyama"],"keywords":[],"authorIDs":[],"dataSources":["tudya6YojbqEiF783"]}