Depth Map Prediction from a Single Image using a Multi-Scale Deep Network. Eigen, D., Puhrsch, C., & Fergus, R. arXiv:1406.2283 [cs], June, 2014. arXiv: 1406.2283
Paper abstract bibtex Predicting depth is an essential component in understanding the 3D geometry of a scene. While for stereo images local correspondence suffices for estimation, finding depth relations from a single image is less straightforward, requiring integration of both global and local information from various cues. Moreover, the task is inherently ambiguous, with a large source of uncertainty coming from the overall scale. In this paper, we present a new method that addresses this task by employing two deep network stacks: one that makes a coarse global prediction based on the entire image, and another that refines this prediction locally. We also apply a scale-invariant error to help measure depth relations rather than scale. By leveraging the raw datasets as large sources of training data, our method achieves state-of-the-art results on both NYU Depth and KITTI, and matches detailed depth boundaries without the need for superpixelation.
Unrefereed arXiv preprint, so @misc + eprint fields rather than @article with the
arXiv id stuffed into journal/note. Key kept as eigen_depth_2014 for existing cites.
@misc{eigen_depth_2014,
	title = {Depth Map Prediction from a Single Image Using a Multi-Scale Deep Network},
	author = {Eigen, David and Puhrsch, Christian and Fergus, Rob},
	year = {2014},
	month = jun,
	eprint = {1406.2283},
	archiveprefix = {arXiv},
	primaryclass = {cs.CV},
	url = {http://arxiv.org/abs/1406.2283},
	urldate = {2017-12-28},
	abstract = {Predicting depth is an essential component in understanding the 3D geometry of a scene. While for stereo images local correspondence suffices for estimation, finding depth relations from a single image is less straightforward, requiring integration of both global and local information from various cues. Moreover, the task is inherently ambiguous, with a large source of uncertainty coming from the overall scale. In this paper, we present a new method that addresses this task by employing two deep network stacks: one that makes a coarse global prediction based on the entire image, and another that refines this prediction locally. We also apply a scale-invariant error to help measure depth relations rather than scale. By leveraging the raw datasets as large sources of training data, our method achieves state-of-the-art results on both NYU Depth and KITTI, and matches detailed depth boundaries without the need for superpixelation.},
	keywords = {Computer Science - Computer Vision and Pattern Recognition},
}
Downloads: 0
{"_id":"p93fpvagr9tbBr5rK","bibbaseid":"eigen-puhrsch-fergus-depthmappredictionfromasingleimageusingamultiscaledeepnetwork-2014","downloads":0,"creationDate":"2018-04-06T04:26:07.239Z","title":"Depth Map Prediction from a Single Image using a Multi-Scale Deep Network","author_short":["Eigen, D.","Puhrsch, C.","Fergus, R."],"year":2014,"bibtype":"article","biburl":"https://bibbase.org/zotero/alwynmathew","bibdata":{"bibtype":"article","type":"article","title":"Depth Map Prediction from a Single Image using a Multi-Scale Deep Network","url":"http://arxiv.org/abs/1406.2283","abstract":"Predicting depth is an essential component in understanding the 3D geometry of a scene. While for stereo images local correspondence suffices for estimation, finding depth relations from a single image is less straightforward, requiring integration of both global and local information from various cues. Moreover, the task is inherently ambiguous, with a large source of uncertainty coming from the overall scale. In this paper, we present a new method that addresses this task by employing two deep network stacks: one that makes a coarse global prediction based on the entire image, and another that refines this prediction locally. We also apply a scale-invariant error to help measure depth relations rather than scale. 
By leveraging the raw datasets as large sources of training data, our method achieves state-of-the-art results on both NYU Depth and KITTI, and matches detailed depth boundaries without the need for superpixelation.","urldate":"2017-12-28TZ","journal":"arXiv:1406.2283 [cs]","author":[{"propositions":[],"lastnames":["Eigen"],"firstnames":["David"],"suffixes":[]},{"propositions":[],"lastnames":["Puhrsch"],"firstnames":["Christian"],"suffixes":[]},{"propositions":[],"lastnames":["Fergus"],"firstnames":["Rob"],"suffixes":[]}],"month":"June","year":"2014","note":"arXiv: 1406.2283","keywords":"Computer Science - Computer Vision and Pattern Recognition","bibtex":"@article{eigen_depth_2014,\n\ttitle = {Depth {Map} {Prediction} from a {Single} {Image} using a {Multi}-{Scale} {Deep} {Network}},\n\turl = {http://arxiv.org/abs/1406.2283},\n\tabstract = {Predicting depth is an essential component in understanding the 3D geometry of a scene. While for stereo images local correspondence suffices for estimation, finding depth relations from a single image is less straightforward, requiring integration of both global and local information from various cues. Moreover, the task is inherently ambiguous, with a large source of uncertainty coming from the overall scale. In this paper, we present a new method that addresses this task by employing two deep network stacks: one that makes a coarse global prediction based on the entire image, and another that refines this prediction locally. We also apply a scale-invariant error to help measure depth relations rather than scale. 
By leveraging the raw datasets as large sources of training data, our method achieves state-of-the-art results on both NYU Depth and KITTI, and matches detailed depth boundaries without the need for superpixelation.},\n\turldate = {2017-12-28TZ},\n\tjournal = {arXiv:1406.2283 [cs]},\n\tauthor = {Eigen, David and Puhrsch, Christian and Fergus, Rob},\n\tmonth = jun,\n\tyear = {2014},\n\tnote = {arXiv: 1406.2283},\n\tkeywords = {Computer Science - Computer Vision and Pattern Recognition}\n}\n\n","author_short":["Eigen, D.","Puhrsch, C.","Fergus, R."],"key":"eigen_depth_2014","id":"eigen_depth_2014","bibbaseid":"eigen-puhrsch-fergus-depthmappredictionfromasingleimageusingamultiscaledeepnetwork-2014","role":"author","urls":{"Paper":"http://arxiv.org/abs/1406.2283"},"keyword":["Computer Science - Computer Vision and Pattern Recognition"],"downloads":0,"html":""},"search_terms":["depth","map","prediction","single","image","using","multi","scale","deep","network","eigen","puhrsch","fergus"],"keywords":["computer science - computer vision and pattern recognition"],"authorIDs":[],"dataSources":["p3JdPh89hHfoARFkn"]}