Deep Learning for Automated Occlusion Edge Detection in RGB-D Frames. Sarkar, S., Venugopalan, V., Reddy, K., Ryde, J., Jaitly, N., & Giering, M. Journal of Signal Processing Systems, December 2016. doi: 10.1007/s11265-016-1209-3

Abstract: Occlusion edges correspond to range discontinuities in a scene from the point of view of the observer. Detection of occlusion edges is an important prerequisite for many machine vision and mobile robotics tasks. Although they can be extracted from range data, extracting them from images and videos would be extremely beneficial. We trained a deep convolutional neural network (CNN) to identify occlusion edges in images and videos using only RGB, RGB-D, and RGB-D-UV inputs, where D stands for depth and U and V stand for the horizontal and vertical components of the optical flow field, respectively. The use of a CNN avoids hand-crafting features for automatically isolating occlusion edges and distinguishing them from appearance edges. In addition to quantitative occlusion edge detection results, qualitative results are provided to evaluate input data requirements and to demonstrate the trade-off between high-resolution analysis and the frame-level computation time that is critical for real-time robotics applications.
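As a rough illustration of the setup described in the abstract, the sketch below stacks the RGB, depth (D), and optical-flow (U, V) channels into a six-channel input patch and classifies each patch as occlusion edge vs. no edge with a small CNN. This is a minimal sketch under stated assumptions, not the authors' architecture: the 32x32 patch size, layer counts, filter sizes, and the use of PyTorch are illustrative choices, not details taken from the paper.

import torch
import torch.nn as nn

class OcclusionEdgeCNN(nn.Module):
    """Toy patch classifier: occlusion edge vs. no edge (illustrative only)."""

    def __init__(self, in_channels: int = 6):  # 3 (RGB) + 1 (D) + 2 (UV)
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(in_channels, 32, kernel_size=5, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(2),   # 32x32 -> 16x16
            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2),   # 16x16 -> 8x8
        )
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(64 * 8 * 8, 128),
            nn.ReLU(),
            nn.Linear(128, 2),  # logits: [no edge, occlusion edge]
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.classifier(self.features(x))

# Example: a batch of 32x32 patches with RGB, D, and U/V flow channels stacked
# along the channel axis (random data stands in for real RGB-D-UV frames here).
patches = torch.randn(8, 6, 32, 32)
logits = OcclusionEdgeCNN()(patches)  # shape: (8, 2)

Dropping the D and UV channels (in_channels=3) corresponds to the RGB-only variant the abstract compares against; the channel stacking, rather than any particular layer configuration, is the point of the sketch.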
@article{Sarkar2016Deep-Learn,
abstract = {Occlusion edges correspond to range discontinuity in a scene from the point of view of the observer. Detection of occlusion edges is an important prerequisite for many machine vision and mobile robotic tasks. Although they can be extracted from range data, extracting them from images and videos would be extremely beneficial. We trained a deep convolutional neural network (CNN) to identify occlusion edges in images and videos with just RGB, RGB-D and RGB-D-UV inputs, where D stands for depth and UV stands for horizontal and vertical components of the optical flow field respectively. The use of CNN avoids hand-crafting of features for automatically isolating occlusion edges and distinguishing them from appearance edges. Other than quantitative occlusion edge detection results, qualitative results are provided to evaluate input data requirements and to demonstrate the trade-off between high resolution analysis and frame-level computation time that is critical for real-time robotics applications.},
author = {Sarkar, Soumik and Venugopalan, Vivek and Reddy, Kishore and Ryde, Julian and Jaitly, Navdeep and Giering, Michael},
date-added = {2021-01-30 10:23:38 -0500},
date-modified = {2021-01-30 10:23:38 -0500},
doi = {10.1007/s11265-016-1209-3},
issn = {1939-8115},
journal = {Journal of Signal Processing Systems},
month = {December},
pages = {1--13},
title = {{Deep Learning for Automated Occlusion Edge Detection in RGB-D Frames}},
url = {http://dx.doi.org/10.1007/s11265-016-1209-3},
year = {2016},
Bdsk-Url-1 = {http://dx.doi.org/10.1007/s11265-016-1209-3}}
{"_id":"XbXSHiEhjXntfrKhB","bibbaseid":"sarkar-venugopalan-reddy-ryde-jaitly-giering-deeplearningforautomatedocclusionedgedetectioninrgbdframes-2016","author_short":["Sarkar, S.","Venugopalan, V.","Reddy, K.","Ryde, J.","Jaitly, N.","Giering, M."],"bibdata":{"bibtype":"article","type":"article","abstract":"Occlusion edges correspond to range discontinuity in a scene from the point of view of the observer. Detection of occlusion edges is an important prerequisite for many machine vision and mobile robotic tasks. Although they can be extracted from range data, extracting them from images and videos would be extremely beneficial. We trained a deep convolutional neural network (CNN) to identify occlusion edges in images and videos with just RGB, RGB-D and RGB-D-UV inputs, where D stands for depth and UV stands for horizontal and vertical components of the optical flow field respectively. The use of CNN avoids hand-crafting of features for automatically isolating occlusion edges and distinguishing them from appearance edges. Other than quantitative occlusion edge detection results, qualitative results are provided to evaluate input data requirements and to demonstrate the trade-off between high resolution analysis and frame-level computation time that is critical for real-time robotics applications.","author":[{"propositions":[],"lastnames":["Sarkar"],"firstnames":["Soumik"],"suffixes":[]},{"propositions":[],"lastnames":["Venugopalan"],"firstnames":["Vivek"],"suffixes":[]},{"propositions":[],"lastnames":["Reddy"],"firstnames":["Kishore"],"suffixes":[]},{"propositions":[],"lastnames":["Ryde"],"firstnames":["Julian"],"suffixes":[]},{"propositions":[],"lastnames":["Jaitly"],"firstnames":["Navdeep"],"suffixes":[]},{"propositions":[],"lastnames":["Giering"],"firstnames":["Michael"],"suffixes":[]}],"date-added":"2021-01-30 10:23:38 -0500","date-modified":"2021-01-30 10:23:38 -0500","doi":"10.1007/s11265-016-1209-3","issn":"1939-8115","journal":"Journal of Signal Processing Systems","month":"December","pages":"1-13","title":"Deep Learning for Automated Occlusion Edge Detection in RGB-D Frames","url":"http://dx.doi.org/10.1007/s11265-016-1209-3","year":"2016","bdsk-url-1":"http://dx.doi.org/10.1007/s11265-016-1209-3","bibtex":"@article{Sarkar2016Deep-Learn,\n\tabstract = {Occlusion edges correspond to range discontinuity in a scene from the point of view of the observer. Detection of occlusion edges is an important prerequisite for many machine vision and mobile robotic tasks. Although they can be extracted from range data, extracting them from images and videos would be extremely beneficial. We trained a deep convolutional neural network (CNN) to identify occlusion edges in images and videos with just RGB, RGB-D and RGB-D-UV inputs, where D stands for depth and UV stands for horizontal and vertical components of the optical flow field respectively. The use of CNN avoids hand-crafting of features for automatically isolating occlusion edges and distinguishing them from appearance edges. 
Other than quantitative occlusion edge detection results, qualitative results are provided to evaluate input data requirements and to demonstrate the trade-off between high resolution analysis and frame-level computation time that is critical for real-time robotics applications.},\n\tauthor = {Sarkar, Soumik and Venugopalan, Vivek and Reddy, Kishore and Ryde, Julian and Jaitly, Navdeep and Giering, Michael},\n\tdate-added = {2021-01-30 10:23:38 -0500},\n\tdate-modified = {2021-01-30 10:23:38 -0500},\n\tdoi = {10.1007/s11265-016-1209-3},\n\tissn = {1939-8115},\n\tjournal = {Journal of Signal Processing Systems},\n\tmonth = {December},\n\tpages = {1-13},\n\ttitle = {{Deep Learning for Automated Occlusion Edge Detection in RGB-D Frames}},\n\turl = {http://dx.doi.org/10.1007/s11265-016-1209-3},\n\tyear = {2016},\n\tBdsk-Url-1 = {http://dx.doi.org/10.1007/s11265-016-1209-3}}\n\n","author_short":["Sarkar, S.","Venugopalan, V.","Reddy, K.","Ryde, J.","Jaitly, N.","Giering, M."],"bibbaseid":"sarkar-venugopalan-reddy-ryde-jaitly-giering-deeplearningforautomatedocclusionedgedetectioninrgbdframes-2016","role":"author","urls":{"Paper":"http://dx.doi.org/10.1007/s11265-016-1209-3"},"metadata":{"authorlinks":{}}},"bibtype":"article","biburl":"https://bibbase.org/f/R3LGkkqj3uPfqr3NM/vivekv-isi-edu.bib","dataSources":["i5A8jKqyW67R7nn75","oodiEqv3HQNeZtGeC"],"keywords":[],"search_terms":["deep","learning","automated","occlusion","edge","detection","rgb","frames","sarkar","venugopalan","reddy","ryde","jaitly","giering"],"title":"Deep Learning for Automated Occlusion Edge Detection in RGB-D Frames","year":2016}