Deep Closest Point: Learning Representations for Point Cloud Registration. Wang, Y. &amp; Solomon, J. Proceedings of the IEEE International Conference on Computer Vision, pages 3522–3531, 2019.
Paper doi abstract bibtex Point cloud registration is a key problem for computer vision applied to robotics, medical imaging, and other applications. This problem involves finding a rigid transformation from one point cloud into another so that they align. Iterative Closest Point (ICP) and its variants provide simple and easily-implemented iterative methods for this task, but these algorithms can converge to spurious local optima. To address local optima and other difficulties in the ICP pipeline, we propose a learning-based method, titled Deep Closest Point (DCP), inspired by recent techniques in computer vision and natural language processing. Our model consists of three parts: A point cloud embedding network, an attention-based module combined with a pointer generation layer to approximate combinatorial matching, and a differentiable singular value decomposition (SVD) layer to extract the final rigid transformation. We train our model end-to-end on the ModelNet40 dataset and show in several settings that it performs better than ICP, its variants (e.g., Go-ICP, FGR), and the recently-proposed learning-based method PointNetLK. Beyond providing a state-of-the-art registration technique, we evaluate the suitability of our learned features transferred to unseen objects. We also provide preliminary analysis of our learned model to help understand whether domain-specific and/or global features facilitate rigid registration.
@inproceedings{wang2019dcp,
  author    = {Wang, Yue and Solomon, Justin},
  title     = {{Deep Closest Point}: Learning Representations for Point Cloud Registration},
  booktitle = {Proceedings of the IEEE International Conference on Computer Vision},
  year      = {2019},
  pages     = {3522--3531},
  publisher = {IEEE},
  doi       = {10.1109/ICCV.2019.00362},
  abstract  = {Point cloud registration is a key problem for computer vision applied to robotics, medical imaging, and other applications. This problem involves finding a rigid transformation from one point cloud into another so that they align. Iterative Closest Point (ICP) and its variants provide simple and easily-implemented iterative methods for this task, but these algorithms can converge to spurious local optima. To address local optima and other difficulties in the ICP pipeline, we propose a learning-based method, titled Deep Closest Point (DCP), inspired by recent techniques in computer vision and natural language processing. Our model consists of three parts: A point cloud embedding network, an attention-based module combined with a pointer generation layer to approximate combinatorial matching, and a differentiable singular value decomposition (SVD) layer to extract the final rigid transformation. We train our model end-to-end on the ModelNet40 dataset and show in several settings that it performs better than ICP, its variants (e.g., Go-ICP, FGR), and the recently-proposed learning-based method PointNetLK. Beyond providing a state-of-the-art registration technique, we evaluate the suitability of our learned features transferred to unseen objects. We also provide preliminary analysis of our learned model to help understand whether domain-specific and/or global features facilitate rigid registration.},
}
Downloads: 0
{"_id":"c2LLHpWqpiBQgHGDJ","bibbaseid":"wang-solomon-deepclosestpointlearningrepresentationsforpointcloudregistration-2019","author_short":["Wang, Y.","Solomon, J."],"bibdata":{"title":"Deep closest point: Learning representations for point cloud registration","type":"article","year":"2019","pages":"3522-3531","volume":"2019-Octob","id":"3a54d08e-17d3-3cb9-912d-d7f00c3b42eb","created":"2021-09-16T07:10:13.949Z","file_attached":"true","profile_id":"f3d36c73-062b-3738-9a74-d09e4e83eb1e","group_id":"1ff583c0-be37-34fa-9c04-73c69437d354","last_modified":"2021-09-28T07:20:31.412Z","read":"true","starred":false,"authored":false,"confirmed":"true","hidden":false,"folder_uuids":"48e9a80d-67a5-450d-9b08-c7bc934154e8","private_publication":false,"abstract":"Point cloud registration is a key problem for computer vision applied to robotics, medical imaging, and other applications. This problem involves finding a rigid transformation from one point cloud into another so that they align. Iterative Closest Point (ICP) and its variants provide simple and easily-implemented iterative methods for this task, but these algorithms can converge to spurious local optima. To address local optima and other difficulties in the ICP pipeline, we propose a learning-based method, titled Deep Closest Point (DCP), inspired by recent techniques in computer vision and natural language processing. Our model consists of three parts: A point cloud embedding network, an attention-based module combined with a pointer generation layer to approximate combinatorial matching, and a differentiable singular value decomposition (SVD) layer to extract the final rigid transformation. We train our model end-to-end on the ModelNet40 dataset and show in several settings that it performs better than ICP, its variants (e.g., Go-ICP, FGR), and the recently-proposed learning-based method PointNetLK. 
Beyond providing a state-of-the-art registration technique, we evaluate the suitability of our learned features transferred to unseen objects. We also provide preliminary analysis of our learned model to help understand whether domain-specific and/or global features facilitate rigid registration.","bibtype":"article","author":"Wang, Yue and Solomon, Justin","doi":"10.1109/ICCV.2019.00362","journal":"Proceedings of the IEEE International Conference on Computer Vision","bibtex":"@article{\n title = {Deep closest point: Learning representations for point cloud registration},\n type = {article},\n year = {2019},\n pages = {3522-3531},\n volume = {2019-Octob},\n id = {3a54d08e-17d3-3cb9-912d-d7f00c3b42eb},\n created = {2021-09-16T07:10:13.949Z},\n file_attached = {true},\n profile_id = {f3d36c73-062b-3738-9a74-d09e4e83eb1e},\n group_id = {1ff583c0-be37-34fa-9c04-73c69437d354},\n last_modified = {2021-09-28T07:20:31.412Z},\n read = {true},\n starred = {false},\n authored = {false},\n confirmed = {true},\n hidden = {false},\n folder_uuids = {48e9a80d-67a5-450d-9b08-c7bc934154e8},\n private_publication = {false},\n abstract = {Point cloud registration is a key problem for computer vision applied to robotics, medical imaging, and other applications. This problem involves finding a rigid transformation from one point cloud into another so that they align. Iterative Closest Point (ICP) and its variants provide simple and easily-implemented iterative methods for this task, but these algorithms can converge to spurious local optima. To address local optima and other difficulties in the ICP pipeline, we propose a learning-based method, titled Deep Closest Point (DCP), inspired by recent techniques in computer vision and natural language processing. 
Our model consists of three parts: A point cloud embedding network, an attention-based module combined with a pointer generation layer to approximate combinatorial matching, and a differentiable singular value decomposition (SVD) layer to extract the final rigid transformation. We train our model end-to-end on the ModelNet40 dataset and show in several settings that it performs better than ICP, its variants (e.g., Go-ICP, FGR), and the recently-proposed learning-based method PointNetLK. Beyond providing a state-of-the-art registration technique, we evaluate the suitability of our learned features transferred to unseen objects. We also provide preliminary analysis of our learned model to help understand whether domain-specific and/or global features facilitate rigid registration.},\n bibtype = {article},\n author = {Wang, Yue and Solomon, Justin},\n doi = {10.1109/ICCV.2019.00362},\n journal = {Proceedings of the IEEE International Conference on Computer Vision}\n}","author_short":["Wang, Y.","Solomon, J."],"urls":{"Paper":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c/file/3c313e95-fe56-4775-243f-b00e1043792a/Wang_Deep_Closest_Point_Learning_Representations_for_Point_Cloud_Registration_ICCV_2019_paper.pdf.pdf"},"biburl":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c","bibbaseid":"wang-solomon-deepclosestpointlearningrepresentationsforpointcloudregistration-2019","role":"author","metadata":{"authorlinks":{}}},"bibtype":"article","biburl":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c","dataSources":["nZHrFJKyxKKDaWYM8","2252seNhipfTmjEBQ"],"keywords":[],"search_terms":["deep","closest","point","learning","representations","point","cloud","registration","wang","solomon"],"title":"Deep closest point: Learning representations for point cloud registration","year":2019}