Feature Visualization for 3D Point Cloud Autoencoders. Rios, T., Van Stein, B., Menzel, S., Back, T., Sendhoff, B., & Wollstadt, P. Proceedings of the International Joint Conference on Neural Networks (IJCNN), 2020. doi: 10.1109/IJCNN48605.2020.9207326
Abstract: In order to reduce the dimensionality of 3D point cloud representations, autoencoder architectures generate increasingly abstract, compressed features of the input data. Visualizing these features is central to understanding the learning process; however, while successful visualization techniques exist for neural networks applied to computer vision tasks, similar methods for geometric, especially non-Euclidean, input data are currently lacking. Hence, we propose a first-of-its-kind method to project the features learned by point cloud autoencoders into a 3D space augmented with color maps. Our proposal exploits the properties of the 1D convolutions used in state-of-the-art point cloud autoencoder architectures to handle the input data, which leads to an intuitive interpretation of the visualized features. Furthermore, we tackle the search for relevant co-activations in the feature space by clustering the input data in the latent space, where we explore the correspondence between network features and the geometric characteristics of typical shapes in the clusters. We tested our approach with experiments on a benchmark data set and with three different configurations of a point cloud autoencoder, where we show that the features learned by the autoencoder correlate with the occupancy of the input space by the training data.
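The abstract describes two ingredients: per-point features produced by shared 1D convolutions (so each feature channel assigns one activation value to every input point, which can be rendered as a color map over the 3D shape) and a clustering of latent codes to find typical co-activation patterns. The following is a minimal, hypothetical PyTorch sketch of that idea, not the authors' implementation; all names and dimensions (PointCloudEncoder, latent_dim=128, channel 5, four clusters) are illustrative assumptions.

```python
# Minimal sketch (not the paper's code): color points by one 1D-conv feature channel
# and cluster latent codes of several shapes. Assumes a PointNet-style encoder.
import torch
import torch.nn as nn
import numpy as np
from matplotlib import cm
from sklearn.cluster import KMeans


class PointCloudEncoder(nn.Module):
    """PointNet-style encoder: shared 1D convolutions over points, max-pooled to a latent code."""

    def __init__(self, latent_dim: int = 128):
        super().__init__()
        # Conv1d with kernel_size=1 acts as a shared per-point MLP on (B, 3, N) inputs.
        self.convs = nn.Sequential(
            nn.Conv1d(3, 64, kernel_size=1), nn.ReLU(),
            nn.Conv1d(64, 128, kernel_size=1), nn.ReLU(),
            nn.Conv1d(128, latent_dim, kernel_size=1),
        )

    def forward(self, points: torch.Tensor):
        # points: (B, 3, N) -> per-point features: (B, latent_dim, N)
        per_point = self.convs(points)
        # Global max pooling over the point dimension gives the latent code (B, latent_dim).
        latent = per_point.max(dim=2).values
        return per_point, latent


def feature_colors(per_point: torch.Tensor, channel: int) -> np.ndarray:
    """Map the activations of one feature channel to an RGB color per input point."""
    act = per_point[0, channel].detach().cpu().numpy()        # (N,) activations
    act = (act - act.min()) / (act.max() - act.min() + 1e-8)  # normalize to [0, 1]
    return cm.viridis(act)[:, :3]                             # (N, 3) RGB values


if __name__ == "__main__":
    encoder = PointCloudEncoder()
    cloud = torch.rand(1, 3, 2048)                            # dummy point cloud with N = 2048 points
    per_point, latent = encoder(cloud)

    # Color each input point by the activation of, e.g., feature channel 5.
    colors = feature_colors(per_point, channel=5)
    print("per-point colors:", colors.shape)                  # (2048, 3)

    # Cluster latent codes of many shapes to look for groups with similar co-activations.
    latents = torch.cat(
        [encoder(torch.rand(1, 3, 2048))[1] for _ in range(32)]
    ).detach().numpy()
    labels = KMeans(n_clusters=4, n_init=10).fit_predict(latents)
    print("cluster labels:", labels)
```

In a real setting the random clouds would be replaced by shapes from the benchmark data set, and the per-point colors would be attached to the original coordinates for rendering in a 3D viewer.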
@inproceedings{Rios2020,
  title = {Feature Visualization for 3D Point Cloud Autoencoders},
  author = {Rios, Thiago and Van Stein, Bas and Menzel, Stefan and Back, Thomas and Sendhoff, Bernhard and Wollstadt, Patricia},
  booktitle = {Proceedings of the International Joint Conference on Neural Networks (IJCNN)},
  year = {2020},
  doi = {10.1109/IJCNN48605.2020.9207326},
  keywords = {autoencoder, deep learning, feature visualization}
}