Deep Architectures for Joint Clustering and Visualization with Self-Organizing Maps. Forest, F., Lebbah, M., Azzag, H., & Lacaille, J. In Workshop on Learning Data Representations for Clustering (LDRC), PAKDD, 2019.
Abstract: Recent research has demonstrated how deep neural networks are able to learn representations to improve data clustering. By considering representation learning and clustering as a joint task, models learn clustering-friendly spaces and achieve superior performance, compared with standard two-stage approaches where dimensionality reduction and clustering are performed separately. We extend this idea to topology-preserving clustering models, known as self-organizing maps (SOM). First, we present the Deep Embedded Self-Organizing Map (DESOM), a model composed of a fully-connected autoencoder and a custom SOM layer, where the SOM code vectors are learnt jointly with the autoencoder weights. Then, we show that this generic architecture can be extended to image and sequence data by using convolutional and recurrent architectures, and present variants of these models. First results demonstrate advantages of the DESOM architecture in terms of clustering performance, visualization and training time.
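The abstract describes a joint objective: an autoencoder reconstruction loss combined with a SOM loss over code vectors (prototypes) living in the latent space. Below is a minimal, hedged sketch of how such a joint loss can be wired up; it is not the authors' implementation (see the Code link for that), and the layer sizes, the 8x8 map, the Gaussian neighborhood, and the weighting factor gamma are illustrative assumptions.

```python
# Hedged sketch (not the authors' code): a DESOM-style joint loss in PyTorch.
# Assumptions: fully-connected autoencoder, 8x8 SOM grid of prototypes in latent
# space, Gaussian neighborhood whose radius T is annealed by the training loop.
import torch
import torch.nn as nn

class DESOMSketch(nn.Module):
    def __init__(self, input_dim=784, latent_dim=10, map_size=(8, 8)):
        super().__init__()
        self.encoder = nn.Sequential(
            nn.Linear(input_dim, 500), nn.ReLU(),
            nn.Linear(500, latent_dim),
        )
        self.decoder = nn.Sequential(
            nn.Linear(latent_dim, 500), nn.ReLU(),
            nn.Linear(500, input_dim),
        )
        n_units = map_size[0] * map_size[1]
        # SOM code vectors (prototypes) live in latent space and are trained jointly.
        self.prototypes = nn.Parameter(torch.randn(n_units, latent_dim) * 0.05)
        # Pairwise distances between units on the 2-D map grid (fixed topology).
        coords = torch.stack(torch.meshgrid(
            torch.arange(map_size[0]), torch.arange(map_size[1]), indexing="ij"
        ), dim=-1).reshape(-1, 2).float()
        self.register_buffer("grid_dist", torch.cdist(coords, coords))

    def forward(self, x, T=1.0, gamma=1e-3):
        z = self.encoder(x)                      # latent codes, shape (B, latent_dim)
        x_rec = self.decoder(z)
        recon_loss = ((x_rec - x) ** 2).mean()
        # Squared distances from each latent code to every prototype: (B, n_units).
        d = torch.cdist(z, self.prototypes) ** 2
        bmu = d.argmin(dim=1)                    # best-matching unit per sample
        # Gaussian neighborhood weights centered on each sample's BMU.
        h = torch.exp(-(self.grid_dist[bmu] ** 2) / (2 * T ** 2))
        som_loss = (h * d).sum(dim=1).mean()
        # Joint objective: reconstruction plus neighborhood-weighted SOM distortion.
        return recon_loss + gamma * som_loss, x_rec, bmu
```

In this sketch, gamma trades off reconstruction fidelity against self-organization of the latent space, and T plays the role of the shrinking neighborhood radius of a classical SOM; both would typically be scheduled over training rather than fixed.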
@inproceedings{forest2019deeparchitectures,
abstract = {Recent research has demonstrated how deep neural networks are able to learn representations to improve data clustering. By considering representation learning and clustering as a joint task, models learn clustering-friendly spaces and achieve superior performance, compared with standard two-stage approaches where dimensionality reduction and clustering are performed separately. We extend this idea to topology-preserving clustering models, known as self-organizing maps (SOM). First, we present the Deep Embedded Self-Organizing Map (DESOM), a model composed of a fully-connected autoencoder and a custom SOM layer, where the SOM code vectors are learnt jointly with the autoencoder weights. Then, we show that this generic architecture can be extended to image and sequence data by using convolutional and recurrent architectures, and present variants of these models. First results demonstrate advantages of the DESOM architecture in terms of clustering performance, visualization and training time.},
author = {Forest, Florent and Lebbah, Mustapha and Azzag, Hanane and Lacaille, J{\'{e}}r{\^{o}}me},
booktitle = {Workshop on Learning Data Representations for Clustering (LDRC), PAKDD},
doi = {10.1007/978-3-030-26142-9_10},
keywords = {autoencoder,clustering,deep learning,representation learning,self-organizing map},
title = {{Deep Architectures for Joint Clustering and Visualization with Self-Organizing Maps}},
year = {2019},
url_Link = {https://link.springer.com/chapter/10.1007/978-3-030-26142-9_10},
url_Paper = {LDRC-2019-DeepArchitecturesJointClusteringVisualization-full-paper.pdf},
url_Code = {https://github.com/FlorentF9/DESOM},
bibbase_note = {<img src="assets/img/papers/convdesom.png">}
}
{"_id":"49txbE6boMdbscSfb","bibbaseid":"forest-lebbah-azzag-lacaille-deeparchitecturesforjointclusteringandvisualizationwithselforganizingmaps-2019","authorIDs":["3RZwrfhgEfG63k5jg","3SxonBqsacbDvLJ76","4pjW5cyboi9zAMzpJ","5GQkZSCvSenq46qrN","5e6b9bc538517edf01000017","5yiWY47jSKdKfxe8h","9qSGhPLL995WCyBt4","AKaMxQWEWqbjcWG7N","AM9C2xF5khaf9AsvW","AxC6pWiCeJw6FYXPE","CaDeWzaiZSQLxHvcA","DoEx226svswhqRcaE","ELezCHnS9s4GLdQy6","FzQCQZZM2MyWCZ2Nk","LbHvNbjvd5SkFkuNR","PGMnjRbz468bY8K45","PMuH6kGJyGBBKaPYe","QKR8LH5fNTeez9B9D","QooEurLXqaPJTygeA","Shf2NXF4PRiWmkkaR","TLEpmitPf4iQZYYH3","XJuBB3DfhZqz5JRA2","XQYrXit4PKWKPpoFZ","Y9BDNsRff4jtL7KXf","ZJz7PJ2gTd3c9R5Me","ZP68aFMeNEEHE8xxP","ZdhbKf5hqQGmmgznz","bSZr8LpgDyfBqdthh","cWw2GCMAZjb3jNqJg","fqKhgaj4PXP3S7gdz","gin9q4MZJiqC4ijLj","hsKzafbbbN8QCZpaJ","nXg8oPeomTivMAv3c","osQRFxHgsbu3kMKhA","qg2SiAD87Cvt74rRz","rXGN6qAzjYH3w7cDb","rsGH8RBJrySfARcn6","ubSLXJFk5Lfp3hBSJ","v25LmrsFxX427ebY9","wqoh86YAdKofyck7L","xJ6yoMkx7j8uqjRS6","ysCctvEZbBsFvn8e7"],"author_short":["Forest, F.","Lebbah, M.","Azzag, H.","Lacaille, J."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","abstract":"Recent research has demonstrated how deep neural networks are able to learn representations to improve data clustering. By considering representation learning and clustering as a joint task, models learn clustering-friendly spaces and achieve superior performance, com- pared with standard two-stage approaches where dimensionality reduc- tion and clustering are performed separately. We extend this idea to topology-preserving clustering models, known as self-organizing maps (SOM). First, we present the Deep Embedded Self-Organizing Map (DE- SOM), a model composed of a fully-connected autoencoder and a custom SOM layer, where the SOM code vectors are learnt jointly with the au- toencoder weights. Then, we show that this generic architecture can be extended to image and sequence data by using convolutional and recur- rent architectures, and present variants of these models. First results demonstrate advantages of the DESOM architecture in terms of cluster- ing performance, visualization and training time.","author":[{"propositions":[],"lastnames":["Forest"],"firstnames":["Florent"],"suffixes":[]},{"propositions":[],"lastnames":["Lebbah"],"firstnames":["Mustapha"],"suffixes":[]},{"propositions":[],"lastnames":["Azzag"],"firstnames":["Hanane"],"suffixes":[]},{"propositions":[],"lastnames":["Lacaille"],"firstnames":["Jérôme"],"suffixes":[]}],"booktitle":"Workshop on Learning Data Representations for Clustering (LDRC), PAKDD","doi":"10.1007/978-3-030-26142-9_10","keywords":"autoencoder,clustering,deep learning,representation learning,self-organizing map","title":"Deep Architectures for Joint Clustering and Visualization with Self-Organizing Maps","year":"2019","url_link":"https://link.springer.com/chapter/10.1007/978-3-030-26142-9_10","url_paper":"LDRC-2019-DeepArchitecturesJointClusteringVisualization-full-paper.pdf","url_code":"https://github.com/FlorentF9/DESOM","bibbase_note":"<img src=\"assets/img/papers/convdesom.png\">","bibtex":"@inproceedings{forest2019deeparchitectures,\nabstract = {Recent research has demonstrated how deep neural networks are able to learn representations to improve data clustering. 
By considering representation learning and clustering as a joint task, models learn clustering-friendly spaces and achieve superior performance, com- pared with standard two-stage approaches where dimensionality reduc- tion and clustering are performed separately. We extend this idea to topology-preserving clustering models, known as self-organizing maps (SOM). First, we present the Deep Embedded Self-Organizing Map (DE- SOM), a model composed of a fully-connected autoencoder and a custom SOM layer, where the SOM code vectors are learnt jointly with the au- toencoder weights. Then, we show that this generic architecture can be extended to image and sequence data by using convolutional and recur- rent architectures, and present variants of these models. First results demonstrate advantages of the DESOM architecture in terms of cluster- ing performance, visualization and training time.},\nauthor = {Forest, Florent and Lebbah, Mustapha and Azzag, Hanane and Lacaille, J{\\'{e}}r{\\^{o}}me},\nbooktitle = {Workshop on Learning Data Representations for Clustering (LDRC), PAKDD},\ndoi = {10.1007/978-3-030-26142-9_10},\nkeywords = {autoencoder,clustering,deep learning,representation learning,self-organizing map},\ntitle = {{Deep Architectures for Joint Clustering and Visualization with Self-Organizing Maps}},\nyear = {2019},\nurl_Link = {https://link.springer.com/chapter/10.1007/978-3-030-26142-9_10},\nurl_Paper = {LDRC-2019-DeepArchitecturesJointClusteringVisualization-full-paper.pdf},\nurl_Code = {https://github.com/FlorentF9/DESOM},\nbibbase_note = {<img src=\"assets/img/papers/convdesom.png\">}\n}\n\n","author_short":["Forest, F.","Lebbah, M.","Azzag, H.","Lacaille, J."],"key":"forest2019deeparchitectures","id":"forest2019deeparchitectures","bibbaseid":"forest-lebbah-azzag-lacaille-deeparchitecturesforjointclusteringandvisualizationwithselforganizingmaps-2019","role":"author","urls":{" link":"https://link.springer.com/chapter/10.1007/978-3-030-26142-9_10"," paper":"https://florentfo.rest/files/LDRC-2019-DeepArchitecturesJointClusteringVisualization-full-paper.pdf"," code":"https://github.com/FlorentF9/DESOM"},"keyword":["autoencoder","clustering","deep learning","representation learning","self-organizing map"],"metadata":{"authorlinks":{"forest, f":"https://florentfo.rest/publications"}},"downloads":39},"bibtype":"inproceedings","creationDate":"2020-03-13T14:42:13.970Z","downloads":39,"keywords":["autoencoder","clustering","deep learning","representation learning","self-organizing map"],"search_terms":["deep","architectures","joint","clustering","visualization","self","organizing","maps","forest","lebbah","azzag","lacaille"],"title":"Deep Architectures for Joint Clustering and Visualization with Self-Organizing Maps","year":2019,"biburl":"https://florentfo.rest/files/publications.bib","dataSources":["pBkCjKbyeirr5jeAd","DgnR6pzJ98ZEp97PW","2puawT8ZAQyYRypA3","6rNfa4Kp6dL5sGmf5","xH8ySTsEPTLou9gyR"]}