Multi-scale similarities in stochastic neighbour embedding: Reducing dimensionality while preserving both local and global structure. Lee, J. A., Peluffo-Ordóñez, D. H., & Verleysen, M. Neurocomputing, 169:246–261, December 2015.
Multi-scale similarities in stochastic neighbour embedding: Reducing dimensionality while preserving both local and global structure [link]Website  doi  abstract   bibtex   1 download  
Stochastic neighbour embedding (SNE) and its variants are methods of nonlinear dimensionality reduction that involve soft Gaussian neighbourhoods to measure similarities for all pairs of data. In order to build a suitable embedding, these methods try to reproduce in a low-dimensional space the neighbourhoods that are observed in the high-dimensional data space. Previous works have investigated the immunity of such similarities to norm concentration, as well as enhanced cost functions, like sums of Jensen-Shannon divergences. This paper proposes an additional refinement, namely multi-scale similarities, which are averages of soft Gaussian neighbourhoods with exponentially growing bandwidths. Such multi-scale similarities can replace the regular, single-scale neighbourhoods in SNE-like methods. Their objective is then to maximise the embedding quality on all scales, with the best preservation of both local and global neighbourhoods, and also to exempt the user from having to fix a scale arbitrarily. Experiments with several data sets show that the proposed multi-scale approach captures better the structure of data and improves significantly the quality of dimensionality reduction.
% Lee, Peluffo-Ordóñez & Verleysen (2015), Neurocomputing 169:246--261.
% Review fixes: added the missing citation key (the entry had none, which
% misparses under classic BibTeX — key taken from the citation_key field
% below), double-hyphen page range, standard unquoted month macro, the
% standard `url` field in place of the non-standard `websites`, and
% brace-group accent escapes in `author` so sorting/labels work under
% classic 8-bit BibTeX as well as Biber.
% NOTE(review): the fields from `id` through `private_publication` are
% Mendeley sync metadata — ignored by BibTeX/Biber, kept for round-tripping.
@article{Lee2015,
  title = {Multi-scale similarities in stochastic neighbour embedding: Reducing dimensionality while preserving both local and global structure},
  type = {article},
  year = {2015},
  keywords = {Data visualisation,Jensen-Shannon divergence,Manifold learning,Nonlinear dimensionality reduction,Stochastic neighbour embedding},
  pages = {246--261},
  volume = {169},
  url = {https://linkinghub.elsevier.com/retrieve/pii/S0925231215003641},
  month = dec,
  id = {43243739-ddee-32bf-b1e0-a6a70e8cdcaa},
  created = {2020-12-29T22:52:12.240Z},
  file_attached = {false},
  profile_id = {aba9653c-d139-3f95-aad8-969c487ed2f3},
  last_modified = {2021-08-11T22:46:31.060Z},
  read = {false},
  starred = {false},
  authored = {true},
  confirmed = {true},
  hidden = {false},
  citation_key = {Lee2015},
  folder_uuids = {15b32c16-cda9-4551-a173-303b5217df48},
  private_publication = {false},
  abstract = {Stochastic neighbour embedding (SNE) and its variants are methods of nonlinear dimensionality reduction that involve soft Gaussian neighbourhoods to measure similarities for all pairs of data. In order to build a suitable embedding, these methods try to reproduce in a low-dimensional space the neighbourhoods that are observed in the high-dimensional data space. Previous works have investigated the immunity of such similarities to norm concentration, as well as enhanced cost functions, like sums of Jensen-Shannon divergences. This paper proposes an additional refinement, namely multi-scale similarities, which are averages of soft Gaussian neighbourhoods with exponentially growing bandwidths. Such multi-scale similarities can replace the regular, single-scale neighbourhoods in SNE-like methods. Their objective is then to maximise the embedding quality on all scales, with the best preservation of both local and global neighbourhoods, and also to exempt the user from having to fix a scale arbitrarily. Experiments with several data sets show that the proposed multi-scale approach captures better the structure of data and improves significantly the quality of dimensionality reduction.},
  bibtype = {article},
  author = {Lee, John A. and Peluffo-Ord{\'o}{\~n}ez, Diego H. and Verleysen, Michel},
  doi = {10.1016/j.neucom.2014.12.095},
  journal = {Neurocomputing}
}

Downloads: 1