Cluster Ensembles – A Knowledge Reuse Framework for Combining Multiple Partitions. Strehl, A. and Ghosh, J. Journal of Machine Learning Research, 3:583-617, 2002.
Cluster Ensembles – A Knowledge Reuse Framework for Combining Multiple Partitions [pdf]Paper  Cluster Ensembles – A Knowledge Reuse Framework for Combining Multiple Partitions [pdf]Website  abstract   bibtex   
This paper introduces the problem of combining multiple partitionings of a set of objects into a single consolidated clustering without accessing the features or algorithms that determined these partitionings. We first identify several application scenarios for the resultant 'knowledge reuse' framework that we call cluster ensembles. The cluster ensemble problem is then formalized as a combinatorial optimization problem in terms of shared mutual information. In addition to a direct maximization approach, we propose three effective and efficient techniques for obtaining high-quality combiners (consensus functions). The first combiner induces a similarity measure from the partitionings and then reclusters the objects. The second combiner is based on hypergraph partitioning. The third one collapses groups of clusters into meta-clusters which then compete for each object to determine the combined clustering. Due to the low computational costs of our techniques, it is quite feasible to use a supra-consensus function that evaluates all three approaches against the objective function and picks the best solution for a given situation. We evaluate the effectiveness of cluster ensembles in three qualitatively different application scenarios: (i) where the original clusters were formed based on non-identical sets of features, (ii) where the original clustering algorithms worked on non-identical sets of objects, and (iii) where a common data-set is used and the main purpose of combining multiple clusterings is to improve the quality and robustness of the solution. Promising results are obtained in all three situations for synthetic as well as real data-sets.
@article{strehl2002cluster,
 title = {Cluster Ensembles -- A Knowledge Reuse Framework for Combining Multiple Partitions},
 author = {Strehl, Alexander and Ghosh, Joydeep},
 journal = {Journal of Machine Learning Research},
 year = {2002},
 volume = {3},
 pages = {583--617},
 url = {http://strehl.com/download/strehl-jmlr02.pdf},
 keywords = {cluster analysis,clustering,consensus functions,ensemble,knowledge reuse,multi-learner systems,mutual information,partitioning,unsupervised learning},
 abstract = {This paper introduces the problem of combining multiple partitionings of a set of objects into a single consolidated clustering without accessing the features or algorithms that determined these partitionings. We first identify several application scenarios for the resultant 'knowledge reuse' framework that we call cluster ensembles. The cluster ensemble problem is then formalized as a combinatorial optimization problem in terms of shared mutual information. In addition to a direct maximization approach, we propose three effective and efficient techniques for obtaining high-quality combiners (consensus functions). The first combiner induces a similarity measure from the partitionings and then reclusters the objects. The second combiner is based on hypergraph partitioning. The third one collapses groups of clusters into meta-clusters which then compete for each object to determine the combined clustering. Due to the low computational costs of our techniques, it is quite feasible to use a supra-consensus function that evaluates all three approaches against the objective function and picks the best solution for a given situation. We evaluate the effectiveness of cluster ensembles in three qualitatively different application scenarios: (i) where the original clusters were formed based on non-identical sets of features, (ii) where the original clustering algorithms worked on non-identical sets of objects, and (iii) where a common data-set is used and the main purpose of combining multiple clusterings is to improve the quality and robustness of the solution. Promising results are obtained in all three situations for synthetic as well as real data-sets.},
 type = {article},
 bibtype = {article},
 websites = {http://strehl.com/download/strehl-jmlr02.pdf},
 id = {a99fdbdd-7f48-32d2-a686-fae7091d496e},
 created = {2018-02-05T19:33:08.113Z},
 accessed = {2018-02-05},
 file_attached = {true},
 profile_id = {371589bb-c770-37ff-8193-93c6f25ffeb1},
 group_id = {f982cd63-7ceb-3aa2-ac7e-a953963d6716},
 last_modified = {2018-02-05T19:33:10.610Z},
 read = {false},
 starred = {false},
 authored = {false},
 confirmed = {false},
 hidden = {false},
 private_publication = {false}
}
Downloads: 0