Anchored Correlation Explanation: Topic Modeling with Minimal Domain Knowledge. Gallagher, R. J., Reing, K., Kale, D., & Ver Steeg, G. Transactions of the Association for Computational Linguistics, 5:529–542, December 2017. https://www.transacl.org/ojs/index.php/tacl/article/view/1244

Abstract: While generative models such as Latent Dirichlet Allocation (LDA) have proven fruitful in topic modeling, they often require detailed assumptions and careful specification of hyperparameters. Such model complexity issues only compound when trying to generalize generative models to incorporate human input. We introduce Correlation Explanation (CorEx), an alternative approach to topic modeling that does not assume an underlying generative model, and instead learns maximally informative topics through an information-theoretic framework. This framework naturally generalizes to hierarchical and semi-supervised extensions with no additional modeling assumptions. In particular, word-level domain knowledge can be flexibly incorporated within CorEx through anchor words, allowing topic separability and representation to be promoted with minimal human intervention. Across a variety of datasets, metrics, and experiments, we demonstrate that CorEx produces topics that are comparable in quality to those produced by unsupervised and semi-supervised variants of LDA.
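Background on the abstract's "information-theoretic framework" (a summary sketch of the paper's formulation; the notation below is paraphrased, not quoted from this record): CorEx scores a group of word variables $X_G$ by its total correlation, the multivariate generalization of mutual information,

\[ TC(X_G) = \sum_{i \in G} H(X_i) - H(X_G), \]

and learns discrete latent topics $Y_1, \dots, Y_m$ over disjoint word groups $G_1, \dots, G_m$ by maximizing the total correlation each topic explains about its group,

\[ \max_{\{G_j\},\; p(y_j \mid x_{G_j})} \sum_{j=1}^{m} TC(X_{G_j} ; Y_j), \qquad TC(X_G ; Y) = TC(X_G) - TC(X_G \mid Y). \]

Anchoring reweights terms in this objective so that user-specified anchor words are drawn toward designated topics, which is how word-level domain knowledge enters without additional generative assumptions.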
@article{gallagher_anchored_2017-1,
title = {Anchored {Correlation} {Explanation}: {Topic} {Modeling} with {Minimal} {Domain} {Knowledge}},
volume = {5},
copyright = {Copyright (c) 2017 Association for Computational Linguistics},
issn = {2307-387X},
shorttitle = {Anchored {Correlation} {Explanation}},
url = {https://www.transacl.org/ojs/index.php/tacl/article/view/1244},
abstract = {While generative models such as Latent Dirichlet Allocation (LDA) have proven fruitful in topic modeling, they often require detailed assumptions and careful specification of hyperparameters. Such model complexity issues only compound when trying to generalize generative models to incorporate human input. We introduce Correlation Explanation (CorEx), an alternative approach to topic modeling that does not assume an underlying generative model, and instead learns maximally informative topics through an information-theoretic framework. This framework naturally generalizes to hierarchical and semi-supervised extensions with no additional modeling assumptions. In particular, word-level domain knowledge can be flexibly incorporated within CorEx through anchor words, allowing topic separability and representation to be promoted with minimal human intervention. Across a variety of datasets, metrics, and experiments, we demonstrate that CorEx produces topics that are comparable in quality to those produced by unsupervised and semi-supervised variants of LDA.},
language = {en},
urldate = {2017-12-04},
journal = {Transactions of the Association for Computational Linguistics},
author = {Gallagher, Ryan J. and Reing, Kyle and Kale, David and Ver Steeg, Greg},
month = dec,
year = {2017},
pages = {529--542}
}
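The authors released an open-source Python implementation of this method, the corextopic package (https://github.com/gregversteeg/corex_topic). Below is a minimal sketch of anchored topic modeling with that package; the toy documents, anchor words, and anchor_strength value are illustrative choices, not taken from the paper:

from sklearn.feature_extraction.text import CountVectorizer
from corextopic import corextopic as ct

docs = [
    "nasa launched a new satellite into orbit",
    "astronauts aboard the station ran experiments",
    "the team won the baseball game last night",
    "the pitcher threw a no-hitter in the playoffs",
]

# CorEx operates on a binary bag-of-words matrix.
vectorizer = CountVectorizer(binary=True)
doc_word = vectorizer.fit_transform(docs)
words = list(vectorizer.get_feature_names_out())

# Two topics, each seeded with illustrative anchor words;
# anchor_strength > 1 biases topics toward their anchors.
topic_model = ct.Corex(n_hidden=2, seed=1)
topic_model.fit(
    doc_word,
    words=words,
    anchors=[["nasa", "satellite"], ["baseball", "pitcher"]],
    anchor_strength=2,
)

# Print the top words CorEx assigned to each topic.
for n in range(2):
    topic_words = [w for w, *rest in topic_model.get_topics(topic=n)]
    print(f"Topic {n}: {', '.join(topic_words)}")

Note that anchoring only nudges the information-theoretic objective toward the seed words; unanchored topics are still learned fully unsupervised, which is what keeps the required domain knowledge minimal.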