Accurate Methods for the Statistics of Surprise and Coincidence. Dunning, T. Computational Linguistics, 19(1):61–74, MIT Press, 1993.
Accurate Methods for the Statistics of Surprise and Coincidence [pdf] Paper · Accurate Methods for the Statistics of Surprise and Coincidence [link] Website · abstract · bibtex
Much work has been done on the statistical analysis of text. In some cases reported in the literature, inappropriate statistical methods have been used, and statistical significance of results have not been addressed. In particular, asymptotic normality assumptions have often been used unjustifiably, leading to flawed results. This assumption of normal distribution limits the ability to analyze rare events. Unfortunately rare events do make up a large fraction of real text. However, more applicable methods based on likelihood ratio tests are available that yield good results with relatively small samples. These tests can be implemented efficiently, and have been used for the detection of composite terms and for the determination of domain-specific terms. In some cases, these measures perform much better than the methods previously used. In cases where traditional contingency table methods work well, the likelihood ratio tests described here are nearly identical. This paper describes the basis of a measure based on likelihood ratios that can be applied to the analysis of text.
% Cleaned Mendeley/BibBase auto-export: restored the missing citation key
% (taken from the exporter's own citation_key field), dropped the broken
% "identifiers = {[object Object]}" serialization artifact and the exporter's
% internal bookkeeping fields (profile/group ids, read/starred flags, etc.),
% renamed nonstandard "websites" -> "url" and "tags" -> "keywords", fixed the
% page-range hyphen to "--", and repaired missing sentence spacing in the
% abstract.
@article{Dunning1993,
  title     = {Accurate Methods for the Statistics of Surprise and Coincidence},
  author    = {Dunning, Ted},
  journal   = {Computational Linguistics},
  year      = {1993},
  volume    = {19},
  number    = {1},
  pages     = {61--74},
  publisher = {MIT Press},
  url       = {http://portal.acm.org/citation.cfm?id=972454},
  keywords  = {association measures},
  abstract  = {Much work has been done on the statistical analysis of text. In some cases reported in the literature, inappropriate statistical methods have been used, and statistical significance of results have not been addressed. In particular, asymptotic normality assumptions have often been used unjustifiably, leading to flawed results. This assumption of normal distribution limits the ability to analyze rare events. Unfortunately rare events do make up a large fraction of real text. However, more applicable methods based on likelihood ratio tests are available that yield good results with relatively small samples. These tests can be implemented efficiently, and have been used for the detection of composite terms and for the determination of domain-specific terms. In some cases, these measures perform much better than the methods previously used. In cases where traditional contingency table methods work well, the likelihood ratio tests described here are nearly identical. This paper describes the basis of a measure based on likelihood ratios that can be applied to the analysis of text.},
}
Downloads: 0