Evaluating word embedding models: methods and experimental results. Wang, B., Wang, A., Chen, F., Wang, Y., & Kuo, C. J. 2019.
Abstract: Extensive evaluation on a large number of word embedding models for language processing applications is conducted in this work. First, we introduce popular word embedding models and discuss desired properties of word models and evaluation methods (or evaluators). Then, we categorize evaluators into intrinsic and extrinsic two types. Intrinsic evaluators test the quality of a representation independent of specific natural language processing tasks while extrinsic evaluators use word embeddings as input features to a downstream task and measure changes in performance metrics specific to that task. We report experimental results of intrinsic and extrinsic evaluators on six word embedding models. It is shown that different evaluators focus on different aspects of word models, and some are more correlated with natural language processing tasks. Finally, we adopt correlation analysis to study performance consistency of extrinsic and intrinsic evaluators.
@article{wang_evaluating_2019,
  title    = {Evaluating word embedding models: methods and experimental results},
  author   = {Wang, Bin and Wang, Angela and Chen, Fenxiao and Wang, Yuncheng and Kuo, C.-C. Jay},
  journal  = {APSIPA Transactions on Signal and Information Processing},
  volume   = {8},
  pages    = {e19},
  year     = {2019},
  doi      = {10.1017/ATSIP.2019.12},
  url      = {https://www.cambridge.org/core/services/aop-cambridge-core/content/view/EDF43F837150B94E71DBB36B28B85E79/S204877031900012Xa.pdf/div-class-title-evaluating-word-embedding-models-methods-and-experimental-results-div.pdf},
  abstract = {Extensive evaluation on a large number of word embedding models for language processing applications is conducted in this work. First, we introduce popular word embedding models and discuss desired properties of word models and evaluation methods (or evaluators). Then, we categorize evaluators into intrinsic and extrinsic two types. Intrinsic evaluators test the quality of a representation independent of specific natural language processing tasks while extrinsic evaluators use word embeddings as input features to a downstream task and measure changes in performance metrics specific to that task. We report experimental results of intrinsic and extrinsic evaluators on six word embedding models. It is shown that different evaluators focus on different aspects of word models, and some are more correlated with natural language processing tasks. Finally, we adopt correlation analysis to study performance consistency of extrinsic and intrinsic evaluators.},
  keywords = {Word Embeddings},
}
Downloads: 0
{"_id":"FPYwSrvRqBDtacjNP","bibbaseid":"wang-wang-chen-wang-kuo-evaluatingwordembeddingmodelsmethodsandexperimentalresults-2019","author_short":["Wang, B.","Wang, A.","Chen, F.","Wang, Y.","Kuo, C. J."],"bibdata":{"bibtype":"article","type":"article","title":"Evaluating word embedding models: methods and experimental results","url":"https://www.cambridge.org/core/services/aop-cambridge-core/content/view/EDF43F837150B94E71DBB36B28B85E79/S204877031900012Xa.pdf/div-class-title-evaluating-word-embedding-models-methods-and-experimental-results-div.pdf","doi":"doi:10.1017/ATSIP.2019.12","abstract":"Extensive evaluation on a large number of word embedding models for language processing applications is conducted in this work. First, we introduce popular word embedding models and discuss desired properties of word models and evaluation methods (or evaluators). Then, we categorize evaluators into intrinsic and extrinsic two types. Intrinsic evaluators test the quality of a representation independent of specific natural language processing tasks while extrinsic evaluators use word embeddings as input features to a downstream task and measure changes in performance metrics specific to that task. We report experimental results of intrinsic and extrinsic evaluators on six word embedding models. It is shown that different evaluators focus on different aspects of word models, and some are more correlated with natural language processing tasks. 
Finally, we adopt correlation analysis to study performance consistency of extrinsic and intrinsic evaluators.","author":[{"propositions":[],"lastnames":["Wang"],"firstnames":["Bin"],"suffixes":[]},{"propositions":[],"lastnames":["Wang"],"firstnames":["Angela"],"suffixes":[]},{"propositions":[],"lastnames":["Chen"],"firstnames":["Fenxiao"],"suffixes":[]},{"propositions":[],"lastnames":["Wang"],"firstnames":["Yuncheng"],"suffixes":[]},{"propositions":[],"lastnames":["Kuo"],"firstnames":["C.-C.","Jay"],"suffixes":[]}],"year":"2019","keywords":"Word Embeddings","bibtex":"@article{wang_evaluating_2019,\n\ttitle = {Evaluating word embedding models: methods and experimental results},\n\turl = {https://www.cambridge.org/core/services/aop-cambridge-core/content/view/EDF43F837150B94E71DBB36B28B85E79/S204877031900012Xa.pdf/div-class-title-evaluating-word-embedding-models-methods-and-experimental-results-div.pdf},\n\tdoi = {doi:10.1017/ATSIP.2019.12},\n\tabstract = {Extensive evaluation on a large number of word embedding models for language processing applications is conducted in this\nwork. First, we introduce popular word embedding models and discuss desired properties of word models and evaluation methods\n(or evaluators). Then, we categorize evaluators into intrinsic and extrinsic two types. Intrinsic evaluators test the quality of a\nrepresentation independent of specific natural language processing tasks while extrinsic evaluators use word embeddings as input\nfeatures to a downstream task and measure changes in performance metrics specific to that task. We report experimental results\nof intrinsic and extrinsic evaluators on six word embedding models. It is shown that different evaluators focus on different aspects\nof word models, and some are more correlated with natural language processing tasks. 
Finally, we adopt correlation analysis to\nstudy performance consistency of extrinsic and intrinsic evaluators.},\n\tauthor = {Wang, Bin and Wang, Angela and Chen, Fenxiao and Wang, Yuncheng and Kuo, C.-C. Jay},\n\tyear = {2019},\n\tkeywords = {Word Embeddings},\n}\n\n\n\n","author_short":["Wang, B.","Wang, A.","Chen, F.","Wang, Y.","Kuo, C. J."],"key":"wang_evaluating_2019","id":"wang_evaluating_2019","bibbaseid":"wang-wang-chen-wang-kuo-evaluatingwordembeddingmodelsmethodsandexperimentalresults-2019","role":"author","urls":{"Paper":"https://www.cambridge.org/core/services/aop-cambridge-core/content/view/EDF43F837150B94E71DBB36B28B85E79/S204877031900012Xa.pdf/div-class-title-evaluating-word-embedding-models-methods-and-experimental-results-div.pdf"},"keyword":["Word Embeddings"],"metadata":{"authorlinks":{}}},"bibtype":"article","biburl":"https://bibbase.org/zotero-group/schulzkx/5158478","dataSources":["JFDnASMkoQCjjGL8E"],"keywords":["word embeddings"],"search_terms":["evaluating","word","embedding","models","methods","experimental","results","wang","wang","chen","wang","kuo"],"title":"Evaluating word embedding models: methods and experimental results","year":2019}