Enriching Word Vectors with Subword Information. Bojanowski, P.; Grave, E.; Joulin, A.; and Mikolov, T.
Enriching Word Vectors with Subword Information [pdf] Paper  ·  Enriching Word Vectors with Subword Information [pdf] Website  ·  abstract  ·  bibtex
Continuous word representations, trained on large unlabeled corpora are useful for many natural language processing tasks. Popular models that learn such representations ignore the morphology of words, by assigning a distinct vector to each word. This is a limitation, especially for languages with large vocabularies and many rare words. In this paper, we propose a new approach based on the skipgram model, where each word is represented as a bag of character n-grams. A vector representation is associated to each character n-gram; words being represented as the sum of these representations. Our method is fast, allowing to train models on large corpora quickly and allows us to compute word representations for words that did not appear in the training data. We evaluate our word representations on nine different languages, both on word similarity and analogy tasks. By comparing to recently proposed morphological word representations, we show that our vectors achieve state-of-the-art performance on these tasks.
@article{bojanowski2016enriching,
 title = {Enriching Word Vectors with Subword Information},
 author = {Bojanowski, Piotr and Grave, Edouard and Joulin, Armand and Mikolov, Tomas},
 year = {2016},
 eprint = {1607.04606},
 eprinttype = {arXiv},
 eprintclass = {cs.CL},
 url = {https://arxiv.org/pdf/1607.04606.pdf},
 urldate = {2018-02-05},
 type = {article},
 websites = {https://arxiv.org/pdf/1607.04606.pdf},
 id = {7b6d1241-6cae-3844-82c3-dc709229f630},
 created = {2018-02-05T19:17:08.931Z},
 accessed = {2018-02-05},
 file_attached = {true},
 profile_id = {371589bb-c770-37ff-8193-93c6f25ffeb1},
 group_id = {f982cd63-7ceb-3aa2-ac7e-a953963d6716},
 last_modified = {2018-02-05T19:17:12.110Z},
 read = {false},
 starred = {false},
 authored = {false},
 confirmed = {false},
 hidden = {false},
 private_publication = {false},
 internal-note = {review: added missing citation key, year, and arXiv eprint/url/urldate fields inferred from the websites link; de-hyphenated the abstract; Mendeley-specific fields kept verbatim for exporter round-tripping},
 abstract = {Continuous word representations, trained on large unlabeled corpora are useful for many natural language processing tasks. Popular models that learn such representations ignore the morphology of words, by assigning a distinct vector to each word. This is a limitation, especially for languages with large vocabularies and many rare words. In this paper, we propose a new approach based on the skipgram model, where each word is represented as a bag of character n-grams. A vector representation is associated to each character n-gram; words being represented as the sum of these representations. Our method is fast, allowing to train models on large corpora quickly and allows us to compute word representations for words that did not appear in the training data. We evaluate our word representations on nine different languages, both on word similarity and analogy tasks. By comparing to recently proposed morphological word representations, we show that our vectors achieve state-of-the-art performance on these tasks.},
 bibtype = {article}
}
Downloads: 0