Pay Less Attention with Lightweight and Dynamic Convolutions. Wu, F., Fan, A., Baevski, A., Dauphin, Y. N., & Auli, M. 7th International Conference on Learning Representations (ICLR 2019), January 2019. Paper: https://arxiv.org/abs/1901.10430v1

Abstract: Self-attention is a useful mechanism to build generative models for language
and images. It determines the importance of context elements by comparing each
element to the current time step. In this paper, we show that a very
lightweight convolution can perform competitively to the best reported
self-attention results. Next, we introduce dynamic convolutions which are
simpler and more efficient than self-attention. We predict separate convolution
kernels based solely on the current time-step in order to determine the
importance of context elements. The number of operations required by this
approach scales linearly in the input length, whereas self-attention is
quadratic. Experiments on large-scale machine translation, language modeling
and abstractive summarization show that dynamic convolutions improve over
strong self-attention models. On the WMT'14 English-German test set dynamic
convolutions achieve a new state of the art of 29.7 BLEU.
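The abstract describes the key mechanism: a kernel of fixed width is predicted from the current time step alone, softmax-normalized over its positions, and applied as a depthwise convolution shared across groups of channels, which is why the cost grows linearly with sequence length rather than quadratically. The following is a minimal illustrative sketch of that idea in PyTorch; it is not the authors' fairseq implementation. The class name DynamicConv1d, the causal left-padding, the head grouping, and the omission of the paper's GLU input projection and output projection are assumptions made for illustration.

# Minimal, illustrative sketch of a dynamic convolution layer (assumed names/shapes;
# not the paper's official fairseq implementation).
import torch
import torch.nn as nn
import torch.nn.functional as F


class DynamicConv1d(nn.Module):
    """Predict a softmax-normalized kernel from the current time step only,
    then apply it as a depthwise convolution shared within each head."""

    def __init__(self, d_model: int, kernel_size: int = 3, num_heads: int = 4):
        super().__init__()
        assert d_model % num_heads == 0
        self.k = kernel_size
        self.h = num_heads
        # Kernel predictor: one k-wide kernel per head, computed from the current token.
        self.weight_proj = nn.Linear(d_model, num_heads * kernel_size)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (batch, time, d_model)
        b, t, d = x.shape
        k, h = self.k, self.h
        # Predict and normalize per-position kernels: (b, t, h, k).
        w = F.softmax(self.weight_proj(x).view(b, t, h, k), dim=-1)
        # Gather each position's k-wide context window (causal left-padding assumed here).
        pad = F.pad(x, (0, 0, k - 1, 0))                       # (b, t + k - 1, d)
        windows = pad.unfold(dimension=1, size=k, step=1)      # (b, t, d, k)
        windows = windows.reshape(b, t, h, d // h, k)          # split channels into heads
        # Weighted sum over the window; one kernel is shared by all channels in a head.
        out = torch.einsum('bthck,bthk->bthc', windows, w)     # (b, t, h, d // h)
        return out.reshape(b, t, d)


# Tiny usage example: cost is linear in sequence length, unlike self-attention.
conv = DynamicConv1d(d_model=8, kernel_size=3, num_heads=2)
y = conv(torch.randn(2, 5, 8))
print(y.shape)  # torch.Size([2, 5, 8])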
@inproceedings{wu2019paylessattention,
title = {Pay Less Attention with Lightweight and Dynamic Convolutions},
year = {2019},
url = {https://arxiv.org/abs/1901.10430v1},
month = {1},
publisher = {International Conference on Learning Representations, ICLR},
day = {29},
abstract = {Self-attention is a useful mechanism to build generative models for language
and images. It determines the importance of context elements by comparing each
element to the current time step. In this paper, we show that a very
lightweight convolution can perform competitively to the best reported
self-attention results. Next, we introduce dynamic convolutions which are
simpler and more efficient than self-attention. We predict separate convolution
kernels based solely on the current time-step in order to determine the
importance of context elements. The number of operations required by this
approach scales linearly in the input length, whereas self-attention is
quadratic. Experiments on large-scale machine translation, language modeling
and abstractive summarization show that dynamic convolutions improve over
strong self-attention models. On the WMT'14 English-German test set dynamic
convolutions achieve a new state of the art of 29.7 BLEU.},
author = {Wu, Felix and Fan, Angela and Baevski, Alexei and Dauphin, Yann N. and Auli, Michael},
booktitle = {7th International Conference on Learning Representations, ICLR 2019}
}
{"_id":"sZ3quEC7hxkyannCA","bibbaseid":"wu-fan-baevski-dauphin-auli-paylessattentionwithlightweightanddynamicconvolutions-2019","authorIDs":[],"author_short":["Wu, F.","Fan, A.","Baevski, A.","Dauphin, Y., N.","Auli, M."],"bibdata":{"title":"Pay Less Attention with Lightweight and Dynamic Convolutions","type":"article","year":"2019","websites":"https://arxiv.org/abs/1901.10430v1","month":"1","publisher":"International Conference on Learning Representations, ICLR","day":"29","id":"a4bf96df-3972-3780-baf4-0bce55c21b88","created":"2021-08-24T07:15:01.585Z","accessed":"2021-08-24","file_attached":"true","profile_id":"48fc0258-023d-3602-860e-824092d62c56","group_id":"1ff583c0-be37-34fa-9c04-73c69437d354","last_modified":"2021-08-24T07:15:04.707Z","read":false,"starred":false,"authored":false,"confirmed":false,"hidden":false,"folder_uuids":"c509f25c-b687-4ab5-8859-72131b6658d3","private_publication":false,"abstract":"Self-attention is a useful mechanism to build generative models for language\nand images. It determines the importance of context elements by comparing each\nelement to the current time step. In this paper, we show that a very\nlightweight convolution can perform competitively to the best reported\nself-attention results. Next, we introduce dynamic convolutions which are\nsimpler and more efficient than self-attention. We predict separate convolution\nkernels based solely on the current time-step in order to determine the\nimportance of context elements. The number of operations required by this\napproach scales linearly in the input length, whereas self-attention is\nquadratic. Experiments on large-scale machine translation, language modeling\nand abstractive summarization show that dynamic convolutions improve over\nstrong self-attention models. On the WMT'14 English-German test set dynamic\nconvolutions achieve a new state of the art of 29.7 BLEU.","bibtype":"article","author":"Wu, Felix and Fan, Angela and Baevski, Alexei and Dauphin, Yann N. and Auli, Michael","journal":"7th International Conference on Learning Representations, ICLR 2019","bibtex":"@article{\n title = {Pay Less Attention with Lightweight and Dynamic Convolutions},\n type = {article},\n year = {2019},\n websites = {https://arxiv.org/abs/1901.10430v1},\n month = {1},\n publisher = {International Conference on Learning Representations, ICLR},\n day = {29},\n id = {a4bf96df-3972-3780-baf4-0bce55c21b88},\n created = {2021-08-24T07:15:01.585Z},\n accessed = {2021-08-24},\n file_attached = {true},\n profile_id = {48fc0258-023d-3602-860e-824092d62c56},\n group_id = {1ff583c0-be37-34fa-9c04-73c69437d354},\n last_modified = {2021-08-24T07:15:04.707Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {false},\n hidden = {false},\n folder_uuids = {c509f25c-b687-4ab5-8859-72131b6658d3},\n private_publication = {false},\n abstract = {Self-attention is a useful mechanism to build generative models for language\nand images. It determines the importance of context elements by comparing each\nelement to the current time step. In this paper, we show that a very\nlightweight convolution can perform competitively to the best reported\nself-attention results. Next, we introduce dynamic convolutions which are\nsimpler and more efficient than self-attention. We predict separate convolution\nkernels based solely on the current time-step in order to determine the\nimportance of context elements. 
The number of operations required by this\napproach scales linearly in the input length, whereas self-attention is\nquadratic. Experiments on large-scale machine translation, language modeling\nand abstractive summarization show that dynamic convolutions improve over\nstrong self-attention models. On the WMT'14 English-German test set dynamic\nconvolutions achieve a new state of the art of 29.7 BLEU.},\n bibtype = {article},\n author = {Wu, Felix and Fan, Angela and Baevski, Alexei and Dauphin, Yann N. and Auli, Michael},\n journal = {7th International Conference on Learning Representations, ICLR 2019}\n}","author_short":["Wu, F.","Fan, A.","Baevski, A.","Dauphin, Y., N.","Auli, M."],"urls":{"Paper":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c/file/5fa94644-d8a9-2f43-2c5d-06b5d402dce2/full_text.pdf.pdf","Website":"https://arxiv.org/abs/1901.10430v1"},"biburl":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c","bibbaseid":"wu-fan-baevski-dauphin-auli-paylessattentionwithlightweightanddynamicconvolutions-2019","role":"author","metadata":{"authorlinks":{}},"downloads":0},"bibtype":"article","biburl":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c","creationDate":"2020-01-27T02:13:33.832Z","downloads":0,"keywords":[],"search_terms":["pay","less","attention","lightweight","dynamic","convolutions","wu","fan","baevski","dauphin","auli"],"title":"Pay Less Attention with Lightweight and Dynamic Convolutions","year":2019,"dataSources":["hEoKh4ygEAWbAZ5iy","QGwcHf7xnb5mCCQi7","ya2CyA73rpZseyrZ8","2252seNhipfTmjEBQ"]}