Transformers are RNNs: Fast Autoregressive Transformers with Linear Attention. Katharopoulos, A., Vyas, A., Pappas, N., & Fleuret, F. 37th International Conference on Machine Learning, ICML 2020, pp. 5112-5121, International Machine Learning Society (IMLS), June 2020.
Paper: https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c/file/11521a26-33c9-b2f1-3294-f35b3137ecd2/full_text.pdf.pdf
Website: https://arxiv.org/abs/2006.16236v3

Abstract: Transformers achieve remarkable performance in several tasks but due to their quadratic complexity, with respect to the input's length, they are prohibitively slow for very long sequences. To address this limitation, we express the self-attention as a linear dot-product of kernel feature maps and make use of the associativity property of matrix products to reduce the complexity from $\mathcal{O}\left(N^2\right)$ to $\mathcal{O}\left(N\right)$, where $N$ is the sequence length. We show that this formulation permits an iterative implementation that dramatically accelerates autoregressive transformers and reveals their relationship to recurrent neural networks. Our linear transformers achieve similar performance to vanilla transformers and they are up to 4000x faster on autoregressive prediction of very long sequences.
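
As a quick illustration of the mechanism the abstract describes, below is a minimal NumPy sketch of linear attention for a single head, without batching. The feature map phi(x) = elu(x) + 1 and the running-state causal update follow the paper's formulation, but the function names and toy shapes are ours; this is a simplified sketch, not the authors' released implementation.

import numpy as np

def elu_feature_map(x):
    # phi(x) = elu(x) + 1: elementwise and strictly positive, as used in the paper
    return np.where(x > 0, x + 1.0, np.exp(x))

def linear_attention(Q, K, V, eps=1e-6):
    # Non-causal linear attention: softmax(Q K^T) V is replaced by
    # phi(Q) (phi(K)^T V), so the cost is O(N d^2) instead of O(N^2 d).
    Qf, Kf = elu_feature_map(Q), elu_feature_map(K)   # (N, d)
    KV = Kf.T @ V                                     # (d, d_v), computed once
    Z = Qf @ Kf.sum(axis=0) + eps                     # (N,) normalizer
    return (Qf @ KV) / Z[:, None]

def causal_linear_attention(Q, K, V, eps=1e-6):
    # Autoregressive variant: a fixed-size state (S, z) is updated one step at a
    # time, which is the recurrent (RNN) view highlighted in the title.
    Qf, Kf = elu_feature_map(Q), elu_feature_map(K)
    N, d = Qf.shape
    S = np.zeros((d, V.shape[1]))   # running sum of outer products phi(k_i) v_i^T
    z = np.zeros(d)                 # running sum of phi(k_i)
    out = np.empty((N, V.shape[1]))
    for i in range(N):
        S += np.outer(Kf[i], V[i])
        z += Kf[i]
        out[i] = (Qf[i] @ S) / (Qf[i] @ z + eps)
    return out

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    N, d = 8, 4
    Q, K, V = rng.normal(size=(N, d)), rng.normal(size=(N, d)), rng.normal(size=(N, d))
    print(linear_attention(Q, K, V).shape)          # (8, 4)
    print(causal_linear_attention(Q, K, V).shape)   # (8, 4)

The causal variant makes the RNN connection concrete: emitting one more token only updates the constant-size state (S, z) rather than re-attending over the whole prefix; this per-step cost being independent of sequence length is what enables the up to 4000x faster autoregressive prediction reported in the abstract.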
@article{katharopoulos2020transformers,
title = {Transformers are RNNs: Fast Autoregressive Transformers with Linear Attention},
type = {article},
year = {2020},
pages = {5112-5121},
volume = {PartF168147-7},
websites = {https://arxiv.org/abs/2006.16236v3},
month = {6},
publisher = {International Machine Learning Society (IMLS)},
day = {29},
abstract = {Transformers achieve remarkable performance in several tasks but due to their
quadratic complexity, with respect to the input's length, they are
prohibitively slow for very long sequences. To address this limitation, we
express the self-attention as a linear dot-product of kernel feature maps and
make use of the associativity property of matrix products to reduce the
complexity from $\mathcal{O}\left(N^2\right)$ to $\mathcal{O}\left(N\right)$,
where $N$ is the sequence length. We show that this formulation permits an
iterative implementation that dramatically accelerates autoregressive
transformers and reveals their relationship to recurrent neural networks. Our
linear transformers achieve similar performance to vanilla transformers and
they are up to 4000x faster on autoregressive prediction of very long
sequences.},
bibtype = {article},
author = {Katharopoulos, Angelos and Vyas, Apoorv and Pappas, Nikolaos and Fleuret, François},
journal = {37th International Conference on Machine Learning, ICML 2020}
}
{"_id":"pxkonyTvk2nGvsfSr","bibbaseid":"katharopoulos-vyas-pappas-fleuret-transformersarernnsfastautoregressivetransformerswithlinearattention-2020","author_short":["Katharopoulos, A.","Vyas, A.","Pappas, N.","Fleuret, F."],"bibdata":{"title":"Transformers are RNNs: Fast Autoregressive Transformers with Linear Attention","type":"article","year":"2020","pages":"5112-5121","volume":"PartF168147-7","websites":"https://arxiv.org/abs/2006.16236v3","month":"6","publisher":"International Machine Learning Society (IMLS)","day":"29","id":"6edabdcb-7bb6-36eb-a59b-a0a306480bd3","created":"2021-09-03T07:04:36.327Z","accessed":"2021-09-03","file_attached":"true","profile_id":"48fc0258-023d-3602-860e-824092d62c56","group_id":"1ff583c0-be37-34fa-9c04-73c69437d354","last_modified":"2021-09-03T07:04:39.222Z","read":false,"starred":false,"authored":false,"confirmed":false,"hidden":false,"folder_uuids":"8d050117-e419-4b32-ad70-c875c74fa2b4","private_publication":false,"abstract":"Transformers achieve remarkable performance in several tasks but due to their\nquadratic complexity, with respect to the input's length, they are\nprohibitively slow for very long sequences. To address this limitation, we\nexpress the self-attention as a linear dot-product of kernel feature maps and\nmake use of the associativity property of matrix products to reduce the\ncomplexity from $\\mathcalO\\left(N^2\\right)$ to $\\mathcalO\\left(N\\right)$,\nwhere $N$ is the sequence length. We show that this formulation permits an\niterative implementation that dramatically accelerates autoregressive\ntransformers and reveals their relationship to recurrent neural networks. Our\nlinear transformers achieve similar performance to vanilla transformers and\nthey are up to 4000x faster on autoregressive prediction of very long\nsequences.","bibtype":"article","author":"Katharopoulos, Angelos and Vyas, Apoorv and Pappas, Nikolaos and Fleuret, François","journal":"37th International Conference on Machine Learning, ICML 2020","bibtex":"@article{\n title = {Transformers are RNNs: Fast Autoregressive Transformers with Linear Attention},\n type = {article},\n year = {2020},\n pages = {5112-5121},\n volume = {PartF168147-7},\n websites = {https://arxiv.org/abs/2006.16236v3},\n month = {6},\n publisher = {International Machine Learning Society (IMLS)},\n day = {29},\n id = {6edabdcb-7bb6-36eb-a59b-a0a306480bd3},\n created = {2021-09-03T07:04:36.327Z},\n accessed = {2021-09-03},\n file_attached = {true},\n profile_id = {48fc0258-023d-3602-860e-824092d62c56},\n group_id = {1ff583c0-be37-34fa-9c04-73c69437d354},\n last_modified = {2021-09-03T07:04:39.222Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {false},\n hidden = {false},\n folder_uuids = {8d050117-e419-4b32-ad70-c875c74fa2b4},\n private_publication = {false},\n abstract = {Transformers achieve remarkable performance in several tasks but due to their\nquadratic complexity, with respect to the input's length, they are\nprohibitively slow for very long sequences. To address this limitation, we\nexpress the self-attention as a linear dot-product of kernel feature maps and\nmake use of the associativity property of matrix products to reduce the\ncomplexity from $\\mathcalO\\left(N^2\\right)$ to $\\mathcalO\\left(N\\right)$,\nwhere $N$ is the sequence length. We show that this formulation permits an\niterative implementation that dramatically accelerates autoregressive\ntransformers and reveals their relationship to recurrent neural networks. 
Our\nlinear transformers achieve similar performance to vanilla transformers and\nthey are up to 4000x faster on autoregressive prediction of very long\nsequences.},\n bibtype = {article},\n author = {Katharopoulos, Angelos and Vyas, Apoorv and Pappas, Nikolaos and Fleuret, François},\n journal = {37th International Conference on Machine Learning, ICML 2020}\n}","author_short":["Katharopoulos, A.","Vyas, A.","Pappas, N.","Fleuret, F."],"urls":{"Paper":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c/file/11521a26-33c9-b2f1-3294-f35b3137ecd2/full_text.pdf.pdf","Website":"https://arxiv.org/abs/2006.16236v3"},"biburl":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c","bibbaseid":"katharopoulos-vyas-pappas-fleuret-transformersarernnsfastautoregressivetransformerswithlinearattention-2020","role":"author","metadata":{"authorlinks":{}}},"bibtype":"article","biburl":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c","dataSources":["MpmemwLeQzDcKDq6x","2252seNhipfTmjEBQ"],"keywords":[],"search_terms":["transformers","rnns","fast","autoregressive","transformers","linear","attention","katharopoulos","vyas","pappas","fleuret"],"title":"Transformers are RNNs: Fast Autoregressive Transformers with Linear Attention","year":2020}