LongNet: Scaling Transformers to 1,000,000,000 Tokens. Ding, J., Ma, S., Dong, L., Zhang, X., Huang, S., Wang, W., & Wei, F. 2023. arXiv:2307.02486. Comment: Work in progress.
Abstract: Scaling sequence length has become a critical demand in the era of large language models. However, existing methods struggle with either computational complexity or model expressivity, rendering the maximum sequence length restricted. In this work, we introduce LongNet, a Transformer variant that can scale sequence length to more than 1 billion tokens, without sacrificing the performance on shorter sequences. Specifically, we propose dilated attention, which expands the attentive field exponentially as the distance grows. LongNet has significant advantages: 1) it has linear computational complexity and a logarithmic dependency between tokens; 2) it can serve as a distributed trainer for extremely long sequences; 3) its dilated attention is a drop-in replacement for standard attention, which can be seamlessly integrated with the existing Transformer-based optimization. Experimental results demonstrate that LongNet yields strong performance on both long-sequence modeling and general language tasks. Our work opens up new possibilities for modeling very long sequences, e.g., treating a whole corpus or even the entire Internet as a sequence.
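The abstract's core idea, dilated attention with an attentive field that widens with distance at linear overall cost, can be made concrete with a small sketch. The snippet below is not the authors' implementation: it is a hypothetical PyTorch illustration of a single (segment length, dilation) branch, assuming a (batch, seq_len, heads, dim) tensor layout, a sequence length divisible by the segment length, and torch.nn.functional.scaled_dot_product_attention for the per-segment attention. In the paper, several such branches with growing segment lengths and dilation rates are combined, which is what produces the exponentially expanding attentive field.

import torch
import torch.nn.functional as F

def dilated_attention_branch(q, k, v, segment_len, dilation):
    """One (segment length, dilation) branch of dilated attention.

    Hypothetical sketch, not the authors' code: tensors are assumed to be
    (batch, seq_len, heads, dim), seq_len is assumed divisible by
    segment_len, and per-segment attention is delegated to PyTorch's
    scaled_dot_product_attention.
    """
    b, n, h, d = q.shape
    assert n % segment_len == 0
    segs = n // segment_len

    # Split the sequence into segments: (batch, segs, segment_len, heads, dim).
    qs = q.view(b, segs, segment_len, h, d)
    ks = k.view(b, segs, segment_len, h, d)
    vs = v.view(b, segs, segment_len, h, d)

    # Sparsify each segment by keeping every `dilation`-th position.
    idx = torch.arange(0, segment_len, dilation, device=q.device)
    qs, ks, vs = qs[:, :, idx], ks[:, :, idx], vs[:, :, idx]

    # Dense attention inside each sparsified segment.
    # Fold (batch, segs) into one dim and move heads in front: (b*segs, h, len, d).
    def fold(x):
        return x.flatten(0, 1).transpose(1, 2)

    out = F.scaled_dot_product_attention(fold(qs), fold(ks), fold(vs))
    out = out.transpose(1, 2).reshape(b, segs, len(idx), h, d)

    # Scatter the sparse outputs back to their original positions; positions
    # this branch skipped stay zero and would be covered by other branches
    # with different (segment_len, dilation) settings.
    full = torch.zeros(b, segs, segment_len, h, d, dtype=out.dtype, device=out.device)
    full[:, :, idx] = out
    return full.view(b, n, h, d)

As a rough usage illustration, calling the helper once with a small segment length and dilation 1 (dense local attention) and once with a much larger segment length and a larger dilation (sparse long-range attention), then combining the outputs, gives a crude stand-in for the weighted mixture the paper uses. Since every branch only attends within fixed-size, sparsified segments, each branch's cost stays linear in sequence length.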
@misc{ding2023longnet,
abstract = {Scaling sequence length has become a critical demand in the era of large
language models. However, existing methods struggle with either computational
complexity or model expressivity, rendering the maximum sequence length
restricted. In this work, we introduce LongNet, a Transformer variant that can
scale sequence length to more than 1 billion tokens, without sacrificing the
performance on shorter sequences. Specifically, we propose dilated attention,
which expands the attentive field exponentially as the distance grows. LongNet
has significant advantages: 1) it has linear computational complexity and a
logarithmic dependency between tokens; 2) it can serve as a distributed
trainer for extremely long sequences; 3) its dilated attention is a drop-in
replacement for standard attention, which can be seamlessly integrated with the
existing Transformer-based optimization. Experimental results demonstrate that
LongNet yields strong performance on both long-sequence modeling and general
language tasks. Our work opens up new possibilities for modeling very long
sequences, e.g., treating a whole corpus or even the entire Internet as a
sequence.},
added-at = {2023-07-12T13:31:41.000+0200},
author = {Ding, Jiayu and Ma, Shuming and Dong, Li and Zhang, Xingxing and Huang, Shaohan and Wang, Wenhui and Wei, Furu},
biburl = {https://www.bibsonomy.org/bibtex/20d55dc3a6ae821a95b7a9bbe911931d3/vincentqb},
description = {LongNet: Scaling Transformers to 1,000,000,000 Tokens},
interhash = {0f9206b5c91c6c4b68317f52d3304d6d},
intrahash = {0d55dc3a6ae821a95b7a9bbe911931d3},
keywords = {attention},
note = {arXiv:2307.02486. Comment: Work in progress},
timestamp = {2023-07-12T13:31:41.000+0200},
title = {LongNet: Scaling Transformers to 1,000,000,000 Tokens},
url = {http://arxiv.org/abs/2307.02486},
year = 2023
}
{"_id":"R4bSxRoPsur3YK3ZG","bibbaseid":"ding-ma-dong-zhang-huang-wang-wei-longnetscalingtransformersto1000000000tokens-2023","author_short":["Ding, J.","Ma, S.","Dong, L.","Zhang, X.","Huang, S.","Wang, W.","Wei, F."],"bibdata":{"bibtype":"misc","type":"misc","abstract":"Scaling sequence length has become a critical demand in the era of large language models. However, existing methods struggle with either computational complexity or model expressivity, rendering the maximum sequence length restricted. In this work, we introduce LongNet, a Transformer variant that can scale sequence length to more than 1 billion tokens, without sacrificing the performance on shorter sequences. Specifically, we propose dilated attention, which expands the attentive field exponentially as the distance grows. LongNet has significant advantages: 1) it has a linear computation complexity and a logarithm dependency between tokens; 2) it can be served as a distributed trainer for extremely long sequences; 3) its dilated attention is a drop-in replacement for standard attention, which can be seamlessly integrated with the existing Transformer-based optimization. Experiments results demonstrate that LongNet yields strong performance on both long-sequence modeling and general language tasks. Our work opens up new possibilities for modeling very long sequences, e.g., treating a whole corpus or even the entire Internet as a sequence.","added-at":"2023-07-12T13:31:41.000+0200","author":[{"propositions":[],"lastnames":["Ding"],"firstnames":["Jiayu"],"suffixes":[]},{"propositions":[],"lastnames":["Ma"],"firstnames":["Shuming"],"suffixes":[]},{"propositions":[],"lastnames":["Dong"],"firstnames":["Li"],"suffixes":[]},{"propositions":[],"lastnames":["Zhang"],"firstnames":["Xingxing"],"suffixes":[]},{"propositions":[],"lastnames":["Huang"],"firstnames":["Shaohan"],"suffixes":[]},{"propositions":[],"lastnames":["Wang"],"firstnames":["Wenhui"],"suffixes":[]},{"propositions":[],"lastnames":["Wei"],"firstnames":["Furu"],"suffixes":[]}],"biburl":"https://www.bibsonomy.org/bibtex/20d55dc3a6ae821a95b7a9bbe911931d3/vincentqb","description":"LongNet: Scaling Transformers to 1,000,000,000 Tokens","interhash":"0f9206b5c91c6c4b68317f52d3304d6d","intrahash":"0d55dc3a6ae821a95b7a9bbe911931d3","keywords":"attention","note":"cite arxiv:2307.02486Comment: Work in progress","timestamp":"2023-07-12T13:31:41.000+0200","title":"LongNet: Scaling Transformers to 1,000,000,000 Tokens","url":"http://arxiv.org/abs/2307.02486","year":"2023","bibtex":"@misc{ding2023longnet,\n abstract = {Scaling sequence length has become a critical demand in the era of large\r\nlanguage models. However, existing methods struggle with either computational\r\ncomplexity or model expressivity, rendering the maximum sequence length\r\nrestricted. In this work, we introduce LongNet, a Transformer variant that can\r\nscale sequence length to more than 1 billion tokens, without sacrificing the\r\nperformance on shorter sequences. Specifically, we propose dilated attention,\r\nwhich expands the attentive field exponentially as the distance grows. LongNet\r\nhas significant advantages: 1) it has a linear computation complexity and a\r\nlogarithm dependency between tokens; 2) it can be served as a distributed\r\ntrainer for extremely long sequences; 3) its dilated attention is a drop-in\r\nreplacement for standard attention, which can be seamlessly integrated with the\r\nexisting Transformer-based optimization. 
Experiments results demonstrate that\r\nLongNet yields strong performance on both long-sequence modeling and general\r\nlanguage tasks. Our work opens up new possibilities for modeling very long\r\nsequences, e.g., treating a whole corpus or even the entire Internet as a\r\nsequence.},\n added-at = {2023-07-12T13:31:41.000+0200},\n author = {Ding, Jiayu and Ma, Shuming and Dong, Li and Zhang, Xingxing and Huang, Shaohan and Wang, Wenhui and Wei, Furu},\n biburl = {https://www.bibsonomy.org/bibtex/20d55dc3a6ae821a95b7a9bbe911931d3/vincentqb},\n description = {LongNet: Scaling Transformers to 1,000,000,000 Tokens},\n interhash = {0f9206b5c91c6c4b68317f52d3304d6d},\n intrahash = {0d55dc3a6ae821a95b7a9bbe911931d3},\n keywords = {attention},\n note = {cite arxiv:2307.02486Comment: Work in progress},\n timestamp = {2023-07-12T13:31:41.000+0200},\n title = {LongNet: Scaling Transformers to 1,000,000,000 Tokens},\n url = {http://arxiv.org/abs/2307.02486},\n year = 2023\n}\n\n","author_short":["Ding, J.","Ma, S.","Dong, L.","Zhang, X.","Huang, S.","Wang, W.","Wei, F."],"key":"ding2023longnet","id":"ding2023longnet","bibbaseid":"ding-ma-dong-zhang-huang-wang-wei-longnetscalingtransformersto1000000000tokens-2023","role":"author","urls":{"Paper":"http://arxiv.org/abs/2307.02486"},"keyword":["attention"],"metadata":{"authorlinks":{}},"html":""},"bibtype":"misc","biburl":"http://www.bibsonomy.org/bib/author/zhang?items=1000","dataSources":["6yXn8CtuzyEbCSr2m"],"keywords":["attention"],"search_terms":["longnet","scaling","transformers","000","000","000","tokens","ding","ma","dong","zhang","huang","wang","wei"],"title":"LongNet: Scaling Transformers to 1,000,000,000 Tokens","year":2023}