Big Bird: Transformers for Longer Sequences. Zaheer, M., Guruganesh, G., Dubey, A., Ainslie, J., Alberti, C., Ontanon, S., Pham, P., Ravula, A., Wang, Q., Yang, L., & Ahmed, A. Advances in Neural Information Processing Systems, Neural Information Processing Systems Foundation, July 2020. https://arxiv.org/abs/2007.14062v2

Abstract: Transformer-based models, such as BERT, have been among the most successful deep learning models for NLP. Unfortunately, one of their core limitations is the quadratic dependency (mainly in terms of memory) on the sequence length due to their full attention mechanism. To remedy this, we propose BigBird, a sparse attention mechanism that reduces this quadratic dependency to linear. We show that BigBird is a universal approximator of sequence functions and is Turing complete, thereby preserving these properties of the quadratic, full attention model. Along the way, our theoretical analysis reveals some of the benefits of having $O(1)$ global tokens (such as CLS) that attend to the entire sequence as part of the sparse attention mechanism. The proposed sparse attention can handle sequences up to 8x longer than was previously possible on similar hardware. As a consequence of this capability to handle longer context, BigBird drastically improves performance on various NLP tasks such as question answering and summarization. We also propose novel applications to genomics data.
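
The abstract describes a sparse attention pattern built around a constant number of global tokens; the paper's full pattern additionally combines a sliding local window and a few random connections per query. Below is a minimal, illustrative sketch (not the authors' implementation) that builds such a boolean attention mask in NumPy and shows that the number of allowed query-key pairs grows roughly linearly with sequence length, versus quadratically for full attention. All names and parameter values here (bigbird_style_mask, num_global, window, num_random) are assumptions chosen for illustration.

# Hypothetical sketch of a BigBird-style sparse attention mask: global tokens
# that attend to (and are attended by) everything, a sliding local window, and
# a few random connections per query.
import numpy as np

def bigbird_style_mask(n, num_global=2, window=3, num_random=2, seed=0):
    """Return an (n, n) boolean mask; True means query i may attend to key j."""
    rng = np.random.default_rng(seed)
    mask = np.zeros((n, n), dtype=bool)

    # Global tokens (e.g. CLS) attend everywhere and are attended by everyone.
    mask[:num_global, :] = True
    mask[:, :num_global] = True

    # Sliding window: each token attends to `window` neighbours on each side.
    for i in range(n):
        lo, hi = max(0, i - window), min(n, i + window + 1)
        mask[i, lo:hi] = True

    # Random attention: each token attends to a few randomly chosen keys.
    for i in range(n):
        mask[i, rng.choice(n, size=num_random, replace=False)] = True

    return mask

if __name__ == "__main__":
    for n in (256, 512, 1024):
        m = bigbird_style_mask(n)
        # Allowed pairs grow roughly linearly in n; full attention needs n * n.
        print(n, int(m.sum()), n * n)

In a real model, this mask would be applied to the attention scores before the softmax (disallowed positions set to -inf); the paper's implementation uses a blocked layout for efficiency rather than an explicit n-by-n mask.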
@article{zaheer2020bigbird,
  title     = {Big Bird: Transformers for Longer Sequences},
  author    = {Zaheer, Manzil and Guruganesh, Guru and Dubey, Avinava and Ainslie, Joshua and Alberti, Chris and Ontanon, Santiago and Pham, Philip and Ravula, Anirudh and Wang, Qifan and Yang, Li and Ahmed, Amr},
  journal   = {Advances in Neural Information Processing Systems},
  volume    = {2020-December},
  year      = {2020},
  month     = {7},
  publisher = {Neural Information Processing Systems Foundation},
  url       = {https://arxiv.org/abs/2007.14062v2}
}
{"_id":"RwaJoXhPP7QdJqckC","bibbaseid":"zaheer-guruganesh-dubey-ainslie-alberti-ontanon-pham-ravula-etal-bigbirdtransformersforlongersequences-2020","authorIDs":[],"author_short":["Zaheer, M.","Guruganesh, G.","Dubey, A.","Ainslie, J.","Alberti, C.","Ontanon, S.","Pham, P.","Ravula, A.","Wang, Q.","Yang, L.","Ahmed, A."],"bibdata":{"title":"Big Bird: Transformers for Longer Sequences","type":"article","year":"2020","volume":"2020-December","websites":"https://arxiv.org/abs/2007.14062v2","month":"7","publisher":"Neural information processing systems foundation","day":"28","id":"ca4522d1-6726-3e6a-907a-4d92af13c20e","created":"2021-09-06T11:04:39.854Z","accessed":"2021-09-06","file_attached":"true","profile_id":"48fc0258-023d-3602-860e-824092d62c56","group_id":"1ff583c0-be37-34fa-9c04-73c69437d354","last_modified":"2021-09-06T11:04:42.451Z","read":false,"starred":false,"authored":false,"confirmed":false,"hidden":false,"folder_uuids":"8d050117-e419-4b32-ad70-c875c74fa2b4","private_publication":false,"abstract":"Transformers-based models, such as BERT, have been one of the most successful\ndeep learning models for NLP. Unfortunately, one of their core limitations is\nthe quadratic dependency (mainly in terms of memory) on the sequence length due\nto their full attention mechanism. To remedy this, we propose, BigBird, a\nsparse attention mechanism that reduces this quadratic dependency to linear. We\nshow that BigBird is a universal approximator of sequence functions and is\nTuring complete, thereby preserving these properties of the quadratic, full\nattention model. Along the way, our theoretical analysis reveals some of the\nbenefits of having $O(1)$ global tokens (such as CLS), that attend to the\nentire sequence as part of the sparse attention mechanism. The proposed sparse\nattention can handle sequences of length up to 8x of what was previously\npossible using similar hardware. As a consequence of the capability to handle\nlonger context, BigBird drastically improves performance on various NLP tasks\nsuch as question answering and summarization. We also propose novel\napplications to genomics data.","bibtype":"article","author":"Zaheer, Manzil and Guruganesh, Guru and Dubey, Avinava and Ainslie, Joshua and Alberti, Chris and Ontanon, Santiago and Pham, Philip and Ravula, Anirudh and Wang, Qifan and Yang, Li and Ahmed, Amr","journal":"Advances in Neural Information Processing Systems","bibtex":"@article{\n title = {Big Bird: Transformers for Longer Sequences},\n type = {article},\n year = {2020},\n volume = {2020-December},\n websites = {https://arxiv.org/abs/2007.14062v2},\n month = {7},\n publisher = {Neural information processing systems foundation},\n day = {28},\n id = {ca4522d1-6726-3e6a-907a-4d92af13c20e},\n created = {2021-09-06T11:04:39.854Z},\n accessed = {2021-09-06},\n file_attached = {true},\n profile_id = {48fc0258-023d-3602-860e-824092d62c56},\n group_id = {1ff583c0-be37-34fa-9c04-73c69437d354},\n last_modified = {2021-09-06T11:04:42.451Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {false},\n hidden = {false},\n folder_uuids = {8d050117-e419-4b32-ad70-c875c74fa2b4},\n private_publication = {false},\n abstract = {Transformers-based models, such as BERT, have been one of the most successful\ndeep learning models for NLP. Unfortunately, one of their core limitations is\nthe quadratic dependency (mainly in terms of memory) on the sequence length due\nto their full attention mechanism. 
To remedy this, we propose, BigBird, a\nsparse attention mechanism that reduces this quadratic dependency to linear. We\nshow that BigBird is a universal approximator of sequence functions and is\nTuring complete, thereby preserving these properties of the quadratic, full\nattention model. Along the way, our theoretical analysis reveals some of the\nbenefits of having $O(1)$ global tokens (such as CLS), that attend to the\nentire sequence as part of the sparse attention mechanism. The proposed sparse\nattention can handle sequences of length up to 8x of what was previously\npossible using similar hardware. As a consequence of the capability to handle\nlonger context, BigBird drastically improves performance on various NLP tasks\nsuch as question answering and summarization. We also propose novel\napplications to genomics data.},\n bibtype = {article},\n author = {Zaheer, Manzil and Guruganesh, Guru and Dubey, Avinava and Ainslie, Joshua and Alberti, Chris and Ontanon, Santiago and Pham, Philip and Ravula, Anirudh and Wang, Qifan and Yang, Li and Ahmed, Amr},\n journal = {Advances in Neural Information Processing Systems}\n}","author_short":["Zaheer, M.","Guruganesh, G.","Dubey, A.","Ainslie, J.","Alberti, C.","Ontanon, S.","Pham, P.","Ravula, A.","Wang, Q.","Yang, L.","Ahmed, A."],"urls":{"Paper":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c/file/5ce87e79-ce3b-8141-4b9e-bb624a45ce56/full_text.pdf.pdf","Website":"https://arxiv.org/abs/2007.14062v2"},"biburl":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c","bibbaseid":"zaheer-guruganesh-dubey-ainslie-alberti-ontanon-pham-ravula-etal-bigbirdtransformersforlongersequences-2020","role":"author","metadata":{"authorlinks":{}},"downloads":0},"bibtype":"article","biburl":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c","creationDate":"2021-02-12T21:37:01.537Z","downloads":0,"keywords":[],"search_terms":["big","bird","transformers","longer","sequences","zaheer","guruganesh","dubey","ainslie","alberti","ontanon","pham","ravula","wang","yang","ahmed"],"title":"Big Bird: Transformers for Longer Sequences","year":2020,"dataSources":["qLJ7Ld8T2ZKybATHB","ya2CyA73rpZseyrZ8","2252seNhipfTmjEBQ"]}