An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale. Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., Uszkoreit, J., & Houlsby, N. 10, 2020. Paper Website abstract bibtex While the Transformer architecture has become the de-facto standard for
natural language processing tasks, its applications to computer vision remain
limited. In vision, attention is either applied in conjunction with
convolutional networks, or used to replace certain components of convolutional
networks while keeping their overall structure in place. We show that this
reliance on CNNs is not necessary and a pure transformer applied directly to
sequences of image patches can perform very well on image classification tasks.
When pre-trained on large amounts of data and transferred to multiple mid-sized
or small image recognition benchmarks (ImageNet, CIFAR-100, VTAB, etc.), Vision
Transformer (ViT) attains excellent results compared to state-of-the-art
convolutional networks while requiring substantially fewer computational
resources to train.
@comment{arXiv preprint; cleaned from a Mendeley/BibBase auto-export (added missing citation key, moved arXiv URL into eprint/url fields, dropped Mendeley-internal sync fields).}
@article{dosovitskiy2020image,
  title       = {An Image is Worth 16x16 Words: {Transformers} for Image Recognition at Scale},
  author      = {Dosovitskiy, Alexey and Beyer, Lucas and Kolesnikov, Alexander and Weissenborn, Dirk and Zhai, Xiaohua and Unterthiner, Thomas and Dehghani, Mostafa and Minderer, Matthias and Heigold, Georg and Gelly, Sylvain and Uszkoreit, Jakob and Houlsby, Neil},
  year        = {2020},
  month       = oct,
  eprint      = {2010.11929},
  eprinttype  = {arXiv},
  eprintclass = {cs.CV},
  url         = {https://arxiv.org/abs/2010.11929v2},
  urldate     = {2021-09-01},
  abstract    = {While the Transformer architecture has become the de-facto standard for
natural language processing tasks, its applications to computer vision remain
limited. In vision, attention is either applied in conjunction with
convolutional networks, or used to replace certain components of convolutional
networks while keeping their overall structure in place. We show that this
reliance on CNNs is not necessary and a pure transformer applied directly to
sequences of image patches can perform very well on image classification tasks.
When pre-trained on large amounts of data and transferred to multiple mid-sized
or small image recognition benchmarks (ImageNet, CIFAR-100, VTAB, etc.), Vision
Transformer (ViT) attains excellent results compared to state-of-the-art
convolutional networks while requiring substantially fewer computational
resources to train.},
}
Downloads: 0
{"_id":"6fxWk9ekpzYxddmZC","bibbaseid":"dosovitskiy-beyer-kolesnikov-weissenborn-zhai-unterthiner-dehghani-minderer-etal-animageisworth16x16wordstransformersforimagerecognitionatscale-2020","author_short":["Dosovitskiy, A.","Beyer, L.","Kolesnikov, A.","Weissenborn, D.","Zhai, X.","Unterthiner, T.","Dehghani, M.","Minderer, M.","Heigold, G.","Gelly, S.","Uszkoreit, J.","Houlsby, N."],"bibdata":{"title":"An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale","type":"article","year":"2020","websites":"https://arxiv.org/abs/2010.11929v2","month":"10","day":"22","id":"6dada552-482a-3081-a312-e28845e54a2d","created":"2021-09-01T08:02:18.175Z","accessed":"2021-09-01","file_attached":"true","profile_id":"48fc0258-023d-3602-860e-824092d62c56","group_id":"1ff583c0-be37-34fa-9c04-73c69437d354","last_modified":"2021-09-01T08:02:21.622Z","read":false,"starred":false,"authored":false,"confirmed":false,"hidden":false,"folder_uuids":"8d050117-e419-4b32-ad70-c875c74fa2b4","private_publication":false,"abstract":"While the Transformer architecture has become the de-facto standard for\nnatural language processing tasks, its applications to computer vision remain\nlimited. In vision, attention is either applied in conjunction with\nconvolutional networks, or used to replace certain components of convolutional\nnetworks while keeping their overall structure in place. 
We show that this\nreliance on CNNs is not necessary and a pure transformer applied directly to\nsequences of image patches can perform very well on image classification tasks.\nWhen pre-trained on large amounts of data and transferred to multiple mid-sized\nor small image recognition benchmarks (ImageNet, CIFAR-100, VTAB, etc.), Vision\nTransformer (ViT) attains excellent results compared to state-of-the-art\nconvolutional networks while requiring substantially fewer computational\nresources to train.","bibtype":"article","author":"Dosovitskiy, Alexey and Beyer, Lucas and Kolesnikov, Alexander and Weissenborn, Dirk and Zhai, Xiaohua and Unterthiner, Thomas and Dehghani, Mostafa and Minderer, Matthias and Heigold, Georg and Gelly, Sylvain and Uszkoreit, Jakob and Houlsby, Neil","bibtex":"@article{\n title = {An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale},\n type = {article},\n year = {2020},\n websites = {https://arxiv.org/abs/2010.11929v2},\n month = {10},\n day = {22},\n id = {6dada552-482a-3081-a312-e28845e54a2d},\n created = {2021-09-01T08:02:18.175Z},\n accessed = {2021-09-01},\n file_attached = {true},\n profile_id = {48fc0258-023d-3602-860e-824092d62c56},\n group_id = {1ff583c0-be37-34fa-9c04-73c69437d354},\n last_modified = {2021-09-01T08:02:21.622Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {false},\n hidden = {false},\n folder_uuids = {8d050117-e419-4b32-ad70-c875c74fa2b4},\n private_publication = {false},\n abstract = {While the Transformer architecture has become the de-facto standard for\nnatural language processing tasks, its applications to computer vision remain\nlimited. In vision, attention is either applied in conjunction with\nconvolutional networks, or used to replace certain components of convolutional\nnetworks while keeping their overall structure in place. 
We show that this\nreliance on CNNs is not necessary and a pure transformer applied directly to\nsequences of image patches can perform very well on image classification tasks.\nWhen pre-trained on large amounts of data and transferred to multiple mid-sized\nor small image recognition benchmarks (ImageNet, CIFAR-100, VTAB, etc.), Vision\nTransformer (ViT) attains excellent results compared to state-of-the-art\nconvolutional networks while requiring substantially fewer computational\nresources to train.},\n bibtype = {article},\n author = {Dosovitskiy, Alexey and Beyer, Lucas and Kolesnikov, Alexander and Weissenborn, Dirk and Zhai, Xiaohua and Unterthiner, Thomas and Dehghani, Mostafa and Minderer, Matthias and Heigold, Georg and Gelly, Sylvain and Uszkoreit, Jakob and Houlsby, Neil}\n}","author_short":["Dosovitskiy, A.","Beyer, L.","Kolesnikov, A.","Weissenborn, D.","Zhai, X.","Unterthiner, T.","Dehghani, M.","Minderer, M.","Heigold, G.","Gelly, S.","Uszkoreit, J.","Houlsby, N."],"urls":{"Paper":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c/file/4de167e7-3087-5745-0375-4a73f60777b2/full_text.pdf.pdf","Website":"https://arxiv.org/abs/2010.11929v2"},"biburl":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c","bibbaseid":"dosovitskiy-beyer-kolesnikov-weissenborn-zhai-unterthiner-dehghani-minderer-etal-animageisworth16x16wordstransformersforimagerecognitionatscale-2020","role":"author","metadata":{"authorlinks":{}}},"bibtype":"article","biburl":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c","dataSources":["rQFxZQs78YQJ9m34s","ya2CyA73rpZseyrZ8","gBvKD3NdQwvPCaD5C","2252seNhipfTmjEBQ"],"keywords":[],"search_terms":["image","worth","16x16","words","transformers","image","recognition","scale","dosovitskiy","beyer","kolesnikov","weissenborn","zhai","unterthiner","dehghani","minderer","heigold","gelly","uszkoreit","houlsby"],"title":"An Image is Worth 16x16 Words: Transformers for Image 
Recognition at Scale","year":2020}