Revisiting Self-Supervised Visual Representation Learning. Kolesnikov, A., Zhai, X., & Beyer, L. arXiv:1901.09005 [cs], January 2019.
Abstract: Unsupervised visual representation learning remains a largely unsolved problem in computer vision research. Among a big body of recently proposed approaches for unsupervised learning of visual representations, a class of self-supervised techniques achieves superior performance on many challenging benchmarks. A large number of the pretext tasks for self-supervised learning have been studied, but other important aspects, such as the choice of convolutional neural networks (CNN), have not received equal attention. Therefore, we revisit numerous previously proposed self-supervised models, conduct a thorough large-scale study and, as a result, uncover multiple crucial insights. We challenge a number of common practices in self-supervised visual representation learning and observe that standard recipes for CNN design do not always translate to self-supervised representation learning. As part of our study, we drastically boost the performance of previously proposed techniques and outperform previously published state-of-the-art results by a large margin.
@article{kolesnikov_revisiting_2019,
title = {Revisiting {Self}-{Supervised} {Visual} {Representation} {Learning}},
url = {http://arxiv.org/abs/1901.09005},
abstract = {Unsupervised visual representation learning remains a largely unsolved problem in computer vision research. Among a big body of recently proposed approaches for unsupervised learning of visual representations, a class of self-supervised techniques achieves superior performance on many challenging benchmarks. A large number of the pretext tasks for self-supervised learning have been studied, but other important aspects, such as the choice of convolutional neural networks (CNN), have not received equal attention. Therefore, we revisit numerous previously proposed self-supervised models, conduct a thorough large-scale study and, as a result, uncover multiple crucial insights. We challenge a number of common practices in self-supervised visual representation learning and observe that standard recipes for CNN design do not always translate to self-supervised representation learning. As part of our study, we drastically boost the performance of previously proposed techniques and outperform previously published state-of-the-art results by a large margin.},
urldate = {2022-04-25},
journal = {arXiv:1901.09005 [cs]},
author = {Kolesnikov, Alexander and Zhai, Xiaohua and Beyer, Lucas},
month = jan,
year = {2019},
note = {arXiv: 1901.09005},
keywords = {Computer Science - Computer Vision and Pattern Recognition},
}
{"_id":"m59zYBWAneffsJE6h","bibbaseid":"kolesnikov-zhai-beyer-revisitingselfsupervisedvisualrepresentationlearning-2019","author_short":["Kolesnikov, A.","Zhai, X.","Beyer, L."],"bibdata":{"bibtype":"article","type":"article","title":"Revisiting Self-Supervised Visual Representation Learning","url":"http://arxiv.org/abs/1901.09005","abstract":"Unsupervised visual representation learning remains a largely unsolved problem in computer vision research. Among a big body of recently proposed approaches for unsupervised learning of visual representations, a class of self-supervised techniques achieves superior performance on many challenging benchmarks. A large number of the pretext tasks for self-supervised learning have been studied, but other important aspects, such as the choice of convolutional neural networks (CNN), has not received equal attention. Therefore, we revisit numerous previously proposed self-supervised models, conduct a thorough large scale study and, as a result, uncover multiple crucial insights. We challenge a number of common practices in selfsupervised visual representation learning and observe that standard recipes for CNN design do not always translate to self-supervised representation learning. As part of our study, we drastically boost the performance of previously proposed techniques and outperform previously published state-of-the-art results by a large margin.","urldate":"2022-04-25","journal":"arXiv:1901.09005 [cs]","author":[{"propositions":[],"lastnames":["Kolesnikov"],"firstnames":["Alexander"],"suffixes":[]},{"propositions":[],"lastnames":["Zhai"],"firstnames":["Xiaohua"],"suffixes":[]},{"propositions":[],"lastnames":["Beyer"],"firstnames":["Lucas"],"suffixes":[]}],"month":"January","year":"2019","note":"arXiv: 1901.09005","keywords":"Computer Science - Computer Vision and Pattern Recognition","bibtex":"@article{kolesnikov_revisiting_2019,\n\ttitle = {Revisiting {Self}-{Supervised} {Visual} {Representation} {Learning}},\n\turl = {http://arxiv.org/abs/1901.09005},\n\tabstract = {Unsupervised visual representation learning remains a largely unsolved problem in computer vision research. Among a big body of recently proposed approaches for unsupervised learning of visual representations, a class of self-supervised techniques achieves superior performance on many challenging benchmarks. A large number of the pretext tasks for self-supervised learning have been studied, but other important aspects, such as the choice of convolutional neural networks (CNN), has not received equal attention. Therefore, we revisit numerous previously proposed self-supervised models, conduct a thorough large scale study and, as a result, uncover multiple crucial insights. We challenge a number of common practices in selfsupervised visual representation learning and observe that standard recipes for CNN design do not always translate to self-supervised representation learning. 
As part of our study, we drastically boost the performance of previously proposed techniques and outperform previously published state-of-the-art results by a large margin.},\n\turldate = {2022-04-25},\n\tjournal = {arXiv:1901.09005 [cs]},\n\tauthor = {Kolesnikov, Alexander and Zhai, Xiaohua and Beyer, Lucas},\n\tmonth = jan,\n\tyear = {2019},\n\tnote = {arXiv: 1901.09005},\n\tkeywords = {Computer Science - Computer Vision and Pattern Recognition},\n}\n\n\n\n","author_short":["Kolesnikov, A.","Zhai, X.","Beyer, L."],"key":"kolesnikov_revisiting_2019","id":"kolesnikov_revisiting_2019","bibbaseid":"kolesnikov-zhai-beyer-revisitingselfsupervisedvisualrepresentationlearning-2019","role":"author","urls":{"Paper":"http://arxiv.org/abs/1901.09005"},"keyword":["Computer Science - Computer Vision and Pattern Recognition"],"metadata":{"authorlinks":{}},"html":""},"bibtype":"article","biburl":"https://bibbase.org/zotero/mh_lenguyen","dataSources":["iwKepCrWBps7ojhDx"],"keywords":["computer science - computer vision and pattern recognition"],"search_terms":["revisiting","self","supervised","visual","representation","learning","kolesnikov","zhai","beyer"],"title":"Revisiting Self-Supervised Visual Representation Learning","year":2019}