ImageNet classification with deep convolutional neural networks. Krizhevsky, A., Sutskever, I., & Hinton, G. E. Communications of the ACM, 60(6):84–90, May 2017.
Abstract: We trained a large, deep convolutional neural network to classify the 1.2 million high-resolution images in the ImageNet LSVRC-2010 contest into the 1000 different classes. On the test data, we achieved top-1 and top-5 error rates of 37.5% and 17.0% which is considerably better than the previous state-of-the-art. The neural network, which has 60 million parameters and 650,000 neurons, consists of five convolutional layers, some of which are followed by max-pooling layers, and three fully-connected layers with a final 1000-way softmax. To make training faster, we used non-saturating neurons and a very efficient GPU implementation of the convolution operation. To reduce overfitting in the fully-connected layers we employed a recently-developed regularization method called “dropout” that proved to be very effective. We also entered a variant of this model in the ILSVRC-2012 competition and achieved a winning top-5 test error rate of 15.3%, compared to 26.2% achieved by the second-best entry.
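The architecture the abstract describes is concrete enough to sketch in code. Below is a minimal, single-GPU PyTorch reconstruction (five convolutional layers with ReLU activations, max-pooling after the first, second, and fifth, then three fully-connected layers feeding a 1000-way softmax), using the layer sizes reported in the paper. It is an illustrative sketch, not the authors' implementation: it omits the original two-GPU split and local response normalization, and the class name and the 227×227 input size are assumptions made here for the layer arithmetic to work out.

import torch
import torch.nn as nn

class AlexNetSketch(nn.Module):
    """Illustrative single-GPU sketch of the network described in the abstract."""

    def __init__(self, num_classes: int = 1000):
        super().__init__()
        # Five convolutional layers; channel counts follow the paper
        # (96, 256, 384, 384, 256). Max-pooling follows conv1, conv2, conv5.
        self.features = nn.Sequential(
            nn.Conv2d(3, 96, kernel_size=11, stride=4),     # conv1: 227x227 -> 55x55
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),          # 55x55 -> 27x27
            nn.Conv2d(96, 256, kernel_size=5, padding=2),   # conv2
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),          # 27x27 -> 13x13
            nn.Conv2d(256, 384, kernel_size=3, padding=1),  # conv3
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 384, kernel_size=3, padding=1),  # conv4
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),  # conv5
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),          # 13x13 -> 6x6
        )
        # Three fully-connected layers; dropout (p=0.5, as in the paper)
        # regularizes the first two, matching the abstract's description.
        self.classifier = nn.Sequential(
            nn.Dropout(p=0.5),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),  # logits; the 1000-way softmax is
                                           # applied by the cross-entropy loss
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.features(x)
        x = torch.flatten(x, 1)
        return self.classifier(x)

if __name__ == "__main__":
    model = AlexNetSketch()
    logits = model(torch.randn(1, 3, 227, 227))
    print(logits.shape)  # torch.Size([1, 1000])

At these layer sizes the sketch has roughly 62 million weights, consistent with the "60 million parameters" figure in the abstract; the bulk of them sit in the first fully-connected layer (9216 × 4096), which is why dropout is applied there.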
@article{krizhevsky_imagenet_2017,
title = {{ImageNet} classification with deep convolutional neural networks},
volume = {60},
issn = {0001-0782, 1557-7317},
url = {https://dl.acm.org/doi/10.1145/3065386},
doi = {10.1145/3065386},
abstract = {We trained a large, deep convolutional neural network to classify the 1.2 million high-resolution images in the ImageNet LSVRC-2010 contest into the 1000 different classes. On the test data, we achieved top-1 and top-5 error rates of 37.5\% and 17.0\% which is considerably better than the previous state-of-the-art. The neural network, which has 60 million parameters and 650,000 neurons, consists of five convolutional layers, some of which are followed by max-pooling layers, and three fully-connected layers with a final 1000-way softmax. To make training faster, we used non-saturating neurons and a very efficient GPU implementation of the convolution operation. To reduce overfitting in the fully-connected layers we employed a recently-developed regularization method called “dropout” that proved to be very effective. We also entered a variant of this model in the ILSVRC-2012 competition and achieved a winning top-5 test error rate of 15.3\%, compared to 26.2\% achieved by the second-best entry.},
language = {en},
number = {6},
urldate = {2022-03-02},
journal = {Communications of the ACM},
author = {Krizhevsky, Alex and Sutskever, Ilya and Hinton, Geoffrey E.},
month = may,
year = {2017},
pages = {84--90},
}
{"_id":"pk7YxjtWqGvpxodyr","bibbaseid":"krizhevsky-sutskever-hinton-imagenetclassificationwithdeepconvolutionalneuralnetworks-2017","downloads":0,"creationDate":"2019-03-11T15:34:35.445Z","title":"ImageNet classification with deep convolutional neural networks","author_short":["Krizhevsky, A.","Sutskever, I.","Hinton, G. E."],"year":2017,"bibtype":"article","biburl":"https://bibbase.org/zotero/mxmplx","bibdata":{"bibtype":"article","type":"article","title":"ImageNet classification with deep convolutional neural networks","volume":"60","issn":"0001-0782, 1557-7317","url":"https://dl.acm.org/doi/10.1145/3065386","doi":"10.1145/3065386","abstract":"We trained a large, deep convolutional neural network to classify the 1.2 million high-resolution images in the ImageNet LSVRC-2010 contest into the 1000 different classes. On the test data, we achieved top-1 and top-5 error rates of 37.5% and 17.0% which is considerably better than the previous state-of-the-art. The neural network, which has 60 million parameters and 650,000 neurons, consists of five convolutional layers, some of which are followed by max-pooling layers, and three fully-connected layers with a final 1000-way softmax. To make training faster, we used non-saturating neurons and a very efficient GPU implementation of the convolution operation. To reduce overfitting in the fully-connected layers we employed a recently-developed regularization method called “dropout” that proved to be very effective. We also entered a variant of this model in the ILSVRC-2012 competition and achieved a winning top-5 test error rate of 15.3%, compared to 26.2% achieved by the second-best entry.","language":"en","number":"6","urldate":"2022-03-02","journal":"Communications of the ACM","author":[{"propositions":[],"lastnames":["Krizhevsky"],"firstnames":["Alex"],"suffixes":[]},{"propositions":[],"lastnames":["Sutskever"],"firstnames":["Ilya"],"suffixes":[]},{"propositions":[],"lastnames":["Hinton"],"firstnames":["Geoffrey","E."],"suffixes":[]}],"month":"May","year":"2017","pages":"84–90","bibtex":"@article{krizhevsky_imagenet_2017,\n\ttitle = {{ImageNet} classification with deep convolutional neural networks},\n\tvolume = {60},\n\tissn = {0001-0782, 1557-7317},\n\turl = {https://dl.acm.org/doi/10.1145/3065386},\n\tdoi = {10.1145/3065386},\n\tabstract = {We trained a large, deep convolutional neural network to classify the 1.2 million high-resolution images in the ImageNet LSVRC-2010 contest into the 1000 different classes. On the test data, we achieved top-1 and top-5 error rates of 37.5\\% and 17.0\\% which is considerably better than the previous state-of-the-art. The neural network, which has 60 million parameters and 650,000 neurons, consists of five convolutional layers, some of which are followed by max-pooling layers, and three fully-connected layers with a final 1000-way softmax. To make training faster, we used non-saturating neurons and a very efficient GPU implementation of the convolution operation. To reduce overfitting in the fully-connected layers we employed a recently-developed regularization method called “dropout” that proved to be very effective. 
We also entered a variant of this model in the ILSVRC-2012 competition and achieved a winning top-5 test error rate of 15.3\\%, compared to 26.2\\% achieved by the second-best entry.},\n\tlanguage = {en},\n\tnumber = {6},\n\turldate = {2022-03-02},\n\tjournal = {Communications of the ACM},\n\tauthor = {Krizhevsky, Alex and Sutskever, Ilya and Hinton, Geoffrey E.},\n\tmonth = may,\n\tyear = {2017},\n\tpages = {84--90},\n}\n\n","author_short":["Krizhevsky, A.","Sutskever, I.","Hinton, G. E."],"key":"krizhevsky_imagenet_2017","id":"krizhevsky_imagenet_2017","bibbaseid":"krizhevsky-sutskever-hinton-imagenetclassificationwithdeepconvolutionalneuralnetworks-2017","role":"author","urls":{"Paper":"https://dl.acm.org/doi/10.1145/3065386"},"metadata":{"authorlinks":{}},"html":""},"search_terms":["imagenet","classification","deep","convolutional","neural","networks","krizhevsky","sutskever","hinton"],"keywords":[],"authorIDs":["gN5Lfqjgx8P4c7HJT"],"dataSources":["9qjpnLCP4efAcKjDr","6yqkfgG5XRop3rtQ6","gBvKD3NdQwvPCaD5C","aXmRAq63YsH7a3ufx"]}