One weird trick for parallelizing convolutional neural networks. Krizhevsky, A. April, 2014. arXiv:1404.5997 [cs]
Abstract: I present a new way to parallelize the training of convolutional neural networks across multiple GPUs. The method scales significantly better than all alternatives when applied to modern convolutional neural networks.
% arXiv preprint. Identifier is stored in eprint/archiveprefix (not in `note`)
% so biblatex/natbib styles can render and link it; DOI is the canonical
% DataCite DOI arXiv assigns to every paper (10.48550/arXiv.<id>).
% Title entered in Title Case so styles that downcase can do so losslessly.
@misc{krizhevsky_one_2014,
  author        = {Krizhevsky, Alex},
  title         = {One Weird Trick for Parallelizing Convolutional Neural Networks},
  year          = {2014},
  month         = apr,
  eprint        = {1404.5997},
  archiveprefix = {arXiv},
  primaryclass  = {cs},
  doi           = {10.48550/arXiv.1404.5997},
  url           = {http://arxiv.org/abs/1404.5997},
  urldate       = {2024-03-23},
  abstract      = {I present a new way to parallelize the training of convolutional neural networks across multiple GPUs. The method scales significantly better than all alternatives when applied to modern convolutional neural networks.},
  language      = {en},
  keywords      = {Computer Science - Distributed, Parallel, and Cluster Computing, Computer Science - Machine Learning, Computer Science - Neural and Evolutionary Computing},
}
Downloads: 0
{"_id":"vw4TDMjKFCoHdhvqH","bibbaseid":"krizhevsky-oneweirdtrickforparallelizingconvolutionalneuralnetworks-2014","downloads":0,"creationDate":"2015-09-03T07:18:47.352Z","title":"One weird trick for parallelizing convolutional neural networks","author_short":["Krizhevsky, A."],"year":2014,"bibtype":"misc","biburl":"https://bibbase.org/zotero/saurabhr","bibdata":{"bibtype":"misc","type":"misc","title":"One weird trick for parallelizing convolutional neural networks","url":"http://arxiv.org/abs/1404.5997","abstract":"I present a new way to parallelize the training of convolutional neural networks across multiple GPUs. The method scales significantly better than all alternatives when applied to modern convolutional neural networks.","language":"en","urldate":"2024-03-23","publisher":"arXiv","author":[{"propositions":[],"lastnames":["Krizhevsky"],"firstnames":["Alex"],"suffixes":[]}],"month":"April","year":"2014","note":"arXiv:1404.5997 [cs]","keywords":"Computer Science - Distributed, Parallel, and Cluster Computing, Computer Science - Machine Learning, Computer Science - Neural and Evolutionary Computing","bibtex":"@misc{krizhevsky_one_2014,\n\ttitle = {One weird trick for parallelizing convolutional neural networks},\n\turl = {http://arxiv.org/abs/1404.5997},\n\tabstract = {I present a new way to parallelize the training of convolutional neural networks across multiple GPUs. 
The method scales significantly better than all alternatives when applied to modern convolutional neural networks.},\n\tlanguage = {en},\n\turldate = {2024-03-23},\n\tpublisher = {arXiv},\n\tauthor = {Krizhevsky, Alex},\n\tmonth = apr,\n\tyear = {2014},\n\tnote = {arXiv:1404.5997 [cs]},\n\tkeywords = {Computer Science - Distributed, Parallel, and Cluster Computing, Computer Science - Machine Learning, Computer Science - Neural and Evolutionary Computing},\n}\n\n\n\n\n\n\n\n","author_short":["Krizhevsky, A."],"key":"krizhevsky_one_2014","id":"krizhevsky_one_2014","bibbaseid":"krizhevsky-oneweirdtrickforparallelizingconvolutionalneuralnetworks-2014","role":"author","urls":{"Paper":"http://arxiv.org/abs/1404.5997"},"keyword":["Computer Science - Distributed","Parallel","and Cluster Computing","Computer Science - Machine Learning","Computer Science - Neural and Evolutionary Computing"],"metadata":{"authorlinks":{}},"downloads":0,"html":""},"search_terms":["one","weird","trick","parallelizing","convolutional","neural","networks","krizhevsky"],"keywords":["computer science - distributed","parallel","and cluster computing","computer science - machine learning","computer science - neural and evolutionary computing"],"authorIDs":[],"dataSources":["rsreeGengKHQY7NKF","nxjWwW7fWbb5tfpKz"]}