EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks. Tan, M. & Le, Q. V. 36th International Conference on Machine Learning, ICML 2019, 2019-June:10691-10700, International Machine Learning Society (IMLS), May 2019.

Abstract: Convolutional Neural Networks (ConvNets) are commonly developed at a fixed
resource budget, and then scaled up for better accuracy if more resources are
available. In this paper, we systematically study model scaling and identify
that carefully balancing network depth, width, and resolution can lead to
better performance. Based on this observation, we propose a new scaling method
that uniformly scales all dimensions of depth/width/resolution using a simple
yet highly effective compound coefficient. We demonstrate the effectiveness of
this method on scaling up MobileNets and ResNet. To go even further, we use neural architecture search to design a new
baseline network and scale it up to obtain a family of models, called
EfficientNets, which achieve much better accuracy and efficiency than previous
ConvNets. In particular, our EfficientNet-B7 achieves state-of-the-art 84.3%
top-1 accuracy on ImageNet, while being 8.4x smaller and 6.1x faster on
inference than the best existing ConvNet. Our EfficientNets also transfer well
and achieve state-of-the-art accuracy on CIFAR-100 (91.7%), Flowers (98.8%),
and 3 other transfer learning datasets, with an order of magnitude fewer
parameters. Source code is at
https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet.
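
As a rough illustration of the compound scaling rule the abstract describes, the sketch below scales depth, width, and input resolution together from a single coefficient. It is a minimal sketch assuming the alpha/beta/gamma values reported in the paper for the B0 baseline; it is not the reference implementation from the linked repository, and the released B1-B7 models use hand-rounded configurations rather than these raw multipliers.

# Minimal sketch of compound scaling (Python).
# Assumed coefficients from the paper: alpha=1.2 (depth), beta=1.1 (width),
# gamma=1.15 (resolution), chosen so alpha * beta**2 * gamma**2 ~= 2,
# i.e. each +1 of the compound coefficient phi roughly doubles FLOPs.
ALPHA, BETA, GAMMA = 1.2, 1.1, 1.15

def compound_scale(phi: int, base_resolution: int = 224):
    """Return (depth multiplier, width multiplier, input resolution) for a given phi."""
    depth_mult = ALPHA ** phi                             # scale the number of layers
    width_mult = BETA ** phi                              # scale the number of channels
    resolution = round(base_resolution * GAMMA ** phi)    # scale the input image size
    return depth_mult, width_mult, resolution

if __name__ == "__main__":
    for phi in range(5):
        d, w, r = compound_scale(phi)
        print(f"phi={phi}: depth x{d:.2f}, width x{w:.2f}, input ~{r}px")
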
@article{tan2019efficientnet,
title = {EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks},
type = {article},
year = {2019},
pages = {10691-10700},
volume = {2019-June},
websites = {https://arxiv.org/abs/1905.11946v5},
month = {5},
publisher = {International Machine Learning Society (IMLS)},
day = {28},
abstract = {Convolutional Neural Networks (ConvNets) are commonly developed at a fixed
resource budget, and then scaled up for better accuracy if more resources are
available. In this paper, we systematically study model scaling and identify
that carefully balancing network depth, width, and resolution can lead to
better performance. Based on this observation, we propose a new scaling method
that uniformly scales all dimensions of depth/width/resolution using a simple
yet highly effective compound coefficient. We demonstrate the effectiveness of
this method on scaling up MobileNets and ResNet. To go even further, we use neural architecture search to design a new
baseline network and scale it up to obtain a family of models, called
EfficientNets, which achieve much better accuracy and efficiency than previous
ConvNets. In particular, our EfficientNet-B7 achieves state-of-the-art 84.3%
top-1 accuracy on ImageNet, while being 8.4x smaller and 6.1x faster on
inference than the best existing ConvNet. Our EfficientNets also transfer well
and achieve state-of-the-art accuracy on CIFAR-100 (91.7%), Flowers (98.8%),
and 3 other transfer learning datasets, with an order of magnitude fewer
parameters. Source code is at
https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet.},
author = {Tan, Mingxing and Le, Quoc V.},
journal = {36th International Conference on Machine Learning, ICML 2019}
}
{"_id":"cezCvDyJFpoSEoBGq","bibbaseid":"tan-le-efficientnetrethinkingmodelscalingforconvolutionalneuralnetworks-2019","author_short":["Tan, M.","Le, Q., V."],"bibdata":{"title":"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks","type":"article","year":"2019","pages":"10691-10700","volume":"2019-June","websites":"https://arxiv.org/abs/1905.11946v5","month":"5","publisher":"International Machine Learning Society (IMLS)","day":"28","id":"e7021e32-12a2-36c9-a44e-524a315cb308","created":"2023-04-06T08:06:56.701Z","accessed":"2023-04-06","file_attached":"true","profile_id":"f1f70cad-e32d-3de2-a3c0-be1736cb88be","group_id":"5ec9cc91-a5d6-3de5-82f3-3ef3d98a89c1","last_modified":"2023-04-11T08:22:41.755Z","read":false,"starred":false,"authored":false,"confirmed":false,"hidden":false,"folder_uuids":"5a010301-acb6-4642-a6b2-8afaee1b741c,bd3c6f2e-3514-47cf-bc42-12db8b9abe45","private_publication":false,"abstract":"Convolutional Neural Networks (ConvNets) are commonly developed at a fixed\nresource budget, and then scaled up for better accuracy if more resources are\navailable. In this paper, we systematically study model scaling and identify\nthat carefully balancing network depth, width, and resolution can lead to\nbetter performance. Based on this observation, we propose a new scaling method\nthat uniformly scales all dimensions of depth/width/resolution using a simple\nyet highly effective compound coefficient. We demonstrate the effectiveness of\nthis method on scaling up MobileNets and ResNet. To go even further, we use neural architecture search to design a new\nbaseline network and scale it up to obtain a family of models, called\nEfficientNets, which achieve much better accuracy and efficiency than previous\nConvNets. In particular, our EfficientNet-B7 achieves state-of-the-art 84.3%\ntop-1 accuracy on ImageNet, while being 8.4x smaller and 6.1x faster on\ninference than the best existing ConvNet. Our EfficientNets also transfer well\nand achieve state-of-the-art accuracy on CIFAR-100 (91.7%), Flowers (98.8%),\nand 3 other transfer learning datasets, with an order of magnitude fewer\nparameters. Source code is at\nhttps://github.com/tensorflow/tpu/tree/master/models/official/efficientnet.","bibtype":"article","author":"Tan, Mingxing and Le, Quoc V.","journal":"36th International Conference on Machine Learning, ICML 2019","bibtex":"@article{\n title = {EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks},\n type = {article},\n year = {2019},\n pages = {10691-10700},\n volume = {2019-June},\n websites = {https://arxiv.org/abs/1905.11946v5},\n month = {5},\n publisher = {International Machine Learning Society (IMLS)},\n day = {28},\n id = {e7021e32-12a2-36c9-a44e-524a315cb308},\n created = {2023-04-06T08:06:56.701Z},\n accessed = {2023-04-06},\n file_attached = {true},\n profile_id = {f1f70cad-e32d-3de2-a3c0-be1736cb88be},\n group_id = {5ec9cc91-a5d6-3de5-82f3-3ef3d98a89c1},\n last_modified = {2023-04-11T08:22:41.755Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {false},\n hidden = {false},\n folder_uuids = {5a010301-acb6-4642-a6b2-8afaee1b741c,bd3c6f2e-3514-47cf-bc42-12db8b9abe45},\n private_publication = {false},\n abstract = {Convolutional Neural Networks (ConvNets) are commonly developed at a fixed\nresource budget, and then scaled up for better accuracy if more resources are\navailable. 
In this paper, we systematically study model scaling and identify\nthat carefully balancing network depth, width, and resolution can lead to\nbetter performance. Based on this observation, we propose a new scaling method\nthat uniformly scales all dimensions of depth/width/resolution using a simple\nyet highly effective compound coefficient. We demonstrate the effectiveness of\nthis method on scaling up MobileNets and ResNet. To go even further, we use neural architecture search to design a new\nbaseline network and scale it up to obtain a family of models, called\nEfficientNets, which achieve much better accuracy and efficiency than previous\nConvNets. In particular, our EfficientNet-B7 achieves state-of-the-art 84.3%\ntop-1 accuracy on ImageNet, while being 8.4x smaller and 6.1x faster on\ninference than the best existing ConvNet. Our EfficientNets also transfer well\nand achieve state-of-the-art accuracy on CIFAR-100 (91.7%), Flowers (98.8%),\nand 3 other transfer learning datasets, with an order of magnitude fewer\nparameters. Source code is at\nhttps://github.com/tensorflow/tpu/tree/master/models/official/efficientnet.},\n bibtype = {article},\n author = {Tan, Mingxing and Le, Quoc V.},\n journal = {36th International Conference on Machine Learning, ICML 2019}\n}","author_short":["Tan, M.","Le, Q., V."],"urls":{"Paper":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c/file/6fc7e5fb-a1b6-0191-2042-25af6c8d2ade/full_text.pdf.pdf","Website":"https://arxiv.org/abs/1905.11946v5"},"biburl":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c","bibbaseid":"tan-le-efficientnetrethinkingmodelscalingforconvolutionalneuralnetworks-2019","role":"author","metadata":{"authorlinks":{}}},"bibtype":"article","biburl":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c","dataSources":["gBvKD3NdQwvPCaD5C","ya2CyA73rpZseyrZ8","5kE2CnMg6u7MRd5WC","Bh4soD3jyHYoSp98p","2252seNhipfTmjEBQ","nZHrFJKyxKKDaWYM8"],"keywords":[],"search_terms":["efficientnet","rethinking","model","scaling","convolutional","neural","networks","tan","le"],"title":"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks","year":2019}