EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks. Tan, M. & Le, Q. V. 36th International Conference on Machine Learning, ICML 2019, 2019-June:10691-10700, International Machine Learning Society (IMLS), May 2019.
Convolutional Neural Networks (ConvNets) are commonly developed at a fixed resource budget, and then scaled up for better accuracy if more resources are available. In this paper, we systematically study model scaling and identify that carefully balancing network depth, width, and resolution can lead to better performance. Based on this observation, we propose a new scaling method that uniformly scales all dimensions of depth/width/resolution using a simple yet highly effective compound coefficient. We demonstrate the effectiveness of this method on scaling up MobileNets and ResNet. To go even further, we use neural architecture search to design a new baseline network and scale it up to obtain a family of models, called EfficientNets, which achieve much better accuracy and efficiency than previous ConvNets. In particular, our EfficientNet-B7 achieves state-of-the-art 84.3% top-1 accuracy on ImageNet, while being 8.4x smaller and 6.1x faster on inference than the best existing ConvNet. Our EfficientNets also transfer well and achieve state-of-the-art accuracy on CIFAR-100 (91.7%), Flowers (98.8%), and 3 other transfer learning datasets, with an order of magnitude fewer parameters. Source code is at https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet.
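
The compound scaling rule the abstract refers to reduces to a few lines of arithmetic: depth, width, and resolution each grow geometrically with a single coefficient phi. Below is a minimal Python sketch, assuming the constants the paper reports for the EfficientNet-B0 baseline (alpha = 1.2, beta = 1.1, gamma = 1.15, chosen so that alpha * beta^2 * gamma^2 is roughly 2, making FLOPS grow by about 2^phi); the base_depth and base_width values are illustrative placeholders, not the per-stage configuration of the released models.

# Minimal sketch of compound scaling (Tan & Le, 2019).
# ALPHA/BETA/GAMMA are the values reported in the paper; base_depth
# and base_width are hypothetical placeholders for a baseline network.
ALPHA, BETA, GAMMA = 1.2, 1.1, 1.15  # satisfies ALPHA * BETA**2 * GAMMA**2 ~= 2

def compound_scale(phi, base_depth=16, base_width=32, base_resolution=224):
    """Scale layer count, channel count, and input size by coefficient phi."""
    depth = round(base_depth * ALPHA ** phi)            # deeper network
    width = round(base_width * BETA ** phi)             # wider channels
    resolution = round(base_resolution * GAMMA ** phi)  # larger inputs
    return depth, width, resolution

for phi in range(8):  # phi = 0..7 tracks roughly B0..B7
    print(phi, compound_scale(phi))

At phi = 7 the resolution lands near the 600-pixel inputs of EfficientNet-B7, which is why a single coefficient suffices once alpha, beta, and gamma are fixed by a small grid search on the baseline.
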
@article{tan2019efficientnet,
 title = {EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks},
 type = {article},
 year = {2019},
 pages = {10691-10700},
 volume = {2019-June},
 websites = {https://arxiv.org/abs/1905.11946v5},
 month = {5},
 publisher = {International Machine Learning Society (IMLS)},
 day = {28},
 abstract = {Convolutional Neural Networks (ConvNets) are commonly developed at a fixed resource budget, and then scaled up for better accuracy if more resources are available. In this paper, we systematically study model scaling and identify that carefully balancing network depth, width, and resolution can lead to better performance. Based on this observation, we propose a new scaling method that uniformly scales all dimensions of depth/width/resolution using a simple yet highly effective compound coefficient. We demonstrate the effectiveness of this method on scaling up MobileNets and ResNet. To go even further, we use neural architecture search to design a new baseline network and scale it up to obtain a family of models, called EfficientNets, which achieve much better accuracy and efficiency than previous ConvNets. In particular, our EfficientNet-B7 achieves state-of-the-art 84.3% top-1 accuracy on ImageNet, while being 8.4x smaller and 6.1x faster on inference than the best existing ConvNet. Our EfficientNets also transfer well and achieve state-of-the-art accuracy on CIFAR-100 (91.7%), Flowers (98.8%), and 3 other transfer learning datasets, with an order of magnitude fewer parameters. Source code is at https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet.},
 author = {Tan, Mingxing and Le, Quoc V.},
 journal = {36th International Conference on Machine Learning, ICML 2019}
}
