Enhancing batch normalized convolutional networks using displaced rectifier linear units: A systematic comparative study. Macêdo, D., Zanchettin, C., Oliveira, A., & Ludermir, T. Expert Systems with Applications, 124, 2019. A substantial number of expert and intelligent systems rely on deep learning methods to solve problems in areas such as economics, physics, and medicine. Improving the accuracy of the activation functions used by these methods can directly and positively impact the overall performance and quality of such systems at no additional cost. In this sense, enhancing the design of these fundamental theoretical building blocks is of great significance, as it immediately impacts a broad range of current and future real-world deep learning applications. Therefore, in this paper, we turn our attention to the interplay between activation functions and batch normalization, which is currently an almost mandatory technique for training deep networks. We propose the activation function Displaced Rectifier Linear Unit (DReLU), conjecturing that extending the identity function of ReLU into the third quadrant enhances compatibility with batch normalization. Moreover, we used statistical tests to compare the impact of distinct activation functions (ReLU, LReLU, PReLU, ELU, and DReLU) on the learning speed and test accuracy of standardized state-of-the-art VGG and Residual Network models. These convolutional neural networks were trained on CIFAR-100 and CIFAR-10, the most commonly used computer vision datasets in deep learning. The results showed that DReLU sped up learning in all models and datasets. Moreover, statistically significant assessments (p < 0.05) showed that DReLU improved on the test accuracy of ReLU in all scenarios. Furthermore, DReLU achieved better test accuracy than every other tested activation function in all experiments but one, in which it presented the second-best performance. Therefore, this work demonstrates that it is possible to increase performance by replacing ReLU with an enhanced activation function.
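To make the abstract's description of DReLU concrete, here is a minimal illustrative sketch (not the authors' code): the activation keeps the identity function down to a small negative displacement instead of cutting it off at zero, so part of the identity line extends into the third quadrant. The displacement value delta = 0.05 below is an assumption for illustration; the paper treats the displacement as a hyperparameter.

import numpy as np

def drelu(x, delta=0.05):
    # Displaced Rectifier Linear Unit (illustrative sketch):
    # identity for inputs above -delta, clamped at -delta below,
    # i.e. the ReLU hinge displaced into the third quadrant.
    # delta = 0.05 is an assumed value, not taken from the paper.
    return np.maximum(x, -delta)

# Unlike ReLU, which would map -0.03 to 0, DReLU lets small negative
# pre-activations pass through unchanged.
x = np.array([-1.0, -0.03, 0.0, 2.0])
print(drelu(x))  # [-0.05 -0.03  0.    2.  ]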
@article{macedo2019enhancing,
title = {Enhancing batch normalized convolutional networks using displaced rectifier linear units: A systematic comparative study},
type = {article},
year = {2019},
keywords = {Activation function,Batch normalization,Comparative study,Convolutional Neural Networks,DReLU,Deep learning},
volume = {124},
abstract = {A substantial number of expert and intelligent systems rely on deep learning methods to solve problems in areas such as economics, physics, and medicine. Improving the accuracy of the activation functions used by these methods can directly and positively impact the overall performance and quality of such systems at no additional cost. In this sense, enhancing the design of these fundamental theoretical building blocks is of great significance, as it immediately impacts a broad range of current and future real-world deep learning applications. Therefore, in this paper, we turn our attention to the interplay between activation functions and batch normalization, which is currently an almost mandatory technique for training deep networks. We propose the activation function Displaced Rectifier Linear Unit (DReLU), conjecturing that extending the identity function of ReLU into the third quadrant enhances compatibility with batch normalization. Moreover, we used statistical tests to compare the impact of distinct activation functions (ReLU, LReLU, PReLU, ELU, and DReLU) on the learning speed and test accuracy of standardized state-of-the-art VGG and Residual Network models. These convolutional neural networks were trained on CIFAR-100 and CIFAR-10, the most commonly used computer vision datasets in deep learning. The results showed that DReLU sped up learning in all models and datasets. Moreover, statistically significant assessments (p < 0.05) showed that DReLU improved on the test accuracy of ReLU in all scenarios. Furthermore, DReLU achieved better test accuracy than every other tested activation function in all experiments but one, in which it presented the second-best performance. Therefore, this work demonstrates that it is possible to increase performance by replacing ReLU with an enhanced activation function.},
bibtype = {article},
author = {Macêdo, D. and Zanchettin, C. and Oliveira, A.L.I. and Ludermir, T.},
doi = {10.1016/j.eswa.2019.01.066},
journal = {Expert Systems with Applications}
}
{"_id":"GueZFxJH9kjcjuKF9","bibbaseid":"macdo-zanchettin-oliveira-ludermir-enhancingbatchnormalizedconvolutionalnetworksusingdisplacedrectifierlinearunitsasystematiccomparativestudy-2019","authorIDs":["95PhW7tkuv95vtHAq"],"author_short":["Macêdo, D.","Zanchettin, C.","Oliveira, A.","Ludermir, T."],"bibdata":{"title":"Enhancing batch normalized convolutional networks using displaced rectifier linear units: A systematic comparative study","type":"article","year":"2019","keywords":"Activation function,Batch normalization,Comparative study,Convolutional Neural Networks,DReLU,Deep learning","volume":"124","id":"b0ea06ba-8502-3e85-a7d5-dca22c78ab5f","created":"2019-02-14T18:02:01.289Z","file_attached":false,"profile_id":"74e7d4ea-3dac-3118-aab9-511a5b337e8f","last_modified":"2019-02-14T18:02:01.289Z","read":false,"starred":false,"authored":"true","confirmed":false,"hidden":false,"private_publication":false,"abstract":"© 2019 Elsevier Ltd A substantial number of expert and intelligent systems rely on deep learning methods to solve problems in areas such as economics, physics, and medicine. Improving the accuracy of the activation functions used by such methods can directly and positively impact the overall performance and quality of the mentioned systems at no cost whatsoever. In this sense, enhancing the design of such theoretical fundamental blocks is of great significance as it immediately impacts a broad range of current and future real-world deep learning based applications. Therefore, in this paper, we turn our attention to the interworking between the activation functions and the batch normalization, which is practically a mandatory technique to train deep networks currently. We propose the activation function Displaced Rectifier Linear Unit (DReLU) by conjecturing that extending the identity function of ReLU to the third quadrant enhances compatibility with batch normalization. Moreover, we used statistical tests to compare the impact of using distinct activation functions (ReLU, LReLU, PReLU, ELU, and DReLU) on the learning speed and test accuracy performance of standardized VGG and Residual Networks state-of-the-art models. These Convolutional Neural Networks were trained on CIFAR-100 and CIFAR-10, the most commonly used deep learning computer vision datasets. The results showed DReLU speeded up learning in all models and datasets. Besides, statistical significant performance assessments (p < 0.05) showed DReLU enhanced the test accuracy presented by ReLU in all scenarios. Furthermore, DReLU showed better test accuracy than any other tested activation function in all experiments with one exception, in which case it presented the second best performance. Therefore, this work demonstrates that it is possible to increase performance replacing ReLU by an enhanced activation function.","bibtype":"article","author":"Macêdo, D. and Zanchettin, C. and Oliveira, A.L.I. 
and Ludermir, T.","doi":"10.1016/j.eswa.2019.01.066","journal":"Expert Systems with Applications","bibtex":"@article{\n title = {Enhancing batch normalized convolutional networks using displaced rectifier linear units: A systematic comparative study},\n type = {article},\n year = {2019},\n keywords = {Activation function,Batch normalization,Comparative study,Convolutional Neural Networks,DReLU,Deep learning},\n volume = {124},\n id = {b0ea06ba-8502-3e85-a7d5-dca22c78ab5f},\n created = {2019-02-14T18:02:01.289Z},\n file_attached = {false},\n profile_id = {74e7d4ea-3dac-3118-aab9-511a5b337e8f},\n last_modified = {2019-02-14T18:02:01.289Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {false},\n hidden = {false},\n private_publication = {false},\n abstract = {© 2019 Elsevier Ltd A substantial number of expert and intelligent systems rely on deep learning methods to solve problems in areas such as economics, physics, and medicine. Improving the accuracy of the activation functions used by such methods can directly and positively impact the overall performance and quality of the mentioned systems at no cost whatsoever. In this sense, enhancing the design of such theoretical fundamental blocks is of great significance as it immediately impacts a broad range of current and future real-world deep learning based applications. Therefore, in this paper, we turn our attention to the interworking between the activation functions and the batch normalization, which is practically a mandatory technique to train deep networks currently. We propose the activation function Displaced Rectifier Linear Unit (DReLU) by conjecturing that extending the identity function of ReLU to the third quadrant enhances compatibility with batch normalization. Moreover, we used statistical tests to compare the impact of using distinct activation functions (ReLU, LReLU, PReLU, ELU, and DReLU) on the learning speed and test accuracy performance of standardized VGG and Residual Networks state-of-the-art models. These Convolutional Neural Networks were trained on CIFAR-100 and CIFAR-10, the most commonly used deep learning computer vision datasets. The results showed DReLU speeded up learning in all models and datasets. Besides, statistical significant performance assessments (p < 0.05) showed DReLU enhanced the test accuracy presented by ReLU in all scenarios. Furthermore, DReLU showed better test accuracy than any other tested activation function in all experiments with one exception, in which case it presented the second best performance. Therefore, this work demonstrates that it is possible to increase performance replacing ReLU by an enhanced activation function.},\n bibtype = {article},\n author = {Macêdo, D. and Zanchettin, C. and Oliveira, A.L.I. 
and Ludermir, T.},\n doi = {10.1016/j.eswa.2019.01.066},\n journal = {Expert Systems with Applications}\n}","author_short":["Macêdo, D.","Zanchettin, C.","Oliveira, A.","Ludermir, T."],"biburl":"https://bibbase.org/service/mendeley/74e7d4ea-3dac-3118-aab9-511a5b337e8f","bibbaseid":"macdo-zanchettin-oliveira-ludermir-enhancingbatchnormalizedconvolutionalnetworksusingdisplacedrectifierlinearunitsasystematiccomparativestudy-2019","role":"author","urls":{},"keyword":["Activation function","Batch normalization","Comparative study","Convolutional Neural Networks","DReLU","Deep learning"],"metadata":{"authorlinks":{"zanchettin, c":"https://bibbase.org/service/mendeley/74e7d4ea-3dac-3118-aab9-511a5b337e8f"}},"downloads":0},"bibtype":"article","creationDate":"2020-09-17T14:33:46.202Z","downloads":0,"keywords":["activation function","batch normalization","comparative study","convolutional neural networks","drelu","deep learning"],"search_terms":["enhancing","batch","normalized","convolutional","networks","using","displaced","rectifier","linear","units","systematic","comparative","study","macêdo","zanchettin","oliveira","ludermir"],"title":"Enhancing batch normalized convolutional networks using displaced rectifier linear units: A systematic comparative study","year":2019,"biburl":"https://bibbase.org/service/mendeley/74e7d4ea-3dac-3118-aab9-511a5b337e8f","dataSources":["XkGKCoQgZDKqXZqdh","ya2CyA73rpZseyrZ8","2252seNhipfTmjEBQ"]}