Understanding the difficulty of training deep feedforward neural networks. Glorot, X. & Bengio, Y. In Proceedings of the Thirteenth International Conference on Artificial Intelligence and Statistics, pages 249-256, March 2010. JMLR Workshop and Conference Proceedings. https://proceedings.mlr.press/v9/glorot10a.html
Abstract: Whereas before 2006 it appears that deep multi-layer neural networks were not successfully trained, since then several algorithms have been shown to train them successfully, with experimental results showing the superiority of deeper versus less deep architectures. All these experimental results were obtained with new initialization or training mechanisms. Our objective here is to better understand why standard gradient descent from random initialization does so poorly with deep neural networks, to better understand these recent relative successes, and to help design better algorithms in the future. We first observe the influence of the non-linear activation functions. We find that the logistic sigmoid activation is unsuited for deep networks with random initialization because of its mean value, which can drive especially the top hidden layer into saturation. Surprisingly, we find that saturated units can move out of saturation by themselves, albeit slowly, which explains the plateaus sometimes seen when training neural networks. We find that a new non-linearity that saturates less can often be beneficial. Finally, we study how activations and gradients vary across layers and during training, with the idea that training may be more difficult when the singular values of the Jacobian associated with each layer are far from 1. Based on these considerations, we propose a new initialization scheme that brings substantially faster convergence.
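For readers who want the concrete recipe, the sketch below (NumPy, written for this page rather than taken from the paper's code) illustrates the normalized "Xavier" initialization the abstract refers to, together with the softsign non-linearity the paper studies as a less-saturating alternative to the logistic sigmoid; the layer sizes in the usage example are arbitrary, not values from the paper.

import numpy as np

# Normalized ("Xavier"/Glorot) initialization: draw weights
# W ~ U[-sqrt(6/(n_in+n_out)), +sqrt(6/(n_in+n_out))], chosen so that activation
# and back-propagated gradient variances stay roughly constant across layers,
# i.e. the singular values of each layer's Jacobian stay near 1.
def normalized_init(n_in, n_out, rng=None):
    rng = np.random.default_rng() if rng is None else rng
    limit = np.sqrt(6.0 / (n_in + n_out))
    return rng.uniform(-limit, limit, size=(n_in, n_out))

# The less-saturating non-linearity studied in the paper is the softsign,
# x / (1 + |x|), which approaches its asymptotes polynomially rather than
# exponentially like tanh or the logistic sigmoid.
def softsign(x):
    return x / (1.0 + np.abs(x))

# Example usage with arbitrary layer sizes (784 -> 256):
x = np.random.default_rng(0).standard_normal(784)
W = normalized_init(784, 256)
h = softsign(x @ W)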
@inproceedings{glorotUnderstandingDifficultyTraining2010,
title = {Understanding the difficulty of training deep feedforward neural networks},
year = {2010},
pages = {249--256},
url = {https://proceedings.mlr.press/v9/glorot10a.html},
month = {3},
publisher = {JMLR Workshop and Conference Proceedings},
note = {ISSN: 1938-7228},
abstract = {Whereas before 2006 it appears that deep multi-layer neural networks were not successfully trained, since then several algorithms have been shown to train them successfully, with experimental results showing the superiority of deeper versus less deep architectures. All these experimental results were obtained with new initialization or training mechanisms. Our objective here is to better understand why standard gradient descent from random initialization does so poorly with deep neural networks, to better understand these recent relative successes, and to help design better algorithms in the future. We first observe the influence of the non-linear activation functions. We find that the logistic sigmoid activation is unsuited for deep networks with random initialization because of its mean value, which can drive especially the top hidden layer into saturation. Surprisingly, we find that saturated units can move out of saturation by themselves, albeit slowly, which explains the plateaus sometimes seen when training neural networks. We find that a new non-linearity that saturates less can often be beneficial. Finally, we study how activations and gradients vary across layers and during training, with the idea that training may be more difficult when the singular values of the Jacobian associated with each layer are far from 1. Based on these considerations, we propose a new initialization scheme that brings substantially faster convergence.},
author = {Glorot, Xavier and Bengio, Yoshua},
booktitle = {Proceedings of the Thirteenth International Conference on Artificial Intelligence and Statistics}
}
{"_id":"nvHzs3Yj4TS9jrWeG","bibbaseid":"glorot-bengio-understandingthedifficultyoftrainingdeepfeedforwardneuralnetworks-2010","author_short":["Glorot, X.","Bengio, Y."],"bibdata":{"title":"Understanding the difficulty of training deep feedforward neural networks","type":"inproceedings","year":"2010","pages":"249-256","websites":"https://proceedings.mlr.press/v9/glorot10a.html","month":"3","publisher":"JMLR Workshop and Conference Proceedings","id":"02bc1d82-6b45-3c3b-a194-7177f73d39ec","created":"2022-03-28T09:45:06.490Z","accessed":"2022-03-26","file_attached":"true","profile_id":"235249c2-3ed4-314a-b309-b1ea0330f5d9","group_id":"1ff583c0-be37-34fa-9c04-73c69437d354","last_modified":"2022-03-29T07:59:37.649Z","read":false,"starred":false,"authored":false,"confirmed":"true","hidden":false,"citation_key":"glorotUnderstandingDifficultyTraining2010","source_type":"inproceedings","notes":"ISSN: 1938-7228","private_publication":false,"abstract":"Whereas before 2006 it appears that deep multi-layer neural networks were not successfully trained, since then several algorithms have been shown to successfully train them, with experimental results showing the superiority of deeper vs less deep architectures. All these experimental results were obtained with new initialization or training mechanisms. Our objective here is to understand better why standard gradient descent from random initialization is doing so poorly with deep neural networks, to better understand these recent relative successes and help design better algorithms in the future. We first observe the influence of the non-linear activations functions. We find that the logistic sigmoid activation is unsuited for deep networks with random initialization because of its mean value, which can drive especially the top hidden layer into saturation. Surprisingly, we find that saturated units can move out of saturation by themselves, albeit slowly, and explaining the plateaus sometimes seen when training neural networks. We find that a new non-linearity that saturates less can often be beneficial. Finally, we study how activations and gradients vary across layers and during training, with the idea that training may be more difficult when the singular values of the Jacobian associated with each layer are far from 1. 
Based on these considerations, we propose a new initialization scheme that brings substantially faster convergence.","bibtype":"inproceedings","author":"Glorot, Xavier and Bengio, Yoshua","booktitle":"Proceedings of the Thirteenth International Conference on Artificial Intelligence and Statistics","bibtex":"@inproceedings{\n title = {Understanding the difficulty of training deep feedforward neural networks},\n type = {inproceedings},\n year = {2010},\n pages = {249-256},\n websites = {https://proceedings.mlr.press/v9/glorot10a.html},\n month = {3},\n publisher = {JMLR Workshop and Conference Proceedings},\n id = {02bc1d82-6b45-3c3b-a194-7177f73d39ec},\n created = {2022-03-28T09:45:06.490Z},\n accessed = {2022-03-26},\n file_attached = {true},\n profile_id = {235249c2-3ed4-314a-b309-b1ea0330f5d9},\n group_id = {1ff583c0-be37-34fa-9c04-73c69437d354},\n last_modified = {2022-03-29T07:59:37.649Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {true},\n hidden = {false},\n citation_key = {glorotUnderstandingDifficultyTraining2010},\n source_type = {inproceedings},\n notes = {ISSN: 1938-7228},\n private_publication = {false},\n abstract = {Whereas before 2006 it appears that deep multi-layer neural networks were not successfully trained, since then several algorithms have been shown to successfully train them, with experimental results showing the superiority of deeper vs less deep architectures. All these experimental results were obtained with new initialization or training mechanisms. Our objective here is to understand better why standard gradient descent from random initialization is doing so poorly with deep neural networks, to better understand these recent relative successes and help design better algorithms in the future. We first observe the influence of the non-linear activations functions. We find that the logistic sigmoid activation is unsuited for deep networks with random initialization because of its mean value, which can drive especially the top hidden layer into saturation. Surprisingly, we find that saturated units can move out of saturation by themselves, albeit slowly, and explaining the plateaus sometimes seen when training neural networks. We find that a new non-linearity that saturates less can often be beneficial. Finally, we study how activations and gradients vary across layers and during training, with the idea that training may be more difficult when the singular values of the Jacobian associated with each layer are far from 1. 
Based on these considerations, we propose a new initialization scheme that brings substantially faster convergence.},\n bibtype = {inproceedings},\n author = {Glorot, Xavier and Bengio, Yoshua},\n booktitle = {Proceedings of the Thirteenth International Conference on Artificial Intelligence and Statistics}\n}","author_short":["Glorot, X.","Bengio, Y."],"urls":{"Paper":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c/file/0bc237bd-b8aa-70b5-5920-eae4eb5a8c76/Glorot_and_Bengio___2010___Understanding_the_difficulty_of_training_deep_feed.pdf.pdf","Website":"https://proceedings.mlr.press/v9/glorot10a.html"},"biburl":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c","bibbaseid":"glorot-bengio-understandingthedifficultyoftrainingdeepfeedforwardneuralnetworks-2010","role":"author","metadata":{"authorlinks":{}}},"bibtype":"inproceedings","biburl":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c","dataSources":["iwKepCrWBps7ojhDx","ya2CyA73rpZseyrZ8","2252seNhipfTmjEBQ"],"keywords":[],"search_terms":["understanding","difficulty","training","deep","feedforward","neural","networks","glorot","bengio"],"title":"Understanding the difficulty of training deep feedforward neural networks","year":2010}