Training Neural Networks without Backpropagation: A Deeper Dive into the Likelihood Ratio Method. Jiang, J., Zhang, Z., Xu, C., Yu, Z., & Peng, Y. 2023. arXiv:2305.08960.
Abstract: Backpropagation (BP) is the most important gradient estimation method for training neural networks in deep learning. However, the literature shows that neural networks trained by BP are vulnerable to adversarial attacks. We develop the likelihood ratio (LR) method, a new gradient estimation method, for training a broad range of neural network architectures, including convolutional neural networks, recurrent neural networks, graph neural networks, and spiking neural networks, without recursive gradient computation. We propose three methods to efficiently reduce the variance of the gradient estimation in the neural network training process. Our experiments yield numerical results for training different neural networks on several datasets. All results demonstrate that the LR method is effective for training various neural networks and significantly improves the robustness of the neural networks under adversarial attacks relative to the BP method.
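For orientation, the core of any likelihood-ratio (score-function) estimator is the identity grad_theta E_{z~p_theta}[L(z)] = E_{z~p_theta}[L(z) grad_theta log p_theta(z)], which turns gradient computation into an average over forward evaluations, with no backward pass through L. The sketch below illustrates that generic idea for a single noisy linear layer in NumPy. It is not the paper's algorithm: the Gaussian-noise model and the names (lr_grad_linear, sigma, n_samples) are assumptions made for illustration, and the mean-subtracted baseline is just one standard variance-reduction device, in the spirit of (but not identical to) the three variance-reduction methods the paper proposes.

import numpy as np

# Hedged sketch (not the paper's exact method): a score-function / likelihood-ratio
# gradient estimator for one noisy linear layer z = W x + b + eps, eps ~ N(0, sigma^2 I).
# Since grad_b log p(z) = eps / sigma^2 and grad_W log p(z) = (eps / sigma^2) x^T,
# the LR identity gives gradient estimates from forward evaluations of the loss alone.

rng = np.random.default_rng(0)

def lr_grad_linear(W, b, x, loss, sigma=0.1, n_samples=10000):
    """Monte Carlo LR estimates of dE[loss(W x + b + eps)]/dW and .../db."""
    mean = W @ x + b                                     # deterministic pre-activation
    eps = rng.normal(0.0, sigma, size=(n_samples, b.size))
    z = mean[None, :] + eps                              # perturbed pre-activations
    L = np.array([loss(zi) for zi in z])                 # forward evaluations only
    L = L - L.mean()                                     # baseline: simple variance reduction
    score = eps / sigma**2                               # grad of log-density w.r.t. the mean
    grad_b = (L[:, None] * score).mean(axis=0)
    grad_W = np.outer(grad_b, x)                         # x is fixed, so the outer product factors out
    return grad_W, grad_b

# Usage: for a quadratic loss the exact gradient w.r.t. b is (W x + b - target),
# so the Monte Carlo estimate can be checked directly.
W = rng.normal(size=(2, 3)); b = np.zeros(2); x = rng.normal(size=3)
target = np.ones(2)
loss = lambda z: 0.5 * np.sum((z - target) ** 2)
gW, gb = lr_grad_linear(W, b, x, loss)
print(np.round(gb, 2), np.round(W @ x + b - target, 2))

A well-known caveat is that the variance of such estimators grows with dimension, which is why variance-reduction techniques like those proposed in the paper matter in practice.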
@misc{jiang2023training,
  author   = {Jiang, Jinyang and Zhang, Zeliang and Xu, Chenliang and Yu, Zhaofei and Peng, Yijie},
  title    = {Training Neural Networks without Backpropagation: A Deeper Dive into the Likelihood Ratio Method},
  year     = {2023},
  note     = {arXiv:2305.08960},
  url      = {http://arxiv.org/abs/2305.08960},
  keywords = {neuralnetworks nobackprop}
}
{"_id":"w5nXFktfnNM9mnAK6","bibbaseid":"jiang-zhang-xu-yu-peng-trainingneuralnetworkswithoutbackpropagationadeeperdiveintothelikelihoodratiomethod-2023","author_short":["Jiang, J.","Zhang, Z.","Xu, C.","Yu, Z.","Peng, Y."],"bibdata":{"bibtype":"misc","type":"misc","abstract":"Backpropagation (BP) is the most important gradient estimation method for training neural networks in deep learning. However, the literature shows that neural networks trained by BP are vulnerable to adversarial attacks. We develop the likelihood ratio (LR) method, a new gradient estimation method, for training a broad range of neural network architectures, including convolutional neural networks, recurrent neural networks, graph neural networks, and spiking neural networks, without recursive gradient computation. We propose three methods to efficiently reduce the variance of the gradient estimation in the neural network training process. Our experiments yield numerical results for training different neural networks on several datasets. All results demonstrate that the LR method is effective for training various neural networks and significantly improves the robustness of the neural networks under adversarial attacks relative to the BP method.","added-at":"2023-07-25T13:29:28.000+0200","author":[{"propositions":[],"lastnames":["Jiang"],"firstnames":["Jinyang"],"suffixes":[]},{"propositions":[],"lastnames":["Zhang"],"firstnames":["Zeliang"],"suffixes":[]},{"propositions":[],"lastnames":["Xu"],"firstnames":["Chenliang"],"suffixes":[]},{"propositions":[],"lastnames":["Yu"],"firstnames":["Zhaofei"],"suffixes":[]},{"propositions":[],"lastnames":["Peng"],"firstnames":["Yijie"],"suffixes":[]}],"biburl":"https://www.bibsonomy.org/bibtex/21629ed1ec2513ac7f0d0a0d649b8a351/koncar","description":"Training Neural Networks without Backpropagation: A Deeper Dive into the Likelihood Ratio Method","interhash":"2e1cb5b7bd723a4f871a1b92787c702b","intrahash":"1629ed1ec2513ac7f0d0a0d649b8a351","keywords":"neuralnetworks nobackprop","note":"cite arxiv:2305.08960","timestamp":"2023-07-25T13:29:28.000+0200","title":"Training Neural Networks without Backpropagation: A Deeper Dive into the Likelihood Ratio Method","url":"http://arxiv.org/abs/2305.08960","year":"2023","bibtex":"@misc{jiang2023training,\n abstract = {Backpropagation (BP) is the most important gradient estimation method for\r\ntraining neural networks in deep learning. However, the literature shows that\r\nneural networks trained by BP are vulnerable to adversarial attacks. We develop\r\nthe likelihood ratio (LR) method, a new gradient estimation method, for\r\ntraining a broad range of neural network architectures, including convolutional\r\nneural networks, recurrent neural networks, graph neural networks, and spiking\r\nneural networks, without recursive gradient computation. We propose three\r\nmethods to efficiently reduce the variance of the gradient estimation in the\r\nneural network training process. Our experiments yield numerical results for\r\ntraining different neural networks on several datasets. 
All results demonstrate\r\nthat the LR method is effective for training various neural networks and\r\nsignificantly improves the robustness of the neural networks under adversarial\r\nattacks relative to the BP method.},\n added-at = {2023-07-25T13:29:28.000+0200},\n author = {Jiang, Jinyang and Zhang, Zeliang and Xu, Chenliang and Yu, Zhaofei and Peng, Yijie},\n biburl = {https://www.bibsonomy.org/bibtex/21629ed1ec2513ac7f0d0a0d649b8a351/koncar},\n description = {Training Neural Networks without Backpropagation: A Deeper Dive into the Likelihood Ratio Method},\n interhash = {2e1cb5b7bd723a4f871a1b92787c702b},\n intrahash = {1629ed1ec2513ac7f0d0a0d649b8a351},\n keywords = {neuralnetworks nobackprop},\n note = {cite arxiv:2305.08960},\n timestamp = {2023-07-25T13:29:28.000+0200},\n title = {Training Neural Networks without Backpropagation: A Deeper Dive into the\r\n Likelihood Ratio Method},\n url = {http://arxiv.org/abs/2305.08960},\n year = 2023\n}\n\n","author_short":["Jiang, J.","Zhang, Z.","Xu, C.","Yu, Z.","Peng, Y."],"key":"jiang2023training","id":"jiang2023training","bibbaseid":"jiang-zhang-xu-yu-peng-trainingneuralnetworkswithoutbackpropagationadeeperdiveintothelikelihoodratiomethod-2023","role":"author","urls":{"Paper":"http://arxiv.org/abs/2305.08960"},"keyword":["neuralnetworks nobackprop"],"metadata":{"authorlinks":{}},"html":""},"bibtype":"misc","biburl":"http://www.bibsonomy.org/bib/author/zhang?items=1000","dataSources":["6yXn8CtuzyEbCSr2m"],"keywords":["neuralnetworks nobackprop"],"search_terms":["training","neural","networks","without","backpropagation","deeper","dive","likelihood","ratio","method","jiang","zhang","xu","yu","peng"],"title":"Training Neural Networks without Backpropagation: A Deeper Dive into the Likelihood Ratio Method","year":2023}