A Recurrent Neural Network for Solving Nonconvex Optimization Problems. Hu, X. & Wang, J. abstract bibtex An existing recurrent neural network for convex optimization is extended to solve nonconvex optimization problems. One of the prominent features of this neural network is the one-to-one correspondence between its equilibria and the Karush-Kuhn-Tucker (KKT) points of the nonconvex optimization problem. The conditions are derived under which the neural network (locally) converges to the KKT points. It is desired that the neural network is stable at minimum solutions, and unstable at maximum solutions or saddle solutions. It is found in the paper that most likely the neural network is unstable at the maximum solutions. Moreover, we found that if the derived conditions are not satisfied at minimum solutions, by transforming the original problem into an equivalent one with the p-power (or partial p-power) method, these conditions can be satisfied. As a result, the neural network will locally converge to a minimum solution. Finally, two illustrative examples are provided to demonstrate the performance of the recurrent neural network.

@article{hu_recurrent_nodate,
title = {A {Recurrent} {Neural} {Network} for {Solving} {Nonconvex} {Optimization} {Problems}},
abstract = {An existing recurrent neural network for convex optimization is extended to solve nonconvex optimization problems. One of the prominent features of this neural network is the one-to-one correspondence between its equilibria and the Karush-Kuhn-Tucker (KKT) points of the nonconvex optimization problem. The conditions are derived under which the neural network (locally) converges to the KKT points. It is desired that the neural network is stable at minimum solutions, and unstable at maximum solutions or saddle solutions. It is found in the paper that most likely the neural network is unstable at the maximum solutions. Moreover, we found that if the derived conditions are not satisfied at minimum solutions, by transforming the original problem into an equivalent one with the p-power (or partial p-power) method, these conditions can be satisfied. As a result, the neural network will locally converge to a minimum solution. Finally, two illustrative examples are provided to demonstrate the performance of the recurrent neural network.},
language = {en},
author = {Hu, Xiaolin and Wang, Jun},
keywords = {/unread, ⛔ No DOI found},
pages = {7},
}

Downloads: 0

{"_id":"5FcP5T3tRx5F9vwxM","bibbaseid":"hu-wang-arecurrentneuralnetworkforsolvingnonconvexoptimizationproblems","author_short":["Hu, X.","Wang, J."],"bibdata":{"bibtype":"article","type":"article","title":"A Recurrent Neural Network for Solving Nonconvex Optimization Problems","abstract":"An existing recurrent neural network for convex optimization is extended to solve nonconvex optimization problems. One of the prominent features of this neural network is the one-to-one correspondence between its equilibria and the Karush-Kuhn-Tucker (KKT) points of the nonconvex optimization problem. The conditions are derived under which the neural network (locally) converges to the KKT points. It is desired that the neural network is stable at minimum solutions, and unstable at maximum solutions or saddle solutions. It is found in the paper that most likely the neural network is unstable at the maximum solutions. Moreover, we found that if the derived conditions are not satisfied at minimum solutions, by transforming the original problem into an equivalent one with the p-power (or partial p-power) method, these conditions can be satisfied. As a result, the neural network will locally converge to a minimum solution. Finally, two illustrative examples are provided to demonstrate the performance of the recurrent neural network.","language":"en","author":[{"propositions":[],"lastnames":["Hu"],"firstnames":["Xiaolin"],"suffixes":[]},{"propositions":[],"lastnames":["Wang"],"firstnames":["Jun"],"suffixes":[]}],"keywords":"/unread, ⛔ No DOI found","pages":"7","bibtex":"@article{hu_recurrent_nodate,\n\ttitle = {A {Recurrent} {Neural} {Network} for {Solving} {Nonconvex} {Optimization} {Problems}},\n\tabstract = {An existing recurrent neural network for convex optimization is extended to solve nonconvex optimization problems. One of the prominent features of this neural network is the one-to-one correspondence between its equilibria and the Karush-Kuhn-Tucker (KKT) points of the nonconvex optimization problem. The conditions are derived under which the neural network (locally) converges to the KKT points. It is desired that the neural network is stable at minimum solutions, and unstable at maximum solutions or saddle solutions. It is found in the paper that most likely the neural network is unstable at the maximum solutions. Moreover, we found that if the derived conditions are not satisfied at minimum solutions, by transforming the original problem into an equivalent one with the p-power (or partial p-power) method, these conditions can be satisfied. As a result, the neural network will locally converge to a minimum solution. Finally, two illustrative examples are provided to demonstrate the performance of the recurrent neural network.},\n\tlanguage = {en},\n\tauthor = {Hu, Xiaolin and Wang, Jun},\n\tkeywords = {/unread, ⛔ No DOI found},\n\tpages = {7},\n}\n\n","author_short":["Hu, X.","Wang, J."],"key":"hu_recurrent_nodate","id":"hu_recurrent_nodate","bibbaseid":"hu-wang-arecurrentneuralnetworkforsolvingnonconvexoptimizationproblems","role":"author","urls":{},"keyword":["/unread","⛔ No DOI found"],"metadata":{"authorlinks":{}},"html":""},"bibtype":"article","biburl":"https://bibbase.org/zotero/victorjhu","dataSources":["CmHEoydhafhbkXXt5"],"keywords":["/unread","⛔ no doi found"],"search_terms":["recurrent","neural","network","solving","nonconvex","optimization","problems","hu","wang"],"title":"A Recurrent Neural Network for Solving Nonconvex Optimization Problems","year":null}