Stability-Certified Reinforcement Learning: A Control-Theoretic Perspective. Jin, M. & Lavaei, J. IEEE Access, 2020.
PDF: http://www.jinming.tech/papers/SafeRL_2020.pdf | Link: https://ieeexplore.ieee.org/document/9296215 | DOI: 10.1109/ACCESS.2020.3045114

Abstract: We investigate the important problem of certifying stability of reinforcement learning policies when interconnected with nonlinear dynamical systems. We show that by regulating the partial gradients of policies, strong guarantees of robust stability can be obtained based on a proposed semidefinite programming feasibility problem. The method is able to certify a large set of stabilizing controllers by exploiting problem-specific structures; furthermore, we analyze and establish its (non)conservatism. Empirical evaluations on two decentralized control tasks, namely multi-flight formation and power system frequency regulation, demonstrate that the reinforcement learning agents can have high performance within the stability-certified parameter space and also exhibit stable learning behaviors in the long run.
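The certificate described in the abstract is a semidefinite programming (SDP) feasibility problem: a policy whose partial gradients are bounded is treated as a structured uncertainty, and robust stability of the closed loop is checked via a linear matrix inequality. The following is a minimal sketch of that idea, not the paper's exact formulation: a standard S-procedure (small-gain) LMI, solved with cvxpy, certifying that any policy u = pi(x) with gradient norm at most gamma stabilizes a linear plant xdot = A x + B u. The plant matrices A, B and the bound gamma are illustrative placeholders, not values from the paper.

import cvxpy as cp
import numpy as np

# Illustrative plant (not from the paper): xdot = A x + B u, with A Hurwitz.
A = np.array([[0.0, 1.0],
              [-1.0, -0.5]])
B = np.array([[0.0],
              [1.0]])
gamma = 0.3  # assumed bound on the policy's partial gradients: ||grad pi|| <= gamma

n, m = B.shape
P = cp.Variable((n, n), symmetric=True)  # Lyapunov matrix, V(x) = x' P x
lam = cp.Variable(nonneg=True)           # S-procedure multiplier
eps = 1e-6                               # strictness margin

# Require Vdot < 0 whenever the policy satisfies gamma^2 x'x - u'u >= 0,
# which the S-procedure encodes as one negative-definite block LMI in (x, u).
M = cp.bmat([
    [A.T @ P + P @ A + lam * (gamma**2) * np.eye(n), P @ B],
    [B.T @ P, -lam * np.eye(m)],
])
M_sym = (M + M.T) / 2  # symmetrize so cvxpy accepts the semidefinite constraint

constraints = [P >> eps * np.eye(n), M_sym << -eps * np.eye(n + m)]
prob = cp.Problem(cp.Minimize(0), constraints)  # pure feasibility problem
prob.solve(solver=cp.SCS)

print("stability certified" if prob.status == cp.OPTIMAL else "certificate not found")

The paper's actual test is less conservative than this plain small-gain check; per the abstract, it exploits problem-specific structure (e.g., the decentralized architecture of the controllers) to certify a larger set of stabilizing policies.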
@ARTICLE{2020_2J_stabilityrl,
author={M. {Jin} and J. {Lavaei}},
journal={IEEE Access},
title={Stability-Certified Reinforcement Learning: A Control-Theoretic Perspective},
year={2020},
pages={1-1},
doi={10.1109/ACCESS.2020.3045114},
abstract={We investigate the important problem of certifying stability of reinforcement learning policies when interconnected with nonlinear dynamical systems. We show that by regulating the partial gradients of policies, strong guarantees of robust stability can be obtained based on a proposed semidefinite programming feasibility problem. The method is able to certify a large set of stabilizing controllers by exploiting problem-specific structures; furthermore, we analyze and establish its (non)conservatism. Empirical evaluations on two decentralized control tasks, namely multi-flight formation and power system frequency regulation, demonstrate that the reinforcement learning agents can have high performance within the stability-certified parameter space and also exhibit stable learning behaviors in the long run.},
url_pdf = {SafeRL_2020.pdf},
url_link = {https://ieeexplore.ieee.org/document/9296215},
keywords = {Control theory, Machine learning, Power system}}
{"_id":"haSpDJPXX3vwWTS5R","bibbaseid":"jin-lavaei-stabilitycertifiedreinforcementlearningacontroltheoreticperspective-2020","author_short":["Jin, M.","Lavaei, J."],"bibdata":{"bibtype":"article","type":"article","author":[{"firstnames":["M."],"propositions":[],"lastnames":["Jin"],"suffixes":[]},{"firstnames":["J."],"propositions":[],"lastnames":["Lavaei"],"suffixes":[]}],"journal":"IEEE Access","title":"Stability-Certified Reinforcement Learning: A Control-Theoretic Perspective","year":"2020","volume":"","number":"","pages":"1-1","doi":"10.1109/ACCESS.2020.3045114","abstract":"We investigate the important problem of certifying stability of reinforcement learning policies when interconnected with nonlinear dynamical systems. We show that by regulating the partial gradients of policies, strong guarantees of robust stability can be obtained based on a proposed semidefinite programming feasibility problem. The method is able to certify a large set of stabilizing controllers by exploiting problem-specific structures; furthermore, we analyze and establish its (non)conservatism. Empirical evaluations on two decentralized control tasks, namely multi-flight formation and power system frequency regulation, demonstrate that the reinforcement learning agents can have high performance within the stability-certified parameter space and also exhibit stable learning behaviors in the long run.","url_pdf":"SafeRL_2020.pdf","url_link":"https://ieeexplore.ieee.org/document/9296215","keywords":"Control theory, Machine learning, Power system","bibtex":"@ARTICLE{2020_2J_stabilityrl,\n author={M. {Jin} and J. {Lavaei}},\n journal={IEEE Access}, \n title={Stability-Certified Reinforcement Learning: A Control-Theoretic Perspective}, \n year={2020},\n volume={},\n number={},\n pages={1-1},\n doi={10.1109/ACCESS.2020.3045114},\n abstract={We investigate the important problem of certifying stability of reinforcement learning policies when interconnected with nonlinear dynamical systems. We show that by regulating the partial gradients of policies, strong guarantees of robust stability can be obtained based on a proposed semidefinite programming feasibility problem. The method is able to certify a large set of stabilizing controllers by exploiting problem-specific structures; furthermore, we analyze and establish its (non)conservatism. 
Empirical evaluations on two decentralized control tasks, namely multi-flight formation and power system frequency regulation, demonstrate that the reinforcement learning agents can have high performance within the stability-certified parameter space and also exhibit stable learning behaviors in the long run.},\n url_pdf = {SafeRL_2020.pdf},\n url_link = {https://ieeexplore.ieee.org/document/9296215},\n keywords = {Control theory, Machine learning, Power system}}\n\n","author_short":["Jin, M.","Lavaei, J."],"key":"2020_2J_stabilityrl","id":"2020_2J_stabilityrl","bibbaseid":"jin-lavaei-stabilitycertifiedreinforcementlearningacontroltheoreticperspective-2020","role":"author","urls":{" pdf":"http://www.jinming.tech/papers/SafeRL_2020.pdf"," link":"https://ieeexplore.ieee.org/document/9296215"},"keyword":["Control theory","Machine learning","Power system"],"metadata":{"authorlinks":{}},"downloads":3},"bibtype":"article","biburl":"http://www.jinming.tech/papers/myref.bib","dataSources":["sTzDHHaipTZWjp8oe","Y64tp2HnDCfXgLdc5"],"keywords":["control theory","machine learning","power system"],"search_terms":["stability","certified","reinforcement","learning","control","theoretic","perspective","jin","lavaei"],"title":"Stability-Certified Reinforcement Learning: A Control-Theoretic Perspective","year":2020,"downloads":3}