Control-Theoretic Analysis of Smoothness for Stability-Certified Reinforcement Learning. Jin, M. & Lavaei, J. In IEEE Conference on Decision and Control (CDC), pages 6840-6847, 2018.
Abstract: It is critical to obtain a stability certificate before deploying reinforcement learning in real-world mission-critical systems. This study justifies, from a control-theoretic perspective, the intuition that smoothness (i.e., small changes in inputs lead to small changes in outputs) is an important property for stability-certified reinforcement learning. The smoothness margin can be obtained by solving a feasibility problem based on semidefinite programming for both linear and nonlinear dynamical systems, and it does not require access to the exact parameters of the learned controllers. Numerical evaluation on nonlinear and decentralized frequency control for large-scale power grids demonstrates that the smoothness margin can certify stability during both exploration and deployment for (deep) neural-network policies, which substantially surpass nominal controllers in performance. The study opens up new opportunities for robust Lipschitz-continuous policy learning.
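As a rough illustration of the certification idea sketched in the abstract (not the paper's exact IQC-based formulation), the following Python/CVXPY snippet tests whether a linear plant xdot = A x + B u remains stable in feedback with any policy whose input-output gain (a Lipschitz-style smoothness bound at the origin) is at most gamma, using a standard Lyapunov/S-procedure LMI; the matrices A, B, the bound gamma, and the function name certify_gain_bound are illustrative placeholders, not quantities from the paper.

# Hedged sketch: certify stability of xdot = A x + B u in feedback with ANY
# policy u = pi(x) satisfying ||pi(x)|| <= gamma * ||x|| (a gain/Lipschitz bound),
# via a semidefinite feasibility problem. Not the paper's exact formulation;
# A, B, and gamma below are illustrative placeholders.
import numpy as np
import cvxpy as cp

def certify_gain_bound(A, B, gamma, eps=1e-6):
    """Return True if the Lyapunov/S-procedure LMI is feasible for this gain bound."""
    n, m = B.shape
    P = cp.Variable((n, n), symmetric=True)   # Lyapunov matrix: V(x) = x' P x
    lam = cp.Variable(nonneg=True)            # S-procedure multiplier for u'u <= gamma^2 x'x
    # Require V_dot + lam * (gamma^2 x'x - u'u) < 0 for all (x, u) != 0, packed as one LMI.
    M = cp.bmat([
        [A.T @ P + P @ A + lam * gamma ** 2 * np.eye(n), P @ B],
        [B.T @ P, -lam * np.eye(m)],
    ])
    constraints = [P >> eps * np.eye(n), M << -eps * np.eye(n + m)]
    problem = cp.Problem(cp.Minimize(0), constraints)
    problem.solve()
    return problem.status in (cp.OPTIMAL, cp.OPTIMAL_INACCURATE)

# Toy usage: bisect on gamma to estimate a "smoothness margin" for a stable 2-state plant.
A = np.array([[-1.0, 1.0], [0.0, -2.0]])
B = np.array([[0.0], [1.0]])
lo, hi = 0.0, 10.0
for _ in range(20):
    mid = 0.5 * (lo + hi)
    lo, hi = (mid, hi) if certify_gain_bound(A, B, mid) else (lo, mid)
print("largest certified gain bound (illustrative):", lo)

In this sketch the certificate depends only on the gain bound, not on the policy's parameters, which mirrors the abstract's point that the exact parameters of the learned controller need not be accessed; the paper itself handles nonlinear dynamics and sharper multipliers beyond this simple small-gain-style test.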
@INPROCEEDINGS{2018_2C_control,
author={M. {Jin} and J. {Lavaei}},
booktitle={IEEE Conference on Decision and Control (CDC)},
title={Control-Theoretic Analysis of Smoothness for Stability-Certified Reinforcement Learning},
year={2018},
volume={},
number={},
pages={6840-6847},
doi={10.1109/CDC.2018.8618996},
abstract={It is critical to obtain stability certificate before deploying reinforcement learning in real-world mission-critical systems. This study justifies the intuition that smoothness (i.e., small changes in inputs lead to small changes in outputs) is an important property for stability-certified reinforcement learning from a control-theoretic perspective. The smoothness margin can be obtained by solving a feasibility problem based on semi-definite programming for both linear and nonlinear dynamical systems, and it does not need to access the exact parameters of the learned controllers. Numerical evaluation on nonlinear and decentralized frequency control for large-scale power grids demonstrates that the smoothness margin can certify stability during both exploration and deployment for (deep) neural-network policies, which substantially surpass nominal controllers in performance. The study opens up new opportunities for robust Lipschitz continuous policy learning.},
url_pdf={smoothRL_cdc_tech.pdf},
url_link={https://ieeexplore.ieee.org/document/8618996},
keywords={Control theory, Machine learning}}
{"_id":"Fd8N7tDcfdBPsX7Cj","bibbaseid":"jin-lavaei-controltheoreticanalysisofsmoothnessforstabilitycertifiedreinforcementlearning-2018","author_short":["Jin, M.","Lavaei, J."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","author":[{"firstnames":["M."],"propositions":[],"lastnames":["Jin"],"suffixes":[]},{"firstnames":["J."],"propositions":[],"lastnames":["Lavaei"],"suffixes":[]}],"booktitle":"IEEE Conference on Decision and Control (CDC)","title":"Control-Theoretic Analysis of Smoothness for Stability-Certified Reinforcement Learning","year":"2018","volume":"","number":"","pages":"6840-6847","doi":"10.1109/CDC.2018.8618996","abstract":"It is critical to obtain stability certificate before deploying reinforcement learning in real-world mission-critical systems. This study justifies the intuition that smoothness (i.e., small changes in inputs lead to small changes in outputs) is an important property for stability-certified reinforcement learning from a control-theoretic perspective. The smoothness margin can be obtained by solving a feasibility problem based on semi-definite programming for both linear and nonlinear dynamical systems, and it does not need to access the exact parameters of the learned controllers. Numerical evaluation on nonlinear and decentralized frequency control for large-scale power grids demonstrates that the smoothness margin can certify stability during both exploration and deployment for (deep) neural-network policies, which substantially surpass nominal controllers in performance. The study opens up new opportunities for robust Lipschitz continuous policy learning.","url_pdf":"smoothRL_cdc_tech.pdf","url_link":"https://ieeexplore.ieee.org/document/8618996","keywords":"Control theory, Machine learning","bibtex":"@INPROCEEDINGS{2018_2C_control,\n author={M. {Jin} and J. {Lavaei}},\n booktitle={IEEE Conference on Decision and Control (CDC)}, \n title={Control-Theoretic Analysis of Smoothness for Stability-Certified Reinforcement Learning}, \n year={2018},\n volume={},\n number={},\n pages={6840-6847},\n doi={10.1109/CDC.2018.8618996},\n abstract={It is critical to obtain stability certificate before deploying reinforcement learning in real-world mission-critical systems. This study justifies the intuition that smoothness (i.e., small changes in inputs lead to small changes in outputs) is an important property for stability-certified reinforcement learning from a control-theoretic perspective. The smoothness margin can be obtained by solving a feasibility problem based on semi-definite programming for both linear and nonlinear dynamical systems, and it does not need to access the exact parameters of the learned controllers. Numerical evaluation on nonlinear and decentralized frequency control for large-scale power grids demonstrates that the smoothness margin can certify stability during both exploration and deployment for (deep) neural-network policies, which substantially surpass nominal controllers in performance. 
The study opens up new opportunities for robust Lipschitz continuous policy learning.},\n url_pdf={smoothRL_cdc_tech.pdf},\n url_link={https://ieeexplore.ieee.org/document/8618996},\n keywords={Control theory, Machine learning}}\n \n","author_short":["Jin, M.","Lavaei, J."],"key":"2018_2C_control","id":"2018_2C_control","bibbaseid":"jin-lavaei-controltheoreticanalysisofsmoothnessforstabilitycertifiedreinforcementlearning-2018","role":"author","urls":{" pdf":"http://www.jinming.tech/papers/smoothRL_cdc_tech.pdf"," link":"https://ieeexplore.ieee.org/document/8618996"},"keyword":["Control theory","Machine learning"],"metadata":{"authorlinks":{}}},"bibtype":"inproceedings","biburl":"http://www.jinming.tech/papers/myref.bib","dataSources":["sTzDHHaipTZWjp8oe","Y64tp2HnDCfXgLdc5"],"keywords":["control theory","machine learning"],"search_terms":["control","theoretic","analysis","smoothness","stability","certified","reinforcement","learning","jin","lavaei"],"title":"Control-Theoretic Analysis of Smoothness for Stability-Certified Reinforcement Learning","year":2018}