Online and Robust Intermittent Motion Planning in Dynamic and Changing Environments. Xu, Z., Kontoudis, G., P., & Vamvoudakis, K., G. IEEE Transactions on Neural Networks and Learning Systems, PP:1-15, IEEE, 2023. doi abstract bibtex 19 downloads In this paper, we present a real-time kinodynamic motion planning methodology for dynamic environments, denoted as RRT-Q X ∞. We leverage RRT X for global path planning and rapid replanning to produce a set of boundary value problems. A Q-learning optimal controller is proposed for waypoint navigation with completely unknown system dynamics, external disturbances, and intermittent communication. The problem is formulated as a finite-horizon, continuous-time zero-sum game, where the control input is the minimizer, and the worst-case disturbance is the maximizer. To reduce the communication overhead, we allow intermittent transmission of control inputs. Moreover, a relaxed persistence of excitation technique is employed to improve the convergence speed of the Q-learning controller. We provide rigorous Lyapunov-based proofs to guarantee the closed-loop stability of the equilibrium point. The efficacy of the proposed RRT-Q X ∞ is illustrated in several scenarios.
@article{xu2023online,
  author    = {Xu, Zirui and Kontoudis, George P. and Vamvoudakis, Kyriakos G.},
  title     = {Online and Robust Intermittent Motion Planning in Dynamic and Changing Environments},
  journal   = {IEEE Transactions on Neural Networks and Learning Systems},
  year      = {2023},
  volume    = {PP},
  pages     = {1--15},
  publisher = {IEEE},
  doi       = {10.1109/TNNLS.2023.3303811},
  abstract  = {In this paper, we present a real-time kinodynamic motion planning methodology for dynamic environments, denoted as RRT-Q X ∞. We leverage RRT X for global path planning and rapid replanning to produce a set of boundary value problems. A Q-learning optimal controller is proposed for waypoint navigation with completely unknown system dynamics, external disturbances, and intermittent communication. The problem is formulated as a finite-horizon, continuous-time zero-sum game, where the control input is the minimizer, and the worst-case disturbance is the maximizer. To reduce the communication overhead, we allow intermittent transmission of control inputs. Moreover, a relaxed persistence of excitation technique is employed to improve the convergence speed of the Q-learning controller. We provide rigorous Lyapunov-based proofs to guarantee the closed-loop stability of the equilibrium point. The efficacy of the proposed RRT-Q X ∞ is illustrated in several scenarios.},
}
Downloads: 19
{"_id":"nWFdSpG6YK7Pt7Yth","bibbaseid":"xu-kontoudis-vamvoudakis-onlineandrobustintermittentmotionplanningindynamicandchangingenvironments-2023","author_short":["Xu, Z.","Kontoudis, G., P.","Vamvoudakis, K., G."],"bibdata":{"title":"Online and Robust Intermittent Motion Planning in Dynamic and Changing Environments","type":"article","year":"2023","pages":"1-15","volume":"PP","publisher":"IEEE","id":"81733efc-1065-3d60-85c6-14d5af8daaa5","created":"2023-10-06T16:38:07.419Z","file_attached":"true","profile_id":"397e406e-8482-37ee-9e30-40cb0af7c9d7","last_modified":"2023-10-06T16:39:24.425Z","read":false,"starred":false,"authored":"true","confirmed":"true","hidden":false,"private_publication":false,"abstract":"In this paper, we present a real-time kinodynamic motion planning methodology for dynamic environments, denoted as RRT-Q X ∞. We leverage RRT X for global path planning and rapid replanning to produce a set of boundary value problems. A Q-learning optimal controller is proposed for waypoint navigation with completely unknown system dynamics, external disturbances, and intermittent communication. The problem is formulated as a finite-horizon, continuous-time zero-sum game, where the control input is the minimizer, and the worst-case disturbance is the maximizer. To reduce the communication overhead , we allow intermittent transmission of control inputs. Moreover , a relaxed persistence of excitation technique is employed to improve the convergence speed of the Q-learning controller. We provide rigorous Lyapunov-based proofs to guarantee the closed-loop stability of the equilibrium point. 
The efficacy of the proposed RRT-Q X ∞ is illustrated in several scenarios.","bibtype":"article","author":"Xu, Zirui and Kontoudis, George P and Vamvoudakis, Kyriakos G","doi":"10.1109/TNNLS.2023.3303811","journal":"IEEE Transactions on Neural Networks and Learning Systems","bibtex":"@article{\n title = {Online and Robust Intermittent Motion Planning in Dynamic and Changing Environments},\n type = {article},\n year = {2023},\n pages = {1-15},\n volume = {PP},\n publisher = {IEEE},\n id = {81733efc-1065-3d60-85c6-14d5af8daaa5},\n created = {2023-10-06T16:38:07.419Z},\n file_attached = {true},\n profile_id = {397e406e-8482-37ee-9e30-40cb0af7c9d7},\n last_modified = {2023-10-06T16:39:24.425Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {In this paper, we present a real-time kinodynamic motion planning methodology for dynamic environments, denoted as RRT-Q X ∞. We leverage RRT X for global path planning and rapid replanning to produce a set of boundary value problems. A Q-learning optimal controller is proposed for waypoint navigation with completely unknown system dynamics, external disturbances, and intermittent communication. The problem is formulated as a finite-horizon, continuous-time zero-sum game, where the control input is the minimizer, and the worst-case disturbance is the maximizer. To reduce the communication overhead , we allow intermittent transmission of control inputs. Moreover , a relaxed persistence of excitation technique is employed to improve the convergence speed of the Q-learning controller. We provide rigorous Lyapunov-based proofs to guarantee the closed-loop stability of the equilibrium point. 
The efficacy of the proposed RRT-Q X ∞ is illustrated in several scenarios.},\n bibtype = {article},\n author = {Xu, Zirui and Kontoudis, George P and Vamvoudakis, Kyriakos G},\n doi = {10.1109/TNNLS.2023.3303811},\n journal = {IEEE Transactions on Neural Networks and Learning Systems}\n}","author_short":["Xu, Z.","Kontoudis, G., P.","Vamvoudakis, K., G."],"biburl":"https://bibbase.org/service/mendeley/397e406e-8482-37ee-9e30-40cb0af7c9d7","bibbaseid":"xu-kontoudis-vamvoudakis-onlineandrobustintermittentmotionplanningindynamicandchangingenvironments-2023","role":"author","urls":{},"metadata":{"authorlinks":{}},"downloads":19},"bibtype":"article","biburl":"https://bibbase.org/service/mendeley/397e406e-8482-37ee-9e30-40cb0af7c9d7","dataSources":["ENfCwQrwsiuEJtsqK","ya2CyA73rpZseyrZ8","2252seNhipfTmjEBQ"],"keywords":[],"search_terms":["online","robust","intermittent","motion","planning","dynamic","changing","environments","xu","kontoudis","vamvoudakis"],"title":"Online and Robust Intermittent Motion Planning in Dynamic and Changing Environments","year":2023,"downloads":19}