\n \n \n
\n
\n\n \n \n \n \n \n HOMOLOGY THEORY AND DYNAMICAL SYSTEMS.\n \n \n \n\n\n \n Sullivan, D\n\n\n \n\n\n\n ,24. .\n
\n\n
\n\n
\n\n
\n\n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{sullivan_homology_nodate,\n\ttitle = {Homology Theory and Dynamical Systems},\n\tlanguage = {en},\n\tauthor = {Sullivan, D.},\n\tpages = {24},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n Adaptive Safety with Control Barrier Functions.\n \n \n \n\n\n \n Taylor, A.; and Ames, A. D\n\n\n \n\n\n\n ,7. .\n
\n\n
\n\n
\n\n
\n\n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{taylor_adaptive_nodate,\n\ttitle = {Adaptive {Safety} with {Control} {Barrier} {Functions}},\n\tabstract = {Adaptive Control Lyapunov Functions (aCLFs) were introduced 20 years ago, and provided a Lyapunov-based methodology for stabilizing systems with parameter uncertainty. The goal of this paper is to revisit this classic formulation in the context of safety-critical control. This will motivate a variant of aCLFs in the context of safety: adaptive Control Barrier Functions (aCBFs). Our proposed approach adaptively achieves safety by keeping the system’s state within a safe set even in the presence of parametric model uncertainty. We unify aCLFs and aCBFs into a single control methodology for systems with uncertain parameters in the context of a Quadratic Program (QP) based framework. We validate the ability of this unified framework to achieve stability and safety in an Adaptive Cruise Control (ACC) simulation.},\n\tlanguage = {en},\n\tauthor = {Taylor, Andrew and Ames, Aaron D.},\n\tpages = {7},\n}\n\n
\n
\n\n\n
\n Adaptive Control Lyapunov Functions (aCLFs) were introduced 20 years ago, and provided a Lyapunov-based methodology for stabilizing systems with parameter uncertainty. The goal of this paper is to revisit this classic formulation in the context of safety-critical control. This will motivate a variant of aCLFs in the context of safety: adaptive Control Barrier Functions (aCBFs). Our proposed approach adaptively achieves safety by keeping the system’s state within a safe set even in the presence of parametric model uncertainty. We unify aCLFs and aCBFs into a single control methodology for systems with uncertain parameters in the context of a Quadratic Program (QP) based framework. We validate the ability of this unified framework to achieve stability and safety in an Adaptive Cruise Control (ACC) simulation.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n An Inverse Dynamics Approach to Control Lyapunov Functions.\n \n \n \n\n\n \n Reher, J.; Kann, C.; and Ames, A. D\n\n\n \n\n\n\n ,8. .\n
\n\n
\n\n
\n\n
\n\n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{reher_inverse_nodate,\n\ttitle = {An {Inverse} {Dynamics} {Approach} to {Control} {Lyapunov} {Functions}},\n\tabstract = {With the goal of moving towards implementation of increasingly dynamic behaviors on underactuated systems, this paper presents an optimization-based approach for solving full-body dynamics based controllers on underactuated bipedal robots. The primary focus of this paper is on the development of an alternative approach to the implementation of controllers utilizing control Lyapunov function based quadratic programs. This approach utilizes many of the desirable aspects from successful inverse dynamics based controllers in the literature, while also incorporating a variant of control Lyapunov functions that renders better convergence in the context of tracking outputs. The principal benefits of this formulation include a greater ability to add costs which regulate the resulting behavior of the robot. In addition, the model error-prone inertia matrix is used only once, in a non-inverted form. The result is a successful demonstration of the controller for walking in simulation, and applied on hardware in real-time for dynamic crouching.},\n\tlanguage = {en},\n\tauthor = {Reher, Jenna and Kann, Claudia and Ames, Aaron D.},\n\tpages = {8},\n}\n\n
\n
\n\n\n
\n With the goal of moving towards implementation of increasingly dynamic behaviors on underactuated systems, this paper presents an optimization-based approach for solving full-body dynamics based controllers on underactuated bipedal robots. The primary focus of this paper is on the development of an alternative approach to the implementation of controllers utilizing control Lyapunov function based quadratic programs. This approach utilizes many of the desirable aspects from successful inverse dynamics based controllers in the literature, while also incorporating a variant of control Lyapunov functions that renders better convergence in the context of tracking outputs. The principal benefits of this formulation include a greater ability to add costs which regulate the resulting behavior of the robot. In addition, the model error-prone inertia matrix is used only once, in a non-inverted form. The result is a successful demonstration of the controller for walking in simulation, and applied on hardware in real-time for dynamic crouching.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n Density Functions for Guaranteed Safety on Robotic Systems.\n \n \n \n\n\n \n Chen, Y.; Singletary, A.; and Ames, A. D\n\n\n \n\n\n\n ,6. .\n
\n\n
\n\n
\n\n
\n\n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{chen_density_nodate,\n\ttitle = {Density {Functions} for {Guaranteed} {Safety} on {Robotic} {Systems}},\n\tabstract = {The recent study on density functions as the dual of value functions for optimal control gives a new method for synthesizing safe controllers. A density function describes the state distribution in the state space, and its evolution follows the Liouville Partial Differential Equation (PDE). The duality between the density function and the value function in optimal control can be utilized to solve constrained optimal control problems with a primal-dual algorithm. This paper focuses on the application of the method on robotic systems and proposes an implementation of the primal-dual algorithm that is less computationally demanding than the method used in the literature. To be specific, we use kernel density estimation to estimate the density function, which scales better than the ODE approach in the literature and only requires a simulator instead of a dynamic model. The Hamilton Jacobi Bellman (HJB) PDE is solved with the finite element method in an implicit form, which accelerates the value iteration process. We show an application of the safe control synthesis with density functions on a segway control problem demonstrated experimentally.},\n\tlanguage = {en},\n\tauthor = {Chen, Yuxiao and Singletary, Andrew and Ames, Aaron D.},\n\tpages = {6},\n}\n\n
\n
\n\n\n
\n The recent study on density functions as the dual of value functions for optimal control gives a new method for synthesizing safe controllers. A density function describes the state distribution in the state space, and its evolution follows the Liouville Partial Differential Equation (PDE). The duality between the density function and the value function in optimal control can be utilized to solve constrained optimal control problems with a primal-dual algorithm. This paper focuses on the application of the method on robotic systems and proposes an implementation of the primal-dual algorithm that is less computationally demanding than the method used in the literature. To be specific, we use kernel density estimation to estimate the density function, which scales better than the ODE approach in the literature and only requires a simulator instead of a dynamic model. The Hamilton Jacobi Bellman (HJB) PDE is solved with the finite element method in an implicit form, which accelerates the value iteration process. We show an application of the safe control synthesis with density functions on a segway control problem demonstrated experimentally.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n On a Converse Theorem for Finite-Time Lyapunov Functions to Estimate Domains of Attraction.\n \n \n \n\n\n \n Pandey, A.; and Ames, A. D\n\n\n \n\n\n\n ,7. .\n
\n\n
\n\n
\n\n
\n\n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{pandey_converse_nodate,\n\ttitle = {On a {Converse} {Theorem} for {Finite}-{Time} {Lyapunov} {Functions} to {Estimate} {Domains} of {Attraction}},\n\tabstract = {The main result of the paper is a new converse theorem for finite-time Lyapunov functions. We show the existence of a finite-time Lyapunov function for an autonomous continuous-time nonlinear dynamical system if the origin of the system is asymptotically stable. Our proof extends the recent results in finite-time Lyapunov function theory by providing an alternative converse proof for the existence of finite-time Lyapunov functions. In particular, we show that given asymptotic stability of the origin, the linearized dynamics satisfy global finite-time Lyapunov function conditions hence proving the converse theorem. Using our results, we present a consolidated theory for using and constructing Lyapunov functions to certify system stability properties. We also propose a constructive algorithm to efficiently compute non-conservative estimates of the domain of attraction for nonlinear dynamical systems.},\n\tlanguage = {en},\n\tauthor = {Pandey, Ayush and Ames, Aaron D.},\n\tpages = {7},\n}\n\n
\n
\n\n\n
\n The main result of the paper is a new converse theorem for finite-time Lyapunov functions. We show the existence of a finite-time Lyapunov function for an autonomous continuous-time nonlinear dynamical system if the origin of the system is asymptotically stable. Our proof extends the recent results in finite-time Lyapunov function theory by providing an alternative converse proof for the existence of finite-time Lyapunov functions. In particular, we show that given asymptotic stability of the origin, the linearized dynamics satisfy global finite-time Lyapunov function conditions hence proving the converse theorem. Using our results, we present a consolidated theory for using and constructing Lyapunov functions to certify system stability properties. We also propose a constructive algorithm to efficiently compute non-conservative estimates of the domain of attraction for nonlinear dynamical systems.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n Invariant Sets for Integrators and Quadrotor Obstacle Avoidance.\n \n \n \n\n\n \n Doeser, L.; Nilsson, P.; Ames, A. D; and Murray, R. M\n\n\n \n\n\n\n ,8. .\n
\n\n
\n\n
\n\n
\n\n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{doeser_invariant_nodate,\n\ttitle = {Invariant {Sets} for {Integrators} and {Quadrotor} {Obstacle} {Avoidance}},\n\tabstract = {Ensuring safety through set invariance has proven a useful method in a variety of applications in robotics and control. However, finding analytical expressions for maximal invariant sets, so as to maximize the operational freedom of the system without compromising safety, is notoriously difficult for high-dimensional systems with input constraints. Here we present a generic method for characterizing invariant sets of nth-order integrator systems, based on analyzing roots of univariate polynomials. Additionally, we obtain analytical expressions for the orders n ≤ 4. Using differential flatness we subsequently leverage the results for the n = 4 case to the problem of obstacle avoidance for quadrotor UAVs. The resulting controller has a light computational footprint that showcases the power of finding analytical expressions for control-invariant sets.},\n\tlanguage = {en},\n\tauthor = {Doeser, Ludvig and Nilsson, Petter and Ames, Aaron D. and Murray, Richard M.},\n\tpages = {8},\n}\n\n
\n
\n\n\n
\n Ensuring safety through set invariance has proven a useful method in a variety of applications in robotics and control. However, finding analytical expressions for maximal invariant sets, so as to maximize the operational freedom of the system without compromising safety, is notoriously difficult for high-dimensional systems with input constraints. Here we present a generic method for characterizing invariant sets of nth-order integrator systems, based on analyzing roots of univariate polynomials. Additionally, we obtain analytical expressions for the orders n ≤ 4. Using differential flatness we subsequently leverage the results for the n = 4 case to the problem of obstacle avoidance for quadrotor UAVs. The resulting controller has a light computational footprint that showcases the power of finding analytical expressions for control-invariant sets.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n Lyapunov-Like Conditions for Tight Exit Probability Bounds through Comparison Theorems for SDEs.\n \n \n \n\n\n \n Nilsson, P.; and Ames, A. D\n\n\n \n\n\n\n ,7. .\n
\n\n
\n\n
\n\n
\n\n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{nilsson_lyapunov-like_nodate,\n\ttitle = {Lyapunov-{Like} {Conditions} for {Tight} {Exit} {Probability} {Bounds} through {Comparison} {Theorems} for {SDEs}},\n\tabstract = {Computing upper bounds on exit probabilities—the probability that a system reaches certain “bad” sets—may assist decision-making in control of stochastic systems. Existing analytical bounds for systems described by stochastic differential equations are quite loose, especially for low-probability events, which limits their applicability in practical situations. In this paper we analyze why existing bounds are loose, and conclude that it is a fundamental issue with the underlying techniques based on martingale inequalities. As an alternative, we give comparison results for stochastic differential equations that via a Lyapunov-like function allow exit probabilities of an n-dimensional system to be upper-bounded by an exit probability of a one-dimensional Ornstein-Uhlenbeck process. Even though no closed-form expression is known for the latter, it depends on three or four parameters and can be a priori tabulated for applications. We extend these ideas to the controlled setting and state a stochastic analogue of control barrier functions. The bounds are illustrated on numerical examples and are shown to be much tighter than those based on martingale inequalities.},\n\tlanguage = {en},\n\tauthor = {Nilsson, Petter and Ames, Aaron D.},\n\tpages = {7},\n}\n\n
\n
\n\n\n
\n Computing upper bounds on exit probabilities—the probability that a system reaches certain “bad” sets—may assist decision-making in control of stochastic systems. Existing analytical bounds for systems described by stochastic differential equations are quite loose, especially for low-probability events, which limits their applicability in practical situations. In this paper we analyze why existing bounds are loose, and conclude that it is a fundamental issue with the underlying techniques based on martingale inequalities. As an alternative, we give comparison results for stochastic differential equations that via a Lyapunov-like function allow exit probabilities of an n-dimensional system to be upper-bounded by an exit probability of a one-dimensional Ornstein-Uhlenbeck process. Even though no closed-form expression is known for the latter, it depends on three or four parameters and can be a priori tabulated for applications. We extend these ideas to the controlled setting and state a stochastic analogue of control barrier functions. The bounds are illustrated on numerical examples and are shown to be much tighter than those based on martingale inequalities.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n Distributed Feedback Controllers for Stable Cooperative Locomotion of Quadrupedal Robots: A Virtual Constraint Approach.\n \n \n \n\n\n \n Hamed, K. A.\n\n\n \n\n\n\n ,8. .\n
\n\n
\n\n
\n\n
\n\n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{hamed_distributed_nodate,\n\ttitle = {Distributed {Feedback} {Controllers} for {Stable} {Cooperative} {Locomotion} of {Quadrupedal} {Robots}: {A} {Virtual} {Constraint} {Approach}},\n\tabstract = {This paper aims to develop distributed feedback control algorithms that allow cooperative locomotion of quadrupedal robots which are coupled to each other by holonomic constraints. These constraints can arise from collaborative manipulation of objects during locomotion. In addressing this problem, the complex hybrid dynamical models that describe collaborative legged locomotion are studied. The complex periodic orbits (i.e., gaits) of these sophisticated and high-dimensional hybrid systems are investigated. We consider a set of virtual constraints that stabilizes locomotion of a single agent. The paper then generates modified and local virtual constraints for each agent that allow stable collaborative locomotion. Optimal distributed feedback controllers, based on nonlinear control and quadratic programming, are developed to impose the local virtual constraints. To demonstrate the power of the analytical foundation, an extensive numerical simulation for cooperative locomotion of two quadrupedal robots with robotic manipulators is presented. The numerical complex hybrid model has 64 continuous-time domains, 192 discrete-time transitions, 96 state variables, and 36 control inputs.},\n\tlanguage = {en},\n\tauthor = {Hamed, Kaveh Akbari},\n\tpages = {8},\n}\n\n
\n
\n\n\n
\n This paper aims to develop distributed feedback control algorithms that allow cooperative locomotion of quadrupedal robots which are coupled to each other by holonomic constraints. These constraints can arise from collaborative manipulation of objects during locomotion. In addressing this problem, the complex hybrid dynamical models that describe collaborative legged locomotion are studied. The complex periodic orbits (i.e., gaits) of these sophisticated and high-dimensional hybrid systems are investigated. We consider a set of virtual constraints that stabilizes locomotion of a single agent. The paper then generates modified and local virtual constraints for each agent that allow stable collaborative locomotion. Optimal distributed feedback controllers, based on nonlinear control and quadratic programming, are developed to impose the local virtual constraints. To demonstrate the power of the analytical foundation, an extensive numerical simulation for cooperative locomotion of two quadrupedal robots with robotic manipulators is presented. The numerical complex hybrid model has 64 continuous-time domains, 192 discrete-time transitions, 96 state variables, and 36 control inputs.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n Optimal Safe Controller Synthesis: A Density Function Approach.\n \n \n \n\n\n \n Chen, Y.; Ahmadi, M.; and Ames, A. D\n\n\n \n\n\n\n ,6. .\n
\n\n
\n\n
\n\n
\n\n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{chen_optimal_nodate,\n\ttitle = {Optimal {Safe} {Controller} {Synthesis}: {A} {Density} {Function} {Approach}},\n\tabstract = {This paper considers the synthesis of optimal safe controllers based on density functions. We present an algorithm for robust constrained optimal control synthesis using the duality relationship between the density function and the value function. The density function follows the Liouville equation and is the dual of the value function, which satisfies Bellman’s optimality principle. Thanks to density functions, constraints over the distribution of states, such as safety constraints, can be posed straightforwardly in an optimal control problem. The constrained optimal control problem is then solved with a primal-dual algorithm. This formulation is extended to the case with external disturbances, and we show that the robust constrained optimal control can be solved with a modified primal-dual algorithm. We apply this formulation to the problem of finding the optimal safe controller that minimizes the cumulative intervention. An adaptive cruise control (ACC) example is used to demonstrate the efficacy of the proposed, wherein we compare the result of the density function approach with the conventional control barrier function (CBF) method.},\n\tlanguage = {en},\n\tauthor = {Chen, Yuxiao and Ahmadi, Mohamadreza and Ames, Aaron D.},\n\tpages = {6},\n}\n\n
\n
\n\n\n
\n This paper considers the synthesis of optimal safe controllers based on density functions. We present an algorithm for robust constrained optimal control synthesis using the duality relationship between the density function and the value function. The density function follows the Liouville equation and is the dual of the value function, which satisfies Bellman’s optimality principle. Thanks to density functions, constraints over the distribution of states, such as safety constraints, can be posed straightforwardly in an optimal control problem. The constrained optimal control problem is then solved with a primal-dual algorithm. This formulation is extended to the case with external disturbances, and we show that the robust constrained optimal control can be solved with a modified primal-dual algorithm. We apply this formulation to the problem of finding the optimal safe controller that minimizes the cumulative intervention. An adaptive cruise control (ACC) example is used to demonstrate the efficacy of the proposed, wherein we compare the result of the density function approach with the conventional control barrier function (CBF) method.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n Continuous-Time Optimization of Time-Varying Cost Functions Via Finite-Time Stability with Pre-Defined Convergence Time.\n \n \n \n\n\n \n Romero, O.; and Benosman, M.\n\n\n \n\n\n\n ,6. .\n
\n\n
\n\n
\n\n
\n\n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{romero_continuous-time_nodate,\n\ttitle = {Continuous-{Time} {Optimization} of {Time}-{Varying} {Cost} {Functions} {Via} {Finite}-{Time} {Stability} with {Pre}-{Defined} {Convergence} {Time}},\n\tabstract = {In this paper, we propose a new family of continuous-time optimization algorithms for time-varying, locally strongly convex cost functions, based on discontinuous second-order gradient optimization flows with provable finite-time convergence to local optima. To analyze our flows, we first extend a well-known Lyapunov inequality condition for finite-time stability, to the case of arbitrary time-varying differential inclusions, particularly of the Filippov type. We then prove the convergence of our proposed flows in finite time. We illustrate the performance of our proposed flows on a quadratic cost function to track a decaying sinusoid.},\n\tlanguage = {en},\n\tauthor = {Romero, Orlando and Benosman, Mouhacine},\n\tpages = {6},\n}\n\n
\n
\n\n\n
\n In this paper, we propose a new family of continuous-time optimization algorithms for time-varying, locally strongly convex cost functions, based on discontinuous second-order gradient optimization flows with provable finite-time convergence to local optima. To analyze our flows, we first extend a well-known Lyapunov inequality condition for finite-time stability, to the case of arbitrary time-varying differential inclusions, particularly of the Filippov type. We then prove the convergence of our proposed flows in finite time. We illustrate the performance of our proposed flows on a quadratic cost function to track a decaying sinusoid.\n
\n\n\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n STABILIZATION WITH RELAXED CONTROLS.\n \n \n \n \n\n\n \n Artstein\n\n\n \n\n\n\n \n
ISSN: 10.1016/0362-546X(83)90049-4 Library Catalog: reader.elsevier.com\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{artstein_stabilization_nodate,\n\ttitle = {Stabilization with Relaxed Controls},\n\tjournal = {Nonlinear Analysis: Theory, Methods \& Applications},\n\tyear = {1983},\n\tlanguage = {en},\n\tauthor = {Artstein, Zvi},\n\tdoi = {10.1016/0362-546X(83)90049-4},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Path-following for linear systems with unstable zero dynamics.\n \n \n \n \n\n\n \n \n\n\n \n\n\n\n \n
Library Catalog: reader.elsevier.com\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{noauthor_path-following_nodate,\n\ttitle = {Path-following for linear systems with unstable zero dynamics},\n\tjournal = {Automatica},\n\tyear = {2006},\n\tlanguage = {en},\n\tdoi = {10.1016/j.automatica.2006.05.014},\n\tinternal-note = {author field missing; recover via DOI metadata},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n Safety-Critical Rapid Aerial Exploration of Unknown Environments.\n \n \n \n\n\n \n Singletary, A.; Gurriet, T.; Nilsson, P.; and Ames, A.\n\n\n \n\n\n\n ,7. .\n
\n\n
\n\n
\n\n
\n\n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{singletary_safety-critical_nodate,\n\tauthor = {Singletary, Andrew and Gurriet, Thomas and Nilsson, Petter and Ames, Aaron},\n\ttitle = {Safety-{Critical} {Rapid} {Aerial} {Exploration} of {Unknown} {Environments}},\n\tabstract = {This paper details a novel approach to collision avoidance for aerial vehicles that enables high-speed flight in uncertain environments. This framework is applied at the controller level and provides safety regardless of the planner that is used. The method is shown to be robust to state uncertainty and disturbances, and is computed entirely online utilizing the full nonlinear system dynamics. The effectiveness of this method is shown in a high-fidelity simulation of a quadrotor with onboard sensors rapidly and safely exploring a cave environment utilizing a simple planner.},\n\tlanguage = {en},\n\tpages = {7},\n}\n
\n
\n\n\n
\n This paper details a novel approach to collision avoidance for aerial vehicles that enables high-speed flight in uncertain environments. This framework is applied at the controller level and provides safety regardless of the planner that is used. The method is shown to be robust to state uncertainty and disturbances, and is computed entirely online utilizing the full nonlinear system dynamics. The effectiveness of this method is shown in a high-fidelity simulation of a quadrotor with onboard sensors rapidly and safely exploring a cave environment utilizing a simple planner.\n
\n\n\n
\n\n\n\n\n\n