\n \n \n
\n
\n\n \n \n \n \n \n Foundation Models in Robotics: Applications, Challenges, and the Future.\n \n \n \n\n\n \n Firoozi, R.; Tucker, J.; Tian, S.; Majumdar, A.; Sun, J.; Liu, W.; Zhu, Y.; Song, S.; Kapoor, A.; Hausman, K.; Ichter, B.; Driess, D.; Wu, J.; Lu, C.; and Schwager, M.\n\n\n \n\n\n\n 2023.\n
\n\n
\n\n
\n\n
\n\n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@misc{firoozi2023foundation,\n  author = {Roya Firoozi and Johnathan Tucker and Stephen Tian and Anirudha Majumdar and Jiankai Sun and Weiyu Liu and Yuke Zhu and Shuran Song and Ashish Kapoor and Karol Hausman and Brian Ichter and Danny Driess and Jiajun Wu and Cewu Lu and Mac Schwager},\n  title = {Foundation Models in Robotics: Applications, Challenges, and the Future},\n  year = {2023},\n  eprint = {2312.07843},\n  archiveprefix = {arXiv},\n  primaryclass = {cs.RO},\n  date-added = {2024-05-27 14:23:55 -0700},\n  date-modified = {2024-05-27 14:23:55 -0700}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n A Holistic Assessment of the Reliability of Machine Learning Systems.\n \n \n \n\n\n \n Corso, A.; Karamadian, D.; Valentin, R.; Cooper, M.; and Kochenderfer, M. J.\n\n\n \n\n\n\n
arXiv. 2023.\n
\n\n
\n\n
\n\n
\n\n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{Corso2023,\n  author = {Anthony Corso and David Karamadian and Romeo Valentin and Mary Cooper and Mykel J. Kochenderfer},\n  title = {A Holistic Assessment of the Reliability of Machine Learning Systems},\n  journal = {arXiv e-prints},\n  year = {2023},\n  doi = {10.48550/ARXIV.2307.10586},\n  eprint = {2307.10586},\n  archiveprefix = {arXiv},\n  primaryclass = {cs.LG}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n Model-based Validation as Probabilistic Inference.\n \n \n \n\n\n \n Delecki, H.; Corso, A.; and Kochenderfer, M. J.\n\n\n \n\n\n\n In
Conference on Learning for Dynamics and Control (L4DC), 2023. \n
\n\n
\n\n
\n\n
\n\n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{Delecki2023,\n  author = {Delecki, Harrison and Corso, Anthony and Kochenderfer, Mykel J.},\n  title = {Model-based Validation as Probabilistic Inference},\n  booktitle = {Conference on Learning for Dynamics and Control (L4DC)},\n  year = {2023},\n  doi = {10.48550/ARXIV.2305.09930}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n A Holistic Assessment of the Reliability of Machine Learning Systems.\n \n \n \n\n\n \n Corso, A.; Karamadian, D.; Valentin, R.; Cooper, M.; and Kochenderfer, M. J.\n\n\n \n\n\n\n
arXiv e-prints, arXiv:2307.10586. July 2023.\n
\n\n
\n\n
\n\n
\n\n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n \n \n\n\n\n
\n
@ARTICLE{2023arXiv230710586C,\n author = {{Corso}, Anthony and {Karamadian}, David and {Valentin}, Romeo and {Cooper}, Mary and {Kochenderfer}, Mykel J.},\n title = {A Holistic Assessment of the Reliability of Machine Learning Systems},\n journal = {arXiv e-prints},\n keywords = {Computer Science - Machine Learning},\n year = 2023,\n month = jul,\n eid = {arXiv:2307.10586},\n pages = {arXiv:2307.10586},\n doi = {10.48550/arXiv.2307.10586},\narchivePrefix = {arXiv},\n eprint = {2307.10586},\n primaryClass = {cs.LG},\n adsurl = {https://ui.adsabs.harvard.edu/abs/2023arXiv230710586C},\n adsnote = {Provided by the SAO/NASA Astrophysics Data System}\n}\n@comment{NOTE(review): this ADS auto-export duplicates entry Corso2023 earlier in this file (same title, same authors, same DOI 10.48550/arXiv.2307.10586). Keep only one key and point citations at it.}\n\n\n
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n VeriX: Towards Verified Explainability of Deep Neural Networks.\n \n \n \n \n\n\n \n Wu, M.; Wu, H.; and Barrett, C.\n\n\n \n\n\n\n In Oh, A.; Neumann, T.; Globerson, A.; Saenko, K.; Hardt, M.; and Levine, S., editor(s),
Advances in Neural Information Processing Systems 36 (NeurIPS 2023), volume 36, pages 22247–22268, 2023. Curran Associates, Inc.\n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n \n \n 6 downloads\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{WWB23,\n  url = {https://proceedings.neurips.cc/paper_files/paper/2023/file/46907c2ff9fafd618095161d76461842-Paper-Conference.pdf},\n  author = {Min Wu and Haoze Wu and Clark Barrett},\n  title = {{VeriX}: Towards Verified Explainability of Deep Neural Networks},\n  booktitle = {Advances in Neural Information Processing Systems 36 (NeurIPS 2023)},\n  editor = {A. Oh and T. Neumann and A. Globerson and K. Saenko and M. Hardt and S. Levine},\n  publisher = {Curran Associates, Inc.},\n  pages = {22247--22268},\n  volume = {36},\n  month = dec,\n  year = {2023},\n  category = {Conference Publications},\n  abstract = {We present VeriX (Verified eXplainability), a system for producing optimal robust explanations and generating counterfactuals along decision boundaries of machine learning models. We build such explanations and counterfactuals iteratively using constraint solving techniques and a heuristic based on feature-level sensitivity ranking. We evaluate our method on image recognition benchmarks and a real-world scenario of autonomous aircraft taxiing.}\n}\n\n
\n
\n\n\n
\n We present VeriX (Verified eXplainability), a system for producing optimal robust explanations and generating counterfactuals along decision boundaries of machine learning models. We build such explanations and counterfactuals iteratively using constraint solving techniques and a heuristic based on feature-level sensitivity ranking. We evaluate our method on image recognition benchmarks and a real-world scenario of autonomous aircraft taxiing.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n DNN Verification, Reachability, and the Exponential Function Problem.\n \n \n \n \n\n\n \n Isac, O.; Zohar, Y.; Barrett, C.; and Katz, G.\n\n\n \n\n\n\n In Pérez, G. A.; and Raskin, J., editor(s),
$34^{th}$ International Conference on Concurrency Theory (CONCUR '23), volume 279, of
Leibniz International Proceedings in Informatics (LIPIcs), pages 26:1–26:18, Dagstuhl, Germany, September 2023. Schloss Dagstuhl – Leibniz-Zentrum für Informatik\n
Antwerp, Belgium\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n \n \n 6 downloads\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@InProceedings{IZB+23,\n url = "https://drops.dagstuhl.de/opus/volltexte/2023/19020/",\n author = "Isac, Omri and Zohar, Yoni and Barrett, Clark and Katz, Guy",\n title = "{DNN} Verification, Reachability, and the Exponential Function Problem",\n booktitle = "$34^{th}$ International Conference on Concurrency Theory (CONCUR '23)",\n pages = "26:1--26:18",\n series = "Leibniz International Proceedings in Informatics (LIPIcs)",\n ISBN = "978-3-95977-299-0",\n ISSN = "1868-8969",\n month = sep,\n year = 2023,\n volume = 279,\n editor = "P\'{e}rez, Guillermo A. and Raskin, Jean-Fran\c{c}ois",\n publisher = "Schloss Dagstuhl -- Leibniz-Zentrum f{\"u}r Informatik",\n address = "Dagstuhl, Germany",\n doi = "10.4230/LIPIcs.CONCUR.2023.26",\n note = "Antwerp, Belgium",\n category = "Conference Publications",\n abstract = "Deep neural networks (DNNs) are increasingly being deployed to\n perform safety-critical tasks. The opacity of DNNs, which\n prevents humans from reasoning about them, presents new\n safety and security challenges. To address these challenges,\n the verification community has begun developing techniques\n for rigorously analyzing DNNs, with numerous verification\n algorithms proposed in recent years. While a significant\n amount of work has gone into developing these verification\n algorithms, little work has been devoted to rigorously\n studying the computability and complexity of the underlying\n theoretical problems. Here, we seek to contribute to the\n bridging of this gap. We focus on two kinds of DNNs: those\n that employ piecewise-linear activation functions (e.g.,\n ReLU), and those that employ piecewise-smooth activation\n functions (e.g., Sigmoids). 
We prove the two following\n theorems: (i) the decidability of verifying DNNs with a\n particular set of piecewise-smooth activation functions,\n including Sigmoid and tanh, is equivalent to a well-known,\n open problem formulated by Tarski; and (ii) the DNN\n verification problem for any quantifier-free linear\n arithmetic specification can be reduced to the DNN\n reachability problem, whose approximation is\n NP-complete. These results answer two fundamental questions\n about the computability and complexity of DNN verification,\n and the ways it is affected by the network's activation\n functions and error tolerance; and could help guide future\n efforts in developing DNN verification tools.",\n}\n\n\n
\n
\n\n\n
\n Deep neural networks (DNNs) are increasingly being deployed to perform safety-critical tasks. The opacity of DNNs, which prevents humans from reasoning about them, presents new safety and security challenges. To address these challenges, the verification community has begun developing techniques for rigorously analyzing DNNs, with numerous verification algorithms proposed in recent years. While a significant amount of work has gone into developing these verification algorithms, little work has been devoted to rigorously studying the computability and complexity of the underlying theoretical problems. Here, we seek to contribute to the bridging of this gap. We focus on two kinds of DNNs: those that employ piecewise-linear activation functions (e.g., ReLU), and those that employ piecewise-smooth activation functions (e.g., Sigmoids). We prove the two following theorems: (i) the decidability of verifying DNNs with a particular set of piecewise-smooth activation functions, including Sigmoid and tanh, is equivalent to a well-known, open problem formulated by Tarski; and (ii) the DNN verification problem for any quantifier-free linear arithmetic specification can be reduced to the DNN reachability problem, whose approximation is NP-complete. These results answer two fundamental questions about the computability and complexity of DNN verification, and the ways it is affected by the network’s activation functions and error tolerance; and could help guide future efforts in developing DNN verification tools.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Toward Certified Robustness Against Real-World Distribution Shifts.\n \n \n \n \n\n\n \n Wu, H.; Tagomori, T.; Robey, A.; Yang, F.; Matni, N.; Pappas, G.; Hassani, H.; Păsăreanu, C.; and Barrett, C.\n\n\n \n\n\n\n In McDaniel, P.; and Papernot, N., editor(s),
Proceedings of the 2023 IEEE Conference on Secure and Trustworthy Machine Learning (SaTML), pages 537–553, February 2023. IEEE\n
Raleigh, NC\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n \n \n 6 downloads\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{WTR+23,\n  url = {http://theory.stanford.edu/~barrett/pubs/WTR+23.pdf},\n  author = {Haoze Wu and Teruhiro Tagomori and Alexander Robey and Fengjun Yang and Nikolai Matni and George Pappas and Hamed Hassani and Corina P{\u{a}}s{\u{a}}reanu and Clark Barrett},\n  title = {Toward Certified Robustness Against Real-World Distribution Shifts},\n  booktitle = {Proceedings of the 2023 IEEE Conference on Secure and Trustworthy Machine Learning (SaTML)},\n  publisher = {IEEE},\n  editor = {Patrick McDaniel and Nicolas Papernot},\n  month = feb,\n  pages = {537--553},\n  doi = {10.1109/SaTML54575.2023.00042},\n  year = 2023,\n  note = {Raleigh, NC},\n  category = {Conference Publications},\n  abstract = {We consider the problem of certifying the robustness of deep\n neural networks against real-world distribution shifts. To do\n so, we bridge the gap between hand-crafted specifications and\n realistic deployment settings by considering a\n neural-symbolic verification framework in which generative\n models are trained to learn perturbations from data and\n specifications are defined with respect to the output of\n these learned models. A pervasive challenge arising from this\n setting is that although S-shaped activations (e.g., sigmoid,\n tanh) are common in the last layer of deep generative models,\n existing verifiers cannot tightly approximate S-shaped\n activations. To address this challenge, we propose a general\n meta-algorithm for handling S-shaped activations which\n leverages classical notions of counter-example-guided\n abstraction refinement. The key idea is to ``lazily'' refine\n the abstraction of S-shaped functions to exclude spurious\n counter-examples found in the previous abstraction, thus\n guaranteeing progress in the verification process while\n keeping the state-space small. 
For networks with sigmoid\n activations, we show that our technique outperforms\n state-of-the-art verifiers on certifying robustness against\n both canonical adversarial perturbations and numerous\n real-world distribution shifts. Furthermore, experiments on\n the MNIST and CIFAR-10 datasets show that\n distribution-shift-aware algorithms have significantly higher\n certified robustness against distribution shifts.}\n}\n\n
\n
\n\n\n
\n We consider the problem of certifying the robustness of deep neural networks against real-world distribution shifts. To do so, we bridge the gap between hand-crafted specifications and realistic deployment settings by considering a neural-symbolic verification framework in which generative models are trained to learn perturbations from data and specifications are defined with respect to the output of these learned models. A pervasive challenge arising from this setting is that although S-shaped activations (e.g., sigmoid, tanh) are common in the last layer of deep generative models, existing verifiers cannot tightly approximate S-shaped activations. To address this challenge, we propose a general meta-algorithm for handling S-shaped activations which leverages classical notions of counter-example-guided abstraction refinement. The key idea is to ``lazily'' refine the abstraction of S-shaped functions to exclude spurious counter-examples found in the previous abstraction, thus guaranteeing progress in the verification process while keeping the state-space small. For networks with sigmoid activations, we show that our technique outperforms state-of-the-art verifiers on certifying robustness against both canonical adversarial perturbations and numerous real-world distribution shifts. Furthermore, experiments on the MNIST and CIFAR-10 datasets show that distribution-shift-aware algorithms have significantly higher certified robustness against distribution shifts.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n Identifying and Mitigating the Security Risks of Generative AI.\n \n \n \n\n\n \n Barrett, C.; Boyd, B.; Burzstein, E.; Carlini, N.; Chen, B.; Choi, J.; Chowdhury, A. R.; Christodorescu, M.; Datta, A.; Feizi, S.; Fisher, K.; Hashimoto, T.; Hendrycks, D.; Jha, S.; Kang, D.; Kerschbaum, F.; Mitchell, E.; Mitchell, J.; Ramzan, Z.; Shams, K.; Song, D.; Taly, A.; and Yang, D.\n\n\n \n\n\n\n
arXiv e-prints, arXiv:2308.14840. August 2023.\n
\n\n
\n\n
\n\n
\n\n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n \n \n\n\n\n
\n
@ARTICLE{2023arXiv230814840B,\n author = {{Barrett}, Clark and {Boyd}, Brad and {Burzstein}, Ellie and {Carlini}, Nicholas and {Chen}, Brad and {Choi}, Jihye and {Chowdhury}, Amrita Roy and {Christodorescu}, Mihai and {Datta}, Anupam and {Feizi}, Soheil and {Fisher}, Kathleen and {Hashimoto}, Tatsunori and {Hendrycks}, Dan and {Jha}, Somesh and {Kang}, Daniel and {Kerschbaum}, Florian and {Mitchell}, Eric and {Mitchell}, John and {Ramzan}, Zulfikar and {Shams}, Khawaja and {Song}, Dawn and {Taly}, Ankur and {Yang}, Diyi},\n title = {Identifying and Mitigating the Security Risks of Generative {AI}},\n journal = {arXiv e-prints},\n keywords = {Computer Science - Artificial Intelligence},\n year = 2023,\n month = aug,\n eid = {arXiv:2308.14840},\n pages = {arXiv:2308.14840},\n doi = {10.48550/arXiv.2308.14840},\n archivePrefix = {arXiv},\n eprint = {2308.14840},\n primaryClass = {cs.AI},\n adsurl = {https://ui.adsabs.harvard.edu/abs/2023arXiv230814840B},\n adsnote = {Provided by the SAO/NASA Astrophysics Data System}\n}\n@comment{NOTE(review): ADS auto-export. "Burzstein, Ellie" is presumably "Bursztein, Elie" -- TODO confirm the spelling against the arXiv author listing before publishing.}\n\n\n
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n Neural city maps for GNSS nlos prediction.\n \n \n \n\n\n \n Neamati, D.; Gupta, S.; Partha, M.; and Gao, G.\n\n\n \n\n\n\n In
Proceedings of the 36th International Technical Meeting of the Satellite Division of The Institute of Navigation (ION GNSS+ 2023), pages 2073–2087, 2023. \n
\n\n
\n\n
\n\n
\n\n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{neamati2023neural,\n  author = {Neamati, Daniel and Gupta, Shubh and Partha, Mira and Gao, Grace},\n  title = {Neural City Maps for {GNSS} {NLOS} Prediction},\n  booktitle = {Proceedings of the 36th International Technical Meeting of the Satellite Division of The Institute of Navigation (ION GNSS+ 2023)},\n  pages = {2073--2087},\n  year = {2023}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n Safeguarding Learning-Based Planners Under Motion and Sensing Uncertainties Using Reachability Analysis.\n \n \n \n\n\n \n Shetty, A.; Dai, A.; Tzikas, A.; and Gao, G.\n\n\n \n\n\n\n In
2023 IEEE International Conference on Robotics and Automation (ICRA), pages 7872–7878, 2023. IEEE\n
\n\n
\n\n
\n\n
\n\n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{shetty2023safeguarding,\n  author = {Shetty, Akshay and Dai, Adam and Tzikas, Alexandros and Gao, Grace},\n  title = {Safeguarding Learning-Based Planners Under Motion and Sensing Uncertainties Using Reachability Analysis},\n  booktitle = {2023 IEEE International Conference on Robotics and Automation (ICRA)},\n  pages = {7872--7878},\n  year = {2023},\n  organization = {IEEE}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n Bounding GPS-Based Positioning and Navigation Uncertainty for Autonomous Drifting via Reachability.\n \n \n \n\n\n \n Wu, A.; Mohanty, A.; Zaman, A.; and Gao, G.\n\n\n \n\n\n\n In
Proceedings of the 36th International Technical Meeting of the Satellite Division of The Institute of Navigation (ION GNSS+ 2023), pages 712–726, 2023. \n
\n\n
\n\n
\n\n
\n\n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{wu2023bounding,\n  author = {Wu, Asta and Mohanty, Adyasha and Zaman, Anonto and Gao, Grace},\n  title = {Bounding {GPS}-Based Positioning and Navigation Uncertainty for Autonomous Drifting via Reachability},\n  booktitle = {Proceedings of the 36th International Technical Meeting of the Satellite Division of The Institute of Navigation (ION GNSS+ 2023)},\n  pages = {712--726},\n  year = {2023}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Risk-aware autonomous localization in harsh urban environments with mosaic zonotope shadow matching.\n \n \n \n \n\n\n \n Neamati, D.; Bhamidipati, S.; and Gao, G.\n\n\n \n\n\n\n
Artificial Intelligence, 104000. 2023.\n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{neamati2023risk,\n  author = {Daniel Neamati and Sriramya Bhamidipati and Grace Gao},\n  title = {Risk-aware autonomous localization in harsh urban environments with mosaic zonotope shadow matching},\n  journal = {Artificial Intelligence},\n  pages = {104000},\n  year = {2023},\n  issn = {0004-3702},\n  doi = {10.1016/j.artint.2023.104000},\n  url = {https://www.sciencedirect.com/science/article/pii/S0004370223001467},\n  keywords = {GNSS, Shadow matching, Set-based, Risk-aware localization}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n Discovering Closed-Loop Failures of Vision-Based Controllers Via Reachability Analysis.\n \n \n \n\n\n \n Chakraborty, K.; and Bansal, S.\n\n\n \n\n\n\n
IEEE Robotics and Automation Letters. 2023.\n
\n\n
\n\n
\n\n
\n\n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{chakraborty2023discovering,\n  author = {Chakraborty, Kaustav and Bansal, Somil},\n  title = {Discovering Closed-Loop Failures of Vision-Based Controllers Via Reachability Analysis},\n  journal = {IEEE Robotics and Automation Letters},\n  year = {2023},\n  publisher = {IEEE}\n}
\n
\n\n\n\n
\n\n\n\n\n\n