2024 (11)

Splat-Nav: Safe Real-Time Robot Navigation in Gaussian Splatting Maps.
Chen, T.; Shorinwa, O.; Zeng, W.; Bruno, J.; Dames, P.; and Schwager, M.
arXiv preprint arXiv:2403.02751. 2024.

@article{chen2024splat,
  author = {Chen, Timothy and Shorinwa, Ola and Zeng, Weijia and Bruno, Joseph and Dames, Philip and Schwager, Mac},
  date-added = {2024-05-27 12:30:04 -0700},
  date-modified = {2024-05-27 12:30:04 -0700},
  journal = {arXiv preprint arXiv:2403.02751},
  title = {Splat-Nav: Safe Real-Time Robot Navigation in Gaussian Splatting Maps},
  year = {2024}
}

State Estimation and Belief Space Planning Under Epistemic Uncertainty for Learning-Based Perception Systems.
Nagami, K.; and Schwager, M.
IEEE Robotics and Automation Letters, 9(6): 5118-5125. 2024.

@article{Nagami2024state,
  author = {Nagami, Keiko and Schwager, Mac},
  date-added = {2024-05-27 12:30:45 -0700},
  date-modified = {2024-05-27 12:31:46 -0700},
  doi = {10.1109/LRA.2024.3387139},
  journal = {IEEE Robotics and Automation Letters},
  number = {6},
  pages = {5118-5125},
  title = {State Estimation and Belief Space Planning Under Epistemic Uncertainty for Learning-Based Perception Systems},
  volume = {9},
  year = {2024}
}

ConstrainedZero: Chance-constrained POMDP planning using learned probabilistic failure surrogates and adaptive safety constraints.
Moss, R. J.; Jamgochian, A.; Fischer, J.; Corso, A.; and Kochenderfer, M. J.
In International Joint Conference on Artificial Intelligence (IJCAI), 2024.

@InProceedings{Moss2024ijcai,
  author    = {Robert J. Moss and Arec Jamgochian and Johannes Fischer and Anthony Corso and Mykel J. Kochenderfer},
  booktitle = ijcai,
  title     = {Constrained{Z}ero: {C}hance-constrained {POMDP} planning using learned probabilistic failure surrogates and adaptive safety constraints},
  year      = {2024},
  url       = {https://arxiv.org/abs/2405.00644},
}

Trajectory optimization for adaptive informative path planning with multimodal sensing.
Ott, J.; Balaban, E.; and Kochenderfer, M. J.
In CoDIT, 2024.

@InProceedings{Ott2024ipp,
  author    = {Joshua Ott and Edward Balaban and Mykel J. Kochenderfer},
  booktitle = codit,
  title     = {Trajectory optimization for adaptive informative path planning with multimodal sensing},
  year      = {2024},
  url       = {https://arxiv.org/abs/2404.18374},
}

Risk-aware meta-level decision making for exploration under uncertainty.
Ott, J.; Kim, S.; Bouman, A.; Peltzer, O.; Sobue, M.; Delecki, H.; Kochenderfer, M. J.; Burdick, J.; and Agha-mohammadi, A.
In CoDIT, 2024.

@InProceedings{Ott2024meta,
  author    = {Joshua Ott and Sung-Kyun Kim and Amanda Bouman and Oriana Peltzer and Mamoru Sobue and Harrison Delecki and Mykel J. Kochenderfer and Joel Burdick and Ali-akbar Agha-mohammadi},
  booktitle = codit,
  title     = {Risk-aware meta-level decision making for exploration under uncertainty},
  year      = {2024},
  url       = {https://arxiv.org/abs/2209.05580},
}

Disentangled Neural Relational Inference for Interpretable Motion Prediction.
Dax, V. M.; Li, J.; Sachdeva, E.; Agarwal, N.; and Kochenderfer, M. J.
IEEE Robotics and Automation Letters, 9(2): 1452–1459. 2024.

@Article{Dax2024,
  author  = {Dax, Victoria M. and Li, Jiachen and Sachdeva, Enna and Agarwal, Nakul and Kochenderfer, Mykel J.},
  journal = {IEEE Robotics and Automation Letters},
  title   = {Disentangled Neural Relational Inference for Interpretable Motion Prediction},
  year    = {2024},
  number  = {2},
  pages   = {1452--1459},
  volume  = {9},
  doi     = {10.1109/lra.2023.3342554},
  url     = {https://arxiv.org/abs/2401.03599},
}

Marabou 2.0: A Versatile Formal Analyzer of Neural Networks.
Wu, H.; Isac, O.; Zeljić, A.; Tagomori, T.; Daggitt, M.; Kokke, W.; Refaeli, I.; Amir, G.; Julian, K.; Bassan, S.; Huang, P.; Lahav, O.; Wu, M.; Zhang, M.; Komendantskaya, E.; Katz, G.; and Barrett, C.
In Gurfinkel, A.; and Ganesh, V., editors, Proceedings of the 36th International Conference on Computer Aided Verification (CAV '24), Lecture Notes in Computer Science, July 2024. Springer. Montreal, Canada.

@inproceedings{WIZ+24,
  url       = "https://arxiv.org/abs/2401.14461",
  author    = "Haoze Wu and Omri Isac and Aleksandar Zelji{\'{c}} and Teruhiro Tagomori and Matthew Daggitt and Wen Kokke and Idan Refaeli and Guy Amir and Kyle Julian and Shahaf Bassan and Pei Huang and Ori Lahav and Min Wu and Min Zhang and Ekaterina Komendantskaya and Guy Katz and Clark Barrett",
  title     = "Marabou 2.0: A Versatile Formal Analyzer of Neural Networks",
  booktitle = "Proceedings of the $36^{th}$ International Conference on Computer Aided Verification (CAV '24)",
  series    = "Lecture Notes in Computer Science",
  publisher = "Springer",
  editor    = "Gurfinkel, Arie and Ganesh, Vijay",
  month     = jul,
  year      = 2024,
  note      = "Montreal, Canada",
  category  = "Conference Publications",
  abstract  = "This paper serves as a comprehensive system description of version 2.0 of the Marabou framework for formal analysis of neural networks. We discuss the tool's architectural design and highlight the major features and components introduced since its initial release."
}

Risks from Language Models for Automated Mental Healthcare: Ethics and Structure for Implementation.
Grabb, D.; Lamparth, M.; and Vasan, N.
medRxiv, 2024-04. 2024.

@article{grabb2024risks,
  title = {Risks from Language Models for Automated Mental Healthcare: Ethics and Structure for Implementation},
  author = {Grabb, Declan and Lamparth, Max and Vasan, Nina},
  journal = {medRxiv},
  pages = {2024--04},
  year = {2024},
  publisher = {Cold Spring Harbor Laboratory Press}
}

Escalation risks from language models in military and diplomatic decision-making.
Rivera, J.; Mukobi, G.; Reuel, A.; Lamparth, M.; Smith, C.; and Schneider, J.
In The 2024 ACM Conference on Fairness, Accountability, and Transparency, pages 836–898, 2024.

@inproceedings{rivera2024escalation,
  title = {Escalation risks from language models in military and diplomatic decision-making},
  author = {Rivera, Juan-Pablo and Mukobi, Gabriel and Reuel, Anka and Lamparth, Max and Smith, Chandler and Schneider, Jacquelyn},
  booktitle = {The 2024 ACM Conference on Fairness, Accountability, and Transparency},
  pages = {836--898},
  year = {2024}
}

Real-Time Anomaly Detection and Reactive Planning with Large Language Models.
Sinha, R.; Elhafsi, A.; Agia, C.; Foutter, M.; Schmerling, E.; and Pavone, M.
In Robotics: Science and Systems Conference, 2024.

@inproceedings{sinha2024real,
  title = {Real-Time Anomaly Detection and Reactive Planning with Large Language Models},
  author = {Sinha, Rohan and Elhafsi, Amine and Agia, Christopher and Foutter, Matthew and Schmerling, Edward and Pavone, Marco},
  booktitle = {Robotics: Science and Systems Conference},
  year = {2024}
}

Unpacking Failure Modes of Generative Policies: Runtime Monitoring of Consistency and Progress.
Agia, C.; Sinha, R.; Yang, J.; Cao, Z.; Antonova, R.; Pavone, M.; and Bohg, J.
In Robotics: Science and Systems Conference, 2024.

@inproceedings{agia2024unpacking,
  title = {Unpacking Failure Modes of Generative Policies: Runtime Monitoring of Consistency and Progress},
  author = {Agia, Christopher and Sinha, Rohan and Yang, Jingyun and Cao, Ziang and Antonova, Rika and Pavone, Marco and Bohg, Jeannette},
  booktitle = {Robotics: Science and Systems Conference},
  year = {2024}
}

2023 (12)

Foundation Models in Robotics: Applications, Challenges, and the Future.
Firoozi, R.; Tucker, J.; Tian, S.; Majumdar, A.; Sun, J.; Liu, W.; Zhu, Y.; Song, S.; Kapoor, A.; Hausman, K.; Ichter, B.; Driess, D.; Wu, J.; Lu, C.; and Schwager, M.
arXiv preprint arXiv:2312.07843. 2023.

@misc{firoozi2023foundation,
  archiveprefix = {arXiv},
  author = {Roya Firoozi and Johnathan Tucker and Stephen Tian and Anirudha Majumdar and Jiankai Sun and Weiyu Liu and Yuke Zhu and Shuran Song and Ashish Kapoor and Karol Hausman and Brian Ichter and Danny Driess and Jiajun Wu and Cewu Lu and Mac Schwager},
  date-added = {2024-05-27 14:23:55 -0700},
  date-modified = {2024-05-27 14:23:55 -0700},
  eprint = {2312.07843},
  primaryclass = {cs.RO},
  title = {Foundation Models in Robotics: Applications, Challenges, and the Future},
  year = {2023}
}

A Holistic Assessment of the Reliability of Machine Learning Systems.
Corso, A.; Karamadian, D.; Valentin, R.; Cooper, M.; and Kochenderfer, M. J.
arXiv. 2023.

@Article{Corso2023,
  author  = {Anthony Corso and David Karamadian and Romeo Valentin and Mary Cooper and Mykel J. Kochenderfer},
  journal = arxiv,
  title   = {A Holistic Assessment of the Reliability of Machine Learning Systems},
  year    = {2023},
  doi     = {10.48550/ARXIV.2307.10586},
}

Model-based Validation as Probabilistic Inference.
Delecki, H.; Corso, A.; and Kochenderfer, M. J.
In Conference on Learning for Dynamics and Control (L4DC), 2023.

@InProceedings{Delecki2023,
  author    = {Delecki, Harrison and Corso, Anthony and Kochenderfer, Mykel J.},
  booktitle = {Conference on Learning for Dynamics and Control (L4DC)},
  title     = {Model-based Validation as Probabilistic Inference},
  year      = {2023},
  doi       = {10.48550/ARXIV.2305.09930},
}

A Holistic Assessment of the Reliability of Machine Learning Systems.
Corso, A.; Karamadian, D.; Valentin, R.; Cooper, M.; and Kochenderfer, M. J.
arXiv e-prints, arXiv:2307.10586. July 2023.

@ARTICLE{2023arXiv230710586C,
  author = {{Corso}, Anthony and {Karamadian}, David and {Valentin}, Romeo and {Cooper}, Mary and {Kochenderfer}, Mykel J.},
  title = {A Holistic Assessment of the Reliability of Machine Learning Systems},
  journal = {arXiv e-prints},
  keywords = {Computer Science - Machine Learning},
  year = 2023,
  month = jul,
  eid = {arXiv:2307.10586},
  pages = {arXiv:2307.10586},
  doi = {10.48550/arXiv.2307.10586},
  archivePrefix = {arXiv},
  eprint = {2307.10586},
  primaryClass = {cs.LG},
  adsurl = {https://ui.adsabs.harvard.edu/abs/2023arXiv230710586C},
  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}

VeriX: Towards Verified Explainability of Deep Neural Networks.
Wu, M.; Wu, H.; and Barrett, C.
In Oh, A.; Neumann, T.; Globerson, A.; Saenko, K.; Hardt, M.; and Levine, S., editors, Advances in Neural Information Processing Systems 36 (NeurIPS 2023), volume 36, pages 22247–22268, 2023. Curran Associates, Inc.

@inproceedings{WWB23,
  url       = "https://proceedings.neurips.cc/paper_files/paper/2023/file/46907c2ff9fafd618095161d76461842-Paper-Conference.pdf",
  author    = "Min Wu and Haoze Wu and Clark Barrett",
  title     = "VeriX: Towards Verified Explainability of Deep Neural Networks",
  booktitle = "Advances in Neural Information Processing Systems 36 (NeurIPS 2023)",
  editor    = "A. Oh and T. Neumann and A. Globerson and K. Saenko and M. Hardt and S. Levine",
  publisher = "Curran Associates, Inc.",
  pages     = "22247--22268",
  volume    = 36,
  month     = dec,
  year      = 2023,
  category  = "Conference Publications",
  abstract  = "We present VeriX (Verified eXplainability), a system for producing optimal robust explanations and generating counterfactuals along decision boundaries of machine learning models. We build such explanations and counterfactuals iteratively using constraint solving techniques and a heuristic based on feature-level sensitivity ranking. We evaluate our method on image recognition benchmarks and a real-world scenario of autonomous aircraft taxiing."
}

DNN Verification, Reachability, and the Exponential Function Problem.
Isac, O.; Zohar, Y.; Barrett, C.; and Katz, G.
In Pérez, G. A.; and Raskin, J., editors, 34th International Conference on Concurrency Theory (CONCUR '23), volume 279 of Leibniz International Proceedings in Informatics (LIPIcs), pages 26:1–26:18, Dagstuhl, Germany, September 2023. Schloss Dagstuhl – Leibniz-Zentrum für Informatik. Antwerp, Belgium.

@InProceedings{IZB+23,
  url       = "https://drops.dagstuhl.de/opus/volltexte/2023/19020/",
  author    = "Isac, Omri and Zohar, Yoni and Barrett, Clark and Katz, Guy",
  title     = "{DNN} Verification, Reachability, and the Exponential Function Problem",
  booktitle = "$34^{th}$ International Conference on Concurrency Theory (CONCUR '23)",
  pages     = "26:1--26:18",
  series    = "Leibniz International Proceedings in Informatics (LIPIcs)",
  ISBN      = "978-3-95977-299-0",
  ISSN      = "1868-8969",
  month     = sep,
  year      = 2023,
  volume    = 279,
  editor    = "P\'{e}rez, Guillermo A. and Raskin, Jean-Fran\c{c}ois",
  publisher = "Schloss Dagstuhl -- Leibniz-Zentrum f{\"u}r Informatik",
  address   = "Dagstuhl, Germany",
  doi       = "10.4230/LIPIcs.CONCUR.2023.26",
  note      = "Antwerp, Belgium",
  category  = "Conference Publications",
  abstract  = "Deep neural networks (DNNs) are increasingly being deployed to perform safety-critical tasks. The opacity of DNNs, which prevents humans from reasoning about them, presents new safety and security challenges. To address these challenges, the verification community has begun developing techniques for rigorously analyzing DNNs, with numerous verification algorithms proposed in recent years. While a significant amount of work has gone into developing these verification algorithms, little work has been devoted to rigorously studying the computability and complexity of the underlying theoretical problems. Here, we seek to contribute to the bridging of this gap. We focus on two kinds of DNNs: those that employ piecewise-linear activation functions (e.g., ReLU), and those that employ piecewise-smooth activation functions (e.g., Sigmoids). We prove the two following theorems: (i) the decidability of verifying DNNs with a particular set of piecewise-smooth activation functions, including Sigmoid and tanh, is equivalent to a well-known, open problem formulated by Tarski; and (ii) the DNN verification problem for any quantifier-free linear arithmetic specification can be reduced to the DNN reachability problem, whose approximation is NP-complete. These results answer two fundamental questions about the computability and complexity of DNN verification, and the ways it is affected by the network's activation functions and error tolerance; and could help guide future efforts in developing DNN verification tools."
}

Toward Certified Robustness Against Real-World Distribution Shifts.
Wu, H.; Tagomori, T.; Robey, A.; Yang, F.; Matni, N.; Pappas, G.; Hassani, H.; Păsăreanu, C.; and Barrett, C.
In McDaniel, P.; and Papernot, N., editors, Proceedings of the 2023 IEEE Conference on Secure and Trustworthy Machine Learning (SaTML), pages 537–553, February 2023. IEEE. Raleigh, NC.

@inproceedings{WTR+23,
  url       = "http://theory.stanford.edu/~barrett/pubs/WTR+23.pdf",
  author    = "Haoze Wu and Teruhiro Tagomori and Alexander Robey and Fengjun Yang and Nikolai Matni and George Pappas and Hamed Hassani and Corina P{\u{a}}s{\u{a}}reanu and Clark Barrett",
  title     = "Toward Certified Robustness Against Real-World Distribution Shifts",
  booktitle = "Proceedings of the 2023 IEEE Conference on Secure and Trustworthy Machine Learning (SaTML)",
  publisher = "IEEE",
  editor    = "Patrick McDaniel and Nicolas Papernot",
  month     = feb,
  pages     = "537--553",
  doi       = "10.1109/SaTML54575.2023.00042",
  year      = 2023,
  note      = "Raleigh, NC",
  category  = "Conference Publications",
  abstract  = "We consider the problem of certifying the robustness of deep neural networks against real-world distribution shifts. To do so, we bridge the gap between hand-crafted specifications and realistic deployment settings by considering a neural-symbolic verification framework in which generative models are trained to learn perturbations from data and specifications are defined with respect to the output of these learned models. A pervasive challenge arising from this setting is that although S-shaped activations (e.g., sigmoid, tanh) are common in the last layer of deep generative models, existing verifiers cannot tightly approximate S-shaped activations. To address this challenge, we propose a general meta-algorithm for handling S-shaped activations which leverages classical notions of counter-example-guided abstraction refinement. The key idea is to ``lazily'' refine the abstraction of S-shaped functions to exclude spurious counter-examples found in the previous abstraction, thus guaranteeing progress in the verification process while keeping the state-space small. For networks with sigmoid activations, we show that our technique outperforms state-of-the-art verifiers on certifying robustness against both canonical adversarial perturbations and numerous real-world distribution shifts. Furthermore, experiments on the MNIST and CIFAR-10 datasets show that distribution-shift-aware algorithms have significantly higher certified robustness against distribution shifts."
}

Identifying and Mitigating the Security Risks of Generative AI.
Barrett, C.; Boyd, B.; Burzstein, E.; Carlini, N.; Chen, B.; Choi, J.; Chowdhury, A. R.; Christodorescu, M.; Datta, A.; Feizi, S.; Fisher, K.; Hashimoto, T.; Hendrycks, D.; Jha, S.; Kang, D.; Kerschbaum, F.; Mitchell, E.; Mitchell, J.; Ramzan, Z.; Shams, K.; Song, D.; Taly, A.; and Yang, D.
arXiv e-prints, arXiv:2308.14840. August 2023.

@ARTICLE{2023arXiv230814840B,
  author = {{Barrett}, Clark and {Boyd}, Brad and {Burzstein}, Ellie and {Carlini}, Nicholas and {Chen}, Brad and {Choi}, Jihye and {Chowdhury}, Amrita Roy and {Christodorescu}, Mihai and {Datta}, Anupam and {Feizi}, Soheil and {Fisher}, Kathleen and {Hashimoto}, Tatsunori and {Hendrycks}, Dan and {Jha}, Somesh and {Kang}, Daniel and {Kerschbaum}, Florian and {Mitchell}, Eric and {Mitchell}, John and {Ramzan}, Zulfikar and {Shams}, Khawaja and {Song}, Dawn and {Taly}, Ankur and {Yang}, Diyi},
  title = {Identifying and Mitigating the Security Risks of Generative AI},
  journal = {arXiv e-prints},
  keywords = {Computer Science - Artificial Intelligence},
  year = 2023,
  month = aug,
  eid = {arXiv:2308.14840},
  pages = {arXiv:2308.14840},
  doi = {10.48550/arXiv.2308.14840},
  archivePrefix = {arXiv},
  eprint = {2308.14840},
  primaryClass = {cs.AI},
  adsurl = {https://ui.adsabs.harvard.edu/abs/2023arXiv230814840B},
  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}

Neural city maps for GNSS NLOS prediction.
Neamati, D.; Gupta, S.; Partha, M.; and Gao, G.
In Proceedings of the 36th International Technical Meeting of the Satellite Division of The Institute of Navigation (ION GNSS+ 2023), pages 2073–2087, 2023.

@inproceedings{neamati2023neural,
  title = {Neural city maps for GNSS NLOS prediction},
  author = {Neamati, Daniel and Gupta, Shubh and Partha, Mira and Gao, Grace},
  booktitle = {Proceedings of the 36th International Technical Meeting of the Satellite Division of The Institute of Navigation (ION GNSS+ 2023)},
  pages = {2073--2087},
  year = {2023}
}

Safeguarding Learning-Based Planners Under Motion and Sensing Uncertainties Using Reachability Analysis.
Shetty, A.; Dai, A.; Tzikas, A.; and Gao, G.
In 2023 IEEE International Conference on Robotics and Automation (ICRA), pages 7872–7878, 2023. IEEE.

@inproceedings{shetty2023safeguarding,
  title = {Safeguarding Learning-Based Planners Under Motion and Sensing Uncertainties Using Reachability Analysis},
  author = {Shetty, Akshay and Dai, Adam and Tzikas, Alexandros and Gao, Grace},
  booktitle = {2023 IEEE International Conference on Robotics and Automation (ICRA)},
  pages = {7872--7878},
  year = {2023},
  organization = {IEEE}
}

Bounding GPS-Based Positioning and Navigation Uncertainty for Autonomous Drifting via Reachability.
Wu, A.; Mohanty, A.; Zaman, A.; and Gao, G.
In Proceedings of the 36th International Technical Meeting of the Satellite Division of The Institute of Navigation (ION GNSS+ 2023), pages 712–726, 2023.

@inproceedings{wu2023bounding,
  title = {Bounding GPS-Based Positioning and Navigation Uncertainty for Autonomous Drifting via Reachability},
  author = {Wu, Asta and Mohanty, Adyasha and Zaman, Anonto and Gao, Grace},
  booktitle = {Proceedings of the 36th International Technical Meeting of the Satellite Division of The Institute of Navigation (ION GNSS+ 2023)},
  pages = {712--726},
  year = {2023}
}

Risk-aware autonomous localization in harsh urban environments with mosaic zonotope shadow matching.
Neamati, D.; Bhamidipati, S.; and Gao, G.
Artificial Intelligence, 104000. 2023.

@article{neamati2023risk,
  title = {Risk-aware autonomous localization in harsh urban environments with mosaic zonotope shadow matching},
  journal = {Artificial Intelligence},
  pages = {104000},
  year = {2023},
  issn = {0004-3702},
  doi = {https://doi.org/10.1016/j.artint.2023.104000},
  url = {https://www.sciencedirect.com/science/article/pii/S0004370223001467},
  author = {Daniel Neamati and Sriramya Bhamidipati and Grace Gao},
  keywords = {GNSS, Shadow matching, Set-based, Risk-aware localization}
}

2022 (5)

Verification of image-based neural network controllers using generative models.
Katz, S. M.; Corso, A. L.; Strong, C. A.; and Kochenderfer, M. J.
Journal of Aerospace Information Systems. 2022.

@Article{Katz2022,
  author = {Sydney M. Katz and Anthony L. Corso and Christopher A. Strong and Mykel J. Kochenderfer},
  journal = jais,
  title = {Verification of image-based neural network controllers using generative models},
  year = {2022},
  doi = {10.2514/1.I011071},
  url = {https://arxiv.org/abs/2105.07091},
}

Risk-driven design of perception systems.
Corso, A.; Katz, S. M.; Innes, C. A.; Du, X.; Ramamoorthy, S.; and Kochenderfer, M. J.
In Advances in Neural Information Processing Systems (NeurIPS), 2022.

@InProceedings{Corso2022,
  author = {Anthony Corso and Sydney Michelle Katz and Craig A Innes and Xin Du and Subramanian Ramamoorthy and Mykel J. Kochenderfer},
  booktitle = nips,
  title = {Risk-driven design of perception systems},
  year = {2022},
  url = {https://arxiv.org/pdf/2205.10677.pdf},
}

Algorithms for Decision Making.
Kochenderfer, M. J.; Wheeler, T. A.; and Wray, K. H.
MIT Press, 2022.

@Book{Kochenderfer2022,
  author = {Mykel J. Kochenderfer and Tim A. Wheeler and Kyle H. Wray},
  publisher = {MIT Press},
  title = {Algorithms for Decision Making},
  year = {2022},
  url = {https://mitpress.mit.edu/9780262047012/algorithms-for-decision-making/},
}

Adversarially Robust Models may not Transfer Better: Sufficient Conditions for Domain Transferability from the View of Regularization.
Xu, X.; Zhang, J. Y.; Ma, E.; Son, H. H.; Koyejo, S.; and Li, B.
In Chaudhuri, K.; Jegelka, S.; Song, L.; Szepesvari, C.; Niu, G.; and Sabato, S., editors, Proceedings of the 39th International Conference on Machine Learning, volume 162 of Proceedings of Machine Learning Research, pages 24770–24802, 17–23 Jul 2022. PMLR.

@InProceedings{pmlr-v162-xu22n,
  title = {Adversarially Robust Models may not Transfer Better: Sufficient Conditions for Domain Transferability from the View of Regularization},
  author = {Xu, Xiaojun and Zhang, Jacky Y and Ma, Evelyn and Son, Hyun Ho and Koyejo, Sanmi and Li, Bo},
  booktitle = {Proceedings of the 39th International Conference on Machine Learning},
  pages = {24770--24802},
  year = {2022},
  editor = {Chaudhuri, Kamalika and Jegelka, Stefanie and Song, Le and Szepesvari, Csaba and Niu, Gang and Sabato, Sivan},
  volume = {162},
  series = {Proceedings of Machine Learning Research},
  month = {17--23 Jul},
  publisher = {PMLR},
  pdf = {https://proceedings.mlr.press/v162/xu22n/xu22n.pdf},
  url = {https://proceedings.mlr.press/v162/xu22n.html},
  abstract = {Machine learning (ML) robustness and domain generalization are fundamentally correlated: they essentially concern data distribution shifts under adversarial and natural settings, respectively. On one hand, recent studies show that more robust (adversarially trained) models are more generalizable. On the other hand, there is a lack of theoretical understanding of their fundamental connections. In this paper, we explore the relationship between regularization and domain transferability considering different factors such as norm regularization and data augmentations (DA). We propose a general theoretical framework proving that factors involving the model function class regularization are sufficient conditions for relative domain transferability. Our analysis implies that "robustness" is neither necessary nor sufficient for transferability; rather, regularization is a more fundamental perspective for understanding domain transferability. We then discuss popular DA protocols (including adversarial training) and show when they can be viewed as the function class regularization under certain conditions and therefore improve generalization. We conduct extensive experiments to verify our theoretical findings and show several counterexamples where robustness and generalization are negatively correlated on different datasets.}
}

Efficient Neural Network Analysis with Sum-of-Infeasibilities.
Wu, H.; Zeljić, A.; Katz, G.; and Barrett, C.
In Fisman, D.; and Rosu, G., editors, International Conference on Tools and Algorithms for the Construction and Analysis of Systems (TACAS), volume 13243 of Lecture Notes in Computer Science, pages 143–163, April 2022. Springer.

@inproceedings{WZK+22,
  author = {Haoze Wu and Aleksandar Zelji{\'c} and Guy Katz and Clark Barrett},
  editor = {Dana Fisman and Grigore Rosu},
  title = {Efficient Neural Network Analysis with Sum-of-Infeasibilities},
  booktitle = tacas,
  series = {Lecture Notes in Computer Science},
  volume = {13243},
  pages = {143--163},
  publisher = {Springer},
  month = apr,
  year = {2022},
  doi = {10.1007/978-3-030-99524-9_24},
  url = {http://www.cs.stanford.edu/~barrett/pubs/WZK+22.pdf}
}

2021 (3)

A survey of algorithms for black-box safety validation of cyber-physical systems.
Corso, A.; Moss, R. J.; Koren, M.; Lee, R.; and Kochenderfer, M. J.
Journal of Artificial Intelligence Research, 72: 377–428. 2021.

@Article{Corso2021survey,
  author = {Anthony Corso and Robert J. Moss and Mark Koren and Ritchie Lee and Mykel J. Kochenderfer},
  journal = jair,
  title = {A survey of algorithms for black-box safety validation of cyber-physical systems},
  year = {2021},
  number = {2005.02979},
  pages = {377--428},
  volume = {72},
  doi = {10.1613/jair.1.12716},
}

Algorithms for verifying deep neural networks.
Liu, C.; Arnon, T.; Lazarus, C.; Strong, C.; Barrett, C.; and Kochenderfer, M. J.
Foundations and Trends in Optimization, 4(3–4): 244–404. 2021.

@Article{Liu2021,
  author = {Changliu Liu and Tomer Arnon and Christopher Lazarus and Christopher Strong and Clark Barrett and Mykel J. Kochenderfer},
  journal = {Foundations and Trends in Optimization},
  title = {Algorithms for verifying deep neural networks},
  year = {2021},
  number = {3--4},
  pages = {244--404},
  volume = {4},
  doi = {10.1561/2400000035},
  url = {https://arxiv.org/abs/1903.06758},
}

Learning from Imperfect Demonstrations from Agents with Varying Dynamics.
Cao, Z.; and Sadigh, D.
IEEE Robotics and Automation Letters (RA-L). 2021.

@article{cao2021learning,
  title = {Learning from Imperfect Demonstrations from Agents with Varying Dynamics},
  author = {Cao, Zhangjie and Sadigh, Dorsa},
  journal = {IEEE Robotics and Automation Letters (RA-L)},
  year = {2021}
}

2020 (2)

Directional primitives for uncertainty-aware motion estimation in urban environments.
Senanayake, R.; Toyungyernsub, M.; Wang, M.; Kochenderfer, M. J.; and Schwager, M.
In IEEE International Conference on Intelligent Transportation Systems (ITSC), 2020.

@inproceedings{Senanayake2020,
  author = {Ransalu Senanayake and Maneekwan Toyungyernsub and Mingyu Wang and Mykel J. Kochenderfer and Mac Schwager},
  booktitle = itsc,
  date-added = {2024-05-27 12:20:23 -0700},
  date-modified = {2024-05-27 12:20:23 -0700},
  doi = {10.1109/ITSC45102.2020.9294288},
  title = {Directional primitives for uncertainty-aware motion estimation in urban environments},
  url = {https://arxiv.org/abs/2007.00161},
  year = {2020}
}

When Humans Aren't Optimal: Robots that Collaborate with Risk-Aware Humans.
Kwon, M.; Biyik, E.; Talati, A.; Bhasin, K.; Losey, D. P.; and Sadigh, D.
In Proceedings of the 2020 ACM/IEEE International Conference on Human-Robot Interaction, pages 43–52, 2020.

@inproceedings{kwon2020humans,
  title = {When Humans Aren't Optimal: Robots that Collaborate with Risk-Aware Humans},
  author = {Kwon, Minae and Biyik, Erdem and Talati, Aditi and Bhasin, Karan and Losey, Dylan P and Sadigh, Dorsa},
  booktitle = {Proceedings of the 2020 ACM/IEEE International Conference on Human-Robot Interaction},
  pages = {43--52},
  year = {2020}
}