\n \n \n
\n
\n\n \n \n \n \n \n Runtime Verification of the ARIAC competition: Can a robot be Agile and Safe at the same time?.\n \n \n \n\n\n \n Ferrando, A.; Kootbally, Z.; Piliptchak, P.; Cardoso, R. C.; Schlenoff, C.; and Fisher, M.\n\n\n \n\n\n\n In
AIRO, 2020. \n
[FAIR-SPACE, RAIN, ORCA]\n\n
\n\n
\n\n
\n\n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{FerrandoAIRO,\n  author    = {Ferrando, Angelo and Kootbally, Zeid and Piliptchak, Pavel and Cardoso, Rafael C. and Schlenoff, Craig and Fisher, Michael},\n  title     = {Runtime Verification of the {ARIAC} competition: Can a robot be {Agile} and {Safe} at the same time?},\n  booktitle = {AIRO},\n  year      = {2020},\n  note      = {[<span class="fs">FAIR-SPACE</span>, <span class="rain">RAIN</span>, <span class="orca">ORCA</span>]}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n Multi-scale Verification of Distributed Synchronisation.\n \n \n \n\n\n \n Gainer, P.; Linker, S.; Dixon, C.; Hustadt, U.; and Fisher, M.\n\n\n \n\n\n\n
Formal Methods in Systems Design, 55(3): 171–221. 2020.\n
[S4]\n\n
\n\n
\n\n
\n\n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{GainerLDHF20,\n  author  = {Gainer, Paul and Linker, Sven and Dixon, Clare and Hustadt, Ullrich and Fisher, Michael},\n  title   = {Multi-scale Verification of Distributed Synchronisation},\n  journal = {Formal Methods in System Design},\n  volume  = {55},\n  number  = {3},\n  pages   = {171--221},\n  year    = {2020},\n  doi     = {10.1007/s10703-020-00347-z},\n  note    = {[<span class="s4">S4</span>]}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n A Safety Framework for Critical Systems Utilising Deep Neural Networks.\n \n \n \n\n\n \n Zhao, X.; Banks, A.; Sharp, J.; Robu, V.; Flynn, D.; Fisher, M.; and Huang, X.\n\n\n \n\n\n\n In
Proc. 39th International Conference on Computer Safety, Reliability, and Security (SAFECOMP), volume 12234, of
Lecture Notes in Computer Science, pages 244–259, 2020. Springer\n
[ORCA]\n\n
\n\n
\n\n
\n\n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{ZhaoBSRF0020,\n  author    = {Zhao, Xingyu and Banks, Alec and Sharp, James and Robu, Valentin and Flynn, David and Fisher, Michael and Huang, Xiaowei},\n  title     = {A Safety Framework for Critical Systems Utilising Deep Neural Networks},\n  booktitle = {Proc. 39th International Conference on Computer Safety, Reliability, and Security (SAFECOMP)},\n  series    = {Lecture Notes in Computer Science},\n  volume    = {12234},\n  pages     = {244--259},\n  publisher = {Springer},\n  year      = {2020},\n  doi       = {10.1007/978-3-030-54549-9_16},\n  note      = {[<span class="orca">ORCA</span>]}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n LFC: Combining Autonomous Agents and Automated Planning in the Multi-Agent Programming Contest.\n \n \n \n\n\n \n Cardoso, R. C.; Ferrando, A.; and Papacchini, F.\n\n\n \n\n\n\n In
The Multi-Agent Programming Contest 2019, pages 31–58, Cham, 2020. Springer International Publishing\n
[FAIR-SPACE, RAIN, ORCA]\n\n
\n\n
\n\n
\n\n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n \n \n 4 downloads\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{Cardoso20d,\n  author    = {Cardoso, Rafael C. and Ferrando, Angelo and Papacchini, Fabio},\n  title     = {{LFC}: Combining Autonomous Agents and Automated Planning in the Multi-Agent Programming Contest},\n  booktitle = {The Multi-Agent Programming Contest 2019},\n  year      = {2020},\n  publisher = {Springer International Publishing},\n  address   = {Cham},\n  pages     = {31--58},\n  isbn      = {978-3-030-59299-8},\n  doi       = {10.1007/978-3-030-59299-8_2},\n  note      = {[<span class="fs">FAIR-SPACE</span>, <span class="rain">RAIN</span>, <span class="orca">ORCA</span>]}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n Formalisation and Implementation of Road Junction Rules on an Autonomous Vehicle Modelled as an Agent.\n \n \n \n\n\n \n Alves, G. V.; Dennis, L.; and Fisher, M.\n\n\n \n\n\n\n In Sekerinski, E.; Moreira, N.; Oliveira, J. N.; Ratiu, D.; Guidotti, R.; Farrell, M.; Luckcuck, M.; Marmsoler, D.; Campos, J.; Astarte, T.; Gonnord, L.; Cerone, A.; Couto, L.; Dongol, B.; Kutrib, M.; Monteiro, P.; and Delmas, D., editor(s),
Formal Methods. FM 2019 International Workshops, pages 217–232, Cham, 2020. Springer International Publishing\n
[Verifiable Autonomy]\n\n
\n\n
\n\n
\n\n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n \n \n 7 downloads\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{10.1007/978-3-030-54994-7_16,\n  author    = {Alves, Gleifer Vaz and Dennis, Louise and Fisher, Michael},\n  editor    = {Sekerinski, Emil and Moreira, Nelma and Oliveira, Jos{\'e} N. and Ratiu, Daniel and Guidotti, Riccardo and Farrell, Marie and Luckcuck, Matt and Marmsoler, Diego and Campos, Jos{\'e} and Astarte, Troy and Gonnord, Laure and Cerone, Antonio and Couto, Luis and Dongol, Brijesh and Kutrib, Martin and Monteiro, Pedro and Delmas, David},\n  title     = {Formalisation and Implementation of Road Junction Rules on an Autonomous Vehicle Modelled as an Agent},\n  booktitle = {Formal Methods. FM 2019 International Workshops},\n  year      = {2020},\n  publisher = {Springer International Publishing},\n  address   = {Cham},\n  pages     = {217--232},\n  abstract  = {The design of autonomous vehicles includes obstacle detection and avoidance, route planning, speed control, etc. However, there is a lack of an explicit representation of the rules of the road on an autonomous vehicle. Additionally, it is necessary to understand the behaviour of an autonomous vehicle in order to check whether or not it works according to the rules of the road. Here, we propose an agent-based architecture to embed the rules of the road into an agent representing the behaviour of an autonomous vehicle. We use temporal logic to formally represent the rules of the road in a way it should be possible to capture when and how a given rule of the road can be applied. Our contributions include: i. suggestion of changes in the rules of the road; ii. representation of rules in a suitable way for an autonomous vehicle agent; iii. dealing with indeterminate terms in the Highway Code.},\n  isbn      = {978-3-030-54994-7},\n  doi       = {10.1007/978-3-030-54994-7_16},\n  note      = {[<span class="va">Verifiable Autonomy</span>]}\n}\n\n
\n
\n\n\n
\n The design of autonomous vehicles includes obstacle detection and avoidance, route planning, speed control, etc. However, there is a lack of an explicit representation of the rules of the road on an autonomous vehicle. Additionally, it is necessary to understand the behaviour of an autonomous vehicle in order to check whether or not it works according to the rules of the road. Here, we propose an agent-based architecture to embed the rules of the road into an agent representing the behaviour of an autonomous vehicle. We use temporal logic to formally represent the rules of the road in a way it should be possible to capture when and how a given rule of the road can be applied. Our contributions include: i. suggestion of changes in the rules of the road; ii. representation of rules in a suitable way for an autonomous vehicle agent; iii. dealing with indeterminate terms in the Highway Code.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Computational Goals, Values and Decision-Making.\n \n \n \n \n\n\n \n Dennis, L. A.\n\n\n \n\n\n\n
Science and Engineering Ethics. 2020.\n
\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{dennis_see_2020,\n  author   = {Dennis, Louise A.},\n  title    = {Computational Goals, Values and Decision-Making},\n  journal  = {Science and Engineering Ethics},\n  year     = {2020},\n  issn     = {1471-5546},\n  doi      = {10.1007/s11948-020-00244-y},\n  url      = {https://doi.org/10.1007/s11948-020-00244-y},\n  abstract = {Considering the popular framing of an artificial intelligence as a rational agent that always seeks to maximise its expected utility, referred to as its goal, one of the features attributed to such rational agents is that they will never select an action which will change their goal. Therefore, if such an agent is to be friendly towards humanity, one argument goes, we must understand how to specify this friendliness in terms of a utility function. Wolfhart Totschnig (Fully Autonomous AI, Science and Engineering Ethics, 2020), argues in contrast that a fully autonomous agent will have the ability to change its utility function and will do so guided by its values. This commentary examines computational accounts of goals, values and decision-making. It rejects the idea that a rational agent will never select an action that changes its goal but also argues that an artificial intelligence is unlikely to be purely rational in terms of always acting to maximise a utility function. It nevertheless also challenges the idea that an agent which does not change its goal cannot be considered fully autonomous. It does agree that values are an important component of decision-making and explores a number of reasons why.}\n}\n\n
\n
\n\n\n
\n Considering the popular framing of an artificial intelligence as a rational agent that always seeks to maximise its expected utility, referred to as its goal, one of the features attributed to such rational agents is that they will never select an action which will change their goal. Therefore, if such an agent is to be friendly towards humanity, one argument goes, we must understand how to specify this friendliness in terms of a utility function. Wolfhart Totschnig (Fully Autonomous AI, Science and Engineering Ethics, 2020), argues in contrast that a fully autonomous agent will have the ability to change its utility function and will do so guided by its values. This commentary examines computational accounts of goals, values and decision-making. It rejects the idea that a rational agent will never select an action that changes its goal but also argues that an artificial intelligence is unlikely to be purely rational in terms of always acting to maximise a utility function. It nevertheless also challenges the idea that an agent which does not change its goal cannot be considered fully autonomous. It does agree that values are an important component of decision-making and explores a number of reasons why.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Ethical Governor Systems viewed as a Multi-Agent Problem.\n \n \n \n \n\n\n \n Cardoso, R. C.; Ene, D.; Evans, T.; and Dennis, L. A.\n\n\n \n\n\n\n In Nallur, V., editor(s),
Second Workshop on Implementing Machine Ethics, June 2020. Zenodo\n
[RAIN]\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{cardosoime,\n  author    = {Cardoso, Rafael C. and Ene, Daniel and Evans, Tom and Dennis, Louise A.},\n  title     = {Ethical Governor Systems viewed as a Multi-Agent Problem},\n  editor    = {Nallur, Vivek},\n  booktitle = {Second Workshop on Implementing Machine Ethics},\n  month     = jun,\n  year      = {2020},\n  publisher = {Zenodo},\n  doi       = {10.5281/zenodo.3938851},\n  url       = {https://doi.org/10.5281/zenodo.3938851},\n  note      = {[<span class="rain">RAIN</span>]}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n First Steps towards an Ethical Agent for checking decision and behaviour for an Autonomous Vehicle on the Rules of the Road.\n \n \n \n \n\n\n \n Alves, G. V.; Dennis, L.; and Fisher, M.\n\n\n \n\n\n\n In Nallur, V., editor(s),
Second Workshop on Implementing Machine Ethics, June 2020. Zenodo\n
\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{alvesime,\n  author    = {Alves, Gleifer Vaz and Dennis, Louise and Fisher, Michael},\n  title     = {First Steps towards an Ethical Agent for checking decision and behaviour for an Autonomous Vehicle on the Rules of the Road},\n  editor    = {Nallur, Vivek},\n  booktitle = {Second Workshop on Implementing Machine Ethics},\n  month     = jun,\n  year      = {2020},\n  publisher = {Zenodo},\n  doi       = {10.5281/zenodo.3938851},\n  url       = {https://doi.org/10.5281/zenodo.3938851}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n Adaptable and Verifiable BDI Reasoning.\n \n \n \n\n\n \n Stringer, P.; Cardoso, R. C.; Huang, X.; and Dennis, L. A.\n\n\n \n\n\n\n In
Proceedings of the First Workshop on Agents and Robots for reliable Engineered Autonomy, Virtual event, 4th September 2020, volume 319, of
Electronic Proceedings in Theoretical Computer Science, pages 117-125, 2020. Open Publishing Association\n
[FAIR-SPACE, RAIN]\n\n
\n\n
\n\n
\n\n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{Stringer20a,\n  author    = {Stringer, Peter and Cardoso, Rafael C. and Huang, Xiaowei and Dennis, Louise A.},\n  title     = {Adaptable and Verifiable {BDI} Reasoning},\n  booktitle = {Proceedings of the First Workshop on Agents and Robots for reliable Engineered Autonomy, Virtual event, 4th September 2020},\n  series    = {Electronic Proceedings in Theoretical Computer Science},\n  volume    = {319},\n  pages     = {117--125},\n  year      = {2020},\n  publisher = {Open Publishing Association},\n  doi       = {10.4204/EPTCS.319.9},\n  note      = {[<span class="fs">FAIR-SPACE</span>, <span class="rain">RAIN</span>]}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n Verification and Validation for Space Robotics.\n \n \n \n\n\n \n Cardoso, R. C.; Farrell, M.; Kourtis, G.; Webster, M.; Dennis, L. A.; Dixon, C.; Fisher, M.; and Lisitsa, A.\n\n\n \n\n\n\n 2020.\n
Poster presented at the Opportunities and Challenges in Space Robotics Workshop held with ICRA 2020. [FAIR-SPACE]\n\n
\n\n
\n\n
\n\n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@unpublished{Cardoso20c,\n  author = {Cardoso, Rafael C. and Farrell, Marie and Kourtis, G. and Webster, M. and Dennis, Louise A. and Dixon, Clare and Fisher, Michael and Lisitsa, A.},\n  title  = {Verification and Validation for Space Robotics},\n  year   = {2020},\n  note   = {Poster presented at the Opportunities and Challenges in Space Robotics Workshop held with ICRA 2020. [<span class="fs">FAIR-SPACE</span>]}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n ROSMonitoring: A Runtime Verification Framework for ROS.\n \n \n \n\n\n \n Ferrando, A.; Cardoso, R. C.; Fisher, M.; Ancona, D.; Franceschini, L.; and Mascardi, V.\n\n\n \n\n\n\n In
Towards Autonomous Robotic Systems, pages 387–399, Cham, 2020. Springer International Publishing\n
[FAIR-SPACE, RAIN, ORCA]\n\n
\n\n
\n\n
\n\n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n \n \n 15 downloads\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{Ferrando20a,\n  author    = {Ferrando, Angelo and Cardoso, Rafael C. and Fisher, Michael and Ancona, Davide and Franceschini, Luca and Mascardi, Viviana},\n  title     = {{ROSMonitoring}: A Runtime Verification Framework for {ROS}},\n  booktitle = {Towards Autonomous Robotic Systems},\n  year      = {2020},\n  publisher = {Springer International Publishing},\n  address   = {Cham},\n  pages     = {387--399},\n  doi       = {10.1007/978-3-030-63486-5_40},\n  isbn      = {978-3-030-63486-5},\n  note      = {[<span class="fs">FAIR-SPACE</span>, <span class="rain">RAIN</span>, <span class="orca">ORCA</span>]}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n Verifiable Self-Aware Agent-Based Autonomous Systems.\n \n \n \n\n\n \n Dennis, L. A.; and Fisher, M.\n\n\n \n\n\n\n
Proceedings of the IEEE special issue on Self-Aware Autonomous Systems, 1011–1026. 2020.\n
[Verifiable Autonomy, FAIR-SPACE, RAIN, ORCA]\n\n
\n\n
\n\n
\n\n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{dennis_ieee20,\n  author  = {Dennis, Louise A. and Fisher, Michael},\n  title   = {Verifiable Self-Aware Agent-Based Autonomous Systems},\n  journal = {Proceedings of the IEEE special issue on Self-Aware Autonomous Systems},\n  pages   = {1011--1026},\n  year    = {2020},\n  doi     = {10.1109/JPROC.2020.2991262},\n  note    = {[<span class="va">Verifiable Autonomy</span>, <span class="fs">FAIR-SPACE</span>, <span class="rain">RAIN</span>, <span class="orca">ORCA</span>]}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n An Interface for Programming Verifiable Autonomous Agents in ROS.\n \n \n \n\n\n \n Cardoso, R. C.; Ferrando, A.; Dennis, L. A.; and Fisher, M.\n\n\n \n\n\n\n In Bassiliades, N.; Chalkiadakis, G.; and de Jonge, D., editor(s),
Multi-Agent Systems and Agreement Technologies, pages 191–205, Cham, 2020. Springer International Publishing\n
[FAIR-SPACE, RAIN]\n\n
\n\n
\n\n
\n\n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n \n \n 5 downloads\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{Cardoso20b,\n  author    = {Cardoso, Rafael C. and Ferrando, Angelo and Dennis, Louise A. and Fisher, Michael},\n  editor    = {Bassiliades, Nick and Chalkiadakis, Georgios and de Jonge, Dave},\n  title     = {An Interface for Programming Verifiable Autonomous Agents in {ROS}},\n  booktitle = {Multi-Agent Systems and Agreement Technologies},\n  year      = {2020},\n  publisher = {Springer International Publishing},\n  address   = {Cham},\n  pages     = {191--205},\n  abstract  = {Autonomy has been one of the most desirable features for robotic applications in recent years. This is evidenced by a recent surge of research in autonomous driving cars, strong government funding for research in robotics for extreme environments, and overall progress in service robots. Autonomous decision-making is often at the core of these systems, thus, it is important to be able to verify and validate properties that relate to the correct behaviour that is expected of the system. Our main contribution in this paper, is an interface for integrating BDI-based agents into robotic systems developed using ROS. We use the Gwendolen language to program our BDI agents and to make use of the AJPF model checker in order to verify properties related to the decision-making in the agent programs. Our case studies include 3D simulations using a simple autonomous patrolling behaviour of a TurtleBot, and multiple TurtleBots servicing a house that can cooperate with each other in case of failure.},\n  isbn      = {978-3-030-66412-1},\n  note      = {[<span class="fs">FAIR-SPACE</span>, <span class="rain">RAIN</span>]}\n}\n\n
\n
\n\n\n
\n Autonomy has been one of the most desirable features for robotic applications in recent years. This is evidenced by a recent surge of research in autonomous driving cars, strong government funding for research in robotics for extreme environments, and overall progress in service robots. Autonomous decision-making is often at the core of these systems, thus, it is important to be able to verify and validate properties that relate to the correct behaviour that is expected of the system. Our main contribution in this paper, is an interface for integrating BDI-based agents into robotic systems developed using ROS. We use the Gwendolen language to program our BDI agents and to make use of the AJPF model checker in order to verify properties related to the decision-making in the agent programs. Our case studies include 3D simulations using a simple autonomous patrolling behaviour of a TurtleBot, and multiple TurtleBots servicing a house that can cooperate with each other in case of failure.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n Heterogeneous Verification of an Autonomous Curiosity Rover.\n \n \n \n\n\n \n Cardoso, R. C.; Farrell, M.; Luckcuck, M.; Ferrando, A.; and Fisher, M.\n\n\n \n\n\n\n In Lee, R.; Jha, S.; Mavridou, A.; and Giannakopoulou, D., editor(s),
NASA Formal Methods, pages 353–360, Cham, 2020. Springer International Publishing\n
[FAIR-SPACE]\n\n
\n\n
\n\n
\n\n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{Cardoso20a,\n  author    = {Cardoso, Rafael C. and Farrell, Marie and Luckcuck, Matt and Ferrando, Angelo and Fisher, Michael},\n  editor    = {Lee, Ritchie and Jha, Susmit and Mavridou, Anastasia and Giannakopoulou, Dimitra},\n  title     = {Heterogeneous Verification of an Autonomous {Curiosity} Rover},\n  booktitle = {NASA Formal Methods},\n  year      = {2020},\n  publisher = {Springer International Publishing},\n  address   = {Cham},\n  pages     = {353--360},\n  abstract  = {The Curiosity rover is one of the most complex systems successfully deployed in a planetary exploration mission to date. It was sent by NASA to explore the surface of Mars and to identify potential signs of life. Even though it has limited autonomy on-board, most of its decisions are made by the ground control team. This hinders the speed at which the Curiosity reacts to its environment, due to the communication delays between Earth and Mars. Depending on the orbital position of both planets, it can take 4--24 min for a message to be transmitted between Earth and Mars. If the Curiosity were controlled autonomously, it would be able to perform its activities much faster and more flexibly. However, one of the major barriers to increased use of autonomy in such scenarios is the lack of assurances that the autonomous behaviour will work as expected. In this paper, we use a Robot Operating System (ROS) model of the Curiosity that is simulated in Gazebo and add an autonomous agent that is responsible for high-level decision-making. Then, we use a mixture of formal and non-formal techniques to verify the distinct system components (ROS nodes). This use of heterogeneous verification techniques is essential to provide guarantees about the nodes at different abstraction levels, and allows us to bring together relevant verification evidence to provide overall assurance.},\n  isbn      = {978-3-030-55754-6},\n  doi       = {10.1007/978-3-030-55754-6_20},\n  note      = {[<span class="fs">FAIR-SPACE</span>]}\n}\n\n
\n
\n\n\n
\n The Curiosity rover is one of the most complex systems successfully deployed in a planetary exploration mission to date. It was sent by NASA to explore the surface of Mars and to identify potential signs of life. Even though it has limited autonomy on-board, most of its decisions are made by the ground control team. This hinders the speed at which the Curiosity reacts to its environment, due to the communication delays between Earth and Mars. Depending on the orbital position of both planets, it can take 4–24 min for a message to be transmitted between Earth and Mars. If the Curiosity were controlled autonomously, it would be able to perform its activities much faster and more flexibly. However, one of the major barriers to increased use of autonomy in such scenarios is the lack of assurances that the autonomous behaviour will work as expected. In this paper, we use a Robot Operating System (ROS) model of the Curiosity that is simulated in Gazebo and add an autonomous agent that is responsible for high-level decision-making. Then, we use a mixture of formal and non-formal techniques to verify the distinct system components (ROS nodes). This use of heterogeneous verification techniques is essential to provide guarantees about the nodes at different abstraction levels, and allows us to bring together relevant verification evidence to provide overall assurance.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Reliable Decision-Making in Autonomous Vehicles.\n \n \n \n \n\n\n \n Alves, G. V.; Dennis, L.; Fernandes, L.; and Fisher, M.\n\n\n \n\n\n\n In Leitner, A.; Watzenig, D.; and Ibanez-Guzman, J., editor(s),
Validation and Verification of Automated Systems: Results of the ENABLE-S3 Project, pages 105–117. Springer International Publishing, Cham, 2020.\n
\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n \n \n 9 downloads\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@incollection{Alves2020,\n  author    = {Alves, Gleifer Vaz and Dennis, Louise and Fernandes, Lucas and Fisher, Michael},\n  editor    = {Leitner, Andrea and Watzenig, Daniel and Ibanez-Guzman, Javier},\n  title     = {Reliable Decision-Making in Autonomous Vehicles},\n  booktitle = {Validation and Verification of Automated Systems: Results of the ENABLE-S3 Project},\n  year      = {2020},\n  publisher = {Springer International Publishing},\n  address   = {Cham},\n  pages     = {105--117},\n  abstract  = {The use of Autonomous Vehicles (AVs) on our streets is soon to be a reality; increasingly, interacting with such AVs will be part of our daily routine. However, we will certainly need to assure the reliable behaviour of an AV, especially when some unexpected scenarios (e.g. harsh environments, obstacles, emergencies) are taken into account. In this article we use an intelligent agent approach to capture the high-level decision-making process within an AV and then use formal verification techniques to automatically, and strongly, analyse the required behaviours. Specifically, we use the MCAPL framework, wherein our core agent is implemented using the GWENDOLEN agent programming language, and to which we can apply model checking via the AJPF model checker. By performing such formal verification on our agent, we are able to prove that the AV's decision-making process, embedded within the GWENDOLEN agent plans, matches our requirements. As examples, we will verify (formal) properties in order to determine whether the agent behaves in a reliable manner through three different levels of emergency displayed in a simple urban traffic environment.},\n  isbn      = {978-3-030-14628-3},\n  doi       = {10.1007/978-3-030-14628-3_10},\n  url       = {https://doi.org/10.1007/978-3-030-14628-3_10}\n}\n\n
\n
\n\n\n
\n The use of Autonomous Vehicles (AVs) on our streets is soon to be a reality; increasingly, interacting with such AVs will be part of our daily routine. However, we will certainly need to assure the reliable behaviour of an AV, especially when some unexpected scenarios (e.g. harsh environments, obstacles, emergencies) are taken into account. In this article we use an intelligent agent approach to capture the high-level decision-making process within an AV and then use formal verification techniques to automatically, and strongly, analyse the required behaviours. Specifically, we use the MCAPL framework, wherein our core agent is implemented using the GWENDOLEN agent programming language, and to which we can apply model checking via the AJPF model checker. By performing such formal verification on our agent, we are able to prove that the AV's decision-making process, embedded within the GWENDOLEN agent plans, matches our requirements. As examples, we will verify (formal) properties in order to determine whether the agent behaves in a reliable manner through three different levels of emergency displayed in a simple urban traffic environment.\n
\n\n\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n Plan Library Reconfigurability in BDI Agents.\n \n \n \n\n\n \n Cardoso, R. C.; Dennis, L. A.; and Fisher, M.\n\n\n \n\n\n\n In Dennis, L. A.; Bordini, R. H.; and Lespérance, Y., editor(s),
Engineering Multi-Agent Systems, pages 195–212, Cham, 2020. Springer International Publishing\n
[FAIR-SPACE, RAIN]\n\n
\n\n
\n\n
\n\n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n \n \n 5 downloads\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{Cardoso19EMAS,\n  author    = {Cardoso, Rafael C. and Dennis, Louise A. and Fisher, Michael},\n  editor    = {Dennis, Louise A. and Bordini, Rafael H. and Lesp{\'e}rance, Yves},\n  title     = {Plan Library Reconfigurability in {BDI} Agents},\n  booktitle = {Engineering Multi-Agent Systems},\n  year      = {2020},\n  publisher = {Springer International Publishing},\n  address   = {Cham},\n  pages     = {195--212},\n  abstract  = {One of the major advantages of modular architectures in robotic systems is the ability to add or replace nodes, without needing to rearrange the whole system. In this type of system, autonomous agents can aid in the decision making and high-level control of the robot. For example, a robot may have a module for each of the effectors and sensors that it has and an agent with a plan library containing high-level plans to aid in the decision making within these modules. However, when autonomously replacing a node it can be difficult to reconfigure plans in the agent's plan library while retaining correctness. In this paper, we exploit the formal concept of capabilities in Belief-Desire-Intention agents and describe how agents can reason about these capabilities in order to reconfigure their plan library while retaining overall correctness constraints. To validate our approach, we show the implementation of our framework and an experiment using a practical example in the Mars rover scenario.},\n  isbn      = {978-3-030-51417-4},\n  doi       = {10.1007/978-3-030-51417-4_10},\n  note      = {[<span class="fs">FAIR-SPACE</span>, <span class="rain">RAIN</span>]}\n}\n\n
\n
\n\n\n
\n One of the major advantages of modular architectures in robotic systems is the ability to add or replace nodes, without needing to rearrange the whole system. In this type of system, autonomous agents can aid in the decision making and high-level control of the robot. For example, a robot may have a module for each of the effectors and sensors that it has and an agent with a plan library containing high-level plans to aid in the decision making within these modules. However, when autonomously replacing a node it can be difficult to reconfigure plans in the agent's plan library while retaining correctness. In this paper, we exploit the formal concept of capabilities in Belief-Desire-Intention agents and describe how agents can reason about these capabilities in order to reconfigure their plan library while retaining overall correctness constraints. To validate our approach, we show the implementation of our framework and an experiment using a practical example in the Mars rover scenario.\n
\n\n\n
\n\n\n\n\n\n