\n \n \n
\n
\n\n \n \n \n \n \n \n Anxiety Among Migrants - Questions for Agent Simulation.\n \n \n \n \n\n\n \n Nallur, V.\n\n\n \n\n\n\n In
Autonomous Agents and Multiagent Systems. Best and Visionary Papers, of
Lecture Notes in Computer Science, pages 141–150, London, UK, May 2023. Springer Cham\n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{nallur_vivek_anxiety_2023,\n\taddress = {London, UK},\n\tseries = {Lecture {Notes} in {Computer} {Science}},\n\ttitle = {Anxiety {Among} {Migrants} - {Questions} for {Agent} {Simulation}},\n\tcopyright = {All rights reserved},\n\tisbn = {978-3-031-56255-6},\n\turl = {https://viveknallur.github.io/wp-content/uploads/2023/07/camera-ready-cothrom_idea_2023.pdf},\n\tdoi = {10.1007/978-3-031-56255-6},\n\tinternal-note = {NOTE(review): doi is the book-level DOI (identical to the isbn chain); the chapter-level suffix appears to be missing -- verify against the publisher record},\n\tabstract = {This paper starts with hypothesis (and presents some evidence) that anxiety in migrants is sufficiently important to be modelled. It presents a small (and very incomplete) review of emotion modelling in literature. It asks the question of how to translate these into agent-based modelling, and whether this can be orthogonal to specific modelling of goals and capabilities of agents. This short paper is offered as a motivator for discussion, rather than a discussion of results.},\n\tbooktitle = {Autonomous {Agents} and {Multiagent} {Systems}. {Best} and {Visionary} {Papers}},\n\tpublisher = {Springer Cham},\n\tauthor = {Nallur, Vivek},\n\tmonth = may,\n\tyear = {2023},\n\tpages = {141--150},\n}\n\n
\n
\n\n\n
\n This paper starts with hypothesis (and presents some evidence) that anxiety in migrants is sufficiently important to be modelled. It presents a small (and very incomplete) review of emotion modelling in literature. It asks the question of how to translate these into agent-based modelling, and whether this can be orthogonal to specific modelling of goals and capabilities of agents. This short paper is offered as a motivator for discussion, rather than a discussion of results.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Implementing Pro-social Rule Bending in an Elder-care Robot Environment.\n \n \n \n \n\n\n \n Ramanayake, R.; and Nallur, V.\n\n\n \n\n\n\n In
Proceedings of the 15th International Conference on Social Robotics, of
Lecture notes in artificial intelligence (LNAI), pages 230–239, Doha, Qatar, December 2023. Springer International Publishing\n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{ramanayake_implementing_2023,\n\taddress = {Doha, Qatar},\n\tseries = {Lecture notes in artificial intelligence ({LNAI})},\n\ttitle = {Implementing {Pro}-social {Rule} {Bending} in an {Elder}-care {Robot} {Environment}},\n\tinternal-note = {NOTE(review): removed volume = {Lecture notes in Computer Science} -- it duplicated the series name in the volume field; the numeric LNAI volume number is missing and should be verified against the published proceedings},\n\tcopyright = {All rights reserved},\n\tisbn = {978-981-9987-18-4},\n\turl = {https://rdcu.be/ds1C1},\n\tdoi = {10.1007/978-981-99-8718-4_20},\n\tabstract = {Many ethical issues arise when robots are introduced into elder-care settings. When ethically charged situations occur, robots ought to be able to handle them appropriately. Some experimental approaches use (top-down) moral generalist approaches, like Deontology and Utilitarianism, to implement ethical decision-making. Others have advocated the use of bottom-up approaches, such as learning algorithms, to learn ethical patterns from human behaviour. Both approaches have their shortcomings when it comes to real-world implementations. Human beings have been observed to use a hybrid form of ethical reasoning called Pro-Social Rule Bending, where top-down rules and constraints broadly apply, but in particular situations, certain rules are temporarily bent. This paper reports on implementing such a hybrid ethical reasoning approach in elder-care robots. We show through simulation studies that it leads to better upholding of human values such as autonomy, whilst not sacrificing beneficence.},\n\tbooktitle = {Proceedings of the 15th {International} {Conference} on {Social} {Robotics}},\n\tpublisher = {Springer International Publishing},\n\tauthor = {Ramanayake, Rajitha and Nallur, Vivek},\n\tmonth = dec,\n\tyear = {2023},\n\tpages = {230--239},\n}\n\n
\n
\n\n\n
\n Many ethical issues arise when robots are introduced into elder-care settings. When ethically charged situations occur, robots ought to be able to handle them appropriately. Some experimental approaches use (top-down) moral generalist approaches, like Deontology and Utilitarianism, to implement ethical decision-making. Others have advocated the use of bottom-up approaches, such as learning algorithms, to learn ethical patterns from human behaviour. Both approaches have their shortcomings when it comes to real-world implementations. Human beings have been observed to use a hybrid form of ethical reasoning called Pro-Social Rule Bending, where top-down rules and constraints broadly apply, but in particular situations, certain rules are temporarily bent. This paper reports on implementing such a hybrid ethical reasoning approach in elder-care robots. We show through simulation studies that it leads to better upholding of human values such as autonomy, whilst not sacrificing beneficence.\n
\n\n\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Statutory Professions in AI Governance and Their Consequences for Explainable AI.\n \n \n \n \n\n\n \n NiFhaolain, L.; Hines, A.; and Nallur, V.\n\n\n \n\n\n\n In Longo, L., editor(s),
Explainable Artificial Intelligence, volume 1901, pages 85–96. Springer Nature Switzerland, Cham, 2023.\n
Series Title: Communications in Computer and Information Science\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@incollection{longo_statutory_2023,\n\taddress = {Cham},\n\tseries = {Communications in Computer and Information Science},\n\ttitle = {Statutory {Professions} in {AI} {Governance} and {Their} {Consequences} for {Explainable} {AI}},\n\tvolume = {1901},\n\tcopyright = {All rights reserved},\n\tisbn = {978-3-031-44063-2 978-3-031-44064-9},\n\turl = {https://link.springer.com/10.1007/978-3-031-44064-9_5},\n\tlanguage = {en},\n\turldate = {2023-11-03},\n\tbooktitle = {Explainable {Artificial} {Intelligence}},\n\tpublisher = {Springer Nature Switzerland},\n\tauthor = {NiFhaolain, Labhaoise and Hines, Andrew and Nallur, Vivek},\n\teditor = {Longo, Luca},\n\tyear = {2023},\n\tdoi = {10.1007/978-3-031-44064-9_5},\n\tpages = {85--96},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Security, Ethics and Privacy Issues in the Remote Extended Reality for Education.\n \n \n \n \n\n\n \n Iqbal, M. Z.; Xu, X.; Nallur, V.; Scanlon, M.; and Campbell, A. G.\n\n\n \n\n\n\n In Cai, Y.; Mangina, E.; and Goei, S. L., editor(s),
Mixed Reality for Education, pages 355–380. Springer Nature Singapore, Singapore, 2023.\n
Series Title: Gaming Media and Social Effects\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@incollection{cai_security_2023,\n\taddress = {Singapore},\n\tseries = {Gaming Media and Social Effects},\n\ttitle = {Security, {Ethics} and {Privacy} {Issues} in the {Remote} {Extended} {Reality} for {Education}},\n\tcopyright = {All rights reserved},\n\tisbn = {978-981-9949-57-1 978-981-9949-58-8},\n\turl = {https://link.springer.com/10.1007/978-981-99-4958-8_16},\n\tlanguage = {en},\n\turldate = {2023-09-18},\n\tbooktitle = {Mixed {Reality} for {Education}},\n\tpublisher = {Springer Nature Singapore},\n\tauthor = {Iqbal, Muhammad Zahid and Xu, Xuanhui and Nallur, Vivek and Scanlon, Mark and Campbell, Abraham G.},\n\teditor = {Cai, Yiyu and Mangina, Eleni and Goei, Sui Lin},\n\tyear = {2023},\n\tdoi = {10.1007/978-981-99-4958-8_16},\n\tpages = {355--380},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n A Partially Synthesized Position on the Automation of Machine Ethics.\n \n \n \n \n\n\n \n Nallur, V.; Dennis, L.; Bringsjord, S.; and Govindarajulu, N. S.\n\n\n \n\n\n\n
Digital Society, 2(2): 14. April 2023.\n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n \n \n 7 downloads\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n \n \n \n \n\n\n\n
\n
@article{nallur_partially_2023,\n\tauthor = {Nallur, Vivek and Dennis, Louise and Bringsjord, Selmer and Govindarajulu, Naveen Sundar},\n\ttitle = {A {Partially} {Synthesized} {Position} on the {Automation} of {Machine} {Ethics}},\n\tjournal = {Digital Society},\n\tvolume = {2},\n\tnumber = {2},\n\tpages = {14},\n\tmonth = apr,\n\tyear = {2023},\n\tissn = {2731-4669},\n\tdoi = {10.1007/s44206-023-00040-8},\n\turl = {https://viveknallur.github.io/wp-content/uploads/Nallur2023-Partially-synthesized-position-automation-machine-ethics.pdf},\n\turldate = {2023-04-22},\n\tlanguage = {en},\n\tkeywords = {Autonomous machines, Machine-implemented ethics},\n\tabstract = {We economically express our respective prior positions on the automation of machine ethics, and then seek a corporate, partly synthesized position that could underlie, at least to a degree, our future machine-ethics work, and such work by others as well.},\n\tcopyright = {All rights reserved},\n}\n\n
\n
\n\n\n
\n We economically express our respective prior positions on the automation of machine ethics, and then seek a corporate, partly synthesized position that could underlie, at least to a degree, our future machine-ethics work, and such work by others as well.\n
\n\n\n
\n\n\n\n\n\n