2024 (6)

Taming the AI Monster: Monitoring of Individual Fairness for Effective Human Oversight.
Baum, K.; Biewer, S.; Hermanns, H.; Hetmank, S.; Langer, M.; Lauber-Rönsberg, A.; and Sterz, S.
In International Symposium on Model Checking Software, pages 3–25, November 2024. Springer.

@inproceedings{baum2024taming,
  title={Taming the AI Monster: Monitoring of Individual Fairness for Effective Human Oversight},
  author={Baum, Kevin and Biewer, Sebastian and Hermanns, Holger and Hetmank, Sven and Langer, Markus and Lauber-R{\"o}nsberg, Anne and Sterz, Sarah},
  booktitle={International Symposium on Model Checking Software},
  pages={3--25},
  year={2024},
  month={November},
  organization={Springer},
  url={https://doi.org/10.1007/978-3-031-66149-5_1}
}

Soft Begging: Modular and Efficient Shielding of LLMs against Prompt Injection and Jailbreaking based on Prompt Tuning.
Ostermann, S.; Baum, K.; Endres, C.; Masloh, J.; and Schramowski, P.
arXiv preprint arXiv:2407.03391, 2024.

@misc{ostermann2024softbeggingmodularefficient,
  title={Soft Begging: Modular and Efficient Shielding of LLMs against Prompt Injection and Jailbreaking based on Prompt Tuning},
  author={Simon Ostermann and Kevin Baum and Christoph Endres and Julia Masloh and Patrick Schramowski},
  year={2024},
  eprint={2407.03391},
  archivePrefix={arXiv},
  primaryClass={cs.CR},
  url={https://arxiv.org/abs/2407.03391}
}

Acting for the Right Reasons: Creating Reason-Sensitive Artificial Moral Agents.
Baum, K.; Dargasz, L.; Jahn, F.; Gros, T. P.; and Wolf, V.
arXiv preprint arXiv:2409.15014, 2024.

@misc{baum2024actingrightreasonscreating,
  title={Acting for the Right Reasons: Creating Reason-Sensitive Artificial Moral Agents},
  author={Kevin Baum and Lisa Dargasz and Felix Jahn and Timo P. Gros and Verena Wolf},
  year={2024},
  eprint={2409.15014},
  archivePrefix={arXiv},
  primaryClass={cs.AI},
  url={https://arxiv.org/abs/2409.15014}
}

On the Quest for Effectiveness in Human Oversight: Interdisciplinary Perspectives.
Sterz, S.; Baum, K.; Biewer, S.; Hermanns, H.; Lauber-Rönsberg, A.; Meinel, P.; and Langer, M.
In FAccT '24: Proceedings of the 2024 ACM Conference on Fairness, Accountability, and Transparency, June 2024.

@inproceedings{sterz2024,
  author = {Sterz, Sarah and Baum, Kevin and Biewer, Sebastian and Hermanns, Holger and Lauber-Rönsberg, Anne and Meinel, Philip and Langer, Markus},
  title = {{On the Quest for Effectiveness in Human Oversight: Interdisciplinary Perspectives}},
  month = {June},
  year = {2024},
  note = {FAccT '24: Proceedings of the 2024 ACM Conference on Fairness, Accountability, and Transparency},
  doi = {10.1145/3630106.3659051}
}

Effective Human Oversight of AI-Based Systems: A Signal Detection Perspective on the Detection of Inaccurate and Unfair Outputs.
Langer, M.; Baum, K.; and Schlicker, N.
Minds and Machines, 35(1): 1. 2024.

@article{MaMMarkus,
  abstract = {Legislation and ethical guidelines around the globe call for effective human oversight of AI-based systems in high-risk contexts --that is oversight that reliably reduces the risks otherwise associated with the use of AI-based systems. Such risks may relate to the imperfect accuracy of systems (e.g., inaccurate classifications) or to ethical concerns (e.g., unfairness of outputs). Given the significant role that human oversight is expected to play in the operation of AI-based systems, it is crucial to better understand the conditions for effective human oversight. We argue that the reliable detection of errors (as an umbrella term for inaccuracies and unfairness) is crucial for effective human oversight. We then propose that Signal Detection Theory (SDT) offers a promising framework for better understanding what affects people's sensitivity (i.e., how well they are able to detect errors) and response bias (i.e., the tendency to report errors given a perceived evidence of an error) in detecting errors. Whereas an SDT perspective on the detection of inaccuracies is straightforward, we demonstrate its broader applicability by detailing the specifics for an SDT perspective on unfairness detection, including the need to choose a standard for (un)fairness. Additionally, we illustrate that an SDT perspective helps to better understand the conditions for effective error detection by showing examples of task-, system-, and person-related factors that may affect the sensitivity and response bias of humans tasked with detecting unfairness associated with the use of AI-based systems. Finally, we discuss future research directions for an SDT perspective on error detection.},
  author = {Langer, Markus and Baum, Kevin and Schlicker, Nadine},
  date = {2024/11/05},
  date-added = {2024-11-12 11:08:14 +0100},
  date-modified = {2024-11-12 11:08:14 +0100},
  doi = {10.1007/s11023-024-09701-0},
  id = {Langer2024},
  isbn = {1572-8641},
  journal = {Minds and Machines},
  number = {1},
  pages = {1},
  title = {Effective Human Oversight of AI-Based Systems: A Signal Detection Perspective on the Detection of Inaccurate and Unfair Outputs},
  url = {https://doi.org/10.1007/s11023-024-09701-0},
  volume = {35},
  year = {2024},
  bdsk-url-1 = {https://doi.org/10.1007/s11023-024-09701-0}
}

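The abstract of this article centres on two Signal Detection Theory quantities, sensitivity and response bias. As a brief reminder, and only as the standard equal-variance Gaussian SDT definitions rather than formulas quoted from the article itself, both are typically estimated from the hit rate H and the false-alarm rate F:

d' = \Phi^{-1}(H) - \Phi^{-1}(F), \qquad c = -\tfrac{1}{2}\left(\Phi^{-1}(H) + \Phi^{-1}(F)\right)

where \Phi^{-1} is the inverse of the standard normal CDF. A higher d' means the overseer separates erroneous from correct outputs more reliably, while c captures how readily the overseer reports an error.
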
Software Doping Analysis for Human Oversight.
Biewer, S.; Baum, K.; Sterz, S.; Hermanns, H.; Hetmank, S.; Langer, M.; Lauber-Rönsberg, A.; and Lehr, F.
Formal Methods in System Design. 2024.

@article{biewer2023software,
  title={{Software Doping Analysis for Human Oversight}},
  author={Sebastian Biewer and Kevin Baum and Sarah Sterz and Holger Hermanns and Sven Hetmank and Markus Langer and Anne Lauber-Rönsberg and Franz Lehr},
  year={2024},
  journal={Formal Methods in System Design},
  doi = {10.1007/s10703-024-00445-2}
}

2023 (2)

From Fear to Action: AI Governance and Opportunities for All.
Baum, K.; Bryson, J.; Dignum, F.; Dignum, V.; Grobelnik, M.; Hoos, H.; Irgens, M.; Lukowicz, P.; Muller, C.; Rossi, F.; Shawe-Taylor, J.; Theodorou, A.; and Vinuesa, R.
Frontiers in Computer Science, 5. 2023.

@article{baum2023fear,
  title={{From Fear to Action: AI Governance and Opportunities for All}},
  author={Baum, Kevin and Bryson, Joanna and Dignum, Frank and Dignum, Virginia and Grobelnik, Marko and Hoos, Holger and Irgens, Morten and Lukowicz, Paul and Muller, Catelijne and Rossi, Francesca and Shawe-Taylor, John and Theodorou, Andreas and Vinuesa, Ricardo},
  journal={Frontiers in Computer Science},
  volume={5},
  id={1210421},
  year={2023},
  publisher={Frontiers},
  doi={10.3389/fcomp.2023.1210421}
}

XAI Requirements in Smart Production Processes: A Case Study.
Baum, D.; Baum, K.; Gros, T. P.; and Wolf, V.
In Longo, L., editor, Explainable Artificial Intelligence. Proceedings of the World Conference on eXplainable Artificial Intelligence (xAI 2023), volume 1901 of Communications in Computer and Information Science (CCIS), pages 3–24, Cham, 2023. Springer Nature Switzerland.

@inproceedings{10.1007/978-3-031-44064-9_1,
  abstract = {The increasing prevalence of artificial intelligence (AI) systems has led to a growing consensus on the importance of the explainability of such systems. This is often emphasized with respect to societal and developmental contexts, but it is also crucial within the context of business processes, including manufacturing and production. While this is widely recognized, there is a notable lack of practical examples that demonstrate how to take explainability into account in the latter contexts. This paper presents a real-world use case in which we employed AI to optimize an Industry 4.0 production process without considering explainable AI (XAI) requirements. Building on previous work on models of the relationship between XAI methods and various associated expectations, as well as non-functional explainability requirements, we show how business-oriented XAI requirements can be formulated and prepared for integration into process design. This case study is a valuable resource for researchers and practitioners seeking better to understand the role of explainable AI in practice.},
  address = {Cham},
  author = {Baum, Deborah and Baum, Kevin and Gros, Timo P. and Wolf, Verena},
  booktitle = {Explainable Artificial Intelligence. Proceedings of the World Conference on eXplainable Artificial Intelligence (xAI 2023)},
  series = {Communications in Computer and Information Science (CCIS)},
  volume = {1901},
  editor = {Longo, Luca},
  doi = {10.1007/978-3-031-44064-9_1},
  pages = {3--24},
  publisher = {Springer Nature Switzerland},
  title = {{XAI Requirements in Smart Production Processes: A Case Study}},
  year = {2023}
}

2022 (2)

Ethics for Nerds.
Baum, K.; and Sterz, S.
The International Review of Information Ethics, 31(1). 2022. Special issue on »Ethics in the Age of Smart Systems«.

@article{baum2022ethics,
  title={{Ethics for Nerds}},
  author={Baum, Kevin and Sterz, Sarah},
  journal={The International Review of Information Ethics},
  volume={31},
  number={1},
  note = {special issue on »Ethics in the Age of Smart Systems«},
  doi = {10.29173/irie484},
  year={2022}
}

From Responsibility to Reason-Giving Explainable Artificial Intelligence.
Baum, K.; Mantel, S.; Schmidt, E.; and Speith, T.
Philosophy & Technology, 35(1). 2022.

@article{baum2022responsibility,
  title={{From Responsibility to Reason-Giving Explainable Artificial Intelligence}},
  author={Baum, Kevin and Mantel, Susanne and Schmidt, Eva and Speith, Timo},
  journal={Philosophy \& Technology},
  volume={35},
  number={1},
  year={2022},
  doi = {10.1007/s13347-022-00510-w},
  publisher={Springer International Publishing}
}

2021 (5)

What to Expect from Opening Up "Black Boxes"? Comparing Perceptions of Justice Between Human and Automated Agents.
Schlicker, N.; Langer, M.; Ötting, S. K.; Baum, K.; König, C. J.; and Wallach, D.
Computers in Human Behavior, 122. 2021.

@article{schlicker2021expect,
  title={{What to Expect from Opening Up "Black Boxes"? Comparing Perceptions of Justice Between Human and Automated Agents}},
  author={Schlicker, Nadine and Langer, Markus and {\"O}tting, Sonja K and Baum, Kevin and K{\"o}nig, Cornelius J and Wallach, Dieter},
  journal={Computers in Human Behavior},
  volume={122},
  year={2021},
  doi = {10.1016/j.chb.2021.106837},
  publisher={Elsevier}
}

What Do We Want from Explainable Artificial Intelligence (XAI)? – A Stakeholder Perspective on XAI and a Conceptual Model Guiding Interdisciplinary XAI Research.
Langer, M.; Oster, D.; Speith, T.; Hermanns, H.; Kästner, L.; Schmidt, E.; Sesing, A.; and Baum, K.
Artificial Intelligence, 296. 2021.

@article{langer2021we,
  title={{What Do We Want from Explainable Artificial Intelligence (XAI)? -- A Stakeholder Perspective on XAI and a Conceptual Model Guiding Interdisciplinary XAI Research}},
  author={Langer, Markus and Oster, Daniel and Speith, Timo and Hermanns, Holger and K{\"a}stner, Lena and Schmidt, Eva and Sesing, Andreas and Baum, Kevin},
  journal={Artificial Intelligence},
  volume={296},
  year={2021},
  doi = {10.1016/j.artint.2021.103473},
  publisher={Elsevier}
}

Spare Me the Details: How the Type of Information About Automated Interviews Influences Applicant Reactions.
Langer, M.; Baum, K.; König, C. J.; Hähne, V.; Oster, D.; and Speith, T.
International Journal of Selection and Assessment, 29(2): 154–169. 2021.

@article{langer2021spare,
  title={{Spare Me the Details: How the Type of Information About Automated Interviews Influences Applicant Reactions}},
  author={Langer, Markus and Baum, Kevin and K{\"o}nig, Cornelius J and H{\"a}hne, Viviane and Oster, Daniel and Speith, Timo},
  journal={International Journal of Selection and Assessment},
  volume={29},
  number={2},
  pages={154--169},
  year={2021},
  publisher={Wiley Online Library},
  doi = {10.1111/ijsa.12325}
}

Explainability Auditing for Intelligent Systems: A Rationale for Multi-Disciplinary Perspectives.
Langer, M.; Baum, K.; Hartmann, K.; Hessel, S.; Speith, T.; and Wahl, J.
In Yue, T.; and Mirakhorli, M., editors, 29th IEEE International Requirements Engineering Conference Workshops (RE 2021 Workshops), Notre Dame, Indiana, USA, pages 164–168, September 2021. IEEE.

@inproceedings{DBLP:conf/re/LangerBHHSW21,
  author    = {Markus Langer and Kevin Baum and Kathrin Hartmann and Stefan Hessel and Timo Speith and Jonas Wahl},
  editor    = {Tao Yue and Mehdi Mirakhorli},
  title     = {{Explainability Auditing for Intelligent Systems: {A} Rationale for Multi-Disciplinary Perspectives}},
  booktitle = {29th {IEEE} International Requirements Engineering Conference Workshops (RE 2021 Workshops), Notre Dame, Indiana, USA},
  pages     = {164--168},
  publisher = {{IEEE}},
  month     = {September},
  year      = {2021},
  doi       = {10.1109/REW53955.2021.00030}
}

Towards Perspicuity Requirements.
Sterz, S.; Baum, K.; Lauber-Rönsberg, A.; and Hermanns, H.
In Yue, T.; and Mirakhorli, M., editors, 29th IEEE International Requirements Engineering Conference Workshops (RE 2021 Workshops), Notre Dame, Indiana, USA, pages 159–163, September 2021. IEEE.

@inproceedings{DBLP:conf/re/SterzBLH21,
  author    = {Sarah Sterz and Kevin Baum and Anne Lauber{-}R{\"{o}}nsberg and Holger Hermanns},
  editor    = {Tao Yue and Mehdi Mirakhorli},
  title     = {Towards Perspicuity Requirements},
  booktitle = {29th {IEEE} International Requirements Engineering Conference Workshops (RE 2021 Workshops), Notre Dame, Indiana, USA},
  pages     = {159--163},
  publisher = {{IEEE}},
  month     = {September},
  year      = {2021},
  doi       = {10.1109/REW53955.2021.00029}
}

2019 (4)

Explainability as a Non-Functional Requirement.
Köhl, M. A.; Baum, K.; Langer, M.; Oster, D.; Speith, T.; and Bohlender, D.
In 27th IEEE International Requirements Engineering Conference (RE 2019), Jeju Island, South Korea, pages 363–368, 2019. IEEE.

@inproceedings{kohl2019explainability,
  title={{Explainability as a Non-Functional Requirement}},
  author={K{\"o}hl, Maximilian A and Baum, Kevin and Langer, Markus and Oster, Daniel and Speith, Timo and Bohlender, Dimitri},
  booktitle={27th IEEE International Requirements Engineering Conference (RE 2019), Jeju Island, South Korea},
  pages={363--368},
  doi = {10.1109/RE.2019.00046},
  year={2019},
  organization={IEEE}
}

Towards a framework combining machine ethics and machine explainability.
Baum, K.; Hermanns, H.; and Speith, T.
In Finkbeiner, B.; and Kleinberg, S., editors, Proceedings of the 3rd Workshop on Formal Reasoning about Causation, Responsibility, and Explanations in Science and Technology (CREST 2018), Thessaloniki, Greece, 21st April 2018, 2019.

@inproceedings{baum2019towards,
  title={{Towards a framework combining machine ethics and machine explainability}},
  author={Baum, Kevin and Hermanns, Holger and Speith, Timo},
  year={2019},
  doi={10.4204/EPTCS.286.4},
  booktitle={Proceedings of the 3rd Workshop on Formal Reasoning about Causation, Responsibility, and Explanations in Science and Technology (CREST 2018), Thessaloniki, Greece, 21st April 2018},
  editor = {Bernd Finkbeiner and Samantha Kleinberg}
}

Informatikunterricht in der Grundschule? Erprobung und Auswertung eines Unterrichtsmoduls mit Calliope mini.
Baum, K.; Kirsch, N.; Reese, K.; Schmidt, P.; Wachter, L.; and Wolf, V.
In Pasternak, A., editor, Proceedings of the Informatik für alle, 18. GI-Fachtagung Informatik und Schule (INFOS 2019), GI-Edition: Lecture Notes in Informatics (LNI), volume P-288, pages 49–58, 2019. Gesellschaft für Informatik.

@inproceedings{DBLP:conf/schule/BaumKRSWW19,
  author    = {Kevin Baum and Nadine Kirsch and Kerstin Reese and Pascal Schmidt and Lukas Wachter and Verena Wolf},
  editor    = {Arno Pasternak},
  title     = {{Informatikunterricht in der Grundschule? Erprobung und Auswertung eines Unterrichtsmoduls mit Calliope mini}},
  booktitle = {Proceedings of the Informatik f{\"{u}}r alle, 18. GI-Fachtagung Informatik und Schule (INFOS 2019) in the {GI-Edition: Lecture Notes in Informatics} (LNI)},
  volume    = {P-288},
  pages     = {49--58},
  publisher = {Gesellschaft f{\"{u}}r Informatik},
  year      = {2019},
  doi       = {10.18420/INFOS2019-B1},
  timestamp = {Tue, 04 Jul 2023 17:44:40 +0200},
  biburl    = {https://dblp.org/rec/conf/schule/BaumKRSWW19.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}

Anforderungen an die Erklärbarkeit maschinengestützter Entscheidungen.
Sesing, A.; and Baum, K.
In Taeger, J., editor, Die Macht der Daten und der Algorithmen – Regulierung von IT, IoT und KI. Tagungsband DSRI-Herbstakademie 2019, pages 435–449, 2019.

@inproceedings{sesinganforderungen,
  title={{Anforderungen an die Erkl{\"a}rbarkeit maschinengest{\"u}tzter Entscheidungen}},
  author={Sesing, Andreas and Baum, Kevin},
  booktitle = {Die Macht der Daten und der Algorithmen -- Regulierung von IT, IoT und KI. Tagungsband DSRI-Herbstakademie 2019},
  editor={Taeger, J{\"u}rgen},
  pages={435--449},
  url = {http://olwir.de/?content=reihen/uebersicht&sort=tb&isbn=978-3-95599-061-9},
  year = {2019}
}

2018 (1)

From Machine Ethics To Machine Explainability and Back.
Baum, K.; Hermanns, H.; and Speith, T.
In International Symposium on Artificial Intelligence and Mathematics (ISAIM 2018), Fort Lauderdale, Florida, USA, January 2018.

@inproceedings{baum2018machine,
  title={{From Machine Ethics To Machine Explainability and Back}},
  author={Baum, Kevin and Hermanns, Holger and Speith, Timo},
  booktitle = {International Symposium on Artificial Intelligence and Mathematics (ISAIM 2018), Fort Lauderdale, Florida, USA},
  month = {January},
  year = {2018},
  url = {https://isaim2018.cs.ou.edu/papers/ISAIM2018_Ethics_Baum_etal.pdf}
}

2017 (1)

Two Challenges for CI Trustworthiness and How to Address Them.
Baum, K.; Köhl, M.; and Schmidt, E.
In Pereira-Fariña, M.; and Reed, C., editors, Proceedings of the 1st Workshop on Explainable Computational Intelligence (XCI 2017), Dundee, United Kingdom, September 2017. Association for Computational Linguistics.

@inproceedings{baum2017two,
  title={{Two Challenges for CI Trustworthiness and How to Address Them}},
  author={Baum, Kevin and K{\"o}hl, Maximilian and Schmidt, Eva},
  booktitle={Proceedings of the 1st Workshop on Explainable Computational Intelligence (XCI 2017)},
  editor = {Pereira-Fari{\~n}a, M. and Reed, C.},
  month = sep,
  year={2017},
  address = {Dundee, United Kingdom},
  publisher = {Association for Computational Linguistics},
  doi = {10.18653/v1/W17-3701}
}

2016 (2)

What the Hack Is Wrong with Software Doping?
Baum, K.
In Margaria, T.; and Steffen, B., editors, Proceedings of the 7th International Symposium on Leveraging Applications of Formal Methods: ISoLA 2016: Leveraging Applications of Formal Methods, Verification and Validation: Discussion, Dissemination, Applications, volume 9953 of Lecture Notes in Computer Science (LNCS), pages 633–647, 2016. Springer International Publishing.

@inproceedings{baum2016hack,
  title={{What the Hack Is Wrong with Software Doping?}},
  author={Baum, Kevin},
  editor={Margaria, Tiziana and Steffen, Bernhard},
  booktitle={Proceedings of the 7th International Symposium on Leveraging Applications of Formal Methods: ISoLA 2016: Leveraging Applications of Formal Methods, Verification and Validation: Discussion, Dissemination, Applications},
  pages={633--647},
  year={2016},
  series = {Lecture Notes in Computer Science (LNCS)},
  volume = {9953},
  organization={Springer International Publishing},
  doi = {10.1007/978-3-319-47169-3_49}
}

MorphableUI: A Hypergraph-Based Approach to Distributed Multimodal Interaction for Rapid Prototyping and Changing Environments.
Krekhov, A.; Grüninger, J.; Baum, K.; McCann, D.; and Krüger, J.
In Proceedings of the 24th International Conference in Central Europe on Computer Graphics, Visualization and Computer Vision (WSCG 2016) in co-operation with EUROGRAPHICS, Computer Science Research Notes (CSRN), pages 299–308, 2016. Václav Skala-UNION Agency.

@inproceedings{krekhov2016morphableui,
  title={{MorphableUI: A Hypergraph-Based Approach to Distributed Multimodal Interaction for Rapid Prototyping and Changing Environments}},
  author={Krekhov, Andrey and Gr{\"u}ninger, J{\"u}rgen and Baum, Kevin and McCann, David and Kr{\"u}ger, Jens},
  year={2016},
  booktitle = {Proceedings of the 24th International Conference in Central Europe on Computer Graphics, Visualization and Computer Vision (WSCG 2016) in co-operation with EUROGRAPHICS},
  series = {Computer Science Research Notes (CSRN)},
  ISSN = {2464-4617},
  pages = {299--308},
  publisher={V{\'a}clav Skala-UNION Agency},
  url={http://wscg.zcu.cz/WSCG2016/!!_CSRN-2602.pdf}
}

In press (1)

Utilitarismus und das Problem kollektiven Handelns.
Baum, K.
In Andrić, V.; and Gesang, B., editors, Handbuch Utilitarismus. J.B. Metzler. In print.

@incollection{baum2024,
  title = {{Utilitarismus und das Problem kollektiven Handelns}},
  author = {Kevin Baum},
  booktitle = {Handbuch Utilitarismus},
  publisher = {J.B. Metzler},
  editor = {Vuko Andrić and Bernward Gesang},
  note = {in print}
}
