Explaining Decision-Making Algorithms through UI: Strategies to Help Non-Expert Stakeholders. Cheng, H., Wang, R., Zhang, Z., O'Connell, F., Gray, T., Harper, F. M., & Zhu, H. In Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems - CHI '19, 2019. ACM Press.
Increasingly, algorithms are used to make important decisions across society. However, these algorithms are usually poorly understood, which can reduce transparency and evoke negative emotions. In this research, we seek to learn design principles for explanation interfaces that communicate how decision-making algorithms work, in order to help organizations explain their decisions to stakeholders, or to support users' "right to explanation". We conducted an online experiment where 199 participants used different explanation interfaces to understand an algorithm for making university admissions decisions. We measured users' objective and self-reported understanding of the algorithm. Our results show that both interactive explanations and "white-box" explanations (i.e. that show the inner workings of an algorithm) can improve users' comprehension. Although the interactive approach is more effective at improving comprehension, it comes with a trade-off of taking more time. Surprisingly, we also find that users' trust in algorithmic decisions is not affected by the explanation interface or their level of comprehension of the algorithm.
@inproceedings{Cheng_2019,
abstract = {Increasingly, algorithms are used to make important decisions across society. However, these algorithms are usually poorly understood, which can reduce transparency and evoke negative emotions. In this research, we seek to learn design principles for explanation interfaces that communicate how decision-making algorithms work, in order to help organizations explain their decisions to stakeholders, or to support users' "right to explanation". We conducted an online experiment where 199 participants used different explanation interfaces to understand an algorithm for making university admissions decisions. We measured users' objective and self-reported understanding of the algorithm. Our results show that both interactive explanations and "white-box" explanations (i.e. that show the inner workings of an algorithm) can improve users' comprehension. Although the interactive approach is more effective at improving comprehension, it comes with a trade-off of taking more time. Surprisingly, we also find that users' trust in algorithmic decisions is not affected by the explanation interface or their level of comprehension of the algorithm.},
author = {Cheng, Hao-Fei and Wang, Ruotong and Zhang, Zheng and O'Connell, Fiona and Gray, Terrance and Harper, F. Maxwell and Zhu, Haiyi},
booktitle = {Proceedings of the 2019 {CHI} Conference on Human Factors in Computing Systems - {CHI} '19},
doi = {10.1145/3290605.3300789},
keywords = {explanation XAI human-centered-AI},
publisher = {{ACM} Press},
title = {Explaining Decision-Making Algorithms through {UI}: Strategies to Help Non-Expert Stakeholders},
url = {https://doi.org/10.1145/3290605.3300789},
year = 2019
}
{"_id":"GbtDF62d8aMmsLinH","bibbaseid":"cheng-wang-zhang-oconnell-gray-harper-zhu-explainingdecisionmakingalgorithmsthroughuistrategiestohelpnonexpertstakeholders-2019","author_short":["Cheng, H.","Wang, R.","Zhang, Z.","O'Connell, F.","Gray, T.","Harper, F. M.","Zhu, H."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","abstract":"Increasingly, algorithms are used to make important decisions across society. However, these algorithms are usually poorly understood, which can reduce transparency and evoke negative emotions. In this research, we seek to learn design principles for explanation interfaces that communicate how decision-making algorithms work, in order to help organizations explain their decisions to stakeholders, or to support users' \"right to explanation\". We conducted an online experiment where 199 participants used different explanation interfaces to understand an algorithm for making university admissions decisions. We measured users' objective and self-reported understanding of the algorithm. Our results show that both interactive explanations and \"white-box\" explanations (i.e. that show the inner workings of an algorithm) can improve users' comprehension. Although the interactive approach is more effective at improving comprehension, it comes with a trade-off of taking more time. Surprisingly, we also find that users' trust in algorithmic decisions is not affected by the explanation interface or their level of comprehension of the algorithm. ","added-at":"2020-12-07T21:02:37.000+0100","author":[{"propositions":[],"lastnames":["Cheng"],"firstnames":["Hao-Fei"],"suffixes":[]},{"propositions":[],"lastnames":["Wang"],"firstnames":["Ruotong"],"suffixes":[]},{"propositions":[],"lastnames":["Zhang"],"firstnames":["Zheng"],"suffixes":[]},{"propositions":[],"lastnames":["O'Connell"],"firstnames":["Fiona"],"suffixes":[]},{"propositions":[],"lastnames":["Gray"],"firstnames":["Terrance"],"suffixes":[]},{"propositions":[],"lastnames":["Harper"],"firstnames":["F.","Maxwell"],"suffixes":[]},{"propositions":[],"lastnames":["Zhu"],"firstnames":["Haiyi"],"suffixes":[]}],"biburl":"https://www.bibsonomy.org/bibtex/2693e5fd54388a3a414a309d66ba272dc/brusilovsky","booktitle":"Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems - CHI '19","description":"Explaining Decision-Making Algorithms through UI | Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems","doi":"10.1145/3290605.3300789","interhash":"ec80700c0f1c4cf44faffe2bf2ef3ad3","intrahash":"693e5fd54388a3a414a309d66ba272dc","keywords":"explanation XAI human-centered-AI","publisher":"ACM Press","timestamp":"2020-12-07T21:03:30.000+0100","title":"Explaining Decision-Making Algorithms through UI: Strategies to Help Non-Expert Stakeholders","url":"https://doi.org/10.1145%2F3290605.3300789","year":"2019","bibtex":"@inproceedings{Cheng_2019,\n abstract = {Increasingly, algorithms are used to make important decisions across society. However, these algorithms are usually poorly understood, which can reduce transparency and evoke negative emotions. In this research, we seek to learn design principles for explanation interfaces that communicate how decision-making algorithms work, in order to help organizations explain their decisions to stakeholders, or to support users' \"right to explanation\". We conducted an online experiment where 199 participants used different explanation interfaces to understand an algorithm for making university admissions decisions. 
We measured users' objective and self-reported understanding of the algorithm. Our results show that both interactive explanations and \"white-box\" explanations (i.e. that show the inner workings of an algorithm) can improve users' comprehension. Although the interactive approach is more effective at improving comprehension, it comes with a trade-off of taking more time. Surprisingly, we also find that users' trust in algorithmic decisions is not affected by the explanation interface or their level of comprehension of the algorithm.\r\n\r\n},\n added-at = {2020-12-07T21:02:37.000+0100},\n author = {Cheng, Hao-Fei and Wang, Ruotong and Zhang, Zheng and O'Connell, Fiona and Gray, Terrance and Harper, F. Maxwell and Zhu, Haiyi},\n biburl = {https://www.bibsonomy.org/bibtex/2693e5fd54388a3a414a309d66ba272dc/brusilovsky},\n booktitle = {Proceedings of the 2019 {CHI} Conference on Human Factors in Computing Systems - {CHI} {\\textquotesingle}19},\n description = {Explaining Decision-Making Algorithms through UI | Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems},\n doi = {10.1145/3290605.3300789},\n interhash = {ec80700c0f1c4cf44faffe2bf2ef3ad3},\n intrahash = {693e5fd54388a3a414a309d66ba272dc},\n keywords = {explanation XAI human-centered-AI},\n publisher = {{ACM} Press},\n timestamp = {2020-12-07T21:03:30.000+0100},\n title = {Explaining Decision-Making Algorithms through {UI}: Strategies to Help Non-Expert Stakeholders},\n url = {https://doi.org/10.1145%2F3290605.3300789},\n year = 2019\n}\n\n","author_short":["Cheng, H.","Wang, R.","Zhang, Z.","O'Connell, F.","Gray, T.","Harper, F. M.","Zhu, H."],"key":"Cheng_2019","id":"Cheng_2019","bibbaseid":"cheng-wang-zhang-oconnell-gray-harper-zhu-explainingdecisionmakingalgorithmsthroughuistrategiestohelpnonexpertstakeholders-2019","role":"author","urls":{"Paper":"https://doi.org/10.1145%2F3290605.3300789"},"keyword":["explanation XAI human-centered-AI"],"metadata":{"authorlinks":{}},"html":""},"bibtype":"inproceedings","biburl":"http://www.bibsonomy.org/bib/author/zhang?items=1000","dataSources":["6yXn8CtuzyEbCSr2m"],"keywords":["explanation xai human-centered-ai"],"search_terms":["explaining","decision","making","algorithms","through","strategies","help","non","expert","stakeholders","cheng","wang","zhang","o'connell","gray","harper","zhu"],"title":"Explaining Decision-Making Algorithms through UI: Strategies to Help Non-Expert Stakeholders","year":2019}