The Problem of Explanations without User Feedback. Smith, A. & Nolan, J. J. In IUI 2018 Workshop on Explainable Smart Systems (ExSS), 2018.
abstract   bibtex   
Explanations are necessary for building users' understanding and trust in machine learning systems. However, users may abandon systems if these explanations demonstrate consistent errors and they cannot affect change in the systems' behavior in response. When user feedback is supported, then the utility of explanations is to not only promote understanding, but also enable users to help the machine learning system overcome errors. We suggest an experiment to examine how users react when a system makes explainable mistakes with varied support for user feedback.
@comment{Fixed on review: added the missing citation key (taken from the exported
citation_key field), changed @article -> @inproceedings with booktitle (this is a
workshop paper, not a journal article), and corrected the page range to use "--".}
@inproceedings{Smith2018,
 title = {The Problem of Explanations without User Feedback},
 type = {inproceedings},
 year = {2018},
 keywords = {Author Keywords Explanations,Human Information Processing,human-in-the-loop systems,user feedback},
 pages = {11--13},
 id = {24f0e803-719e-3d25-a4ee-709d7560941c},
 created = {2018-03-31T12:49:05.841Z},
 file_attached = {false},
 profile_id = {2ed0fe69-06a2-3e8b-9bc9-5bdb197f1120},
 group_id = {e795dbfa-5576-3499-9c01-6574f19bf7aa},
 last_modified = {2018-12-14T12:16:32.503Z},
 read = {true},
 starred = {false},
 authored = {false},
 confirmed = {true},
 citation_key = {Smith2018},
 notes = {Las explicaciones que da el sistema al usuario cuando comete errores debería ir acompañado por un sistema de recibimiento de feedback por parte del usuario para que el sistema pueda solucionar los errores que comete, aumentando así la confianza del usuario en el sistema, la efectividad del mismo, la continuidad de su uso, etc.<br/>Se va a realizar un estudio para examinar cómo reaccionarían los usuarios ante sistemas que les permitan incluir feedback. Van a incluir dos métodos: un método que ayude a explorar la frustración o confusión del usuario cuando el sistema sigue cometiendo los mismos errores, y otro método que dé más control al usuario.},
 private_publication = {false},
 abstract = {Explanations are necessary for building users' understanding and trust in machine learning systems. However, users may abandon systems if these explanations demonstrate consistent errors and they cannot affect change in the systems' behavior in response. When user feedback is supported, then the utility of explanations is to not only promote understanding, but also enable users to help the machine learning system overcome errors. We suggest an experiment to examine how users react when a system makes explainable mistakes with varied support for user feedback.},
 bibtype = {inproceedings},
 author = {Smith, Alison and Nolan, James J},
 booktitle = {IUI 2018 Workshop on Explainable Smart Systems (ExSS)}
}

Downloads: 0