Explainable AI: Intrinsic, Dialogic, and Impact Measures of Success. Cassens, J. & Wegener, R. In Ehsan, U., Liao, Q. V., Mara, M., Riedl, M., Riener, A., Streit, M., Wachter, S., & Wintersberger, P., editors, Proceedings of the ACM CHI Workshop on Operationalizing Human-Centered Perspectives in Explainable AI (HCXAI 2021), The Internet, May, 2021. abstract bibtex We propose three perspectives on evaluation models for explainable AI that include intrinsic measures, dialogic measures and impact measures. The paper outlines these different perspectives and looks at how the separation might be used for explanation evaluation benchmarking and integration into design and development.
@inproceedings{Cassens-Wegener-HCXAI-2021,
 keywords  = {paper},
 author    = {Cassens, Jörg and Wegener, Rebekah},
 title     = {Explainable {AI}: Intrinsic, Dialogic, and Impact Measures of Success},
 booktitle = {Proceedings of the {ACM} {CHI} Workshop on Operationalizing Human-Centered Perspectives in Explainable {AI} ({HCXAI} 2021)},
 month     = may,
 year      = {2021},
 eventdate = {2021-05-08},
 editor    = {Ehsan, Upol and Liao, Q. Vera and Mara, Martina and Riedl, Mark and Riener, Andreas and Streit, Marc and Wachter, Sandra and Wintersberger, Philipp},
 venue     = {Yokohama, Japan},
 address   = {The Internet},
 pdf       = {https://www.dropbox.com/s/gb1xxlxkr5cyt07/HCXAI2021_paper_31.pdf},
 abstract  = {We propose three perspectives on evaluation models for explainable AI that include intrinsic measures, dialogic measures and impact measures. The paper outlines these different perspectives and looks at how the separation might be used for explanation evaluation benchmarking and integration into design and development.}
}
Downloads: 0
{"_id":"nknSSWCxfgdw3H9Aj","bibbaseid":"cassens-wegener-explainableaiintrinsicdialogicandimpactmeasuresofsuccess-2021","author_short":["Cassens, J.","Wegener, R."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","keywords":"paper","author":[{"firstnames":["Jörg"],"propositions":[],"lastnames":["Cassens"],"suffixes":[]},{"firstnames":["Rebekah"],"propositions":[],"lastnames":["Wegener"],"suffixes":[]}],"title":"Explainable AI: Intrinsic, Dialogic, and Impact Measures of Success","booktitle":"Proceedings of the ACM CHI Workshop on Operationalizing Human-Centered Perspectives in Explainable AI (HCXAI 2021)","month":"May","year":"2021","eventdate":"2021-05-08","editor":[{"firstnames":["Upol"],"propositions":[],"lastnames":["Ehsan"],"suffixes":[]},{"firstnames":["Q.","Vera"],"propositions":[],"lastnames":["Liao"],"suffixes":[]},{"firstnames":["Martina"],"propositions":[],"lastnames":["Mara"],"suffixes":[]},{"firstnames":["Mark"],"propositions":[],"lastnames":["Riedl"],"suffixes":[]},{"firstnames":["Andreas"],"propositions":[],"lastnames":["Riener"],"suffixes":[]},{"firstnames":["Marc"],"propositions":[],"lastnames":["Streit"],"suffixes":[]},{"firstnames":["Sandra"],"propositions":[],"lastnames":["Wachter"],"suffixes":[]},{"firstnames":["Philipp"],"propositions":[],"lastnames":["Wintersberger"],"suffixes":[]}],"venue":"Yokohama, Japan","address":"The Internet","pdf":"https://www.dropbox.com/s/gb1xxlxkr5cyt07/HCXAI2021_paper_31.pdf","abstract":"We propose three perspectives on evaluation models for explainable AI that include intrinsic measures, dialogic measures and impact measures. 
The paper outlines these different perspectives and looks at how the separation might be used for explanation evaluation bench marking and integration into design and development.","bibtex":"@inproceedings{Cassens-Wegener-HCXAI-2021,\n keywords = {paper},\n author = {Jörg Cassens and Rebekah Wegener},\n title = {Explainable AI: Intrinsic, Dialogic, and Impact Measures of Success},\n booktitle = {Proceedings of the ACM CHI Workshop on Operationalizing Human-Centered Perspectives in Explainable AI (HCXAI 2021)},\n month = {May},\n year = {2021},\n eventdate = {2021-05-08},\n editor = {Upol Ehsan and Q. Vera Liao and Martina Mara and Mark Riedl and Andreas Riener and Marc Streit and Sandra Wachter and Philipp Wintersberger},\n venue = {Yokohama, Japan},\n address = {The Internet},\n pdf = {https://www.dropbox.com/s/gb1xxlxkr5cyt07/HCXAI2021_paper_31.pdf},\n abstract = {We propose three perspectives on evaluation models for explainable AI that include intrinsic measures, dialogic measures and impact measures. The paper outlines these different perspectives and looks at how the separation might be used for explanation evaluation bench marking and integration into design and development.}\n}\n\n","author_short":["Cassens, J.","Wegener, R."],"editor_short":["Ehsan, U.","Liao, Q. V.","Mara, M.","Riedl, M.","Riener, A.","Streit, M.","Wachter, S.","Wintersberger, P."],"key":"Cassens-Wegener-HCXAI-2021","id":"Cassens-Wegener-HCXAI-2021","bibbaseid":"cassens-wegener-explainableaiintrinsicdialogicandimpactmeasuresofsuccess-2021","role":"author","urls":{},"keyword":["paper"],"metadata":{"authorlinks":{}}},"bibtype":"inproceedings","biburl":"http://mi.kriwi.de/bib/cassens-publications-utf8.bib","dataSources":["8f3wr8A2WvwSFdDqC"],"keywords":["paper"],"search_terms":["explainable","intrinsic","dialogic","impact","measures","success","cassens","wegener"],"title":"Explainable AI: Intrinsic, Dialogic, and Impact Measures of Success","year":2021}