Towards Robust Interpretability with Self-Explaining Neural Networks. Alvarez-Melis, D. & Jaakkola, T. S. December, 2018. arXiv:1806.07538 [cs, stat]Paper doi abstract bibtex Most recent work on interpretability of complex machine learning models has focused on estimating *a posteriori* explanations for previously trained models around specific predictions. *Self-explaining* models where interpretability plays a key role already during learning have received much less attention. We propose three desiderata for explanations in general – explicitness, faithfulness, and stability – and show that existing methods do not satisfy them. In response, we design self-explaining models in stages, progressively generalizing linear classifiers to complex yet architecturally explicit models. Faithfulness and stability are enforced via regularization specifically tailored to such models. Experimental results across various benchmark datasets show that our framework offers a promising direction for reconciling model complexity and interpretability.
@misc{alvarez-melis_towards_2018,
	title         = {Towards Robust Interpretability with Self-Explaining Neural Networks},
	author        = {Alvarez-Melis, David and Jaakkola, Tommi S.},
	year          = {2018},
	month         = dec,
	publisher     = {arXiv},
	eprint        = {1806.07538},
	archiveprefix = {arXiv},
	primaryclass  = {cs.LG},
	doi           = {10.48550/arXiv.1806.07538},
	url           = {http://arxiv.org/abs/1806.07538},
	urldate       = {2023-07-27},
	abstract      = {Most recent work on interpretability of complex machine learning models has focused on estimating \textit{a posteriori} explanations for previously trained models around specific predictions. \textit{Self-explaining} models where interpretability plays a key role already during learning have received much less attention. We propose three desiderata for explanations in general -- explicitness, faithfulness, and stability -- and show that existing methods do not satisfy them. In response, we design self-explaining models in stages, progressively generalizing linear classifiers to complex yet architecturally explicit models. Faithfulness and stability are enforced via regularization specifically tailored to such models. Experimental results across various benchmark datasets show that our framework offers a promising direction for reconciling model complexity and interpretability.},
	keywords      = {Computer Science - Machine Learning, Statistics - Machine Learning},
}
Downloads: 0
{"_id":"R3Aq8TZt3AwsCDJGL","bibbaseid":"alvarezmelis-jaakkola-towardsrobustinterpretabilitywithselfexplainingneuralnetworks-2018","author_short":["Alvarez-Melis, D.","Jaakkola, T. S."],"bibdata":{"bibtype":"misc","type":"misc","title":"Towards Robust Interpretability with Self-Explaining Neural Networks","url":"http://arxiv.org/abs/1806.07538","doi":"10.48550/arXiv.1806.07538","abstract":"Most recent work on interpretability of complex machine learning models has focused on estimating ${\\}textit\\{a posteriori\\}$ explanations for previously trained models around specific predictions. ${\\}textit\\{Self-explaining\\}$ models where interpretability plays a key role already during learning have received much less attention. We propose three desiderata for explanations in general – explicitness, faithfulness, and stability – and show that existing methods do not satisfy them. In response, we design self-explaining models in stages, progressively generalizing linear classifiers to complex yet architecturally explicit models. Faithfulness and stability are enforced via regularization specifically tailored to such models. 
Experimental results across various benchmark datasets show that our framework offers a promising direction for reconciling model complexity and interpretability.","urldate":"2023-07-27","publisher":"arXiv","author":[{"propositions":[],"lastnames":["Alvarez-Melis"],"firstnames":["David"],"suffixes":[]},{"propositions":[],"lastnames":["Jaakkola"],"firstnames":["Tommi","S."],"suffixes":[]}],"month":"December","year":"2018","note":"arXiv:1806.07538 [cs, stat]","keywords":"Computer Science - Machine Learning, Statistics - Machine Learning","bibtex":"@misc{alvarez-melis_towards_2018,\n\ttitle = {Towards {Robust} {Interpretability} with {Self}-{Explaining} {Neural} {Networks}},\n\turl = {http://arxiv.org/abs/1806.07538},\n\tdoi = {10.48550/arXiv.1806.07538},\n\tabstract = {Most recent work on interpretability of complex machine learning models has focused on estimating \\${\\textbackslash}textit\\{a posteriori\\}\\$ explanations for previously trained models around specific predictions. \\${\\textbackslash}textit\\{Self-explaining\\}\\$ models where interpretability plays a key role already during learning have received much less attention. We propose three desiderata for explanations in general -- explicitness, faithfulness, and stability -- and show that existing methods do not satisfy them. In response, we design self-explaining models in stages, progressively generalizing linear classifiers to complex yet architecturally explicit models. Faithfulness and stability are enforced via regularization specifically tailored to such models. 
Experimental results across various benchmark datasets show that our framework offers a promising direction for reconciling model complexity and interpretability.},\n\turldate = {2023-07-27},\n\tpublisher = {arXiv},\n\tauthor = {Alvarez-Melis, David and Jaakkola, Tommi S.},\n\tmonth = dec,\n\tyear = {2018},\n\tnote = {arXiv:1806.07538 [cs, stat]},\n\tkeywords = {Computer Science - Machine Learning, Statistics - Machine Learning},\n}\n\n","author_short":["Alvarez-Melis, D.","Jaakkola, T. S."],"key":"alvarez-melis_towards_2018","id":"alvarez-melis_towards_2018","bibbaseid":"alvarezmelis-jaakkola-towardsrobustinterpretabilitywithselfexplainingneuralnetworks-2018","role":"author","urls":{"Paper":"http://arxiv.org/abs/1806.07538"},"keyword":["Computer Science - Machine Learning","Statistics - Machine Learning"],"metadata":{"authorlinks":{}},"html":""},"bibtype":"misc","biburl":"https://api.zotero.org/users/7461051/collections/8LMFUKLJ/items?key=JesLwColmDamE3ak4jR0GxhE&format=bibtex&limit=100","dataSources":["gP7g8bHun7rdpfdsf"],"keywords":["computer science - machine learning","statistics - machine learning"],"search_terms":["towards","robust","interpretability","self","explaining","neural","networks","alvarez-melis","jaakkola"],"title":"Towards Robust Interpretability with Self-Explaining Neural Networks","year":2018}