CXPlain: Causal Explanations for Model Interpretation under Uncertainty. Schwab, P. & Karlen, W. In Wallach, H., Larochelle, H., Beygelzimer, A., D'Alché-Buc, F., Fox, E., & Garnett, R., editors, Advances in Neural Information Processing Systems 32 (NeurIPS 2019), volume 32 of Advances in Neural Information Processing Systems, pages 9211, 2019.
CXPlain: Causal Explanations for Model Interpretation under Uncertainty [link]Website  abstract   bibtex   
Feature importance estimates that inform us about the degree to which given inputs influence the output of a predictive model are crucial for understanding, validating, and interpreting machine-learning models. However, providing fast and accurate estimates of feature importance for high-dimensional data, and quantifying the uncertainty of such estimates remain open challenges. Here, we frame the task of providing explanations for the decisions of machine-learning models as a causal learning task, and train causal explanation (CXPlain) models that learn to estimate to what degree certain inputs cause outputs in another machine-learning model. CXPlain can, once trained, be used to explain the target model in little time, and enables the quantification of the uncertainty associated with its feature importance estimates via bootstrap ensembling. We present experiments that demonstrate that CXPlain is significantly more accurate than both existing model-agnostic and model-specific methods for estimating feature importance, and that it is significantly faster than other model-agnostic methods. In addition, we confirm that the uncertainty estimates provided by CXPlain ensembles are strongly correlated with their ability to accurately estimate feature importance on held-out data.
@inproceedings{Schwab2019,
 title = {{CXPlain}: Causal Explanations for Model Interpretation under Uncertainty},
 type = {inproceedings},
 year = {2019},
 pages = {9211},
 volume = {32},
 url = {https://arxiv.org/abs/1910.12336},
 city = {Vancouver, CA},
 series = {Advances in Neural Information Processing Systems},
 id = {28da2359-4125-3ed9-8117-2b2a4c62fbf5},
 created = {2019-09-12T10:56:46.160Z},
 file_attached = {true},
 profile_id = {6d353feb-efe4-367e-84a2-0815eb9ca878},
 last_modified = {2022-09-04T18:12:18.579Z},
 read = {true},
 starred = {false},
 authored = {true},
 confirmed = {true},
 hidden = {false},
 citation_key = {Schwab2019},
 notes = {Acceptance rate: 0.21},
 folder_uuids = {f1f67efc-95a7-4f1a-b181-c3670c667a34,4afa922c-d8d6-102e-ac9a-0024e85ead87,0801d9e0-d1ec-46e2-803d-c74946b43a02,d9198259-8733-497d-ab87-d2a9518e0d30},
 private_publication = {false},
 abstract = {Feature importance estimates that inform us about the degree to which given inputs influence the output of a predictive model are crucial for understanding, validating, and interpreting machine-learning models. However, providing fast and accurate estimates of feature importance for high-dimensional data, and quantifying the uncertainty of such estimates remain open challenges. Here, we frame the task of providing explanations for the decisions of machine-learning models as a causal learning task, and train causal explanation (CXPlain) models that learn to estimate to what degree certain inputs cause outputs in another machine-learning model. CXPlain can, once trained, be used to explain the target model in little time, and enables the quantification of the uncertainty associated with its feature importance estimates via bootstrap ensembling. We present experiments that demonstrate that CXPlain is significantly more accurate than both existing model-agnostic and model-specific methods for estimating feature importance, and that it is significantly faster than other model-agnostic methods. In addition, we confirm that the uncertainty estimates provided by CXPlain ensembles are strongly correlated with their ability to accurately estimate feature importance on held-out data.},
 bibtype = {inproceedings},
 author = {Schwab, Patrick and Karlen, Walter},
 editor = {Wallach, H. and Larochelle, H. and Beygelzimer, A. and D'Alché-Buc, F. and Fox, E. and Garnett, R.},
 booktitle = {Advances in Neural Information Processing Systems 32 (NeurIPS 2019)}
}

Downloads: 0