KSD Aggregated Goodness-of-fit Test.
Schrab, A.; Guedj, B.; and Gretton, A.
In Oh, A. H.; Agarwal, A.; Belgrave, D.; and Cho, K., editor(s), Advances in Neural Information Processing Systems 35: Annual Conference on Neural Information Processing Systems 2022, NeurIPS 2022, 2022.
@inproceedings{schrab2022ksd,
  title = {{KSD} Aggregated Goodness-of-fit Test},
  author = {Antonin Schrab and Benjamin Guedj and Arthur Gretton},
  booktitle = {Advances in Neural Information Processing Systems 35: Annual Conference on Neural Information Processing Systems 2022, NeurIPS 2022},
  editor = {Alice H. Oh and Alekh Agarwal and Danielle Belgrave and Kyunghyun Cho},
  year = {2022},
  abstract = {We investigate properties of goodness-of-fit tests based on the Kernel Stein Discrepancy (KSD). We introduce a strategy to construct a test, called KSDAgg, which aggregates multiple tests with different kernels. KSDAgg avoids splitting the data to perform kernel selection (which leads to a loss in test power), and rather maximises the test power over a collection of kernels. We provide theoretical guarantees on the power of KSDAgg: we show it achieves the smallest uniform separation rate of the collection, up to a logarithmic term. KSDAgg can be computed exactly in practice as it relies either on a parametric bootstrap or on a wild bootstrap to estimate the quantiles and the level corrections. In particular, for the crucial choice of bandwidth of a fixed kernel, it avoids resorting to arbitrary heuristics (such as median or standard deviation) or to data splitting. We find on both synthetic and real-world data that KSDAgg outperforms other state-of-the-art adaptive KSD-based goodness-of-fit testing procedures.},
  keywords = {Goodness-of-fit testing},
  url = {https://proceedings.neurips.cc/paper_files/paper/2022/hash/d241a7b1499cee1bf40769ceade2444d-Abstract-Conference.html},
  url_PDF = {https://proceedings.neurips.cc/paper_files/paper/2022/file/d241a7b1499cee1bf40769ceade2444d-Paper-Conference.pdf},
  url_Supplemental = {https://proceedings.neurips.cc/paper_files/paper/2022/file/d241a7b1499cee1bf40769ceade2444d-Supplemental-Conference.pdf},
  url_Code_1 = {https://github.com/antoninschrab/ksdagg},
  url_Code_2 = {https://github.com/antoninschrab/ksdagg-paper},
  url_Slides_1 = {https://antoninschrab.github.io/files/Slides_MMDAgg_KSDAgg_long.pdf},
  url_Slides_2 = {https://antoninschrab.github.io/files/Slides_handout-31-05-22.pdf},
  url_Slides_3 = {https://nips.cc/media/neurips-2022/Slides/54932.pdf},
  url_Poster_1 = {https://nips.cc/media/PosterPDFs/NeurIPS%202022/54932.png?t=1669384001.1315906},
  url_Poster_2 = {https://antoninschrab.github.io/files/Poster_MMDAgg_KSDAgg.pdf},
  url_Poster_3 = {https://antoninschrab.github.io/files/Poster-03-09-22.pdf},
  url_Video_1 = {https://nips.cc/virtual/2022/poster/54932},
  url_Video_2 = {https://youtu.be/F0VOCrAf5_M},
  url_Video_3 = {https://youtu.be/OWh6Hj10wsY},
  eprint = {2202.00824},
  archivePrefix = {arXiv},
  primaryClass = {stat.ML}
}
We investigate properties of goodness-of-fit tests based on the Kernel Stein Discrepancy (KSD). We introduce a strategy to construct a test, called KSDAgg, which aggregates multiple tests with different kernels. KSDAgg avoids splitting the data to perform kernel selection (which leads to a loss in test power), and rather maximises the test power over a collection of kernels. We provide theoretical guarantees on the power of KSDAgg: we show it achieves the smallest uniform separation rate of the collection, up to a logarithmic term. KSDAgg can be computed exactly in practice as it relies either on a parametric bootstrap or on a wild bootstrap to estimate the quantiles and the level corrections. In particular, for the crucial choice of bandwidth of a fixed kernel, it avoids resorting to arbitrary heuristics (such as median or standard deviation) or to data splitting. We find on both synthetic and real-world data that KSDAgg outperforms other state-of-the-art adaptive KSD-based goodness-of-fit testing procedures.
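To make the mechanism in the abstract concrete, here is a minimal NumPy sketch of an aggregated wild-bootstrap KSD test with a Gaussian kernel. It is not the authors' implementation (see the linked ksdagg repository for that): for simplicity it uses a plain Bonferroni correction over bandwidths, whereas KSDAgg estimates a tighter level correction, and all function names are illustrative.

import numpy as np

def gaussian_stein_kernel(X, score_X, bandwidth):
    """Stein kernel matrix H[i, j] = h_p(x_i, x_j) for a Gaussian kernel.

    X: (n, d) samples; score_X: (n, d) model scores grad log p(x_i)."""
    n, d = X.shape
    diffs = X[:, None, :] - X[None, :, :]             # (n, n, d) pairwise x_i - x_j
    sq = np.sum(diffs ** 2, axis=-1)                  # squared pairwise distances
    K = np.exp(-sq / (2 * bandwidth ** 2))            # Gaussian kernel matrix
    ss = score_X @ score_X.T                          # s(x_i)^T s(x_j)
    sd = np.einsum("id,ijd->ij", score_X, diffs)      # s(x_i)^T (x_i - x_j)
    ds = np.einsum("jd,ijd->ij", score_X, diffs)      # s(x_j)^T (x_i - x_j)
    cross = (sd - ds) / bandwidth ** 2                # mixed score/kernel-gradient terms
    trace = d / bandwidth ** 2 - sq / bandwidth ** 4  # trace of grad_x grad_y k
    return (ss + cross + trace) * K

def ksd_agg_test(X, score_X, bandwidths, alpha=0.05, n_boot=500, seed=0):
    """Reject the null 'X was drawn from the model p' if any single-bandwidth
    wild-bootstrap KSD test rejects at the (Bonferroni-)corrected level."""
    rng = np.random.default_rng(seed)
    n = X.shape[0]
    eps = rng.choice([-1.0, 1.0], size=(n_boot, n))   # Rademacher signs, shared across bandwidths
    level = alpha / len(bandwidths)                   # crude correction; KSDAgg estimates a tighter one
    for bw in bandwidths:
        H = gaussian_stein_kernel(X, score_X, bw)
        np.fill_diagonal(H, 0.0)                      # U-statistic: drop diagonal terms
        stat = H.sum() / (n * (n - 1))
        boot = np.einsum("bi,ij,bj->b", eps, H, eps) / (n * (n - 1))
        if stat > np.quantile(boot, 1 - level):
            return True
    return False

# Model p = N(0, I), whose score is grad log p(x) = -x; the samples are
# drawn off the model, so the test should reject with high probability.
rng = np.random.default_rng(1)
X = rng.normal(loc=0.5, scale=1.0, size=(300, 2))
print(ksd_agg_test(X, -X, bandwidths=[0.5, 1.0, 2.0]))  # True

Sharing one set of Rademacher signs across bandwidths mirrors the wild-bootstrap calibration described in the abstract; only the level correction is simplified here.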
Efficient Aggregated Kernel Tests using Incomplete $U$-statistics.
Schrab, A.; Kim, I.; Guedj, B.; and Gretton, A.
In Oh, A. H.; Agarwal, A.; Belgrave, D.; and Cho, K., editor(s), Advances in Neural Information Processing Systems 35: Annual Conference on Neural Information Processing Systems 2022, NeurIPS 2022, 2022.
@inproceedings{schrab2022efficient,
  title = {Efficient Aggregated Kernel Tests using Incomplete {$U$}-statistics},
  author = {Antonin Schrab and Ilmun Kim and Benjamin Guedj and Arthur Gretton},
  booktitle = {Advances in Neural Information Processing Systems 35: Annual Conference on Neural Information Processing Systems 2022, NeurIPS 2022},
  editor = {Alice H. Oh and Alekh Agarwal and Danielle Belgrave and Kyunghyun Cho},
  year = {2022},
  abstract = {We propose a series of computationally efficient, nonparametric tests for the two-sample, independence and goodness-of-fit problems, using the Maximum Mean Discrepancy (MMD), Hilbert Schmidt Independence Criterion (HSIC), and Kernel Stein Discrepancy (KSD), respectively. Our test statistics are incomplete $U$-statistics, with a computational cost that interpolates between linear time in the number of samples, and quadratic time, as associated with classical $U$-statistic tests. The three proposed tests aggregate over several kernel bandwidths to detect departures from the null on various scales: we call the resulting tests MMDAggInc, HSICAggInc and KSDAggInc. For the test thresholds, we derive a quantile bound for wild bootstrapped incomplete $U$-statistics, which is of independent interest. We derive uniform separation rates for MMDAggInc and HSICAggInc, and quantify exactly the trade-off between computational efficiency and the attainable rates: this result is novel for tests based on incomplete $U$-statistics, to our knowledge. We further show that in the quadratic-time case, the wild bootstrap incurs no penalty to test power over more widespread permutation-based approaches, since both attain the same minimax optimal rates (which in turn match the rates that use oracle quantiles). We support our claims with numerical experiments on the trade-off between computational efficiency and test power. In the three testing frameworks, we observe that our proposed linear-time aggregated tests obtain higher power than current state-of-the-art linear-time kernel tests.},
  keywords = {Two-sample testing, Independence testing, Goodness-of-fit testing},
  url = {https://proceedings.neurips.cc/paper_files/paper/2022/hash/774164b966cc277c82a960934445140d-Abstract-Conference.html},
  url_PDF = {https://proceedings.neurips.cc/paper_files/paper/2022/file/774164b966cc277c82a960934445140d-Paper-Conference.pdf},
  url_Supplemental = {https://proceedings.neurips.cc/paper_files/paper/2022/file/774164b966cc277c82a960934445140d-Supplemental-Conference.pdf},
  url_Code_1 = {https://github.com/antoninschrab/agginc},
  url_Code_2 = {https://github.com/antoninschrab/agginc-paper},
  url_Slides_1 = {https://antoninschrab.github.io/files/Slides_handout-31-05-22.pdf},
  url_Slides_2 = {https://nips.cc/media/neurips-2022/Slides/54933.pdf},
  url_Poster_1 = {https://nips.cc/media/PosterPDFs/NeurIPS%202022/54933.png?t=1669384934.1906412},
  url_Poster_2 = {https://antoninschrab.github.io/files/Poster-03-09-22.pdf},
  url_Video = {https://nips.cc/virtual/2022/poster/54933},
  eprint = {2206.09194},
  archivePrefix = {arXiv},
  primaryClass = {stat.ML}
}
We propose a series of computationally efficient, nonparametric tests for the two-sample, independence and goodness-of-fit problems, using the Maximum Mean Discrepancy (MMD), Hilbert Schmidt Independence Criterion (HSIC), and Kernel Stein Discrepancy (KSD), respectively. Our test statistics are incomplete $U$-statistics, with a computational cost that interpolates between linear time in the number of samples, and quadratic time, as associated with classical $U$-statistic tests. The three proposed tests aggregate over several kernel bandwidths to detect departures from the null on various scales: we call the resulting tests MMDAggInc, HSICAggInc and KSDAggInc. For the test thresholds, we derive a quantile bound for wild bootstrapped incomplete $U$-statistics, which is of independent interest. We derive uniform separation rates for MMDAggInc and HSICAggInc, and quantify exactly the trade-off between computational efficiency and the attainable rates: this result is novel for tests based on incomplete $U$-statistics, to our knowledge. We further show that in the quadratic-time case, the wild bootstrap incurs no penalty to test power over more widespread permutation-based approaches, since both attain the same minimax optimal rates (which in turn match the rates that use oracle quantiles). We support our claims with numerical experiments on the trade-off between computational efficiency and test power. In the three testing frameworks, we observe that our proposed linear-time aggregated tests obtain higher power than current state-of-the-art linear-time kernel tests.
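As a concrete illustration of the incomplete-$U$-statistic construction, here is a minimal NumPy sketch of a two-sample (MMD-type) test. It is not the authors' implementation (see the linked agginc repository for that): it subsamples l index pairs instead of summing over all n(n-1) pairs, so the cost interpolates between linear and quadratic in n, and it calibrates the threshold with the same wild bootstrap; a plain Bonferroni correction stands in for the paper's aggregation, and all names are illustrative.

import numpy as np

def gaussian_kernel(A, B, bandwidth):
    """k(a_r, b_r) for row-paired inputs A, B of shape (l, d)."""
    return np.exp(-np.sum((A - B) ** 2, axis=-1) / (2 * bandwidth ** 2))

def incomplete_mmd_agg_test(X, Y, bandwidths, l, alpha=0.05, n_boot=500, seed=0):
    """Two-sample test from paired samples X, Y of shape (n, d), using an
    incomplete U-statistic over l random index pairs and a wild bootstrap."""
    rng = np.random.default_rng(seed)
    n = X.shape[0]
    # Incomplete design: l random pairs (i, j) with i != j, instead of all n(n-1).
    i = rng.integers(0, n, size=l)
    j = rng.integers(0, n, size=l)
    i, j = i[i != j], j[i != j]
    eps = rng.choice([-1.0, 1.0], size=(n_boot, n))   # Rademacher signs for the wild bootstrap
    level = alpha / len(bandwidths)                   # crude stand-in for the paper's correction
    for bw in bandwidths:
        # Core h-values of the MMD U-statistic, evaluated on the sampled pairs only.
        h = (gaussian_kernel(X[i], X[j], bw) + gaussian_kernel(Y[i], Y[j], bw)
             - gaussian_kernel(X[i], Y[j], bw) - gaussian_kernel(X[j], Y[i], bw))
        stat = h.mean()
        boot = (eps[:, i] * eps[:, j] * h).mean(axis=1)  # wild-bootstrapped null statistics
        if stat > np.quantile(boot, 1 - level):
            return True
    return False

# The two distributions differ in mean, so the test should reject.
rng = np.random.default_rng(2)
X = rng.normal(size=(500, 3))
Y = rng.normal(loc=0.3, size=(500, 3))
print(incomplete_mmd_agg_test(X, Y, bandwidths=[0.5, 1.0, 2.0], l=2000))  # True

The parameter l is the computational dial from the abstract: l proportional to n gives a linear-time test, while l = n(n-1) recovers the complete quadratic-time $U$-statistic.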