WILDS: A Benchmark of in-the-Wild Distribution Shifts. Koh, P. W., Sagawa, S., Marklund, H., Xie, S. M., Zhang, M., Balsubramani, A., Hu, W., Yasunaga, M., Phillips, R. L., Gao, I., Lee, T., David, E., Stavness, I., Guo, W., Earnshaw, B., Haque, I., Beery, S. M., Leskovec, J., Kundaje, A., Pierson, E., Levine, S., Finn, C., & Liang, P. In Meila, M. & Zhang, T., editors, Proceedings of the 38th International Conference on Machine Learning, volume 139 of Proceedings of Machine Learning Research, pages 5637–5664, July 2021. PMLR.
Abstract: Distribution shifts—where the training distribution differs from the test distribution—can substantially degrade the accuracy of machine learning (ML) systems deployed in the wild. Despite their ubiquity in real-world deployments, these distribution shifts are under-represented in the datasets widely used in the ML community today. To address this gap, we present WILDS, a curated benchmark of 10 datasets reflecting a diverse range of distribution shifts that naturally arise in real-world applications, such as shifts across hospitals for tumor identification; across camera traps for wildlife monitoring; and across time and location in satellite imaging and poverty mapping. On each dataset, we show that standard training yields substantially lower out-of-distribution than in-distribution performance. This gap remains even with models trained by existing methods for tackling distribution shifts, underscoring the need for new methods for training models that are more robust to the types of distribution shifts that arise in practice. To facilitate method development, we provide an open-source package that automates dataset loading, contains default model architectures and hyperparameters, and standardizes evaluations. The full paper, code, and leaderboards are available at https://wilds.stanford.edu.
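The open-source package mentioned in the abstract can be exercised in a few lines. The following is a minimal sketch based on the wilds package's documented usage; the particular dataset (camelyon17), transform, and batch size are illustrative choices rather than the benchmark's prescribed defaults, and exact signatures may vary across package versions.

# Minimal sketch of loading a WILDS dataset with the open-source
# `wilds` package (pip install wilds); dataset choice, transform, and
# batch size here are illustrative assumptions, not benchmark defaults.
import torchvision.transforms as transforms
from wilds import get_dataset
from wilds.common.data_loaders import get_train_loader

# Download Camelyon17 (tumor identification; the shift is across hospitals).
dataset = get_dataset(dataset="camelyon17", download=True)

# Official splits (train / OOD validation / OOD test) are built into the dataset.
train_data = dataset.get_subset(
    "train",
    transform=transforms.Compose([transforms.Resize((96, 96)),
                                  transforms.ToTensor()]),
)

# A standard i.i.d. data loader over the training split.
train_loader = get_train_loader("standard", train_data, batch_size=16)

for x, y, metadata in train_loader:
    # metadata encodes the domain (here, the source hospital); the package's
    # standardized evaluation consumes it, e.g. dataset.eval(y_pred, y_true, metadata).
    pass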
@inproceedings{koh_wilds_2021,
series = {Proceedings of {Machine} {Learning} {Research}},
title = {{WILDS}: {A} {Benchmark} of in-the-{Wild} {Distribution} {Shifts}},
volume = {139},
url = {https://proceedings.mlr.press/v139/koh21a.html},
abstract = {Distribution shifts—where the training distribution differs from the test distribution—can substantially degrade the accuracy of machine learning (ML) systems deployed in the wild. Despite their ubiquity in real-world deployments, these distribution shifts are under-represented in the datasets widely used in the ML community today. To address this gap, we present WILDS, a curated benchmark of 10 datasets reflecting a diverse range of distribution shifts that naturally arise in real-world applications, such as shifts across hospitals for tumor identification; across camera traps for wildlife monitoring; and across time and location in satellite imaging and poverty mapping. On each dataset, we show that standard training yields substantially lower out-of-distribution than in-distribution performance. This gap remains even with models trained by existing methods for tackling distribution shifts, underscoring the need for new methods for training models that are more robust to the types of distribution shifts that arise in practice. To facilitate method development, we provide an open-source package that automates dataset loading, contains default model architectures and hyperparameters, and standardizes evaluations. The full paper, code, and leaderboards are available at https://wilds.stanford.edu.},
booktitle = {Proceedings of the 38th {International} {Conference} on {Machine} {Learning}},
publisher = {PMLR},
author = {Koh, Pang Wei and Sagawa, Shiori and Marklund, Henrik and Xie, Sang Michael and Zhang, Marvin and Balsubramani, Akshay and Hu, Weihua and Yasunaga, Michihiro and Phillips, Richard Lanas and Gao, Irena and Lee, Tony and David, Etienne and Stavness, Ian and Guo, Wei and Earnshaw, Berton and Haque, Imran and Beery, Sara M and Leskovec, Jure and Kundaje, Anshul and Pierson, Emma and Levine, Sergey and Finn, Chelsea and Liang, Percy},
editor = {Meila, Marina and Zhang, Tong},
month = jul,
year = {2021},
pages = {5637--5664},
}
{"_id":"ZyxqQaxCudEYWxEX5","bibbaseid":"koh-sagawa-marklund-xie-zhang-balsubramani-hu-yasunaga-etal-wildsabenchmarkofinthewilddistributionshifts-2021","author_short":["Koh, P. W.","Sagawa, S.","Marklund, H.","Xie, S. M.","Zhang, M.","Balsubramani, A.","Hu, W.","Yasunaga, M.","Phillips, R. L.","Gao, I.","Lee, T.","David, E.","Stavness, I.","Guo, W.","Earnshaw, B.","Haque, I.","Beery, S. M","Leskovec, J.","Kundaje, A.","Pierson, E.","Levine, S.","Finn, C.","Liang, P."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","series":"Proceedings of Machine Learning Research","title":"WILDS: A Benchmark of in-the-Wild Distribution Shifts","volume":"139","url":"https://proceedings.mlr.press/v139/koh21a.html","abstract":"Distribution shifts—where the training distribution differs from the test distribution—can substantially degrade the accuracy of machine learning (ML) systems deployed in the wild. Despite their ubiquity in the real-world deployments, these distribution shifts are under-represented in the datasets widely used in the ML community today. To address this gap, we present WILDS, a curated benchmark of 10 datasets reflecting a diverse range of distribution shifts that naturally arise in real-world applications, such as shifts across hospitals for tumor identification; across camera traps for wildlife monitoring; and across time and location in satellite imaging and poverty mapping. On each dataset, we show that standard training yields substantially lower out-of-distribution than in-distribution performance. This gap remains even with models trained by existing methods for tackling distribution shifts, underscoring the need for new methods for training models that are more robust to the types of distribution shifts that arise in practice. To facilitate method development, we provide an open-source package that automates dataset loading, contains default model architectures and hyperparameters, and standardizes evaluations. 
The full paper, code, and leaderboards are available at https://wilds.stanford.edu.","booktitle":"Proceedings of the 38th International Conference on Machine Learning","publisher":"PMLR","author":[{"propositions":[],"lastnames":["Koh"],"firstnames":["Pang","Wei"],"suffixes":[]},{"propositions":[],"lastnames":["Sagawa"],"firstnames":["Shiori"],"suffixes":[]},{"propositions":[],"lastnames":["Marklund"],"firstnames":["Henrik"],"suffixes":[]},{"propositions":[],"lastnames":["Xie"],"firstnames":["Sang","Michael"],"suffixes":[]},{"propositions":[],"lastnames":["Zhang"],"firstnames":["Marvin"],"suffixes":[]},{"propositions":[],"lastnames":["Balsubramani"],"firstnames":["Akshay"],"suffixes":[]},{"propositions":[],"lastnames":["Hu"],"firstnames":["Weihua"],"suffixes":[]},{"propositions":[],"lastnames":["Yasunaga"],"firstnames":["Michihiro"],"suffixes":[]},{"propositions":[],"lastnames":["Phillips"],"firstnames":["Richard","Lanas"],"suffixes":[]},{"propositions":[],"lastnames":["Gao"],"firstnames":["Irena"],"suffixes":[]},{"propositions":[],"lastnames":["Lee"],"firstnames":["Tony"],"suffixes":[]},{"propositions":[],"lastnames":["David"],"firstnames":["Etienne"],"suffixes":[]},{"propositions":[],"lastnames":["Stavness"],"firstnames":["Ian"],"suffixes":[]},{"propositions":[],"lastnames":["Guo"],"firstnames":["Wei"],"suffixes":[]},{"propositions":[],"lastnames":["Earnshaw"],"firstnames":["Berton"],"suffixes":[]},{"propositions":[],"lastnames":["Haque"],"firstnames":["Imran"],"suffixes":[]},{"propositions":[],"lastnames":["Beery"],"firstnames":["Sara","M"],"suffixes":[]},{"propositions":[],"lastnames":["Leskovec"],"firstnames":["Jure"],"suffixes":[]},{"propositions":[],"lastnames":["Kundaje"],"firstnames":["Anshul"],"suffixes":[]},{"propositions":[],"lastnames":["Pierson"],"firstnames":["Emma"],"suffixes":[]},{"propositions":[],"lastnames":["Levine"],"firstnames":["Sergey"],"suffixes":[]},{"propositions":[],"lastnames":["Finn"],"firstnames":["Chelsea"],"suffixes":[]},{"propositions":[],"lastnames":["Liang"],"firstnames":["Percy"],"suffixes":[]}],"editor":[{"propositions":[],"lastnames":["Meila"],"firstnames":["Marina"],"suffixes":[]},{"propositions":[],"lastnames":["Zhang"],"firstnames":["Tong"],"suffixes":[]}],"month":"July","year":"2021","pages":"5637–5664","bibtex":"@inproceedings{koh_wilds_2021,\n\tseries = {Proceedings of {Machine} {Learning} {Research}},\n\ttitle = {{WILDS}: {A} {Benchmark} of in-the-{Wild} {Distribution} {Shifts}},\n\tvolume = {139},\n\turl = {https://proceedings.mlr.press/v139/koh21a.html},\n\tabstract = {Distribution shifts—where the training distribution differs from the test distribution—can substantially degrade the accuracy of machine learning (ML) systems deployed in the wild. Despite their ubiquity in the real-world deployments, these distribution shifts are under-represented in the datasets widely used in the ML community today. To address this gap, we present WILDS, a curated benchmark of 10 datasets reflecting a diverse range of distribution shifts that naturally arise in real-world applications, such as shifts across hospitals for tumor identification; across camera traps for wildlife monitoring; and across time and location in satellite imaging and poverty mapping. On each dataset, we show that standard training yields substantially lower out-of-distribution than in-distribution performance. 
This gap remains even with models trained by existing methods for tackling distribution shifts, underscoring the need for new methods for training models that are more robust to the types of distribution shifts that arise in practice. To facilitate method development, we provide an open-source package that automates dataset loading, contains default model architectures and hyperparameters, and standardizes evaluations. The full paper, code, and leaderboards are available at https://wilds.stanford.edu.},\n\tbooktitle = {Proceedings of the 38th {International} {Conference} on {Machine} {Learning}},\n\tpublisher = {PMLR},\n\tauthor = {Koh, Pang Wei and Sagawa, Shiori and Marklund, Henrik and Xie, Sang Michael and Zhang, Marvin and Balsubramani, Akshay and Hu, Weihua and Yasunaga, Michihiro and Phillips, Richard Lanas and Gao, Irena and Lee, Tony and David, Etienne and Stavness, Ian and Guo, Wei and Earnshaw, Berton and Haque, Imran and Beery, Sara M and Leskovec, Jure and Kundaje, Anshul and Pierson, Emma and Levine, Sergey and Finn, Chelsea and Liang, Percy},\n\teditor = {Meila, Marina and Zhang, Tong},\n\tmonth = jul,\n\tyear = {2021},\n\tpages = {5637--5664},\n}\n\n\n\n","author_short":["Koh, P. W.","Sagawa, S.","Marklund, H.","Xie, S. M.","Zhang, M.","Balsubramani, A.","Hu, W.","Yasunaga, M.","Phillips, R. L.","Gao, I.","Lee, T.","David, E.","Stavness, I.","Guo, W.","Earnshaw, B.","Haque, I.","Beery, S. M","Leskovec, J.","Kundaje, A.","Pierson, E.","Levine, S.","Finn, C.","Liang, P."],"editor_short":["Meila, M.","Zhang, T."],"key":"koh_wilds_2021","id":"koh_wilds_2021","bibbaseid":"koh-sagawa-marklund-xie-zhang-balsubramani-hu-yasunaga-etal-wildsabenchmarkofinthewilddistributionshifts-2021","role":"author","urls":{"Paper":"https://proceedings.mlr.press/v139/koh21a.html"},"metadata":{"authorlinks":{}},"downloads":1},"bibtype":"inproceedings","biburl":"https://bibbase.org/zotero-group/howcanoewang/4660314","dataSources":["Bw2R3bNsNP7z89tPm","ucgw8KTyMdD4wN9Tx","GBXPD4xSbvGqXeR24","6yXn8CtuzyEbCSr2m","AHMB37rGQdtcwer6D"],"keywords":[],"search_terms":["wilds","benchmark","wild","distribution","shifts","koh","sagawa","marklund","xie","zhang","balsubramani","hu","yasunaga","phillips","gao","lee","david","stavness","guo","earnshaw","haque","beery","leskovec","kundaje","pierson","levine","finn","liang"],"title":"WILDS: A Benchmark of in-the-Wild Distribution Shifts","year":2021,"downloads":1}