Measuring Fairness in Ranked Outputs. Yang, K. & Stoyanovich, J. In Proceedings of the 29th International Conference on Scientific and Statistical Database Management, SSDBM '17, pages 22:1–22:6. ACM. Event place: Chicago, IL, USA. Abstract: Ranking and scoring are ubiquitous. We consider the setting in which an institution, called a ranker, evaluates a set of individuals based on demographic, behavioral or other characteristics. The final output is a ranking that represents the relative quality of the individuals. While automatic and therefore seemingly objective, rankers can, and often do, discriminate against individuals and systematically disadvantage members of protected groups. This warrants a careful study of the fairness of a ranking scheme, to enable data science for social good applications, among others. In this paper we propose fairness measures for ranked outputs. We develop a data generation procedure that allows us to systematically control the degree of unfairness in the output, and study the behavior of our measures on these datasets. We then apply our proposed measures to several real datasets, and detect cases of bias. Finally, we show preliminary results of incorporating our ranked fairness measures into an optimization framework, and show potential for improving fairness of ranked outputs while maintaining accuracy. The code implementing all parts of this work is publicly available at https://github.com/DataResponsibly/FairRank.
@inproceedings{yang_measuring_2017,
	title = {Measuring Fairness in Ranked Outputs},
	author = {Yang, Ke and Stoyanovich, Julia},
	booktitle = {Proceedings of the 29th International Conference on Scientific and Statistical Database Management},
	series = {{SSDBM} '17},
	pages = {22:1--22:6},
	publisher = {{ACM}},
	location = {New York, NY, USA},
	venue = {Chicago, IL, USA},
	date = {2017},
	isbn = {978-1-4503-5282-6},
	doi = {10.1145/3085504.3085526},
	url = {https://doi.org/10.1145/3085504.3085526},
	urldate = {2019-03-26},
	abstract = {Ranking and scoring are ubiquitous. We consider the setting in which an institution, called a ranker, evaluates a set of individuals based on demographic, behavioral or other characteristics. The final output is a ranking that represents the relative quality of the individuals. While automatic and therefore seemingly objective, rankers can, and often do, discriminate against individuals and systematically disadvantage members of protected groups. This warrants a careful study of the fairness of a ranking scheme, to enable data science for social good applications, among others. In this paper we propose fairness measures for ranked outputs. We develop a data generation procedure that allows us to systematically control the degree of unfairness in the output, and study the behavior of our measures on these datasets. We then apply our proposed measures to several real datasets, and detect cases of bias. Finally, we show preliminary results of incorporating our ranked fairness measures into an optimization framework, and show potential for improving fairness of ranked outputs while maintaining accuracy. The code implementing all parts of this work is publicly available at https://github.com/DataResponsibly/FairRank.},
	keywords = {Fairness, Accountability, Data, Data Ethics, Data Science for Social Good, Responsibly, Transparency},
}
Downloads: 0
{"_id":"C9brD9HtAdXxpxaT2","bibbaseid":"yang-stoyanovich-measuringfairnessinrankedoutputs","authorIDs":[],"author_short":["Yang, K.","Stoyanovich, J."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","location":"New York, NY, USA","title":"Measuring Fairness in Ranked Outputs","isbn":"978-1-4503-5282-6","url":"http://doi.acm.org/10.1145/3085504.3085526","doi":"10.1145/3085504.3085526","series":"SSDBM '17","abstract":"Ranking and scoring are ubiquitous. We consider the setting in which an institution, called a ranker, evaluates a set of individuals based on demographic, behavioral or other characteristics. The final output is a ranking that represents the relative quality of the individuals. While automatic and therefore seemingly objective, rankers can, and often do, discriminate against individuals and systematically disadvantage members of protected groups. This warrants a careful study of the fairness of a ranking scheme, to enable data science for social good applications, among others. In this paper we propose fairness measures for ranked outputs. We develop a data generation procedure that allows us to systematically control the degree of unfairness in the output, and study the behavior of our measures on these datasets. We then apply our proposed measures to several real datasets, and detect cases of bias. Finally, we show preliminary results of incorporating our ranked fairness measures into an optimization framework, and show potential for improving fairness of ranked outputs while maintaining accuracy. 
The code implementing all parts of this work is publicly available at https://github.com/DataResponsibly/FairRank.","pages":"22:1–22:6","booktitle":"Proceedings of the 29th International Conference on Scientific and Statistical Database Management","publisher":"ACM","author":[{"propositions":[],"lastnames":["Yang"],"firstnames":["Ke"],"suffixes":[]},{"propositions":[],"lastnames":["Stoyanovich"],"firstnames":["Julia"],"suffixes":[]}],"urldate":"2019-03-26","date":"2017","note":"event-place: Chicago, IL, USA","keywords":"Fairness, Accountability, Data, Data Ethics, Data Science for Social Good, Responsibly, Transparency","bibtex":"@inproceedings{yang_measuring_2017,\n\tlocation = {New York, {NY}, {USA}},\n\ttitle = {Measuring Fairness in Ranked Outputs},\n\tisbn = {978-1-4503-5282-6},\n\turl = {http://doi.acm.org/10.1145/3085504.3085526},\n\tdoi = {10.1145/3085504.3085526},\n\tseries = {{SSDBM} '17},\n\tabstract = {Ranking and scoring are ubiquitous. We consider the setting in which an institution, called a ranker, evaluates a set of individuals based on demographic, behavioral or other characteristics. The final output is a ranking that represents the relative quality of the individuals. While automatic and therefore seemingly objective, rankers can, and often do, discriminate against individuals and systematically disadvantage members of protected groups. This warrants a careful study of the fairness of a ranking scheme, to enable data science for social good applications, among others. In this paper we propose fairness measures for ranked outputs. We develop a data generation procedure that allows us to systematically control the degree of unfairness in the output, and study the behavior of our measures on these datasets. We then apply our proposed measures to several real datasets, and detect cases of bias. 
Finally, we show preliminary results of incorporating our ranked fairness measures into an optimization framework, and show potential for improving fairness of ranked outputs while maintaining accuracy. The code implementing all parts of this work is publicly available at https://github.com/{DataResponsibly}/{FairRank}.},\n\tpages = {22:1--22:6},\n\tbooktitle = {Proceedings of the 29th International Conference on Scientific and Statistical Database Management},\n\tpublisher = {{ACM}},\n\tauthor = {Yang, Ke and Stoyanovich, Julia},\n\turldate = {2019-03-26},\n\tdate = {2017},\n\tnote = {event-place: Chicago, {IL}, {USA}},\n\tkeywords = {Fairness, Accountability, Data, Data Ethics, Data Science for Social Good, Responsibly, Transparency}\n}\n\n","author_short":["Yang, K.","Stoyanovich, J."],"key":"yang_measuring_2017","id":"yang_measuring_2017","bibbaseid":"yang-stoyanovich-measuringfairnessinrankedoutputs","role":"author","urls":{"Paper":"http://doi.acm.org/10.1145/3085504.3085526"},"keyword":["Fairness","Accountability","Data","Data Ethics","Data Science for Social Good","Responsibly","Transparency"],"metadata":{"authorlinks":{}},"downloads":0},"bibtype":"inproceedings","biburl":"https://fair-ia.ekstrandom.net/fair-ia.bib","creationDate":"2020-04-09T18:53:45.517Z","downloads":0,"keywords":["fairness","accountability","data","data ethics","data science for social good","responsibly","transparency"],"search_terms":["measuring","fairness","ranked","outputs","yang","stoyanovich"],"title":"Measuring Fairness in Ranked Outputs","year":null,"dataSources":["FRCCaPECNMucjb6Hk"]}