Bibliography generated by bibbase.org from https://fair-ia.ekstrandom.net/fair-ia.bib

2018 (1)

Ekstrand, M. D.; Tian, M.; Kazi, M. R. I.; Mehrpouyan, H.; and Kluver, D. Exploring Author Gender in Book Rating and Recommendation. In Proceedings of the Twelfth ACM Conference on Recommender Systems, ACM, 2018. doi:10.1145/3240323.3240373. https://md.ekstrandom.net/pubs/book-author-gender

@inproceedings{bag-recsys18,
  title = {Exploring Author Gender in Book Rating and Recommendation},
  rights = {All rights reserved},
  url = {https://md.ekstrandom.net/pubs/book-author-gender},
  doi = {10.1145/3240323.3240373},
  eventtitle = {Proceedings of the Twelfth {ACM} Conference on Recommender Systems},
  publisher = {{ACM}},
  author = {Ekstrand, Michael D and Tian, Mucun and Kazi, Mohammed R Imran and Mehrpouyan, Hoda and Kluver, Daniel},
  date = {2018},
  year = 2018,
  keywords = {My Papers, Research Using {LensKit}}
}

Other (41)
\n \n\n \n \n \n \n \n \n Where fairness fails: data, algorithms, and the limits of antidiscrimination discourse.\n \n \n \n \n\n\n \n Hoffmann, A. L.\n\n\n \n\n\n\n , 22(7): 900–915. .\n \n\n\n\n
\n\n\n\n \n \n \"WherePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n

@article{hoffmann_where_2019,
  title = {Where fairness fails: data, algorithms, and the limits of antidiscrimination discourse},
  volume = {22},
  issn = {1369-118X},
  url = {https://doi.org/10.1080/1369118X.2019.1573912},
  doi = {10.1080/1369118X.2019.1573912},
  shorttitle = {Where fairness fails},
  abstract = {Problems of bias and fairness are central to data justice, as they speak directly to the threat that ‘big data’ and algorithmic decision-making may worsen already existing injustices. In the United States, grappling with these problems has found clearest expression through liberal discourses of rights, due process, and antidiscrimination. Work in this area, however, has tended to overlook certain established limits of antidiscrimination discourses for bringing about the change demanded by social justice. In this paper, I engage three of these limits: 1) an overemphasis on discrete ‘bad actors’, 2) single-axis thinking that centers disadvantage, and 3) an inordinate focus on a limited set of goods. I show that, in mirroring some of antidiscrimination discourse’s most problematic tendencies, efforts to achieve fairness and combat algorithmic discrimination fail to address the very hierarchical logic that produces advantaged and disadvantaged subjects in the first place. Finally, I conclude by sketching three paths for future work to better account for the structural conditions against which we come to understand problems of data and unjust discrimination in the first place.},
  pages = {900--915},
  number = {7},
  journaltitle = {Information, Communication \& Society},
  author = {Hoffmann, Anna Lauren},
  urldate = {2019-05-28},
  date = {2019-06-07},
  keywords = {algorithms, antidiscrimination, Big data, intersectionality, social justice}
}

Mehrotra, R.; McInerney, J.; Bouchard, H.; Lalmas, M.; and Diaz, F. Towards a Fair Marketplace: Counterfactual Evaluation of the Trade-off Between Relevance, Fairness & Satisfaction in Recommendation Systems. In Proceedings of the 27th ACM International Conference on Information and Knowledge Management (CIKM '18), pages 2243–2251, ACM, 2018. doi:10.1145/3269206.3272027

@inproceedings{mehrotra_towards_2018,
  location = {New York, {NY}, {USA}},
  title = {Towards a Fair Marketplace: Counterfactual Evaluation of the Trade-off Between Relevance, Fairness \& Satisfaction in Recommendation Systems},
  isbn = {978-1-4503-6014-2},
  url = {http://doi.acm.org/10.1145/3269206.3272027},
  doi = {10.1145/3269206.3272027},
  series = {{CIKM} '18},
  shorttitle = {Towards a Fair Marketplace},
  abstract = {Two-sided marketplaces are platforms that have customers not only on the demand side (e.g. users), but also on the supply side (e.g. retailer, artists). While traditional recommender systems focused specifically towards increasing consumer satisfaction by providing relevant content to consumers, two-sided marketplaces face the problem of additionally optimizing for supplier preferences, and visibility. Indeed, the suppliers would want a fair opportunity to be presented to users. Blindly optimizing for consumer relevance may have a detrimental impact on supplier fairness. Motivated by this problem, we focus on the trade-off between objectives of consumers and suppliers in the case of music streaming services, and consider the trade-off between relevance of recommendations to the consumer (i.e. user) and fairness of representation of suppliers (i.e. artists) and measure their impact on consumer satisfaction. We propose a conceptual and computational framework using counterfactual estimation techniques to understand, and evaluate different recommendation policies, specifically around the trade-off between relevance and fairness, without the need for running many costly A/B tests. We propose a number of recommendation policies which jointly optimize relevance and fairness, thereby achieving substantial improvement in supplier fairness without noticeable decline in user satisfaction. Additionally, we consider user disposition towards fair content, and propose a personalized recommendation policy which takes into account consumer's tolerance towards fair content. Our findings could guide the design of algorithms powering two-sided marketplaces, as well as guide future research on sophisticated algorithms for joint optimization of user relevance, satisfaction and fairness.},
  pages = {2243--2251},
  booktitle = {Proceedings of the 27th {ACM} International Conference on Information and Knowledge Management},
  publisher = {{ACM}},
  author = {Mehrotra, Rishabh and {McInerney}, James and Bouchard, Hugues and Lalmas, Mounia and Diaz, Fernando},
  urldate = {2019-07-17},
  date = {2018},
  note = {event-place: Torino, Italy},
  keywords = {fairness, marketplace, satisfaction}
}

Hajian, S.; Bonchi, F.; and Castillo, C. Algorithmic Bias: From Discrimination Discovery to Fairness-aware Data Mining. In Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (KDD '16), pages 2125–2126, ACM, 2016. doi:10.1145/2939672.2945386

@inproceedings{hajian_algorithmic_2016,
  location = {New York, {NY}, {USA}},
  title = {Algorithmic Bias: From Discrimination Discovery to Fairness-aware Data Mining},
  isbn = {978-1-4503-4232-2},
  url = {http://doi.acm.org/10.1145/2939672.2945386},
  doi = {10.1145/2939672.2945386},
  series = {{KDD} '16},
  shorttitle = {Algorithmic Bias},
  abstract = {Algorithms and decision making based on Big Data have become pervasive in all aspects of our daily lives (offline and online), as they have become essential tools in personal finance, health care, hiring, housing, education, and policies. It is therefore of societal and ethical importance to ask whether these algorithms can be discriminative on grounds such as gender, ethnicity, or health status. It turns out that the answer is positive: for instance, recent studies in the context of online advertising show that ads for high-income jobs are presented to men much more often than to women [Datta et al., 2015]; and ads for arrest records are significantly more likely to show up on searches for distinctively black names [Sweeney, 2013]. This algorithmic bias exists even when there is no discrimination intention in the developer of the algorithm. Sometimes it may be inherent to the data sources used (software making decisions based on data can reflect, or even amplify, the results of historical discrimination), but even when the sensitive attributes have been suppressed from the input, a well trained machine learning algorithm may still discriminate on the basis of such sensitive attributes because of correlations existing in the data. These considerations call for the development of data mining systems which are discrimination-conscious by-design. This is a novel and challenging research area for the data mining community. The aim of this tutorial is to survey algorithmic bias, presenting its most common variants, with an emphasis on the algorithmic techniques and key ideas developed to derive efficient solutions. The tutorial covers two main complementary approaches: algorithms for discrimination discovery and discrimination prevention by means of fairness-aware data mining. We conclude by summarizing promising paths for future research.},
  pages = {2125--2126},
  booktitle = {Proceedings of the 22nd {ACM} {SIGKDD} International Conference on Knowledge Discovery and Data Mining},
  publisher = {{ACM}},
  author = {Hajian, Sara and Bonchi, Francesco and Castillo, Carlos},
  urldate = {2019-07-10},
  date = {2016},
  note = {event-place: San Francisco, California, {USA}},
  keywords = {algorithmic bias, discrimination discovery, discrimination prevention}
}

Karako, C.; and Manggala, P. Using Image Fairness Representations in Diversity-Based Re-ranking for Recommendations. In Adjunct Publication of the 26th Conference on User Modeling, Adaptation and Personalization (UMAP '18), pages 23–28, ACM, 2018. doi:10.1145/3213586.3226206

@inproceedings{karako_using_2018,
  location = {New York, {NY}, {USA}},
  title = {Using Image Fairness Representations in Diversity-Based Re-ranking for Recommendations},
  isbn = {978-1-4503-5784-5},
  url = {http://doi.acm.org/10.1145/3213586.3226206},
  doi = {10.1145/3213586.3226206},
  series = {{UMAP} '18},
  abstract = {The trade-off between relevance and fairness in personalized recommendations has been explored in recent works, with the goal of minimizing learned discrimination towards certain demographics while still producing relevant results. We present a fairness-aware variation of the Maximal Marginal Relevance ({MMR}) re-ranking method which uses representations of demographic groups computed using a labeled dataset. This method is intended to incorporate fairness with respect to these demographic groups. We perform an experiment on a stock photo dataset and examine the trade-off between relevance and fairness against a well known baseline, {MMR}, by using human judgment to examine the results of the re-ranking when using different fractions of a labeled dataset, and by performing a quantitative analysis on the ranked results of a set of query images. We show that our proposed method can incorporate fairness in the ranked results while obtaining higher precision than the baseline, while our case study shows that even a limited amount of labeled data can be used to compute the representations to obtain fairness. This method can be used as a post-processing step for recommender systems and search.},
  pages = {23--28},
  booktitle = {Adjunct Publication of the 26th Conference on User Modeling, Adaptation and Personalization},
  publisher = {{ACM}},
  author = {Karako, Chen and Manggala, Putra},
  urldate = {2019-07-10},
  date = {2018},
  note = {event-place: Singapore, Singapore},
  keywords = {fairness, recommender systems, diversity, diversity, fairness, information retrieval, recommender systems, fatrec, information retrieval}
}
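
The abstract in the entry above describes a fairness-aware variation of Maximal Marginal Relevance (MMR) re-ranking. As a rough illustration of that family of post-processing methods, and not the authors' formulation, the sketch below greedily trades a relevance score against how balanced the already-selected set is across a group attribute; the balance term, the lam weight, and all data are assumptions made purely for illustration:

# Illustrative fairness-aware MMR-style re-ranking (not the authors' exact method).
# Greedily picks items, trading relevance against how well the selected set
# balances a group attribute.

from collections import Counter

def fairness_aware_mmr(items, relevance, group, k, lam=0.5):
    """items: list of ids; relevance: id -> float; group: id -> group label;
    k: result length; lam: weight on relevance (1 - lam weights balance)."""
    selected = []
    candidates = set(items)
    while candidates and len(selected) < k:
        counts = Counter(group[i] for i in selected)
        def balance_gain(item):
            # prefer items whose group is under-represented in the selection so far
            return 1.0 / (1 + counts[group[item]])
        best = max(candidates,
                   key=lambda i: lam * relevance[i] + (1 - lam) * balance_gain(i))
        selected.append(best)
        candidates.remove(best)
    return selected

# toy usage
items = ["a", "b", "c", "d"]
rel = {"a": 0.9, "b": 0.85, "c": 0.5, "d": 0.4}
grp = {"a": "g1", "b": "g1", "c": "g2", "d": "g2"}
print(fairness_aware_mmr(items, rel, grp, k=3))   # ['a', 'c', 'b']: groups interleaved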

Ensign, D.; Friedler, S. A.; Neville, S.; Scheidegger, C.; and Venkatasubramanian, S. Runaway Feedback Loops in Predictive Policing. In Conference on Fairness, Accountability and Transparency, pages 160–171, 2018. http://proceedings.mlr.press/v81/ensign18a.html

@inproceedings{ensign_runaway_2018,
  title = {Runaway Feedback Loops in Predictive Policing},
  url = {http://proceedings.mlr.press/v81/ensign18a.html},
  abstract = {Predictive policing systems are increasingly used to determine how to allocate police across a city in order to best prevent crime. Discovered crime data (e.g., arrest counts) are used to help upda...},
  eventtitle = {Conference on Fairness, Accountability and Transparency},
  pages = {160--171},
  booktitle = {Conference on Fairness, Accountability and Transparency},
  author = {Ensign, Danielle and Friedler, Sorelle A. and Neville, Scott and Scheidegger, Carlos and Venkatasubramanian, Suresh},
  urldate = {2019-07-11},
  date = {2018-01-21},
  langid = {english}
}
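
The stored abstract above is truncated, but the core phenomenon the paper analyzes, discovered incidents feeding back into allocation decisions, can be shown with a small toy simulation. Everything below (the proportional allocation rule, the rates, and the patrol counts) is an assumption for illustration only, not the paper's model or data:

# Toy illustration of a feedback loop driven by discovered incidents
# (not the paper's model or data). Two regions have the same true incident
# rate, but discoveries only happen where patrols are sent, and the next
# allocation follows the accumulated discovery counts.

import random

random.seed(0)
true_rate = [0.3, 0.3]          # identical underlying rates
discovered = [1, 1]             # symmetric starting counts
n_patrols = 10

for day in range(200):
    total = sum(discovered)
    for region in range(2):
        # patrols allocated proportionally to past discoveries
        patrols = round(n_patrols * discovered[region] / total)
        # each patrol discovers an incident with the region's true rate
        discovered[region] += sum(random.random() < true_rate[region]
                                  for _ in range(patrols))

print("discovered counts after 200 days:", discovered)
# Even with identical true rates, the counts tend to drift apart: more patrols
# produce more discoveries, which in turn attract more patrols.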

Kamishima, T.; Akaho, S.; Asoh, H.; and Sakuma, J. Recommendation Independence. Proceedings of Machine Learning Research, 81: 187–201, 2018. http://proceedings.mlr.press/v81/kamishima18a.html

@article{kamishima_recommendation_2018,
  title = {Recommendation Independence},
  volume = {81},
  url = {http://proceedings.mlr.press/v81/kamishima18a.html},
  abstract = {This paper studies a recommendation algorithm whose outcomes are not influenced by specified information. It is useful in contexts where potentially unfair decisions should be avoided, such as job-applicant recommendations that are not influenced by socially sensitive information. An algorithm that could exclude the influence of sensitive information would thus be useful for job-matching with fairness. We call the condition between a recommendation outcome and a sensitive feature Recommendation Independence, which is formally defined as statistical independence between the outcome and the feature. Our previous independence-enhanced algorithms simply matched the means of predictions between sub-datasets consisting of the same sensitive value. However, this approach could not remove the sensitive information represented by the second or higher moments of distributions. In this paper, we develop new methods that can deal with the second moment, i.e., variance, of recommendation outcomes without increasing the computational complexity. These methods can more strictly remove the sensitive information, and experimental results demonstrate that our new algorithms can more effectively eliminate the factors that undermine fairness. Additionally, we explore potential applications for independence-enhanced recommendation, and discuss its relation to other concepts, such as recommendation diversity.},
  pages = {187--201},
  author = {Kamishima, Toshihiro and Akaho, Shotaro and Asoh, Hideki and Sakuma, Jun},
  date = {2018}
}
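
As a hedged sketch of the notion in the entry above, the snippet below compares both the first and second moments (mean and variance) of predicted scores across values of a sensitive feature: matching only the means can still leave the groups distinguishable by their spread. The two-group setup and the toy data are illustrative assumptions, not the authors' algorithm:

# Illustrative check related to "recommendation independence": compare the
# distribution of predicted scores across sensitive-feature values, looking at
# both means and variances.

from statistics import mean, pvariance

def moment_gaps(scores, sensitive):
    groups = {}
    for s, a in zip(scores, sensitive):
        groups.setdefault(a, []).append(s)
    per_group = {a: (mean(v), pvariance(v)) for a, v in groups.items()}
    means = [m for m, _ in per_group.values()]
    variances = [v for _, v in per_group.values()]
    return max(means) - min(means), max(variances) - min(variances)

# toy data: equal means across groups, unequal spread
scores =    [0.4, 0.5, 0.6,  0.1, 0.5, 0.9]
sensitive = ["a", "a", "a",  "b", "b", "b"]
print(moment_gaps(scores, sensitive))   # mean gap ~0, variance gap ~0.1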

Celis, L. E.; Straszak, D.; and Vishnoi, N. K. Ranking with Fairness Constraints. In Chatzigiannakis, I.; Kaklamanis, C.; Marx, D.; and Sannella, D., editors, 45th International Colloquium on Automata, Languages, and Programming (ICALP 2018), volume 107 of Leibniz International Proceedings in Informatics (LIPIcs), pages 28:1–28:15, Schloss Dagstuhl–Leibniz-Zentrum fuer Informatik, 2018. doi:10.4230/LIPIcs.ICALP.2018.28

@inproceedings{celis_ranking_2018,
  location = {Dagstuhl, Germany},
  title = {Ranking with Fairness Constraints},
  volume = {107},
  isbn = {978-3-95977-076-7},
  url = {http://drops.dagstuhl.de/opus/volltexte/2018/9032},
  doi = {10.4230/LIPIcs.ICALP.2018.28},
  series = {Leibniz International Proceedings in Informatics ({LIPIcs})},
  pages = {28:1--28:15},
  booktitle = {45th International Colloquium on Automata, Languages, and Programming ({ICALP} 2018)},
  publisher = {Schloss Dagstuhl–Leibniz-Zentrum fuer Informatik},
  author = {Celis, L. Elisa and Straszak, Damian and Vishnoi, Nisheeth K.},
  editor = {Chatzigiannakis, Ioannis and Kaklamanis, Christos and Marx, Dániel and Sannella, Donald},
  urldate = {2019-07-10},
  date = {2018},
  keywords = {Fairness, Approximation Algorithms, Matching, Optimization, Ranking}
}

Sapiezynski, P.; Zeng, W.; Robertson, R. E.; Mislove, A.; and Wilson, C. Quantifying the Impact of User Attention on Fair Group Representation in Ranked Lists. In Companion Proceedings of The 2019 World Wide Web Conference (WWW '19), pages 553–562, ACM Press, 2019. doi:10.1145/3308560.3317595

@inproceedings{sapiezynski_quantifying_2019,
  location = {San Francisco, {USA}},
  title = {Quantifying the Impact of User Attention on Fair Group Representation in Ranked Lists},
  isbn = {978-1-4503-6675-5},
  url = {http://dl.acm.org/citation.cfm?doid=3308560.3317595},
  doi = {10.1145/3308560.3317595},
  abstract = {In this work, we introduce a novel metric for auditing group fairness in ranked lists. Our approach offers two benefits compared to the state of the art. First, we offer a blueprint for modeling of user attention. Rather than assuming a logarithmic loss in importance as a function of the rank, we can account for varying user behaviors through parametrization. For example, we expect a user to see more items during a viewing of a social media feed than when they inspect the results list of a single web search query. Second, we allow non-binary protected attributes to enable investigating inherently continuous attributes (e.g., political alignment on the liberal to conservative spectrum) as well as to facilitate measurements across aggregated sets of search results, rather than separately for each result list. By combining these two elements into our metric, we are able to better address the human factors inherent in this problem. We measure the whole sociotechnical system, consisting of a ranking algorithm and individuals using it, instead of exclusively focusing on the ranking algorithm. Finally, we use our metric to perform three simulated fairness audits. We show that determining fairness of a ranked output necessitates knowledge (or a model) of the end-users of the particular service. Depending on their attention distribution function, a fixed ranking of results can appear biased both in favor and against a protected group.},
  eventtitle = {Companion of The 2019 World Wide Web Conference},
  pages = {553--562},
  booktitle = {Companion Proceedings of The 2019 World Wide Web Conference ({WWW} '19)},
  publisher = {{ACM} Press},
  author = {Sapiezynski, Piotr and Zeng, Wesley and E Robertson, Ronald and Mislove, Alan and Wilson, Christo},
  urldate = {2019-05-21},
  date = {2019},
  langid = {english}
}
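
A minimal sketch of the general idea described above: weight group representation by a parameterized model of user attention rather than a fixed logarithmic discount, and allow non-binary group membership. The geometric attention function and its p_continue parameter are assumptions for illustration, not the paper's metric:

# Illustrative attention-weighted group exposure for a ranked list.
# `membership[i]` is a value in [0, 1] (non-binary protected attribute), and
# `attention` maps a rank to the probability the user examines that position.
# A geometric attention model is used purely as an example.

def geometric_attention(rank, p_continue=0.8):
    # probability of examining the item at `rank` (0-indexed)
    return p_continue ** rank

def weighted_exposure(membership, attention=geometric_attention):
    weights = [attention(r) for r in range(len(membership))]
    total = sum(weights)
    # share of total attention that falls on the protected group
    return sum(w * m for w, m in zip(weights, membership)) / total

# toy rankings: protected-group membership scores by rank position
ranking_a = [1.0, 0.0, 1.0, 0.0]   # protected items placed high
ranking_b = [0.0, 0.0, 1.0, 1.0]   # protected items placed low
print(weighted_exposure(ranking_a), weighted_exposure(ranking_b))
# The same items get very different exposure depending on where the attention
# mass falls, and on the attention model's parameters.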

Singh, A.; and Joachims, T. Policy Learning for Fairness in Ranking. arXiv:1902.04056 [cs, stat], 2019. http://arxiv.org/abs/1902.04056

@article{singh_policy_2019,
  title = {Policy Learning for Fairness in Ranking},
  url = {http://arxiv.org/abs/1902.04056},
  abstract = {Conventional Learning-to-Rank ({LTR}) methods optimize the utility of the rankings to the users, but they are oblivious to their impact on the ranked items. However, there has been a growing understanding that the latter is important to consider for a wide range of ranking applications (e.g. online marketplaces, job placement, admissions). To address this need, we propose a general {LTR} framework that can optimize a wide range of utility metrics (e.g. {NDCG}) while satisfying fairness of exposure constraints with respect to the items. This framework expands the class of learnable ranking functions to stochastic ranking policies, which provides a language for rigorously expressing fairness specifications. Furthermore, we provide a new {LTR} algorithm called Fair-{PG}-Rank for directly searching the space of fair ranking policies via a policy-gradient approach. Beyond the theoretical evidence in deriving the framework and the algorithm, we provide empirical results on simulated and real-world datasets verifying the effectiveness of the approach in individual and group-fairness settings.},
  journaltitle = {{arXiv}:1902.04056 [cs, stat]},
  author = {Singh, Ashudeep and Joachims, Thorsten},
  urldate = {2019-07-10},
  date = {2019-02-11},
  eprinttype = {arxiv},
  eprint = {1902.04056},
  keywords = {Computer Science - Information Retrieval, Computer Science - Machine Learning, Statistics - Machine Learning, Computer Science - Computers and Society}
}

Liu, W.; and Burke, R. Personalizing Fairness-aware Re-ranking. arXiv:1809.02921 [cs], 2018. http://arxiv.org/abs/1809.02921

@article{liu_personalizing_2018,
  title = {Personalizing Fairness-aware Re-ranking},
  url = {http://arxiv.org/abs/1809.02921},
  abstract = {Personalized recommendation brings about novel challenges in ensuring fairness, especially in scenarios in which users are not the only stakeholders involved in the recommender system. For example, the system may want to ensure that items from different providers have a fair chance of being recommended. To solve this problem, we propose a Fairness-Aware Re-ranking algorithm ({FAR}) to balance the ranking quality and provider-side fairness. We iteratively generate the ranking list by trading off between accuracy and the coverage of the providers. Although fair treatment of providers is desirable, users may differ in their receptivity to the addition of this type of diversity. Therefore, personalized user tolerance towards provider diversification is incorporated. Experiments are conducted on both synthetic and real-world data. The results show that our proposed re-ranking algorithm can significantly promote fairness with a slight sacrifice in accuracy and can do so while being attentive to individual user differences.},
  journaltitle = {{arXiv}:1809.02921 [cs]},
  author = {Liu, Weiwen and Burke, Robin},
  urldate = {2019-05-17},
  date = {2018-09-09},
  eprinttype = {arxiv},
  eprint = {1809.02921},
  keywords = {Computer Science - Information Retrieval}
}

Burke, R. Multisided Fairness for Recommendation. Unpublished manuscript, 2017. http://arxiv.org/abs/1707.00093

@unpublished{burke_multisided_2017,
  title = {Multisided Fairness for Recommendation},
  url = {http://arxiv.org/abs/1707.00093},
  abstract = {Recent work on machine learning has begun to consider issues of fairness. In this paper, we extend the concept of fairness to recommendation. In particular, we show that in some recommendation contexts, fairness may be a multisided concept, in which fair outcomes for multiple individuals need to be considered. Based on these considerations, we present a taxonomy of classes of fairness-aware recommender systems and suggest possible fairness-aware recommendation architectures.},
  author = {Burke, Robin},
  date = {2017-07-01},
  keywords = {Fall 2017 {IR} Fairness}
}

Kamishima, T.; Akaho, S.; Asoh, H.; and Sato, I. Model-Based Approaches for Independence-Enhanced Recommendation. In 2016 IEEE 16th International Conference on Data Mining Workshops (ICDMW), pages 860–867, 2016. doi:10.1109/ICDMW.2016.0127

@inproceedings{kamishima_model-based_2016,
  title = {Model-Based Approaches for Independence-Enhanced Recommendation},
  doi = {10.1109/ICDMW.2016.0127},
  abstract = {This paper studies a new approach to enhance recommendation independence. Such approaches are useful in ensuring adherence to laws and regulations, fair treatment of content providers, and exclusion of unwanted information. For example, recommendations that match an employer with a job applicant should not be based on socially sensitive information, such as gender or race, from the perspective of social fairness. An algorithm that could exclude the influence of such sensitive information would be useful in this case. We previously gave a formal definition of recommendation independence and proposed a method adopting a regularizer that imposes such an independence constraint. As no other options than this regularization approach have been put forward, we here propose a new model-based approach, which is based on a generative model that satisfies the constraint of recommendation independence. We apply this approach to a latent class model and empirically show that the model-based approach can enhance recommendation independence. Recommendation algorithms based on generative models, such as topic models, are important, because they have a flexible functionality that enables them to incorporate a wide variety of information types. Our new model-based approach will broaden the applications of independence-enhanced recommendation by integrating the functionality of generative models.},
  eventtitle = {2016 {IEEE} 16th International Conference on Data Mining Workshops ({ICDMW})},
  pages = {860--867},
  booktitle = {2016 {IEEE} 16th International Conference on Data Mining Workshops ({ICDMW})},
  author = {Kamishima, T. and Akaho, S. and Asoh, H. and Sato, I.},
  date = {2016-12},
  keywords = {recommender systems, Predictive models, Recommender systems, data mining, Data mining, Data models, Training, recommender system, topic model, content providers, fairness-aware data mining, independence constraint, independence-enhanced recommendation, Linear programming, model-based approaches, Random variables, recommendation algorithms, sensitive information, social fairness, topic models, unwanted information exclusion}
}

Yang, K.; and Stoyanovich, J. Measuring Fairness in Ranked Outputs. In Proceedings of the 29th International Conference on Scientific and Statistical Database Management (SSDBM '17), pages 22:1–22:6, ACM, 2017. doi:10.1145/3085504.3085526

@inproceedings{yang_measuring_2017,
  location = {New York, {NY}, {USA}},
  title = {Measuring Fairness in Ranked Outputs},
  isbn = {978-1-4503-5282-6},
  url = {http://doi.acm.org/10.1145/3085504.3085526},
  doi = {10.1145/3085504.3085526},
  series = {{SSDBM} '17},
  abstract = {Ranking and scoring are ubiquitous. We consider the setting in which an institution, called a ranker, evaluates a set of individuals based on demographic, behavioral or other characteristics. The final output is a ranking that represents the relative quality of the individuals. While automatic and therefore seemingly objective, rankers can, and often do, discriminate against individuals and systematically disadvantage members of protected groups. This warrants a careful study of the fairness of a ranking scheme, to enable data science for social good applications, among others. In this paper we propose fairness measures for ranked outputs. We develop a data generation procedure that allows us to systematically control the degree of unfairness in the output, and study the behavior of our measures on these datasets. We then apply our proposed measures to several real datasets, and detect cases of bias. Finally, we show preliminary results of incorporating our ranked fairness measures into an optimization framework, and show potential for improving fairness of ranked outputs while maintaining accuracy. The code implementing all parts of this work is publicly available at https://github.com/{DataResponsibly}/{FairRank}.},
  pages = {22:1--22:6},
  booktitle = {Proceedings of the 29th International Conference on Scientific and Statistical Database Management},
  publisher = {{ACM}},
  author = {Yang, Ke and Stoyanovich, Julia},
  urldate = {2019-03-26},
  date = {2017},
  note = {event-place: Chicago, {IL}, {USA}},
  keywords = {Fairness, Accountability, Data, Data Ethics, Data Science for Social Good, Responsibly, Transparency}
}
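
In the spirit of the prefix-based fairness measures proposed in the entry above, the sketch below compares the protected group's share of each top-i prefix with its share of the whole ranking, with a logarithmic discount on deeper prefixes. The cut-off points and normalization are illustrative assumptions and do not reproduce the paper's exact measures:

# Illustrative prefix-based unfairness measure for a ranked list: at several
# cut-off points, compare the protected group's share of the prefix with its
# share of the full list, discounting deeper prefixes logarithmically.

import math

def prefix_representation_gap(is_protected, cutoffs=(10, 20, 30)):
    n = len(is_protected)
    overall = sum(is_protected) / n
    total = 0.0
    for i in (c for c in cutoffs if c <= n):
        prefix_share = sum(is_protected[:i]) / i
        total += abs(prefix_share - overall) / math.log2(i)
    return total

# toy example: 30 items, protected items pushed toward the bottom
ranking = [0] * 15 + [1] * 15
print(prefix_representation_gap(ranking))   # > 0: under-represented in the top prefixes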

Kleinberg, J.; Mullainathan, S.; and Raghavan, M. Inherent Trade-Offs in the Fair Determination of Risk Scores. In Papadimitriou, C. H., editor, 8th Innovations in Theoretical Computer Science Conference (ITCS 2017), volume 67 of Leibniz International Proceedings in Informatics (LIPIcs), pages 43:1–43:23, Schloss Dagstuhl–Leibniz-Zentrum fuer Informatik, 2017. doi:10.4230/LIPIcs.ITCS.2017.43

@inproceedings{kleinberg_inherent_2017,
  location = {Dagstuhl, Germany},
  title = {Inherent Trade-Offs in the Fair Determination of Risk Scores},
  volume = {67},
  isbn = {978-3-95977-029-3},
  url = {http://drops.dagstuhl.de/opus/volltexte/2017/8156},
  doi = {10.4230/LIPIcs.ITCS.2017.43},
  series = {Leibniz International Proceedings in Informatics ({LIPIcs})},
  pages = {43:1--43:23},
  booktitle = {8th Innovations in Theoretical Computer Science Conference ({ITCS} 2017)},
  publisher = {Schloss Dagstuhl–Leibniz-Zentrum fuer Informatik},
  author = {Kleinberg, Jon and Mullainathan, Sendhil and Raghavan, Manish},
  editor = {Papadimitriou, Christos H.},
  urldate = {2019-07-16},
  date = {2017},
  keywords = {algorithmic fairness, calibration, risk tools}
}

Lievrouw, L. A.; and Farb, S. E. Information and equity. Annual Review of Information Science and Technology, 37(1): 499–540, 2003. doi:10.1002/aris.1440370112

@article{lievrouw_information_2003,
  title = {Information and equity},
  volume = {37},
  rights = {Copyright © 2002 American Society for Information Science and Technology},
  issn = {1550-8382},
  url = {https://onlinelibrary.wiley.com/doi/abs/10.1002/aris.1440370112},
  doi = {10.1002/aris.1440370112},
  pages = {499--540},
  number = {1},
  journaltitle = {Annual Review of Information Science and Technology},
  author = {Lievrouw, Leah A. and Farb, Sharon E.},
  urldate = {2019-07-10},
  date = {2003},
  langid = {english}
}

Otterbacher, J.; Checco, A.; Demartini, G.; and Clough, P. Investigating User Perception of Gender Bias in Image Search: The Role of Sexism. In The 41st International ACM SIGIR Conference on Research & Development in Information Retrieval (SIGIR '18), pages 933–936, ACM, 2018. doi:10.1145/3209978.3210094

@inproceedings{otterbacher_investigating_2018,
  location = {New York, {NY}, {USA}},
  title = {Investigating User Perception of Gender Bias in Image Search: The Role of Sexism},
  isbn = {978-1-4503-5657-2},
  url = {http://doi.acm.org/10.1145/3209978.3210094},
  doi = {10.1145/3209978.3210094},
  series = {{SIGIR} '18},
  shorttitle = {Investigating User Perception of Gender Bias in Image Search},
  abstract = {There is growing evidence that search engines produce results that are socially biased, reinforcing a view of the world that aligns with prevalent social stereotypes. One means to promote greater transparency of search algorithms - which are typically complex and proprietary - is to raise user awareness of biased result sets. However, to date, little is known concerning how users perceive bias in search results, and the degree to which their perceptions differ and/or might be predicted based on user attributes. One particular area of search that has recently gained attention, and forms the focus of this study, is image retrieval and gender bias. We conduct a controlled experiment via crowdsourcing using participants recruited from three countries to measure the extent to which workers perceive a given image results set to be subjective or objective. Demographic information about the workers, along with measures of sexism, are gathered and analysed to investigate whether (gender) biases in the image search results can be detected. Amongst other findings, the results confirm that sexist people are less likely to detect and report gender biases in image search results.},
  pages = {933--936},
  booktitle = {The 41st International {ACM} {SIGIR} Conference on Research \& Development in Information Retrieval},
  publisher = {{ACM}},
  author = {Otterbacher, Jahna and Checco, Alessandro and Demartini, Gianluca and Clough, Paul},
  urldate = {2019-07-10},
  date = {2018},
  note = {event-place: Ann Arbor, {MI}, {USA}},
  keywords = {gender stereotypes, search engine bias, user perceptions}
}

Fish, B.; Bashardoust, A.; boyd, d.; Friedler, S. A.; Scheidegger, C.; and Venkatasubramanian, S. Gaps in Information Access in Social Networks. In Proceedings of the World Wide Web Conference, pages 480–490, 2019. doi:10.1145/3308558.3313680. http://arxiv.org/abs/1903.02047

@inproceedings{fish_gaps_2019,
  title = {Gaps in Information Access in Social Networks},
  url = {http://arxiv.org/abs/1903.02047},
  doi = {10.1145/3308558.3313680},
  abstract = {The study of influence maximization in social networks has largely ignored disparate effects these algorithms might have on the individuals contained in the social network. Individuals may place a high value on receiving information, e.g. job openings or advertisements for loans. While well-connected individuals at the center of the network are likely to receive the information that is being distributed through the network, poorly connected individuals are systematically less likely to receive the information, producing a gap in access to the information between individuals. In this work, we study how best to spread information in a social network while minimizing this access gap. We propose to use the maximin social welfare function as an objective function, where we maximize the minimum probability of receiving the information under an intervention. We prove that in this setting this welfare function constrains the access gap whereas maximizing the expected number of nodes reached does not. We also investigate the difficulties of using the maximin, and present hardness results and analysis for standard greedy strategies. Finally, we investigate practical ways of optimizing for the maximin, and give empirical evidence that a simple greedy-based strategy works well in practice.},
  pages = {480--490},
  booktitle = {Proceedings of the World Wide Web Conference},
  author = {Fish, Benjamin and Bashardoust, Ashkan and boyd, danah and Friedler, Sorelle A. and Scheidegger, Carlos and Venkatasubramanian, Suresh},
  urldate = {2019-05-23},
  date = {2019},
  eprinttype = {arxiv},
  eprint = {1903.02047},
  keywords = {fairness, Computer Science - Social and Information Networks, influence maximization, Physics - Physics and Society, social networks}
}
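
The contrast between the usual expected-spread objective and the maximin objective described in the entry above can be shown with a tiny example. The per-node probabilities for each candidate intervention below are fabricated for illustration only:

# Contrast between maximizing expected reach and maximizing the minimum
# (maximin) probability of receiving information. The per-node probabilities
# under each candidate intervention are made up for illustration.

interventions = {
    "seed_center": [0.9, 0.9, 0.9, 0.05],   # best on average, leaves node 3 out
    "seed_edge":   [0.6, 0.6, 0.6, 0.5],    # lower average, no one left behind
}

best_expected = max(interventions, key=lambda k: sum(interventions[k]))
best_maximin  = max(interventions, key=lambda k: min(interventions[k]))

print("expected-reach objective picks:", best_expected)   # seed_center
print("maximin objective picks:       ", best_maximin)    # seed_edge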

Kuhlman, C.; VanValkenburg, M.; and Rundensteiner, E. FARE: Diagnostics for Fair Ranking Using Pairwise Error Metrics. In The World Wide Web Conference (WWW '19), pages 2936–2942, ACM, 2019. doi:10.1145/3308558.3313443

@inproceedings{kuhlman_fare:_2019,
  location = {New York, {NY}, {USA}},
  title = {{FARE}: Diagnostics for Fair Ranking Using Pairwise Error Metrics},
  isbn = {978-1-4503-6674-8},
  url = {http://doi.acm.org/10.1145/3308558.3313443},
  doi = {10.1145/3308558.3313443},
  series = {{WWW} '19},
  shorttitle = {{FARE}},
  abstract = {Ranking, used extensively online and as a critical tool for decision making across many domains, may embed unfair bias. Tools to measure and correct for discriminatory bias are required to ensure that ranking models do not perpetuate unfair practices. Recently, a number of error-based criteria have been proposed to assess fairness with regard to the treatment of protected groups (as determined by sensitive data attributes, e.g., race, gender, or age). However this has largely been limited to classification tasks, and error metrics used in these approaches are not applicable for ranking. Therefore, in this work we propose to broaden the scope of fairness assessment to include error-based fairness criteria for rankings. Our approach supports three criteria: Rank Equality, Rank Calibration, and Rank Parity, which cover a broad spectrum of fairness considerations from proportional group representation to error rate similarity. The underlying error metrics are formulated to be rank-appropriate, using pairwise discordance to measure prediction error in a model-agnostic fashion. Based on this foundation, we then design a fair auditing mechanism which captures group treatment throughout the entire ranking, generating in-depth yet nuanced diagnostics. We demonstrate the efficacy of our error metrics using real-world scenarios, exposing trade-offs among fairness criteria and providing guidance in the selection of fair-ranking algorithms.},
  pages = {2936--2942},
  booktitle = {The World Wide Web Conference},
  publisher = {{ACM}},
  author = {Kuhlman, Caitlin and {VanValkenburg}, {MaryAnn} and Rundensteiner, Elke},
  urldate = {2019-07-10},
  date = {2019},
  note = {event-place: San Francisco, {CA}, {USA}},
  keywords = {Fairness, Fair Ranking, Fairness Auditing, Pairwise Fairness}
}
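
A minimal sketch of rank-appropriate, pairwise error measurement in the spirit described above: for each group, the share of cross-group pairs that its items should win (per the ground truth) but that the predicted ranking inverts. The aggregation and the toy data are assumptions, not the paper's Rank Equality, Rank Calibration, or Rank Parity definitions:

# Illustrative per-group pairwise error: among cross-group pairs where a
# group's item is truly ranked above the other item, the fraction that the
# predicted ranking inverts. Large gaps between groups suggest systematically
# worse treatment of one group's items.

from itertools import combinations

def pairwise_errors_against_group(true_order, pred_order, group):
    pred_pos = {item: r for r, item in enumerate(pred_order)}
    errors, totals = {}, {}
    for a, b in combinations(true_order, 2):      # a is truly ranked above b
        if group[a] == group[b]:
            continue
        g = group[a]
        totals[g] = totals.get(g, 0) + 1
        errors[g] = errors.get(g, 0) + (pred_pos[a] > pred_pos[b])
    return {g: errors[g] / totals[g] for g in totals}

true_order = ["b1", "a1", "b2", "a2"]             # ground truth
pred_order = ["a1", "a2", "b1", "b2"]             # model demotes group B items
group = {"a1": "A", "a2": "A", "b1": "B", "b2": "B"}
print(pairwise_errors_against_group(true_order, pred_order, group))
# {'B': 1.0, 'A': 0.0}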
\n
\n\n\n
\n Ranking, used extensively online and as a critical tool for decision making across many domains, may embed unfair bias. Tools to measure and correct for discriminatory bias are required to ensure that ranking models do not perpetuate unfair practices. Recently, a number of error-based criteria have been proposed to assess fairness with regard to the treatment of protected groups (as determined by sensitive data attributes, e.g., race, gender, or age). However this has largely been limited to classification tasks, and error metrics used in these approaches are not applicable for ranking. Therefore, in this work we propose to broaden the scope of fairness assessment to include error-based fairness criteria for rankings. Our approach supports three criteria: Rank Equality, Rank Calibration, and Rank Parity, which cover a broad spectrum of fairness considerations from proportional group representation to error rate similarity. The underlying error metrics are formulated to be rank-appropriate, using pairwise discordance to measure prediction error in a model-agnostic fashion. Based on this foundation, we then design a fair auditing mechanism which captures group treatment throughout the entire ranking, generating in-depth yet nuanced diagnostics. We demonstrate the efficacy of our error metrics using real-world scenarios, exposing trade-offs among fairness criteria and providing guidance in the selection of fair-ranking algorithms.\n
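A simplified sketch of a rank-appropriate, pairwise error measure in the spirit of this abstract (not the paper's exact Rank Equality, Rank Calibration, or Rank Parity definitions; the item fields are assumptions): each pair is attributed to the group of its more relevant member, and the pair counts as discordant when that member is ranked below a less relevant item. Comparing the resulting per-group rates gives a rough, Rank-Equality-style diagnostic.

from itertools import combinations

def pairwise_discordance_by_group(items):
    """items: dicts with 'group', 'relevance' (higher is better), and
    'rank' (position in the predicted ranking, lower is better)."""
    disc, total = {}, {}
    for a, b in combinations(items, 2):
        if a["relevance"] == b["relevance"]:
            continue
        better, worse = (a, b) if a["relevance"] > b["relevance"] else (b, a)
        g = better["group"]
        total[g] = total.get(g, 0) + 1
        if better["rank"] > worse["rank"]:  # inversion: the more relevant item sits lower
            disc[g] = disc.get(g, 0) + 1
    return {g: disc.get(g, 0) / n for g, n in total.items()}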
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n FairSearch: A Tool For Fairness in Ranked Search Results.\n \n \n \n \n\n\n \n Zehlike, M.; Sühr, T.; Castillo, C.; and Kitanovski, I.\n\n\n \n\n\n\n . .\n \n\n\n\n
\n\n\n\n \n \n \"FairSearch:Paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@article{zehlike_fairsearch:_2019,\n\ttitle = {{FairSearch}: A Tool For Fairness in Ranked Search Results},\n\turl = {http://arxiv.org/abs/1905.13134},\n\tshorttitle = {{FairSearch}},\n\tabstract = {Ranked search results and recommendations have become the main mechanism by which we find content, products, places, and people online. With hiring, selecting, purchasing, and dating being increasingly mediated by algorithms, rankings may determine career and business opportunities, educational placement, access to benefits, and even social and reproductive success. It is therefore of societal and ethical importance to ask whether search results can demote, marginalize, or exclude individuals of unprivileged groups or promote products with undesired features. In this paper we present {FairSearch}, the first fair open source search {API} to provide fairness notions in ranked search results. We implement two algorithms from the fair ranking literature, namely {FA}*{IR} (Zehlike et al., 2017) and {DELTR} (Zehlike and Castillo, 2018) and provide them as stand-alone libraries in Python and Java. Additionally we implement interfaces to Elasticsearch for both algorithms, that use the aforementioned Java libraries and are then provided as Elasticsearch plugins. Elasticsearch is a well-known search engine {API} based on Apache Lucene. With our plugins we enable search engine developers who wish to ensure fair search results of different styles to easily integrate {DELTR} and {FA}*{IR} into their existing Elasticsearch environment.},\n\tjournaltitle = {{arXiv}:1905.13134 [cs]},\n\tauthor = {Zehlike, Meike and Sühr, Tom and Castillo, Carlos and Kitanovski, Ivan},\n\turldate = {2019-07-10},\n\tdate = {2019-05-27},\n\teprinttype = {arxiv},\n\teprint = {1905.13134},\n\tkeywords = {Computer Science - Information Retrieval, H.3.3}\n}\n\n
\n
\n\n\n
\n Ranked search results and recommendations have become the main mechanism by which we find content, products, places, and people online. With hiring, selecting, purchasing, and dating being increasingly mediated by algorithms, rankings may determine career and business opportunities, educational placement, access to benefits, and even social and reproductive success. It is therefore of societal and ethical importance to ask whether search results can demote, marginalize, or exclude individuals of unprivileged groups or promote products with undesired features. In this paper we present FairSearch, the first fair open source search API to provide fairness notions in ranked search results. We implement two algorithms from the fair ranking literature, namely FA*IR (Zehlike et al., 2017) and DELTR (Zehlike and Castillo, 2018) and provide them as stand-alone libraries in Python and Java. Additionally we implement interfaces to Elasticsearch for both algorithms, that use the aforementioned Java libraries and are then provided as Elasticsearch plugins. Elasticsearch is a well-known search engine API based on Apache Lucene. With our plugins we enable search engine developers who wish to ensure fair search results of different styles to easily integrate DELTR and FA*IR into their existing Elasticsearch environment.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fairness-Aware Group Recommendation with Pareto-Efficiency.\n \n \n \n \n\n\n \n Xiao, L.; Min, Z.; Yongfeng, Z.; Zhaoquan, G.; Yiqun, L.; and Shaoping, M.\n\n\n \n\n\n\n In Proceedings of the Eleventh ACM Conference on Recommender Systems, of RecSys '17, pages 107–115, . ACM\n event-place: Como, Italy\n\n\n\n
\n\n\n\n \n \n \"Fairness-AwarePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{xiao_fairness-aware_2017,\n\tlocation = {New York, {NY}, {USA}},\n\ttitle = {Fairness-Aware Group Recommendation with Pareto-Efficiency},\n\tisbn = {978-1-4503-4652-8},\n\turl = {http://doi.acm.org/10.1145/3109859.3109887},\n\tdoi = {10.1145/3109859.3109887},\n\tseries = {{RecSys} '17},\n\tabstract = {Group recommendation has attracted significant research efforts for its importance in benefiting a group of users. This paper investigates the Group Recommendation problem from a novel aspect, which tries to maximize the satisfaction of each group member while minimizing the unfairness between them. In this work, we present several semantics of the individual utility and propose two concepts of social welfare and fairness for modeling the overall utilities and the balance between group members. We formulate the problem as a multiple objective optimization problem and show that it is {NP}-Hard in different semantics. Given the multiple-objective nature of fairness-aware group recommendation problem, we provide an optimization framework for fairness-aware group recommendation from the perspective of Pareto Efficiency. We conduct extensive experiments on real-world datasets and evaluate our algorithm in terms of standard accuracy metrics. The results indicate that our algorithm achieves superior performances and considering fairness in group recommendation can enhance the recommendation accuracy.},\n\tpages = {107--115},\n\tbooktitle = {Proceedings of the Eleventh {ACM} Conference on Recommender Systems},\n\tpublisher = {{ACM}},\n\tauthor = {Xiao, Lin and Min, Zhang and Yongfeng, Zhang and Zhaoquan, Gu and Yiqun, Liu and Shaoping, Ma},\n\turldate = {2019-05-17},\n\tdate = {2017},\n\tnote = {event-place: Como, Italy},\n\tkeywords = {fairness, group recommendation, pareto-efficiency}\n}\n\n
\n
\n\n\n
\n Group recommendation has attracted significant research efforts for its importance in benefiting a group of users. This paper investigates the Group Recommendation problem from a novel aspect, which tries to maximize the satisfaction of each group member while minimizing the unfairness between them. In this work, we present several semantics of the individual utility and propose two concepts of social welfare and fairness for modeling the overall utilities and the balance between group members. We formulate the problem as a multiple objective optimization problem and show that it is NP-Hard in different semantics. Given the multiple-objective nature of fairness-aware group recommendation problem, we provide an optimization framework for fairness-aware group recommendation from the perspective of Pareto Efficiency. We conduct extensive experiments on real-world datasets and evaluate our algorithm in terms of standard accuracy metrics. The results indicate that our algorithm achieves superior performances and considering fairness in group recommendation can enhance the recommendation accuracy.\n
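A toy illustration of the welfare/fairness trade-off described above (not the paper's optimization framework; the per-member predicted utilities are an assumed input): score each candidate item by the pair (social welfare = sum of member utilities, fairness = minimum member utility) and keep only the Pareto-efficient items.

def pareto_efficient_items(utilities):
    """utilities: dict mapping item -> list of predicted utilities, one per group member."""
    scored = {item: (sum(u), min(u)) for item, u in utilities.items()}
    frontier = []
    for item, (w, f) in scored.items():
        dominated = any(
            w2 >= w and f2 >= f and (w2 > w or f2 > f)
            for other, (w2, f2) in scored.items() if other != item
        )
        if not dominated:
            frontier.append(item)
    return frontier

# Example: two members, three candidate items.
print(pareto_efficient_items({"a": [0.9, 0.3], "b": [0.6, 0.5], "c": [0.5, 0.4]}))  # ['a', 'b']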
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fairness Without Demographics in Repeated Loss Minimization.\n \n \n \n \n\n\n \n Hashimoto, T.; Srivastava, M.; Namkoong, H.; and Liang, P.\n\n\n \n\n\n\n In International Conference on Machine Learning, pages 1929–1938, . \n \n\n\n\n
\n\n\n\n \n \n \"FairnessPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{hashimoto_fairness_2018,\n\ttitle = {Fairness Without Demographics in Repeated Loss Minimization},\n\turl = {http://proceedings.mlr.press/v80/hashimoto18a.html},\n\tabstract = {Machine learning models (e.g., speech recognizers) trained on average loss suffer from representation disparity—minority groups (e.g., non-native speakers) carry less weight in the training objecti...},\n\teventtitle = {International Conference on Machine Learning},\n\tpages = {1929--1938},\n\tbooktitle = {International Conference on Machine Learning},\n\tauthor = {Hashimoto, Tatsunori and Srivastava, Megha and Namkoong, Hongseok and Liang, Percy},\n\turldate = {2019-07-12},\n\tdate = {2018-07-03},\n\tlangid = {english}\n}\n\n
\n
\n\n\n
\n Machine learning models (e.g., speech recognizers) trained on average loss suffer from representation disparity—minority groups (e.g., non-native speakers) carry less weight in the training objective...\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fairness Through Awareness.\n \n \n \n \n\n\n \n Dwork, C.; Hardt, M.; Pitassi, T.; Reingold, O.; and Zemel, R.\n\n\n \n\n\n\n In ITCS '12, pages 214–226, . ACM\n \n\n\n\n
\n\n\n\n \n \n \"FairnessPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{dwork_fairness_2012,\n\tlocation = {New York, {NY}, {USA}},\n\ttitle = {Fairness Through Awareness},\n\turl = {http://doi.acm.org/10.1145/2090236.2090255},\n\tdoi = {10.1145/2090236.2090255},\n\tabstract = {We study fairness in classification, where individuals are classified,\ne.g., admitted to a university, and the goal is to prevent discrimination\nagainst individuals based on their membership in some group, while\nmaintaining utility for the classifier (the university). The main\nconceptual contribution of this paper is a framework for fair\nclassification comprising (1) a (hypothetical) task-specific metric for\ndetermining the degree to which individuals are similar with respect to\nthe classification task at hand; (2) an algorithm for maximizing utility\nsubject to the fairness constraint, that similar individuals are treated\nsimilarly. We also present an adaptation of our approach to achieve the\ncomplementary goal of "fair affirmative action," which guarantees\nstatistical parity (i.e., the demographics of the set of individuals\nreceiving any classification are the same as the demographics of the\nunderlying population), while treating similar individuals as similarly as\npossible. Finally, we discuss the relationship of fairness to privacy:\nwhen fairness implies privacy, and how tools developed in the context of\ndifferential privacy may be applied to fairness.},\n\teventtitle = {Proceedings of the 3rd Innovations in Theoretical Computer Science Conference},\n\tpages = {214--226},\n\tbooktitle = {{ITCS} '12},\n\tpublisher = {{ACM}},\n\tauthor = {Dwork, Cynthia and Hardt, Moritz and Pitassi, Toniann and Reingold, Omer and Zemel, Richard},\n\turldate = {2017-01-11},\n\tdate = {2012},\n\tkeywords = {{CAREER}, Zotero Import (Mar 30), Zotero Import (Mar 30)/My Library, Zotero Import (Mar 30)/My Library/Algorithmic Fairness, Privacy and Fairness, Statistical Fairness}\n}\n\n
\n
\n\n\n
\n We study fairness in classification, where individuals are classified, e.g., admitted to a university, and the goal is to prevent discrimination against individuals based on their membership in some group, while maintaining utility for the classifier (the university). The main conceptual contribution of this paper is a framework for fair classification comprising (1) a (hypothetical) task-specific metric for determining the degree to which individuals are similar with respect to the classification task at hand; (2) an algorithm for maximizing utility subject to the fairness constraint, that similar individuals are treated similarly. We also present an adaptation of our approach to achieve the complementary goal of \"fair affirmative action,\" which guarantees statistical parity (i.e., the demographics of the set of individuals receiving any classification are the same as the demographics of the underlying population), while treating similar individuals as similarly as possible. Finally, we discuss the relationship of fairness to privacy: when fairness implies privacy, and how tools developed in the context of differential privacy may be applied to fairness.\n
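The framework's central constraint is a Lipschitz condition: the distance between the output distributions assigned to two individuals should not exceed the task-specific distance between those individuals, D(M(x), M(y)) ≤ d(x, y). A minimal check of that condition (my own sketch; the task metric and the per-individual output distributions are assumed inputs, and total variation distance is used as one possible choice of D):

from itertools import combinations

def lipschitz_violations(individuals, outputs, task_metric):
    """outputs[i]: dict mapping label -> probability for individual i.
    task_metric(i, j): task-specific distance between individuals i and j.
    Returns the pairs whose output distributions differ by more than their distance."""
    def total_variation(p, q):
        labels = set(p) | set(q)
        return 0.5 * sum(abs(p.get(l, 0.0) - q.get(l, 0.0)) for l in labels)
    return [(i, j) for i, j in combinations(individuals, 2)
            if total_variation(outputs[i], outputs[j]) > task_metric(i, j)]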
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fairness of Exposure in Rankings.\n \n \n \n \n\n\n \n Singh, A.; and Joachims, T.\n\n\n \n\n\n\n In KDD '18, pages 2219–2228, . ACM\n \n\n\n\n
\n\n\n\n \n \n \"FairnessPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{singh_fairness_2018,\n\tlocation = {New York, {NY}, {USA}},\n\ttitle = {Fairness of Exposure in Rankings},\n\turl = {http://doi.acm.org/10.1145/3219819.3220088},\n\tdoi = {10.1145/3219819.3220088},\n\teventtitle = {Proceedings of the 24th {ACM} {SIGKDD} International Conference on Knowledge Discovery \\& Data Mining},\n\tpages = {2219--2228},\n\tbooktitle = {{KDD} '18},\n\tpublisher = {{ACM}},\n\tauthor = {Singh, Ashudeep and Joachims, Thorsten},\n\tdate = {2018},\n\tkeywords = {fatrec, algorithmic bias, equal opportunity, fairness, fairness in rankings, position bias}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fairness in Recommendation Ranking through Pairwise Comparisons.\n \n \n \n \n\n\n \n Beutel, A.; Chen, J.; Doshi, T.; Qian, H.; Wei, L.; Wu, Y.; Heldt, L.; Zhao, Z.; Hong, L.; Chi, E. H.; and Goodrow, C.\n\n\n \n\n\n\n . .\n \n\n\n\n
\n\n\n\n \n \n \"FairnessPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{beutel_fairness_2019,\n\ttitle = {Fairness in Recommendation Ranking through Pairwise Comparisons},\n\turl = {http://arxiv.org/abs/1903.00780},\n\tabstract = {Recommender systems are one of the most pervasive applications of machine learning in industry, with many services using them to match users to products or information. As such it is important to ask: what are the possible fairness risks, how can we quantify them, and how should we address them? In this paper we offer a set of novel metrics for evaluating algorithmic fairness concerns in recommender systems. In particular we show how measuring fairness based on pairwise comparisons from randomized experiments provides a tractable means to reason about fairness in rankings from recommender systems. Building on this metric, we offer a new regularizer to encourage improving this metric during model training and thus improve fairness in the resulting rankings. We apply this pairwise regularization to a large-scale, production recommender system and show that we are able to significantly improve the system's pairwise fairness.},\n\tjournaltitle = {{arXiv}:1903.00780 [cs, stat]},\n\tauthor = {Beutel, Alex and Chen, Jilin and Doshi, Tulsee and Qian, Hai and Wei, Li and Wu, Yi and Heldt, Lukasz and Zhao, Zhe and Hong, Lichan and Chi, Ed H. and Goodrow, Cristos},\n\turldate = {2019-04-29},\n\tdate = {2019-03-02},\n\teprinttype = {arxiv},\n\teprint = {1903.00780},\n\tkeywords = {Computer Science - Information Retrieval, Computer Science - Machine Learning, Statistics - Machine Learning, Computer Science - Computers and Society, Computer Science - Artificial Intelligence}\n}\n\n
\n
\n\n\n
\n Recommender systems are one of the most pervasive applications of machine learning in industry, with many services using them to match users to products or information. As such it is important to ask: what are the possible fairness risks, how can we quantify them, and how should we address them? In this paper we offer a set of novel metrics for evaluating algorithmic fairness concerns in recommender systems. In particular we show how measuring fairness based on pairwise comparisons from randomized experiments provides a tractable means to reason about fairness in rankings from recommender systems. Building on this metric, we offer a new regularizer to encourage improving this metric during model training and thus improve fairness in the resulting rankings. We apply this pairwise regularization to a large-scale, production recommender system and show that we are able to significantly improve the system's pairwise fairness.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fairness and Transparency in Ranking.\n \n \n \n \n\n\n \n Castillo, C.\n\n\n \n\n\n\n , 52(2): 64–71. .\n \n\n\n\n
\n\n\n\n \n \n \"FairnessPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{castillo_fairness_2019,\n\ttitle = {Fairness and Transparency in Ranking},\n\tvolume = {52},\n\tissn = {0163-5840},\n\turl = {http://doi.acm.org/10.1145/3308774.3308783},\n\tdoi = {10.1145/3308774.3308783},\n\tabstract = {Ranking in Information Retrieval ({IR}) has been traditionally evaluated from the perspective of the relevance of search engine results to people searching for information, i.e., the extent to which the system provides "the right information, to the right people, in the right way, at the right time." However, people in current {IR} systems are not only the ones issuing search queries, but increasingly they are also the ones being searched. This raises several new problems in {IR} that have been addressed in recent research, particularly with respect to fairness/non-discrimination, accountability, and transparency. This is a summary of some these initial developments.},\n\tpages = {64--71},\n\tnumber = {2},\n\tjournaltitle = {{SIGIR} Forum},\n\tauthor = {Castillo, Carlos},\n\turldate = {2019-07-10},\n\tdate = {2019-01}\n}\n\n
\n
\n\n\n
\n Ranking in Information Retrieval (IR) has been traditionally evaluated from the perspective of the relevance of search engine results to people searching for information, i.e., the extent to which the system provides \"the right information, to the right people, in the right way, at the right time.\" However, people in current IR systems are not only the ones issuing search queries, but increasingly they are also the ones being searched. This raises several new problems in IR that have been addressed in recent research, particularly with respect to fairness/non-discrimination, accountability, and transparency. This is a summary of some of these initial developments.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fair Prediction with Disparate Impact: A Study of Bias in Recidivism Prediction Instruments.\n \n \n \n \n\n\n \n Chouldechova, A.\n\n\n \n\n\n\n , 5(2): 153–163. .\n \n\n\n\n
\n\n\n\n \n \n \"FairPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{chouldechova_fair_2017,\n\ttitle = {Fair Prediction with Disparate Impact: A Study of Bias in Recidivism Prediction Instruments},\n\tvolume = {5},\n\tissn = {2167-6461},\n\turl = {https://www.liebertpub.com/doi/abs/10.1089/big.2016.0047},\n\tdoi = {10.1089/big.2016.0047},\n\tshorttitle = {Fair Prediction with Disparate Impact},\n\tabstract = {Recidivism prediction instruments ({RPIs}) provide decision-makers with an assessment of the likelihood that a criminal defendant will reoffend at a future point in time. Although such instruments are gaining increasing popularity across the country, their use is attracting tremendous controversy. Much of the controversy concerns potential discriminatory bias in the risk assessments that are produced. This article discusses several fairness criteria that have recently been applied to assess the fairness of {RPIs}. We demonstrate that the criteria cannot all be simultaneously satisfied when recidivism prevalence differs across groups. We then show how disparate impact can arise when an {RPI} fails to satisfy the criterion of error rate balance.},\n\tpages = {153--163},\n\tnumber = {2},\n\tjournaltitle = {Big Data},\n\tshortjournal = {Big Data},\n\tauthor = {Chouldechova, Alexandra},\n\turldate = {2019-07-16},\n\tdate = {2017-06-01}\n}\n\n
\n
\n\n\n
\n Recidivism prediction instruments (RPIs) provide decision-makers with an assessment of the likelihood that a criminal defendant will reoffend at a future point in time. Although such instruments are gaining increasing popularity across the country, their use is attracting tremendous controversy. Much of the controversy concerns potential discriminatory bias in the risk assessments that are produced. This article discusses several fairness criteria that have recently been applied to assess the fairness of RPIs. We demonstrate that the criteria cannot all be simultaneously satisfied when recidivism prevalence differs across groups. We then show how disparate impact can arise when an RPI fails to satisfy the criterion of error rate balance.\n
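The incompatibility described in the abstract can be made concrete with the standard confusion-matrix identity FPR = (p / (1 - p)) * (1 - FNR) * (1 - PPV) / PPV, which ties a classifier's false positive rate to the prevalence p, the positive predictive value, and the false negative rate. If two groups share the same PPV and FNR but have different prevalence, their FPRs must differ. A small numeric check (my own illustration, not code from the article):

def implied_fpr(p, ppv, fnr):
    """False positive rate implied by prevalence p, positive predictive value ppv,
    and false negative rate fnr (derived from the confusion-matrix definitions)."""
    return (p / (1 - p)) * (1 - fnr) * (1 - ppv) / ppv

# Equal PPV and FNR, different base rates -> unequal false positive rates.
print(implied_fpr(p=0.3, ppv=0.7, fnr=0.2))  # ~0.147
print(implied_fpr(p=0.5, ppv=0.7, fnr=0.2))  # ~0.343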
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n FA*IR: A Fair Top-k Ranking Algorithm.\n \n \n \n \n\n\n \n Zehlike, M.; Bonchi, F.; Castillo, C.; Hajian, S.; Megahed, M.; and Baeza-Yates, R.\n\n\n \n\n\n\n In Proceedings of the 2017 ACM on Conference on Information and Knowledge Management, of CIKM '17, pages 1569–1578, . ACM\n event-place: Singapore, Singapore\n\n\n\n
\n\n\n\n \n \n \"FA*IR:Paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{zehlike_fair:_2017,\n\tlocation = {New York, {NY}, {USA}},\n\ttitle = {{FA}*{IR}: A Fair Top-k Ranking Algorithm},\n\tisbn = {978-1-4503-4918-5},\n\turl = {http://doi.acm.org/10.1145/3132847.3132938},\n\tdoi = {10.1145/3132847.3132938},\n\tseries = {{CIKM} '17},\n\tshorttitle = {{FA}*{IR}},\n\tabstract = {In this work, we define and solve the Fair Top-k Ranking problem, in which we want to determine a subset of k candidates from a large pool of n » k candidates, maximizing utility (i.e., select the "best" candidates) subject to group fairness criteria. Our ranked group fairness definition extends group fairness using the standard notion of protected groups and is based on ensuring that the proportion of protected candidates in every prefix of the top-k ranking remains statistically above or indistinguishable from a given minimum. Utility is operationalized in two ways: (i) every candidate included in the top-k should be more qualified than every candidate not included; and (ii) for every pair of candidates in the top-k, the more qualified candidate should be ranked above. An efficient algorithm is presented for producing the Fair Top-k Ranking, and tested experimentally on existing datasets as well as new datasets released with this paper, showing that our approach yields small distortions with respect to rankings that maximize utility without considering fairness criteria. To the best of our knowledge, this is the first algorithm grounded in statistical tests that can mitigate biases in the representation of an under-represented group along a ranked list.},\n\tpages = {1569--1578},\n\tbooktitle = {Proceedings of the 2017 {ACM} on Conference on Information and Knowledge Management},\n\tpublisher = {{ACM}},\n\tauthor = {Zehlike, Meike and Bonchi, Francesco and Castillo, Carlos and Hajian, Sara and Megahed, Mohamed and Baeza-Yates, Ricardo},\n\turldate = {2019-07-10},\n\tdate = {2017},\n\tnote = {event-place: Singapore, Singapore},\n\tkeywords = {ranking, algorithmic fairness, bias in computer systems, top-k selection}\n}\n\n
\n
\n\n\n
\n In this work, we define and solve the Fair Top-k Ranking problem, in which we want to determine a subset of k candidates from a large pool of n » k candidates, maximizing utility (i.e., select the \"best\" candidates) subject to group fairness criteria. Our ranked group fairness definition extends group fairness using the standard notion of protected groups and is based on ensuring that the proportion of protected candidates in every prefix of the top-k ranking remains statistically above or indistinguishable from a given minimum. Utility is operationalized in two ways: (i) every candidate included in the top-k should be more qualified than every candidate not included; and (ii) for every pair of candidates in the top-k, the more qualified candidate should be ranked above. An efficient algorithm is presented for producing the Fair Top-k Ranking, and tested experimentally on existing datasets as well as new datasets released with this paper, showing that our approach yields small distortions with respect to rankings that maximize utility without considering fairness criteria. To the best of our knowledge, this is the first algorithm grounded in statistical tests that can mitigate biases in the representation of an under-represented group along a ranked list.\n
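A simplified sketch of the ranked group fairness test described in the abstract (the published algorithm additionally corrects the significance level for the multiple tests across prefixes, which is omitted here; the inputs are assumptions): every prefix of the ranking must contain at least the minimum number of protected candidates that a binomial test with target proportion p and significance level alpha would not reject.

from scipy.stats import binom

def ranked_group_fairness(is_protected, p, alpha=0.1):
    """is_protected: booleans in ranking order (top first); p: target minimum proportion."""
    protected_so_far = 0
    for i, flag in enumerate(is_protected, start=1):
        protected_so_far += int(flag)
        required = binom.ppf(alpha, i, p)  # smallest count not rejected at level alpha
        if protected_so_far < required:
            return False
    return True

# Example: a ranking of 6 items with protected items at positions 2, 4, 6.
print(ranked_group_fairness([False, True, False, True, False, True], p=0.4))  # True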
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Equity of Attention: Amortizing Individual Fairness in Rankings.\n \n \n \n \n\n\n \n Biega, A. J; Gummadi, K. P; and Weikum, G.\n\n\n \n\n\n\n In The 41st International ACM SIGIR Conference on Research & Development in Information Retrieval, pages 405–414, . ACM\n \n\n\n\n
\n\n\n\n \n \n \"EquityPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{biega_equity_2018,\n\ttitle = {Equity of Attention: Amortizing Individual Fairness in Rankings},\n\turl = {https://dl.acm.org/citation.cfm?doid=3209978.3210063},\n\tdoi = {10.1145/3209978.3210063},\n\tpages = {405--414},\n\tbooktitle = {The 41st International {ACM} {SIGIR} Conference on Research \\& Development in Information Retrieval},\n\tpublisher = {{ACM}},\n\tauthor = {Biega, Asia J and Gummadi, Krishna P and Weikum, Gerhard},\n\turldate = {2018-09-10},\n\tdate = {2018-06-27},\n\tkeywords = {fatrec, algorithmic fairness, amortized fairness, attention, exposure, fair ranking, individual fairness, position bias}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Does mitigating ML's impact disparity require treatment disparity?.\n \n \n \n \n\n\n \n Lipton, Z.; McAuley, J.; and Chouldechova, A.\n\n\n \n\n\n\n In Bengio, S.; Wallach, H.; Larochelle, H.; Grauman, K.; Cesa-Bianchi, N.; and Garnett, R., editor(s), Advances in Neural Information Processing Systems 31, pages 8125–8135. Curran Associates, Inc., .\n \n\n\n\n
\n\n\n\n \n \n \"DoesPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{lipton_does_2018,\n\ttitle = {Does mitigating {ML}'s impact disparity require treatment disparity?},\n\turl = {http://papers.nips.cc/paper/8035-does-mitigating-mls-impact-disparity-require-treatment-disparity.pdf},\n\tpages = {8125--8135},\n\tbooktitle = {Advances in Neural Information Processing Systems 31},\n\tpublisher = {Curran Associates, Inc.},\n\tauthor = {Lipton, Zachary and {McAuley}, Julian and Chouldechova, Alexandra},\n\teditor = {Bengio, S. and Wallach, H. and Larochelle, H. and Grauman, K. and Cesa-Bianchi, N. and Garnett, R.},\n\turldate = {2019-07-21},\n\tdate = {2018}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Disparate Interactions: An Algorithm-in-the-Loop Analysis of Fairness in Risk Assessments.\n \n \n \n \n\n\n \n Green, B.; and Chen, Y.\n\n\n \n\n\n\n In Proceedings of the Conference on Fairness, Accountability, and Transparency - FAT* '19, pages 90–99, . ACM Press\n \n\n\n\n
\n\n\n\n \n \n \"DisparatePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{green_disparate_2019,\n\tlocation = {Atlanta, {GA}, {USA}},\n\ttitle = {Disparate Interactions: An Algorithm-in-the-Loop Analysis of Fairness in Risk Assessments},\n\tisbn = {978-1-4503-6125-5},\n\turl = {http://dl.acm.org/citation.cfm?doid=3287560.3287563},\n\tdoi = {10.1145/3287560.3287563},\n\tshorttitle = {Disparate Interactions},\n\tabstract = {Despite vigorous debates about the technical characteristics of risk assessments being deployed in the U.S. criminal justice system, remarkably little research has studied how these tools affect actual decision-making processes. After all, risk assessments do not make definitive decisions—they inform judges, who are the final arbiters. It is therefore essential that considerations of risk assessments be informed by rigorous studies of how judges actually interpret and use them. This paper takes a first step toward such research on human interactions with risk assessments through a controlled experimental study on Amazon Mechanical Turk. We found several behaviors that call into question the supposed efficacy and fairness of risk assessments: our study participants 1) underperformed the risk assessment even when presented with its predictions, 2) could not effectively evaluate the accuracy of their own or the risk assessment’s predictions, and 3) exhibited behaviors fraught with “disparate interactions,” whereby the use of risk assessments led to higher risk predictions about black defendants and lower risk predictions about white defendants. These results suggest the need for a new “algorithm-in-the-loop” framework that places machine learning decision-making aids into the sociotechnical context of improving human decisions rather than the technical context of generating the best prediction in the abstract. If risk assessments are to be used at all, they must be grounded in rigorous evaluations of their real-world impacts instead of in their theoretical potential.},\n\teventtitle = {the Conference},\n\tpages = {90--99},\n\tbooktitle = {Proceedings of the Conference on Fairness, Accountability, and Transparency  - {FAT}* '19},\n\tpublisher = {{ACM} Press},\n\tauthor = {Green, Ben and Chen, Yiling},\n\turldate = {2019-07-16},\n\tdate = {2019},\n\tlangid = {english}\n}\n\n
\n
\n\n\n
\n Despite vigorous debates about the technical characteristics of risk assessments being deployed in the U.S. criminal justice system, remarkably little research has studied how these tools affect actual decision-making processes. After all, risk assessments do not make definitive decisions—they inform judges, who are the final arbiters. It is therefore essential that considerations of risk assessments be informed by rigorous studies of how judges actually interpret and use them. This paper takes a first step toward such research on human interactions with risk assessments through a controlled experimental study on Amazon Mechanical Turk. We found several behaviors that call into question the supposed efficacy and fairness of risk assessments: our study participants 1) underperformed the risk assessment even when presented with its predictions, 2) could not effectively evaluate the accuracy of their own or the risk assessment’s predictions, and 3) exhibited behaviors fraught with “disparate interactions,” whereby the use of risk assessments led to higher risk predictions about black defendants and lower risk predictions about white defendants. These results suggest the need for a new “algorithm-in-the-loop” framework that places machine learning decision-making aids into the sociotechnical context of improving human decisions rather than the technical context of generating the best prediction in the abstract. If risk assessments are to be used at all, they must be grounded in rigorous evaluations of their real-world impacts instead of in their theoretical potential.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Designing Fair Ranking Schemes.\n \n \n \n \n\n\n \n Asudeh, A.; Jagadish, H. V.; Stoyanovich, J.; and Das, G.\n\n\n \n\n\n\n . .\n \n\n\n\n
\n\n\n\n \n \n \"DesigningPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@article{asudeh_designing_2017,\n\ttitle = {Designing Fair Ranking Schemes},\n\turl = {http://arxiv.org/abs/1712.09752},\n\tabstract = {Items from a database are often ranked based on a combination of multiple criteria. A user may have the flexibility to accept combinations that weigh these criteria differently, within limits. On the other hand, this choice of weights can greatly affect the fairness of the produced ranking. In this paper, we develop a system that helps users choose criterion weights that lead to greater fairness. We consider ranking functions that compute the score of each item as a weighted sum of (numeric) attribute values, and then sort items on their score. Each ranking function can be expressed as a vector of weights, or as a point in a multi-dimensional space. For a broad range of fairness criteria, we show how to efficiently identify regions in this space that satisfy these criteria. Using this identification method, our system is able to tell users whether their proposed ranking function satisfies the desired fairness criteria and, if it does not, to suggest the smallest modification that does. We develop user-controllable approximation that and indexing techniques that are applied during preprocessing, and support sub-second response times during the online phase. Our extensive experiments on real datasets demonstrate that our methods are able to find solutions that satisfy fairness criteria effectively and efficiently.},\n\tjournaltitle = {{arXiv}:1712.09752 [cs]},\n\tauthor = {Asudeh, Abolfazl and Jagadish, H. V. and Stoyanovich, Julia and Das, Gautam},\n\turldate = {2019-07-10},\n\tdate = {2017-12-27},\n\teprinttype = {arxiv},\n\teprint = {1712.09752},\n\tkeywords = {Computer Science - Databases}\n}\n\n
\n
\n\n\n
\n Items from a database are often ranked based on a combination of multiple criteria. A user may have the flexibility to accept combinations that weigh these criteria differently, within limits. On the other hand, this choice of weights can greatly affect the fairness of the produced ranking. In this paper, we develop a system that helps users choose criterion weights that lead to greater fairness. We consider ranking functions that compute the score of each item as a weighted sum of (numeric) attribute values, and then sort items on their score. Each ranking function can be expressed as a vector of weights, or as a point in a multi-dimensional space. For a broad range of fairness criteria, we show how to efficiently identify regions in this space that satisfy these criteria. Using this identification method, our system is able to tell users whether their proposed ranking function satisfies the desired fairness criteria and, if it does not, to suggest the smallest modification that does. We develop user-controllable approximation and indexing techniques that are applied during preprocessing, and support sub-second response times during the online phase. Our extensive experiments on real datasets demonstrate that our methods are able to find solutions that satisfy fairness criteria effectively and efficiently.\n
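A minimal sketch of the scoring setup described above (illustrative only; the example fairness criterion and all names are assumptions, and the paper's machinery for identifying entire satisfying regions of weight space is not reproduced): items are scored by a weighted sum of numeric attributes, and a candidate weight vector is checked against one possible criterion on the resulting top-k.

import numpy as np

def satisfies_fairness(weights, X, protected, k, min_share):
    """X: (n_items, n_attrs) attribute matrix; protected: boolean array per item.
    Returns True if the top-k under the weighted-sum ranking contains at least
    a `min_share` fraction of protected items (one example fairness criterion)."""
    scores = X @ np.asarray(weights, dtype=float)
    top_k = np.argsort(-scores)[:k]
    return np.asarray(protected, dtype=bool)[top_k].mean() >= min_share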
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Decision making with limited feedback: Error bounds for predictive policing and recidivism prediction.\n \n \n \n\n\n \n Ensign, D.; Frielder, S. A; Neville, S.; Scheidegger, C.; and Venkatasubramanian, S.\n\n\n \n\n\n\n ,9. .\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{ensign_decision_nodate,\n\ttitle = {Decision making with limited feedback: Error bounds for predictive policing and recidivism prediction},\n\tabstract = {In this paper, we focus on the problems of recidivism prediction and predictive policing. We present the first algorithms with provable regret for these problems, by showing that both problems (and others like these) can be abstracted into a general reinforcement learning framework called partial monitoring. We also discuss the policy implications of these solutions.},\n\tpages = {9},\n\tauthor = {Ensign, Danielle and Frielder, Sorelle A and Neville, Scott and Scheidegger, Carlos and Venkatasubramanian, Suresh},\n\tlangid = {english}\n}\n\n
\n
\n\n\n
\n In this paper, we focus on the problems of recidivism prediction and predictive policing. We present the first algorithms with provable regret for these problems, by showing that both problems (and others like these) can be abstracted into a general reinforcement learning framework called partial monitoring. We also discuss the policy implications of these solutions.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Debiasing Desire: Addressing Bias and Discrimination on Intimate Platforms.\n \n \n \n \n\n\n \n Hutson, J.; Taft, J.; Barocas, S.; and Levy, K.\n\n\n \n\n\n\n , 2: 18. .\n \n\n\n\n
\n\n\n\n \n \n \"DebiasingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{hutson_debiasing_2018,\n\ttitle = {Debiasing Desire: Addressing Bias and Discrimination on Intimate Platforms},\n\tvolume = {2},\n\turl = {https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3244459},\n\tdoi = {10.1145/3274342},\n\tabstract = {Designing technical systems to be resistant to bias and discrimination\nrepresents vital new terrain for researchers, policymakers, and the\nanti-discrimination project more broadly. We consider bias and\ndiscrimination in the context of popular online dating and hookup\nplatforms in the United States, which we call "intimate platforms."\nDrawing on work in social-justice-oriented and Queer {HCI}, we review design\nfeatures of popular intimate platforms and their potential role in\nexacerbating or mitigating interpersonal bias. We argue that focusing on\nplatform design can reveal opportunities to reshape troubling patterns of\nintimate contact without overriding users’ decisional autonomy. We\nidentify and address the difficult ethical questions that nevertheless\ncome along with such intervention, while urging the social computing\ncommunity to engage more deeply with issues of bias, discrimination, and\nexclusion in the study and design of intimate platforms.},\n\tpages = {18},\n\tissue = {{CSCW}},\n\tjournaltitle = {Proceedings of the {ACM} on Human-Computer Interaction},\n\tauthor = {Hutson, Jevan and Taft, Jessie and Barocas, Solon and Levy, Karen},\n\turldate = {2018-09-07},\n\tdate = {2018-09-05},\n\tkeywords = {fatrec, platforms, bias, discrimination, ethics, design, law, policy, online dating, intimacy}\n}\n\n
\n
\n\n\n
\n Designing technical systems to be resistant to bias and discrimination represents vital new terrain for researchers, policymakers, and the anti-discrimination project more broadly. We consider bias and discrimination in the context of popular online dating and hookup platforms in the United States, which we call \"intimate platforms.\" Drawing on work in social-justice-oriented and Queer HCI, we review design features of popular intimate platforms and their potential role in exacerbating or mitigating interpersonal bias. We argue that focusing on platform design can reveal opportunities to reshape troubling patterns of intimate contact without overriding users’ decisional autonomy. We identify and address the difficult ethical questions that nevertheless come along with such intervention, while urging the social computing community to engage more deeply with issues of bias, discrimination, and exclusion in the study and design of intimate platforms.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Certifying and Removing Disparate Impact.\n \n \n \n \n\n\n \n Feldman, M.; Friedler, S. A; Moeller, J.; Scheidegger, C.; and Venkatasubramanian, S.\n\n\n \n\n\n\n In pages 259–268, . ACM\n \n\n\n\n
\n\n\n\n \n \n \"CertifyingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{feldman_certifying_2015,\n\ttitle = {Certifying and Removing Disparate Impact},\n\turl = {http://dl.acm.org/citation.cfm?doid=2783258.2783311},\n\tdoi = {10.1145/2783258.2783311},\n\teventtitle = {Proceedings of the 21th {ACM} {SIGKDD} International Conference on Knowledge Discovery and Data Mining},\n\tpages = {259--268},\n\tpublisher = {{ACM}},\n\tauthor = {Feldman, Michael and Friedler, Sorelle A and Moeller, John and Scheidegger, Carlos and Venkatasubramanian, Suresh},\n\turldate = {2017-04-04},\n\tdate = {2015-08-10},\n\tkeywords = {fairness, machine learning, {CAREER}, Privacy and Fairness, disparate impact}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Calibrated recommendations.\n \n \n \n \n\n\n \n Steck, H.\n\n\n \n\n\n\n In pages 154–162, . ACM\n \n\n\n\n
\n\n\n\n \n \n \"CalibratedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{steck_calibrated_2018,\n\ttitle = {Calibrated recommendations},\n\turl = {https://dl.acm.org/citation.cfm?doid=3240323.3240372},\n\tdoi = {10.1145/3240323.3240372},\n\teventtitle = {Proceedings of the 12th {ACM} Conference on Recommender Systems},\n\tpages = {154--162},\n\tpublisher = {{ACM}},\n\tauthor = {Steck, Harald},\n\turldate = {2018-11-02},\n\tdate = {2018-09-27},\n\tkeywords = {fairness, recommender systems, diversity, calibration}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Beyond Parity: Fairness Objectives for Collaborative Filtering.\n \n \n \n \n\n\n \n Yao, S.; and Huang, B.\n\n\n \n\n\n\n In Guyon, I; Luxburg, U V; Bengio, S; Wallach, H; Fergus, R; Vishwanathan, S; and Garnett, R, editor(s), Advances in Neural Information Processing Systems 30, pages 2925–2934. Curran Associates, Inc., .\n \n\n\n\n
\n\n\n\n \n \n \"BeyondPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@incollection{yao_beyond_2017,\n\ttitle = {Beyond Parity: Fairness Objectives for Collaborative Filtering},\n\turl = {http://papers.nips.cc/paper/6885-beyond-parity-fairness-objectives-for-collaborative-filtering.pdf},\n\tpages = {2925--2934},\n\tbooktitle = {Advances in Neural Information Processing Systems 30},\n\tpublisher = {Curran Associates, Inc.},\n\tauthor = {Yao, Sirui and Huang, Bert},\n\teditor = {Guyon, I and Luxburg, U V and Bengio, S and Wallach, H and Fergus, R and Vishwanathan, S and Garnett, R},\n\tdate = {2017},\n\tkeywords = {fatrec}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Balanced Neighborhoods for Multi-sided Fairness in Recommendation.\n \n \n \n \n\n\n \n Burke, R.; Sonboli, N.; and Ordonez-Gauger, A.\n\n\n \n\n\n\n , 81: 202–214. .\n \n\n\n\n
\n\n\n\n \n \n \"BalancedPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{burke_balanced_2018,\n\ttitle = {Balanced Neighborhoods for Multi-sided Fairness in Recommendation},\n\tvolume = {81},\n\turl = {http://proceedings.mlr.press/v81/burke18a.html},\n\tabstract = {Fairness has emerged as an important category of analysis for machine\nlearning systems in some application areas. In extending the concept of\nfairness to recommender systems, there is an essential tension between the\ngoals of fairness and those of personalization. However, there are\ncontexts in which equity across recommendation outcomes is a desirable\ngoal. It is also the case that in some applications fairness may be a\nmultisided concept, in which the impacts on multiple groups of individuals\nmust be considered. In this paper, we examine two different cases of\nfairness-aware recommender systems: consumer-centered and\nprovider-centered. We explore the concept of a balanced neighborhood as a\nmechanism to preserve personalization in recommendation while enhancing\nthe fairness of recommendation outcomes. We show that a modified version\nof the Sparse Linear Method ({SLIM}) can be used to improve the balance of\nuser and item neighborhoods, with the result of achieving greater outcome\nfairness in real-world datasets with minimal loss in ranking performance.},\n\tpages = {202--214},\n\tauthor = {Burke, Robin and Sonboli, Nasim and Ordonez-Gauger, Aldo},\n\tdate = {2018}\n}\n\n
\n
\n\n\n
\n Fairness has emerged as an important category of analysis for machine learning systems in some application areas. In extending the concept of fairness to recommender systems, there is an essential tension between the goals of fairness and those of personalization. However, there are contexts in which equity across recommendation outcomes is a desirable goal. It is also the case that in some applications fairness may be a multisided concept, in which the impacts on multiple groups of individuals must be considered. In this paper, we examine two different cases of fairness-aware recommender systems: consumer-centered and provider-centered. We explore the concept of a balanced neighborhood as a mechanism to preserve personalization in recommendation while enhancing the fairness of recommendation outcomes. We show that a modified version of the Sparse Linear Method (SLIM) can be used to improve the balance of user and item neighborhoods, with the result of achieving greater outcome fairness in real-world datasets with minimal loss in ranking performance.\n
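A rough illustration of the "balanced neighborhood" notion (my own sketch; the paper's contribution is a regularized variant of SLIM that learns such neighborhoods, not this after-the-fact measurement): compare the total weight a learned neighborhood coefficient vector places on protected versus unprotected neighbors.

import numpy as np

def neighborhood_balance(coefficients, protected):
    """coefficients: nonnegative neighbor weights for one user or item;
    protected: boolean mask over the same neighbors.
    Returns the protected-minus-unprotected weight gap; 0 means balanced."""
    w = np.asarray(coefficients, dtype=float)
    mask = np.asarray(protected, dtype=bool)
    return float(w[mask].sum() - w[~mask].sum())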
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Assessing and Addressing Algorithmic Bias - But Before We Get There...\n \n \n \n \n\n\n \n Springer, A.; Garcia-Gathright, J.; and Cramer, H.\n\n\n \n\n\n\n In AAAI Spring Symposium Series, . \n \n\n\n\n
\n\n\n\n \n \n \"AssessingPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{springer_assessing_2018,\n\ttitle = {Assessing and Addressing Algorithmic Bias - But Before We Get There...},\n\turl = {https://www.aaai.org/ocs/index.php/SSS/SSS18/paper/view/17542},\n\tbooktitle = {{AAAI} Spring Symposium Series},\n\tauthor = {Springer, Aaron and Garcia-Gathright, Jean and Cramer, Henriette},\n\turldate = {2019-07-10},\n\tdate = {2018-03-15}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Assessing and Addressing Algorithmic Bias - But Before We Get There.\n \n \n \n \n\n\n \n Garcia-Gathright, J.; Springer, A.; and Cramer, H.\n\n\n \n\n\n\n . .\n \n\n\n\n
\n\n\n\n \n \n \"AssessingPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@article{garcia-gathright_assessing_2018,\n\ttitle = {Assessing and Addressing Algorithmic Bias - But Before We Get There},\n\turl = {http://arxiv.org/abs/1809.03332},\n\tabstract = {Algorithmic and data bias are gaining attention as a pressing issue in popular press - and rightly so. However, beyond these calls to action, standard processes and tools for practitioners do not readily exist to assess and address unfair algorithmic and data biases. The literature is relatively scattered and the needed interdisciplinary approach means that very different communities are working on the topic. We here provide a number of challenges encountered in assessing and addressing algorithmic and data bias in practice. We describe an early approach that attempts to translate the literature into processes for (production) teams wanting to assess both intended data and algorithm characteristics and unintended, unfair biases.},\n\tjournaltitle = {{arXiv}:1809.03332 [cs]},\n\tauthor = {Garcia-Gathright, Jean and Springer, Aaron and Cramer, Henriette},\n\turldate = {2019-05-17},\n\tdate = {2018-09-10},\n\teprinttype = {arxiv},\n\teprint = {1809.03332},\n\tkeywords = {Computer Science - Computers and Society}\n}\n\n
\n
\n\n\n
\n Algorithmic and data bias are gaining attention as a pressing issue in popular press - and rightly so. However, beyond these calls to action, standard processes and tools for practitioners do not readily exist to assess and address unfair algorithmic and data biases. The literature is relatively scattered and the needed interdisciplinary approach means that very different communities are working on the topic. We here provide a number of challenges encountered in assessing and addressing algorithmic and data bias in practice. We describe an early approach that attempts to translate the literature into processes for (production) teams wanting to assess both intended data and algorithm characteristics and unintended, unfair biases.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n All The Cool Kids, How Do They Fit In?: Popularity and Demographic Biases in Recommender Evaluation and Effectiveness.\n \n \n \n \n\n\n \n Ekstrand, M. D; Tian, M.; Azpiazu, I. M.; Ekstrand, J. D; Anuyah, O.; McNeill, D.; and Pera, M. S.\n\n\n \n\n\n\n In PMLR, volume 81, pages 172–186, . \n \n\n\n\n
\n\n\n\n \n \n \"AllPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{ekstrand_all_2018,\n\ttitle = {All The Cool Kids, How Do They Fit In?: Popularity and Demographic Biases in Recommender Evaluation and Effectiveness},\n\tvolume = {81},\n\trights = {All rights reserved},\n\turl = {http://proceedings.mlr.press/v81/ekstrand18b.html},\n\teventtitle = {Proceedings of the Conference on Fairness, Accountability, and Transparency},\n\tpages = {172--186},\n\tbooktitle = {{PMLR}},\n\tauthor = {Ekstrand, Michael D and Tian, Mucun and Azpiazu, Ion Madrazo and Ekstrand, Jennifer D and Anuyah, Oghenemaro and {McNeill}, David and Pera, Maria Soledad},\n\tdate = {2018-02},\n\tkeywords = {My Papers, Privacy and Fairness, Research Using {LensKit}}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Nutritional Label for Rankings.\n \n \n \n \n\n\n \n Yang, K.; Stoyanovich, J.; Asudeh, A.; Howe, B.; Jagadish, H.; and Miklau, G.\n\n\n \n\n\n\n In Proceedings of the 2018 International Conference on Management of Data, of SIGMOD '18, pages 1773–1776, . ACM\n event-place: Houston, TX, USA\n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{yang_nutritional_2018,\n\tlocation = {New York, {NY}, {USA}},\n\ttitle = {A Nutritional Label for Rankings},\n\tisbn = {978-1-4503-4703-7},\n\turl = {http://doi.acm.org/10.1145/3183713.3193568},\n\tdoi = {10.1145/3183713.3193568},\n\tseries = {{SIGMOD} '18},\n\tabstract = {Algorithmic decisions often result in scoring and ranking individuals to determine credit worthiness, qualifications for college admissions and employment, and compatibility as dating partners. While automatic and seemingly objective, ranking algorithms can discriminate against individuals and protected groups, and exhibit low diversity. Furthermore, ranked results are often unstable -- small changes in the input data or in the ranking methodology may lead to drastic changes in the output, making the result uninformative and easy to manipulate. Similar concerns apply in cases where items other than individuals are ranked, including colleges, academic departments, or products. Despite the ubiquity of rankers, there is, to the best of our knowledge, no technical work that focuses on making rankers transparent. In this demonstration we present Ranking Facts, a Web-based application that generates a "nutritional label" for rankings. Ranking Facts is made up of a collection of visual widgets that implement our latest research results on fairness, stability, and transparency for rankings, and that communicate details of the ranking methodology, or of the output, to the end user. We will showcase Ranking Facts on real datasets from different domains, including college rankings, criminal risk assessment, and financial services.},\n\tpages = {1773--1776},\n\tbooktitle = {Proceedings of the 2018 International Conference on Management of Data},\n\tpublisher = {{ACM}},\n\tauthor = {Yang, Ke and Stoyanovich, Julia and Asudeh, Abolfazl and Howe, Bill and Jagadish, {HV} and Miklau, Gerome},\n\turldate = {2019-03-26},\n\tdate = {2018},\n\tnote = {event-place: Houston, {TX}, {USA}},\n\tkeywords = {fairness, ranking, diversity, transparency, accountability, data, data ethics, responsibly, stability}\n}
\n
\n\n\n
\n Algorithmic decisions often result in scoring and ranking individuals to determine credit worthiness, qualifications for college admissions and employment, and compatibility as dating partners. While automatic and seemingly objective, ranking algorithms can discriminate against individuals and protected groups, and exhibit low diversity. Furthermore, ranked results are often unstable – small changes in the input data or in the ranking methodology may lead to drastic changes in the output, making the result uninformative and easy to manipulate. Similar concerns apply in cases where items other than individuals are ranked, including colleges, academic departments, or products. Despite the ubiquity of rankers, there is, to the best of our knowledge, no technical work that focuses on making rankers transparent. In this demonstration we present Ranking Facts, a Web-based application that generates a \"nutritional label\" for rankings. Ranking Facts is made up of a collection of visual widgets that implement our latest research results on fairness, stability, and transparency for rankings, and that communicate details of the ranking methodology, or of the output, to the end user. We will showcase Ranking Facts on real datasets from different domains, including college rankings, criminal risk assessment, and financial services.\n
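One concern the abstract raises, the instability of ranked output under small input changes, can be summarized with the kind of statistic a "nutritional label" might report (illustrative only; this is not the Ranking Facts implementation): the average overlap between the original top-k and the top-k recomputed after small random perturbations of the scores.

import numpy as np

def topk_stability(scores, k, noise=0.01, trials=100, seed=0):
    """Mean fraction of the original top-k that survives Gaussian score perturbations."""
    rng = np.random.default_rng(seed)
    scores = np.asarray(scores, dtype=float)
    base = set(np.argsort(-scores)[:k])
    overlaps = []
    for _ in range(trials):
        perturbed = scores + rng.normal(0.0, noise, size=scores.shape)
        overlaps.append(len(base & set(np.argsort(-perturbed)[:k])) / k)
    return float(np.mean(overlaps))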
\n\n\n
\n\n\n\n\n\n
\n
\n\n\n\n\n
\n\n\n \n\n \n \n \n \n\n
\n"}; document.write(bibbase_data.data);