var bibbase_data = {"data":"\n\n
\n <script src=\"https://bibbase.org/show?bib=https%3A%2F%2Fapi.zotero.org%2Fusers%2F6655%2Fcollections%2F3TB3KT36%2Fitems%3Fkey%3DVFvZhZXIoHNBbzoLZ1IM2zgf%26format%3Dbibtex%26limit%3D100&jsonp=1&jsonp=1\"></script>\n
\n \n <?php\n $contents = file_get_contents(\"https://bibbase.org/show?bib=https%3A%2F%2Fapi.zotero.org%2Fusers%2F6655%2Fcollections%2F3TB3KT36%2Fitems%3Fkey%3DVFvZhZXIoHNBbzoLZ1IM2zgf%26format%3Dbibtex%26limit%3D100&jsonp=1\");\n print_r($contents);\n ?>\n
\n \n <iframe src=\"https://bibbase.org/show?bib=https%3A%2F%2Fapi.zotero.org%2Fusers%2F6655%2Fcollections%2F3TB3KT36%2Fitems%3Fkey%3DVFvZhZXIoHNBbzoLZ1IM2zgf%26format%3Dbibtex%26limit%3D100&jsonp=1\"></iframe>\n
\n \n For more details see the documention.\n
\nTo the site owner:
\n\nAction required! Mendeley is changing its\n API. In order to keep using Mendeley with BibBase past April\n 14th, you need to:\n
\n \n \n Fix it now\n
\n@inproceedings{raj_towards_2024,\n\tseries = {{LNCS}},\n\ttitle = {Towards optimizing ranking in grid-layout for provider-side fairness},\n\tvolume = {14612},\n\tcopyright = {All rights reserved},\n\turl = {https://md.ekstrandom.net/pubs/ecir-fair-grids},\n\tdoi = {10.1007/978-3-031-56069-9_7},\n\tabstract = {Information access systems, such as search engines and recommender systems, order and position results based on their estimated relevance. These results are then evaluated for a range of concerns, including provider-side fairness: whether exposure to users is fairly distributed among items and the people who created them. Several fairness-aware ranking and re-ranking techniques have been proposed to ensure fair exposure for providers, but this work focuses almost exclusively on linear layouts in which items are displayed in single ranked list. Many widely-used systems use other layouts, such as the grid views common in streaming platforms, image search, and other applications. Providing fair exposure to providers in such layouts is not well-studied. We seek to fill this gap by providing a grid-aware re-ranking algorithm to optimize layouts for provider-side fairness by adapting existing re-ranking techniques to grid-aware browsing models, and an analysis of the effect of grid-specific factors such as device size on the resulting fairness optimization.},\n\tlanguage = {en},\n\turldate = {2024-01-04},\n\tbooktitle = {Advances in {Information} {Retrieval}},\n\tpublisher = {Springer},\n\tauthor = {Raj, Amifa and Ekstrand, Michael D.},\n\tmonth = mar,\n\tyear = {2024},\n\tpages = {90--105},\n}\n\n\n
@inproceedings{ihemelandu_multiple_2024,\n\tseries = {{LNCS}},\n\ttitle = {Multiple testing for {IR} and recommendation system experiments},\n\tvolume = {14610},\n\turl = {https://md.ekstrandom.net/pubs/ecir-mcp},\n\tdoi = {10.1007/978-3-031-56063-7_37},\n\tabstract = {While there has been significant research on statistical techniques for comparing two information retrieval (IR) systems, many IR experiments test more than two systems. This can lead to inflated false discoveries due to the multiple-comparison problem (MCP). A few IR studies have investigated multiple comparison procedures; these studies mostly use TREC data and control the familywise error rate. In this study, we extend their investigation to include recommendation system evaluation data as well as multiple comparison procedures that controls for False Discovery Rate (FDR).},\n\tlanguage = {en},\n\turldate = {2024-01-04},\n\tbooktitle = {Proceedings of the 46th {European} {Conference} on {Information} {Retrieval}},\n\tpublisher = {Springer},\n\tauthor = {Ihemelandu, Ngozi and Ekstrand, Michael D.},\n\tmonth = mar,\n\tyear = {2024},\n\tpages = {449--457},\n}\n\n\n
@inproceedings{wegmeth_revealing_2024,\n\ttitle = {Revealing the {Hidden} {Impact} of {Top}-{N} {Metrics} on {Optimization} in {Recommender} {Systems}},\n\tisbn = {978-3-031-56027-9},\n\tdoi = {10.1007/978-3-031-56027-9_9},\n\tabstract = {The hyperparameters of recommender systems for top-n predictions are typically optimized to enhance the predictive performance of algorithms. Thereby, the optimization algorithm, e.g., grid search or random search, searches for the best hyperparameter configuration according to an optimization-target metric, like nDCG or Precision. In contrast, the optimized algorithm, e.g., Alternating Least Squares Matrix Factorization or Bayesian Personalized Ranking, internally optimizes a different loss function during training, like squared error or cross-entropy. To tackle this discrepancy, recent work focused on generating loss functions better suited for recommender systems. Yet, when evaluating an algorithm using a top-n metric during optimization, another discrepancy between the optimization-target metric and the training loss has so far been ignored. During optimization, the top-n items are selected for computing a top-n metric; ignoring that the top-n items are selected from the recommendations of a model trained with an entirely different loss function. Item recommendations suitable for optimization-target metrics could be outside the top-n recommended items; hiddenly impacting the optimization performance. Therefore, we were motivated to analyze whether the top-n items are optimal for optimization-target top-n metrics. In pursuit of an answer, we exhaustively evaluate the predictive performance of 250 selection strategies besides selecting the top-n. We extensively evaluate each selection strategy over twelve implicit feedback and eight explicit feedback data sets with eleven recommender systems algorithms. 
Our results show that there exist selection strategies other than top-n that increase predictive performance for various algorithms and recommendation domains. However, the performance of the top \\$\\${\\textbackslash}sim 43{\\textbackslash}\\%\\$\\$∼43\\%of selection strategies is not significantly different. We discuss the impact of our findings on optimization and re-ranking in recommender systems and feasible solutions. The implementation of our study is publicly available.},\n\tlanguage = {en},\n\tbooktitle = {Advances in {Information} {Retrieval}},\n\tpublisher = {Springer Nature Switzerland},\n\tauthor = {Wegmeth, Lukas and Vente, Tobias and Purucker, Lennart},\n\teditor = {Goharian, Nazli and Tonellotto, Nicola and He, Yulan and Lipani, Aldo and McDonald, Graham and Macdonald, Craig and Ounis, Iadh},\n\tyear = {2024},\n\tpages = {140--156},\n}\n\n\n
@inproceedings{pathak_empirical_2024,\n\tseries = {{LNCS}},\n\ttitle = {An {Empirical} {Analysis} of {Intervention} {Strategies}’ {Effectiveness} for {Countering} {Misinformation} {Amplification} by {Recommendation} {Algorithms}},\n\tvolume = {14611},\n\tisbn = {978-3-031-56066-8},\n\tdoi = {10.1007/978-3-031-56066-8_23},\n\tabstract = {Social network platforms connect people worldwide, facilitating communication, information sharing, and personal/professional networking. They use recommendation algorithms to personalize content and enhance user experiences. However, these algorithms can unintentionally amplify misinformation by prioritizing engagement over accuracy. For instance, recent works suggest that popularity-based and network-based recommendation algorithms contribute the most to misinformation diffusion. In our study, we present an exploration on two Twitter datasets to understand the impact of intervention techniques on combating misinformation amplification initiated by recommendation algorithms. We simulate various scenarios and evaluate the effectiveness of intervention strategies in social sciences such as Virality Circuit Breakers and accuracy nudges. Our findings highlight that these intervention strategies are generally successful when applied on top of collaborative filtering and content-based recommendation algorithms, while having different levels of effectiveness depending on the number of users keen to spread fake news present in the dataset.},\n\tlanguage = {en},\n\tbooktitle = {Advances in {Information} {Retrieval}},\n\tpublisher = {Springer Nature Switzerland},\n\tauthor = {Pathak, Royal and Spezzano, Francesca},\n\teditor = {Goharian, Nazli and Tonellotto, Nicola and He, Yulan and Lipani, Aldo and McDonald, Graham and Macdonald, Craig and Ounis, Iadh},\n\tyear = {2024},\n\tpages = {285--301},\n}\n\n\n
@article{lopes_recommendations_2024,\n\ttitle = {Recommendations with minimum exposure guarantees: a post-processing framework},\n\tvolume = {236},\n\tissn = {0957-4174},\n\tshorttitle = {Recommendations with minimum exposure guarantees},\n\turl = {https://www.sciencedirect.com/science/article/pii/S0957417423016664},\n\tdoi = {10.1016/j.eswa.2023.121164},\n\tabstract = {Relevance-based ranking is a popular ingredient in recommenders, but it frequently struggles to meet fairness criteria because social and cultural norms may favor some item groups over others. For instance, some items might receive lower ratings due to some sort of bias (e.g. gender bias). A fair ranking should balance the exposure of items from advantaged and disadvantaged groups. To this end, we propose a novel post-processing framework to produce fair, exposure-aware recommendations. Our approach is based on an integer linear programming model maximizing the expected utility while satisfying a minimum exposure constraint. The model has fewer variables than previous work and thus can be deployed to larger datasets and allows the organization to define a minimum level of exposure for groups of items. We conduct an extensive empirical evaluation indicating that our new framework can increase the exposure of items from disadvantaged groups at a small cost of recommendation accuracy.},\n\turldate = {2023-09-19},\n\tjournal = {Expert Systems with Applications},\n\tauthor = {Lopes, Ramon and Alves, Rodrigo and Ledent, Antoine and Santos, Rodrygo L. T. and Kloft, Marius},\n\tmonth = feb,\n\tyear = {2024},\n\tkeywords = {Exposure, Fairness, Integer linear programming, Recommender systems, to-read},\n\tpages = {121164},\n}\n\n\n
@inproceedings{ihemelandu_candidate_2023,\n\ttitle = {Candidate set sampling for evaluating top-{N} recommendation},\n\turl = {http://arxiv.org/abs/2309.11723},\n\tdoi = {10.1109/WI-IAT59888.2023.00018},\n\tabstract = {The strategy for selecting candidate sets -- the set of items that the recommendation system is expected to rank for each user -- is an important decision in carrying out an offline top-\\$N\\$ recommender system evaluation. The set of candidates is composed of the union of the user's test items and an arbitrary number of non-relevant items that we refer to as decoys. Previous studies have aimed to understand the effect of different candidate set sizes and selection strategies on evaluation. In this paper, we extend this knowledge by studying the specific interaction of candidate set selection strategies with popularity bias, and use simulation to assess whether sampled candidate sets result in metric estimates that are less biased with respect to the true metric values under complete data that is typically unavailable in ordinary experiments.},\n\turldate = {2023-11-08},\n\tbooktitle = {Proceedings of the 22nd {IEEE}/{WIC} international conference on web intelligence and intelligent agent technology},\n\tauthor = {Ihemelandu, Ngozi and Ekstrand, Michael D.},\n\tmonth = oct,\n\tyear = {2023},\n\tnote = {arXiv:2309.11723 [cs]},\n\tkeywords = {Computer Science - Information Retrieval},\n\tpages = {88--94},\n}\n\n\n
@article{wang_modeling_2023,\n\ttitle = {Modeling uncertainty to improve personalized recommendations via {Bayesian} deep learning},\n\tvolume = {16},\n\tissn = {2364-4168},\n\turl = {https://doi.org/10.1007/s41060-020-00241-1},\n\tdoi = {10.1007/s41060-020-00241-1},\n\tabstract = {Modeling uncertainty has been a major challenge in developing Machine Learning solutions to solve real world problems in various domains. In Recommender Systems, a typical usage of uncertainty is to balance exploration and exploitation, where the uncertainty helps to guide the selection of new options in exploration. Recent advances in combining Bayesian methods with deep learning enable us to express uncertain status in deep learning models. In this paper, we investigate an approach based on Bayesian deep learning to improve personalized recommendations. We first build deep learning architectures to learn useful representation of user and item inputs for predicting their interactions. We then explore multiple embedding components to accommodate different types of user and item inputs. Based on Bayesian deep learning techniques, a key novelty of our approach is to capture the uncertainty associated with the model output and further utilize it to boost exploration in the context of Recommender Systems. We test the proposed approach in both a Collaborative Filtering and a simulated online recommendation setting. Experimental results on publicly available benchmarks demonstrate the benefits of our approach in improving the recommendation performance.},\n\tlanguage = {en},\n\tnumber = {2},\n\turldate = {2024-03-17},\n\tjournal = {International Journal of Data Science and Analytics},\n\tauthor = {Wang, Xin and Kadıoğlu, Serdar},\n\tmonth = aug,\n\tyear = {2023},\n\tpages = {191--201},\n}\n\n\n
@inproceedings{wegmeth_effect_2023,\n\ttitle = {The effect of random seeds for data splitting on recommendation accuracy},\n\tabstract = {The evaluation of recommender system algorithms depends on randomness, e.g., during randomly splitting data into training and testing data. We suspect that failing to account for randomness in this scenario may lead to misrepresenting the predictive accuracy of recommendation algorithms. To understand the community’s view of the importance of randomness, we conducted a paper study on 39 full papers published at the ACM RecSys 2022 conference. We found that the authors of 26 papers used some variation of a holdout split that requires a random seed. However, only five papers explicitly repeated experiments and averaged their results over different random seeds. This potentially problematic research practice motivated us to analyze the effect of data split random seeds on recommendation accuracy. Therefore, we train three common algorithms on nine public data sets with 20 data split random seeds, evaluate them on two ranking metrics with three different ranking cutoff values 𝑘, and compare the results. In the extreme case with 𝑘 = 1, we show that depending on the data split random seed, the accuracy with traditional recommendation algorithms deviates by up to ∼6.3\\% from the mean accuracy achieved on the data set. Hence, we show that an algorithm may significantly over- or under-perform when maliciously or negligently selecting a random seed for splitting the data. To showcase a mitigation strategy and better research practice, we compare holdout to cross-validation and show that, again, for 𝑘 = 1, the accuracy of algorithms evaluated with cross-validation deviates only up to ∼2.3\\% from the mean accuracy achieved on the data set. 
Furthermore, we found that the deviation becomes smaller the higher the value of 𝑘 for both holdout and cross-validation.},\n\tlanguage = {en},\n\tbooktitle = {Perspectives on the {Evaluation} of {Recommender} {Systems} {Workshop} ({PERSPECTIVES} 2023)},\n\tauthor = {Wegmeth, Lukas and Vente, Tobias and Purucker, Lennart and Beel, Joeran},\n\tmonth = sep,\n\tyear = {2023},\n\tkeywords = {to-read},\n}\n\n\n
@inproceedings{vente_introducing_2023,\n\taddress = {New York, NY, USA},\n\tseries = {{RecSys} '23},\n\ttitle = {Introducing {LensKit}-{Auto}, an experimental automated recommender system ({AutoRecSys}) toolkit},\n\tisbn = {9798400702419},\n\turl = {https://dl.acm.org/doi/10.1145/3604915.3610656},\n\tdoi = {10.1145/3604915.3610656},\n\tabstract = {LensKit is one of the first and most popular Recommender System libraries. While LensKit offers a wide variety of features, it does not include any optimization strategies or guidelines on how to select and tune LensKit algorithms. LensKit developers have to manually include third-party libraries into their experimental setup or implement optimization strategies by hand to optimize hyperparameters. We found that 63.6\\% (21 out of 33) of papers using LensKit algorithms for their experiments did not select algorithms or tune hyperparameters. Non-optimized models represent poor baselines and produce less meaningful research results. This demo introduces LensKit-Auto. LensKit-Auto automates the entire Recommender System pipeline and enables LensKit developers to automatically select, optimize, and ensemble LensKit algorithms.},\n\turldate = {2023-09-18},\n\tbooktitle = {Proceedings of the 17th {ACM} {Conference} on {Recommender} {Systems}},\n\tpublisher = {Association for Computing Machinery},\n\tauthor = {Vente, Tobias and Ekstrand, Michael and Beel, Joeran},\n\tmonth = sep,\n\tyear = {2023},\n\tkeywords = {Algorithm Selection, AutoRecSys, Automated Recommender Systems, CASH, Hyperparameter Optimization, Recommender Systems},\n\tpages = {1212--1216},\n}\n\n\n
@misc{li_mitigating_2023,\n\ttitle = {Mitigating mainstream bias in recommendation via cost-sensitive learning},\n\turl = {http://arxiv.org/abs/2307.13632},\n\tdoi = {10.1145/3578337.3605134},\n\tabstract = {Mainstream bias, where some users receive poor recommendations because their preferences are uncommon or simply because they are less active, is an important aspect to consider regarding fairness in recommender systems. Existing methods to mitigate mainstream bias do not explicitly model the importance of these non-mainstream users or, when they do, it is in a way that is not necessarily compatible with the data and recommendation model at hand. In contrast, we use the recommendation utility as a more generic and implicit proxy to quantify mainstreamness, and propose a simple user-weighting approach to incorporate it into the training process while taking the cost of potential recommendation errors into account. We provide extensive experimental results showing that quantifying mainstreamness via utility is better able at identifying non-mainstream users, and that they are indeed better served when training the model in a cost-sensitive way. This is achieved with negligible or no loss in overall recommendation accuracy, meaning that the models learn a better balance across users. In addition, we show that research of this kind, which evaluates recommendation quality at the individual user level, may not be reliable if not using enough interactions when assessing model performance.},\n\turldate = {2023-07-29},\n\tauthor = {Li, Roger Zhe and Urbano, Julián and Hanjalic, Alan},\n\tmonth = jul,\n\tyear = {2023},\n\tnote = {arXiv:2307.13632 [cs]},\n\tkeywords = {Computer Science - Information Retrieval},\n}\n\n\n
@inproceedings{ihemelandu_inference_2023,\n\taddress = {New York, NY, USA},\n\tseries = {{SIGIR} '23},\n\ttitle = {Inference at scale: significance testing for large search and recommendation experiments},\n\tcopyright = {All rights reserved},\n\tisbn = {978-1-4503-9408-6},\n\tshorttitle = {Inference at scale},\n\turl = {https://dl.acm.org/doi/10.1145/3539618.3592004},\n\tdoi = {10.1145/3539618.3592004},\n\tabstract = {A number of information retrieval studies have been done to assess which statistical techniques are appropriate for comparing systems. However, these studies are focused on TREC-style experiments, which typically have fewer than 100 topics. There is no similar line of work for large search and recommendation experiments; such studies typically have thousands of topics or users and much sparser relevance judgements, so it is not clear if recommendations for analyzing traditional TREC experiments apply to these settings. In this paper, we empirically study the behavior of significance tests with large search and recommendation evaluation data. Our results show that the Wilcoxon and Sign tests show significantly higher Type-1 error rates for large sample sizes than the bootstrap, randomization and t-tests, which were more consistent with the expected error rate. While the statistical tests displayed differences in their power for smaller sample sizes, they showed no difference in their power for large sample sizes. We recommend the sign and Wilcoxon tests should not be used to analyze large scale evaluation results. Our result demonstrate that with Top-N recommendation and large search evaluation data, most tests would have a 100\\% chance of finding statistically significant results. 
Therefore, the effect size should be used to determine practical or scientific significance.},\n\turldate = {2023-07-23},\n\tbooktitle = {Proceedings of the 46th {International} {ACM} {SIGIR} {Conference} on {Research} and {Development} in {Information} {Retrieval}},\n\tpublisher = {Association for Computing Machinery},\n\tauthor = {Ihemelandu, Ngozi and Ekstrand, Michael D.},\n\tmonth = jul,\n\tyear = {2023},\n\tkeywords = {evaluation, statistical inference},\n\tpages = {2087--2091},\n}\n\n\n
@article{ekstrand_distributionally-informed_2023,\n\ttitle = {Distributionally-informed recommender system evaluation},\n\tcopyright = {All rights reserved},\n\turl = {https://dl.acm.org/doi/10.1145/3613455},\n\tdoi = {10.1145/3613455},\n\tabstract = {Current practice for evaluating recommender systems typically focuses on point estimates of user-oriented effectiveness metrics or business metrics, sometimes combined with additional metrics for considerations such as diversity and novelty. In this paper, we argue for the need for researchers and practitioners to attend more closely to various distributions that arise from a recommender system (or other information access system) and the sources of uncertainty that lead to these distributions. One immediate implication of our argument is that both researchers and practitioners must report and examine more thoroughly the distribution of utility between and within different stakeholder groups. However, distributions of various forms arise in many more aspects of the recommender systems experimental process, and distributional thinking has substantial ramifications for how we design, evaluate, and present recommender systems evaluation and research results. Leveraging and emphasizing distributions in the evaluation of recommender systems is a necessary step to ensure that the systems provide appropriate and equitably-distributed benefit to the people they affect.},\n\turldate = {2023-09-07},\n\tjournal = {ACM Transactions on Recommender Systems},\n\tauthor = {Ekstrand, Michael D. and Carterette, Ben and Diaz, Fernando},\n\tmonth = aug,\n\tyear = {2023},\n\tnote = {Just Accepted},\n\tkeywords = {distributions, evaluation, exposure, statistics},\n}\n\n\n
@article{michiels_framework_2023,\n\ttitle = {A {Framework} and {Toolkit} for {Testing} the {Correctness} of {Recommendation} {Algorithms}},\n\turl = {https://doi.org/10.1145/3591109},\n\tdoi = {10.1145/3591109},\n\tabstract = {Evaluating recommender systems adequately and thoroughly is an important\ntask. Significant efforts are dedicated to proposing metrics, methods and\nprotocols for doing so. However, there has been little discussion in the\nrecommender systems’ literature on the topic of testing. In this work, we\nadopt and adapt concepts from the software testing domain, e.g., code\ncoverage, metamorphic testing, or property-based testing, to help\nresearchers to detect and correct faults in recommendation algorithms. We\npropose a test suite that can be used to validate the correctness of a\nrecommendation algorithm, and thus identify and correct issues that can\naffect the performance and behavior of these algorithms. Our test suite\ncontains both black box and white box tests at every level of abstraction,\ni.e., system, integration and unit. To facilitate adoption, we release\nRecPack Tests, an open-source Python package containing template test\nimplementations. We use it to test four popular Python packages for\nrecommender systems: RecPack, PyLensKit, Surprise and Cornac. Despite the\nhigh test coverage of each of these packages, we find that we are still\nable to uncover undocumented functional requirements and even some bugs.\nThis validates our thesis that testing the correctness of recommendation\nalgorithms can complement traditional methods for evaluating\nrecommendation algorithms.},\n\tjournal = {ACM Trans. Recomm. Syst.},\n\tauthor = {Michiels, Lien and Verachtert, Robin and Ferraro, Andres and Falk, Kim and Goethals, Bart},\n\tmonth = apr,\n\tyear = {2023},\n\tnote = {Place: New York, NY, USA\nPublisher: Association for Computing Machinery},\n}\n\n\n
@inproceedings{dokoupil_easystudy_2023,\n\taddress = {New York, NY, USA},\n\tseries = {{RecSys} '23},\n\ttitle = {{EasyStudy}: {Framework} for {Easy} {Deployment} of {User} {Studies} on {Recommender} {Systems}},\n\tisbn = {9798400702419},\n\tshorttitle = {{EasyStudy}},\n\turl = {https://dl.acm.org/doi/10.1145/3604915.3610640},\n\tdoi = {10.1145/3604915.3610640},\n\tabstract = {Improvements in the recommender systems (RS) domain are not possible without a thorough way to evaluate and compare newly proposed approaches. User studies represent a viable alternative to online and offline evaluation schemes, but despite their numerous benefits, they are only rarely used. One of the main reasons behind this fact is that preparing a user study from scratch involves a lot of extra work on top of a simple algorithm proposal. To simplify this task, we propose EasyStudy, a modular framework built on the credo “Make simple things fast and hard things possible”. It features ready-to-use datasets, preference elicitation methods, incrementally tuned baseline algorithms, study flow plugins, and evaluation metrics. As a result, a simple study comparing several RS can be deployed with just a few clicks, while more complex study designs can still benefit from a range of reusable components, such as preference elicitation. Overall, EasyStudy dramatically decreases the gap between the laboriousness of offline evaluation vs. user studies and, therefore, may contribute towards the more reliable and insightful user-centric evaluation of next-generation RS. 
The project repository is available from https://bit.ly/easy-study-repo.},\n\turldate = {2023-09-18},\n\tbooktitle = {Proceedings of the 17th {ACM} {Conference} on {Recommender} {Systems}},\n\tpublisher = {Association for Computing Machinery},\n\tauthor = {Dokoupil, Patrik and Peska, Ladislav},\n\tmonth = sep,\n\tyear = {2023},\n\tkeywords = {Recommender systems, evaluation frameworks, user centric, user studies},\n\tpages = {1196--1199},\n}\n\n\n
@article{pathak_understanding_2023,\n\ttitle = {Understanding the {Contribution} of {Recommendation} {Algorithms} on {Misinformation} {Recommendation} and {Misinformation} {Dissemination} on {Social} {Networks}},\n\tissn = {1559-1131},\n\turl = {https://dl.acm.org/doi/10.1145/3616088},\n\tdoi = {10.1145/3616088},\n\tabstract = {Social networks are a platform for individuals and organizations to connect with each other and inform, advertise, spread ideas, and ultimately influence opinions. These platforms have been known to propel misinformation. We argue that this could be compounded by the recommender algorithms that these platforms use to suggest items potentially of interest to their users, given the known biases and filter bubbles issues affecting recommender systems. While much has been studied about misinformation on social networks, the potential exacerbation that could result from recommender algorithms in this environment is in its infancy. In this manuscript, we present the result of an in-depth analysis conducted on two datasets (Politifact FakeNewsNet dataset and HealthStory FakeHealth dataset) in order to deepen our understanding of the interconnection between recommender algorithms and misinformation spread on Twitter. In particular, we explore the degree to which well-known recommendation algorithms are prone to be impacted by misinformation. Via simulation, we also study misinformation diffusion on social networks, as triggered by suggestions produced by these recommendation algorithms. Outcomes from this work evidence that misinformation does not equally affect all recommendation algorithms. Popularity-based and network-based recommender algorithms contribute the most to misinformation diffusion. Users who are known to be superspreaders are known to directly impact algorithmic performance and misinformation spread in specific scenarios. 
Findings emerging from our exploration result in a number of implications for researchers and practitioners to consider when designing and deploying recommender algorithms in social networks.},\n\turldate = {2023-09-19},\n\tjournal = {ACM Transactions on the Web},\n\tauthor = {Pathak, Royal and Spezzano, Francesca and Pera, Maria Soledad},\n\tmonth = aug,\n\tyear = {2023},\n\tnote = {Just Accepted},\n\tkeywords = {Twitter, diffusion, misinformation, news, recommendation algorithms, social networks},\n}\n\n\n
@article{godinot_measuring_2023,\n\ttitle = {Measuring the effect of collaborative filtering on the diversity of users’ attention},\n\tvolume = {8},\n\tcopyright = {2023 The Author(s)},\n\tissn = {2364-8228},\n\turl = {https://appliednetsci.springeropen.com/articles/10.1007/s41109-022-00530-7},\n\tdoi = {10.1007/s41109-022-00530-7},\n\tabstract = {While the ever-increasing emergence of online services has led to a growing interest in the development of recommender systems, the algorithms underpinning such systems have begun to be criticized for their role in limiting the variety of content exposed to users. In this context, the notion of diversity has been proposed as a way of mitigating the side effects resulting from the specialization of recommender systems. In this paper, using a well-known recommender system that makes use of collaborative filtering in the context of musical content, we analyze the diversity of recommendations generated through the lens of the recently proposed information network diversity measure. The results of our study offer significant insights into the effect of algorithmic recommendations. On the one hand, we show that the musical selections of a large proportion of users are diversified as a result of the recommendations. On the other hand, however, such improvements do not benefit all users. They are in fact mainly restricted to users with a low level of activity or whose past musical listening selections are very narrow. Through more in-depth investigations, we also discovered that while recommendations generally increase the variety of the songs recommended to users, they nonetheless fail to provide a balanced exposure to the different related categories.},\n\tlanguage = {en},\n\tnumber = {1},\n\turldate = {2023-09-19},\n\tjournal = {Applied Network Science},\n\tauthor = {Godinot, Augustin and Tarissan, Fabien},\n\tmonth = dec,\n\tyear = {2023},\n\tnote = {Number: 1\nPublisher: SpringerOpen},\n\tpages = {1--18},\n}\n\n\n
@inproceedings{dokoupil_effect_2023,\n\taddress = {New York, NY, USA},\n\tseries = {{UMAP} '23 {Adjunct}},\n\ttitle = {The {Effect} of {Similarity} {Metric} and {Group} {Size} on {Outlier} {Selection} \\& {Satisfaction} in {Group} {Recommender} {Systems}},\n\tisbn = {978-1-4503-9891-6},\n\turl = {https://dl.acm.org/doi/10.1145/3563359.3597386},\n\tdoi = {10.1145/3563359.3597386},\n\tabstract = {Group recommender systems (GRS) are a specific case of recommender systems (RS), where recommendations are constructed to a group of users rather than an individual. GRS has diverse application areas including trip planning, recommending movies to watch together, or music in shared environments. However, due to the lack of large datasets with group decision-making feedback information, or even the group definitions, GRS approaches are often evaluated offline w.r.t. individual user feedback and artificially generated groups. These synthetic groups are usually constructed w.r.t. pre-defined group size and inter-user similarity metric. While numerous variants of synthetic group generation procedures were utilized so far, its impact on the evaluation results was not sufficiently discussed. In this paper, we address this research gap by investigating the impact of various synthetic group generation procedures, namely the usage of different user similarity metrics and the effect of group sizes. We consider them in the context of “outlier vs. majority” groups, where a group of similar users is extended with one or more diverse ones. Experimental results indicate a strong impact of the selected similarity metric on both the typical characteristics of selected outliers as well as the performance of individual GRS algorithms. 
Moreover, we show that certain algorithms better adapt to larger groups than others.},\n\turldate = {2023-06-25},\n\tbooktitle = {Adjunct {Proceedings} of the 31st {ACM} {Conference} on {User} {Modeling}, {Adaptation} and {Personalization}},\n\tpublisher = {Association for Computing Machinery},\n\tauthor = {Dokoupil, Patrik and Peska, Ladislav},\n\tmonth = jun,\n\tyear = {2023},\n\tkeywords = {Group Recommender systems, Synthetic groups construction, User similarity metrics},\n\tpages = {296--301},\n}\n\n\n
@inproceedings{bauer_fairreckit_2023,\n\taddress = {New York, NY, USA},\n\ttitle = {{FairRecKit}: {A} {Web}-based {Analysis} {Software} for {Recommender} {Evaluations}},\n\turl = {https://doi.org/10.1145/3576840.3578274},\n\tdoi = {10.1145/3576840.3578274},\n\tabstract = {FairRecKit is a web-based analysis software that supports researchers in\nperforming, analyzing, and understanding recommendation computations. The\nidea behind FairRecKit is to facilitate the in-depth analysis of\nrecommendation outcomes considering fairness aspects. With (nested)\nfilters on user or item attributes, metrics can easily be compared across\nuser and item subgroups. Further, (nested) filters can be used on the\ndataset level; this way, recommendation outcomes can be compared across\nseveral sub-datasets to analyze for differences considering fairness\naspects. The software currently features five datasets, 11 metrics, and 21\nrecommendation algorithms to be used in computational experimentation. It\nis open source and developed in a modular manner to facilitate extension.\nThe analysis software consists of two components: A software package\n(FairRecKitLib) for running recommendation algorithms on the available\ndatasets and a web-based user interface (FairRecKitApp) to start\nexperiments, retrieve results of previous experiments, and analyze\ndetails. 
The application also comes with extensive documentation and\noptions for result customization, which makes for a flexible tool that\nsupports in-depth analysis.},\n\turldate = {2023-03-28},\n\tbooktitle = {{CHIIR} '23},\n\tpublisher = {Association for Computing Machinery},\n\tauthor = {Bauer, Christine and Chung, Lennard and Cornelissen, Aleksej and van Driessel, Isabelle and van der Hoorn, Diede and de Jong, Yme and Le, Lan and Najiyan Tabriz, Sanaz and Spaans, Roderick and Thijsen, Casper and Verbeeten, Robert and Wesseling, Vos and Wieland, Fern},\n\tmonth = mar,\n\tyear = {2023},\n\tnote = {Journal Abbreviation: CHIIR '23},\n\tkeywords = {evaluation, music, web-based, recommender systems, toolkit, analysis, movies, resource},\n\tpages = {438--443},\n}\n\n\n
@article{balchanowski_comparative_2023,\n\ttitle = {A {Comparative} {Study} of {Rank} {Aggregation} {Methods} in {Recommendation} {Systems}},\n\tvolume = {25},\n\tissn = {1099-4300},\n\turl = {https://www.mdpi.com/1099-4300/25/1/132},\n\tdoi = {10.3390/e25010132},\n\tabstract = {The aim of a recommender system is to suggest to the user certain products\nor services that most likely will interest them. Within the context of\npersonalized recommender systems, a number of algorithms have been\nsuggested to generate a ranking of items tailored to individual user\npreferences. However, these algorithms do not generate identical\nrecommendations, and for this reason it has been suggested in the\nliterature that the results of these algorithms can be combined using\naggregation techniques, hoping that this will translate into an\nimprovement in the quality of the final recommendation. In order to see\nwhich of these techniques increase the quality of recommendations to the\ngreatest extent, the authors of this publication conducted experiments in\nwhich they considered five recommendation algorithms and 20 aggregation\nmethods. The research was carried out on the popular and publicly\navailable MovieLens 100k and MovieLens 1M datasets, and the results were\nconfirmed by statistical tests.},\n\tnumber = {1},\n\turldate = {2023-01-12},\n\tjournal = {Entropy},\n\tauthor = {Bałchanowski, Michał and Boryczka, Urszula},\n\tmonth = jan,\n\tyear = {2023},\n\tnote = {Publisher: Multidisciplinary Digital Publishing Institute},\n\tpages = {132},\n}\n\n\n
@inproceedings{raj_measuring_2022,\n\ttitle = {Measuring fairness in ranked results: an analytical and empirical comparison},\n\turl = {https://md.ekstrandom.net/pubs/fair-ranking},\n\tdoi = {10.1145/3477495.3532018},\n\tabstract = {Information access systems, such as search and recommender systems, often use ranked lists to present results believed to be relevant to the user's information need. Evaluating these lists for their fairness along with other traditional metrics provides a more complete understanding of an information access system's behavior beyond accuracy or utility constructs. To measure the (un)fairness of rankings, particularly with respect to the protected group(s) of producers or providers, several metrics have been proposed in the last several years. However, an empirical and comparative analyses of these metrics showing the applicability to specific scenario or real data, conceptual similarities, and differences is still lacking.\n\nWe aim to bridge the gap between theoretical and practical ap-plication of these metrics. In this paper we describe several fair ranking metrics from the existing literature in a common notation, enabling direct comparison of their approaches and assumptions, and empirically compare them on the same experimental setup and data sets in the context of three information access tasks. We also provide a sensitivity analysis to assess the impact of the design choices and parameter settings that go in to these metrics and point to additional work needed to improve fairness measurement.},\n\tbooktitle = {Proceedings of the 45th {International} {ACM} {SIGIR} {Conference} on {Research} and {Development} in {Information} {Retrieval}},\n\tpublisher = {ACM},\n\tauthor = {Raj, Amifa and Ekstrand, Michael D},\n\tmonth = jul,\n\tyear = {2022},\n\tpages = {726--736},\n}\n\n\n
@unpublished{bhattacharya_what_2022,\n\ttitle = {What {You} {Like}: {Generating} {Explainable} {Topical} {Recommendations} for {Twitter} {Using} {Social} {Annotations}},\n\turl = {http://arxiv.org/abs/2212.13897},\n\tabstract = {With over 500 million tweets posted per day, in Twitter, it is difficult\nfor Twitter users to discover interesting content from the deluge of\nuninteresting posts. In this work, we present a novel, explainable,\ntopical recommendation system, that utilizes social annotations, to help\nTwitter users discover tweets, on topics of their interest. A major\nchallenge in using traditional rating dependent recommendation systems,\nlike collaborative filtering and content based systems, in high volume\nsocial networks is that, due to attention scarcity most items do not get\nany ratings. Additionally, the fact that most Twitter users are passive\nconsumers, with 44\\% users never tweeting, makes it very difficult to use\nuser ratings for generating recommendations. Further, a key challenge in\ndeveloping recommendation systems is that in many cases users reject\nrelevant recommendations if they are totally unfamiliar with the\nrecommended item. Providing a suitable explanation, for why the item is\nrecommended, significantly improves the acceptability of recommendation.\nBy virtue of being a topical recommendation system our method is able to\npresent simple topical explanations for the generated recommendations.\nComparisons with state-of-the-art matrix factorization based collaborative\nfiltering, content based and social recommendations demonstrate the\nefficacy of the proposed approach.},\n\tauthor = {Bhattacharya, Parantapa and Ghosh, Saptarshi and Zafar, Muhammad Bilal and Ghosh, Soumya K and Ganguly, Niloy},\n\tmonth = dec,\n\tyear = {2022},\n\tnote = {ISBN: 2212.13897\nPublication Title: arXiv [cs.IR]},\n}\n\n\n
@unpublished{lopes_recommendations_2022,\n\ttitle = {Recommendations with {Minimum} {Exposure} {Guarantees}: {A} {Post}-{Processing} {Framework}},\n\turl = {https://papers.ssrn.com/abstract=4274780},\n\tabstract = {Relevance-based ranking is a popular ingredient in recommenders, but it\nfrequently struggles to meet fairness criteria because social and cultural\nnorms may favor some item groups over others. For instance, some items\nmight receive lower ratings due to some sort of bias (e.g. gender bias). A\nfair ranking should balance the exposure of items from advantaged and\ndisadvantagedgroups. To this end, we propose a novel post-processing\nframework to produce fair, exposure-aware recommendations. Our approach is\nbased on an integer linear programming model maximizing the expected\nutility while satisfying a minimum exposure constraint. The model has\nfewer variables than previous work and thus can be deployed to larger\ndatasets and allows the organization to define a minimum level of exposure\nfor groups of items. We conduct an extensive empirical evaluation\nindicating that our new framework can increase the exposure of items from\ndisadvantaged groups at a small cost of recommendation accuracy.},\n\turldate = {2022-12-23},\n\tauthor = {Lopes, Ramon and Alves, Rodrigo and Ledent, Antoine and Santos, Rodrygo and Kloft, Marius},\n\tmonth = nov,\n\tyear = {2022},\n\tdoi = {10.2139/ssrn.4274780},\n\tkeywords = {recommender systems, fairness, exposure, integer linear programming},\n}\n\n\n
@article{balchanowski_collaborative_2022,\n\ttitle = {Collaborative {Rank} {Aggregation} in {Recommendation} {Systems}},\n\tvolume = {207},\n\tissn = {1877-0509},\n\turl = {https://www.sciencedirect.com/science/article/pii/S187705092201167X},\n\tdoi = {10.1016/j.procs.2022.09.281},\n\tabstract = {Over the years, various techniques of generating recommendations have been\ndeveloped. However, it turns out that when we compare the recommendations\ngenerated by different algorithms in the context of a particular user, the\nquality of such recommendations for different techniques may differ. The\nuse of the aggregation techniques, the aim of which is to combine several\nrankings into one, can be a solution to this problem. In theory it should\nimprove the quality of the recommendations. Additionally, in order to\npersonalize the recommendations better, a metaheuristic algorithm, which,\nby assigning different weights to each feature, tries to represent the\npreference of the active user, was used. This paper also presents a\nsuggestion to include additional rankings generated for other users in the\nsystem in the aggregation process. The idea will be supported by research\nresults that clearly show that taking into account rankings of other users\ncan improve the quality of the generated recommendations.},\n\tjournal = {Procedia Comput. Sci.},\n\tauthor = {Bałchanowski, Michał and Boryczka, Urszula},\n\tmonth = jan,\n\tyear = {2022},\n\tkeywords = {differential evolution, metaheuristic, rank aggregation, recommendation systems},\n\tpages = {2213--2222},\n}\n\n\n
@inproceedings{wegmeth_camels_2022,\n\ttitle = {{CaMeLS}: {Cooperative} meta-learning service for recommender systems},\n\turl = {https://ceur-ws.org/Vol-3228/paper2.pdf},\n\tabstract = {We present CaMeLS, a proof of concept of a cooperative meta-learning\nservice for recommender systems. CaMeLS leverages the computing power of\nrecommender systems users by uploading their metadata and algorithm\nevaluation scores to a centralized environment. Through the resulting\ndatabase, CaMeLS then offers meta-learning services for everyone.\nAdditionally, users may access evaluations of common data sets immediately\nto know the best-performing algorithms for those data sets. The metadata\ntable may also be used for other purposes, e.g., to perform benchmarks. In\nthe initial version discussed in this paper, CaMeLS implements automatic\nalgorithm selection through meta-learning over two recommender systems\nlibraries. Automatic algorithm selection saves users time and computing\npower and does not require expertise, as the best algorithm is\nautomatically found over multiple libraries. The CaMeLS database contains\n20 metadata sets by default. We show that the automatic algorithm\nselection service is already on par with the single best algorithm in this\ndefault scenario. CaMeLS only requires a few seconds to predict a suitable\nalgorithm, rather than potentially hours or days if performed manually,\ndepending on the data set. The code is publicly available on our GitHub\nhttps://camels.recommender-systems.com.},\n\turldate = {2022-11-13},\n\tpublisher = {CEUR-WS},\n\tauthor = {Wegmeth, Lukas and Beel, Joeran},\n\tmonth = sep,\n\tyear = {2022},\n}\n\n\n
@unpublished{halpern_representation_2022,\n\ttitle = {Representation with {Incomplete} {Votes}},\n\turl = {http://arxiv.org/abs/2211.15608},\n\tabstract = {Platforms for online civic participation rely heavily on methods for\ncondensing thousands of comments into a relevant handful, based on whether\nparticipants agree or disagree with them. These methods should guarantee\nfair representation of the participants, as their outcomes may affect the\nhealth of the conversation and inform impactful downstream decisions. To\nthat end, we draw on the literature on approval-based committee elections.\nOur setting is novel in that the approval votes are incomplete since\nparticipants will typically not vote on all comments. We prove that this\ncomplication renders non-adaptive algorithms impractical in terms of the\namount of information they must gather. Therefore, we develop an adaptive\nalgorithm that uses information more efficiently by presenting incoming\nparticipants with statements that appear promising based on votes by\nprevious participants. We prove that this method satisfies commonly used\nnotions of fair representation, even when participants only vote on a\nsmall fraction of comments. Finally, an empirical evaluation using real\ndata shows that the proposed algorithm provides representative outcomes in\npractice.},\n\tauthor = {Halpern, Daniel and Kehne, Gregory and Procaccia, Ariel D and Tucker-Foltz, Jamie and Wüthrich, Manuel},\n\tmonth = nov,\n\tyear = {2022},\n\tnote = {ISBN: 2211.15608\nPublication Title: arXiv [cs.GT]},\n}\n\n\n
@article{lu_user_2022,\n\ttitle = {User {Perception} of {Recommendation} {Explanation}: {Are} {Your} {Explanations} {What} {Users} {Need}?},\n\tissn = {1094-9224},\n\turl = {https://doi.org/10.1145/3565480},\n\tdoi = {10.1145/3565480},\n\tabstract = {As recommender systems become increasingly important in daily human\ndecision-making, users are demanding convincing explanations to understand\nthey get the specific recommendation results. Although a number of\nexplainable recommender systems have recently been proposed, there still\nlacks an understanding of what users really need in a recommendation\nexplanation. The actual reason behind users’ intention to examine and\nconsume (e.g., click and watch a movie) can be the window to answer this\nquestion and is named as self-explanation in this work. In addition,\nhumans usually make recommendations accompanied by explanations, but there\nremain fewer studies on how humans explain and what we can learn from\nhuman-generated explanations. To investigate these questions, we conduct a\nnovel multi-role, multi-session user study in which users interact with\nmultiple types of system-generated explanations as well as human-generated\nexplanations, namely peer-explanation. During the study, users’\nintentions, expectations, and experiences are tracked in several phases,\nincluding before and after the users are presented with an explanation and\nafter the content is examined. Through comprehensive investigations, three\nmain findings have been made: First, we observe not only the positive but\nalso the negative effects of explanations, and the impact varies across\ndifferent types of explanations. Moreover, human-generated explanation,\npeer-explanation, performs better in increasing user intentions and\nhelping users to better construct preferences, which results in better\nuser satisfaction. 
Second, based on users’ self-explanation, the\ninformation accuracy is measured and found to be a major factor associated\nwith user satisfaction. Some other factors, such as unfamiliarity and\nsimilarity, are also discovered and summarized. Third, through annotations\nof the information aspects used in the human-generated self-explanation\nand peer-explanation, patterns of how humans explain are investigated,\nincluding what information and how much information is utilized. In\naddition, based on the findings, a human-inspired explanation approach is\nproposed and found to increase user satisfaction, revealing the potential\nimprovement of further incorporating more human patterns in recommendation\nexplanations. These findings have shed light on the deeper understanding\nof the recommendation explanation and further research on its evaluation\nand generation. Furthermore, the collected data, including human-generated\nexplanations by both the external peers and the users’ selves, will be\nreleased to support future research works on explanation evaluation.},\n\tjournal = {ACM Trans. Inf. Syst.},\n\tauthor = {Lu, Hongyu and Ma, Weizhi and Wang, Yifan and Zhang, Min and Wang, Xiang and Liu, Yiqun and Chua, Tat-Seng and Ma, Shaoping},\n\tmonth = nov,\n\tyear = {2022},\n\tnote = {Place: New York, NY, USA\nPublisher: Association for Computing Machinery},\n\tkeywords = {Recommendation Explanation, User Modeling, Recommender System},\n}\n\n\n
@article{liu_new_2022,\n\ttitle = {A {New} {Collaborative} {Filtering} {Algorithm} {Integrating} {Time} and {Multisimilarity}},\n\tvolume = {2022},\n\tissn = {1024-123X},\n\turl = {https://www.hindawi.com/journals/mpe/2022/2340671/},\n\tdoi = {10.1155/2022/2340671},\n\tabstract = {Aiming at the problem of low recommendation accuracy of existing\nrecommendation algorithms, an algorithm integrating time factors and\nmultisimilarity is proposed to improve the impact of long-term data, user\nattention, and project popularity on the recommendation algorithm and the\nsimilarity of user attributes is introduced to improve the problem of cold\nstart to a certain extent. Considering that the longer the time, the less\nlikely it is to be selected again, time is introduced into the algorithm\nas a weight factor. When the behavior occurs, i.e., interest in the\nproject, so as to judge the similarity between users, not just the score\nvalue, we normalize the popularity to avoid misjudgment of high scoring\nand popular items. Because new users do not have past score records, the\nproblem of cold start can be solved by calculating the similarity of user\nattributes. Through the comparative experiment on Movielens100K dataset\nand Epinions dataset, the results show that the algorithm can improve the\naccuracy of recommendation and give users a better recommendation effect.},\n\turldate = {2022-09-05},\n\tjournal = {Math. Probl. Eng.},\n\tauthor = {Liu, Qin},\n\tmonth = aug,\n\tyear = {2022},\n\tnote = {Publisher: Hindawi},\n}\n\n\n
@unpublished{wegmeth_impact_2022,\n\ttitle = {The {Impact} of {Feature} {Quantity} on {Recommendation} {Algorithm} {Performance}: {A} {Movielens}-{100K} {Case} {Study}},\n\turl = {http://arxiv.org/abs/2207.08713},\n\tabstract = {Recent model-based Recommender Systems (RecSys) algorithms emphasize on\nthe use of features, also called side information, in their design similar\nto algorithms in Machine Learning (ML). In contrast, some of the most\npopular and traditional algorithms for RecSys solely focus on a given\nuser-item-rating relation without including side information. The goal of\nthis case study is to provide a performance comparison and assessment of\nRecSys and ML algorithms when side information is included. We chose the\nMovielens-100K data set since it is a standard for comparing RecSys\nalgorithms. We compared six different feature sets with varying quantities\nof features which were generated from the baseline data and evaluated on a\ntotal of 19 RecSys algorithms, baseline ML algorithms, Automated Machine\nLearning (AutoML) pipelines, and state-of-the-art RecSys algorithms that\nincorporate side information. The results show that additional features\nbenefit all algorithms we evaluated. However, the correlation between\nfeature quantity and performance is not monotonous for AutoML and RecSys.\nIn these categories, an analysis of feature importance revealed that the\nquality of features matters more than quantity. Throughout our\nexperiments, the average performance on the feature set with the lowest\nnumber of features is about 6\\% worse compared to that with the highest in\nterms of the Root Mean Squared Error. An interesting observation is that\nAutoML outperforms matrix factorization-based RecSys algorithms when\nadditional features are used. Almost all algorithms that can include side\ninformation have higher performance when using the highest quantity of\nfeatures. 
In the other cases, the performance difference is negligible\n({\\textless}1\\%). The results show a clear positive trend for the effect of feature\nquantity as well as the important effects of feature quality on the\nevaluated algorithms.},\n\tauthor = {Wegmeth, Lukas},\n\tmonth = jul,\n\tyear = {2022},\n\tnote = {ISBN: 2207.08713\nPublication Title: arXiv [cs.IR]},\n}\n\n\n
@phdthesis{fortes_enhancing_2022,\n\ttitle = {Enhancing the {Multi}-{Objective} {Recommendation} from three new perspectives: data characterization, risk-sensitiveness, and prioritization of the objectives},\n\turl = {https://repositorio.ufmg.br/bitstream/1843/43915/1/Reinaldo_Silva_Fortes_Tese_UFMG.pdf},\n\tschool = {Universidade Federal de Minas Gerais},\n\tauthor = {Fortes, Reinaldo Silva},\n\tcollaborator = {Gonçalves, Marcos André},\n\tyear = {2022},\n\tnote = {Publication Title: Ciência da Computação\nVolume: Doctoral},\n}\n\n\n
@article{ekstrand_exploring_2021,\n\ttitle = {Exploring author gender in book rating and recommendation},\n\tvolume = {31},\n\tissn = {0924-1868},\n\turl = {https://md.ekstrandom.net/pubs/bag-extended},\n\tdoi = {10.1007/s11257-020-09284-2},\n\tabstract = {Collaborative filtering algorithms find useful patterns in rating and consumption data and exploit these patterns to guide users to good items. Many of the patterns in rating datasets reflect important real-world differences between the various users and items in the data; other patterns may be irrelevant or possibly undesirable for social or ethical reasons, particularly if they reflect undesired discrimination, such as discrimination in publishing or purchasing against authors who are women or ethnic minorities. In this work, we examine the response of collaborative filtering recommender algorithms to the distribution of their input data with respect to a dimension of social concern, namely content creator gender. Using publicly-available book ratings data, we measure the distribution of the genders of the authors of books in user rating profiles and recommendation lists produced from this data. We find that common collaborative filtering algorithms differ in the gender distribution of their recommendation lists, and in the relationship of that output distribution to user profile distribution.},\n\tnumber = {3},\n\turldate = {2020-06-05},\n\tjournal = {User Modeling and User-Adapted Interaction},\n\tauthor = {Ekstrand, Michael D and Kluver, Daniel},\n\tmonth = jul,\n\tyear = {2021},\n\tpages = {377--420},\n}\n\n\n
@inproceedings{quang-hung_exploring_2021,\n\ttitle = {Exploring {Set}-{Inspired} {Similarity} {Measures} for {Collaborative} {Filtering} {Recommendation}},\n\turl = {https://iisi.siit.tu.ac.th/KSE2021/uploads_final/79__e291caf8e3a4ca8c47e9ed657a1d0f76/[KSE2021-0079]-Final-camera-ready-paper.pdf},\n\tauthor = {Le, Quang-Hung and Le, Thi-Xinh},\n\tmonth = nov,\n\tyear = {2021},\n}\n\n\n
@article{slokom_towards_2021,\n\ttitle = {Towards user-oriented privacy for recommender system data: {A} personalization-based approach to gender obfuscation for user profiles},\n\tvolume = {58},\n\tissn = {0306-4573},\n\turl = {https://www.sciencedirect.com/science/article/pii/S0306457321002065},\n\tdoi = {10.1016/j.ipm.2021.102722},\n\tabstract = {In this paper, we propose a new privacy solution for the data used to\ntrain a recommender system, i.e., the user–item matrix. The user–item\nmatrix contains implicit information, which can be inferred using a\nclassifier, leading to potential privacy violations. Our solution, called\nPersonalized Blurring (PerBlur), is a simple, yet effective, approach to\nadding and removing items from users’ profiles in order to generate an\nobfuscated user–item matrix. The novelty of PerBlur is personalization of\nthe choice of items used for obfuscation to the individual user profiles.\nPerBlur is formulated within a user-oriented paradigm of recommender\nsystem data privacy that aims at making privacy solutions understandable,\nunobtrusive, and useful for the user. When obfuscated data is used for\ntraining, a recommender system algorithm is able to reach performance\ncomparable to what is attained when it is trained on the original,\nunobfuscated data. At the same time, a classifier can no longer reliably\nuse the obfuscated data to predict the gender of users, indicating that\nimplicit gender information has been removed. In addition to introducing\nPerBlur, we make several key contributions. First, we propose an\nevaluation protocol that creates a fair environment to compare between\ndifferent obfuscation conditions. Second, we carry out experiments that\nshow that gender obfuscation impacts the fairness and diversity of\nrecommender system results. 
In sum, our work establishes that a simple,\ntransparent approach to gender obfuscation can protect user privacy while\nat the same time improving recommendation results for users by maintaining\nfairness and enhancing diversity.},\n\tnumber = {6},\n\tjournal = {Inf. Process. Manag.},\n\tauthor = {Slokom, Manel and Hanjalic, Alan and Larson, Martha},\n\tmonth = nov,\n\tyear = {2021},\n\tkeywords = {Diversity, Evaluation, Fairness, Gender inference, Obfuscation, Privacy, Top-N recommendation},\n\tpages = {102722},\n}\n\n\n
@unpublished{mao_application_2021,\n\ttitle = {Application of {Knowledge} {Graphs} to {Provide} {Side} {Information} for {Improved} {Recommendation} {Accuracy}},\n\turl = {http://arxiv.org/abs/2101.03054},\n\tabstract = {Personalized recommendations are popular in these days of Internet driven\nactivities, specifically shopping. Recommendation methods can be grouped\ninto three major categories, content based filtering, collaborative\nfiltering and machine learning enhanced. Information about products and\npreferences of different users are primarily used to infer preferences for\na specific user. Inadequate information can obviously cause these methods\nto fail or perform poorly. The more information we provide to these\nmethods, the more likely it is that the methods perform better. Knowledge\ngraphs represent the current trend in recording information in the form of\nrelations between entities, and can provide additional (side) information\nabout products and users. Such information can be used to improve nearest\nneighbour search, clustering users and products, or train the neural\nnetwork, when one is used. In this work, we present a new generic\nrecommendation systems framework, that integrates knowledge graphs into\nthe recommendation pipeline. We describe its software design and\nimplementation, and then show through experiments, how such a framework\ncan be specialized for a domain, say movie recommendations, and the\nimprovements in recommendation results possible due to side information\nobtained from knowledge graphs representation of such information. Our\nframework supports different knowledge graph representation formats, and\nfacilitates format conversion, merging and information extraction needed\nfor training recommendation methods.},\n\tauthor = {Mao, Yuhao and Mokhov, Serguei A and Mudur, Sudhir P},\n\tmonth = jan,\n\tyear = {2021},\n\tdoi = {10.48550/arXiv.2101.03054},\n\tnote = {ISBN: 2101.03054\nPublication Title: arXiv [cs.IR]},\n}\n\n\n
@inproceedings{scheidt_time-dependent_2021,\n\ttitle = {Time-dependent evaluation of recommender systems},\n\tvolume = {2955},\n\turl = {https://perspectives-ws.github.io/2021/proceedings/paper10.pdf},\n\turldate = {2021-09-27},\n\tbooktitle = {{CEUR}-{WS}},\n\tauthor = {Scheidt, Teresa and Beel, Joeran},\n\tmonth = sep,\n\tyear = {2021},\n\tnote = {Journal Abbreviation: CEUR-WS},\n}\n\n\n
@inproceedings{mudur_framework_2021,\n\taddress = {New York, NY, USA},\n\ttitle = {A {Framework} for {Enhancing} {Deep} {Learning} {Based} {Recommender} {Systems} with {Knowledge} {Graphs}},\n\turl = {https://doi.org/10.1145/3472163.3472183},\n\tdoi = {10.1145/3472163.3472183},\n\tabstract = {Recommendation methods fall into three major categories, content based\nfiltering, collaborative filtering and deep learning based. Information\nabout products and the preferences of earlier users are used in an\nunsupervised manner to create models which help make personalized\nrecommendations to a specific new user. The more information we provide to\nthese methods, the more likely it is that they yield better\nrecommendations. Deep learning based methods are relatively recent, and\nare generally more robust to noise and missing information. This is\nbecause deep learning models can be trained even when some of the\ninformation records have partial information. Knowledge graphs represent\nthe current trend in recording information in the form of relations\nbetween entities, and can provide any available information about products\nand users. This information is used to train the recommendation model. In\nthis work, we present a new generic recommender systems framework, that\nintegrates knowledge graphs into the recommendation pipeline. We describe\nits design and implementation, and then show through experiments, how such\na framework can be specialized, taking the domain of movies as an example,\nand the resulting improvements in recommendations made possible by using\nall the information obtained using knowledge graphs. 
Our framework, to be\nmade publicly available, supports different knowledge graph representation\nformats, and facilitates format conversion, merging and information\nextraction needed for training recommendation models.},\n\turldate = {2021-09-14},\n\tbooktitle = {{IDEAS} 2021},\n\tpublisher = {Association for Computing Machinery},\n\tauthor = {Mudur, Sudhir P and Mokhov, Serguei A and Mao, Yuhao},\n\tmonth = jul,\n\tyear = {2021},\n\tnote = {Journal Abbreviation: IDEAS 2021},\n\tkeywords = {framework, knowledge graphs, recommendation model training, recommender system, deep learning based recommendations},\n\tpages = {11--20},\n}\n\n\n
@unpublished{lucherini_t-recs_2021,\n\ttitle = {T-{RECS}: {A} simulation tool to study the societal impact of recommender systems},\n\turl = {http://arxiv.org/abs/2107.08959},\n\tabstract = {Simulation has emerged as a popular method to study the long-term societal\nconsequences of recommender systems. This approach allows researchers to\nspecify their theoretical model explicitly and observe the evolution of\nsystem-level outcomes over time. However, performing simulation-based\nstudies often requires researchers to build their own simulation\nenvironments from the ground up, which creates a high barrier to entry,\nintroduces room for implementation error, and makes it difficult to\ndisentangle whether observed outcomes are due to the model or the\nimplementation. We introduce T-RECS, an open-sourced Python package\ndesigned for researchers to simulate recommendation systems and other\ntypes of sociotechnical systems in which an algorithm mediates the\ninteractions between multiple stakeholders, such as users and content\ncreators. To demonstrate the flexibility of T-RECS, we perform a\nreplication of two prior simulation-based research on sociotechnical\nsystems. We additionally show how T-RECS can be used to generate novel\ninsights with minimal overhead. Our tool promotes reproducibility in this\narea of research, provides a unified language for simulating\nsociotechnical systems, and removes the friction of implementing\nsimulations from scratch.},\n\tauthor = {Lucherini, Eli and Sun, Matthew and Winecoff, Amy and Narayanan, Arvind},\n\tmonth = jul,\n\tyear = {2021},\n\tnote = {ISBN: 2107.08959\nPublication Title: arXiv [cs.CY]},\n}\n\n\n
@inproceedings{li_new_2021,\n\taddress = {New York, NY, USA},\n\ttitle = {New {Insights} into {Metric} {Optimization} for {Ranking}-based {Recommendation}},\n\turl = {https://doi.org/10.1145/3404835.3462973},\n\tdoi = {10.1145/3404835.3462973},\n\tabstract = {Direct optimization of IR metrics has often been adopted as an approach to\ndevise and develop ranking-based recommender systems. Most methods\nfollowing this approach (e.g. TFMAP, CLiMF, Top-N-Rank) aim at optimizing\nthe same metric being used for evaluation, under the assumption that this\nwill lead to the best performance. A number of studies of this practice\nbring this assumption, however, into question. In this paper, we dig\ndeeper into this issue in order to learn more about the effects of the\nchoice of the metric to optimize on the performance of a ranking-based\nrecommender system. We present an extensive experimental study conducted\non different datasets in both pairwise and listwise learning-to-rank (LTR)\nscenarios, to compare the relative merit of four popular IR metrics,\nnamely RR, AP, nDCG and RBP, when used for optimization and assessment of\nrecommender systems in various combinations. For the first three, we\nfollow the practice of loss function formulation available in literature.\nFor the fourth one, we propose novel loss functions inspired by RBP for\nboth the pairwise and listwise scenario. Our results confirm that the best\nperformance is indeed not necessarily achieved when optimizing the same\nmetric being used for evaluation. In fact, we find that RBP-inspired\nlosses perform at least as well as other metrics in a consistent way, and\noffer clear benefits in several cases. Interesting to see is that\nRBP-inspired losses, while improving the recommendation performance for\nall uses, may lead to an individual performance gain that is correlated\nwith the activity level of a user in interacting with items. The more\nactive the users, the more they benefit. 
Overall, our results challenge\nthe assumption behind the current research practice of optimizing and\nevaluating the same metric, and point to RBP-based optimization instead as\na promising alternative when learning to rank in the recommendation\ncontext.},\n\turldate = {2021-07-15},\n\tbooktitle = {{SIGIR} '21},\n\tpublisher = {Association for Computing Machinery},\n\tauthor = {Li, Roger Zhe and Urbano, Julián and Hanjalic, Alan},\n\tmonth = jul,\n\tyear = {2021},\n\tnote = {Journal Abbreviation: SIGIR '21},\n\tkeywords = {evaluation metrics, learning to rank, recommender systems},\n\tpages = {932--941},\n}\n\n\n
@article{ashokan_fairness_2021,\n\ttitle = {Fairness metrics and bias mitigation strategies for rating predictions},\n\tvolume = {58},\n\tissn = {0306-4573},\n\turl = {https://www.sciencedirect.com/science/article/pii/S0306457321001369},\n\tdoi = {10.1016/j.ipm.2021.102646},\n\tabstract = {Algorithm fairness is an established line of research in the machine\nlearning domain with substantial work while the equivalent in the\nrecommender system domain is relatively new. In this article, we consider\nrating-based recommender systems which model the recommendation process as\na prediction problem. We consider different types of biases that can occur\nin this setting, discuss various fairness definitions, and also propose a\nnovel bias mitigation strategy to address potential unfairness in a\nrating-based recommender system. Based on an analysis of fairness metrics\nused in machine learning and a discussion of their applicability in the\nrecommender system domain, we map the proposed metrics from the two\ndomains and identify commonly used concepts and definitions of fairness.\nFinally, to address unfairness and potential bias against certain groups\nin a recommender system, we develop a bias mitigation algorithm and\nconduct case studies on one synthetic and one empirical dataset to show\nits effectiveness. Our results show that unfairness can be significantly\nlowered through our approach and that bias mitigation is a fruitful area\nof research for recommender systems.},\n\tnumber = {5},\n\tjournal = {Inf. Process. Manag.},\n\tauthor = {Ashokan, Ashwathy and Haas, Christian},\n\tmonth = sep,\n\tyear = {2021},\n\tkeywords = {Algorithmic fairness, Bias mitigation, Fairness metrics, Recommender systems},\n\tpages = {102646},\n}\n\n\n
@inproceedings{lu_standing_2021,\n\ttitle = {Standing in {Your} {Shoes}: {External} {Assessments} for {Personalized} {Recommender} {Systems}},\n\turl = {http://dx.doi.org/10.1145/3404835.3462916},\n\tdoi = {10.1145/3404835.3462916},\n\tbooktitle = {{SIGIR} '21},\n\tpublisher = {Association for Computing Machinery},\n\tauthor = {Lu, Hongyu and Ma, Weizhi and Zhang, Min and de Rijke, Maarten and Liu, Yiqun and Ma, Shaoping},\n\tyear = {2021},\n}\n\n\n
@mastersthesis{noguera_torres_sistema_2021,\n\ttitle = {Sistema de recomendación de matrícula de cursos electivos para estudiantes de {Ingeniería} {Electrónica} e {Ingeniería} de {Telecomunicaciones} de la {UNAD}},\n\turl = {https://repository.unad.edu.co/handle/10596/40465},\n\turldate = {2021-05-12},\n\tschool = {Universidad Nacional Abierta y a Distancia},\n\tauthor = {Noguera Torres, Adriana del Pilar},\n\tcollaborator = {Rúa Pérez, Santiago},\n\tmonth = apr,\n\tyear = {2021},\n\tnote = {Volume: MS in Information Technology},\n}\n\n\n
@inproceedings{diaz_evaluating_2020,\n\tseries = {{CIKM} '20},\n\ttitle = {Evaluating stochastic rankings with expected exposure},\n\turl = {http://arxiv.org/abs/2004.13157},\n\tdoi = {10.1145/3340531.3411962},\n\tabstract = {We introduce the concept of expected exposure as the average attention\nranked items receive from users over repeated samples of the same query.\nFurthermore, we advocate for the adoption of the principle of equal\nexpected exposure: given a fixed information need, no item receive more or\nless expected exposure compared to any other item of the same relevance\ngrade. We argue that this principle is desirable for many retrieval\nobjectives and scenarios, including topical diversity and fair ranking.\nLeveraging user models from existing retrieval metrics, we propose a\ngeneral evaluation methodology based on expected exposure and draw\nconnections to related metrics in information retrieval evaluation.\nImportantly, this methodology relaxes classic information retrieval\nassumptions, allowing a system, in response to a query, to produce a\ndistribution over rankings instead of a single fixed ranking. We study the\nbehavior of the expected exposure metric and stochastic rankers across a\nvariety of information access conditions, including ad hoc retrieval and\nrecommendation. We believe that measuring and optimizing expected exposure\nmetrics using randomization opens a new area for retrieval algorithm\ndevelopment and progress.},\n\tbooktitle = {Proceedings of the 29th {ACM} {International} {Conference} on {Information} and {Knowledge} {Management}},\n\tpublisher = {ACM},\n\tauthor = {Diaz, Fernando and Mitra, Bhaskar and Ekstrand, Michael D and Biega, Asia J and Carterette, Ben},\n\tmonth = oct,\n\tyear = {2020},\n}\n\n\n
@unpublished{raj_comparing_2020,\n\ttitle = {Comparing fair ranking metrics},\n\turl = {http://arxiv.org/abs/2009.01311},\n\tabstract = {Ranking is a fundamental aspect of recommender systems. However, ranked\noutputs can be susceptible to various biases; some of these may cause\ndisadvantages to members of protected groups. Several metrics have been\nproposed to quantify the (un)fairness of rankings, but there has not been\nto date any direct comparison of these metrics. This complicates deciding\nwhat fairness metrics are applicable for specific scenarios, and assessing\nthe extent to which metrics agree or disagree. In this paper, we describe\nseveral fair ranking metrics in a common notation, enabling direct\ncomparison of their approaches and assumptions, and empirically compare\nthem on the same experimental setup and data set. Our work provides a\ndirect comparative analysis identifying similarities and differences of\nfair ranking metrics selected for our work.},\n\tauthor = {Raj, Amifa and Wood, Connor and Montoly, Ananda and Ekstrand, Michael D},\n\tmonth = sep,\n\tyear = {2020},\n\tnote = {arXiv:2009.01311},\n}\n\n\n
@inproceedings{tian_estimating_2020,\n\tseries = {{CHIIR} '20},\n\ttitle = {Estimating {Error} and {Bias} in {Offline} {Evaluation} {Results}},\n\turl = {https://doi.org/10.1145/3343413.3378004},\n\tdoi = {10.1145/3343413.3378004},\n\turldate = {2020-03-26},\n\tbooktitle = {Proceedings of the 2020 {Conference} on {Human} {Information} {Interaction} and {Retrieval}},\n\tpublisher = {ACM},\n\tauthor = {Tian, Mucun and Ekstrand, Michael D},\n\tmonth = mar,\n\tyear = {2020},\n\tnote = {Journal Abbreviation: CHIIR '20},\n\tkeywords = {offline evaluation, simulation},\n\tpages = {392--396},\n}\n\n\n
@inproceedings{chen_impact_2020,\n\ttitle = {Impact of {End}-{User} {Privacy} {Enhancing} {Technologies} ({PETs}) on {Firms}’ {Analytics} {Performance}},\n\turl = {https://aisel.aisnet.org/icis2020/digital_commerce/digital_commerce/3/},\n\tabstract = {Big data analytics in digital commerce requires vast amounts of personal\ninformation from consumers, but this gives rise to major privacy concerns.\nTo combat the threat of privacy invasion, individuals are proactively\nadopting privacy enhancing technologies (PETs) to protect their personal\ninformation. Consumers’ adoption of PETs may hamper firms’ big data\nanalytics capabilities and performance but our knowledge of how PETs\nimpact firms’ data analytics is limited. This study proposes an\ninductively derived framework which qualitatively shows that end-user PETs\ninduce measurement error and/or missing values with regards to attributes,\nentities and relationships in firms’ customer databases, but the impacts\nof specific end-user PETs may vary by analytics use case. Our simulation\nexperiments in the context of product recommendations quantitively find\nthat consumers’ adoption characteristics (adoption rate and pattern) and\nPETs characteristics (protection mechanism and intensity) significantly\naffect the performance of recommender systems.},\n\turldate = {2021-01-28},\n\tauthor = {Chen, Dawei and Hahn, Jungpil},\n\tyear = {2020},\n}\n\n\n
@inproceedings{paullier_recommender_2020,\n\ttitle = {A {Recommender} {Systems}’ algorithm evaluation using the {Lenskit} library and {MovieLens} databases},\n\turl = {http://dx.doi.org/10.1109/BMSB49480.2020.9379914},\n\tdoi = {10.1109/BMSB49480.2020.9379914},\n\tabstract = {A wide variety of algorithms can be found in the recommender system's\nliterature for computing predictions and obtaining recommendation lists.\nThese algorithms' performance can be evaluated by numerous metrics\ndepending on which business goal is intended to be optimized. However,\nperformance can vary drastically depending on the selected algorithm as\nwell as the selected metric. In addition, algorithmic performance can be\nheavily influenced by the experimental setup such as: dataset,\ncross-validation strategy, recommender framework and the model\nhyper-parameters chosen. In this work we will compare nine different\nalgorithms used for generating recommendations across seven different\nmetrics. One vital aspect of our analysis is to provide a detailed and\nclear explanation of the methodology carried out as well as the\nexperimental setup in order to ensure future reproducibility. Moreover,\nperformance and computation times will be introduced for further\ncomparison. Finally, optimal model hyper-parameter combinations will be\npresented for benchmarking purposes.},\n\tauthor = {Paullier, Alejo and Sotelo, Rafael},\n\tmonth = oct,\n\tyear = {2020},\n\tkeywords = {Algorithms, Business, Computational modeling, Databases, Evaluation, LensKit, Libraries, MovieLens, Multimedia communication, Prediction algorithms, Recommender systems, Reproducibility of results},\n\tpages = {1--7},\n}\n\n\n
@incollection{ziegler_empfehlungssysteme_2020,\n\taddress = {Wiesbaden},\n\ttitle = {Empfehlungssysteme},\n\tisbn = {978-3-658-17291-6},\n\turl = {https://doi.org/10.1007/978-3-658-17291-6_52},\n\tabstract = {Empfehlungssysteme stellen heute eine zentrale Komponente vieler\nOnline-Plattformen dar, die bei Online-Shops und vielen anderen\nAnwendungen häufig zum Einsatz kommt. Ziel ist es, dem Kunden entsprechend\nseinen persönlichen Präferenzen Produkte oder andere Artikel\nvorzuschlagen, die für ihn von Interesse sind und potenziell zu einem Kauf\noder generell zur Nutzung führen. Empfehlungssysteme haben eine erhebliche\nwirtschaftliche Bedeutung, da sie in vielen Fällen zu einem signifikanten\nAnteil zu Erfolgsfaktoren wie Click-through-Raten oder Käufen beitragen.\nWir stellen in diesem Kapitel die unterschiedlichen Ansätze zur\nautomatisierten Empfehlungsgebung vor und beschreiben konkrete Techniken\nzu deren Umsetzung. Weiterhin gehen wir auf wesentliche Aspekte der\nGestaltung und Bewertung von Empfehlungssystemen ein und diskutieren\nanwenderrelevante Themen wie Usability und Vertrauen in systemgenerierte\nEmpfehlungen.},\n\tbooktitle = {Handbuch {Digitale} {Wirtschaft}},\n\tpublisher = {Springer Fachmedien Wiesbaden},\n\tauthor = {Ziegler, Jürgen and Loepp, Benedikt},\n\teditor = {Kollmann, Tobias},\n\tyear = {2020},\n\tdoi = {10.1007/978-3-658-17291-6_52},\n\tpages = {717--741},\n}\n\n\n
@article{de_pessemier_evaluating_2020,\n\ttitle = {Evaluating facial recognition services as interaction technique for recommender systems},\n\tissn = {1380-7501},\n\turl = {https://doi.org/10.1007/s11042-020-09061-8},\n\tdoi = {10.1007/s11042-020-09061-8},\n\tabstract = {Recommender systems are tools and techniques to assist users in the\ncontent selection process thereby coping with the problem of information\noverload. For recommender systems, user authentication and feedback\ngathering are of crucial importance. However, the typical user\nauthentication with username / password and feedback method with a star\nrating system are not user friendly and often bypassed. This article\nproposes an alternative method for user authentication based on facial\nrecognition and an automatic feedback gathering method by detecting\nvarious face characteristics such as emotions. We studied the use case of\nvideo watching. Photos made with the front-facing camera of a tablet,\nsmartphone, or smart TV are used as input of a facial recognition service.\nThe persons in front of the screen can be identified. During video\nwatching, implicit feedback for the video content is automatically\ngathered through emotion recognition, attention measurements, and behavior\nanalysis. An evaluation with a test panel showed that the recognized\nemotions are correlated with the user’s star ratings and that happiness\ncan be most accurately detected. So as the main contribution, this article\nindicates that emotion recognition might be used as an alternative\nfeedback mechanism for recommender systems.},\n\tjournal = {Multimed. Tools Appl.},\n\tauthor = {De Pessemier, Toon and Coppens, Ine and Martens, Luc},\n\tmonth = jun,\n\tyear = {2020},\n}\n\n\n
@article{ng_cbrec_2020,\n\ttitle = {{CBRec}: a book recommendation system for children using the matrix factorisation and content-based filtering approaches},\n\tvolume = {16},\n\tissn = {1743-8187},\n\turl = {https://www.inderscienceonline.com/doi/abs/10.1504/IJBIDM.2020.104738},\n\tdoi = {10.1504/IJBIDM.2020.104738},\n\tabstract = {Promoting good reading habits among children is essential, given the\nenormous influence of reading on students' development as learners and\nmembers of the society. Unfortunately, very few (children) websites or\nonline applications recommend books to children, even though they can play\na significant role in encouraging children to read. Given that a few\npopular book websites suggest books to children based on the popularity of\nbooks or rankings on books, they are not customised/personalised for each\nindividual user and likely recommend books that users do not want or like.\nWe have integrated the matrix factorisation approach and the content-based\napproach, in addition to predicting the grade levels of books, to\nrecommend books for children. Recent research works have demonstrated that\na hybrid approach, which combines different filtering approaches, is more\neffective in making recommendations. Conducted empirical study has\nverified the effectiveness of our proposed children book recommendation\nsystem.},\n\tnumber = {2},\n\tjournal = {International Journal of Business Intelligence and Data Mining},\n\tauthor = {Ng, Yiu-Kai},\n\tmonth = jan,\n\tyear = {2020},\n\tnote = {Publisher: Inderscience Publishers},\n\tpages = {129--149},\n}\n\n
@mastersthesis{narayan_what_2019,\n\ttitle = {What is the {Value} of {Rating} {Obscure} {Items}? {An} {Analysis} of the {Effect} of {Less}-{Popular} {Items} on {Recommendation} {Quality}},\n\turl = {https://conservancy.umn.edu/handle/11299/206148},\n\tabstract = {Recommender systems designers believe that the system stands to benefit\nfrom the users rating items that do not have many ratings. However, the\neffect of this act of rating lesser known items on the user's\nrecommendations is unknown. This leads to asking the question of whether\nthese low popularity items affect the recommendations received by users.\nThis work looks at the effect less popular items have on a user's\nrecommendations and the prediction and recommendations metrics that\nquantify the quality of recommendations …},\n\tschool = {University of Minnesota},\n\tauthor = {Narayan, A},\n\tyear = {2019},\n\tnote = {Publication Title: Department of Computer Science\nVolume: M.S. in Computer Science},\n}\n\n\n
@inproceedings{pessemier_using_2019,\n\ttitle = {Using facial recognition services as implicit feedback for recommenders},\n\tvolume = {2450},\n\turl = {http://ceur-ws.org/Vol-2450/paper4.pdf},\n\tabstract = {User authentication and feedback gathering are crucial aspects for\nrecommender systems. The most common implementations, a username /\npassword login and star rating systems, require user interaction and a\ncognitive effort from the user. As a result, users opt to save their\npassword in the interface and optional feedback with a star rating system\nis often skipped, especially for applications such as video watching in a\nhome environment. In this article, we propose an alternative method for\nuser authentication based on facial recognition and an automatic feedback\ngathering method by detecting various face characteristics. Using facial\nrecognition with a camera in a tablet, smartphone, or smart TV, the\npersons in front of the screen can be identified in order to link video\nwatching sessions to their user profile. During video watching, implicit\nfeedback is automatically gathered through emotion recognition, attention\nmeasurements, and behavior analysis. An emotion fingerprint, which is\ndefined as a unique spectrum of expected emotions for a video scene, is\ncompared to the recognized emotions in order to estimate the experience of\na user while watching. An evaluation with a test panel showed that\nhappiness can be most accurately detected and the recognized emotions are\ncorrelated with the user’s star rating.},\n\tpublisher = {CEUR-WS},\n\tauthor = {De Pessemier, Toon and Coppens, Ine and Martens, Luc},\n\tyear = {2019},\n\tpages = {8},\n}\n\n\n
@inproceedings{wang_bayesian_2019,\n\ttitle = {Bayesian {Deep} {Learning} {Based} {Exploration}-{Exploitation} for {Personalized} {Recommendations}},\n\turl = {http://dx.doi.org/10.1109/ICTAI.2019.00253},\n\tdoi = {10.1109/ICTAI.2019.00253},\n\tabstract = {Personalized Recommendation Systems require an effective method to balance\nexploration and exploitation. To learn effective strategies, user and item\nattributes are critical data sources to capture contextual information. In\nthis paper, we first present an approach based on Bayesian Deep Learning\nto learn a compact representation of user and item attributes to guide\nexploitation. A key novelty of the approach lies in its ability to also\ncapture the uncertainty associated with the model output to guide\nexploration. We then show how to further boost exploration by\nincorporating model uncertainty with that of data uncertainty.\nExperimental results demonstrate the benefits of our approach in terms of\naccuracy in recommendations as well as its performance in an online\nsetting.},\n\tbooktitle = {{ICTAI} 2019},\n\tpublisher = {IEEE},\n\tauthor = {Wang, X. and Kadioglu, S.},\n\tmonth = nov,\n\tyear = {2019},\n\tkeywords = {Bayesian Deep Learning, ExplorationExploitation, Personalized Recommendation, Bayesian deep learning, boost exploration, compact representation, contextual information, critical data sources, data uncertainty, exploration-exploitation, information filtering, item attributes, learning (artificial intelligence), model output, model uncertainty, personalized recommendation systems, recommender systems},\n\tpages = {1715--1719},\n}\n\n\n
@incollection{varga_recommender_2019,\n\ttitle = {Recommender {Systems}},\n\tisbn = {978-1-4842-4858-4},\n\turl = {http://dx.doi.org/10.1007/978-1-4842-4859-1},\n\tbooktitle = {Practical {Data} {Science} with {Python} 3},\n\tpublisher = {Apress},\n\tauthor = {Varga, Ervin},\n\tyear = {2019},\n\tdoi = {10.1007/978-1-4842-4859-1},\n\tpages = {317--339},\n}\n\n\n
@mastersthesis{balseca_ninez_estudio_2019,\n\ttitle = {Estudio del estado del arte de la ciencia de datos aplicada a la neuroeconomía},\n\turl = {http://dspace.ups.edu.ec/handle/123456789/17605},\n\tschool = {Universidad Politécnica Salesiana},\n\tauthor = {Balseca Níñez, Christian Ricardo and Fernández Peñafiel, Daniel Andrés},\n\tyear = {2019},\n}\n\n\n