How many bits per rating? Kluver, D., Nguyen, T. T., Ekstrand, M., Sen, S., & Riedl, J. In RecSys '12, pages 99–106, New York, NY, USA, 2012. ACM.
Paper doi abstract bibtex Most recommender systems assume user ratings accurately represent user preferences. However, prior research shows that user ratings are imperfect and noisy. Moreover, this noise limits the measurable predictive power of any recommender system. We propose an information theoretic framework for quantifying the preference information contained in ratings and predictions. We computationally explore the properties of our model and apply our framework to estimate the efficiency of different rating scales for real world datasets. We then estimate how the amount of information predictions give to users is related to the scale ratings are collected on. Our findings suggest a tradeoff in rating scale granularity: while previous research indicates that coarse scales (such as thumbs up / thumbs down) take less time, we find that ratings with these scales provide less predictive value to users. We introduce a new measure, preference bits per second, to quantitatively reconcile this tradeoff.
@inproceedings{kluver_how_2012,
  author    = {Kluver, Daniel and Nguyen, Tien T. and Ekstrand, Michael and Sen, Shilad and Riedl, John},
  title     = {How many bits per rating?},
  booktitle = {Proceedings of the Sixth {ACM} Conference on Recommender Systems ({RecSys} '12)},
  year      = {2012},
  pages     = {99--106},
  publisher = {ACM},
  address   = {New York, NY, USA},
  doi       = {10.1145/2365952.2365974},
  url       = {https://doi.org/10.1145/2365952.2365974},
  urldate   = {2013-09-12},
  abstract  = {Most recommender systems assume user ratings accurately represent user
preferences. However, prior research shows that user ratings are imperfect
and noisy. Moreover, this noise limits the measurable predictive power of
any recommender system. We propose an information theoretic framework for
quantifying the preference information contained in ratings and
predictions. We computationally explore the properties of our model and
apply our framework to estimate the efficiency of different rating scales
for real world datasets. We then estimate how the amount of information
predictions give to users is related to the scale ratings are collected
on. Our findings suggest a tradeoff in rating scale granularity: while
previous research indicates that coarse scales (such as thumbs up / thumbs
down) take less time, we find that ratings with these scales provide less
predictive value to users. We introduce a new measure, preference bits per
second, to quantitatively reconcile this tradeoff.},
}
Downloads: 0
{"_id":"PsmtYmScLkzeB9i4Y","bibbaseid":"kluver-nguyen-ekstrand-sen-riedl-howmanybitsperrating-2012","authorIDs":[],"author_short":["Kluver, D.","Nguyen, T. T","Ekstrand, M.","Sen, S.","Riedl, J."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","address":"New York, NY, USA","title":"How many bits per rating?","url":"http://doi.acm.org/10.1145/2365952.2365974","doi":"10.1145/2365952.2365974","abstract":"Most recommender systems assume user ratings accurately represent user preferences. However, prior research shows that user ratings are imperfect and noisy. Moreover, this noise limits the measurable predictive power of any recommender system. We propose an information theoretic framework for quantifying the preference information contained in ratings and predictions. We computationally explore the properties of our model and apply our framework to estimate the efficiency of different rating scales for real world datasets. We then estimate how the amount of information predictions give to users is related to the scale ratings are collected on. Our findings suggest a tradeoff in rating scale granularity: while previous research indicates that coarse scales (such as thumbs up / thumbs down) take less time, we find that ratings with these scales provide less predictive value to users. 
We introduce a new measure, preference bits per second, to quantitatively reconcile this tradeoff.","urldate":"2013-09-12","booktitle":"RecSys '12","publisher":"ACM","author":[{"propositions":[],"lastnames":["Kluver"],"firstnames":["Daniel"],"suffixes":[]},{"propositions":[],"lastnames":["Nguyen"],"firstnames":["Tien","T"],"suffixes":[]},{"propositions":[],"lastnames":["Ekstrand"],"firstnames":["Michael"],"suffixes":[]},{"propositions":[],"lastnames":["Sen"],"firstnames":["Shilad"],"suffixes":[]},{"propositions":[],"lastnames":["Riedl"],"firstnames":["John"],"suffixes":[]}],"year":"2012","note":"Journal Abbreviation: RecSys '12","pages":"99–106","bibtex":"@inproceedings{kluver_how_2012,\n\taddress = {New York, NY, USA},\n\ttitle = {How many bits per rating?},\n\turl = {http://doi.acm.org/10.1145/2365952.2365974},\n\tdoi = {10.1145/2365952.2365974},\n\tabstract = {Most recommender systems assume user ratings accurately represent user\npreferences. However, prior research shows that user ratings are imperfect\nand noisy. Moreover, this noise limits the measurable predictive power of\nany recommender system. We propose an information theoretic framework for\nquantifying the preference information contained in ratings and\npredictions. We computationally explore the properties of our model and\napply our framework to estimate the efficiency of different rating scales\nfor real world datasets. We then estimate how the amount of information\npredictions give to users is related to the scale ratings are collected\non. Our findings suggest a tradeoff in rating scale granularity: while\nprevious research indicates that coarse scales (such as thumbs up / thumbs\ndown) take less time, we find that ratings with these scales provide less\npredictive value to users. 
We introduce a new measure, preference bits per\nsecond, to quantitatively reconcile this tradeoff.},\n\turldate = {2013-09-12},\n\tbooktitle = {{RecSys} '12},\n\tpublisher = {ACM},\n\tauthor = {Kluver, Daniel and Nguyen, Tien T and Ekstrand, Michael and Sen, Shilad and Riedl, John},\n\tyear = {2012},\n\tnote = {Journal Abbreviation: RecSys '12},\n\tpages = {99--106},\n}\n\n","author_short":["Kluver, D.","Nguyen, T. T","Ekstrand, M.","Sen, S.","Riedl, J."],"key":"kluver_how_2012","id":"kluver_how_2012","bibbaseid":"kluver-nguyen-ekstrand-sen-riedl-howmanybitsperrating-2012","role":"author","urls":{"Paper":"http://doi.acm.org/10.1145/2365952.2365974"},"metadata":{"authorlinks":{}},"downloads":0},"bibtype":"inproceedings","biburl":"https://api.zotero.org/users/6655/collections/TJPPJ92X/items?key=VFvZhZXIoHNBbzoLZ1IM2zgf&format=bibtex&limit=100","creationDate":"2020-03-27T02:34:35.368Z","downloads":0,"keywords":[],"search_terms":["many","bits","per","rating","kluver","nguyen","ekstrand","sen","riedl"],"title":"How many bits per rating?","year":2012,"dataSources":["5Dp4QphkvpvNA33zi","jfoasiDDpStqkkoZB","BiuuFc45aHCgJqDLY"]}