Tuning Bandit Algorithms in Stochastic Environments. Audibert, J., Munos, R., & Szepesvári, C. In *ALT*, pages 150–165, 2007. Springer. See audibert2009 for a longer, updated version. [Paper] [Abstract] [BibTeX] — 5 downloads. Abstract: Algorithms based on upper-confidence bounds for balancing exploration and exploitation are gaining popularity since they are easy to implement, efficient and effective. In this paper we consider a variant of the basic algorithm for the stochastic, multi-armed bandit problem that takes into account the empirical variance of the different arms. In earlier experimental works, such algorithms were found to outperform the competing algorithms. The purpose of this paper is to provide a theoretical explanation of these findings and provide theoretical guidelines for the tuning of the parameters of these algorithms. For this we analyze the expected regret and for the first time the concentration of the regret. The analysis of the expected regret shows that variance estimates can be especially advantageous when the payoffs of suboptimal arms have low variance. The risk analysis, rather unexpectedly, reveals that except some very special bandit problems, for upper confidence bound based algorithms with standard bias sequences, the regret concentrates only at a polynomial rate. Hence, although these algorithms achieve logarithmic expected regret rates, they seem less attractive when the risk of achieving much worse than logarithmic cumulative regret is also taken into account.

@inproceedings{audibert2007,
  abstract = {Algorithms based on upper-confidence bounds for balancing exploration and exploitation are gaining popularity since they are easy to implement, efficient and effective. In this paper we consider a variant of the basic algorithm for the stochastic, multi-armed bandit problem that takes into account the empirical variance of the different arms. In earlier experimental works, such algorithms were found to outperform the competing algorithms. The purpose of this paper is to provide a theoretical explanation of these findings and provide theoretical guidelines for the tuning of the parameters of these algorithms. For this we analyze the expected regret and for the first time the concentration of the regret. The analysis of the expected regret shows that variance estimates can be especially advantageous when the payoffs of suboptimal arms have low variance. The risk analysis, rather unexpectedly, reveals that except some very special bandit problems, for upper confidence bound based algorithms with standard bias sequences, the regret concentrates only at a polynomial rate. Hence, although these algorithms achieve logarithmic expected regret rates, they seem less attractive when the risk of achieving much worse than logarithmic cumulative regret is also taken into account.},
  acceptrate = {50\%},
  author = {Audibert, Jean-Yves and Munos, R{\'e}mi and Szepesv{\'a}ri, Csaba},
  booktitle = {Algorithmic Learning Theory ({ALT})},
  keywords = {multi-armed bandits, sequential algorithms, stochastic bandits, Bernstein's inequality, theory},
  note = {See \cite{audibert2009} for a longer, updated version},
  pages = {150--165},
  ppt = {talks/ALT07-UCBTuned-Talk.ppt},
  publisher = {Springer},
  title = {Tuning Bandit Algorithms in Stochastic Environments},
  url_paper = {ucb_alt.pdf},
  year = {2007}}

Downloads: 5

{"_id":"EQ8EFSy94Mj4rCLj6","bibbaseid":"audibert-munos-szepesvri-tuningbanditalgorithmsinstochasticenvironments-2007","downloads":5,"creationDate":"2018-07-06T20:22:33.253Z","title":"Tuning Bandit Algorithms in Stochastic Environments","author_short":["Audibert, J.","Munos, R.","Szepesvári, C."],"year":2007,"bibtype":"inproceedings","biburl":"https://www.ualberta.ca/~szepesva/papers/p2.bib","bibdata":{"bibtype":"inproceedings","type":"inproceedings","abstract":"Algorithms based on upper-confidence bounds for balancing exploration and exploitation are gaining popularity since they are easy to implement, efficient and effective. In this paper we consider a variant of the basic algorithm for the stochastic, multi-armed bandit problem that takes into account the empirical variance of the different arms. In earlier experimental works, such algorithms were found to outperform the competing algorithms. The purpose of this paper is to provide a theoretical explanation of these findings and provide theoretical guidelines for the tuning of the parameters of these algorithms. For this we analyze the expected regret and for the first time the concentration of the regret. The analysis of the expected regret shows that variance estimates can be especially advantageous when the payoffs of suboptimal arms have low variance. The risk analysis, rather unexpectedly, reveals that except some very special bandit problems, for upper confidence bound based algorithms with standard bias sequences, the regret concentrates only at a polynomial rate. 
Hence, although these algorithms achieve logarithmic expected regret rates, they seem less attractive when the risk of achieving much worse than logarithmic cumulative regret is also taken into account.","acceptrate":"50%","author":[{"propositions":[],"lastnames":["Audibert"],"firstnames":["J.-Y."],"suffixes":[]},{"propositions":[],"lastnames":["Munos"],"firstnames":["R."],"suffixes":[]},{"propositions":[],"lastnames":["Szepesvári"],"firstnames":["Cs."],"suffixes":[]}],"booktitle":"ALT","keywords":"multi-armed bandits, sequential algorithms, stochastic bandits, Bernstein's inequality, theory","note":"See i̧teaudibert2009 for a longer, updated version","pages":"150–165","ppt":"talks/ALT07-UCBTuned-Talk.ppt","publisher":"Springer","title":"Tuning Bandit Algorithms in Stochastic Environments","url_paper":"ucb_alt.pdf","year":"2007","bibtex":"@inproceedings{audibert2007,\n\tabstract = {Algorithms based on upper-confidence bounds for balancing exploration and exploitation are gaining popularity since they are easy to implement, efficient and effective. In this paper we consider a variant of the basic algorithm for the stochastic, multi-armed bandit problem that takes into account the empirical variance of the different arms. In earlier experimental works, such algorithms were found to outperform the competing algorithms. The purpose of this paper is to provide a theoretical explanation of these findings and provide theoretical guidelines for the tuning of the parameters of these algorithms. For this we analyze the expected regret and for the first time the concentration of the regret. The analysis of the expected regret shows that variance estimates can be especially advantageous when the payoffs of suboptimal arms have low variance. The risk analysis, rather unexpectedly, reveals that except some very special bandit problems, for upper confidence bound based algorithms with standard bias sequences, the regret concentrates only at a polynomial rate. 
Hence, although these algorithms achieve logarithmic expected regret rates, they seem less attractive when the risk of achieving much worse than logarithmic cumulative regret is also taken into account.},\n\tacceptrate = {50\\%},\n\tauthor = {Audibert, J.-Y. and Munos, R. and Szepesv{\\'a}ri, Cs.},\n\tbooktitle = {ALT},\n\tkeywords = {multi-armed bandits, sequential algorithms, stochastic bandits, Bernstein's inequality, theory},\n\tnote = {See \\cite{audibert2009} for a longer, updated version},\n\tpages = {150--165},\n\tppt = {talks/ALT07-UCBTuned-Talk.ppt},\n\tpublisher = {Springer},\n\ttitle = {Tuning Bandit Algorithms in Stochastic Environments},\n\turl_paper = {ucb_alt.pdf},\n\tyear = {2007}}\n\n","author_short":["Audibert, J.","Munos, R.","Szepesvári, C."],"key":"audibert2007","id":"audibert2007","bibbaseid":"audibert-munos-szepesvri-tuningbanditalgorithmsinstochasticenvironments-2007","role":"author","urls":{" paper":"https://www.ualberta.ca/~szepesva/papers/ucb_alt.pdf"},"keyword":["multi-armed bandits","sequential algorithms","stochastic bandits","Bernstein's inequality","theory"],"metadata":{"authorlinks":{"szepesvári, c":"https://sites.ualberta.ca/~szepesva/pubs.html"}},"downloads":5,"html":""},"search_terms":["tuning","bandit","algorithms","stochastic","environments","audibert","munos","szepesvári"],"keywords":["multi-armed bandits","sequential algorithms","stochastic bandits","bernstein's inequality","theory"],"authorIDs":["279PY77kXFE8vWA2Z"],"dataSources":["dYMomj4Jofy8t4qmm","Ciq2jeFvPFYBCoxwJ","v2PxY4iCzrNyY9fhF","cd5AYQRw3RHjTgoQc"]}