Active learning in Multi-armed bandits. Antos, A., Grover, V., & Szepesvári, C. In *ALT*, of *Lecture Notes in Computer Science 5254*, pages 287–302, 2008. Springer-Verlag. See a.antos2010 for an extended version. [Paper] [Abstract] [BibTeX] In this paper we consider the problem of actively learning the mean values of distributions associated with a finite number of options (arms). The algorithms can select which option to generate the next sample from in order to produce estimates with equally good precision for all the distributions. When an algorithm uses sample means to estimate the unknown values then the optimal solution, assuming full knowledge of the distributions, is to sample each option proportional to its variance. In this paper we propose an incremental algorithm that asymptotically achieves the same loss as an optimal rule. We prove that the excess loss suffered by this algorithm, apart from logarithmic factors, scales as $1/n^{(3/2)}$, which we conjecture to be the optimal rate. The performance of the algorithm is illustrated in a simple problem.

@inproceedings{antos2008,
abstract = {In this paper we consider the problem of actively learning the mean values of distributions associated with a finite number of options (arms). The algorithms can select which option to generate the next sample from in order to produce estimates with equally good precision for all the distributions. When an algorithm uses sample means to estimate the unknown values then the optimal solution, assuming full knowledge of the distributions, is to sample each option proportional to its variance. In this paper we propose an incremental algorithm that asymptotically achieves the same loss as an optimal rule. We prove that the excess loss suffered by this algorithm, apart from logarithmic factors, scales as $1/n^{(3/2)}$, which we conjecture to be the optimal rate. The performance of the algorithm is illustrated in a simple problem.},
author = {Antos, A. and Grover, V. and Szepesv{\'a}ri, Cs.},
booktitle = {ALT},
keywords = {active learning, regression, sequential algorithms, theory},
note = {See \cite{a.antos2010} for an extended version},
pages = {287--302},
publisher = {Springer-Verlag},
series = {Lecture Notes in Computer Science},
title = {Active Learning in Multi-armed Bandits},
url_paper = {Allocation.pdf},
volume = {5254},
year = {2008}}