Prioritized Sweeping: Reinforcement Learning with Less Data and Less Time. Moore, A. W. & Atkeson, C. G. 13(1):103-130.
Paper doi abstract bibtex We present a new algorithm, prioritized sweeping, for efficient prediction and control of stochastic Markov systems. Incremental learning methods such as temporal differencing and Q-learning have real-time performance. Classical methods are slower, but more accurate, because they make full use of the observations. Prioritized sweeping aims for the best of both worlds. It uses all previous experiences both to prioritize important dynamic programming sweeps and to guide the exploration of state-space. We compare prioritized sweeping with other reinforcement learning schemes for a number of different stochastic optimal control problems. It successfully solves large state-space real-time problems with which other methods have difficulty.
@article{moorePrioritizedSweepingReinforcement1993,
  langid = {english},
  title = {Prioritized Sweeping: {Reinforcement} Learning with Less Data and Less Time},
  volume = {13},
  issn = {1573-0565},
  url = {https://doi.org/10.1007/BF00993104},
  doi = {10.1007/BF00993104},
  shorttitle = {Prioritized Sweeping},
  abstract = {We present a new algorithm, prioritized sweeping, for efficient prediction and control of stochastic Markov systems. Incremental learning methods such as temporal differencing and Q-learning have real-time performance. Classical methods are slower, but more accurate, because they make full use of the observations. Prioritized sweeping aims for the best of both worlds. It uses all previous experiences both to prioritize important dynamic programming sweeps and to guide the exploration of state-space. We compare prioritized sweeping with other reinforcement learning schemes for a number of different stochastic optimal control problems. It successfully solves large state-space real-time problems with which other methods have difficulty.},
  number = {1},
  journaltitle = {Machine Learning},
  shortjournal = {Mach Learn},
  urldate = {2019-01-21},
  date = {1993-10-01},
  pages = {103--130},
  keywords = {asynchronous dynamic programming,reinforcement learning,heuristic search,learning control,memory-based learning,prioritized sweeping,temporal differencing},
  author = {Moore, Andrew W. and Atkeson, Christopher G.},
  file = {/home/dimitri/Nextcloud/Zotero/storage/T6UITRMF/Moore and Atkeson - 1993 - Prioritized sweeping Reinforcement learning with .pdf}
}
Downloads: 0
{"_id":"wtCqxJkiptW5MwjWx","bibbaseid":"moore-atkeson-prioritizedsweepingreinforcementlearningwithlessdataandlesstime","authorIDs":[],"author_short":["Moore, A. W.","Atkeson, C. G."],"bibdata":{"bibtype":"article","type":"article","langid":"english","title":"Prioritized Sweeping: Reinforcement Learning with Less Data and Less Time","volume":"13","issn":"1573-0565","url":"https://doi.org/10.1007/BF00993104","doi":"10.1007/BF00993104","shorttitle":"Prioritized Sweeping","abstract":"We present a new algorithm,prioritized sweeping, for efficient prediction and control of stochastic Markov systems. Incremental learning methods such as temporal differencing and Q-learning have real-time performance. Classical methods are slower, but more accurate, because they make full use of the observations. Prioritized sweeping aims for the best of both worlds. It uses all previous experiences both to prioritize important dynamic programming sweeps and to guide the exploration of state-space. We compare prioritized sweeping with other reinforcement learning schemes for a number of different stochastic optimal control problems. 
It successfully solves large state-space real-time problems with which other methods have difficulty.","number":"1","journaltitle":"Machine Learning","shortjournal":"Mach Learn","urldate":"2019-01-21","date":"1993-10-01","pages":"103-130","keywords":"asynchronous dynamic programming,reinforcement learning,heuristic search,learning control,Memory-based learning,prioritized sweeping,temporal differencing","author":[{"propositions":[],"lastnames":["Moore"],"firstnames":["Andrew","W."],"suffixes":[]},{"propositions":[],"lastnames":["Atkeson"],"firstnames":["Christopher","G."],"suffixes":[]}],"file":"/home/dimitri/Nextcloud/Zotero/storage/T6UITRMF/Moore and Atkeson - 1993 - Prioritized sweeping Reinforcement learning with .pdf","bibtex":"@article{moorePrioritizedSweepingReinforcement1993,\n langid = {english},\n title = {Prioritized Sweeping: {{Reinforcement}} Learning with Less Data and Less Time},\n volume = {13},\n issn = {1573-0565},\n url = {https://doi.org/10.1007/BF00993104},\n doi = {10.1007/BF00993104},\n shorttitle = {Prioritized Sweeping},\n abstract = {We present a new algorithm,prioritized sweeping, for efficient prediction and control of stochastic Markov systems. Incremental learning methods such as temporal differencing and Q-learning have real-time performance. Classical methods are slower, but more accurate, because they make full use of the observations. Prioritized sweeping aims for the best of both worlds. It uses all previous experiences both to prioritize important dynamic programming sweeps and to guide the exploration of state-space. We compare prioritized sweeping with other reinforcement learning schemes for a number of different stochastic optimal control problems. 
It successfully solves large state-space real-time problems with which other methods have difficulty.},\n number = {1},\n journaltitle = {Machine Learning},\n shortjournal = {Mach Learn},\n urldate = {2019-01-21},\n date = {1993-10-01},\n pages = {103-130},\n keywords = {asynchronous dynamic programming,reinforcement learning,heuristic search,learning control,Memory-based learning,prioritized sweeping,temporal differencing},\n author = {Moore, Andrew W. and Atkeson, Christopher G.},\n file = {/home/dimitri/Nextcloud/Zotero/storage/T6UITRMF/Moore and Atkeson - 1993 - Prioritized sweeping Reinforcement learning with .pdf}\n}\n\n","author_short":["Moore, A. W.","Atkeson, C. G."],"key":"moorePrioritizedSweepingReinforcement1993","id":"moorePrioritizedSweepingReinforcement1993","bibbaseid":"moore-atkeson-prioritizedsweepingreinforcementlearningwithlessdataandlesstime","role":"author","urls":{"Paper":"https://doi.org/10.1007/BF00993104"},"keyword":["asynchronous dynamic programming","reinforcement learning","heuristic search","learning control","Memory-based learning","prioritized sweeping","temporal differencing"],"downloads":0},"bibtype":"article","biburl":"https://raw.githubusercontent.com/dlozeve/newblog/master/bib/all.bib","creationDate":"2020-01-08T20:39:39.228Z","downloads":0,"keywords":["asynchronous dynamic programming","reinforcement learning","heuristic search","learning control","memory-based learning","prioritized sweeping","temporal differencing"],"search_terms":["prioritized","sweeping","reinforcement","learning","less","data","less","time","moore","atkeson"],"title":"Prioritized Sweeping: Reinforcement Learning with Less Data and Less Time","year":null,"dataSources":["3XqdvqRE7zuX4cm8m"]}