Learning Action Representations for Reinforcement Learning. Chandak, Y., Theocharous, G., Kostas, J., Jordan, S., & Thomas, P. S. [Paper] [abstract] [bibtex] — Abstract: Most model-free reinforcement learning methods leverage state representations (embeddings) for generalization, but either ignore structure in the space of actions or assume the structure is provided a priori. We show how a policy can be decomposed into a component that acts in a low-dimensional space of action representations and a component that transforms these representations into actual actions. These representations improve generalization over large, finite action sets by allowing the agent to infer the outcomes of actions similar to actions already taken. We provide an algorithm to both learn and use action representations and provide conditions for its convergence. The efficacy of the proposed method is demonstrated on large-scale real-world problems.

@article{chandakLearningActionRepresentations2019a,
  title       = {Learning Action Representations for Reinforcement Learning},
  author      = {Chandak, Yash and Theocharous, Georgios and Kostas, James and Jordan, Scott and Thomas, Philip S.},
  date        = {2019-01-31},
  eprint      = {1902.00183},
  eprinttype  = {arXiv},
  eprintclass = {cs.LG},
  url         = {http://arxiv.org/abs/1902.00183},
  urldate     = {2019-06-18},
  abstract    = {Most model-free reinforcement learning methods leverage state representations (embeddings) for generalization, but either ignore structure in the space of actions or assume the structure is provided a priori. We show how a policy can be decomposed into a component that acts in a low-dimensional space of action representations and a component that transforms these representations into actual actions. These representations improve generalization over large, finite action sets by allowing the agent to infer the outcomes of actions similar to actions already taken. We provide an algorithm to both learn and use action representations and provide conditions for its convergence. The efficacy of the proposed method is demonstrated on large-scale real-world problems.},
  keywords    = {Statistics - Machine Learning,Computer Science - Machine Learning},
  file        = {/home/dimitri/Nextcloud/Zotero/storage/IALS2P6C/Chandak et al. - 2019 - Learning Action Representations for Reinforcement .pdf;/home/dimitri/Nextcloud/Zotero/storage/SC7ZUA3I/1902.html},
}

Downloads: 0

{"_id":"Ai4kT2pJZiF6Mr8mt","bibbaseid":"chandak-theocharous-kostas-jordan-thomas-learningactionrepresentationsforreinforcementlearning","authorIDs":[],"author_short":["Chandak, Y.","Theocharous, G.","Kostas, J.","Jordan, S.","Thomas, P. S."],"bibdata":{"bibtype":"article","type":"article","archiveprefix":"arXiv","eprinttype":"arxiv","eprint":"1902.00183","primaryclass":"cs, stat","title":"Learning Action Representations for Reinforcement Learning","url":"http://arxiv.org/abs/1902.00183","abstract":"Most model-free reinforcement learning methods leverage state representations (embeddings) for generalization, but either ignore structure in the space of actions or assume the structure is provided a priori. We show how a policy can be decomposed into a component that acts in a low-dimensional space of action representations and a component that transforms these representations into actual actions. These representations improve generalization over large, finite action sets by allowing the agent to infer the outcomes of actions similar to actions already taken. We provide an algorithm to both learn and use action representations and provide conditions for its convergence. The efficacy of the proposed method is demonstrated on large-scale real-world problems.","urldate":"2019-06-18","date":"2019-01-31","keywords":"Statistics - Machine Learning,Computer Science - Machine Learning","author":[{"propositions":[],"lastnames":["Chandak"],"firstnames":["Yash"],"suffixes":[]},{"propositions":[],"lastnames":["Theocharous"],"firstnames":["Georgios"],"suffixes":[]},{"propositions":[],"lastnames":["Kostas"],"firstnames":["James"],"suffixes":[]},{"propositions":[],"lastnames":["Jordan"],"firstnames":["Scott"],"suffixes":[]},{"propositions":[],"lastnames":["Thomas"],"firstnames":["Philip","S."],"suffixes":[]}],"file":"/home/dimitri/Nextcloud/Zotero/storage/IALS2P6C/Chandak et al. 
- 2019 - Learning Action Representations for Reinforcement .pdf;/home/dimitri/Nextcloud/Zotero/storage/SC7ZUA3I/1902.html","bibtex":"@article{chandakLearningActionRepresentations2019a,\n archivePrefix = {arXiv},\n eprinttype = {arxiv},\n eprint = {1902.00183},\n primaryClass = {cs, stat},\n title = {Learning {{Action Representations}} for {{Reinforcement Learning}}},\n url = {http://arxiv.org/abs/1902.00183},\n abstract = {Most model-free reinforcement learning methods leverage state representations (embeddings) for generalization, but either ignore structure in the space of actions or assume the structure is provided a priori. We show how a policy can be decomposed into a component that acts in a low-dimensional space of action representations and a component that transforms these representations into actual actions. These representations improve generalization over large, finite action sets by allowing the agent to infer the outcomes of actions similar to actions already taken. We provide an algorithm to both learn and use action representations and provide conditions for its convergence. The efficacy of the proposed method is demonstrated on large-scale real-world problems.},\n urldate = {2019-06-18},\n date = {2019-01-31},\n keywords = {Statistics - Machine Learning,Computer Science - Machine Learning},\n author = {Chandak, Yash and Theocharous, Georgios and Kostas, James and Jordan, Scott and Thomas, Philip S.},\n file = {/home/dimitri/Nextcloud/Zotero/storage/IALS2P6C/Chandak et al. - 2019 - Learning Action Representations for Reinforcement .pdf;/home/dimitri/Nextcloud/Zotero/storage/SC7ZUA3I/1902.html}\n}\n\n","author_short":["Chandak, Y.","Theocharous, G.","Kostas, J.","Jordan, S.","Thomas, P. 
S."],"key":"chandakLearningActionRepresentations2019a","id":"chandakLearningActionRepresentations2019a","bibbaseid":"chandak-theocharous-kostas-jordan-thomas-learningactionrepresentationsforreinforcementlearning","role":"author","urls":{"Paper":"http://arxiv.org/abs/1902.00183"},"keyword":["Statistics - Machine Learning","Computer Science - Machine Learning"],"downloads":0},"bibtype":"article","biburl":"https://raw.githubusercontent.com/dlozeve/newblog/master/bib/all.bib","creationDate":"2020-01-08T20:39:39.376Z","downloads":0,"keywords":["statistics - machine learning","computer science - machine learning"],"search_terms":["learning","action","representations","reinforcement","learning","chandak","theocharous","kostas","jordan","thomas"],"title":"Learning Action Representations for Reinforcement Learning","year":null,"dataSources":["3XqdvqRE7zuX4cm8m"]}