Mean Field Multi-Agent Reinforcement Learning. Yang, Y., Luo, R., Li, M., Zhou, M., Zhang, W., & Wang, J. ICML 2018.

Abstract: Existing multi-agent reinforcement learning methods are typically limited to a small number of agents. When the number of agents grows large, learning becomes intractable due to the curse of dimensionality and the exponential growth of agent interactions. In this paper, we present Mean Field Reinforcement Learning, where the interactions within the population of agents are approximated by those between a single agent and the average effect of the overall population or of neighboring agents; the interplay between the two entities is mutually reinforcing: the learning of the individual agent's optimal policy depends on the dynamics of the population, while the dynamics of the population change according to the collective patterns of the individual policies. We develop practical mean field Q-learning and mean field Actor-Critic algorithms and analyze the convergence of the solution to a Nash equilibrium. Experiments on Gaussian squeeze, the Ising model, and battle games demonstrate the effectiveness of our mean field approaches. In addition, we report the first result of solving the Ising model via model-free reinforcement learning methods.
@inproceedings{yang_mean_2018,
	title = {Mean Field Multi-Agent Reinforcement Learning},
	booktitle = {Proceedings of the 35th International Conference on Machine Learning ({ICML})},
	author = {Yang, Yaodong and Luo, Rui and Li, Minne and Zhou, Ming and Zhang, Weinan and Wang, Jun},
	year = {2018},
	language = {en}
}
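Since the abstract describes the mean field Q-learning update only in words, a small sketch may help make it concrete. The snippet below is a toy tabular illustration under stated assumptions: discrete states, a rounded scalar summary of the neighbors' mean action, and invented names such as `mf_q_update`, `N_STATES`, and `BETA`. It is not the authors' implementation, which averages neighbors' one-hot action vectors and uses neural function approximation.

```python
import numpy as np

# Minimal tabular sketch of the mean field Q-learning idea from the abstract:
# each agent's Q-function conditions on its own action and on a summary of
# its neighbors' mean action, so the table grows linearly with the action
# space instead of exponentially with the number of agents.
# N_STATES, N_ACTIONS, and the discretization of the mean action are all
# illustrative assumptions, not the paper's reference implementation.

N_STATES = 20   # assumed size of the (discrete) state space
N_ACTIONS = 5   # assumed size of each agent's action space
ALPHA = 0.1     # learning rate
GAMMA = 0.95    # discount factor
BETA = 1.0      # Boltzmann temperature

# Q[s, a, a_mean]: own action a, discretized neighborhood mean action a_mean.
Q = np.zeros((N_STATES, N_ACTIONS, N_ACTIONS))

def mean_action(neighbor_actions):
    """Summarize the 'average effect' of the neighborhood: here the rounded
    mean of the neighbors' discrete action indices (a crude stand-in for the
    paper's mean of one-hot action vectors)."""
    return int(round(float(np.mean(neighbor_actions))))

def boltzmann_policy(s, a_mean):
    """Softmax over the agent's own actions, given the current mean action."""
    prefs = BETA * Q[s, :, a_mean]
    prefs -= prefs.max()              # shift for numerical stability
    probs = np.exp(prefs)
    return probs / probs.sum()

def mf_q_update(s, a, a_mean, r, s_next, a_mean_next):
    """One mean field Q-learning step:
        Q(s, a, a_mean) <- (1 - ALPHA) * Q(s, a, a_mean)
                           + ALPHA * (r + GAMMA * v(s_next)),
    where v(s_next) averages Q under the agent's own Boltzmann policy."""
    pi_next = boltzmann_policy(s_next, a_mean_next)
    v_next = pi_next @ Q[s_next, :, a_mean_next]
    Q[s, a, a_mean] += ALPHA * (r + GAMMA * v_next - Q[s, a, a_mean])

# Example transition for one agent whose three neighbors took actions 0, 2, 2:
mf_q_update(s=3, a=1, a_mean=mean_action([0, 2, 2]),
            r=1.0, s_next=4, a_mean_next=2)
```

The key design point the abstract alludes to is visible in the table shape: conditioning on a single mean action replaces the joint action of all other agents, which is what keeps learning tractable as the population grows.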
{"_id":"yDDJeFJPKHAR2ktGa","bibbaseid":"yang-luo-li-zhou-zhang-wang-meanfieldmultiagentreinforcementlearning","authorIDs":[],"author_short":["Yang, Y.","Luo, R.","Li, M.","Zhou, M.","Zhang, W.","Wang, J."],"bibdata":{"bibtype":"article","type":"article","title":"Mean Field Multi-Agent Reinforcement Learning","abstract":"Existing multi-agent reinforcement learning methods are limited typically to a small number of agents. When the agent number increases largely, the learning becomes intractable due to the curse of the dimensionality and the exponential growth of agent interactions. In this paper, we present Mean Field Reinforcement Learning where the interactions within the population of agents are approximated by those between a single agent and the average effect from the overall population or neighboring agents; the interplay between the two entities is mutually reinforced: the learning of the individual agent’s optimal policy depends on the dynamics of the population, while the dynamics of the population change according to the collective patterns of the individual policies. We develop practical mean field Q-learning and mean field Actor-Critic algorithms and analyze the convergence of the solution to Nash equilibrium. Experiments on Gaussian squeeze, Ising model, and battle games justify the learning effectiveness of our mean field approaches. In addition, we report the first result to solve the Ising model via model-free reinforcement learning methods.","language":"en","author":[{"propositions":[],"lastnames":["Yang"],"firstnames":["Yaodong"],"suffixes":[]},{"propositions":[],"lastnames":["Luo"],"firstnames":["Rui"],"suffixes":[]},{"propositions":[],"lastnames":["Li"],"firstnames":["Minne"],"suffixes":[]},{"propositions":[],"lastnames":["Zhou"],"firstnames":["Ming"],"suffixes":[]},{"propositions":[],"lastnames":["Zhang"],"firstnames":["Weinan"],"suffixes":[]},{"propositions":[],"lastnames":["Wang"],"firstnames":["Jun"],"suffixes":[]}],"pages":"10","bibtex":"@article{yang_mean_nodate,\n\ttitle = {Mean {Field} {Multi}-{Agent} {Reinforcement} {Learning}},\n\tabstract = {Existing multi-agent reinforcement learning methods are limited typically to a small number of agents. When the agent number increases largely, the learning becomes intractable due to the curse of the dimensionality and the exponential growth of agent interactions. In this paper, we present Mean Field Reinforcement Learning where the interactions within the population of agents are approximated by those between a single agent and the average effect from the overall population or neighboring agents; the interplay between the two entities is mutually reinforced: the learning of the individual agent’s optimal policy depends on the dynamics of the population, while the dynamics of the population change according to the collective patterns of the individual policies. We develop practical mean field Q-learning and mean field Actor-Critic algorithms and analyze the convergence of the solution to Nash equilibrium. Experiments on Gaussian squeeze, Ising model, and battle games justify the learning effectiveness of our mean field approaches. 
In addition, we report the first result to solve the Ising model via model-free reinforcement learning methods.},\n\tlanguage = {en},\n\tauthor = {Yang, Yaodong and Luo, Rui and Li, Minne and Zhou, Ming and Zhang, Weinan and Wang, Jun},\n\tpages = {10}\n}\n\n","author_short":["Yang, Y.","Luo, R.","Li, M.","Zhou, M.","Zhang, W.","Wang, J."],"key":"yang_mean_nodate","id":"yang_mean_nodate","bibbaseid":"yang-luo-li-zhou-zhang-wang-meanfieldmultiagentreinforcementlearning","role":"author","urls":{},"downloads":0,"html":""},"bibtype":"article","biburl":"https://bibbase.org/zotero/asneha213","creationDate":"2019-07-08T00:48:54.942Z","downloads":0,"keywords":[],"search_terms":["mean","field","multi","agent","reinforcement","learning","yang","luo","li","zhou","zhang","wang"],"title":"Mean Field Multi-Agent Reinforcement Learning","year":null,"dataSources":["fjacg9txEnNSDwee6"]}