Yuan, K., Ying, B., & Sayed, A. H. Efficient Variance-Reduced Learning Over Multi-Agent Networks. In 2018 26th European Signal Processing Conference (EUSIPCO), pages 415–419, September 2018. Abstract: This work develops a fully decentralized variance-reduced learning algorithm for multi-agent networks where nodes store and process the data locally and are only allowed to communicate with their immediate neighbors. In the proposed algorithm, there is no need for a central or master unit while the objective is to enable the dispersed nodes to learn the exact global model despite their limited localized interactions. The resulting algorithm is shown to have low memory requirement, guaranteed linear convergence, robustness to failure of links or nodes and scalability to the network size. Moreover, the decentralized nature of the solution makes large-scale machine learning problems more tractable and also scalable since data is stored and processed locally at the nodes.
@inproceedings{8553100,
  author    = {Yuan, K. and Ying, B. and Sayed, A. H.},
  booktitle = {2018 26th European Signal Processing Conference ({EUSIPCO})},
  title     = {Efficient Variance-Reduced Learning Over Multi-Agent Networks},
  year      = {2018},
  pages     = {415--419},
  abstract  = {This work develops a fully decentralized variance-reduced learning algorithm for multi-agent networks where nodes store and process the data locally and are only allowed to communicate with their immediate neighbors. In the proposed algorithm, there is no need for a central or master unit while the objective is to enable the dispersed nodes to learn the exact global model despite their limited localized interactions. The resulting algorithm is shown to have low memory requirement, guaranteed linear convergence, robustness to failure of links or nodes and scalability to the network size. Moreover, the decentralized nature of the solution makes large-scale machine learning problems more tractable and also scalable since data is stored and processed locally at the nodes.},
  keywords  = {learning (artificial intelligence);minimisation;multi-agent systems;multiagent networks;fully decentralized variance-reduced learning algorithm;nodes store;immediate neighbors;central master unit;dispersed nodes;exact global model;localized interactions;low memory requirement;network size;decentralized nature;linear convergence;large-scale machine learning problems;Signal processing algorithms;Convergence;Memory management;Europe;Signal processing;Indexes;Optimization;diffusion strategy;variance-reduction;stochastic gradient descent;memory efficiency;SVRG;SAGA;AVRG},
  doi       = {10.23919/EUSIPCO.2018.8553100},
  issn      = {2076-1465},
  month     = sep,
  url       = {https://www.eurasip.org/proceedings/eusipco/eusipco2018/papers/1570435623.pdf},
}
Downloads: 0
{"_id":"4BEN8HE95RRTcK2pS","bibbaseid":"yuan-ying-sayed-efficientvariancereducedlearningovermultiagentnetworks-2018","authorIDs":[],"author_short":["Yuan, K.","Ying, B.","Sayed, A. H."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","author":[{"firstnames":["K."],"propositions":[],"lastnames":["Yuan"],"suffixes":[]},{"firstnames":["B."],"propositions":[],"lastnames":["Ying"],"suffixes":[]},{"firstnames":["A.","H."],"propositions":[],"lastnames":["Sayed"],"suffixes":[]}],"booktitle":"2018 26th European Signal Processing Conference (EUSIPCO)","title":"Efficient Variance-Reduced Learning Over Multi-Agent Networks","year":"2018","pages":"415-419","abstract":"This work develops a fully decentralized variance-reduced learning algorithm for multi-agent networks where nodes store and process the data locally and are only allowed to communicate with their immediate neighbors. In the proposed algorithm, there is no need for a central or master unit while the objective is to enable the dispersed nodes to learn the exact global model despite their limited localized interactions. The resulting algorithm is shown to have low memory requirement, guaranteed linear convergence, robustness to failure of links or nodes and scalability to the network size. Moreover, the decentralized nature of the solution makes large-scale machine learning problems more tractable and also scalable since data is stored and processed locally at the nodes.","keywords":"learning (artificial intelligence);minimisation;multi-agent systems;multiagent networks;fully decentralized variance-reduced learning algorithm;nodes store;immediate neighbors;central master unit;dispersed nodes;exact global model;localized interactions;low memory requirement;network size;decentralized nature;linear convergence;large-scale machine learning problems;Signal processing algorithms;Convergence;Memory management;Europe;Signal processing;Indexes;Optimization;diffusion strategy;variance-reduction;stochastic gradient descent;memory efficiency;SVRG;SAGA;AVRG","doi":"10.23919/EUSIPCO.2018.8553100","issn":"2076-1465","month":"Sep.","url":"https://www.eurasip.org/proceedings/eusipco/eusipco2018/papers/1570435623.pdf","bibtex":"@InProceedings{8553100,\n author = {K. Yuan and B. Ying and A. H. Sayed},\n booktitle = {2018 26th European Signal Processing Conference (EUSIPCO)},\n title = {Efficient Variance-Reduced Learning Over Multi-Agent Networks},\n year = {2018},\n pages = {415-419},\n abstract = {This work develops a fully decentralized variance-reduced learning algorithm for multi-agent networks where nodes store and process the data locally and are only allowed to communicate with their immediate neighbors. In the proposed algorithm, there is no need for a central or master unit while the objective is to enable the dispersed nodes to learn the exact global model despite their limited localized interactions. The resulting algorithm is shown to have low memory requirement, guaranteed linear convergence, robustness to failure of links or nodes and scalability to the network size. Moreover, the decentralized nature of the solution makes large-scale machine learning problems more tractable and also scalable since data is stored and processed locally at the nodes.},\n keywords = {learning (artificial intelligence);minimisation;multi-agent systems;multiagent networks;fully decentralized variance-reduced learning algorithm;nodes store;immediate neighbors;central master unit;dispersed nodes;exact global model;localized interactions;low memory requirement;network size;decentralized nature;linear convergence;large-scale machine learning problems;Signal processing algorithms;Convergence;Memory management;Europe;Signal processing;Indexes;Optimization;diffusion strategy;variance-reduction;stochastic gradient descent;memory efficiency;SVRG;SAGA;AVRG},\n doi = {10.23919/EUSIPCO.2018.8553100},\n issn = {2076-1465},\n month = {Sep.},\n url = {https://www.eurasip.org/proceedings/eusipco/eusipco2018/papers/1570435623.pdf},\n}\n\n","author_short":["Yuan, K.","Ying, B.","Sayed, A. H."],"key":"8553100","id":"8553100","bibbaseid":"yuan-ying-sayed-efficientvariancereducedlearningovermultiagentnetworks-2018","role":"author","urls":{"Paper":"https://www.eurasip.org/proceedings/eusipco/eusipco2018/papers/1570435623.pdf"},"keyword":["learning (artificial intelligence);minimisation;multi-agent systems;multiagent networks;fully decentralized variance-reduced learning algorithm;nodes store;immediate neighbors;central master unit;dispersed nodes;exact global model;localized interactions;low memory requirement;network size;decentralized nature;linear convergence;large-scale machine learning problems;Signal processing algorithms;Convergence;Memory management;Europe;Signal processing;Indexes;Optimization;diffusion strategy;variance-reduction;stochastic gradient descent;memory efficiency;SVRG;SAGA;AVRG"],"metadata":{"authorlinks":{}},"downloads":0},"bibtype":"inproceedings","biburl":"https://raw.githubusercontent.com/Roznn/EUSIPCO/main/eusipco2018url.bib","creationDate":"2021-02-13T15:38:40.122Z","downloads":0,"keywords":["learning (artificial intelligence);minimisation;multi-agent systems;multiagent networks;fully decentralized variance-reduced learning algorithm;nodes store;immediate neighbors;central master unit;dispersed nodes;exact global model;localized interactions;low memory requirement;network size;decentralized nature;linear convergence;large-scale machine learning problems;signal processing algorithms;convergence;memory management;europe;signal processing;indexes;optimization;diffusion strategy;variance-reduction;stochastic gradient descent;memory efficiency;svrg;saga;avrg"],"search_terms":["efficient","variance","reduced","learning","over","multi","agent","networks","yuan","ying","sayed"],"title":"Efficient Variance-Reduced Learning Over Multi-Agent Networks","year":2018,"dataSources":["yiZioZximP7hphDpY","iuBeKSmaES2fHcEE9"]}