{"_id":"XwDPPtwsZFobrYY8e","bibbaseid":"jung-eldar-grtz-performancelimitsofdictionarylearningforsparsecoding-2014","authorIDs":[],"author_short":["Jung, A.","Eldar, Y. C.","Görtz, N."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","author":[{"firstnames":["A."],"propositions":[],"lastnames":["Jung"],"suffixes":[]},{"firstnames":["Y.","C."],"propositions":[],"lastnames":["Eldar"],"suffixes":[]},{"firstnames":["N."],"propositions":[],"lastnames":["Görtz"],"suffixes":[]}],"booktitle":"2014 22nd European Signal Processing Conference (EUSIPCO)","title":"Performance limits of dictionary learning for sparse coding","year":"2014","pages":"765-769","abstract":"We consider the problem of dictionary learning under the assumption that the observed signals can be represented as sparse linear combinations of the columns of a single large dictionary matrix. In particular, we analyze the minimax risk of the dictionary learning problem which governs the mean squared error (MSE) performance of any learning scheme, regardless of its computational complexity. By following an established information-theoretic method based on Fano's inequality, we derive a lower bound on the minimax risk for a given dictionary learning problem. This lower bound yields a characterization of the sample-complexity, i.e., a lower bound on the required number of observations such that consistent dictionary learning schemes exist. Our bounds may be compared with the performance of a given learning scheme, allowing to characterize how far the method is from optimal performance.","keywords":"computational complexity;encoding;mean square error methods;minimax techniques;sparse coding;sparse linear combinations;single-large-dictionary matrix;minimax risk;dictionary learning problem;mean squared error performance;MSE performance;computational complexity;information-theoretic method;Fano inequality;sample-complexity characterization;Dictionaries;Vectors;Indexes;Estimation;Mutual information;Signal to noise ratio;Compressed sensing;Dictionary Identification;Dictionary Learning;Big Data;Minimax Risk;Fano Inequality","issn":"2076-1465","month":"Sep.","url":"https://www.eurasip.org/proceedings/eusipco/eusipco2014/html/papers/1569917473.pdf","bibtex":"@InProceedings{6952232,\n author = {A. Jung and Y. C. Eldar and N. Görtz},\n booktitle = {2014 22nd European Signal Processing Conference (EUSIPCO)},\n title = {Performance limits of dictionary learning for sparse coding},\n year = {2014},\n pages = {765-769},\n abstract = {We consider the problem of dictionary learning under the assumption that the observed signals can be represented as sparse linear combinations of the columns of a single large dictionary matrix. In particular, we analyze the minimax risk of the dictionary learning problem which governs the mean squared error (MSE) performance of any learning scheme, regardless of its computational complexity. By following an established information-theoretic method based on Fano's inequality, we derive a lower bound on the minimax risk for a given dictionary learning problem. This lower bound yields a characterization of the sample-complexity, i.e., a lower bound on the required number of observations such that consistent dictionary learning schemes exist. 
Our bounds may be compared with the performance of a given learning scheme, allowing to characterize how far the method is from optimal performance.},\n keywords = {computational complexity;encoding;mean square error methods;minimax techniques;sparse coding;sparse linear combinations;single-large-dictionary matrix;minimax risk;dictionary learning problem;mean squared error performance;MSE performance;computational complexity;information-theoretic method;Fano inequality;sample-complexity characterization;Dictionaries;Vectors;Indexes;Estimation;Mutual information;Signal to noise ratio;Compressed sensing;Dictionary Identification;Dictionary Learning;Big Data;Minimax Risk;Fano Inequality},\n issn = {2076-1465},\n month = {Sep.},\n url = {https://www.eurasip.org/proceedings/eusipco/eusipco2014/html/papers/1569917473.pdf},\n}\n\n","author_short":["Jung, A.","Eldar, Y. C.","Görtz, N."],"key":"6952232","id":"6952232","bibbaseid":"jung-eldar-grtz-performancelimitsofdictionarylearningforsparsecoding-2014","role":"author","urls":{"Paper":"https://www.eurasip.org/proceedings/eusipco/eusipco2014/html/papers/1569917473.pdf"},"keyword":["computational complexity;encoding;mean square error methods;minimax techniques;sparse coding;sparse linear combinations;single-large-dictionary matrix;minimax risk;dictionary learning problem;mean squared error performance;MSE performance;computational complexity;information-theoretic method;Fano inequality;sample-complexity characterization;Dictionaries;Vectors;Indexes;Estimation;Mutual information;Signal to noise ratio;Compressed sensing;Dictionary Identification;Dictionary Learning;Big Data;Minimax Risk;Fano Inequality"],"metadata":{"authorlinks":{}},"downloads":0},"bibtype":"inproceedings","biburl":"https://raw.githubusercontent.com/Roznn/EUSIPCO/main/eusipco2014url.bib","creationDate":"2021-02-13T17:43:41.633Z","downloads":0,"keywords":["computational complexity;encoding;mean square error methods;minimax techniques;sparse coding;sparse linear combinations;single-large-dictionary matrix;minimax risk;dictionary learning problem;mean squared error performance;mse performance;computational complexity;information-theoretic method;fano inequality;sample-complexity characterization;dictionaries;vectors;indexes;estimation;mutual information;signal to noise ratio;compressed sensing;dictionary identification;dictionary learning;big data;minimax risk;fano inequality"],"search_terms":["performance","limits","dictionary","learning","sparse","coding","jung","eldar","görtz"],"title":"Performance limits of dictionary learning for sparse coding","year":2014,"dataSources":["A2ezyFL6GG6na7bbs","oZFG3eQZPXnykPgnE"]}
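Annotation: the "established information-theoretic method based on Fano's inequality" mentioned in the abstract typically takes the generic form sketched below. This is an illustrative outline of the standard Fano/packing argument under assumed generic notation (A, delta, L, Y, Z), not the specific bound derived in the paper.

% Generic Fano-method minimax lower bound (illustrative sketch only; the
% symbols below are assumptions and are not taken from the paper).
% Let Z be uniform over a 2\delta-separated set of L candidate dictionaries
% A^{(1)}, ..., A^{(L)}, and let Y denote the observations generated under
% the dictionary A^{(Z)}. Reducing estimation to testing over the packing
% and applying Fano's inequality gives
\begin{equation*}
  \inf_{\widehat{\mathbf{A}}}\;
  \sup_{\mathbf{A}}\;
  \mathbb{E}\bigl\|\widehat{\mathbf{A}}(\mathbf{Y}) - \mathbf{A}\bigr\|_F^2
  \;\ge\;
  \delta^2 \left( 1 - \frac{I(\mathbf{Y};Z) + \log 2}{\log L} \right).
\end{equation*}
% For the right-hand side to stay bounded away from zero is impossible once
% I(Y;Z) grows like \log L; conversely, a consistent scheme needs enough
% observations for I(Y;Z) to reach that order, which is the mechanism behind
% the sample-complexity lower bound mentioned in the abstract.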