Sparse Algorithms Are Not Stable: A No-Free-Lunch Theorem. Xu, H., Caramanis, C., & Mannor, S. IEEE Transactions on Pattern Analysis and Machine Intelligence, 34(1):187–193, January 2012. doi: 10.1109/TPAMI.2011.177

Abstract: We consider two desired properties of learning algorithms: *sparsity* and *algorithmic stability*. Both properties are believed to lead to good generalization ability. We show that these two properties are fundamentally at odds with each other: a sparse algorithm cannot be stable and vice versa. Thus, one has to trade off sparsity and stability in designing a learning algorithm. In particular, our general result implies that $\ell_1$-regularized regression (Lasso) cannot be stable, while $\ell_2$-regularized regression is known to have strong stability properties and is therefore not sparse.
@article{xuSparseAlgorithmsAre2012,
title = {Sparse {{Algorithms Are Not Stable}}: {{A No}}-{{Free}}-{{Lunch Theorem}}},
author = {Xu, Huan and Caramanis, C. and Mannor, S.},
year = {2012},
month = jan,
volume = {34},
pages = {187--193},
issn = {0162-8828},
doi = {10.1109/tpami.2011.177},
abstract = {We consider two desired properties of learning algorithms: *sparsity* and *algorithmic stability*. Both properties are believed to lead to good generalization ability. We show that these two properties are fundamentally at odds with each other: a sparse algorithm cannot be stable and vice versa. Thus, one has to trade off sparsity and stability in designing a learning algorithm. In particular, our general result implies that $\ell_1$-regularized regression (Lasso) cannot be stable, while $\ell_2$-regularized regression is known to have strong stability properties and is therefore not sparse.},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
keywords = {machine-learning,mathematical-reasoning,modelling,no-free-lunch-theorem,stability-vs-sparsity},
number = {1}
}
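The abstract's claim can be made concrete with a small numerical comparison. The following is a minimal sketch, not taken from the paper, assuming NumPy and scikit-learn are available and using an arbitrary synthetic dataset and regularization strengths: it fits Lasso ($\ell_1$) and ridge ($\ell_2$) on the same data, counts nonzero coefficients as a measure of sparsity, and records how much the coefficients move under leave-one-out perturbations as a rough empirical proxy for the stability notion studied in the paper. Since the theorem is a worst-case statement, a single dataset need not show a dramatic stability gap; the sketch only makes the two traded-off quantities visible.

# Minimal sketch (not from the paper): sparsity vs. an empirical stability proxy.
# Assumptions: scikit-learn and NumPy installed; synthetic data and the
# regularization strengths alpha=0.1 / alpha=1.0 are arbitrary choices.
import numpy as np
from sklearn.linear_model import Lasso, Ridge

rng = np.random.default_rng(0)
n, d = 40, 20
X = rng.standard_normal((n, d))
true_w = np.zeros(d)
true_w[:3] = [2.0, -1.5, 1.0]              # only 3 informative features
y = X @ true_w + 0.1 * rng.standard_normal(n)

def loo_coefficient_shift(model_cls, **kwargs):
    """Largest coefficient change when a single training point is removed."""
    full = model_cls(**kwargs).fit(X, y).coef_
    shifts = []
    for i in range(n):
        mask = np.arange(n) != i
        w_i = model_cls(**kwargs).fit(X[mask], y[mask]).coef_
        shifts.append(np.max(np.abs(w_i - full)))
    return full, max(shifts)

w_lasso, shift_lasso = loo_coefficient_shift(Lasso, alpha=0.1)   # l1: sparse
w_ridge, shift_ridge = loo_coefficient_shift(Ridge, alpha=1.0)   # l2: stable

print("Lasso: nonzero coefs =", int(np.sum(w_lasso != 0)),
      " max LOO coef shift =", round(shift_lasso, 4))
print("Ridge: nonzero coefs =", int(np.sum(w_ridge != 0)),
      " max LOO coef shift =", round(shift_ridge, 4))

Measuring coefficient shift rather than loss shift keeps the sketch short; the uniform-stability notion in this literature is defined through the change in the learned predictor's loss when one training sample is altered. The intuition behind Lasso's instability is that its support (the set of nonzero coefficients) can change abruptly when a single training point is removed, whereas ridge coefficients vary smoothly with the data but are generically all nonzero.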
{"_id":"P8WxnScGDtnuRtrrZ","bibbaseid":"xu-caramanis-mannor-sparsealgorithmsarenotstableanofreelunchtheorem-2012","downloads":0,"creationDate":"2016-06-22T10:19:52.893Z","title":"Sparse Algorithms Are Not Stable: A No-Free-Lunch Theorem","author_short":["Xu, H.","Caramanis, C.","Mannor, S."],"year":2012,"bibtype":"article","biburl":"https://sharefast.me/php/download.php?id=zOUKvA&token=29","bibdata":{"bibtype":"article","type":"article","title":"Sparse Algorithms Are Not Stable: A No-Free-Lunch Theorem","author":[{"propositions":[],"lastnames":["Xu"],"firstnames":["Huan"],"suffixes":[]},{"propositions":[],"lastnames":["Caramanis"],"firstnames":["C."],"suffixes":[]},{"propositions":[],"lastnames":["Mannor"],"firstnames":["S."],"suffixes":[]}],"year":"2012","month":"January","volume":"34","pages":"187–193","issn":"0162-8828","doi":"10.1109/tpami.2011.177","abstract":"We consider two desired properties of learning algorithms: *sparsity* and *algorithmic stability*. Both properties are believed to lead to good generalization ability. We show that these two properties are fundamentally at odds with each other: a sparse algorithm cannot be stable and vice versa. Thus, one has to trade off sparsity and stability in designing a learning algorithm. In particular, our general result implies that \\$_1\\$-regularized regression (Lasso) cannot be stable, while \\$_2\\$-regularized regression is known to have strong stability properties and is therefore not sparse.","journal":"Pattern Analysis and Machine Intelligence, IEEE Transactions on","keywords":"*imported-from-citeulike-INRMM,~INRMM-MiD:c-9685435,machine-learning,mathematical-reasoning,modelling,no-free-lunch-theorem,stability-vs-sparsity","lccn":"INRMM-MiD:c-9685435","number":"1","bibtex":"@article{xuSparseAlgorithmsAre2012,\n title = {Sparse {{Algorithms Are Not Stable}}: {{A No}}-{{Free}}-{{Lunch Theorem}}},\n author = {Xu, Huan and Caramanis, C. and Mannor, S.},\n year = {2012},\n month = jan,\n volume = {34},\n pages = {187--193},\n issn = {0162-8828},\n doi = {10.1109/tpami.2011.177},\n abstract = {We consider two desired properties of learning algorithms: *sparsity* and *algorithmic stability*. Both properties are believed to lead to good generalization ability. We show that these two properties are fundamentally at odds with each other: a sparse algorithm cannot be stable and vice versa. Thus, one has to trade off sparsity and stability in designing a learning algorithm. 
In particular, our general result implies that \\$\\_1\\$-regularized regression (Lasso) cannot be stable, while \\$\\_2\\$-regularized regression is known to have strong stability properties and is therefore not sparse.},\n journal = {Pattern Analysis and Machine Intelligence, IEEE Transactions on},\n keywords = {*imported-from-citeulike-INRMM,~INRMM-MiD:c-9685435,machine-learning,mathematical-reasoning,modelling,no-free-lunch-theorem,stability-vs-sparsity},\n lccn = {INRMM-MiD:c-9685435},\n number = {1}\n}\n\n","author_short":["Xu, H.","Caramanis, C.","Mannor, S."],"key":"xuSparseAlgorithmsAre2012","id":"xuSparseAlgorithmsAre2012","bibbaseid":"xu-caramanis-mannor-sparsealgorithmsarenotstableanofreelunchtheorem-2012","role":"author","urls":{},"keyword":["*imported-from-citeulike-INRMM","~INRMM-MiD:c-9685435","machine-learning","mathematical-reasoning","modelling","no-free-lunch-theorem","stability-vs-sparsity"],"downloads":0},"search_terms":["sparse","algorithms","stable","free","lunch","theorem","xu","caramanis","mannor"],"keywords":["machine-learning","mathematical-reasoning","modelling","no-free-lunch-theorem","stability-vs-sparsity","*imported-from-citeulike-inrmm","~inrmm-mid:c-9685435"],"authorIDs":[],"dataSources":["5S2zj2hKW8TWTkuMq"]}