Learned Robust PCA: A Scalable Deep Unfolding Approach for High-Dimensional Outlier Detection. Cai, H., Liu, J., & Yin, W. October 2021. 15 citations (Semantic Scholar/arXiv) [2023-04-20]. arXiv:2110.05649 [cs, math].

Abstract: Robust principal component analysis (RPCA) is a critical tool in modern machine learning, which detects outliers in the task of low-rank matrix reconstruction. In this paper, we propose a scalable and learnable non-convex approach for high-dimensional RPCA problems, which we call Learned Robust PCA (LRPCA). LRPCA is highly efficient, and its free parameters can be effectively learned to optimize via deep unfolding. Moreover, we extend deep unfolding from finite iterations to infinite iterations via a novel feedforward-recurrent-mixed neural network model. We establish the recovery guarantee of LRPCA under mild assumptions for RPCA. Numerical experiments show that LRPCA outperforms the state-of-the-art RPCA algorithms, such as ScaledGD and AltProj, on both synthetic datasets and real-world applications.
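For context, the RPCA model referenced in the abstract posits an observed matrix equal to a low-rank part plus a sparse outlier part. A standard non-convex formulation is sketched below in LaTeX; the paper's exact parameterization (e.g., a factored low-rank variable and learned thresholds) may differ, so treat r and alpha as illustrative symbols rather than the paper's notation.

    % Observation model: M = L* + S*, with L* low-rank and S* sparse outliers.
    % A standard non-convex RPCA program; r and alpha are illustrative.
    \begin{equation*}
      \min_{L,\,S}\ \tfrac{1}{2}\,\lVert M - L - S \rVert_F^2
      \quad \text{s.t.} \quad \operatorname{rank}(L) \le r, \quad
      \lVert S \rVert_0 \le \alpha\, mn,
    \end{equation*}
    % where M is m-by-n, r bounds the rank of the recovered component,
    % and alpha bounds the fraction of entries corrupted by outliers.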
@misc{cai_learned_2021,
title = {Learned {Robust} {PCA}: {A} {Scalable} {Deep} {Unfolding} {Approach} for {High}-{Dimensional} {Outlier} {Detection}},
shorttitle = {Learned {Robust} {PCA}},
url = {http://arxiv.org/abs/2110.05649},
abstract = {Robust principal component analysis (RPCA) is a critical tool in modern machine learning, which detects outliers in the task of low-rank matrix reconstruction. In this paper, we propose a scalable and learnable non-convex approach for high-dimensional RPCA problems, which we call Learned Robust PCA (LRPCA). LRPCA is highly efficient, and its free parameters can be effectively learned to optimize via deep unfolding. Moreover, we extend deep unfolding from finite iterations to infinite iterations via a novel feedforward-recurrent-mixed neural network model. We establish the recovery guarantee of LRPCA under mild assumptions for RPCA. Numerical experiments show that LRPCA outperforms the state-of-the-art RPCA algorithms, such as ScaledGD and AltProj, on both synthetic datasets and real-world applications.},
language = {en},
urldate = {2023-04-20},
publisher = {arXiv},
author = {Cai, HanQin and Liu, Jialin and Yin, Wotao},
month = oct,
year = {2021},
note = {15 citations (Semantic Scholar/arXiv) [2023-04-20]
arXiv:2110.05649 [cs, math]},
keywords = {Computer Science - Computer Vision and Pattern Recognition, Computer Science - Information Theory, Computer Science - Machine Learning, Mathematics - Numerical Analysis},
}
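To make the deep-unfolding idea from the abstract concrete: unrolling a fixed number of iterations of an alternating low-rank/sparse update yields a feedforward network whose k-th "layer" carries its own free parameter (here a threshold zeta_k) that can be trained rather than hand-tuned. The NumPy sketch below uses a generic soft-thresholding plus truncated-SVD iteration with a hypothetical geometric threshold schedule; it illustrates the unrolled structure only, not necessarily the paper's exact LRPCA updates.

    import numpy as np

    def soft_threshold(X, zeta):
        # Entrywise shrinkage toward zero by zeta; models the sparse/outlier step.
        return np.sign(X) * np.maximum(np.abs(X) - zeta, 0.0)

    def best_rank_r(X, r):
        # Projection onto rank-r matrices via truncated SVD; models the low-rank step.
        U, s, Vt = np.linalg.svd(X, full_matrices=False)
        return U[:, :r] @ (s[:r, None] * Vt[:r, :])

    def unfolded_rpca(M, r, zetas):
        # One pass through this loop body = one "layer" of the unfolded network.
        # In a learned version, each entry of zetas would be a trained parameter.
        L = np.zeros_like(M)
        S = np.zeros_like(M)
        for zeta in zetas:
            S = soft_threshold(M - L, zeta)   # update outlier estimate
            L = best_rank_r(M - S, r)         # update low-rank estimate
        return L, S

    # Usage on a synthetic low-rank-plus-sparse matrix (hypothetical sizes/schedule).
    rng = np.random.default_rng(0)
    m = n = 100
    r = 5
    L_true = rng.standard_normal((m, r)) @ rng.standard_normal((r, n))
    S_true = np.where(rng.random((m, n)) < 0.05, 10 * rng.standard_normal((m, n)), 0.0)
    M = L_true + S_true
    zetas = [2.0 * 0.65 ** k for k in range(20)]  # hypothetical decaying schedule
    L_hat, S_hat = unfolded_rpca(M, r, zetas)
    print("relative L error:", np.linalg.norm(L_hat - L_true) / np.linalg.norm(L_true))

The sketch covers only the finite feedforward part; a feedforward-recurrent-mixed model in the spirit of the paper would, presumably, share parameters across all layers beyond some fixed depth, so that a single recurrent layer stands in for the remaining (potentially infinite) iterations.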
{"_id":"xKGnkqJS5ovQkeiFa","bibbaseid":"cai-liu-yin-learnedrobustpcaascalabledeepunfoldingapproachforhighdimensionaloutlierdetection-2021","author_short":["Cai, H.","Liu, J.","Yin, W."],"bibdata":{"bibtype":"misc","type":"misc","title":"Learned Robust PCA: A Scalable Deep Unfolding Approach for High-Dimensional Outlier Detection","shorttitle":"Learned Robust PCA","url":"http://arxiv.org/abs/2110.05649","abstract":"Robust principal component analysis (RPCA) is a critical tool in modern machine learning, which detects outliers in the task of low-rank matrix reconstruction. In this paper, we propose a scalable and learnable non-convex approach for highdimensional RPCA problems, which we call Learned Robust PCA (LRPCA). LRPCA is highly efficient, and its free parameters can be effectively learned to optimize via deep unfolding. Moreover, we extend deep unfolding from finite iterations to infinite iterations via a novel feedforward-recurrent-mixed neural network model. We establish the recovery guarantee of LRPCA under mild assumptions for RPCA. Numerical experiments show that LRPCA outperforms the state-of-the-art RPCA algorithms, such as ScaledGD and AltProj, on both synthetic datasets and real-world applications.","language":"en","urldate":"2023-04-20","publisher":"arXiv","author":[{"propositions":[],"lastnames":["Cai"],"firstnames":["HanQin"],"suffixes":[]},{"propositions":[],"lastnames":["Liu"],"firstnames":["Jialin"],"suffixes":[]},{"propositions":[],"lastnames":["Yin"],"firstnames":["Wotao"],"suffixes":[]}],"month":"October","year":"2021","note":"15 citations (Semantic Scholar/arXiv) [2023-04-20] arXiv:2110.05649 [cs, math]","keywords":"/unread, Computer Science - Computer Vision and Pattern Recognition, Computer Science - Information Theory, Computer Science - Machine Learning, Mathematics - Numerical Analysis","bibtex":"@misc{cai_learned_2021,\n\ttitle = {Learned {Robust} {PCA}: {A} {Scalable} {Deep} {Unfolding} {Approach} for {High}-{Dimensional} {Outlier} {Detection}},\n\tshorttitle = {Learned {Robust} {PCA}},\n\turl = {http://arxiv.org/abs/2110.05649},\n\tabstract = {Robust principal component analysis (RPCA) is a critical tool in modern machine learning, which detects outliers in the task of low-rank matrix reconstruction. In this paper, we propose a scalable and learnable non-convex approach for highdimensional RPCA problems, which we call Learned Robust PCA (LRPCA). LRPCA is highly efficient, and its free parameters can be effectively learned to optimize via deep unfolding. Moreover, we extend deep unfolding from finite iterations to infinite iterations via a novel feedforward-recurrent-mixed neural network model. We establish the recovery guarantee of LRPCA under mild assumptions for RPCA. 
Numerical experiments show that LRPCA outperforms the state-of-the-art RPCA algorithms, such as ScaledGD and AltProj, on both synthetic datasets and real-world applications.},\n\tlanguage = {en},\n\turldate = {2023-04-20},\n\tpublisher = {arXiv},\n\tauthor = {Cai, HanQin and Liu, Jialin and Yin, Wotao},\n\tmonth = oct,\n\tyear = {2021},\n\tnote = {15 citations (Semantic Scholar/arXiv) [2023-04-20]\narXiv:2110.05649 [cs, math]},\n\tkeywords = {/unread, Computer Science - Computer Vision and Pattern Recognition, Computer Science - Information Theory, Computer Science - Machine Learning, Mathematics - Numerical Analysis},\n}\n\n","author_short":["Cai, H.","Liu, J.","Yin, W."],"key":"cai_learned_2021","id":"cai_learned_2021","bibbaseid":"cai-liu-yin-learnedrobustpcaascalabledeepunfoldingapproachforhighdimensionaloutlierdetection-2021","role":"author","urls":{"Paper":"http://arxiv.org/abs/2110.05649"},"keyword":["/unread","Computer Science - Computer Vision and Pattern Recognition","Computer Science - Information Theory","Computer Science - Machine Learning","Mathematics - Numerical Analysis"],"metadata":{"authorlinks":{}},"html":""},"bibtype":"misc","biburl":"https://bibbase.org/zotero/victorjhu","dataSources":["CmHEoydhafhbkXXt5"],"keywords":["/unread","computer science - computer vision and pattern recognition","computer science - information theory","computer science - machine learning","mathematics - numerical analysis"],"search_terms":["learned","robust","pca","scalable","deep","unfolding","approach","high","dimensional","outlier","detection","cai","liu","yin"],"title":"Learned Robust PCA: A Scalable Deep Unfolding Approach for High-Dimensional Outlier Detection","year":2021}