A Unified Learning Framework for Single Image Super-Resolution. Yu, J., Gao, X., Tao, D., Li, X., & Zhang, K. IEEE Transactions on Neural Networks and Learning Systems, 25(4):780–792, April 2014. doi: 10.1109/TNNLS.2013.2281313

Abstract: It has been widely acknowledged that learning- and reconstruction-based super-resolution (SR) methods are effective to generate a high-resolution (HR) image from a single low-resolution (LR) input. However, learning-based methods are prone to introduce unexpected details into resultant HR images. Although reconstruction-based methods do not generate obvious artifacts, they tend to blur fine details and end up with unnatural results. In this paper, we propose a new SR framework that seamlessly integrates learning- and reconstruction-based methods for single image SR to: 1) avoid unexpected artifacts introduced by learning-based SR and 2) restore the missing high-frequency details smoothed by reconstruction-based SR. This integrated framework learns a single dictionary from the LR input instead of from external images to hallucinate details, embeds nonlocal means filter in the reconstruction-based SR to enhance edges and suppress artifacts, and gradually magnifies the LR input to the desired high-quality SR result. We demonstrate both visually and quantitatively that the proposed framework produces better results than previous methods from the literature.
@article{yu_unified_2014,
title = {A {Unified} {Learning} {Framework} for {Single} {Image} {Super}-{Resolution}},
volume = {25},
issn = {2162-237X},
doi = {10.1109/TNNLS.2013.2281313},
abstract = {It has been widely acknowledged that learning- and reconstruction-based super-resolution (SR) methods are effective to generate a high-resolution (HR) image from a single low-resolution (LR) input. However, learning-based methods are prone to introduce unexpected details into resultant HR images. Although reconstruction-based methods do not generate obvious artifacts, they tend to blur fine details and end up with unnatural results. In this paper, we propose a new SR framework that seamlessly integrates learning- and reconstruction-based methods for single image SR to: 1) avoid unexpected artifacts introduced by learning-based SR and 2) restore the missing high-frequency details smoothed by reconstruction-based SR. This integrated framework learns a single dictionary from the LR input instead of from external images to hallucinate details, embeds nonlocal means filter in the reconstruction-based SR to enhance edges and suppress artifacts, and gradually magnifies the LR input to the desired high-quality SR result. We demonstrate both visually and quantitatively that the proposed framework produces better results than previous methods from the literature.},
number = {4},
journal = {IEEE Transactions on Neural Networks and Learning Systems},
author = {Yu, Jifei and Gao, Xinbo and Tao, Dacheng and Li, Xuelong and Zhang, Kaibing},
month = {April},
year = {2014},
pages = {780--792}
}