Semantic Prior Based Generative Adversarial Network for Video Super-Resolution. Wu, X., Lucas, A., Lopez-Tapia, S., Wang, X., Kim, Y. H., Molina, R., & Katsaggelos, A. K. In 2019 27th European Signal Processing Conference (EUSIPCO), pages 1-5, September 2019.
Abstract: Semantic information is widely used in the deep learning literature to improve the performance of visual media processing. In this work, we propose a semantic prior based Generative Adversarial Network (GAN) model for video super-resolution. The model fully utilizes various texture styles from different semantic categories of video-frame patches, contributing to more accurate and efficient learning for the generator. Based on the GAN framework, we introduce the semantic prior by making use of the spatial feature transform during the learning process of the generator. The patch-wise semantic prior is extracted on the whole video frame by a semantic segmentation network. A hybrid loss function is designed to guide the learning performance. Experimental results show that our proposed model is advantageous in sharpening video frames, reducing noise and artifacts, and recovering realistic textures.
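The core mechanism described above, conditioning the generator on a patch-wise semantic prior through a spatial feature transform (SFT), can be sketched in a few lines of PyTorch. The sketch below illustrates how such a modulation layer is commonly wired and is not the authors' implementation; the class name SFTLayer, the channel counts, and the (1 + gamma) formulation are assumptions.

import torch
import torch.nn as nn

class SFTLayer(nn.Module):
    """Modulate generator features with scale/shift maps predicted from a
    semantic prior (e.g. per-pixel class probabilities from a segmentation
    network). Illustrative sketch; sizes and names are not from the paper."""

    def __init__(self, feat_channels=64, prior_channels=8, hidden=32):
        super().__init__()
        # Two small 1x1-conv branches map the semantic prior to per-pixel
        # scale (gamma) and shift (beta) maps matching the feature channels.
        self.scale = nn.Sequential(
            nn.Conv2d(prior_channels, hidden, 1), nn.LeakyReLU(0.1, inplace=True),
            nn.Conv2d(hidden, feat_channels, 1))
        self.shift = nn.Sequential(
            nn.Conv2d(prior_channels, hidden, 1), nn.LeakyReLU(0.1, inplace=True),
            nn.Conv2d(hidden, feat_channels, 1))

    def forward(self, features, semantic_prior):
        # features:       (N, feat_channels, H, W) intermediate generator activations
        # semantic_prior: (N, prior_channels, H, W) segmentation probabilities,
        #                 resized to the feature resolution beforehand
        gamma = self.scale(semantic_prior)
        beta = self.shift(semantic_prior)
        return features * (1 + gamma) + beta  # spatially varying affine modulation

In use, the segmentation network's softmax output would be interpolated to the feature map's spatial size and passed in alongside the features, so that patches from different semantic categories receive different texture-generation behaviour.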
@InProceedings{8902987,
author = {X. Wu and A. Lucas and S. Lopez-Tapia and X. Wang and Y. H. Kim and R. Molina and A. K. Katsaggelos},
booktitle = {2019 27th European Signal Processing Conference (EUSIPCO)},
title = {Semantic Prior Based Generative Adversarial Network for Video Super-Resolution},
year = {2019},
pages = {1-5},
abstract = {Semantic information is widely used in the deep learning literature to improve the performance of visual media processing. In this work, we propose a semantic prior based Generative Adversarial Network (GAN) model for video super-resolution. The model fully utilizes various texture styles from different semantic categories of video-frame patches, contributing to more accurate and efficient learning for the generator. Based on the GAN framework, we introduce the semantic prior by making use of the spatial feature transform during the learning process of the generator. The patch-wise semantic prior is extracted on the whole video frame by a semantic segmentation network. A hybrid loss function is designed to guide the learning performance. Experimental results show that our proposed model is advantageous in sharpening video frames, reducing noise and artifacts, and recovering realistic textures.},
keywords = {image resolution;image segmentation;image texture;learning (artificial intelligence);video signal processing;semantic information;deep learning;visual media processing;video-frame patches;semantic segmentation network;semantic prior based generative adversarial network model;Semantics;Training;Generators;Transforms;Video Super-Resolution;Generative Adversarial Networks;Semantic Segmentation;Spatial Feature Transform;Hybrid loss function},
doi = {10.23919/EUSIPCO.2019.8902987},
issn = {2076-1465},
month = {Sep.},
url = {https://www.eurasip.org/proceedings/eusipco/eusipco2019/proceedings/papers/1570534014.pdf},
}
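The entry also mentions a hybrid loss that guides the generator's learning. Its exact composition is not spelled out here, so the following is only a sketch of a typical SR-GAN combination (pixel fidelity, perceptual feature distance, adversarial term); the weights w_pix, w_feat, w_adv and the feat_extractor argument are hypothetical placeholders, not values from the paper.

import torch
import torch.nn.functional as F

def hybrid_loss(sr, hr, disc_fake_logits, feat_extractor,
                w_pix=1.0, w_feat=0.006, w_adv=1e-3):
    # Pixel-wise fidelity between the super-resolved and ground-truth frames.
    l_pix = F.l1_loss(sr, hr)
    # Perceptual term: L1 distance in a pretrained feature space (e.g. a VGG slice).
    l_feat = F.l1_loss(feat_extractor(sr), feat_extractor(hr))
    # Adversarial term: push the discriminator's logits on generated frames
    # towards the "real" label (non-saturating generator loss).
    l_adv = F.binary_cross_entropy_with_logits(
        disc_fake_logits, torch.ones_like(disc_fake_logits))
    return w_pix * l_pix + w_feat * l_feat + w_adv * l_adv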
{"_id":"cPSzLCDxm3uQWSwvq","bibbaseid":"wu-lucas-lopeztapia-wang-kim-molina-katsaggelos-semanticpriorbasedgenerativeadversarialnetworkforvideosuperresolution-2019","authorIDs":[],"author_short":["Wu, X.","Lucas, A.","Lopez-Tapia, S.","Wang, X.","Kim, Y. H.","Molina, R.","Katsaggelos, A. K."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","author":[{"firstnames":["X."],"propositions":[],"lastnames":["Wu"],"suffixes":[]},{"firstnames":["A."],"propositions":[],"lastnames":["Lucas"],"suffixes":[]},{"firstnames":["S."],"propositions":[],"lastnames":["Lopez-Tapia"],"suffixes":[]},{"firstnames":["X."],"propositions":[],"lastnames":["Wang"],"suffixes":[]},{"firstnames":["Y.","H."],"propositions":[],"lastnames":["Kim"],"suffixes":[]},{"firstnames":["R."],"propositions":[],"lastnames":["Molina"],"suffixes":[]},{"firstnames":["A.","K."],"propositions":[],"lastnames":["Katsaggelos"],"suffixes":[]}],"booktitle":"2019 27th European Signal Processing Conference (EUSIPCO)","title":"Semantic Prior Based Generative Adversarial Network for Video Super-Resolution","year":"2019","pages":"1-5","abstract":"Semantic information is widely used in the deep learning literature to improve the performance of visual media processing. In this work, we propose a semantic prior based Generative Adversarial Network (GAN) model for video super-resolution. The model fully utilizes various texture styles from different semantic categories of video-frame patches, contributing to more accurate and efficient learning for the generator. Based on the GAN framework, we introduce the semantic prior by making use of the spatial feature transform during the learning process of the generator. The patch-wise semantic prior is extracted on the whole video frame by a semantic segmentation network. A hybrid loss function is designed to guide the learning performance. Experimental results show that our proposed model is advantageous in sharpening video frames, reducing noise and artifacts, and recovering realistic textures.","keywords":"image resolution;image segmentation;image texture;learning (artificial intelligence);video signal processing;video super-resolution;semantic information;deep learning;visual media processing;video-frame patches;semantic segmentation network;semantic prior based generative adversarial network model;Semantics;Training;Generators;Gallium nitride;Generative adversarial networks;Transforms;Image segmentation;Video Super-Resolution;Generative Adversarial Networks;Semantic Segmentation;Spatial Feature Transform;Hybrid loss function","doi":"10.23919/EUSIPCO.2019.8902987","issn":"2076-1465","month":"Sep.","url":"https://www.eurasip.org/proceedings/eusipco/eusipco2019/proceedings/papers/1570534014.pdf","bibtex":"@InProceedings{8902987,\n author = {X. Wu and A. Lucas and S. Lopez-Tapia and X. Wang and Y. H. Kim and R. Molina and A. K. Katsaggelos},\n booktitle = {2019 27th European Signal Processing Conference (EUSIPCO)},\n title = {Semantic Prior Based Generative Adversarial Network for Video Super-Resolution},\n year = {2019},\n pages = {1-5},\n abstract = {Semantic information is widely used in the deep learning literature to improve the performance of visual media processing. In this work, we propose a semantic prior based Generative Adversarial Network (GAN) model for video super-resolution. The model fully utilizes various texture styles from different semantic categories of video-frame patches, contributing to more accurate and efficient learning for the generator. 
Based on the GAN framework, we introduce the semantic prior by making use of the spatial feature transform during the learning process of the generator. The patch-wise semantic prior is extracted on the whole video frame by a semantic segmentation network. A hybrid loss function is designed to guide the learning performance. Experimental results show that our proposed model is advantageous in sharpening video frames, reducing noise and artifacts, and recovering realistic textures.},\n keywords = {image resolution;image segmentation;image texture;learning (artificial intelligence);video signal processing;video super-resolution;semantic information;deep learning;visual media processing;video-frame patches;semantic segmentation network;semantic prior based generative adversarial network model;Semantics;Training;Generators;Gallium nitride;Generative adversarial networks;Transforms;Image segmentation;Video Super-Resolution;Generative Adversarial Networks;Semantic Segmentation;Spatial Feature Transform;Hybrid loss function},\n doi = {10.23919/EUSIPCO.2019.8902987},\n issn = {2076-1465},\n month = {Sep.},\n url = {https://www.eurasip.org/proceedings/eusipco/eusipco2019/proceedings/papers/1570534014.pdf},\n}\n\n","author_short":["Wu, X.","Lucas, A.","Lopez-Tapia, S.","Wang, X.","Kim, Y. H.","Molina, R.","Katsaggelos, A. K."],"key":"8902987","id":"8902987","bibbaseid":"wu-lucas-lopeztapia-wang-kim-molina-katsaggelos-semanticpriorbasedgenerativeadversarialnetworkforvideosuperresolution-2019","role":"author","urls":{"Paper":"https://www.eurasip.org/proceedings/eusipco/eusipco2019/proceedings/papers/1570534014.pdf"},"keyword":["image resolution;image segmentation;image texture;learning (artificial intelligence);video signal processing;video super-resolution;semantic information;deep learning;visual media processing;video-frame patches;semantic segmentation network;semantic prior based generative adversarial network model;Semantics;Training;Generators;Gallium nitride;Generative adversarial networks;Transforms;Image segmentation;Video Super-Resolution;Generative Adversarial Networks;Semantic Segmentation;Spatial Feature Transform;Hybrid loss function"],"metadata":{"authorlinks":{}},"downloads":0},"bibtype":"inproceedings","biburl":"https://raw.githubusercontent.com/Roznn/EUSIPCO/main/eusipco2019url.bib","creationDate":"2021-02-11T19:15:22.081Z","downloads":0,"keywords":["image resolution;image segmentation;image texture;learning (artificial intelligence);video signal processing;video super-resolution;semantic information;deep learning;visual media processing;video-frame patches;semantic segmentation network;semantic prior based generative adversarial network model;semantics;training;generators;gallium nitride;generative adversarial networks;transforms;image segmentation;video super-resolution;generative adversarial networks;semantic segmentation;spatial feature transform;hybrid loss function"],"search_terms":["semantic","prior","based","generative","adversarial","network","video","super","resolution","wu","lucas","lopez-tapia","wang","kim","molina","katsaggelos"],"title":"Semantic Prior Based Generative Adversarial Network for Video 
Super-Resolution","year":2019,"dataSources":["NqWTiMfRR56v86wRs","r6oz3cMyC99QfiuHW","ya2CyA73rpZseyrZ8","ePKPjG8C6yvpk4mEK","E6Bth2QB5BYjBMZE7","nbnEjsN7MJhurAK9x","PNQZj6FjzoxxJk4Yi","7FpDWDGJ4KgpDiGfB","bod9ms4MQJHuJgPpp","QR9t5P2cLdJuzhfzK","D8k2SxfC5dKNRFgro","7Dwzbxq93HWrJEhT6","qhF8zxmGcJfvtdeAg","fvDEHD49E2ZRwE3fb","H7crv8NWhZup4d4by","DHqokWsryttGh7pJE","vRJd4wNg9HpoZSMHD","sYxQ6pxFgA59JRhxi","w2WahSbYrbcCKBDsC","XasdXLL99y5rygCmq","3gkSihZQRfAD2KBo3","t5XMbyZbtPBo4wBGS","bEpHM2CtrwW2qE8FP","teJzFLHexaz5AQW5z","taz5xnPrcQTmMdtqr"]}