A Composite Discriminator for Generative Adversarial Network based Video Super-Resolution. Wang, X., Lucas, A., Lopez-Tapia, S., Wu, X., Molina, R., & Katsaggelos, A. K. In 2019 27th European Signal Processing Conference (EUSIPCO), pages 1-5, Sep., 2019. Paper doi abstract bibtex Generative Adversarial Networks (GANs) have been used for solving the video super-resolution problem. So far, video super-resolution GAN-based methods use the traditional GAN framework which consists of a single generator and a single discriminator that are trained against each other. In this work we propose a new framework which incorporates two collaborative discriminators whose aim is to jointly improve the quality of the reconstructed video sequence. While one discriminator concentrates on general properties of the images, the second one specializes on obtaining realistically reconstructed features, such as edges. Experimental results demonstrate that the learned model outperforms current state-of-the-art models and obtains super-resolved frames, with fine details, sharp edges, and fewer artifacts.
@inproceedings{8903072,
  author    = {Wang, X. and Lucas, A. and Lopez-Tapia, S. and Wu, X. and Molina, R. and Katsaggelos, A. K.},
  booktitle = {2019 27th European Signal Processing Conference ({EUSIPCO})},
  title     = {A {Composite Discriminator} for {Generative Adversarial Network} based {Video Super-Resolution}},
  year      = {2019},
  pages     = {1--5},
  abstract  = {Generative Adversarial Networks (GANs) have been used for solving the video super-resolution problem. So far, video super-resolution GAN-based methods use the traditional GAN framework which consists of a single generator and a single discriminator that are trained against each other. In this work we propose a new framework which incorporates two collaborative discriminators whose aim is to jointly improve the quality of the reconstructed video sequence. While one discriminator concentrates on general properties of the images, the second one specializes on obtaining realistically reconstructed features, such as, edges. Experiments results demonstrate that the learned model outperforms current state of the art models and obtains super-resolved frames, with fine details, sharp edges, and fewer artifacts.},
  keywords  = {image reconstruction;image resolution;image sequences;learning (artificial intelligence);video signal processing;composite discriminator;generative adversarial networks;video super-resolution problem;video super-resolution GAN-based methods;reconstructed video sequence;super-resolved frames;collaborative discriminators;GAN framework;Generators;Training;Gallium nitride;Image edge detection;Generative adversarial networks;Video Super-Resolution;Spatially Adaptive;Generative Adversarial Networks;the Composite Discriminator},
  doi       = {10.23919/EUSIPCO.2019.8903072},
  issn      = {2076-1465},
  month     = sep,
  url       = {https://www.eurasip.org/proceedings/eusipco/eusipco2019/proceedings/papers/1570534004.pdf},
}
Downloads: 0
{"_id":"xEXJpE2YgTHP3fwHb","bibbaseid":"wang-lucas-lopeztapia-wu-molina-katsaggelos-acompositediscriminatorforgenerativeadversarialnetworkbasedvideosuperresolution-2019","authorIDs":[],"author_short":["Wang, X.","Lucas, A.","Lopez-Tapia, S.","Wu, X.","Molina, R.","Katsaggelos, A. K."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","author":[{"firstnames":["X."],"propositions":[],"lastnames":["Wang"],"suffixes":[]},{"firstnames":["A."],"propositions":[],"lastnames":["Lucas"],"suffixes":[]},{"firstnames":["S."],"propositions":[],"lastnames":["Lopez-Tapia"],"suffixes":[]},{"firstnames":["X."],"propositions":[],"lastnames":["Wu"],"suffixes":[]},{"firstnames":["R."],"propositions":[],"lastnames":["Molina"],"suffixes":[]},{"firstnames":["A.","K."],"propositions":[],"lastnames":["Katsaggelos"],"suffixes":[]}],"booktitle":"2019 27th European Signal Processing Conference (EUSIPCO)","title":"A Composite Discriminator for Generative Adversarial Network based Video Super-Resolution","year":"2019","pages":"1-5","abstract":"Generative Adversarial Networks (GANs) have been used for solving the video super-resolution problem. So far, video super-resolution GAN-based methods use the traditional GAN framework which consists of a single generator and a single discriminator that are trained against each other. In this work we propose a new framework which incorporates two collaborative discriminators whose aim is to jointly improve the quality of the reconstructed video sequence. While one discriminator concentrates on general properties of the images, the second one specializes on obtaining realistically reconstructed features, such as, edges. 
Experiments results demonstrate that the learned model outperforms current state of the art models and obtains super-resolved frames, with fine details, sharp edges, and fewer artifacts.","keywords":"image reconstruction;image resolution;image sequences;learning (artificial intelligence);video signal processing;composite discriminator;generative adversarial networks;video super-resolution problem;video super-resolution GAN-based methods;reconstructed video sequence;super-resolved frames;collaborative discriminators;GAN framework;Generators;Training;Gallium nitride;Image edge detection;Generative adversarial networks;Video Super-Resolution;Spatially Adaptive;Generative Adversarial Networks;the Composite Discriminator","doi":"10.23919/EUSIPCO.2019.8903072","issn":"2076-1465","month":"Sep.","url":"https://www.eurasip.org/proceedings/eusipco/eusipco2019/proceedings/papers/1570534004.pdf","bibtex":"@InProceedings{8903072,\n author = {X. Wang and A. Lucas and S. Lopez-Tapia and X. Wu and R. Molina and A. K. Katsaggelos},\n booktitle = {2019 27th European Signal Processing Conference (EUSIPCO)},\n title = {A Composite Discriminator for Generative Adversarial Network based Video Super-Resolution},\n year = {2019},\n pages = {1-5},\n abstract = {Generative Adversarial Networks (GANs) have been used for solving the video super-resolution problem. So far, video super-resolution GAN-based methods use the traditional GAN framework which consists of a single generator and a single discriminator that are trained against each other. In this work we propose a new framework which incorporates two collaborative discriminators whose aim is to jointly improve the quality of the reconstructed video sequence. While one discriminator concentrates on general properties of the images, the second one specializes on obtaining realistically reconstructed features, such as, edges. 
Experiments results demonstrate that the learned model outperforms current state of the art models and obtains super-resolved frames, with fine details, sharp edges, and fewer artifacts.},\n keywords = {image reconstruction;image resolution;image sequences;learning (artificial intelligence);video signal processing;composite discriminator;generative adversarial networks;video super-resolution problem;video super-resolution GAN-based methods;reconstructed video sequence;super-resolved frames;collaborative discriminators;GAN framework;Generators;Training;Gallium nitride;Image edge detection;Generative adversarial networks;Video Super-Resolution;Spatially Adaptive;Generative Adversarial Networks;the Composite Discriminator},\n doi = {10.23919/EUSIPCO.2019.8903072},\n issn = {2076-1465},\n month = {Sep.},\n url = {https://www.eurasip.org/proceedings/eusipco/eusipco2019/proceedings/papers/1570534004.pdf},\n}\n\n","author_short":["Wang, X.","Lucas, A.","Lopez-Tapia, S.","Wu, X.","Molina, R.","Katsaggelos, A. 
K."],"key":"8903072","id":"8903072","bibbaseid":"wang-lucas-lopeztapia-wu-molina-katsaggelos-acompositediscriminatorforgenerativeadversarialnetworkbasedvideosuperresolution-2019","role":"author","urls":{"Paper":"https://www.eurasip.org/proceedings/eusipco/eusipco2019/proceedings/papers/1570534004.pdf"},"keyword":["image reconstruction;image resolution;image sequences;learning (artificial intelligence);video signal processing;composite discriminator;generative adversarial networks;video super-resolution problem;video super-resolution GAN-based methods;reconstructed video sequence;super-resolved frames;collaborative discriminators;GAN framework;Generators;Training;Gallium nitride;Image edge detection;Generative adversarial networks;Video Super-Resolution;Spatially Adaptive;Generative Adversarial Networks;the Composite Discriminator"],"metadata":{"authorlinks":{}},"downloads":0},"bibtype":"inproceedings","biburl":"https://raw.githubusercontent.com/Roznn/EUSIPCO/main/eusipco2019url.bib","creationDate":"2021-02-11T19:15:22.122Z","downloads":0,"keywords":["image reconstruction;image resolution;image sequences;learning (artificial intelligence);video signal processing;composite discriminator;generative adversarial networks;video super-resolution problem;video super-resolution gan-based methods;reconstructed video sequence;super-resolved frames;collaborative discriminators;gan framework;generators;training;gallium nitride;image edge detection;generative adversarial networks;video super-resolution;spatially adaptive;generative adversarial networks;the composite discriminator"],"search_terms":["composite","discriminator","generative","adversarial","network","based","video","super","resolution","wang","lucas","lopez-tapia","wu","molina","katsaggelos"],"title":"A Composite Discriminator for Generative Adversarial Network based Video 
Super-Resolution","year":2019,"dataSources":["NqWTiMfRR56v86wRs","r6oz3cMyC99QfiuHW","ya2CyA73rpZseyrZ8","ePKPjG8C6yvpk4mEK","E6Bth2QB5BYjBMZE7","nbnEjsN7MJhurAK9x","PNQZj6FjzoxxJk4Yi","7FpDWDGJ4KgpDiGfB","bod9ms4MQJHuJgPpp","QR9t5P2cLdJuzhfzK","D8k2SxfC5dKNRFgro","7Dwzbxq93HWrJEhT6","qhF8zxmGcJfvtdeAg","fvDEHD49E2ZRwE3fb","H7crv8NWhZup4d4by","DHqokWsryttGh7pJE","vRJd4wNg9HpoZSMHD","sYxQ6pxFgA59JRhxi","w2WahSbYrbcCKBDsC","XasdXLL99y5rygCmq","3gkSihZQRfAD2KBo3","t5XMbyZbtPBo4wBGS","bEpHM2CtrwW2qE8FP","teJzFLHexaz5AQW5z"]}