CCPL: Contrastive Coherence Preserving Loss for Versatile Style Transfer. Wu, Z., Zhu, Z., Du, J., & Bai, X. In Avidan, S., Brostow, G., Cissé, M., Farinella, G. M., & Hassner, T., editors, Computer Vision – ECCV 2022, Lecture Notes in Computer Science, pages 189–206, Cham, 2022. Springer Nature Switzerland. DOI: 10.1007/978-3-031-19787-1_11.

Abstract: In this paper, we aim to devise a universally versatile style transfer method capable of performing artistic, photo-realistic, and video style transfer jointly, without seeing videos during training. Previous single-frame methods assume a strong constraint on the whole image to maintain temporal consistency, which could be violated in many cases. Instead, we make a mild and reasonable assumption that global inconsistency is dominated by local inconsistencies and devise a generic Contrastive Coherence Preserving Loss (CCPL) applied to local patches. CCPL can preserve the coherence of the content source during style transfer without degrading stylization. Moreover, it owns a neighbor-regulating mechanism, resulting in a vast reduction of local distortions and considerable visual quality improvement. Aside from its superior performance on versatile style transfer, it can be easily extended to other tasks, such as image-to-image translation. Besides, to better fuse content and style features, we propose Simple Covariance Transformation (SCT) to effectively align second-order statistics of the content feature with the style feature. Experiments demonstrate the effectiveness of the resulting model for versatile style transfer, when armed with CCPL.
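To make the loss described in the abstract concrete, below is a minimal PyTorch sketch of a contrastive coherence term on local patches: differences between neighbouring feature vectors in the stylised output are pulled toward the corresponding differences in the content source with an InfoNCE-style objective. The patch sampling scheme, the absence of a projection head, and the temperature value are simplifying assumptions for illustration, not the authors' implementation.

import torch
import torch.nn.functional as F

def ccpl_sketch(feat_content, feat_generated, num_pairs=64, tau=0.07):
    # feat_content, feat_generated: (B, C, H, W) feature maps from the same encoder layer.
    # Simplified sketch of a contrastive coherence preserving loss; details are assumptions.
    B, C, H, W = feat_generated.shape
    device = feat_generated.device
    # Random anchor positions, kept off the bottom/right border so each anchor has a neighbour.
    ys = torch.randint(0, H - 1, (num_pairs,), device=device)
    xs = torch.randint(0, W - 1, (num_pairs,), device=device)
    # Neighbour offset per anchor: either one step down or one step right.
    dy = torch.randint(0, 2, (num_pairs,), device=device)
    dx = 1 - dy

    def neighbour_diffs(feat):
        anchors = feat[:, :, ys, xs]                  # (B, C, N)
        neighbours = feat[:, :, ys + dy, xs + dx]     # (B, C, N)
        d = (anchors - neighbours).permute(0, 2, 1)   # (B, N, C)
        return F.normalize(d, dim=-1)

    d_gen = neighbour_diffs(feat_generated)   # local differences in the stylised output
    d_cnt = neighbour_diffs(feat_content)     # local differences in the content source

    # InfoNCE: the i-th generated difference should match the i-th content
    # difference (positive pair) and repel the other sampled differences.
    logits = torch.bmm(d_gen, d_cnt.transpose(1, 2)) / tau   # (B, N, N)
    labels = torch.arange(num_pairs, device=device).unsqueeze(0).expand(B, -1)
    return F.cross_entropy(logits.reshape(-1, num_pairs), labels.reshape(-1))

In practice this term would be added to the usual content and style losses; the key design choice, per the abstract, is that coherence is enforced only between neighbouring local patches rather than as a global constraint on the whole image.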
@inproceedings{wu_ccpl_2022,
address = {Cham},
series = {Lecture {Notes} in {Computer} {Science}},
title = {{CCPL}: {Contrastive} {Coherence} {Preserving} {Loss} for {Versatile} {Style} {Transfer}},
isbn = {978-3-031-19787-1},
shorttitle = {{CCPL}},
doi = {10.1007/978-3-031-19787-1_11},
abstract = {In this paper, we aim to devise a universally versatile style transfer method capable of performing artistic, photo-realistic, and video style transfer jointly, without seeing videos during training. Previous single-frame methods assume a strong constraint on the whole image to maintain temporal consistency, which could be violated in many cases. Instead, we make a mild and reasonable assumption that global inconsistency is dominated by local inconsistencies and devise a generic Contrastive Coherence Preserving Loss (CCPL) applied to local patches. CCPL can preserve the coherence of the content source during style transfer without degrading stylization. Moreover, it owns a neighbor-regulating mechanism, resulting in a vast reduction of local distortions and considerable visual quality improvement. Aside from its superior performance on versatile style transfer, it can be easily extended to other tasks, such as image-to-image translation. Besides, to better fuse content and style features, we propose Simple Covariance Transformation (SCT) to effectively align second-order statistics of the content feature with the style feature. Experiments demonstrate the effectiveness of the resulting model for versatile style transfer, when armed with CCPL.},
language = {en},
booktitle = {Computer {Vision} – {ECCV} 2022},
publisher = {Springer Nature Switzerland},
author = {Wu, Zijie and Zhu, Zhen and Du, Junping and Bai, Xiang},
editor = {Avidan, Shai and Brostow, Gabriel and Cissé, Moustapha and Farinella, Giovanni Maria and Hassner, Tal},
year = {2022},
keywords = {Contrastive learning, Image style transfer, Image-to-image translation, Temporal consistency, Video style transfer},
pages = {189--206},
}
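The abstract also names Simple Covariance Transformation (SCT) for aligning second-order statistics of the content feature with the style feature. As a rough illustration of that goal, the sketch below uses a standard whitening-and-colouring formulation as a stand-in; the SCT module actually proposed in the paper is a lighter-weight design, so treat this only as an approximation of the idea.

import torch

def covariance_alignment_sketch(f_c, f_s, eps=1e-5):
    # f_c, f_s: (C, H, W) content and style feature maps for a single image.
    # Whitening-and-colouring stand-in for second-order alignment;
    # not the paper's actual SCT module.
    C, H, W = f_c.shape
    xc = f_c.reshape(C, -1)
    xs = f_s.reshape(C, -1)

    # Centre both features per channel.
    mu_c = xc.mean(dim=1, keepdim=True)
    mu_s = xs.mean(dim=1, keepdim=True)
    xc = xc - mu_c
    xs = xs - mu_s

    eye = eps * torch.eye(C, device=f_c.device, dtype=f_c.dtype)
    cov_c = xc @ xc.t() / (xc.shape[1] - 1) + eye   # (C, C) content covariance
    cov_s = xs @ xs.t() / (xs.shape[1] - 1) + eye   # (C, C) style covariance

    # Whiten the content feature (covariance -> identity), then colour it
    # with the style covariance so its second-order statistics match the style.
    e_c, v_c = torch.linalg.eigh(cov_c)
    e_s, v_s = torch.linalg.eigh(cov_s)
    whiten = v_c @ torch.diag(e_c.clamp_min(eps).rsqrt()) @ v_c.t()
    colour = v_s @ torch.diag(e_s.clamp_min(eps).sqrt()) @ v_s.t()

    out = colour @ (whiten @ xc) + mu_s
    return out.reshape(C, H, W)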
{"_id":"XwBCDFfka593h6msy","bibbaseid":"wu-zhu-du-bai-ccplcontrastivecoherencepreservinglossforversatilestyletransfer-2022","author_short":["Wu, Z.","Zhu, Z.","Du, J.","Bai, X."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","address":"Cham","series":"Lecture Notes in Computer Science","title":"CCPL: Contrastive Coherence Preserving Loss for Versatile Style Transfer","isbn":"978-3-031-19787-1","shorttitle":"CCPL","doi":"10.1007/978-3-031-19787-1_11","abstract":"In this paper, we aim to devise a universally versatile style transfer method capable of performing artistic, photo-realistic, and video style transfer jointly, without seeing videos during training. Previous single-frame methods assume a strong constraint on the whole image to maintain temporal consistency, which could be violated in many cases. Instead, we make a mild and reasonable assumption that global inconsistency is dominated by local inconsistencies and devise a generic Contrastive Coherence Preserving Loss (CCPL) applied to local patches. CCPL can preserve the coherence of the content source during style transfer without degrading stylization. Moreover, it owns a neighbor-regulating mechanism, resulting in a vast reduction of local distortions and considerable visual quality improvement. Aside from its superior performance on versatile style transfer, it can be easily extended to other tasks, such as image-to-image translation. Besides, to better fuse content and style features, we propose Simple Covariance Transformation (SCT) to effectively align second-order statistics of the content feature with the style feature. Experiments demonstrate the effectiveness of the resulting model for versatile style transfer, when armed with CCPL.","language":"en","booktitle":"Computer Vision – ECCV 2022","publisher":"Springer Nature Switzerland","author":[{"propositions":[],"lastnames":["Wu"],"firstnames":["Zijie"],"suffixes":[]},{"propositions":[],"lastnames":["Zhu"],"firstnames":["Zhen"],"suffixes":[]},{"propositions":[],"lastnames":["Du"],"firstnames":["Junping"],"suffixes":[]},{"propositions":[],"lastnames":["Bai"],"firstnames":["Xiang"],"suffixes":[]}],"editor":[{"propositions":[],"lastnames":["Avidan"],"firstnames":["Shai"],"suffixes":[]},{"propositions":[],"lastnames":["Brostow"],"firstnames":["Gabriel"],"suffixes":[]},{"propositions":[],"lastnames":["Cissé"],"firstnames":["Moustapha"],"suffixes":[]},{"propositions":[],"lastnames":["Farinella"],"firstnames":["Giovanni","Maria"],"suffixes":[]},{"propositions":[],"lastnames":["Hassner"],"firstnames":["Tal"],"suffixes":[]}],"year":"2022","keywords":"Contrastive learning, Image style transfer, Image-to-image translation, Temporal consistency, Video style transfer","pages":"189–206","bibtex":"@inproceedings{wu_ccpl_2022,\n\taddress = {Cham},\n\tseries = {Lecture {Notes} in {Computer} {Science}},\n\ttitle = {{CCPL}: {Contrastive} {Coherence} {Preserving} {Loss} for {Versatile} {Style} {Transfer}},\n\tisbn = {978-3-031-19787-1},\n\tshorttitle = {{CCPL}},\n\tdoi = {10.1007/978-3-031-19787-1_11},\n\tabstract = {In this paper, we aim to devise a universally versatile style transfer method capable of performing artistic, photo-realistic, and video style transfer jointly, without seeing videos during training. Previous single-frame methods assume a strong constraint on the whole image to maintain temporal consistency, which could be violated in many cases. 
Instead, we make a mild and reasonable assumption that global inconsistency is dominated by local inconsistencies and devise a generic Contrastive Coherence Preserving Loss (CCPL) applied to local patches. CCPL can preserve the coherence of the content source during style transfer without degrading stylization. Moreover, it owns a neighbor-regulating mechanism, resulting in a vast reduction of local distortions and considerable visual quality improvement. Aside from its superior performance on versatile style transfer, it can be easily extended to other tasks, such as image-to-image translation. Besides, to better fuse content and style features, we propose Simple Covariance Transformation (SCT) to effectively align second-order statistics of the content feature with the style feature. Experiments demonstrate the effectiveness of the resulting model for versatile style transfer, when armed with CCPL.},\n\tlanguage = {en},\n\tbooktitle = {Computer {Vision} – {ECCV} 2022},\n\tpublisher = {Springer Nature Switzerland},\n\tauthor = {Wu, Zijie and Zhu, Zhen and Du, Junping and Bai, Xiang},\n\teditor = {Avidan, Shai and Brostow, Gabriel and Cissé, Moustapha and Farinella, Giovanni Maria and Hassner, Tal},\n\tyear = {2022},\n\tkeywords = {Contrastive learning, Image style transfer, Image-to-image translation, Temporal consistency, Video style transfer},\n\tpages = {189--206},\n}\n\n\n\n","author_short":["Wu, Z.","Zhu, Z.","Du, J.","Bai, X."],"editor_short":["Avidan, S.","Brostow, G.","Cissé, M.","Farinella, G. M.","Hassner, T."],"key":"wu_ccpl_2022","id":"wu_ccpl_2022","bibbaseid":"wu-zhu-du-bai-ccplcontrastivecoherencepreservinglossforversatilestyletransfer-2022","role":"author","urls":{},"keyword":["Contrastive learning","Image style transfer","Image-to-image translation","Temporal consistency","Video style transfer"],"metadata":{"authorlinks":{}},"html":""},"bibtype":"inproceedings","biburl":"https://bibbase.org/zotero/fsimonetta","dataSources":["pzyFFGWvxG2bs63zP"],"keywords":["contrastive learning","image style transfer","image-to-image translation","temporal consistency","video style transfer"],"search_terms":["ccpl","contrastive","coherence","preserving","loss","versatile","style","transfer","wu","zhu","du","bai"],"title":"CCPL: Contrastive Coherence Preserving Loss for Versatile Style Transfer","year":2022}