Variational image compression with a scale hyperprior. Ballé, J., Minnen, D., Singh, S., Hwang, S. J., & Johnston, N. arXiv:1802.01436 [cs, eess, math], May 2018.
Paper: https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c/file/5294e66e-73aa-2c4b-7509-cef017ba22fe/Ballé_et_al___2018___Variational_image_compression_with_a_scale_hyperpr.pdf.pdf
Website: http://arxiv.org/abs/1802.01436

Abstract: We describe an end-to-end trainable model for image compression based on variational autoencoders. The model incorporates a hyperprior to effectively capture spatial dependencies in the latent representation. This hyperprior relates to side information, a concept universal to virtually all modern image codecs, but largely unexplored in image compression using artificial neural networks (ANNs). Unlike existing autoencoder compression methods, our model trains a complex prior jointly with the underlying autoencoder. We demonstrate that this model leads to state-of-the-art image compression when measuring visual quality using the popular MS-SSIM index, and yields rate-distortion performance surpassing published ANN-based methods when evaluated using a more traditional metric based on squared error (PSNR). Furthermore, we provide a qualitative comparison of models trained for different distortion metrics.
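The abstract describes the architecture only at a high level, so here is a minimal, illustrative PyTorch sketch of the scale-hyperprior idea, not the authors' released implementation. The channel widths (n, m), the ReLU activations (standing in for the paper's GDN/IGDN nonlinearities), the fixed-scale Gaussian used to rate the hyper-latent z (the paper learns a non-parametric prior for z), and the additive-uniform-noise quantization proxy are all simplifying assumptions made here.

```python
# Sketch of a scale-hyperprior compression model (illustrative, not the authors' code):
# an analysis/synthesis autoencoder for the image plus a hyper-encoder/decoder that
# predicts per-element scales for the latent's conditional Gaussian entropy model.
import torch
import torch.nn as nn
import torch.nn.functional as F


def quantize(x):
    """Additive-uniform-noise proxy for rounding during training; hard rounding otherwise."""
    if torch.is_grad_enabled():
        return x + torch.empty_like(x).uniform_(-0.5, 0.5)
    return torch.round(x)


def gaussian_bits(y_hat, scale, mean=0.0):
    """Rate estimate: -log2 likelihood under a Gaussian convolved with a unit-width box."""
    scale = scale.clamp(min=1e-6)
    dist = torch.distributions.Normal(mean, scale)
    likelihood = dist.cdf(y_hat + 0.5) - dist.cdf(y_hat - 0.5)
    return -torch.log2(likelihood.clamp(min=1e-9)).sum()


class ScaleHyperprior(nn.Module):
    def __init__(self, n=128, m=192):
        super().__init__()
        conv = lambda ci, co: nn.Conv2d(ci, co, 5, stride=2, padding=2)
        deconv = lambda ci, co: nn.ConvTranspose2d(ci, co, 5, stride=2,
                                                   padding=2, output_padding=1)
        # g_a / g_s: image autoencoder (ReLU stands in for GDN/IGDN here).
        self.g_a = nn.Sequential(conv(3, n), nn.ReLU(), conv(n, n), nn.ReLU(),
                                 conv(n, n), nn.ReLU(), conv(n, m))
        self.g_s = nn.Sequential(deconv(m, n), nn.ReLU(), deconv(n, n), nn.ReLU(),
                                 deconv(n, n), nn.ReLU(), deconv(n, 3))
        # h_a / h_s: hyperprior; z is the transmitted side information, and its
        # decoded output parameterizes the scales of p(y_hat | z_hat).
        self.h_a = nn.Sequential(nn.Conv2d(m, n, 3, padding=1), nn.ReLU(),
                                 conv(n, n), nn.ReLU(), conv(n, n))
        self.h_s = nn.Sequential(deconv(n, n), nn.ReLU(), deconv(n, n), nn.ReLU(),
                                 nn.Conv2d(n, m, 3, padding=1))

    def forward(self, x, lam=0.01):
        y = self.g_a(x)
        z = self.h_a(torch.abs(y))
        z_hat = quantize(z)
        scale = F.softplus(self.h_s(z_hat))      # predicted std. devs for y
        y_hat = quantize(y)
        x_hat = self.g_s(y_hat)
        # Rate for y under the scale-conditioned Gaussian; z is charged with a crude
        # fixed-scale Gaussian here instead of the paper's learned non-parametric prior.
        bits = (gaussian_bits(y_hat, scale)
                + gaussian_bits(z_hat, torch.ones_like(z_hat) * 10.0))
        bpp = bits / (x.shape[0] * x.shape[2] * x.shape[3])
        distortion = F.mse_loss(x_hat, x)
        # Rate-distortion objective R + lambda*D (MSE scaled as if pixels were in [0, 255]).
        return bpp + lam * 255 ** 2 * distortion, bpp, distortion
```

Training would minimize the returned rate-distortion loss over natural image crops; at encoding time, y_hat and z_hat would instead be passed through an actual entropy coder driven by the same probability models, with z_hat transmitted first as side information so the decoder can reconstruct the scales before decoding y_hat.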
@article{balleVariationalImageCompression2018,
  title = {Variational image compression with a scale hyperprior},
  author = {Ballé, Johannes and Minnen, David and Singh, Saurabh and Hwang, Sung Jin and Johnston, Nick},
  journal = {arXiv:1802.01436 [cs, eess, math]},
  year = {2018},
  month = {5},
  url = {http://arxiv.org/abs/1802.01436},
  keywords = {Computer Science - Information Theory, Electrical Engineering and Systems Science - Image and Video Processing},
  abstract = {We describe an end-to-end trainable model for image compression based on variational autoencoders. The model incorporates a hyperprior to effectively capture spatial dependencies in the latent representation. This hyperprior relates to side information, a concept universal to virtually all modern image codecs, but largely unexplored in image compression using artificial neural networks (ANNs). Unlike existing autoencoder compression methods, our model trains a complex prior jointly with the underlying autoencoder. We demonstrate that this model leads to state-of-the-art image compression when measuring visual quality using the popular MS-SSIM index, and yields rate-distortion performance surpassing published ANN-based methods when evaluated using a more traditional metric based on squared error (PSNR). Furthermore, we provide a qualitative comparison of models trained for different distortion metrics.}
}
{"_id":"S9gevM2WEMyJjyyw3","bibbaseid":"ball-minnen-singh-hwang-johnston-variationalimagecompressionwithascalehyperprior-2018","author_short":["Ballé, J.","Minnen, D.","Singh, S.","Hwang, S., J.","Johnston, N."],"bibdata":{"title":"Variational image compression with a scale hyperprior","type":"article","year":"2018","keywords":"Computer Science - Information Theory,Electrical Engineering and Systems Science - Imag","websites":"http://arxiv.org/abs/1802.01436","month":"5","id":"c2ad8060-65ff-3140-aa08-50cca292c872","created":"2022-03-28T09:45:04.129Z","accessed":"2022-03-27","file_attached":"true","profile_id":"235249c2-3ed4-314a-b309-b1ea0330f5d9","group_id":"1ff583c0-be37-34fa-9c04-73c69437d354","last_modified":"2022-03-29T08:06:51.950Z","read":false,"starred":false,"authored":false,"confirmed":"true","hidden":false,"citation_key":"balleVariationalImageCompression2018","source_type":"article","notes":"arXiv: 1802.01436","private_publication":false,"abstract":"We describe an end-to-end trainable model for image compression based on variational autoencoders. The model incorporates a hyperprior to effectively capture spatial dependencies in the latent representation. This hyperprior relates to side information, a concept universal to virtually all modern image codecs, but largely unexplored in image compression using artificial neural networks (ANNs). Unlike existing autoencoder compression methods, our model trains a complex prior jointly with the underlying autoencoder. We demonstrate that this model leads to state-of-the-art image compression when measuring visual quality using the popular MS-SSIM index, and yields rate-distortion performance surpassing published ANN-based methods when evaluated using a more traditional metric based on squared error (PSNR). Furthermore, we provide a qualitative comparison of models trained for different distortion metrics.","bibtype":"article","author":"Ballé, Johannes and Minnen, David and Singh, Saurabh and Hwang, Sung Jin and Johnston, Nick","journal":"arXiv:1802.01436 [cs, eess, math]","bibtex":"@article{\n title = {Variational image compression with a scale hyperprior},\n type = {article},\n year = {2018},\n keywords = {Computer Science - Information Theory,Electrical Engineering and Systems Science - Imag},\n websites = {http://arxiv.org/abs/1802.01436},\n month = {5},\n id = {c2ad8060-65ff-3140-aa08-50cca292c872},\n created = {2022-03-28T09:45:04.129Z},\n accessed = {2022-03-27},\n file_attached = {true},\n profile_id = {235249c2-3ed4-314a-b309-b1ea0330f5d9},\n group_id = {1ff583c0-be37-34fa-9c04-73c69437d354},\n last_modified = {2022-03-29T08:06:51.950Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {true},\n hidden = {false},\n citation_key = {balleVariationalImageCompression2018},\n source_type = {article},\n notes = {arXiv: 1802.01436},\n private_publication = {false},\n abstract = {We describe an end-to-end trainable model for image compression based on variational autoencoders. The model incorporates a hyperprior to effectively capture spatial dependencies in the latent representation. This hyperprior relates to side information, a concept universal to virtually all modern image codecs, but largely unexplored in image compression using artificial neural networks (ANNs). Unlike existing autoencoder compression methods, our model trains a complex prior jointly with the underlying autoencoder. 
We demonstrate that this model leads to state-of-the-art image compression when measuring visual quality using the popular MS-SSIM index, and yields rate-distortion performance surpassing published ANN-based methods when evaluated using a more traditional metric based on squared error (PSNR). Furthermore, we provide a qualitative comparison of models trained for different distortion metrics.},\n bibtype = {article},\n author = {Ballé, Johannes and Minnen, David and Singh, Saurabh and Hwang, Sung Jin and Johnston, Nick},\n journal = {arXiv:1802.01436 [cs, eess, math]}\n}","author_short":["Ballé, J.","Minnen, D.","Singh, S.","Hwang, S., J.","Johnston, N."],"urls":{"Paper":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c/file/5294e66e-73aa-2c4b-7509-cef017ba22fe/Ballé_et_al___2018___Variational_image_compression_with_a_scale_hyperpr.pdf.pdf","Website":"http://arxiv.org/abs/1802.01436"},"biburl":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c","bibbaseid":"ball-minnen-singh-hwang-johnston-variationalimagecompressionwithascalehyperprior-2018","role":"author","keyword":["Computer Science - Information Theory","Electrical Engineering and Systems Science - Imag"],"metadata":{"authorlinks":{}}},"bibtype":"article","biburl":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c","dataSources":["sMDSdyoMCx58Wwkca","NFvNQiivTuTn9JJKL","8Pn5iG4mxAxfyzsY4","2252seNhipfTmjEBQ"],"keywords":["computer science - information theory","electrical engineering and systems science - imag"],"search_terms":["variational","image","compression","scale","hyperprior","ballé","minnen","singh","hwang","johnston"],"title":"Variational image compression with a scale hyperprior","year":2018}