2-Step Sparse-View CT Reconstruction with a Domain-Specific Perceptual Network. Wei, H., Schiffers, F., Würfl, T., Shen, D., Kim, D., Katsaggelos, A. K., & Cossairt, O. arXiv preprint arXiv:2012.04743, Dec. 2020. Paper: http://arxiv.org/abs/2012.04743

Abstract: Computed tomography is widely used to examine internal structures in a non-destructive manner. To obtain high-quality reconstructions, one typically has to acquire a densely sampled trajectory to avoid angular undersampling. However, many scenarios require a sparse-view measurement, which leads to streak artifacts if unaccounted for. Current methods do not make full use of domain-specific information and hence fail to provide reliable reconstructions for highly undersampled data. We present a novel framework for sparse-view tomography that decouples the reconstruction into two steps. First, we overcome the ill-posedness of the problem with a super-resolution network, SIN, trained on the sparse projections; the intermediate result allows a closed-form tomographic reconstruction with preserved details and greatly reduced streak artifacts. Second, a refinement network, PRN, trained on the reconstructions removes any remaining artifacts. We further propose a lightweight variant of the perceptual loss that enhances domain-specific information, boosting restoration accuracy. Our experiments demonstrate a 4 dB improvement over current solutions.
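The two-step pipeline the abstract describes can be illustrated with a short, self-contained sketch. This is not the authors' code: `sin_model` and `prn_model` below are hypothetical stand-ins for the trained SIN and PRN networks (angular interpolation and the identity, respectively), and filtered back-projection via skimage's `iradon` plays the role of the closed-form reconstruction.

```python
# Minimal sketch of the 2-step sparse-view pipeline, assuming stand-in
# networks: SIN ~ angular upsampling of the sinogram, PRN ~ identity.
import numpy as np
from skimage.data import shepp_logan_phantom
from skimage.transform import radon, iradon, resize

phantom = shepp_logan_phantom()                      # ground-truth slice
full_angles = np.linspace(0.0, 180.0, 180, endpoint=False)
sparse_angles = full_angles[::4]                     # 4x angular undersampling

# Sparse-view measurement: a sinogram with only 45 of 180 views.
sparse_sino = radon(phantom, theta=sparse_angles)

# Step 1 (SIN): super-resolve the sinogram along the angular axis.
# Stand-in for the trained network: bilinear interpolation.
def sin_model(sino, factor=4):
    h, n_views = sino.shape
    return resize(sino, (h, n_views * factor), order=1, anti_aliasing=False)

dense_sino = sin_model(sparse_sino)

# Closed-form reconstruction (FBP) of the super-resolved sinogram.
fbp = iradon(dense_sino, theta=full_angles, filter_name="ramp")

# Step 2 (PRN): refine the FBP image to suppress residual artifacts.
# Stand-in for the trained network: identity (a real PRN is a CNN).
def prn_model(img):
    return img

recon = prn_model(fbp)
print("reconstruction RMSE:", np.sqrt(np.mean((recon - phantom) ** 2)))
```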
@article{Haoyu2020,
abstract = {Computed tomography is widely used to examine internal structures in a non-destructive manner. To obtain high-quality reconstructions, one typically has to acquire a densely sampled trajectory to avoid angular undersampling. However, many scenarios require a sparse-view measurement, which leads to streak artifacts if unaccounted for. Current methods do not make full use of domain-specific information and hence fail to provide reliable reconstructions for highly undersampled data. We present a novel framework for sparse-view tomography that decouples the reconstruction into two steps. First, we overcome the ill-posedness of the problem with a super-resolution network, SIN, trained on the sparse projections; the intermediate result allows a closed-form tomographic reconstruction with preserved details and greatly reduced streak artifacts. Second, a refinement network, PRN, trained on the reconstructions removes any remaining artifacts. We further propose a lightweight variant of the perceptual loss that enhances domain-specific information, boosting restoration accuracy. Our experiments demonstrate a 4 dB improvement over current solutions.},
archivePrefix = {arXiv},
arxivId = {2012.04743},
author = {Wei, Haoyu and Schiffers, Florian and W{\"{u}}rfl, Tobias and Shen, Daming and Kim, Daniel and Katsaggelos, Aggelos K. and Cossairt, Oliver},
eprint = {2012.04743},
journal = {arXiv preprint arXiv:2012.04743},
month = {dec},
title = {{2-Step Sparse-View CT Reconstruction with a Domain-Specific Perceptual Network}},
url = {http://arxiv.org/abs/2012.04743},
year = {2020}
}
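The abstract also mentions a lightweight, domain-specific variant of the perceptual loss. The paper's exact construction is not reproduced here; the snippet below is only a generic sketch of a perceptual loss (L2 distance in the feature space of a frozen network), with a hypothetical `feature_net` standing in for the domain-specific feature extractor.

```python
# Hedged sketch of a perceptual loss: compare prediction and target in the
# feature space of a frozen network. `feature_net` is a hypothetical
# stand-in; the paper's domain-specific extractor and layer weighting differ.
import torch
import torch.nn as nn

class PerceptualLoss(nn.Module):
    def __init__(self, feature_net: nn.Module):
        super().__init__()
        self.features = feature_net.eval()
        for p in self.features.parameters():
            p.requires_grad_(False)        # the loss network stays frozen

    def forward(self, prediction, target):
        # L2 distance between the two feature maps.
        return nn.functional.mse_loss(self.features(prediction),
                                      self.features(target))

# Usage with a hypothetical shallow CNN as the feature extractor:
feature_net = nn.Sequential(nn.Conv2d(1, 16, 3, padding=1), nn.ReLU(),
                            nn.Conv2d(16, 16, 3, padding=1))
loss_fn = PerceptualLoss(feature_net)
x = torch.randn(2, 1, 64, 64)              # reconstructed batch
y = torch.randn(2, 1, 64, 64)              # ground-truth batch
print(loss_fn(x, y).item())
```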
{"_id":"Gsqur4BqSBdTFoLPv","bibbaseid":"wei-schiffers-wrfl-shen-kim-katsaggelos-cossairt-2stepsparseviewctreconstructionwithadomainspecificperceptualnetwork-2020","author_short":["Wei, H.","Schiffers, F.","Würfl, T.","Shen, D.","Kim, D.","Katsaggelos, A. K.","Cossairt, O."],"bibdata":{"bibtype":"article","type":"article","abstract":"Computed tomography is widely used to examine internal structures in a non-destructive manner. To obtain high-quality reconstructions, one typically has to acquire a densely sampled trajectory to avoid angular undersampling. However, many scenarios require a sparse-view measurement leading to streak-artifacts if unaccounted for. Current methods do not make full use of the domain-specific information, and hence fail to provide reliable reconstructions for highly undersampled data. We present a novel framework for sparse-view tomography by decoupling the reconstruction into two steps: First, we overcome its ill-posedness using a super-resolution network, SIN, trained on the sparse projections. The intermediate result allows for a closed-form tomographic reconstruction with preserved details and highly reduced streak-artifacts. Second, a refinement network, PRN, trained on the reconstructions reduces any remaining artifacts. We further propose a light-weight variant of the perceptual-loss that enhances domain-specific information, boosting restoration accuracy. Our experiments demonstrate an improvement over current solutions by 4 dB.","archiveprefix":"arXiv","arxivid":"2012.04743","author":[{"propositions":[],"lastnames":["Wei"],"firstnames":["Haoyu"],"suffixes":[]},{"propositions":[],"lastnames":["Schiffers"],"firstnames":["Florian"],"suffixes":[]},{"propositions":[],"lastnames":["Würfl"],"firstnames":["Tobias"],"suffixes":[]},{"propositions":[],"lastnames":["Shen"],"firstnames":["Daming"],"suffixes":[]},{"propositions":[],"lastnames":["Kim"],"firstnames":["Daniel"],"suffixes":[]},{"propositions":[],"lastnames":["Katsaggelos"],"firstnames":["Aggelos","K."],"suffixes":[]},{"propositions":[],"lastnames":["Cossairt"],"firstnames":["Oliver"],"suffixes":[]}],"eprint":"2012.04743","journal":"arXiv preprint arXiv:2012.04743","month":"dec","title":"2-Step Sparse-View CT Reconstruction with a Domain-Specific Perceptual Network","url":"http://arxiv.org/abs/2012.04743","year":"2020","bibtex":"@article{Haoyu2020,\nabstract = {Computed tomography is widely used to examine internal structures in a non-destructive manner. To obtain high-quality reconstructions, one typically has to acquire a densely sampled trajectory to avoid angular undersampling. However, many scenarios require a sparse-view measurement leading to streak-artifacts if unaccounted for. Current methods do not make full use of the domain-specific information, and hence fail to provide reliable reconstructions for highly undersampled data. We present a novel framework for sparse-view tomography by decoupling the reconstruction into two steps: First, we overcome its ill-posedness using a super-resolution network, SIN, trained on the sparse projections. The intermediate result allows for a closed-form tomographic reconstruction with preserved details and highly reduced streak-artifacts. Second, a refinement network, PRN, trained on the reconstructions reduces any remaining artifacts. We further propose a light-weight variant of the perceptual-loss that enhances domain-specific information, boosting restoration accuracy. 
Our experiments demonstrate an improvement over current solutions by 4 dB.},\narchivePrefix = {arXiv},\narxivId = {2012.04743},\nauthor = {Wei, Haoyu and Schiffers, Florian and W{\\\"{u}}rfl, Tobias and Shen, Daming and Kim, Daniel and Katsaggelos, Aggelos K. and Cossairt, Oliver},\neprint = {2012.04743},\njournal = {arXiv preprint arXiv:2012.04743},\nmonth = {dec},\ntitle = {{2-Step Sparse-View CT Reconstruction with a Domain-Specific Perceptual Network}},\nurl = {http://arxiv.org/abs/2012.04743},\nyear = {2020}\n}\n","author_short":["Wei, H.","Schiffers, F.","Würfl, T.","Shen, D.","Kim, D.","Katsaggelos, A. K.","Cossairt, O."],"key":"Haoyu2020","id":"Haoyu2020","bibbaseid":"wei-schiffers-wrfl-shen-kim-katsaggelos-cossairt-2stepsparseviewctreconstructionwithadomainspecificperceptualnetwork-2020","role":"author","urls":{"Paper":"http://arxiv.org/abs/2012.04743"},"metadata":{"authorlinks":{}}},"bibtype":"article","biburl":"https://sites.northwestern.edu/ivpl/files/2023/06/IVPL_Updated_publications-1.bib","dataSources":["KTWAakbPXLGfYseXn","ePKPjG8C6yvpk4mEK","ya2CyA73rpZseyrZ8","E6Bth2QB5BYjBMZE7","nbnEjsN7MJhurAK9x","PNQZj6FjzoxxJk4Yi","7FpDWDGJ4KgpDiGfB","bod9ms4MQJHuJgPpp","QR9t5P2cLdJuzhfzK","D8k2SxfC5dKNRFgro","7Dwzbxq93HWrJEhT6","qhF8zxmGcJfvtdeAg","fvDEHD49E2ZRwE3fb","H7crv8NWhZup4d4by","DHqokWsryttGh7pJE","vRJd4wNg9HpoZSMHD","sYxQ6pxFgA59JRhxi","w2WahSbYrbcCKBDsC","XasdXLL99y5rygCmq","3gkSihZQRfAD2KBo3","t5XMbyZbtPBo4wBGS","bEpHM2CtrwW2qE8FP","teJzFLHexaz5AQW5z"],"keywords":[],"search_terms":["step","sparse","view","reconstruction","domain","specific","perceptual","network","wei","schiffers","würfl","shen","kim","katsaggelos","cossairt"],"title":"2-Step Sparse-View CT Reconstruction with a Domain-Specific Perceptual Network","year":2020}