Generated by bibbase.org

This publication list can be embedded in an existing web page by copying and pasting any of the following snippets.

JavaScript (easiest):

<script src="https://bibbase.org/service/mendeley/d5b9f595-4104-33d2-b640-e84dee1b7ded?jsonp=1"></script>

PHP:

<?php
$contents = file_get_contents("https://bibbase.org/service/mendeley/d5b9f595-4104-33d2-b640-e84dee1b7ded?jsonp=1");
print_r($contents);
?>

iFrame (not recommended):

<iframe src="https://bibbase.org/service/mendeley/d5b9f595-4104-33d2-b640-e84dee1b7ded?jsonp=1"></iframe>

For more details, see the documentation.

2024 (11)

Complex Neural Networks for Reconstructing Undersampled Spiral Diffusion Tensor Cardiovascular Magnetic Resonance Data.
Luo, Y.; Ferreira, P.; Pennell, D.; Yang, G.; Nielles-Vallespin, S.; and Scott, A.
In Society for Cardiovascular Magnetic Resonance (SCMR) Annual Meeting, 2024.

Deep Learning based Synthesis of MRI, CT and PET: Review and Analysis.
Dayarathna, S.; Islam, K. T.; Uribe, S.; Yang, G.; Hayat, M.; and Chen, Z.
Medical Image Analysis, 2024.

CCheXR-Attention: Clinical Concept Extraction and Chest X-ray Reports Classification using Modified Mogrifier and Bidirectional LSTM with Multihead Attention.
Rani, S.; Jain, A.; Kumar, A.; and Yang, G.
International Journal of Imaging Systems and Technology, 2024.

Optimized Post-processing for Diffusion Tensor Cardiac MRI with Texture-Preserving Deformable Alignment.
Wang, F.; Ferreira, P.; Wu, Y.; Munoz, C.; Wen, K.; Luo, Y.; Huang, J.; Pennell, D. J.; Scott, A. D.; and Nielles-Vallespin, S.
In Society for Cardiovascular Magnetic Resonance (SCMR) Annual Meeting, 2024.

Efficient Post-processing of Diffusion Tensor Cardiac Magnetic Imaging Using Texture-conserving Deformable Registration.
Wang, F.; Ferreira, P. F.; Wu, Y.; Munoz, C.; Wen, K.; Luo, Y.; Huang, J.; Pennell, D. J.; Scott, A. D.; and Nielles-Vallespin, S.
In SPIE Medical Imaging 2024, 2024.

High-Resolution Reference Image Assisted Volumetric Super-Resolution of Cardiac Diffusion Weighted Imaging.
Wu, Y.; Huang, J.; Wang, F.; Ferreira, P.; and Scott, A.
In SPIE Medical Imaging 2024, 2024.

SegmentAnything helps microscopy images based automatic and quantitative organoid detection and analysis.
Xing, X.; Tang, C.; Guo, Y.; Kurniawan, N.; and Yang, G.
In SPIE Medical Imaging 2024, 2024. Preprint: http://arxiv.org/abs/2309.04190

Abstract: Organoids are self-organized 3D cell clusters that closely mimic the architecture and function of in vivo tissues and organs. Quantification of organoid morphology helps in studying organ development, drug discovery, and toxicity assessment. Recent microscopy techniques provide a potent tool to acquire organoid morphology features, but manual image analysis remains a labor and time-intensive process. Thus, this paper proposes a comprehensive pipeline for microscopy analysis that leverages the SegmentAnything to precisely demarcate individual organoids. Additionally, we introduce a set of morphological properties, including perimeter, area, radius, non-smoothness, and non-circularity, allowing researchers to analyze the organoid structures quantitatively and automatically. To validate the effectiveness of our approach, we conducted tests on bright-field images of human induced pluripotent stem cells (iPSCs) derived neural-epithelial (NE) organoids. The results obtained from our automatic pipeline closely align with manual organoid detection and measurement, showcasing the capability of our proposed method in accelerating organoids morphology analysis.

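The abstract above lists concrete shape descriptors (perimeter, area, radius, non-smoothness, non-circularity) computed per detected organoid. As a rough illustration of that kind of quantification, and not the authors' implementation, the following Python sketch derives a few of these descriptors from a labelled binary mask with scikit-image; the non-circularity definition (1 - 4*pi*area/perimeter^2) is an assumed, standard choice.

# Hypothetical sketch of per-organoid morphology measurement (not the paper's code).
import numpy as np
from skimage import measure

def organoid_morphology(binary_mask):
    """Area, perimeter, equivalent radius and non-circularity for each labelled organoid."""
    labels = measure.label(binary_mask)                # split the mask into connected organoids
    stats = []
    for region in measure.regionprops(labels):
        area = float(region.area)                      # pixel count
        perimeter = float(region.perimeter)            # boundary length in pixels
        radius = np.sqrt(area / np.pi)                 # radius of the equal-area circle
        non_circularity = 1.0 - 4.0 * np.pi * area / (perimeter ** 2 + 1e-8)
        stats.append({"label": region.label, "area": area, "perimeter": perimeter,
                      "radius": radius, "non_circularity": non_circularity})
    return stats

# Toy usage with a single square "organoid".
mask = np.zeros((64, 64), dtype=bool)
mask[10:30, 10:30] = True
print(organoid_morphology(mask))
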
Post-COVID Highlights: Challenges and Solutions of AI Techniques for Swift Identification of COVID-19.
Fang, Y.; Xing, X.; Wang, S.; Walsh, S.; and Yang, G.
Current Opinion in Structural Biology, 2024. Preprint: http://arxiv.org/abs/2311.06258

Abstract: Since the onset of the COVID-19 pandemic in 2019, there has been a concerted effort to develop cost-effective, non-invasive, and rapid AI-based tools. These tools were intended to alleviate the burden on healthcare systems, control the rapid spread of the virus, and enhance intervention outcomes, all in response to this unprecedented global crisis. As we transition into a post-COVID era, we retrospectively evaluate these proposed studies and offer a review of the techniques employed in AI diagnostic models, with a focus on the solutions proposed for different challenges. This review endeavors to provide insights into the diverse solutions designed to address the multifaceted challenges that arose during the pandemic. By doing so, we aim to prepare the AI community for the development of AI tools tailored to address public health emergencies effectively.

Dynamic Multimodal Information Bottleneck for Multimodality Classification.
Fang, Y.; Wu, S.; Zhang, S.; Huang, C.; Zeng, T.; Xing, X.; Walsh, S.; and Yang, G.
In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision (WACV) 2024, pages 7696-7706, 2024.

RCAR-UNet: Retinal vessel segmentation network algorithm via novel rough attention mechanism.
Ding, W.; Sun, Y.; Huang, J.; Ju, H.; Zhang, C.; Yang, G.; and Lin, C. T.
Information Sciences, 657: 120007, 2024. DOI: 10.1016/j.ins.2023.120007

Abstract: The health status of the retinal blood vessels is a significant reference for rapid and non-invasive diagnosis of various ophthalmological, diabetic, and cardio-cerebrovascular diseases. However, retinal vessels are characterized by ambiguous boundaries, with multiple thicknesses and obscured lesion areas. These phenomena cause deep neural networks to face the characteristic channel uncertainty when segmenting retinal blood vessels. The uncertainty in feature channels will affect the channel attention coefficient, making the deep neural network incapable of paying attention to the detailed features of retinal vessels. This study proposes a retinal vessel segmentation via a rough channel attention mechanism. First, the method integrates deep neural networks to learn complex features and rough sets to handle uncertainty for designing rough neurons. Second, a rough channel attention mechanism module is constructed based on rough neurons, and embedded in U-Net skip connection for the integration of high-level and low-level features. Then, the residual connections are added to transmit low-level features to high-level to enrich network feature extraction and help back-propagate the gradient when training the model. Finally, multiple comparison experiments were carried out on three public fundus retinal image datasets to verify the validity of Rough Channel Attention Residual U-Net (RCAR-UNet) model. The results show that the RCAR-UNet model offers high superiority in accuracy, sensitivity, F1, and Jaccard similarity, especially for the precise segmentation of fragile blood vessels, guaranteeing blood vessels’ continuity.

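For readers unfamiliar with the channel attention coefficients the abstract refers to, the sketch below shows a conventional squeeze-and-excitation style channel attention block in PyTorch. It is generic background for the term only, a stand-in rather than the rough-neuron-based attention the paper actually proposes.

# Generic channel attention (squeeze-and-excitation style), for background only.
import torch
import torch.nn as nn

class ChannelAttention(nn.Module):
    def __init__(self, channels, reduction=8):
        super().__init__()
        self.fc = nn.Sequential(
            nn.Linear(channels, channels // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(channels // reduction, channels),
            nn.Sigmoid(),                              # per-channel coefficients in (0, 1)
        )

    def forward(self, x):
        b, c, _, _ = x.shape
        w = self.fc(x.mean(dim=(2, 3)))                # squeeze: global average pooling
        return x * w.view(b, c, 1, 1)                  # excite: reweight feature channels

features = torch.randn(2, 32, 64, 64)
print(ChannelAttention(32)(features).shape)            # torch.Size([2, 32, 64, 64])
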
MLC: Multi-level consistency learning for semi-supervised left atrium segmentation.
Shi, Z.; Jiang, M.; Li, Y.; Wei, B.; Wang, Z.; Wu, Y.; Tan, T.; and Yang, G.
Expert Systems with Applications, 244: 122903, 2024. DOI: 10.1016/j.eswa.2023.122903

Abstract: Atrial fibrillation is the most common type of arrhythmia associated with a high mortality rate. Left atrium segmentation is crucial for the diagnosis and treatment of atrial fibrillation. Accurate left atrium segmentation with limited labeled data is a tricky problem. In this paper, a novel multi-level consistency semi-supervised learning method is proposed for left atrium segmentation from 3D magnetic resonance images. The proposed framework can efficiently utilize limited labeled data and large amounts of unlabeled data by performing consistency predictions under task level, data level, and feature level perturbations. For task consistency, the segmentation results and signed distance maps were used for both segmentation and distance estimation tasks. For data level perturbation, random flips (horizontal or vertical) were introduced for unlabeled data. Moreover, based on virtual adversarial training, we design a multi-layer feature perturbation in the structure of skipping connection. Our method is evaluated on the publicly available Left Atrium Segmentation Challenge dataset version 2018. For the model trained with a label rate of 20%, the evaluation metrics Dice, Jaccard, ASD, and 95HD are 91.69%, 84.71%, 1.43 voxel, and 5.44 voxel, respectively. The experimental results show that the proposed method outperforms other semi-supervised learning methods and even achieves better performance than the fully supervised V-Net.

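The data-level consistency the abstract describes (predictions should agree when an unlabeled image is randomly flipped) can be written as a simple loss term. The PyTorch sketch below illustrates only that general idea under assumed NCHW shapes; the paper additionally uses task-level and feature-level consistency, which are not shown here.

# Minimal, illustrative flip-consistency loss for unlabeled images (not the paper's full framework).
import torch
import torch.nn.functional as F

def flip_consistency_loss(model, x_unlabeled, flip_dim=3):
    """Penalise disagreement between predictions on a batch and on its flipped copy."""
    pred = model(x_unlabeled)
    pred_flipped = model(torch.flip(x_unlabeled, dims=[flip_dim]))
    pred_flipped = torch.flip(pred_flipped, dims=[flip_dim])   # flip back to the original orientation
    return F.mse_loss(pred, pred_flipped)

# Toy usage with a stand-in segmentation network.
net = torch.nn.Conv2d(1, 2, kernel_size=3, padding=1)
x = torch.randn(4, 1, 32, 32)
print(flip_consistency_loss(net, x))
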
2023 (62)

A Survey, Review, and Future Trends of Skin Lesion Segmentation and Classification.
Hasan, K.; Ahamad, M. A.; Yap, C. H.; and Yang, G.
Computers in Biology and Medicine, 2023.

Navigating the Development Challenges in Creating Complex Data Systems.
Dittmer, S.; Roberts, M.; Gilbey, J.; Biguri, A.; Selby, I.; Breger, A.; Thorpe, M.; Gilbey, J.; Weir-McCall, J. R.; and Gkrania-Klotsas, E.
Nature Machine Intelligence, 2023.

Hierarchical Relational Inference for Few-Shot Learning in 3D Left Atrial Segmentation.
Li, X.; Chen, J.; Zhang, H.; Cho, Y.; Hwang, S. H.; Gao, Z.; and Yang, G.
IEEE Transactions on Emerging Topics in Computational Intelligence, 2023.

Stain Consistency Learning: Handling Stain Variation for Automatic Digital Pathology Segmentation.
Yeung, M.; Watts, T.; Tan, S. Y. W.; Ferreira, P. F.; Scott, A. D.; Nielles-Vallespin, S.; and Yang, G.
arXiv preprint arXiv:2311.06552, 2023.

Deep Learning-Based Quantification of Traction Bronchiectasis Severity For Predicting Outcome in Idiopathic Pulmonary Fibrosis.
Felder, F. N.; Nan, Y.; Yang, G.; Mackintosh, J.; Calandriello, L.; Goh, N.; Hopkins, P.; Moodley, Y.; Reynolds, P. N.; and Corte, T.
European Respiratory Journal, 11(1): 76-78, 2023.

Fuzzy Attention Neural Network to Tackle Discontinuity in Airway Segmentation.
Nan, Y.; Del Ser, J.; Tang, Z.; Tang, P.; Xing, X.; Fang, Y.; Herrera, F.; Pedrycz, W.; Walsh, S.; and Yang, G.
IEEE Transactions on Neural Networks and Learning Systems, 2023.

Hybrid Swin Deformable Attention U-Net for Medical Image Segmentation.
Wang, L.; Huang, J.; Xing, X.; and Yang, G.
In International Symposium on Medical Information Processing and Analysis (SIPAIM), 2023.

Diff-UNet: A Diffusion Embedded Network for Volumetric Segmentation.
Xing, Z.; Wan, L.; Fu, H.; Yang, G.; and Zhu, L.
arXiv preprint arXiv:2303.10326, 2023.

Outcome Prediction in Patients with Acute Ischemic Stroke by fusing MRI-based Deep Learning and Clinical Information.
Liu, Y.; Ouyang, J.; Jiang, B.; Yu, Y.; Yang, G.; Liebeskind, D.; Lansberg, M.; Albers, G.; and Zaharchuk, G.
In Annual Meeting of the American Society of Neuroradiology (ASNR 2023), 2023.

Adversarial Transformer for Repairing Human Airway Segmentation.
Tang, Z.; Yang, N.; Walsh, S.; and Yang, G.
IEEE Journal of Biomedical and Health Informatics, 2023.

A Pipeline to Further Enhance Quality, Integrity and Reusability of the NCCID Clinical Data.
Breger, A.; Selby, I.; Roberts, M.; Babar, J.; Gkrania-Klotsas, E.; Preller, J.; Sánchez, L. E.; Dittmer, S.; Thorpe, M.; and Gilbey, J.
Nature Scientific Data, 10(493), 2023.

Hunting Imaging Biomarkers in Pulmonary Fibrosis: Benchmarks of the AIIB23 Challenge.
Nan, Y.; Xing, X.; Wang, S.; Tang, Z.; Felder, F. N.; Zhang, S.; Ledda, R. E.; Ding, X.; Yu, R.; and Liu, W.
arXiv preprint arXiv:2312.13752, 2023.

High Accuracy and Cost-Saving Active Learning 3D WD-UNet for Airway Segmentation.
Wang, S.; Nan, Y.; Walsh, S.; and Yang, G.
In International Symposium on Medical Information Processing and Analysis (SIPAIM), 2023.

Improving Early Diagnosis of Primary Immunodeficiencies by Learning Causal Clinical History.
Papanastasiou, G.; Ivanov, V.; Moore, S.; Sobolevsky, L.; Hsueh, S.; Xiang, J.; Aristeridou, D.; Kritharidou, M.; Tsarapatsanis, V.; and Yang, G.
In Annual Symposium of American Medical Informatics Association (AMIA 2023), 2023.

Fuzz-ClustNet: Coupled Fuzzy Clustering and Deep Neural Networks for Arrhythmia Detection from ECG Signals.
Kumar, S.; Mallik, A.; Kumar, A.; Del Ser, J.; and Yang, G.
Computers in Biology and Medicine, 2023.

ChatAgri: Exploring Potentials of ChatGPT on Cross-linguistic Agricultural Text Classification.
Zhao, B.; Jin, W.; Del Ser, J.; and Yang, G.
Neurocomputing, 2023.

Multi-site, Multi-domain Airway Tree Modeling.
Zhang, M.; Wu, Y.; Zhang, H.; Qin, Y.; Zheng, H.; Tang, W.; Arnold, C.; Pei, C.; Yu, P.; and Nan, Y.
Medical Image Analysis, 2023.

Late Breaking Abstract - Deep learning-based outcome prediction in pulmonary fibrosis using synthetic HRCT.
Walsh, S.; Xing, X.; Mackintosh, J.; Calandriello, L.; Fang, Y.; Wang, S.; Zhang, S.; Nan, Y.; Silva, M.; and Wells, A.
European Respiratory Journal, 2023.

Deep Learning-based Prognostic Model Using Non-enhanced Cardiac Cine MRI for Outcome Prediction in Patients With Heart Failure.
Gao, Y.; Zhou, Z.; Zhang, B.; Guo, S.; Bo, K.; Li, S.; Zhang, N.; Wang, H.; Yang, G.; and Zhang, H.
European Radiology, 2023.

Less is More: Unsupervised Mask-guided Annotated CT Image Synthesis with Minimum Manual Segmentations.
Xing, X.; Papanastasiou, G.; Walsh, S.; and Yang, G.
IEEE Transactions on Medical Imaging, 2023.

Non-Imaging Medical Data Synthesis for Trustworthy AI: A Comprehensive Survey.
Xing, X.; Wu, H.; Wang, L.; Stenson, I.; Yong, M.; Del Ser, J.; Walsh, S.; and Yang, G.
ACM Computing Surveys, 2023.

Aortic Annulus Detection Based on Deep Learning for Transcatheter Aortic Valve Replacement Using Cardiac Computed Tomography.
Cho, Y.; Park, S.; Hwang, S. H.; Ko, M.; Lim, D.; Yu, C. W.; Park, S.; Kim, M.; Oh, Y.; and Yang, G.
Journal of Korean Medical Science, 2023.

T1/T2 relaxation temporal modelling from accelerated acquisitions using a Latent Transformer.
Wang, F.; Tanzer, M.; Qiao, M.; Bai, W.; Rueckert, D.; Yang, G.; and Nielles-Vallespin, S.
In Medical Image Computing and Computer Assisted Intervention MICCAI 2023 International Workshop on Statistical Atlases and Computational Models of the Heart, 2023.

CDiffMR: Can We Replace the Gaussian Noise with K-Space Undersampling for Fast MRI?
Huang, J.; Aviles-Rivero, A.; Schönlieb, C.; and Yang, G.
In Medical Image Computing and Computer Assisted Intervention (MICCAI 2023), 2023.

@inproceedings{huang_cdiffmr_2023,
  title = {CDiffMR: Can We Replace the Gaussian Noise with K-Space Undersampling for Fast MRI?},
  author = {Huang, Jiahao and Aviles-Rivero, Angelica and Schönlieb, Carola-Bibiane and Yang, Guang},
  booktitle = {Medical Image Computing and Computer Assisted Intervention (MICCAI 2023)},
  year = {2023}
}

Deep Learning-based Diffusion Tensor Cardiac Magnetic Resonance Reconstruction: A Comparison Study.
Huang, J.; Ferreira, P. F.; Wang, L.; Wu, Y.; Aviles-Rivero, A. I.; Schonlieb, C.; Scott, A. D.; Khalique, Z.; Dwornik, M.; and Rajakulasingam, R.
arXiv preprint arXiv:2304.00996. 2023.

@article{huang_deep_2023,
  title = {Deep Learning-based Diffusion Tensor Cardiac Magnetic Resonance Reconstruction: A Comparison Study},
  author = {Huang, Jiahao and Ferreira, Pedro F and Wang, Lichao and Wu, Yinzhe and Aviles-Rivero, Angelica I and Schonlieb, Carola-Bibiane and Scott, Andrew D and Khalique, Zohya and Dwornik, Maria and Rajakulasingam, Ramyah},
  journal = {arXiv preprint arXiv:2304.00996},
  year = {2023}
}

Non-Invasive Prediction of Overall Survival Time for Glioblastoma Multiforme Patients Based on Multimodal MRI Radiomics.
Zhu, J.; Ye, J.; Dong, L.; Ma, X.; Tang, N.; Xu, P.; Jin, W.; Li, R.; Yang, G.; and Lai, X.
International Journal of Imaging Systems and Technology. 2023.

@article{zhu_non-invasive_2023,
  title = {Non-Invasive Prediction of Overall Survival Time for Glioblastoma Multiforme Patients Based on Multimodal MRI Radiomics},
  author = {Zhu, Jingyu and Ye, Jianming and Dong, Leshui and Ma, Xiaofei and Tang, Na and Xu, Peng and Jin, Wei and Li, Ruipeng and Yang, Guang and Lai, Xiaobo},
  journal = {International Journal of Imaging Systems and Technology},
  year = {2023}
}

Vehicular Abandoned Object Detection Based on VANET and Edge AI in Road Scenes.
Wang, G.; Zhou, M.; Wei, X.; and Yang, G.
IEEE Transactions on Intelligent Transportation Systems. 2023.

@article{wang_vehicular_2023,
  title = {Vehicular Abandoned Object Detection Based on VANET and Edge AI in Road Scenes},
  author = {Wang, Gang and Zhou, Mingliang and Wei, Xuekai and Yang, Guang},
  journal = {IEEE Transactions on Intelligent Transportation Systems},
  year = {2023}
}

GLRP: Global and Local Contrastive Learning Based on Relative Position for Medical Image Segmentation on Cardiac MRI.
Zhao, X.; Wang, T.; Chen, J.; Jiang, B.; Li, H.; Zhang, N.; Yang, G.; and Chai, S.
International Journal of Imaging Systems and Technology. 2023.

@article{zhao_glrp_2023,
  title = {GLRP: Global and Local Contrastive Learning Based on Relative Position for Medical Image Segmentation on Cardiac MRI},
  author = {Zhao, Xin and Wang, Tongming and Chen, Jingsong and Jiang, Bingrun and Li, Haotian and Zhang, Nan and Yang, Guang and Chai, Senchun},
  journal = {International Journal of Imaging Systems and Technology},
  year = {2023}
}

Prompt Learning for Metonymy Resolution: Enhancing Performance with Internal Prior Knowledge of Pre-Trained Language Models.
Zhao, B.; Jin, W.; Zhang, Y.; Huang, S.; and Yang, G.
Knowledge-Based Systems. 2023.

@article{zhao_prompt_2023,
  title = {Prompt Learning for Metonymy Resolution: Enhancing Performance with Internal Prior Knowledge of Pre-Trained Language Models},
  author = {Zhao, Biao and Jin, Weiqiang and Zhang, Yu and Huang, Subin and Yang, Guang},
  journal = {Knowledge-Based Systems},
  year = {2023}
}

Mutually Aided Uncertainty Incorporated Dual Consistency Regularization with Pseudo Label for Semi-Supervised Medical Image Segmentation.
Lu, S.; Zhang, Z.; Yan, Z.; Wang, Y.; Cheng, T.; Zhou, R.; and Yang, G.
Neurocomputing. 2023.

@article{lu_mutually_2023,
  title = {Mutually Aided Uncertainty Incorporated Dual Consistency Regularization with Pseudo Label for Semi-Supervised Medical Image Segmentation},
  author = {Lu, Shanfu and Zhang, Zijian and Yan, Ziye and Wang, Yiran and Cheng, Tingting and Zhou, Rongrong and Yang, Guang},
  journal = {Neurocomputing},
  year = {2023}
}

MRI Radiomics for Brain Metastasis Sub-pathology Classification From Non-small Cell Lung Cancer: a Machine Learning, Multicenter Study.
Deng, F.; Liu, Z.; Fang, W.; Niu, L.; Chu, X.; Cheng, Q.; Zhang, Z.; Zhang, R.; and Yang, G.
Physical and Engineering Sciences in Medicine. 2023.

@article{deng_mri_2023,
  title = {MRI Radiomics for Brain Metastasis Sub-pathology Classification From Non-small Cell Lung Cancer: a Machine Learning, Multicenter Study},
  author = {Deng, Fuxing and Liu, Zhiyuan and Fang, Wei and Niu, Lishui and Chu, Xianjing and Cheng, Quan and Zhang, Zijian and Zhang, Rongrong and Yang, Guang},
  journal = {Physical and Engineering Sciences in Medicine},
  year = {2023}
}

Motion Estimation Based on Projective Information Disentanglement for 3D Reconstruction of Rotational Coronary Angiography.
Liu, X.; Li, S.; Wang, B.; Xu, L.; Gao, Z.; and Yang, G.
Computers in Biology and Medicine. 2023.

@article{liu_motion_2023,
  title = {Motion Estimation Based on Projective Information Disentanglement for 3D Reconstruction of Rotational Coronary Angiography},
  author = {Liu, Xiujian and Li, Si and Wang, Bin and Xu, Lin and Gao, Zhifan and Yang, Guang},
  journal = {Computers in Biology and Medicine},
  year = {2023}
}

A Clinical and Imaging Fused Deep Learning Model Matches Expert Clinician Prediction of 90-Day Stroke Outcomes.
Liu, Y.; Shah, P.; Yu, Y.; Horsey, J.; Ouyang, J.; Jiang, B.; Yang, G.; Heit, J. J.; McCullough, M. E.; and Hugdal, S. M.
American Journal of Neuroradiology. 2023.

@article{liu_clinical_2023,
  title = {A Clinical and Imaging Fused Deep Learning Model Matches Expert Clinician Prediction of 90-Day Stroke Outcomes},
  author = {Liu, Yongkai and Shah, Preya and Yu, Yannan and Horsey, Jai and Ouyang, Jiahong and Jiang, Bin and Yang, Guang and Heit, Jeremy J and McCullough, Margy E and Hugdal, Stephen M},
  journal = {American Journal of Neuroradiology},
  year = {2023}
}

Large-Kernel Attention for 3D Medical Image Segmentation.
Li, H.; Nan, Y.; Del Ser, J.; and Yang, G.
Cognitive Computation. 2023.

@article{li_large-kernel_2023,
  title = {Large-Kernel Attention for 3D Medical Image Segmentation},
  author = {Li, Hao and Nan, Yang and Del Ser, Javier and Yang, Guang},
  journal = {Cognitive Computation},
  year = {2023}
}

Is Attention All You Need in Medical Image Analysis? A Review.
Papanastasiou, G.; Dikaios, N.; Huang, J.; Wang, C.; and Yang, G.
IEEE Journal of Biomedical and Health Informatics. 2023.

@article{papanastasiou_is_2023,
  title = {Is Attention All You Need in Medical Image Analysis? A Review},
  author = {Papanastasiou, Giorgos and Dikaios, Nikolaos and Huang, Jiahao and Wang, Chengjia and Yang, Guang},
  journal = {IEEE Journal of Biomedical and Health Informatics},
  year = {2023}
}

Style Transfer and Self-Supervised Learning Powered Myocardium Infarction Super-Resolution Segmentation.
Wang, L.; Huang, J.; Xing, X.; Wu, Y.; Rajakulasingam, R.; Scott, A. D.; Ferreira, P. F.; De Silva, R.; Nielles-Vallespin, S.; and Yang, G.
In International Symposium on Medical Information Processing and Analysis (SIPAIM), 2023.

@inproceedings{wang_style_2023,
  title = {Style Transfer and Self-Supervised Learning Powered Myocardium Infarction Super-Resolution Segmentation},
  author = {Wang, Lichao and Huang, Jiahao and Xing, Xiaodan and Wu, Yinzhe and Rajakulasingam, Ramyah and Scott, Andrew D and Ferreira, Pedro F and De Silva, Ranil and Nielles-Vallespin, Sonia and Yang, Guang},
  booktitle = {International Symposium on Medical Information Processing and Analysis (SIPAIM)},
  year = {2023}
}

Real-Time Non-Invasive Imaging and Detection of Spreading Depolarizations through EEG: An Ultra-Light Explainable Deep Learning Approach.
Wu, Y.; Jewell, S.; Xing, X.; Nan, Y.; Strong, A. J.; Yang, G.; and Boutelle, M. G.
arXiv preprint arXiv:2309.03147. 2023.

@article{wu_real-time_2023,
  title = {Real-Time Non-Invasive Imaging and Detection of Spreading Depolarizations through EEG: An Ultra-Light Explainable Deep Learning Approach},
  author = {Wu, Yinzhe and Jewell, Sharon and Xing, Xiaodan and Nan, Yang and Strong, Anthony J and Yang, Guang and Boutelle, Martyn G},
  journal = {arXiv preprint arXiv:2309.03147},
  year = {2023}
}

The Beauty or the Beast: Which Aspect of Synthetic Medical Images Deserves Our Focus.
Xing, X.; Nan, Y.; Felder, F.; Walsh, S.; and Yang, G.
In IEEE International Symposium on Computer-Based Medical Systems (CBMS 2023), 2023.

@inproceedings{xing_beauty_2023,
  title = {The Beauty or the Beast: Which Aspect of Synthetic Medical Images Deserves Our Focus},
  author = {Xing, Xiaodan and Nan, Yang and Felder, Federico and Walsh, Simon and Yang, Guang},
  booktitle = {IEEE International Symposium on Computer-Based Medical Systems (CBMS 2023)},
  year = {2023}
}

You Don't Have to Be Perfect to Be Amazing: Unveil the Utility of Synthetic Images.
Xing, X.; Felder, F.; Nan, Y.; Papanastasiou, G.; Walsh, S.; and Yang, G.
In Medical Image Computing and Computer Assisted Intervention (MICCAI 2023), 2023.

@inproceedings{xing_you_2023,
  title = {You Don't Have to Be Perfect to Be Amazing: Unveil the Utility of Synthetic Images},
  author = {Xing, Xiaodan and Felder, Federico and Nan, Yang and Papanastasiou, Giorgos and Walsh, Simon and Yang, Guang},
  booktitle = {Medical Image Computing and Computer Assisted Intervention (MICCAI 2023)},
  year = {2023}
}

Large-Scale Analysis with Deep Learning for Early Diagnosis of Patients at Risk for Primary Immunodeficiencies.
Papanastasiou, G.; Sagalovich, M.; Sidhu, G.; Sobolevsky, L.; Yang, G.; Fotiadis, D.; and Palumbo, D.
In American Medical Informatics Association Summit 2023 (AMIA 2023), 2023.

@inproceedings{papanastasiou_large-scale_2023,
  title = {Large-Scale Analysis with Deep Learning for Early Diagnosis of Patients at Risk for Primary Immunodeficiencies},
  author = {Papanastasiou, Georgios and Sagalovich, Marina and Sidhu, Gurinder and Sobolevsky, Luba and Yang, Guang and Fotiadis, Dimitris and Palumbo, Donna},
  booktitle = {American Medical Informatics Association Summit 2023 (AMIA 2023)},
  year = {2023}
}

Editorial: Generative adversarial networks in cardiovascular research.
Zhang, Q.; Cukur, T.; Greenspan, H.; and Yang, G.
Frontiers in Cardiovascular Medicine, 10(October): 10-12. 2023.

@article{zhang_editorial_2023,
  title = {Editorial: Generative adversarial networks in cardiovascular research},
  author = {Zhang, Qiang and Cukur, Tolga and Greenspan, Hayit and Yang, Guang},
  journal = {Frontiers in Cardiovascular Medicine},
  volume = {10},
  number = {October},
  pages = {10-12},
  keywords = {cardiovascular magnetic resonance, deep generative models, echocardiography (Echo), generative adversarial networks (GAN), segmentation (Image processing)},
  doi = {10.3389/fcvm.2023.1307812},
  url = {https://doi.org/10.3389/fcvm.2023.1307812},
  year = {2023}
}

Deep Learning-based Prediction of Percutaneous Recanalization in Chronic Total Occlusion Using Coronary CT Angiography.
Zhou, Z.; Gao, Y.; Zhang, W.; Zhang, N.; Wang, H.; Wang, R.; Gao, Z.; Huang, X.; Zhou, S.; Dai, X.; Yang, G.; Zhang, H.; Nieman, K.; and Xu, L.
Radiology, 309(2): e231149. November 2023.

@article{zhou_deep_2023,
  title = {Deep Learning-based Prediction of Percutaneous Recanalization in Chronic Total Occlusion Using Coronary CT Angiography},
  author = {Zhou, Zhen and Gao, Yifeng and Zhang, Weiwei and Zhang, Nan and Wang, Hui and Wang, Rui and Gao, Zhifan and Huang, Xiaomeng and Zhou, Shanshan and Dai, Xu and Yang, Guang and Zhang, Heye and Nieman, Koen and Xu, Lei},
  journal = {Radiology},
  volume = {309},
  number = {2},
  pages = {e231149},
  month = {11},
  doi = {10.1148/radiol.231149},
  url = {http://www.ncbi.nlm.nih.gov/pubmed/37962501, http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=PMC10698501},
  year = {2023}
}

Abstract: Background CT is helpful in guiding the revascularization of chronic total occlusion (CTO), but manual prediction scores of percutaneous coronary intervention (PCI) success have challenges. Deep learning (DL) is expected to predict success of PCI for CTO lesions more efficiently. Purpose To develop a DL model to predict guidewire crossing and PCI outcomes for CTO using coronary CT angiography (CCTA) and evaluate its performance compared with manual prediction scores. Materials and Methods Participants with CTO lesions were prospectively identified from one tertiary hospital between January 2018 and December 2021 as the training set to develop the DL prediction model for PCI of CTO, with fivefold cross validation. The algorithm was tested using an external test set prospectively enrolled from three tertiary hospitals between January 2021 and June 2022 with the same eligibility criteria. All participants underwent preprocedural CCTA within 1 month before PCI. The end points were guidewire crossing within 30 minutes and PCI success of CTO. Results A total of 534 participants (mean age, 57.7 years ± 10.8 [SD]; 417 [78.1%] men) with 565 CTO lesions were included. In the external test set (186 participants with 189 CTOs), the DL model saved 85.0% of the reconstruction and analysis time of manual scores (mean, 73.7 seconds vs 418.2-466.9 seconds) and had higher accuracy than manual scores in predicting guidewire crossing within 30 minutes (DL, 91.0%; CT Registry of Chronic Total Occlusion Revascularization, 61.9%; Korean Multicenter CTO CT Registry [KCCT], 68.3%; CCTA-derived Multicenter CTO Registry of Japan (J-CTO), 68.8%; P < .05) and PCI success (DL, 93.7%; KCCT, 74.6%; J-CTO, 75.1%; P < .05). For DL, the area under the receiver operating characteristic curve was 0.97 (95% CI: 0.89, 0.99) for the training test set and 0.96 (95% CI: 0.90, 0.98) for the external test set. Conclusion The DL prediction model accurately predicted the percutaneous recanalization outcomes of CTO lesions and increased the efficiency of noninvasively grading the difficulty of PCI. © RSNA, 2023.

Multi-scale, Data-driven and Anatomically Constrained Deep Learning Image Registration for Adult and Fetal Echocardiography.
Hasan, M. K.; Zhu, H.; Yang, G.; and Yap, C. H.
arXiv preprint arXiv:2309.00831. 2023.

@article{hasan_multi-scale_2023,
  title = {Multi-scale, Data-driven and Anatomically Constrained Deep Learning Image Registration for Adult and Fetal Echocardiography},
  author = {Hasan, Md. Kamrul and Zhu, Haobo and Yang, Guang and Yap, Choon Hwai},
  journal = {arXiv preprint arXiv:2309.00831},
  url = {http://arxiv.org/abs/2309.00831},
  year = {2023}
}

Abstract: Temporal echocardiography image registration is a basis for clinical quantifications such as cardiac motion estimation, myocardial strain assessments, and stroke volume quantifications. In past studies, deep learning image registration (DLIR) has shown promising results and is consistently accurate and precise, requiring less computational time. We propose that a greater focus on the warped moving image's anatomic plausibility and image quality can support robust DLIR performance. Further, past implementations have focused on adult echocardiography, and there is an absence of DLIR implementations for fetal echocardiography. We propose a framework that combines three strategies for DLIR in both fetal and adult echo: (1) an anatomic shape-encoded loss to preserve physiological myocardial and left ventricular anatomical topologies in warped images; (2) a data-driven loss that is trained adversarially to preserve good image texture features in warped images; and (3) a multi-scale training scheme of a data-driven and anatomically constrained algorithm to improve accuracy. Our tests show that good anatomical topology and image textures are strongly linked to shape-encoded and data-driven adversarial losses. They improve different aspects of registration performance in a non-overlapping way, justifying their combination. Despite fundamental distinctions between adult and fetal echo images, we show that these strategies can provide excellent registration results in both adult and fetal echocardiography using the publicly available CAMUS adult echo dataset and our private multi-demographic fetal echo dataset. Our approach outperforms traditional non-DL gold standard registration approaches, including Optical Flow and Elastix. Registration improvements could be translated to more accurate and precise clinical quantification of cardiac ejection fraction, demonstrating a potential for translation.

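As a loose, illustrative stand-in for the general idea of penalising anatomical mismatch between a warped (moving) segmentation and a fixed segmentation, the minimal numpy sketch below computes a generic soft Dice overlap loss on toy masks. The function name, parameters and toy data are invented for illustration only; this is a common generic overlap term, not the shape-encoded loss proposed in the paper above.

import numpy as np

def soft_dice_loss(warped_mask, target_mask, eps=1e-6):
    # Generic soft Dice loss between a warped mask and a fixed mask.
    # Inputs are float arrays in [0, 1]; a lower loss means better
    # anatomical overlap after registration. Illustrative stand-in only,
    # not the anatomic shape-encoded loss described in the paper.
    intersection = np.sum(warped_mask * target_mask)
    union = np.sum(warped_mask) + np.sum(target_mask)
    return 1.0 - (2.0 * intersection + eps) / (union + eps)

# Toy example: two overlapping circular "ventricle" masks on a 64x64 grid.
yy, xx = np.mgrid[0:64, 0:64]
fixed = ((yy - 32) ** 2 + (xx - 32) ** 2 < 15 ** 2).astype(float)
moving = ((yy - 30) ** 2 + (xx - 35) ** 2 < 15 ** 2).astype(float)
print(soft_dice_loss(moving, fixed))  # small loss, since the masks largely overlap
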
Large-scale deep learning analysis to identify adult patients at risk for combined and common variable immunodeficiencies.
Papanastasiou, G.; Yang, G.; Fotiadis, D. I.; Dikaios, N.; Wang, C.; Huda, A.; Sobolevsky, L.; Raasch, J.; Perez, E.; Sidhu, G.; and Palumbo, D.
Communications Medicine, 3(1): 1-15. 2023.

@article{papanastasiou_large-scale-dl_2023,
  title = {Large-scale deep learning analysis to identify adult patients at risk for combined and common variable immunodeficiencies},
  author = {Papanastasiou, Giorgos and Yang, Guang and Fotiadis, Dimitris I. and Dikaios, Nikolaos and Wang, Chengjia and Huda, Ahsan and Sobolevsky, Luba and Raasch, Jason and Perez, Elena and Sidhu, Gurinder and Palumbo, Donna},
  journal = {Communications Medicine},
  volume = {3},
  number = {1},
  pages = {1-15},
  publisher = {Springer US},
  doi = {10.1038/s43856-023-00412-8},
  year = {2023}
}

CMRxRecon: An open cardiac MRI dataset for the competition of accelerated image reconstruction.
Wang, C.; Lyu, J.; Wang, S.; Qin, C.; Guo, K.; Zhang, X.; Yu, X.; Li, Y.; Wang, F.; Jin, J.; Shi, Z.; Xu, Z.; Tian, Y.; Hua, S.; Chen, Z.; Liu, M.; Sun, M.; Kuang, X.; Wang, K.; Wang, H.; Li, H.; Chu, Y.; Yang, G.; Bai, W.; Zhuang, X.; Wang, H.; Qin, J.; and Qu, X.
arXiv preprint arXiv:2309.10836, 1-14. 2023.

@article{wang_cmrxrecon_2023,
  title = {CMRxRecon: An open cardiac MRI dataset for the competition of accelerated image reconstruction},
  author = {Wang, Chengyan and Lyu, Jun and Wang, Shuo and Qin, Chen and Guo, Kunyuan and Zhang, Xinyu and Yu, Xiaotong and Li, Yan and Wang, Fanwen and Jin, Jianhua and Shi, Zhang and Xu, Ziqiang and Tian, Yapeng and Hua, Sha and Chen, Zhensen and Liu, Meng and Sun, Mengting and Kuang, Xutong and Wang, Kang and Wang, Haoran and Li, Hao and Chu, Yinghua and Yang, Guang and Bai, Wenjia and Zhuang, Xiahai and Wang, He and Qin, Jing and Qu, Xiaobo},
  journal = {arXiv preprint arXiv:2309.10836},
  pages = {1-14},
  url = {https://arxiv.org/abs/2309.10836v1},
  year = {2023}
}

Abstract: Cardiac magnetic resonance imaging (CMR) has emerged as a valuable diagnostic tool for cardiac diseases. However, a limitation of CMR is its slow imaging speed, which causes patient discomfort and introduces artifacts in the images. There has been growing interest in deep learning-based CMR imaging algorithms that can reconstruct high-quality images from highly under-sampled k-space data. However, the development of deep learning methods requires large training datasets, which have not been publicly available for CMR. To address this gap, we released a dataset that includes multi-contrast, multi-view, multi-slice and multi-coil CMR imaging data from 300 subjects. Imaging studies include cardiac cine and mapping sequences. Manual segmentations of the myocardium and chambers of all the subjects are also provided within the dataset. Scripts of state-of-the-art reconstruction algorithms were also provided as a point of reference. Our aim is to facilitate the advancement of state-of-the-art CMR image reconstruction by introducing standardized evaluation criteria and making the dataset freely accessible to the research community. Researchers can access the dataset at https://www.synapse.org/#!Synapse:syn51471091/wiki/.

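For readers unfamiliar with the reconstruction task this dataset targets, the following minimal numpy sketch (a generic illustration with invented names, not challenge code) retrospectively undersamples the Cartesian k-space of a toy image and forms the zero-filled reconstruction that learning-based methods are then trained to de-alias.

import numpy as np

def undersample_kspace(image, acceleration=4, num_low_freqs=8, seed=0):
    # Retrospective 1D Cartesian undersampling along phase-encoding lines:
    # keep a fully sampled low-frequency band plus random lines, mimicking
    # the accelerated acquisitions that reconstruction methods train on.
    rng = np.random.default_rng(seed)
    kspace = np.fft.fftshift(np.fft.fft2(image))
    ny = image.shape[0]
    mask = np.zeros(ny, dtype=bool)
    centre = ny // 2
    mask[centre - num_low_freqs // 2 : centre + num_low_freqs // 2] = True
    mask |= rng.random(ny) < 1.0 / acceleration
    undersampled = kspace * mask[:, None]
    zero_filled = np.abs(np.fft.ifft2(np.fft.ifftshift(undersampled)))
    return zero_filled, mask

# Toy "phantom": a bright square on a 128x128 grid.
image = np.zeros((128, 128))
image[48:80, 48:80] = 1.0
recon, mask = undersample_kspace(image)
print(mask.mean(), float(np.abs(recon - image).mean()))  # sampling fraction and aliasing error
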
Is Autoencoder Truly Applicable for 3d CT Super-Resolution?
Luo, W.; Xing, X.; and Yang, G.
Proceedings - International Symposium on Biomedical Imaging, 2023-April: 1-4. 2023.

@article{luo_autoencoder_2023,
  title = {Is Autoencoder Truly Applicable for 3d CT Super-Resolution?},
  author = {Luo, Weixun and Xing, Xiaodan and Yang, Guang},
  journal = {Proceedings - International Symposium on Biomedical Imaging},
  volume = {2023-April},
  pages = {1-4},
  publisher = {IEEE},
  keywords = {Autoencoder, CT, super-resolution},
  doi = {10.1109/ISBI53787.2023.10230786},
  year = {2023}
}

Abstract: Featured by a bottleneck structure, autoencoder (AE) and its variants have been largely applied in various medical image analysis tasks, such as segmentation, reconstruction and de-noising. Despite of their promising performances in aforementioned tasks, in this paper, we claim that AE models are not applicable to single image super-resolution (SISR) for 3D CT data. Our hypothesis is that the bottleneck architecture that resizes feature maps in AE models degrades the details of input images, thus can sabotage the performance of super-resolution. Although U-Net proposed skip connections that merge information from different levels, we claim that the degrading impact of feature resizing operations could hardly be removed by skip connections. By conducting large-scale ablation experiments and comparing the performance between models with and without the bottleneck design on a public CT lung dataset, we have discovered that AE models, including U-Net, have failed to achieve a compatible SISR result (p < 0.05 by Student's t-test) compared to the baseline model. Our work is the first comparative study investigating the suitability of AE architecture for 3D CT SISR tasks and brings a rationale for researchers to re-think the choice of model architectures especially for 3D CT SISR tasks. The full implementation and trained models can be found at: https://github.com/Roldbach/Autoencoder-3D-CT-SISR

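The paper's core claim, that the resize operations in a bottleneck discard detail that later upsampling cannot restore, can be loosely illustrated at the signal level. The toy numpy sketch below is only a crude analogy with invented names, not the authors' experiment: it average-pools a fine checkerboard and upsamples it again, destroying the high-frequency pattern.

import numpy as np

def down_up(image, factor=2):
    # Average-pool the image by `factor`, then upsample by nearest-neighbour.
    # The round trip is a crude stand-in for the resize operations inside a
    # bottleneck autoencoder: detail removed by pooling is not recovered
    # by the upsampling alone.
    h, w = image.shape
    pooled = image.reshape(h // factor, factor, w // factor, factor).mean(axis=(1, 3))
    return np.repeat(np.repeat(pooled, factor, axis=0), factor, axis=1)

# High-frequency test pattern: a fine checkerboard.
yy, xx = np.mgrid[0:64, 0:64]
checker = ((yy + xx) % 2).astype(float)
restored = down_up(checker)
print(float(np.abs(restored - checker).mean()))  # 0.5: the fine pattern is lost entirely
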
Editorial: Advances in machine learning methods facilitating collaborative image-based decision making for neuroscience.
Wang, C.; Zhang, H.; Papanastasiou, G.; and Yang, G.
Frontiers in Computational Neuroscience, 17. 2023.

@article{wang_editorial_2023,
  title = {Editorial: Advances in machine learning methods facilitating collaborative image-based decision making for neuroscience},
  author = {Wang, Chengjia and Zhang, Heye and Papanastasiou, Georgios and Yang, Guang},
  journal = {Frontiers in Computational Neuroscience},
  volume = {17},
  keywords = {IoT, dynamic convolution, federated learning, heterogeneous models, machine learning, multi-modality, transformer},
  doi = {10.3389/fncom.2023.1267489},
  year = {2023}
}

ViGU: Vision GNN U-Net for fast MRI.
Huang, J.; Aviles-Rivero, A. I.; Schonlieb, C. B.; and Yang, G.
Proceedings - International Symposium on Biomedical Imaging, 2023-April: 11-15. 2023.

@article{huang_vigu_2023,
  title = {ViGU: Vision GNN U-Net for fast MRI},
  author = {Huang, Jiahao and Aviles-Rivero, Angelica I. and Schonlieb, Carola Bibiane and Yang, Guang},
  journal = {Proceedings - International Symposium on Biomedical Imaging},
  volume = {2023-April},
  pages = {11-15},
  keywords = {Fast MRI, Graph Neural Network (GNN)},
  doi = {10.1109/ISBI53787.2023.10230600},
  year = {2023}
}

Abstract: Deep learning models have been widely applied for fast MRI. The majority of existing deep learning models, e.g., convolutional neural networks, work on data with Euclidean or regular grids structures. However, high-dimensional features extracted from MR data could be encapsulated in non-Euclidean manifolds. This disparity between the go-to assumption of existing models and data requirements limits the flexibility to capture irregular anatomical features in MR data. In this work, we introduce a novel Vision GNN type network for fast MRI called Vision GNN U-Net (ViGU). More precisely, the pixel array is first embedded into patches and then converted into a graph. Secondly, a U-shape network is developed using several graph blocks in symmetrical encoder and decoder paths. Moreover, we show that the proposed ViGU can also benefit from Generative Adversarial Networks yielding to its variant ViGU-GAN. We demonstrate, through numerical and visual experiments, that the proposed ViGU and GAN variant outperform existing CNN and GAN-based methods. Moreover, we show that the proposed network readily competes with approaches based on Transformers while requiring a fraction of the computational cost. More importantly, the graph structure of the network reveals how the network extracts features from MR images, providing intuitive explainability. The code is publicly available at https://github.com/ayanglab/ViGU.

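To make the patch-to-graph step described in the abstract concrete, here is a minimal, hypothetical numpy sketch of the generic Vision-GNN-style preprocessing it refers to: split an image into non-overlapping patches, treat each patch as a node, and connect each node to its k most similar patches. Function names and parameter choices are illustrative and not taken from the ViGU implementation.

import numpy as np

def image_to_patch_graph(image, patch=8, k=4):
    # Split an image into non-overlapping patches and build a kNN graph.
    # Each patch becomes a node whose feature vector is its flattened pixels;
    # edges connect each node to its k nearest nodes in Euclidean distance.
    h, w = image.shape
    nodes = (image.reshape(h // patch, patch, w // patch, patch)
                  .transpose(0, 2, 1, 3)
                  .reshape(-1, patch * patch))
    dists = np.linalg.norm(nodes[:, None, :] - nodes[None, :, :], axis=-1)
    np.fill_diagonal(dists, np.inf)              # exclude self-loops
    neighbours = np.argsort(dists, axis=1)[:, :k]
    return nodes, neighbours

image = np.random.default_rng(0).random((64, 64))
nodes, neighbours = image_to_patch_graph(image)
print(nodes.shape, neighbours.shape)  # 64 patch nodes of dim 64, 4 neighbours each
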
Focus on machine learning models in medical imaging.
Papanastasiou, G.; García Seco de Herrera, A.; Wang, C.; Zhang, H.; Yang, G.; and Wang, G.
Physics in Medicine and Biology, 68(1): 0-4. 2023.

@article{papanastasiou_focus_2023,
  title = {Focus on machine learning models in medical imaging},
  author = {Papanastasiou, Giorgos and García Seco de Herrera, Alba and Wang, Chengjia and Zhang, Heye and Yang, Guang and Wang, Ge},
  journal = {Physics in Medicine and Biology},
  volume = {68},
  number = {1},
  pages = {0-4},
  publisher = {IOP Publishing},
  doi = {10.1088/1361-6560/aca069},
  year = {2023}
}

Multiple Adversarial Learning Based Angiography Reconstruction for Ultra-Low-Dose Contrast Medium CT.
Zhang, W.; Zhou, Z.; Gao, Z.; Yang, G.; Xu, L.; Wu, W.; and Zhang, H.
IEEE Journal of Biomedical and Health Informatics, 27(1): 409-420. 2023.

@article{zhang_multiple_2023,
  title = {Multiple Adversarial Learning Based Angiography Reconstruction for Ultra-Low-Dose Contrast Medium CT},
  author = {Zhang, Weiwei and Zhou, Zhen and Gao, Zhifan and Yang, Guang and Xu, Lei and Wu, Weiwen and Zhang, Heye},
  journal = {IEEE Journal of Biomedical and Health Informatics},
  volume = {27},
  number = {1},
  pages = {409-420},
  publisher = {IEEE},
  keywords = {Angiography CT reconstruction, adaptive fusion, customized windowing, iodinated contrast medium, multiple adversarial learning, ultra-low-dose},
  doi = {10.1109/JBHI.2022.3213595},
  year = {2023}
}

Abstract: Iodinated contrast medium (ICM) dose reduction is beneficial for decreasing potential health risk to renal-insufficiency patients in CT scanning. Due to the low-intensity vessel in ultra-low-dose-ICM CT angiography, it cannot provide clinical diagnosis of vascular diseases. Angiography reconstruction for ultra-low-dose-ICM CT can enhance vascular intensity for directly vascular diseases diagnosis. However, the angiography reconstruction is challenging since patient individual differences and vascular disease diversity. In this paper, we propose a Multiple Adversarial Learning based Angiography Reconstruction (i.e., MALAR) framework to enhance vascular intensity. Specifically, a bilateral learning mechanism is developed for mapping a relationship between source and target domains rather than the image-to-image mapping. Then, a dual correlation constraint is introduced to characterize both distribution uniformity from across-domain features and sample inconsistency within domain simultaneously. Finally, an adaptive fusion module by combining multi-scale information and long-range interactive dependency is explored to alleviate the interference of high-noise metal. Experiments are performed on CT sequences with different ICM doses. Quantitative results based on multiple metrics demonstrate the effectiveness of our MALAR on angiography reconstruction. Qualitative assessments by radiographers confirm the potential of our MALAR for the clinical diagnosis of vascular diseases.

Artificial intelligence-based full aortic CT angiography imaging with ultra-low-dose contrast medium: a preliminary study.
Zhou, Z.; Gao, Y.; Zhang, W.; Bo, K.; Zhang, N.; Wang, H.; Wang, R.; Du, Z.; Firmin, D.; Yang, G.; Zhang, H.; and Xu, L.
European Radiology, 33(1): 678-689. 2023.

@article{zhou_artificial_2023,
  title = {Artificial intelligence-based full aortic CT angiography imaging with ultra-low-dose contrast medium: a preliminary study},
  author = {Zhou, Zhen and Gao, Yifeng and Zhang, Weiwei and Bo, Kairui and Zhang, Nan and Wang, Hui and Wang, Rui and Du, Zhiqiang and Firmin, David and Yang, Guang and Zhang, Heye and Xu, Lei},
  journal = {European Radiology},
  volume = {33},
  number = {1},
  pages = {678-689},
  keywords = {Aortic CT angiography, Augmented cycle-consistent adversarial framework, Contrast medium, Diagnostic accuracy, Image quality},
  doi = {10.1007/s00330-022-08975-1},
  year = {2023}
}

Abstract: Objectives: To further reduce the contrast medium (CM) dose of full aortic CT angiography (ACTA) imaging using the augmented cycle-consistent adversarial framework (Au-CycleGAN) algorithm. Methods: We prospectively enrolled 150 consecutive patients with suspected aortic disease. All received ACTA scans of ultra-low-dose CM (ULDCM) protocol and low-dose CM (LDCM) protocol. These data were randomly assigned to the training datasets (n = 100) and the validation datasets (n = 50). The ULDCM images were reconstructed by the Au-CycleGAN algorithm. Then, the AI-based ULDCM images were compared with LDCM images in terms of image quality and diagnostic accuracy. Results: The mean image quality score of each location in the AI-based ULDCM group was higher than that in the ULDCM group but a little lower than that in the LDCM group (all p < 0.05). All AI-based ULDCM images met the diagnostic requirements (score ≥ 3). Except for the image noise, the AI-based ULDCM images had higher attenuation value than the ULDCM and LDCM images as well as higher SNR and CNR in all locations of the aorta analyzed (all p < 0.05). Similar results were also seen in obese patients (BMI > 25, all p < 0.05). Using the findings of LDCM images as the reference, the AI-based ULDCM images showed good diagnostic parameters and no significant differences in any of the analyzed aortic disease diagnoses (all K-values > 0.80, p < 0.05). Conclusions: The required dose of CM for full ACTA imaging can be reduced to one-third of the CM dose of the LDCM protocol while maintaining image quality and diagnostic accuracy using the Au-CycleGAN algorithm. Key Points: (1) The required dose of contrast medium (CM) for full ACTA imaging can be reduced to one-third of the CM dose of the low-dose contrast medium (LDCM) protocol using the Au-CycleGAN algorithm. (2) Except for the image noise, the AI-based ultra-low-dose contrast medium (ULDCM) images had better quantitative image quality parameters than the ULDCM and LDCM images. (3) No significant diagnostic differences were noted between the AI-based ULDCM and LDCM images regarding all the analyzed aortic disease diagnoses.

\n\n\n
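For readers unfamiliar with the cycle-consistent adversarial framework named in the entry above, the following is a generic, illustrative NumPy sketch of a CycleGAN-style cycle-consistency loss between two image domains (here standing in for ultra-low-dose and low-dose contrast images). It is not the authors' Au-CycleGAN implementation; the generator callables, array shapes, and weighting factor are assumptions for illustration only.

```python
import numpy as np

def l1(a, b):
    """Mean absolute error between two image arrays."""
    return float(np.mean(np.abs(a - b)))

def cycle_consistency_loss(gen_ab, gen_ba, batch_a, batch_b, lam=10.0):
    """Generic CycleGAN-style cycle loss: A -> B -> A and B -> A -> B.

    gen_ab / gen_ba are assumed callables mapping one image domain to the
    other (e.g. ultra-low-dose -> low-dose contrast and back).
    """
    loss_a = l1(gen_ba(gen_ab(batch_a)), batch_a)   # reconstruct domain A
    loss_b = l1(gen_ab(gen_ba(batch_b)), batch_b)   # reconstruct domain B
    return lam * (loss_a + loss_b)

# Toy usage with identity "generators" on random images.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    a = rng.random((2, 64, 64))
    b = rng.random((2, 64, 64))
    identity = lambda x: x
    print(cycle_consistency_loss(identity, identity, a, b))  # -> 0.0
```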
\n\n\n
\n \n\n \n \n \n \n \n \n The Missing U for Efficient Diffusion Models.\n \n \n \n \n\n\n \n Calvo-Ordonez, S.; Cheng, C.; Huang, J.; Zhang, L.; Yang, G.; Schonlieb, C.; and Aviles-Rivero, A., I.\n\n\n \n\n\n\n arXiv preprint arXiv:2310.20092. 10 2023.\n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n \n \"TheWebsite\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {The Missing U for Efficient Diffusion Models},\n type = {article},\n year = {2023},\n websites = {http://arxiv.org/abs/2310.20092},\n month = {10},\n day = {30},\n id = {0407fa2d-81bc-3794-8478-8f5c74a05842},\n created = {2024-01-13T07:02:56.377Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:14:19.057Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Diffusion Probabilistic Models stand as a critical tool in generative modelling, enabling the generation of complex data distributions. This family of generative models yields record-breaking performance in tasks such as image synthesis, video generation, and molecule design. Despite their capabilities, their efficiency, especially in the reverse process, remains a challenge due to slow convergence rates and high computational costs. In this paper, we introduce an approach that leverages continuous dynamical systems to design a novel denoising network for diffusion models that is more parameter-efficient, exhibits faster convergence, and demonstrates increased noise robustness. Experimenting with Denoising Diffusion Probabilistic Models (DDPMs), our framework operates with approximately a quarter of the parameters, and $\\sim$ 30\\% of the Floating Point Operations (FLOPs) compared to standard U-Nets in DDPMs. Furthermore, our model is notably faster in inference than the baseline when measured in fair and equal conditions. We also provide a mathematical intuition as to why our proposed reverse process is faster as well as a mathematical discussion of the empirical tradeoffs in the denoising downstream task. Finally, we argue that our method is compatible with existing performance enhancement techniques, enabling further improvements in efficiency, quality, and speed.},\n bibtype = {article},\n author = {Calvo-Ordonez, Sergio and Cheng, Chun-Wun and Huang, Jiahao and Zhang, Lipei and Yang, Guang and Schonlieb, Carola-Bibiane and Aviles-Rivero, Angelica I},\n journal = {arXiv preprint arXiv:2310.20092}\n}
\n
\n\n\n
\n Diffusion Probabilistic Models stand as a critical tool in generative modelling, enabling the generation of complex data distributions. This family of generative models yields record-breaking performance in tasks such as image synthesis, video generation, and molecule design. Despite their capabilities, their efficiency, especially in the reverse process, remains a challenge due to slow convergence rates and high computational costs. In this paper, we introduce an approach that leverages continuous dynamical systems to design a novel denoising network for diffusion models that is more parameter-efficient, exhibits faster convergence, and demonstrates increased noise robustness. Experimenting with Denoising Diffusion Probabilistic Models (DDPMs), our framework operates with approximately a quarter of the parameters, and $\\sim$ 30\\% of the Floating Point Operations (FLOPs) compared to standard U-Nets in DDPMs. Furthermore, our model is notably faster in inference than the baseline when measured in fair and equal conditions. We also provide a mathematical intuition as to why our proposed reverse process is faster as well as a mathematical discussion of the empirical tradeoffs in the denoising downstream task. Finally, we argue that our method is compatible with existing performance enhancement techniques, enabling further improvements in efficiency, quality, and speed.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Enhancing Super-Resolution Networks through Realistic Thick-Slice CT Simulation.\n \n \n \n \n\n\n \n Tang, Z.; Xing, X.; and Yang, G.\n\n\n \n\n\n\n arXiv preprint arXiv:2307.10182, 1-11. 2023.\n \n\n\n\n
\n\n\n\n \n \n \"EnhancingPaper\n  \n \n \n \"EnhancingWebsite\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Enhancing Super-Resolution Networks through Realistic Thick-Slice CT Simulation},\n type = {article},\n year = {2023},\n pages = {1-11},\n volume = {XXX},\n websites = {http://arxiv.org/abs/2307.10182},\n id = {78cb73c1-bffb-3bce-ad94-719d1d477bb6},\n created = {2024-01-13T07:02:56.801Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:14:19.229Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {This study aims to develop and evaluate an innovative simulation algorithm for generating thick-slice CT images that closely resemble actual images in the AAPM-Mayo's 2016 Low Dose CT Grand Challenge dataset. The proposed method was evaluated using Peak Signal-to-Noise Ratio (PSNR) and Root Mean Square Error (RMSE) metrics, with the hypothesis that our simulation would produce images more congruent with their real counterparts. Our proposed method demonstrated substantial enhancements in terms of both PSNR and RMSE over other simulation methods. The highest PSNR values were obtained with the proposed method, yielding 49.7369 $\\pm$ 2.5223 and 48.5801 $\\pm$ 7.3271 for D45 and B30 reconstruction kernels, respectively. The proposed method also registered the lowest RMSE with values of 0.0068 $\\pm$ 0.0020 and 0.0108 $\\pm$ 0.0099 for D45 and B30, respectively, indicating a distribution more closely aligned with the authentic thick-slice image. Further validation of the proposed simulation algorithm was conducted using the TCIA LDCT-and-Projection-data dataset. The generated images were then leveraged to train four distinct super-resolution (SR) models, which were subsequently evaluated using the real thick-slice images from the 2016 Low Dose CT Grand Challenge dataset. When trained with data produced by our novel algorithm, all four SR models exhibited enhanced performance.},\n bibtype = {article},\n author = {Tang, Zeyu and Xing, Xiaodan and Yang, Guang},\n journal = {arXiv preprint arXiv:2307.10182},\n number = {X}\n}
\n
\n\n\n
\n This study aims to develop and evaluate an innovative simulation algorithm for generating thick-slice CT images that closely resemble actual images in the AAPM-Mayo's 2016 Low Dose CT Grand Challenge dataset. The proposed method was evaluated using Peak Signal-to-Noise Ratio (PSNR) and Root Mean Square Error (RMSE) metrics, with the hypothesis that our simulation would produce images more congruent with their real counterparts. Our proposed method demonstrated substantial enhancements in terms of both PSNR and RMSE over other simulation methods. The highest PSNR values were obtained with the proposed method, yielding 49.7369 $\\pm$ 2.5223 and 48.5801 $\\pm$ 7.3271 for D45 and B30 reconstruction kernels, respectively. The proposed method also registered the lowest RMSE with values of 0.0068 $\\pm$ 0.0020 and 0.0108 $\\pm$ 0.0099 for D45 and B30, respectively, indicating a distribution more closely aligned with the authentic thick-slice image. Further validation of the proposed simulation algorithm was conducted using the TCIA LDCT-and-Projection-data dataset. The generated images were then leveraged to train four distinct super-resolution (SR) models, which were subsequently evaluated using the real thick-slice images from the 2016 Low Dose CT Grand Challenge dataset. When trained with data produced by our novel algorithm, all four SR models exhibited enhanced performance.\n
\n\n\n
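The PSNR and RMSE figures quoted in the abstract above are standard full-reference image-quality metrics. The sketch below is not code from the cited work; it shows one common way to compute both for a simulated thick-slice CT slice against its real counterpart, assuming intensities normalised to [0, 1], which is consistent with the reported RMSE magnitudes.

```python
import numpy as np

def rmse(pred, ref):
    """Root mean square error between two images."""
    return float(np.sqrt(np.mean((pred - ref) ** 2)))

def psnr(pred, ref, data_range=1.0):
    """Peak signal-to-noise ratio in dB for a given dynamic range."""
    err = rmse(pred, ref)
    if err == 0:
        return float("inf")
    return float(20.0 * np.log10(data_range / err))

# Toy example: a slightly noisy "simulated" slice against a reference slice.
rng = np.random.default_rng(42)
reference = rng.random((512, 512))
simulated = np.clip(reference + rng.normal(0, 0.003, reference.shape), 0, 1)
print(f"RMSE: {rmse(simulated, reference):.4f}, PSNR: {psnr(simulated, reference):.2f} dB")
```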
\n\n\n
\n \n\n \n \n \n \n \n \n Functional Outcome Prediction in Acute Ischemic Stroke Using a Fused Imaging and Clinical Deep Learning Model.\n \n \n \n \n\n\n \n Liu, Y.; Yu, Y.; Ouyang, J.; Jiang, B.; Yang, G.; Ostmeier, S.; Wintermark, M.; Michel, P.; Liebeskind, D., S.; Lansberg, M., G.; Albers, G., W.; and Zaharchuk, G.\n\n\n \n\n\n\n Stroke, 54(9): 2316-2327. 2023.\n \n\n\n\n
\n\n\n\n \n \n \"FunctionalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Functional Outcome Prediction in Acute Ischemic Stroke Using a Fused Imaging and Clinical Deep Learning Model},\n type = {article},\n year = {2023},\n keywords = {goal,infarction,ischemic stroke,magnetic resonance imaging,quality of life},\n pages = {2316-2327},\n volume = {54},\n id = {80c16dd4-2652-3cc5-923d-0ddb7eb0bb21},\n created = {2024-01-13T07:02:56.805Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:21:48.983Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {BACKGROUND: Predicting long-term clinical outcome based on the early acute ischemic stroke information is valuable for prognostication, resource management, clinical trials, and patient expectations. Current methods require subjective decisions about which imaging features to assess and may require time-consuming postprocessing. This study's goal was to predict ordinal 90-day modified Rankin Scale (mRS) score in acute ischemic stroke patients by fusing a Deep Learning model of diffusion-weighted imaging images and clinical information from the acute period. METHODS: A total of 640 acute ischemic stroke patients who underwent magnetic resonance imaging within 1 to 7 days poststroke and had 90-day mRS follow-up data were randomly divided into 70% (n=448) for model training, 15% (n=96) for validation, and 15% (n=96) for internal testing. Additionally, external testing on a cohort from Lausanne University Hospital (n=280) was performed to further evaluate model generalization. Accuracy for ordinal mRS, accuracy within ±1 mRS category, mean absolute prediction error, and determination of unfavorable outcome (mRS score >2) were evaluated for clinical only, imaging only, and 2 fused clinical-imaging models. RESULTS: The fused models demonstrated superior performance in predicting ordinal mRS score and unfavorable outcome in both internal and external test cohorts when compared with the clinical and imaging models. For the internal test cohort, the top fused model had the highest area under the curve of 0.92 for unfavorable outcome prediction and the lowest mean absolute error (0.96 [95% CI, 0.77-1.16]), with the highest proportion of mRS score predictions within ±1 category (79% [95% CI, 71%-88%]). On the external Lausanne University Hospital cohort, the best fused model had an area under the curve of 0.90 for unfavorable outcome prediction and outperformed other models with an mean absolute error of 0.90 (95% CI, 0.79-1.01), and the highest percentage of mRS score predictions within ±1 category (83% [95% CI, 78%-87%]). CONCLUSIONS: A Deep Learning-based imaging model fused with clinical variables can be used to predict 90-day stroke outcome with reduced subjectivity and user burden.},\n bibtype = {article},\n author = {Liu, Yongkai and Yu, Yannan and Ouyang, Jiahong and Jiang, Bin and Yang, Guang and Ostmeier, Sophie and Wintermark, Max and Michel, Patrik and Liebeskind, David S. and Lansberg, Maarten G. and Albers, Gregory W. and Zaharchuk, Greg},\n doi = {10.1161/STROKEAHA.123.044072},\n journal = {Stroke},\n number = {9}\n}
\n
\n\n\n
\n BACKGROUND: Predicting long-term clinical outcome based on the early acute ischemic stroke information is valuable for prognostication, resource management, clinical trials, and patient expectations. Current methods require subjective decisions about which imaging features to assess and may require time-consuming postprocessing. This study's goal was to predict ordinal 90-day modified Rankin Scale (mRS) score in acute ischemic stroke patients by fusing a Deep Learning model of diffusion-weighted imaging images and clinical information from the acute period. METHODS: A total of 640 acute ischemic stroke patients who underwent magnetic resonance imaging within 1 to 7 days poststroke and had 90-day mRS follow-up data were randomly divided into 70% (n=448) for model training, 15% (n=96) for validation, and 15% (n=96) for internal testing. Additionally, external testing on a cohort from Lausanne University Hospital (n=280) was performed to further evaluate model generalization. Accuracy for ordinal mRS, accuracy within ±1 mRS category, mean absolute prediction error, and determination of unfavorable outcome (mRS score >2) were evaluated for clinical only, imaging only, and 2 fused clinical-imaging models. RESULTS: The fused models demonstrated superior performance in predicting ordinal mRS score and unfavorable outcome in both internal and external test cohorts when compared with the clinical and imaging models. For the internal test cohort, the top fused model had the highest area under the curve of 0.92 for unfavorable outcome prediction and the lowest mean absolute error (0.96 [95% CI, 0.77-1.16]), with the highest proportion of mRS score predictions within ±1 category (79% [95% CI, 71%-88%]). On the external Lausanne University Hospital cohort, the best fused model had an area under the curve of 0.90 for unfavorable outcome prediction and outperformed other models with an mean absolute error of 0.90 (95% CI, 0.79-1.01), and the highest percentage of mRS score predictions within ±1 category (83% [95% CI, 78%-87%]). CONCLUSIONS: A Deep Learning-based imaging model fused with clinical variables can be used to predict 90-day stroke outcome with reduced subjectivity and user burden.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Automatic COVID-19 and Common-Acquired Pneumonia Diagnosis Using Chest CT Scans.\n \n \n \n \n\n\n \n Motta, P., C.; Cortez, P., C.; Silva, B., R.; Yang, G.; and Albuquerque, V., H., C.\n\n\n \n\n\n\n Bioengineering, 10(5): 1-29. 2023.\n \n\n\n\n
\n\n\n\n \n \n \"AutomaticPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Automatic COVID-19 and Common-Acquired Pneumonia Diagnosis Using Chest CT Scans},\n type = {article},\n year = {2023},\n keywords = {CNN,COVID-19,CT scan,Computer-Aided Diagnostic,classification,external validation,medical image,segmentation},\n pages = {1-29},\n volume = {10},\n id = {385610cc-9cfd-366e-a48d-e1e2b1659659},\n created = {2024-01-13T07:02:56.878Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:22:05.739Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Even with over 80% of the population being vaccinated against COVID-19, the disease continues to claim victims. Therefore, it is crucial to have a secure Computer-Aided Diagnostic system that can assist in identifying COVID-19 and determining the necessary level of care. This is especially important in the Intensive Care Unit to monitor disease progression or regression in the fight against this epidemic. To accomplish this, we merged public datasets from the literature to train lung and lesion segmentation models with five different distributions. We then trained eight CNN models for COVID-19 and Common-Acquired Pneumonia classification. If the examination was classified as COVID-19, we quantified the lesions and assessed the severity of the full CT scan. To validate the system, we used Resnetxt101 Unet++ and Mobilenet Unet for lung and lesion segmentation, respectively, achieving accuracy of 98.05%, F1-score of 98.70%, precision of 98.7%, recall of 98.7%, and specificity of 96.05%. This was accomplished in just 19.70 s per full CT scan, with external validation on the SPGC dataset. Finally, when classifying these detected lesions, we used Densenet201 and achieved accuracy of 90.47%, F1-score of 93.85%, precision of 88.42%, recall of 100.0%, and specificity of 65.07%. The results demonstrate that our pipeline can correctly detect and segment lesions due to COVID-19 and Common-Acquired Pneumonia in CT scans. It can differentiate these two classes from normal exams, indicating that our system is efficient and effective in identifying the disease and assessing the severity of the condition.},\n bibtype = {article},\n author = {Motta, Pedro Crosara and Cortez, Paulo César and Silva, Bruno R.S. and Yang, Guang and Albuquerque, Victor Hugo C.de},\n doi = {10.3390/bioengineering10050529},\n journal = {Bioengineering},\n number = {5}\n}
\n
\n\n\n
\n Even with over 80% of the population being vaccinated against COVID-19, the disease continues to claim victims. Therefore, it is crucial to have a secure Computer-Aided Diagnostic system that can assist in identifying COVID-19 and determining the necessary level of care. This is especially important in the Intensive Care Unit to monitor disease progression or regression in the fight against this epidemic. To accomplish this, we merged public datasets from the literature to train lung and lesion segmentation models with five different distributions. We then trained eight CNN models for COVID-19 and Common-Acquired Pneumonia classification. If the examination was classified as COVID-19, we quantified the lesions and assessed the severity of the full CT scan. To validate the system, we used Resnetxt101 Unet++ and Mobilenet Unet for lung and lesion segmentation, respectively, achieving accuracy of 98.05%, F1-score of 98.70%, precision of 98.7%, recall of 98.7%, and specificity of 96.05%. This was accomplished in just 19.70 s per full CT scan, with external validation on the SPGC dataset. Finally, when classifying these detected lesions, we used Densenet201 and achieved accuracy of 90.47%, F1-score of 93.85%, precision of 88.42%, recall of 100.0%, and specificity of 65.07%. The results demonstrate that our pipeline can correctly detect and segment lesions due to COVID-19 and Common-Acquired Pneumonia in CT scans. It can differentiate these two classes from normal exams, indicating that our system is efficient and effective in identifying the disease and assessing the severity of the condition.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Conditional Physics-Informed Graph Neural Network for Fractional Flow Reserve Assessment.\n \n \n \n \n\n\n \n Xie, B.; Liu, X.; Zhang, H.; Xu, C.; Zeng, T.; Yuan, Y.; Yang, G.; and Gao, Z.\n\n\n \n\n\n\n In Lecture Notes in Computer Science, volume 14226 LNCS, pages 110-120. Springer Nature Switzerland, 2023.\n \n\n\n\n
\n\n\n\n \n \n \"ConditionalPaper\n  \n \n \n \"ConditionalWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n\n\n\n
\n
@book{\n title = {Conditional Physics-Informed Graph Neural Network for Fractional Flow Reserve Assessment},\n type = {book},\n year = {2023},\n source = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},\n keywords = {Coronary Angiography,Fractional Flow Reserve Assessment,Physics-Informed Neural Networks},\n pages = {110-120},\n volume = {14226 LNCS},\n websites = {http://dx.doi.org/10.1007/978-3-031-43990-2_11},\n publisher = {Springer Nature Switzerland},\n id = {98645e7e-a13f-3a5a-8ecd-3cb11fb3ab1c},\n created = {2024-01-13T07:02:56.889Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:21:52.908Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {The assessment of fractional flow reserve (FFR) is significant for diagnosing coronary artery disease and determining the patients and lesions in need of revascularization. Deep learning has become a promising approach for the assessment of FFR, due to its high computation efficiency in contrast to computational fluid dynamics. However, it suffers from the lack of appropriate priors. The current study only considers adding priors into the loss function, which is insufficient to learn features having strong relationships with the boundary conditions. In this paper, we propose a conditional physics-informed graph neural network (CPGNN) for FFR assessment under the morphology and boundary condition information. Specially, CPGNN adds morphology and boundary conditions into inputs to learn the conditioned features and penalizes the residual of physical equations and the boundary condition in the loss function. Additionally, CPGNN consists of a multi-scale graph fusion module (MSGF) and a physics-informed loss. MSGF is to generate the features constrained by the coronary topology and better represent the different-range dependence. The physics-informed loss uses the finite difference method to calculate the residuals of physical equations. Our CPGNN is evaluated over 183 real-world coronary observed from 143 X-ray and 40 CT angiography. The FFR values of CPGNN correlate well with FFR measurements r = 0.89 in X-ray and r = 0.88 in CT.},\n bibtype = {book},\n author = {Xie, Baihong and Liu, Xiujian and Zhang, Heye and Xu, Chenchu and Zeng, Tieyong and Yuan, Yixuan and Yang, Guang and Gao, Zhifan},\n doi = {10.1007/978-3-031-43990-2_11}\n}
\n
\n\n\n
\n The assessment of fractional flow reserve (FFR) is significant for diagnosing coronary artery disease and determining the patients and lesions in need of revascularization. Deep learning has become a promising approach for the assessment of FFR, due to its high computation efficiency in contrast to computational fluid dynamics. However, it suffers from the lack of appropriate priors. The current study only considers adding priors into the loss function, which is insufficient to learn features having strong relationships with the boundary conditions. In this paper, we propose a conditional physics-informed graph neural network (CPGNN) for FFR assessment under the morphology and boundary condition information. Specifically, CPGNN adds morphology and boundary conditions into inputs to learn the conditioned features and penalizes the residual of physical equations and the boundary condition in the loss function. Additionally, CPGNN consists of a multi-scale graph fusion module (MSGF) and a physics-informed loss. MSGF generates features constrained by the coronary topology and better represents the different-range dependence. The physics-informed loss uses the finite difference method to calculate the residuals of physical equations. Our CPGNN is evaluated on 183 real-world coronary arteries observed from 143 X-ray and 40 CT angiograms. The FFR values of CPGNN correlate well with FFR measurements (r = 0.89 in X-ray and r = 0.88 in CT).\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Pharmacophoric-constrained heterogeneous graph transformer model for molecular property prediction.\n \n \n \n \n\n\n \n Jiang, Y.; Jin, S.; Jin, X.; Xiao, X.; Wu, W.; Liu, X.; Zhang, Q.; Zeng, X.; Yang, G.; and Niu, Z.\n\n\n \n\n\n\n Communications Chemistry, 6(1). 2023.\n \n\n\n\n
\n\n\n\n \n \n \"Pharmacophoric-constrainedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Pharmacophoric-constrained heterogeneous graph transformer model for molecular property prediction},\n type = {article},\n year = {2023},\n volume = {6},\n publisher = {Springer US},\n id = {86f10c8b-bca7-364b-a1d6-d3e4f2f540e2},\n created = {2024-01-13T07:02:56.896Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:22:10.927Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Informative representation of molecules is a crucial prerequisite in AI-driven drug design and discovery. Pharmacophore information including functional groups and chemical reactions can indicate molecular properties, which have not been fully exploited by prior atom-based molecular graph representation. To obtain a more informative representation of molecules for better molecule property prediction, we propose the Pharmacophoric-constrained Heterogeneous Graph Transformer (PharmHGT). We design a pharmacophoric-constrained multi-views molecular representation graph, enabling PharmHGT to extract vital chemical information from functional substructures and chemical reactions. With a carefully designed pharmacophoric-constrained multi-view molecular representation graph, PharmHGT can learn more chemical information from molecular functional substructures and chemical reaction information. Extensive downstream experiments prove that PharmHGT achieves remarkably superior performance over the state-of-the-art models the performance of our model is up to 1.55% in ROC-AUC and 0.272 in RMSE higher than the best baseline model) on molecular properties prediction. The ablation study and case study show that our proposed molecular graph representation method and heterogeneous graph transformer model can better capture the pharmacophoric structure and chemical information features. Further visualization studies also indicated a better representation capacity achieved by our model.},\n bibtype = {article},\n author = {Jiang, Yinghui and Jin, Shuting and Jin, Xurui and Xiao, Xianglu and Wu, Wenfan and Liu, Xiangrong and Zhang, Qiang and Zeng, Xiangxiang and Yang, Guang and Niu, Zhangming},\n doi = {10.1038/s42004-023-00857-x},\n journal = {Communications Chemistry},\n number = {1}\n}
\n
\n\n\n
\n Informative representation of molecules is a crucial prerequisite in AI-driven drug design and discovery. Pharmacophore information including functional groups and chemical reactions can indicate molecular properties, which have not been fully exploited by prior atom-based molecular graph representation. To obtain a more informative representation of molecules for better molecule property prediction, we propose the Pharmacophoric-constrained Heterogeneous Graph Transformer (PharmHGT). We design a pharmacophoric-constrained multi-view molecular representation graph, enabling PharmHGT to extract vital chemical information from functional substructures and chemical reactions. With a carefully designed pharmacophoric-constrained multi-view molecular representation graph, PharmHGT can learn more chemical information from molecular functional substructures and chemical reaction information. Extensive downstream experiments prove that PharmHGT achieves remarkably superior performance over the state-of-the-art models (the performance of our model is up to 1.55% in ROC-AUC and 0.272 in RMSE higher than the best baseline model) on molecular property prediction. The ablation study and case study show that our proposed molecular graph representation method and heterogeneous graph transformer model can better capture the pharmacophoric structure and chemical information features. Further visualization studies also indicated a better representation capacity achieved by our model.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multi-site, Multi-domain Airway Tree Modeling (ATM'22): A Public Benchmark for Pulmonary Airway Segmentation.\n \n \n \n \n\n\n \n Zhang, M.; Wu, Y.; Zhang, H.; Qin, Y.; Zheng, H.; Tang, W.; Arnold, C.; Pei, C.; Yu, P.; Nan, Y.; Yang, G.; Walsh, S.; Marshall, D., C.; Komorowski, M.; Wang, P.; Guo, D.; Jin, D.; Wu, Y.; Zhao, S.; Chang, R.; Zhang, B.; Lv, X.; Qayyum, A.; Mazher, M.; Su, Q.; Wu, Y.; Liu, Y.; Zhu, Y.; Yang, J.; Pakzad, A.; Rangelov, B.; Estepar, R., S., J.; Espinosa, C., C.; Sun, J.; Yang, G.; and Gu, Y.\n\n\n \n\n\n\n arXiv preprint arXiv:2303.05745. 2023.\n \n\n\n\n
\n\n\n\n \n \n \"Multi-site,Paper\n  \n \n \n \"Multi-site,Website\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Multi-site, Multi-domain Airway Tree Modeling (ATM'22): A Public Benchmark for Pulmonary Airway Segmentation},\n type = {article},\n year = {2023},\n keywords = {methods,pulmonary airway segmen-,tation,topological prior knowledge,traditional and deep-learning},\n websites = {http://arxiv.org/abs/2303.05745},\n id = {7ede210d-4e55-3f76-a81d-eb2dd5b05186},\n created = {2024-01-13T07:02:56.915Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:14:19.400Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Open international challenges are becoming the de facto standard for assessing computer vision and image analysis algorithms. In recent years, new methods have extended the reach of pulmonary airway segmentation that is closer to the limit of image resolution. Since EXACT'09 pulmonary airway segmentation, limited effort has been directed to quantitative comparison of newly emerged algorithms driven by the maturity of deep learning based approaches and clinical drive for resolving finer details of distal airways for early intervention of pulmonary diseases. Thus far, public annotated datasets are extremely limited, hindering the development of data-driven methods and detailed performance evaluation of new algorithms. To provide a benchmark for the medical imaging community, we organized the Multi-site, Multi-domain Airway Tree Modeling (ATM'22), which was held as an official challenge event during the MICCAI 2022 conference. ATM'22 provides large-scale CT scans with detailed pulmonary airway annotation, including 500 CT scans (300 for training, 50 for validation, and 150 for testing). The dataset was collected from different sites and it further included a portion of noisy COVID-19 CTs with ground-glass opacity and consolidation. Twenty-three teams participated in the entire phase of the challenge and the algorithms for the top ten teams are reviewed in this paper. Quantitative and qualitative results revealed that deep learning models embedded with the topological continuity enhancement achieved superior performance in general. ATM'22 challenge holds as an open-call design, the training data and the gold standard evaluation are available upon successful registration via its homepage.},\n bibtype = {article},\n author = {Zhang, Minghui and Wu, Yangqian and Zhang, Hanxiao and Qin, Yulei and Zheng, Hao and Tang, Wen and Arnold, Corey and Pei, Chenhao and Yu, Pengxin and Nan, Yang and Yang, Guang and Walsh, Simon and Marshall, Dominic C. and Komorowski, Matthieu and Wang, Puyang and Guo, Dazhou and Jin, Dakai and Wu, Ya'nan and Zhao, Shuiqing and Chang, Runsheng and Zhang, Boyu and Lv, Xing and Qayyum, Abdul and Mazher, Moona and Su, Qi and Wu, Yonghuang and Liu, Ying'ao and Zhu, Yufei and Yang, Jiancheng and Pakzad, Ashkan and Rangelov, Bojidar and Estepar, Raul San Jose and Espinosa, Carlos Cano and Sun, Jiayuan and Yang, Guang-Zhong and Gu, Yun},\n journal = {arXiv preprint arXiv:2303.05745}\n}
\n
\n\n\n
\n Open international challenges are becoming the de facto standard for assessing computer vision and image analysis algorithms. In recent years, new methods have extended the reach of pulmonary airway segmentation that is closer to the limit of image resolution. Since EXACT'09 pulmonary airway segmentation, limited effort has been directed to quantitative comparison of newly emerged algorithms driven by the maturity of deep learning based approaches and clinical drive for resolving finer details of distal airways for early intervention of pulmonary diseases. Thus far, public annotated datasets are extremely limited, hindering the development of data-driven methods and detailed performance evaluation of new algorithms. To provide a benchmark for the medical imaging community, we organized the Multi-site, Multi-domain Airway Tree Modeling (ATM'22), which was held as an official challenge event during the MICCAI 2022 conference. ATM'22 provides large-scale CT scans with detailed pulmonary airway annotation, including 500 CT scans (300 for training, 50 for validation, and 150 for testing). The dataset was collected from different sites and it further included a portion of noisy COVID-19 CTs with ground-glass opacity and consolidation. Twenty-three teams participated in the entire phase of the challenge and the algorithms for the top ten teams are reviewed in this paper. Quantitative and qualitative results revealed that deep learning models embedded with the topological continuity enhancement achieved superior performance in general. ATM'22 challenge holds as an open-call design, the training data and the gold standard evaluation are available upon successful registration via its homepage.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A residual dense vision transformer for medical image super-resolution with segmentation-based perceptual loss fine-tuning.\n \n \n \n \n\n\n \n Zhu, J.; Yang, G.; and Lio, P.\n\n\n \n\n\n\n arXiv preprint arXiv:2302.11184,1-28. 2023.\n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n \n \"AWebsite\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {A residual dense vision transformer for medical image super-resolution with segmentation-based perceptual loss fine-tuning},\n type = {article},\n year = {2023},\n pages = {1-28},\n websites = {http://arxiv.org/abs/2302.11184},\n id = {e53e21e8-5a40-36aa-a108-6d2f8e4c337f},\n created = {2024-01-13T07:02:56.955Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:14:19.380Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Super-resolution plays an essential role in medical imaging because it provides an alternative way to achieve high spatial resolutions and image quality with no extra acquisition costs. In the past few decades, the rapid development of deep neural networks has promoted super-resolution performance with novel network architectures, loss functions and evaluation metrics. Specifically, vision transformers dominate a broad range of computer vision tasks, but challenges still exist when applying them to low-level medical image processing tasks. This paper proposes an efficient vision transformer with residual dense connections and local feature fusion to achieve efficient single-image super-resolution (SISR) of medical modalities. Moreover, we implement a general-purpose perceptual loss with manual control for image quality improvements of desired aspects by incorporating prior knowledge of medical image segmentation. Compared with state-of-the-art methods on four public medical image datasets, the proposed method achieves the best PSNR scores of 6 modalities among seven modalities. It leads to an average improvement of $+0.09$ dB PSNR with only 38\\% parameters of SwinIR. On the other hand, the segmentation-based perceptual loss increases $+0.14$ dB PSNR on average for SOTA methods, including CNNs and vision transformers. Additionally, we conduct comprehensive ablation studies to discuss potential factors for the superior performance of vision transformers over CNNs and the impacts of network and loss function components. The code will be released on GitHub with the paper published.},\n bibtype = {article},\n author = {Zhu, Jin and Yang, Guang and Lio, Pietro},\n journal = {arXiv preprint arXiv:2302.11184}\n}
\n
\n\n\n
\n Super-resolution plays an essential role in medical imaging because it provides an alternative way to achieve high spatial resolutions and image quality with no extra acquisition costs. In the past few decades, the rapid development of deep neural networks has promoted super-resolution performance with novel network architectures, loss functions and evaluation metrics. Specifically, vision transformers dominate a broad range of computer vision tasks, but challenges still exist when applying them to low-level medical image processing tasks. This paper proposes an efficient vision transformer with residual dense connections and local feature fusion to achieve efficient single-image super-resolution (SISR) of medical modalities. Moreover, we implement a general-purpose perceptual loss with manual control for image quality improvements of desired aspects by incorporating prior knowledge of medical image segmentation. Compared with state-of-the-art methods on four public medical image datasets, the proposed method achieves the best PSNR scores of 6 modalities among seven modalities. It leads to an average improvement of $+0.09$ dB PSNR with only 38\\% parameters of SwinIR. On the other hand, the segmentation-based perceptual loss increases $+0.14$ dB PSNR on average for SOTA methods, including CNNs and vision transformers. Additionally, we conduct comprehensive ablation studies to discuss potential factors for the superior performance of vision transformers over CNNs and the impacts of network and loss function components. The code will be released on GitHub with the paper published.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Hierarchical Perception Adversarial Learning Framework for Compressed Sensing MRI.\n \n \n \n \n\n\n \n Gao, Z.; Guo, Y.; Zhang, J.; Zeng, T.; and Yang, G.\n\n\n \n\n\n\n IEEE Transactions on Medical Imaging, 42(6): 1859-1874. 2023.\n \n\n\n\n
\n\n\n\n \n \n \"HierarchicalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Hierarchical Perception Adversarial Learning Framework for Compressed Sensing MRI},\n type = {article},\n year = {2023},\n keywords = {MRI reconstruction,compressed sensing,generative adversarial networks,magnetic resonance imaging},\n pages = {1859-1874},\n volume = {42},\n publisher = {IEEE},\n id = {5f3d5b23-d130-3113-8ab4-07178838dd70},\n created = {2024-01-13T07:02:56.956Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:22:37.159Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {The long acquisition time has limited the accessibility of magnetic resonance imaging (MRI) because it leads to patient discomfort and motion artifacts. Although several MRI techniques have been proposed to reduce the acquisition time, compressed sensing in magnetic resonance imaging (CS-MRI) enables fast acquisition without compromising SNR and resolution. However, existing CS-MRI methods suffer from the challenge of aliasing artifacts. This challenge results in the noise-like textures and missing the fine details, thus leading to unsatisfactory reconstruction performance. To tackle this challenge, we propose a hierarchical perception adversarial learning framework (HP-ALF). HP-ALF can perceive the image information in the hierarchical mechanism: image-level perception and patch-level perception. The former can reduce the visual perception difference in the entire image, and thus achieve aliasing artifact removal. The latter can reduce this difference in the regions of the image, and thus recover fine details. Specifically, HP-ALF achieves the hierarchical mechanism by utilizing multilevel perspective discrimination. This discrimination can provide the information from two perspectives (overall and regional) for adversarial learning. It also utilizes a global and local coherent discriminator to provide structure information to the generator during training. In addition, HP-ALF contains a context-aware learning block to effectively exploit the slice information between individual images for better reconstruction performance. The experiments validated on three datasets demonstrate the effectiveness of HP-ALF and its superiority to the comparative methods.},\n bibtype = {article},\n author = {Gao, Zhifan and Guo, Yifeng and Zhang, Jiajing and Zeng, Tieyong and Yang, Guang},\n doi = {10.1109/TMI.2023.3240862},\n journal = {IEEE Transactions on Medical Imaging},\n number = {6}\n}
\n
\n\n\n
\n The long acquisition time has limited the accessibility of magnetic resonance imaging (MRI) because it leads to patient discomfort and motion artifacts. Although several MRI techniques have been proposed to reduce the acquisition time, compressed sensing in magnetic resonance imaging (CS-MRI) enables fast acquisition without compromising SNR and resolution. However, existing CS-MRI methods suffer from the challenge of aliasing artifacts. This challenge results in the noise-like textures and missing the fine details, thus leading to unsatisfactory reconstruction performance. To tackle this challenge, we propose a hierarchical perception adversarial learning framework (HP-ALF). HP-ALF can perceive the image information in the hierarchical mechanism: image-level perception and patch-level perception. The former can reduce the visual perception difference in the entire image, and thus achieve aliasing artifact removal. The latter can reduce this difference in the regions of the image, and thus recover fine details. Specifically, HP-ALF achieves the hierarchical mechanism by utilizing multilevel perspective discrimination. This discrimination can provide the information from two perspectives (overall and regional) for adversarial learning. It also utilizes a global and local coherent discriminator to provide structure information to the generator during training. In addition, HP-ALF contains a context-aware learning block to effectively exploit the slice information between individual images for better reconstruction performance. The experiments validated on three datasets demonstrate the effectiveness of HP-ALF and its superiority to the comparative methods.\n
\n\n\n
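As background to the compressed-sensing MRI setting addressed by HP-ALF above, the sketch below illustrates the usual starting point of such methods: retrospective undersampling of 2-D k-space with a random line mask and the aliased zero-filled reconstruction that a de-aliasing network is then trained to improve. It is a generic illustration, not the paper's pipeline; the mask pattern, acceleration factor, and array names are assumptions.

```python
import numpy as np

def undersample_kspace(image, acceleration=4, seed=0):
    """Retrospectively undersample the 2-D k-space of an image.

    Keeps roughly 1/acceleration of the phase-encode lines at random,
    always retains a small band of central lines, and returns the
    zero-filled (aliased) reconstruction together with the line mask.
    """
    rng = np.random.default_rng(seed)
    kspace = np.fft.fftshift(np.fft.fft2(image))
    mask = rng.random(image.shape[0]) < (1.0 / acceleration)          # random lines
    centre = image.shape[0] // 2
    mask[centre - 8 : centre + 8] = True                              # keep centre of k-space
    undersampled = kspace * mask[:, None]
    zero_filled = np.abs(np.fft.ifft2(np.fft.ifftshift(undersampled)))
    return zero_filled, mask

# Toy usage on a synthetic square "phantom".
phantom = np.zeros((128, 128))
phantom[32:96, 32:96] = 1.0
aliased, mask = undersample_kspace(phantom, acceleration=4)
print(f"sampled lines: {int(mask.sum())}/128, max aliased intensity: {aliased.max():.2f}")
```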
\n\n\n
\n \n\n \n \n \n \n \n \n Data-Free Distillation Improves Efficiency and Privacy in Federated Thorax Disease Analysis.\n \n \n \n \n\n\n \n Li, M.; and Yang, G.\n\n\n \n\n\n\n In IEEE EMBS International Conference on Data Science and Engineering in Healthcare, Medicine & Biology, 2023. \n \n\n\n\n
\n\n\n\n \n \n \"Data-FreePaper\n  \n \n \n \"Data-FreeWebsite\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Data-Free Distillation Improves Efficiency and Privacy in Federated Thorax Disease Analysis},\n type = {inproceedings},\n year = {2023},\n issue = {3},\n websites = {http://arxiv.org/abs/2310.18346},\n id = {f570dceb-25f3-38d1-b5ba-99dba69fb031},\n created = {2024-01-13T07:02:57.026Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:14:19.397Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Thorax disease analysis in large-scale, multi-centre, and multi-scanner settings is often limited by strict privacy policies. Federated learning (FL) offers a potential solution, while traditional parameter-based FL can be limited by issues such as high communication costs, data leakage, and heterogeneity. Distillation-based FL can improve efficiency, but it relies on a proxy dataset, which is often impractical in clinical practice. To address these challenges, we introduce a data-free distillation-based FL approach FedKDF. In FedKDF, the server employs a lightweight generator to aggregate knowledge from different clients without requiring access to their private data or a proxy dataset. FedKDF combines the predictors from clients into a single, unified predictor, which is further optimized using the learned knowledge in the lightweight generator. Our empirical experiments demonstrate that FedKDF offers a robust solution for efficient, privacy-preserving federated thorax disease analysis.},\n bibtype = {inproceedings},\n author = {Li, Ming and Yang, Guang},\n booktitle = {IEEE EMBS International Conference on Data Science and Engineering in Healthcare, Medicine & Biology}\n}
\n
\n\n\n
\n Thorax disease analysis in large-scale, multi-centre, and multi-scanner settings is often limited by strict privacy policies. Federated learning (FL) offers a potential solution, while traditional parameter-based FL can be limited by issues such as high communication costs, data leakage, and heterogeneity. Distillation-based FL can improve efficiency, but it relies on a proxy dataset, which is often impractical in clinical practice. To address these challenges, we introduce a data-free distillation-based FL approach FedKDF. In FedKDF, the server employs a lightweight generator to aggregate knowledge from different clients without requiring access to their private data or a proxy dataset. FedKDF combines the predictors from clients into a single, unified predictor, which is further optimized using the learned knowledge in the lightweight generator. Our empirical experiments demonstrate that FedKDF offers a robust solution for efficient, privacy-preserving federated thorax disease analysis.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The impact of imputation quality on machine learning classifiers for datasets with missing values.\n \n \n \n \n\n\n \n Shadbahr, T.; Roberts, M.; Stanczuk, J.; Gilbey, J.; Teare, P.; Dittmer, S.; Thorpe, M.; Torné, R., V.; Sala, E.; Lió, P.; Patel, M.; Preller, J.; Selby, I.; Breger, A.; Weir-McCall, J., R.; Gkrania-Klotsas, E.; Korhonen, A.; Jefferson, E.; Langs, G.; Yang, G.; Prosch, H.; Babar, J.; Escudero Sánchez, L.; Wassin, M.; Holzer, M.; Walton, N.; Lió, P.; Rudd, J., H., F.; Mirtti, T.; Rannikko, A., S.; Aston, J., A., D.; Tang, J.; and Schönlieb, C.\n\n\n \n\n\n\n Nature Communications Medicine, 3(1): 139. 2023.\n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {The impact of imputation quality on machine learning classifiers for datasets with missing values},\n type = {article},\n year = {2023},\n pages = {139},\n volume = {3},\n id = {b670aba7-e847-394a-b31e-93b41697de9e},\n created = {2024-01-13T08:14:14.102Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:18:21.356Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {shadbahr_impact_2023},\n source_type = {article},\n notes = {<b>From Duplicate 2 (<i>The impact of imputation quality on machine learning classifiers for datasets with missing values</i> - Shadbahr, Tolou; Roberts, Michael; Stanczuk, Jan; Gilbey, Julian; Teare, Philip; Dittmer, Sören; Thorpe, Matthew; Torné, Ramon Viñas; Sala, Evis; Lió, Pietro)<br/></b><br/>Publisher: Nature Publishing Group UK},\n private_publication = {false},\n abstract = {Classifying samples in incomplete datasets is a common aim for machine learning practitioners, but is non-trivial. Missing data is found in most real-world datasets and these missing values are typically imputed using established methods, followed by classification of the now complete samples. The focus of the machine learning researcher is to optimise the classifier’s performance. We utilise three simulated and three real-world clinical datasets with different feature types and missingness patterns. Initially, we evaluate how the downstream classifier performance depends on the choice of classifier and imputation methods. We employ ANOVA to quantitatively evaluate how the choice of missingness rate, imputation method, and classifier method influences the performance. Additionally, we compare commonly used methods for assessing imputation quality and introduce a class of discrepancy scores based on the sliced Wasserstein distance. We also assess the stability of the imputations and the interpretability of model built on the imputed data. The performance of the classifier is most affected by the percentage of missingness in the test data, with a considerable performance decline observed as the test missingness rate increases. We also show that the commonly used measures for assessing imputation quality tend to lead to imputed data which poorly matches the underlying data distribution, whereas our new class of discrepancy scores performs much better on this measure. Furthermore, we show that the interpretability of classifier models trained using poorly imputed data is compromised. It is imperative to consider the quality of the imputation when performing downstream classification as the effects on the classifier can be considerable. Many artificial intelligence (AI) methods aim to classify samples of data into groups, e.g., patients with disease vs. those without. This often requires datasets to be complete, i.e., that all data has been collected for all samples. However, in clinical practice this is often not the case and some data can be missing. One solution is to ‘complete’ the dataset using a technique called imputation to replace those missing values. However, assessing how well the imputation method performs is challenging. In this work, we demonstrate why people should care about imputation, develop a new method for assessing imputation quality, and demonstrate that if we build AI models on poorly imputed data, the model can give different results to those we would hope for. 
Our findings may improve the utility and quality of AI models in the clinic. Shadbahr et al. highlight the importance of evaluating imputation quality when building classification models for incomplete data. They demonstrate how a model built on poorly imputed data can compromise the classifier, and develop a new method for assessing imputation quality based on how well the overall data distribution is preserved.},\n bibtype = {article},\n author = {Shadbahr, Tolou and Roberts, Michael and Stanczuk, Jan and Gilbey, Julian and Teare, Philip and Dittmer, Sören and Thorpe, Matthew and Torné, Ramon Viñas and Sala, Evis and Lió, Pietro and Patel, Mishal and Preller, Jacobus and Selby, Ian and Breger, Anna and Weir-McCall, Jonathan R. and Gkrania-Klotsas, Effrossyni and Korhonen, Anna and Jefferson, Emily and Langs, Georg and Yang, Guang and Prosch, Helmut and Babar, Judith and Escudero Sánchez, Lorena and Wassin, Marcel and Holzer, Markus and Walton, Nicholas and Lió, Pietro and Rudd, James H. F. and Mirtti, Tuomas and Rannikko, Antti Sakari and Aston, John A. D. and Tang, Jing and Schönlieb, Carola-Bibiane},\n doi = {10.1038/s43856-023-00356-z},\n journal = {Nature Communications Medicine},\n number = {1}\n}
\n
\n\n\n
\n Classifying samples in incomplete datasets is a common aim for machine learning practitioners, but is non-trivial. Missing data is found in most real-world datasets and these missing values are typically imputed using established methods, followed by classification of the now complete samples. The focus of the machine learning researcher is to optimise the classifier’s performance. We utilise three simulated and three real-world clinical datasets with different feature types and missingness patterns. Initially, we evaluate how the downstream classifier performance depends on the choice of classifier and imputation methods. We employ ANOVA to quantitatively evaluate how the choice of missingness rate, imputation method, and classifier method influences the performance. Additionally, we compare commonly used methods for assessing imputation quality and introduce a class of discrepancy scores based on the sliced Wasserstein distance. We also assess the stability of the imputations and the interpretability of model built on the imputed data. The performance of the classifier is most affected by the percentage of missingness in the test data, with a considerable performance decline observed as the test missingness rate increases. We also show that the commonly used measures for assessing imputation quality tend to lead to imputed data which poorly matches the underlying data distribution, whereas our new class of discrepancy scores performs much better on this measure. Furthermore, we show that the interpretability of classifier models trained using poorly imputed data is compromised. It is imperative to consider the quality of the imputation when performing downstream classification as the effects on the classifier can be considerable. Many artificial intelligence (AI) methods aim to classify samples of data into groups, e.g., patients with disease vs. those without. This often requires datasets to be complete, i.e., that all data has been collected for all samples. However, in clinical practice this is often not the case and some data can be missing. One solution is to ‘complete’ the dataset using a technique called imputation to replace those missing values. However, assessing how well the imputation method performs is challenging. In this work, we demonstrate why people should care about imputation, develop a new method for assessing imputation quality, and demonstrate that if we build AI models on poorly imputed data, the model can give different results to those we would hope for. Our findings may improve the utility and quality of AI models in the clinic. Shadbahr et al. highlight the importance of evaluating imputation quality when building classification models for incomplete data. They demonstrate how a model built on poorly imputed data can compromise the classifier, and develop a new method for assessing imputation quality based on how well the overall data distribution is preserved.\n
\n\n\n
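The abstract above mentions a class of discrepancy scores based on the sliced Wasserstein distance for assessing imputation quality. The snippet below is a generic illustration of the sliced Wasserstein idea, not the authors' scoring method: samples are projected onto random unit directions and the 1-D Wasserstein distances of the projections are averaged, using SciPy's wasserstein_distance for the 1-D computation. Sample sizes and the number of projections are illustrative assumptions.

```python
import numpy as np
from scipy.stats import wasserstein_distance

def sliced_wasserstein(x, y, n_projections=50, seed=0):
    """Approximate sliced Wasserstein distance between two samples.

    x, y: arrays of shape (n_samples, n_features). Each random unit
    direction yields a 1-D projection whose Wasserstein distance is averaged.
    """
    rng = np.random.default_rng(seed)
    dims = x.shape[1]
    total = 0.0
    for _ in range(n_projections):
        direction = rng.normal(size=dims)
        direction /= np.linalg.norm(direction)
        total += wasserstein_distance(x @ direction, y @ direction)
    return total / n_projections

# Toy check: imputed data shifted away from the original distribution
# should score worse than imputed data drawn from the same distribution.
rng = np.random.default_rng(1)
original = rng.normal(size=(500, 5))
good_imputation = rng.normal(size=(500, 5))
poor_imputation = rng.normal(loc=1.0, size=(500, 5))
print(sliced_wasserstein(original, good_imputation))  # small
print(sliced_wasserstein(original, poor_imputation))  # larger
```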
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2022\n \n \n (42)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Quantifying the Impact of Pyramid Squeeze Attention Mechanism and Filtering Approaches on Alzheimer's Disease Classification.\n \n \n \n \n\n\n \n Yan, B.; Li, Y.; Li, L.; Yang, X.; Li, T.; Yang, G.; and Jiang, M.\n\n\n \n\n\n\n Computers in Biology and Medicine. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"QuantifyingPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Quantifying the Impact of Pyramid Squeeze Attention Mechanism and Filtering Approaches on Alzheimer's Disease Classification},\n type = {article},\n year = {2022},\n id = {dde2bd54-7db5-3bd0-94ad-eff2ce8833a2},\n created = {2024-01-13T05:46:20.685Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:14:19.469Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {yan_quantifying_2022},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Yan, Bin and Li, Yang and Li, Lin and Yang, Xiaocheng and Li, Tie-qiang and Yang, Guang and Jiang, Mingfeng},\n journal = {Computers in Biology and Medicine}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Automatic Fine-grained Glomerular Lesion Recognition in Kidney Pathology.\n \n \n \n \n\n\n \n Nan, Y.; Li, F.; Tang, P.; Zhang, G.; Zeng, C.; Xie, G.; Liu, Z.; and Yang, G.\n\n\n \n\n\n\n Pattern Recognition. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"AutomaticPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Automatic Fine-grained Glomerular Lesion Recognition in Kidney Pathology},\n type = {article},\n year = {2022},\n id = {7ffea282-0842-3838-88e5-9a2752d221ef},\n created = {2024-01-13T05:46:20.757Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:14:23.925Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {nan_automatic_2022},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Nan, Yang and Li, Fengyi and Tang, Peng and Zhang, Guyue and Zeng, Caihong and Xie, Guotong and Liu, Zhihong and Yang, Guang},\n journal = {Pattern Recognition}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Swin Transformer for Fast MRI.\n \n \n \n \n\n\n \n Huang, J.; Fang, Y.; Wu, Y.; Wu, H.; Gao, Z.; Li, Y.; Del Ser, J.; Xia, J.; and Yang, G.\n\n\n \n\n\n\n Neurocomputing. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"SwinPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Swin Transformer for Fast MRI},\n type = {article},\n year = {2022},\n id = {8eb1edbd-abef-33ef-926d-9053c3d2fc09},\n created = {2024-01-13T05:46:20.783Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:15:29.356Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {huang_swin_2022},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Huang, Jiahao and Fang, Yingying and Wu, Yinzhe and Wu, Huanjun and Gao, Zhifan and Li, Yang and Del Ser, Javier and Xia, Jun and Yang, Guang},\n journal = {Neurocomputing}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Global Transformer and Dual Local Attention Network via Deep-Shallow Hierarchical Feature Fusion for Retinal Vessel Segmentation.\n \n \n \n \n\n\n \n Li, Y.; Zhang, Y.; Liu, J.; Wang, K.; Zhang, K.; Zhang, G.; Liao, X.; and Yang, G.\n\n\n \n\n\n\n IEEE Transactions on Cybernetics. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"GlobalPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Global Transformer and Dual Local Attention Network via Deep-Shallow Hierarchical Feature Fusion for Retinal Vessel Segmentation},\n type = {article},\n year = {2022},\n id = {71ce521a-2953-30f0-bcd6-c7948de47e23},\n created = {2024-01-13T05:46:22.929Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:03:14.620Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {li_global_2022},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Li, Yang and Zhang, Yue and Liu, Jing-Yu and Wang, Kang and Zhang, Kai and Zhang, Gen-Sheng and Liao, Xiao-Feng and Yang, Guang},\n journal = {IEEE Transactions on Cybernetics}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Swin Deformable Attention U-Net Transformer (SDAUT) for Explainable Fast MRI.\n \n \n \n \n\n\n \n Huang, J.; Xing, X.; Gao, Z.; and Yang, G.\n\n\n \n\n\n\n In Medical Image Computing and Computer Assisted Intervention (MICCAI 2022), 2022. \n \n\n\n\n
\n\n\n\n \n \n \"SwinPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Swin Deformable Attention U-Net Transformer (SDAUT) for Explainable Fast MRI},\n type = {inproceedings},\n year = {2022},\n id = {301e266a-5b5b-3d81-9b7c-3ab038223eee},\n created = {2024-01-13T05:46:23.312Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:03:30.050Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {huang_swin_2022-1},\n source_type = {inproceedings},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Huang, Jiahao and Xing, Xiaodan and Gao, Zhifan and Yang, Guang},\n booktitle = {Medical Image Computing and Computer Assisted Intervention (MICCAI 2022)}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n CHAIMELEON project: Creation of a pan-European repository of health imaging data for the development of AI-powered cancer management tools.\n \n \n \n \n\n\n \n Bonmatí, L., M.; Blanco, A., M.; Suárez, A.; Aznar, M.; Beregi, J., P.; Fournier, L.; Neri, E.; Laghi, A.; França, M.; and Sardanelli, F.\n\n\n \n\n\n\n Frontiers in Oncology. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"CHAIMELEONPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {CHAIMELEON project: Creation of a pan-European repository of health imaging data for the development of AI-powered cancer management tools},\n type = {article},\n year = {2022},\n id = {ca4aa972-fa65-3498-af55-59a82103b3d4},\n created = {2024-01-13T05:46:28.541Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:04:02.942Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {bonmati_chaimeleon_2022},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Bonmatí, Luis Martí and Blanco, Ana Miguel and Suárez, Amelia and Aznar, Mario and Beregi, Jean Paul and Fournier, Laure and Neri, Emanuele and Laghi, Andrea and França, Manuela and Sardanelli, Francesco},\n journal = {Frontiers in Oncology}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n MCAL: An Anatomical Knowledge Learning Model for Myocardial Segmentation in 2D Echocardiography.\n \n \n \n \n\n\n \n Cui, X.; Zhang, P.; Li, Y.; Liu, Z.; Xiao, X.; Zhang, Y.; Sun, L.; Cui, L.; Yang, G.; and Li, S.\n\n\n \n\n\n\n IEEE Transactions on Ultrasonics, Ferroelectrics and Frequency Control. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"MCAL:Paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {MCAL: An Anatomical Knowledge Learning Model for Myocardial Segmentation in 2D Echocardiography},\n type = {article},\n year = {2022},\n id = {bc6f53a1-9b2f-32df-ac2c-28668068926c},\n created = {2024-01-13T05:46:29.099Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:04:17.384Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {cui_mcal_2022},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Cui, Xiaoxiao and Zhang, Pengfei and Li, Yujun and Liu, Zhi and Xiao, Xiaoyan and Zhang, Yang and Sun, Longkun and Cui, Lizhen and Yang, Guang and Li, Shuo},\n journal = {IEEE Transactions on Ultrasonics, Ferroelectrics and Frequency Control}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fast MRI Reconstruction: How Powerful Transformers Are?.\n \n \n \n \n\n\n \n Huang, J.; Wu, Y.; Wu, H.; and Yang, G.\n\n\n \n\n\n\n In IEEE International Engineering in Medicine and Biology Conference (EMBC 2022), 2022. \n \n\n\n\n
\n\n\n\n \n \n \"FastPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Fast MRI Reconstruction: How Powerful Transformers Are?},\n type = {inproceedings},\n year = {2022},\n id = {f0cdf421-4c36-3ce7-a309-3cbe023f70b1},\n created = {2024-01-13T05:46:29.330Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:04:50.565Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {huang_fast_2022},\n source_type = {inproceedings},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Huang, Jiahao and Wu, Yinzhe and Wu, Huanjun and Yang, Guang},\n booktitle = {IEEE International Engineering in Medicine and Biology Conference (EMBC 2022)}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n CS2: A Controllable and Simultaneous Synthesizer of Images and Annotations with Minimal Human Intervention.\n \n \n \n \n\n\n \n Xing, X.; Huang, J.; Nan, Y.; Wu, Y.; Wang, C.; Gao, Z.; Walsh, S.; and Yang, G.\n\n\n \n\n\n\n In Medical Image Computing and Computer Assisted Intervention (MICCAI 2022), 2022. \n \n\n\n\n
\n\n\n\n \n \n \"CS2:Paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {CS2: A Controllable and Simultaneous Synthesizer of Images and Annotations with Minimal Human Intervention},\n type = {inproceedings},\n year = {2022},\n id = {e651bf7e-0773-31a8-ad19-e904934489cc},\n created = {2024-01-13T05:46:29.592Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:05:38.943Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {xing_cs2_2022},\n source_type = {inproceedings},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Xing, Xiaodan and Huang, Jiahao and Nan, Yang and Wu, Yinzhe and Wang, Chengjia and Gao, Zhifan and Walsh, Simon and Yang, Guang},\n booktitle = {Medical Image Computing and Computer Assisted Intervention (MICCAI 2022)}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n AI-based Medical e-Diagnosis for Fast and Automatic Ventricular Volume Measurement in the Patients with Normal Pressure Hydrocephalus.\n \n \n \n \n\n\n \n Zhou, X.; Ye, Q.; Yang, X.; Chen, J.; Ma, H.; Xia, J.; Del Ser, J.; and Yang, G.\n\n\n \n\n\n\n Neural Computing and Applications. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"AI-basedPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {AI-based Medical e-Diagnosis for Fast and Automatic Ventricular Volume Measurement in the Patients with Normal Pressure Hydrocephalus},\n type = {article},\n year = {2022},\n id = {37e469d8-0810-3377-8994-b06ead8ed89a},\n created = {2024-01-13T05:46:30.108Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:06:31.563Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {zhou_ai-based_2022},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Zhou, Xi and Ye, Qinghao and Yang, Xiaolin and Chen, Jiakuan and Ma, Haiqin and Xia, Jun and Del Ser, Javier and Yang, Guang},\n journal = {Neural Computing and Applications}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Calibrating the Dice Loss to Handle Neural Network Overconfidence for Biomedical Image Segmentation.\n \n \n \n \n\n\n \n Yeung, M.; Rundo, L.; Nan, Y.; Sala, E.; Schönlieb, C.; and Yang, G.\n\n\n \n\n\n\n Journal of Digital Imaging. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"CalibratingPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Calibrating the Dice Loss to Handle Neural Network Overconfidence for Biomedical Image Segmentation},\n type = {article},\n year = {2022},\n id = {0cda90b0-e901-308f-a1a9-c2bc290cc78f},\n created = {2024-01-13T05:46:30.113Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:06:41.551Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {yeung_calibrating_2022},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Yeung, Michael and Rundo, Leonardo and Nan, Yang and Sala, Evis and Schönlieb, Carola-Bibiane and Yang, Guang},\n journal = {Journal of Digital Imaging}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Explainable COVID-19 Infections Identification and Delineation Using Calibrated Pseudo Labels.\n \n \n \n \n\n\n \n Li, M.; Fang, Y.; Tang, Z.; Onuorah, C.; Xia, J.; Del Ser, J.; Walsh, S.; and Yang, G.\n\n\n \n\n\n\n IEEE Transactions on Emerging Topics in Computational Intelligence. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"ExplainablePaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Explainable COVID-19 Infections Identification and Delineation Using Calibrated Pseudo Labels},\n type = {article},\n year = {2022},\n id = {15c81bc6-266e-33a7-990b-94f0c367db81},\n created = {2024-01-13T05:46:30.164Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:06:47.013Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {li_explainable_2022},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Li, Ming and Fang, Yingying and Tang, Zeyu and Onuorah, Chibudom and Xia, Jun and Del Ser, Javier and Walsh, Simon and Yang, Guang},\n journal = {IEEE Transactions on Emerging Topics in Computational Intelligence}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Influence of Co-morbidities during SARS-CoV-2 infection in an Indian Population.\n \n \n \n \n\n\n \n Matysek, A.; Studnicka, A.; Smith, W., M.; Hutny, M.; Gajewski, P.; Filipiak, K., J.; Goh, J.; and Yang, G.\n\n\n \n\n\n\n Frontiers in Medicine. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"InfluencePaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Influence of Co-morbidities during SARS-CoV-2 infection in an Indian Population},\n type = {article},\n year = {2022},\n id = {8f45c2b2-f60e-3292-89e0-f50604f18311},\n created = {2024-01-13T05:46:30.404Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:06:59.950Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {matysek_influence_2022},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Matysek, Adrian and Studnicka, Aneta and Smith, Wade Menpes and Hutny, Michal and Gajewski, Pawel and Filipiak, Krzysztof J and Goh, Jorming and Yang, Guang},\n journal = {Frontiers in Medicine}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Quantification of Changes in White Matter Tract Fibers in Idiopathic Normal Pressure Hydrocephalus Based on Diffusion Spectrum Imaging.\n \n \n \n \n\n\n \n Yang, X.; Li, H.; He, W.; Lv, M.; Zhang, H.; Zhou, X.; Wei, H.; Xu, B.; Chen, J.; and Ma, H.\n\n\n \n\n\n\n European Journal of Radiology. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"QuantificationPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Quantification of Changes in White Matter Tract Fibers in Idiopathic Normal Pressure Hydrocephalus Based on Diffusion Spectrum Imaging},\n type = {article},\n year = {2022},\n id = {b53dd2b5-b5b5-3300-8e2b-44acefb0479f},\n created = {2024-01-13T05:46:30.415Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:07:11.841Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {yang_quantification_2022},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Yang, Xiaolin and Li, Hongbing and He, Wenjie and Lv, Minrui and Zhang, Hong and Zhou, Xi and Wei, Haihua and Xu, Boyan and Chen, Jiakuan and Ma, Haiqin},\n journal = {European Journal of Radiology}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Medical Image Understanding and Analysis.\n \n \n \n\n\n \n Yang, G.; Aviles-Rivero, A.; Roberts, M.; and Schönlieb, C.\n\n\n \n\n\n\n 2022.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@book{\n title = {Medical Image Understanding and Analysis},\n type = {book},\n year = {2022},\n id = {9e4ee40b-1b10-36e8-9da3-76b130a63a91},\n created = {2024-01-13T05:46:30.584Z},\n file_attached = {false},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T12:10:01.822Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {yang_medical_2022},\n source_type = {book},\n private_publication = {false},\n bibtype = {book},\n author = {Yang, Guang and Aviles-Rivero, Angelica and Roberts, Michael and Schönlieb, Carola-Bibiane}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n HDL: Hybrid Deep Learning for the Synthesis of Myocardial Velocity Maps in Digital Twins for Cardiac Analysis.\n \n \n \n \n\n\n \n Xing, X.; Del Ser, J.; Wu, Y.; Li, Y.; Xia, J.; Xu, L.; Firmin, D.; Gatehouse, P.; and Yang, G.\n\n\n \n\n\n\n IEEE Journal of Biomedical and Health Informatics. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"HDL:Paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {HDL: Hybrid Deep Learning for the Synthesis of Myocardial Velocity Maps in Digital Twins for Cardiac Analysis},\n type = {article},\n year = {2022},\n id = {51cb21cd-3bd2-3f53-b754-d96a217ee959},\n created = {2024-01-13T05:46:30.663Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:09:13.432Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {xing_hdl_2022},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Xing, Xiaodan and Del Ser, Javier and Wu, Yinzhe and Li, Yang and Xia, Jun and Xu, Lei and Firmin, David and Gatehouse, Peter and Yang, Guang},\n journal = {IEEE Journal of Biomedical and Health Informatics}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Region-Based Evidential Deep Learning to Quantify Uncertainty and Improve Robustness of Brain Tumor Segmentation.\n \n \n \n \n\n\n \n Li, H.; Nan, Y.; Del Ser, J.; and Yang, G.\n\n\n \n\n\n\n Neural Computing and Applications. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"Region-BasedPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Region-Based Evidential Deep Learning to Quantify Uncertainty and Improve Robustness of Brain Tumor Segmentation},\n type = {article},\n year = {2022},\n id = {a9184868-e249-3f3b-80b6-c3bc9c8ef03d},\n created = {2024-01-13T05:46:30.764Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:10:01.675Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {li_region-based_2022},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Li, Hao and Nan, Yang and Del Ser, Javier and Yang, Guang},\n journal = {Neural Computing and Applications}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Skin Lesion Analysis: A State-of-the-Art Survey, Systematic Review, and Future Trends.\n \n \n \n \n\n\n \n Hasan, M., K.; Ahamad, M., A.; Yap, C., H.; and Yang, G.\n\n\n \n\n\n\n 2022.\n \n\n\n\n
\n\n\n\n \n \n \"SkinPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@book{\n title = {Skin Lesion Analysis: A State-of-the-Art Survey, Systematic Review, and Future Trends},\n type = {book},\n year = {2022},\n id = {8ce14be0-8ad3-3516-83c1-1c76731301a5},\n created = {2024-01-13T05:46:31.137Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T12:15:24.226Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {hasan_skin_2022},\n source_type = {book},\n notes = {Publication Title: arXiv e-prints arXiv:2208.12232},\n private_publication = {false},\n bibtype = {book},\n author = {Hasan, Md Kamrul and Ahamad, Md Asif and Yap, Choon Hwai and Yang, Guang}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n From Astronomy to Histology: Adapting the Fellwalker Algorithm to Deep Nuclear Instance Segmentation.\n \n \n \n \n\n\n \n Yeung, M.; Watts, T.; and Yang, G.\n\n\n \n\n\n\n In Annual Conference on Medical Image Understanding and Analysis (MIUA 2022), 2022. \n \n\n\n\n
\n\n\n\n \n \n \"FromPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {From Astronomy to Histology: Adapting the Fellwalker Algorithm to Deep Nuclear Instance Segmentation},\n type = {inproceedings},\n year = {2022},\n id = {837f989e-dd76-3ebe-ad58-cbf32c6950ce},\n created = {2024-01-13T05:46:31.138Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:11:14.324Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {yeung_astronomy_2022},\n source_type = {inproceedings},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Yeung, Michael and Watts, Todd and Yang, Guang},\n booktitle = {Annual Conference on Medical Image Understanding and Analysis (MIUA 2022)}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Unbox the black-box for the medical explainable AI via multi-modal and multi-centre data fusion: A mini-review, two showcases and beyond.\n \n \n \n \n\n\n \n Yang, G.; Ye, Q.; and Xia, J.\n\n\n \n\n\n\n Information Fusion, 77: 29-52. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"UnboxPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Unbox the black-box for the medical explainable AI via multi-modal and multi-centre data fusion: A mini-review, two showcases and beyond},\n type = {article},\n year = {2022},\n keywords = {Explainable AI,Information fusion,Medical image analysis,Multi-domain information fusion,Weakly supervised learning},\n pages = {29-52},\n volume = {77},\n id = {9dcf84b5-1caf-37d8-b073-a824a9243823},\n created = {2024-01-13T06:15:54.774Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:02:58.241Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Explainable Artificial Intelligence (XAI) is an emerging research topic of machine learning aimed at unboxing how AI systems’ black-box choices are made. This research field inspects the measures and models involved in decision-making and seeks solutions to explain them explicitly. Many of the machine learning algorithms cannot manifest how and why a decision has been cast. This is particularly true of the most popular deep neural network approaches currently in use. Consequently, our confidence in AI systems can be hindered by the lack of explainability in these black-box models. The XAI becomes more and more crucial for deep learning powered applications, especially for medical and healthcare studies, although in general these deep neural networks can return an arresting dividend in performance. The insufficient explainability and transparency in most existing AI systems can be one of the major reasons that successful implementation and integration of AI tools into routine clinical practice are uncommon. In this study, we first surveyed the current progress of XAI and in particular its advances in healthcare applications. We then introduced our solutions for XAI leveraging multi-modal and multi-centre data fusion, and subsequently validated in two showcases following real clinical scenarios. Comprehensive quantitative and qualitative analyses can prove the efficacy of our proposed XAI solutions, from which we can envisage successful applications in a broader range of clinical questions.},\n bibtype = {article},\n author = {Yang, Guang and Ye, Qinghao and Xia, Jun},\n doi = {10.1016/j.inffus.2021.07.016},\n journal = {Information Fusion}\n}
\n
\n\n\n
\n Explainable Artificial Intelligence (XAI) is an emerging research topic of machine learning aimed at unboxing how AI systems’ black-box choices are made. This research field inspects the measures and models involved in decision-making and seeks solutions to explain them explicitly. Many of the machine learning algorithms cannot manifest how and why a decision has been cast. This is particularly true of the most popular deep neural network approaches currently in use. Consequently, our confidence in AI systems can be hindered by the lack of explainability in these black-box models. The XAI becomes more and more crucial for deep learning powered applications, especially for medical and healthcare studies, although in general these deep neural networks can return an arresting dividend in performance. The insufficient explainability and transparency in most existing AI systems can be one of the major reasons that successful implementation and integration of AI tools into routine clinical practice are uncommon. In this study, we first surveyed the current progress of XAI and in particular its advances in healthcare applications. We then introduced our solutions for XAI leveraging multi-modal and multi-centre data fusion, and subsequently validated in two showcases following real clinical scenarios. Comprehensive quantitative and qualitative analyses can prove the efficacy of our proposed XAI solutions, from which we can envisage successful applications in a broader range of clinical questions.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n JAS-GAN: Generative Adversarial Network Based Joint Atrium and Scar Segmentations on Unbalanced Atrial Targets.\n \n \n \n \n\n\n \n Chen, J.; Yang, G.; Khan, H.; Zhang, H.; Zhang, Y.; Zhao, S.; Mohiaddin, R.; Wong, T.; Firmin, D.; and Keegan, J.\n\n\n \n\n\n\n IEEE Journal of Biomedical and Health Informatics, 26(1): 103-114. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"JAS-GAN:Paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {JAS-GAN: Generative Adversarial Network Based Joint Atrium and Scar Segmentations on Unbalanced Atrial Targets},\n type = {article},\n year = {2022},\n keywords = {Adaptive Cascade,Adversarial Regularization,Medical Image Segmentation,Unbalanced Atrial Targets},\n pages = {103-114},\n volume = {26},\n id = {c22d199c-61c3-3971-be9b-14c9215ff29b},\n created = {2024-01-13T06:15:55.163Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:02:58.329Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Automated and accurate segmentations of left atrium (LA) and atrial scars from late gadolinium-enhanced cardiac magnetic resonance (LGE CMR) images are in high demand for quantifying atrial scars. The previous quantification of atrial scars relies on a two-phase segmentation for LA and atrial scars due to their large volume difference (unbalanced atrial targets). In this paper, we propose an inter-cascade generative adversarial network, namely JAS-GAN, to segment the unbalanced atrial targets from LGE CMR images automatically and accurately in an end-to-end way. Firstly, JAS-GAN investigates an adaptive attention cascade to automatically correlate the segmentation tasks of the unbalanced atrial targets. The adaptive attention cascade mainly models the inclusion relationship of the two unbalanced atrial targets, where the estimated LA acts as the attention map to adaptively focus on the small atrial scars roughly. Then, an adversarial regularization is applied to the segmentation tasks of the unbalanced atrial targets for making a consistent optimization. It mainly forces the estimated joint distribution of LA and atrial scars to match the real ones. We evaluated the performance of our JAS-GAN on a 3D LGE CMR dataset with 192 scans. Compared with the state-of-the-art methods, our proposed approach yielded better segmentation performance (Average Dice Similarity Coefficient (DSC) values of 0.946 and 0.821 for LA and atrial scars, respectively), which indicated the effectiveness of our proposed approach for segmenting unbalanced atrial targets.},\n bibtype = {article},\n author = {Chen, Jun and Yang, Guang and Khan, Habib and Zhang, Heye and Zhang, Yanping and Zhao, Shu and Mohiaddin, Raad and Wong, Tom and Firmin, David and Keegan, Jennifer},\n doi = {10.1109/JBHI.2021.3077469},\n journal = {IEEE Journal of Biomedical and Health Informatics},\n number = {1}\n}
\n
\n\n\n
\n Automated and accurate segmentations of left atrium (LA) and atrial scars from late gadolinium-enhanced cardiac magnetic resonance (LGE CMR) images are in high demand for quantifying atrial scars. The previous quantification of atrial scars relies on a two-phase segmentation for LA and atrial scars due to their large volume difference (unbalanced atrial targets). In this paper, we propose an inter-cascade generative adversarial network, namely JAS-GAN, to segment the unbalanced atrial targets from LGE CMR images automatically and accurately in an end-to-end way. Firstly, JAS-GAN investigates an adaptive attention cascade to automatically correlate the segmentation tasks of the unbalanced atrial targets. The adaptive attention cascade mainly models the inclusion relationship of the two unbalanced atrial targets, where the estimated LA acts as the attention map to adaptively focus on the small atrial scars roughly. Then, an adversarial regularization is applied to the segmentation tasks of the unbalanced atrial targets for making a consistent optimization. It mainly forces the estimated joint distribution of LA and atrial scars to match the real ones. We evaluated the performance of our JAS-GAN on a 3D LGE CMR dataset with 192 scans. Compared with the state-of-the-art methods, our proposed approach yielded better segmentation performance (Average Dice Similarity Coefficient (DSC) values of 0.946 and 0.821 for LA and atrial scars, respectively), which indicated the effectiveness of our proposed approach for segmenting unbalanced atrial targets.\n
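The segmentation quality figures quoted above are Dice Similarity Coefficients (DSC), computed from binary masks as twice the overlap divided by the sum of the two mask sizes. A minimal sketch follows; the array names in the usage comment are hypothetical placeholders, not from the paper's code.

```python
import numpy as np

def dice_coefficient(pred, target, eps=1e-7):
    """Dice Similarity Coefficient between two binary masks of the same shape."""
    pred = np.asarray(pred, dtype=bool)
    target = np.asarray(target, dtype=bool)
    intersection = np.logical_and(pred, target).sum()
    return (2.0 * intersection + eps) / (pred.sum() + target.sum() + eps)

# Usage (hypothetical arrays): dice_coefficient(la_prediction, la_ground_truth)
```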
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Focal Attention Networks: Optimising Attention for Biomedical Image Segmentation.\n \n \n \n \n\n\n \n Yeung, M.; Rundo, L.; Sala, E.; Schonlieb, C.; and Yang, G.\n\n\n \n\n\n\n In 2022 IEEE 19th International Symposium on Biomedical Imaging (ISBI), pages 1-5, 3 2022. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"FocalPaper\n  \n \n \n \"FocalWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{\n title = {Focal Attention Networks: Optimising Attention for Biomedical Image Segmentation},\n type = {inproceedings},\n year = {2022},\n keywords = {biomedical imaging,chine learning,cost function,image segmentation,ma-},\n pages = {1-5},\n websites = {https://ieeexplore.ieee.org/document/9761414/},\n month = {3},\n publisher = {IEEE},\n day = {28},\n id = {254fc5ab-5b30-33ac-acb7-e0abea26fa93},\n created = {2024-01-13T07:02:55.680Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:14:18.641Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Yeung, Michael and Rundo, Leonardo and Sala, Evis and Schonlieb, Carola-Bibiane and Yang, Guang},\n doi = {10.1109/ISBI52829.2022.9761414},\n booktitle = {2022 IEEE 19th International Symposium on Biomedical Imaging (ISBI)}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Accelerating Cardiac Diffusion Tensor Imaging With a U-Net Based Model: Toward Single Breath-Hold.\n \n \n \n \n\n\n \n Ferreira, P., F.; Banerjee, A.; Scott, A., D.; Khalique, Z.; Yang, G.; Rajakulasingam, R.; Dwornik, M.; De Silva, R.; Pennell, D., J.; Firmin, D., N.; and Nielles-Vallespin, S.\n\n\n \n\n\n\n Journal of Magnetic Resonance Imaging, 56(6): 1691-1704. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"AcceleratingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Accelerating Cardiac Diffusion Tensor Imaging With a U-Net Based Model: Toward Single Breath-Hold},\n type = {article},\n year = {2022},\n keywords = {CNN,U-Net,cardiac,deep learning,diffusion tensor imaging},\n pages = {1691-1704},\n volume = {56},\n id = {142db2f8-6eae-36fb-bc62-4ebe20624f7a},\n created = {2024-01-13T07:02:55.730Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:17:23.777Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Background: In vivo cardiac diffusion tensor imaging (cDTI) characterizes myocardial microstructure. Despite its potential clinical impact, considerable technical challenges exist due to the inherent low signal-to-noise ratio. Purpose: To reduce scan time toward one breath-hold by reconstructing diffusion tensors for in vivo cDTI with a fitting-free deep learning approach. Study type: Retrospective. Population: A total of 197 healthy controls, 547 cardiac patients. Field strength/sequence: A 3 T, diffusion-weighted stimulated echo acquisition mode single-shot echo-planar imaging sequence. Assessment: A U-Net was trained to reconstruct the diffusion tensor elements of the reference results from reduced datasets that could be acquired in 5, 3 or 1 breath-hold(s) (BH) per slice. Fractional anisotropy (FA), mean diffusivity (MD), helix angle (HA), and sheetlet angle (E2A) were calculated and compared to the same measures when using a conventional linear-least-square (LLS) tensor fit with the same reduced datasets. A conventional LLS tensor fit with all available data (12 ± 2.0 [mean ± sd] breath-holds) was used as the reference baseline. Statistical tests: Wilcoxon signed rank/rank sum and Kruskal–Wallis tests. Statistical significance threshold was set at P = 0.05. Intersubject measures are quoted as median [interquartile range]. Results: For global mean or median results, both the LLS and U-Net methods with reduced datasets present a bias for some of the results. For both LLS and U-Net, there is a small but significant difference from the reference results except for LLS: MD 5BH (P = 0.38) and MD 3BH (P = 0.09). When considering direct pixel-wise errors the U-Net model outperformed significantly the LLS tensor fit for reduced datasets that can be acquired in three or just one breath-hold for all parameters. Data conclusion: Diffusion tensor prediction with a trained U-Net is a promising approach to minimize the number of breath-holds needed in clinical cDTI studies. Evidence Level: 4. Technical Efficacy: Stage 1.},\n bibtype = {article},\n author = {Ferreira, Pedro F. and Banerjee, Arjun and Scott, Andrew D. and Khalique, Zohya and Yang, Guang and Rajakulasingam, Ramyah and Dwornik, Maria and De Silva, Ranil and Pennell, Dudley J. and Firmin, David N. and Nielles-Vallespin, Sonia},\n doi = {10.1002/jmri.28199},\n journal = {Journal of Magnetic Resonance Imaging},\n number = {6}\n}
\n
\n\n\n
\n Background: In vivo cardiac diffusion tensor imaging (cDTI) characterizes myocardial microstructure. Despite its potential clinical impact, considerable technical challenges exist due to the inherent low signal-to-noise ratio. Purpose: To reduce scan time toward one breath-hold by reconstructing diffusion tensors for in vivo cDTI with a fitting-free deep learning approach. Study type: Retrospective. Population: A total of 197 healthy controls, 547 cardiac patients. Field strength/sequence: A 3 T, diffusion-weighted stimulated echo acquisition mode single-shot echo-planar imaging sequence. Assessment: A U-Net was trained to reconstruct the diffusion tensor elements of the reference results from reduced datasets that could be acquired in 5, 3 or 1 breath-hold(s) (BH) per slice. Fractional anisotropy (FA), mean diffusivity (MD), helix angle (HA), and sheetlet angle (E2A) were calculated and compared to the same measures when using a conventional linear-least-square (LLS) tensor fit with the same reduced datasets. A conventional LLS tensor fit with all available data (12 ± 2.0 [mean ± sd] breath-holds) was used as the reference baseline. Statistical tests: Wilcoxon signed rank/rank sum and Kruskal–Wallis tests. Statistical significance threshold was set at P = 0.05. Intersubject measures are quoted as median [interquartile range]. Results: For global mean or median results, both the LLS and U-Net methods with reduced datasets present a bias for some of the results. For both LLS and U-Net, there is a small but significant difference from the reference results except for LLS: MD 5BH (P = 0.38) and MD 3BH (P = 0.09). When considering direct pixel-wise errors the U-Net model outperformed significantly the LLS tensor fit for reduced datasets that can be acquired in three or just one breath-hold for all parameters. Data conclusion: Diffusion tensor prediction with a trained U-Net is a promising approach to minimize the number of breath-holds needed in clinical cDTI studies. Evidence Level: 4. Technical Efficacy: Stage 1.\n
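Fractional anisotropy (FA) and mean diffusivity (MD), two of the per-pixel measures compared above, are standard functions of the diffusion tensor's eigenvalues. The sketch below shows only those two formulas; the tensor layout and variable names are illustrative and unrelated to the paper's processing pipeline.

```python
import numpy as np

def fa_md_from_tensor(D):
    """Fractional anisotropy and mean diffusivity of a symmetric 3x3 diffusion tensor."""
    evals = np.linalg.eigvalsh(D)   # three real eigenvalues
    md = evals.mean()               # mean diffusivity
    fa = np.sqrt(1.5) * np.sqrt(((evals - md) ** 2).sum()) / np.sqrt((evals ** 2).sum() + 1e-20)
    return fa, md

# Example: an isotropic tensor gives FA close to 0
# fa, md = fa_md_from_tensor(np.eye(3) * 1.0e-3)
```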
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Edge-enhanced dual discriminator generative adversarial network for fast MRI with parallel imaging using multi-view information.\n \n \n \n \n\n\n \n Huang, J.; Ding, W.; Lv, J.; Yang, J.; Dong, H.; Del Ser, J.; Xia, J.; Ren, T.; Wong, S., T.; and Yang, G.\n\n\n \n\n\n\n Applied Intelligence, 52(13): 14693-14710. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"Edge-enhancedPaper\n  \n \n \n \"Edge-enhancedWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Edge-enhanced dual discriminator generative adversarial network for fast MRI with parallel imaging using multi-view information},\n type = {article},\n year = {2022},\n keywords = {Edge enhancement,Fast MRI,Generative adversarial networks,Multi-view learning,Parallel imaging},\n pages = {14693-14710},\n volume = {52},\n websites = {https://doi.org/10.1007/s10489-021-03092-w},\n publisher = {Springer US},\n id = {f288e0e4-6f63-34a1-b31c-376bfa2061b3},\n created = {2024-01-13T07:02:55.742Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:18:12.132Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {In clinical medicine, magnetic resonance imaging (MRI) is one of the most important tools for diagnosis, triage, prognosis, and treatment planning. However, MRI suffers from an inherent slow data acquisition process because data is collected sequentially in k-space. In recent years, most MRI reconstruction methods proposed in the literature focus on holistic image reconstruction rather than enhancing the edge information. This work steps aside this general trend by elaborating on the enhancement of edge information. Specifically, we introduce a novel parallel imaging coupled dual discriminator generative adversarial network (PIDD-GAN) for fast multi-channel MRI reconstruction by incorporating multi-view information. The dual discriminator design aims to improve the edge information in MRI reconstruction. One discriminator is used for holistic image reconstruction, whereas the other one is responsible for enhancing edge information. An improved U-Net with local and global residual learning is proposed for the generator. Frequency channel attention blocks (FCA Blocks) are embedded in the generator for incorporating attention mechanisms. Content loss is introduced to train the generator for better reconstruction quality. We performed comprehensive experiments on Calgary-Campinas public brain MR dataset and compared our method with state-of-the-art MRI reconstruction methods. Ablation studies of residual learning were conducted on the MICCAI13 dataset to validate the proposed modules. Results show that our PIDD-GAN provides high-quality reconstructed MR images, with well-preserved edge information. The time of single-image reconstruction is below 5ms, which meets the demand of faster processing.},\n bibtype = {article},\n author = {Huang, Jiahao and Ding, Weiping and Lv, Jun and Yang, Jingwen and Dong, Hao and Del Ser, Javier and Xia, Jun and Ren, Tiaojuan and Wong, Stephen T. and Yang, Guang},\n doi = {10.1007/s10489-021-03092-w},\n journal = {Applied Intelligence},\n number = {13}\n}
\n
\n\n\n
\n In clinical medicine, magnetic resonance imaging (MRI) is one of the most important tools for diagnosis, triage, prognosis, and treatment planning. However, MRI suffers from an inherent slow data acquisition process because data is collected sequentially in k-space. In recent years, most MRI reconstruction methods proposed in the literature focus on holistic image reconstruction rather than enhancing the edge information. This work steps aside this general trend by elaborating on the enhancement of edge information. Specifically, we introduce a novel parallel imaging coupled dual discriminator generative adversarial network (PIDD-GAN) for fast multi-channel MRI reconstruction by incorporating multi-view information. The dual discriminator design aims to improve the edge information in MRI reconstruction. One discriminator is used for holistic image reconstruction, whereas the other one is responsible for enhancing edge information. An improved U-Net with local and global residual learning is proposed for the generator. Frequency channel attention blocks (FCA Blocks) are embedded in the generator for incorporating attention mechanisms. Content loss is introduced to train the generator for better reconstruction quality. We performed comprehensive experiments on Calgary-Campinas public brain MR dataset and compared our method with state-of-the-art MRI reconstruction methods. Ablation studies of residual learning were conducted on the MICCAI13 dataset to validate the proposed modules. Results show that our PIDD-GAN provides high-quality reconstructed MR images, with well-preserved edge information. The time of single-image reconstruction is below 5ms, which meets the demand of faster processing.\n
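As a hedged sketch of the general dual-discriminator idea described above (one adversarial term on the whole image, one on an edge representation), the snippet below extracts Sobel edge maps and combines an L1 content term with two adversarial terms. The edge extractor, loss weights and names are assumptions made for illustration; this is not the PIDD-GAN architecture or its actual loss.

```python
import torch
import torch.nn.functional as F

def sobel_edges(img):
    """Edge-magnitude map of a (batch, 1, H, W) image tensor via Sobel filters (illustrative)."""
    kx = torch.tensor([[-1., 0., 1.], [-2., 0., 2.], [-1., 0., 1.]]).view(1, 1, 3, 3)
    ky = kx.transpose(2, 3)
    gx = F.conv2d(img, kx, padding=1)
    gy = F.conv2d(img, ky, padding=1)
    return torch.sqrt(gx ** 2 + gy ** 2 + 1e-12)

def generator_loss(recon, target, d_image_fake_logits, d_edge_fake_logits,
                   w_content=1.0, w_img=0.01, w_edge=0.01):
    """Illustrative combined objective: L1 content term plus adversarial terms from an
    image-level and an edge-level discriminator (weights are arbitrary placeholders)."""
    content = F.l1_loss(recon, target)
    adv_img = F.binary_cross_entropy_with_logits(
        d_image_fake_logits, torch.ones_like(d_image_fake_logits))
    adv_edge = F.binary_cross_entropy_with_logits(
        d_edge_fake_logits, torch.ones_like(d_edge_fake_logits))
    return w_content * content + w_img * adv_img + w_edge * adv_edge
```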
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n LKAU-Net: 3D Large-Kernel Attention-Based U-Net for Automatic MRI Brain Tumor Segmentation.\n \n \n \n \n\n\n \n Li, H.; Nan, Y.; and Yang, G.\n\n\n \n\n\n\n Volume 3 . Annual Conference on Medical Image Understanding and Analysis 2022, pages 313-327. Springer International Publishing, 2022.\n \n\n\n\n
\n\n\n\n \n \n \"AnnualPaper\n  \n \n \n \"AnnualWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inbook{\n type = {inbook},\n year = {2022},\n keywords = {Attention,Brain tumor segmentation,Deep learning,MRI,brain tumor segmentation},\n pages = {313-327},\n volume = {3},\n websites = {http://dx.doi.org/10.1007/978-3-031-12053-4_24,https://link.springer.com/10.1007/978-3-031-12053-4_24},\n publisher = {Springer International Publishing},\n id = {7eeb3a20-4f1f-3654-b21d-1e54b01154c1},\n created = {2024-01-13T07:02:55.868Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:14:18.757Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n bibtype = {inbook},\n author = {Li, Hao and Nan, Yang and Yang, Guang},\n doi = {10.1007/978-3-031-12053-4_24},\n chapter = {LKAU-Net: 3D Large-Kernel Attention-Based U-Net for Automatic MRI Brain Tumor Segmentation},\n title = {Annual Conference on Medical Image Understanding and Analysis 2022}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Generative Adversarial Network Powered Fast Magnetic Resonance Imaging—Comparative Study and New Perspectives.\n \n \n \n \n\n\n \n Yang, G.; Lv, J.; Chen, Y.; Huang, J.; and Zhu, J.\n\n\n \n\n\n\n Volume 217 . Generative Adversarial Learning: Architectures and Applications, pages 305-339. Springer International Publishing, 2022.\n \n\n\n\n
\n\n\n\n \n \n \"GenerativePaper\n  \n \n \n \"GenerativeWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inbook{\n type = {inbook},\n year = {2022},\n keywords = {Compressive sensing,Deep learning,Fast magnetic resonance imaging (mri),Generative adversarial networks (gan)},\n pages = {305-339},\n volume = {217},\n websites = {http://dx.doi.org/10.1007/978-3-030-91390-8_13,https://link.springer.com/10.1007/978-3-030-91390-8_13},\n publisher = {Springer International Publishing},\n id = {0d6a5884-9d9a-37c9-92e3-6c7d0deb8611},\n created = {2024-01-13T07:02:55.869Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:14:18.650Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Magnetic Resonance Imaging (MRI) is a vital component of medical imaging. When compared to other image modalities, it has advantages such as the absence of radiation, superior soft tissue contrast, and complementary multiple sequence information. However, one drawback of MRI is its comparatively slow scanning and reconstruction compared to other image modalities, limiting its usage in some clinical applications when imaging time is critical. Traditional compressive sensing based MRI (CS-MRI) reconstruction can speed up MRI acquisition, but suffers from a long iterative process and noise-induced artefacts. Recently, Deep Neural Networks (DNNs) have been used in sparse MRI reconstruction models to recreate relatively high-quality images from heavily undersampled k-space data, allowing for much faster MRI scanning. However, there are still some hurdles to tackle. For example, directly training DNNs based on L1/L2 distance to the target fully sampled images could result in blurry reconstruction because L1/L2 loss can only enforce overall image or patch similarity and does not take into account local information such as anatomical sharpness. It is also hard to preserve fine image details while maintaining a natural appearance. More recently, Generative Adversarial Networks (GAN) based methods are proposed to solve fast MRI with enhanced image perceptual quality. The encoder obtains a latent space for the undersampling image, and the image is reconstructed by the decoder using the GAN loss. In this chapter, we review the GAN powered fast MRI methods with a comparative study on various anatomical datasets to demonstrate the generalisability and robustness of this kind of fast MRI while providing future perspectives.},\n bibtype = {inbook},\n author = {Yang, Guang and Lv, Jun and Chen, Yutong and Huang, Jiahao and Zhu, Jin},\n doi = {10.1007/978-3-030-91390-8_13},\n chapter = {Generative Adversarial Network Powered Fast Magnetic Resonance Imaging—Comparative Study and New Perspectives},\n title = {Generative Adversarial Learning: Architectures and Applications}\n}
\n
\n\n\n
\n Magnetic Resonance Imaging (MRI) is a vital component of medical imaging. When compared to other image modalities, it has advantages such as the absence of radiation, superior soft tissue contrast, and complementary multiple sequence information. However, one drawback of MRI is its comparatively slow scanning and reconstruction compared to other image modalities, limiting its usage in some clinical applications when imaging time is critical. Traditional compressive sensing based MRI (CS-MRI) reconstruction can speed up MRI acquisition, but suffers from a long iterative process and noise-induced artefacts. Recently, Deep Neural Networks (DNNs) have been used in sparse MRI reconstruction models to recreate relatively high-quality images from heavily undersampled k-space data, allowing for much faster MRI scanning. However, there are still some hurdles to tackle. For example, directly training DNNs based on L1/L2 distance to the target fully sampled images could result in blurry reconstruction because L1/L2 loss can only enforce overall image or patch similarity and does not take into account local information such as anatomical sharpness. It is also hard to preserve fine image details while maintaining a natural appearance. More recently, Generative Adversarial Networks (GAN) based methods are proposed to solve fast MRI with enhanced image perceptual quality. The encoder obtains a latent space for the undersampling image, and the image is reconstructed by the decoder using the GAN loss. In this chapter, we review the GAN powered fast MRI methods with a comparative study on various anatomical datasets to demonstrate the generalisability and robustness of this kind of fast MRI while providing future perspectives.\n
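For context on the "heavily undersampled k-space data" these reconstruction methods start from, the sketch below retrospectively undersamples a 2-D image's k-space with a 1-D Cartesian mask and returns the zero-filled reconstruction, a common input to learning-based fast-MRI models. The acceleration factor, mask type and function name are illustrative assumptions, not taken from the chapter.

```python
import numpy as np

def undersample_kspace(image, accel=4, centre_lines=16, seed=0):
    """Retrospectively undersample a 2-D image's k-space with a 1-D Cartesian mask
    and return the zero-filled reconstruction plus the sampling mask."""
    rng = np.random.default_rng(seed)
    ny = image.shape[0]
    k = np.fft.fftshift(np.fft.fft2(image))
    mask = rng.random(ny) < (1.0 / accel)                       # random phase-encode lines
    c = ny // 2
    mask[c - centre_lines // 2: c + centre_lines // 2] = True   # fully sampled centre
    k_undersampled = k * mask[:, None]
    zero_filled = np.abs(np.fft.ifft2(np.fft.ifftshift(k_undersampled)))
    return zero_filled, mask
```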
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Robust weakly supervised learning for COVID-19 recognition using multi-center CT images.\n \n \n \n \n\n\n \n Ye, Q.; Gao, Y.; Ding, W.; Niu, Z.; Wang, C.; Jiang, Y.; Wang, M.; Fang, E., F.; Menpes-Smith, W.; Xia, J.; and Yang, G.\n\n\n \n\n\n\n Applied Soft Computing, 116: 108291. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"RobustPaper\n  \n \n \n \"RobustWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Robust weakly supervised learning for COVID-19 recognition using multi-center CT images},\n type = {article},\n year = {2022},\n keywords = {COVID-19,Medical image analysis,Multi-domain shift,Multicenter data processing,Weakly supervised learning},\n pages = {108291},\n volume = {116},\n websites = {https://doi.org/10.1016/j.asoc.2021.108291},\n publisher = {Elsevier B.V.},\n id = {392cc1f3-4374-3cf4-8d8c-64bfdd1de492},\n created = {2024-01-13T07:02:55.871Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:16:36.557Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {The world is currently experiencing an ongoing pandemic of an infectious disease named coronavirus disease 2019 (i.e., COVID-19), which is caused by the severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2). Computed Tomography (CT) plays an important role in assessing the severity of the infection and can also be used to identify those symptomatic and asymptomatic COVID-19 carriers. With a surge of the cumulative number of COVID-19 patients, radiologists are increasingly stressed to examine the CT scans manually. Therefore, an automated 3D CT scan recognition tool is highly in demand since the manual analysis is time-consuming for radiologists and their fatigue can cause possible misjudgment. However, due to various technical specifications of CT scanners located in different hospitals, the appearance of CT images can be significantly different leading to the failure of many automated image recognition approaches. The multi-domain shift problem for the multi-center and multi-scanner studies is therefore nontrivial that is also crucial for a dependable recognition and critical for reproducible and objective diagnosis and prognosis. In this paper, we proposed a COVID-19 CT scan recognition model namely coronavirus information fusion and diagnosis network (CIFD-Net) that can efficiently handle the multi-domain shift problem via a new robust weakly supervised learning paradigm. Our model can resolve the problem of different appearance in CT scan images reliably and efficiently while attaining higher accuracy compared to other state-of-the-art methods.},\n bibtype = {article},\n author = {Ye, Qinghao and Gao, Yuan and Ding, Weiping and Niu, Zhangming and Wang, Chengjia and Jiang, Yinghui and Wang, Minhao and Fang, Evandro Fei and Menpes-Smith, Wade and Xia, Jun and Yang, Guang},\n doi = {10.1016/j.asoc.2021.108291},\n journal = {Applied Soft Computing}\n}
\n
\n\n\n
\n The world is currently experiencing an ongoing pandemic of an infectious disease named coronavirus disease 2019 (i.e., COVID-19), which is caused by the severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2). Computed Tomography (CT) plays an important role in assessing the severity of the infection and can also be used to identify those symptomatic and asymptomatic COVID-19 carriers. With a surge of the cumulative number of COVID-19 patients, radiologists are increasingly stressed to examine the CT scans manually. Therefore, an automated 3D CT scan recognition tool is highly in demand since the manual analysis is time-consuming for radiologists and their fatigue can cause possible misjudgment. However, due to various technical specifications of CT scanners located in different hospitals, the appearance of CT images can be significantly different leading to the failure of many automated image recognition approaches. The multi-domain shift problem for the multi-center and multi-scanner studies is therefore nontrivial that is also crucial for a dependable recognition and critical for reproducible and objective diagnosis and prognosis. In this paper, we proposed a COVID-19 CT scan recognition model namely coronavirus information fusion and diagnosis network (CIFD-Net) that can efficiently handle the multi-domain shift problem via a new robust weakly supervised learning paradigm. Our model can resolve the problem of different appearance in CT scan images reliably and efficiently while attaining higher accuracy compared to other state-of-the-art methods.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Faster Diffusion Cardiac MRI with Deep Learning-Based Breath Hold Reduction.\n \n \n \n \n\n\n \n Tänzer, M.; Ferreira, P.; Scott, A.; Khalique, Z.; Dwornik, M.; Pennell, D.; Yang, G.; Rueckert, D.; and Nielles-Vallespin, S.\n\n\n \n\n\n\n Volume 3 . Medical Image Understanding and Analysis 2022, pages 101-115. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"MedicalPaper\n  \n \n \n \"MedicalWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inbook{\n type = {inbook},\n year = {2022},\n keywords = {Attention,Brain tumor segmentation,Deep learning,MRI,brain tumor segmentation},\n pages = {101-115},\n volume = {3},\n websites = {http://dx.doi.org/10.1007/978-3-031-12053-4_24,https://link.springer.com/10.1007/978-3-031-12053-4_8},\n id = {121f0f19-d99e-384d-bd1e-664542794bf8},\n created = {2024-01-13T07:02:55.975Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T11:51:49.849Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n bibtype = {inbook},\n author = {Tänzer, Michael and Ferreira, Pedro and Scott, Andrew and Khalique, Zohya and Dwornik, Maria and Pennell, Dudley and Yang, Guang and Rueckert, Daniel and Nielles-Vallespin, Sonia},\n doi = {10.1007/978-3-031-12053-4_8},\n chapter = {Faster Diffusion Cardiac MRI with Deep Learning-Based Breath Hold Reduction},\n title = {Medical Image Understanding and Analysis 2022}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n AI-Based Reconstruction for Fast MRI-A Systematic Review and Meta-Analysis.\n \n \n \n \n\n\n \n Chen, Y.; Schonlieb, C., B.; Lio, P.; Leiner, T.; Dragotti, P., L.; Wang, G.; Rueckert, D.; Firmin, D.; and Yang, G.\n\n\n \n\n\n\n Proceedings of the IEEE, 110(2): 224-245. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"AI-BasedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {AI-Based Reconstruction for Fast MRI-A Systematic Review and Meta-Analysis},\n type = {article},\n year = {2022},\n keywords = {Compressed sensing (CS),Deep learning,Magnetic resonance imaging (MRI),Neural network},\n pages = {224-245},\n volume = {110},\n id = {48d4ffaa-f81b-3b70-8753-364ce6427097},\n created = {2024-01-13T07:02:56.131Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:17:06.056Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Compressed sensing (CS) has been playing a key role in accelerating the magnetic resonance imaging (MRI) acquisition process. With the resurgence of artificial intelligence, deep neural networks and CS algorithms are being integrated to redefine the state of the art of fast MRI. The past several years have witnessed substantial growth in the complexity, diversity, and performance of deep-learning-based CS techniques that are dedicated to fast MRI. In this meta-analysis, we systematically review the deep-learning-based CS techniques for fast MRI, describe key model designs, highlight breakthroughs, and discuss promising directions. We have also introduced a comprehensive analysis framework and a classification system to assess the pivotal role of deep learning in CS-based acceleration for MRI.},\n bibtype = {article},\n author = {Chen, Yutong and Schonlieb, Carola Bibiane and Lio, Pietro and Leiner, Tim and Dragotti, Pier Luigi and Wang, Ge and Rueckert, Daniel and Firmin, David and Yang, Guang},\n doi = {10.1109/JPROC.2022.3141367},\n journal = {Proceedings of the IEEE},\n number = {2}\n}
\n
\n\n\n
\n Compressed sensing (CS) has been playing a key role in accelerating the magnetic resonance imaging (MRI) acquisition process. With the resurgence of artificial intelligence, deep neural networks and CS algorithms are being integrated to redefine the state of the art of fast MRI. The past several years have witnessed substantial growth in the complexity, diversity, and performance of deep-learning-based CS techniques that are dedicated to fast MRI. In this meta-analysis, we systematically review the deep-learning-based CS techniques for fast MRI, describe key model designs, highlight breakthroughs, and discuss promising directions. We have also introduced a comprehensive analysis framework and a classification system to assess the pivotal role of deep learning in CS-based acceleration for MRI.\n
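The review above surveys deep-learning-accelerated compressed-sensing MRI. As a point of reference for what "undersampled acquisition" means in practice, here is a minimal sketch (ours, not taken from the paper) of retrospective Cartesian undersampling of k-space and the zero-filled baseline reconstruction that CS and deep-learning methods try to improve; the sampling pattern, acceleration factor and function names are illustrative assumptions.

```python
# Minimal illustrative sketch (ours, not from the review): retrospective
# Cartesian undersampling of k-space and the zero-filled baseline
# reconstruction that CS / deep-learning methods aim to improve upon.
import numpy as np

def undersample_and_reconstruct(image, acceleration=4, seed=0):
    """Keep roughly 1/acceleration of the phase-encode lines at random
    (always retaining the 8 central lines) and return the zero-filled
    reconstruction together with the sampling mask."""
    rng = np.random.default_rng(seed)
    ny, nx = image.shape
    kspace = np.fft.fftshift(np.fft.fft2(image))

    line_mask = rng.random(ny) < (1.0 / acceleration)    # random phase-encode lines
    line_mask[ny // 2 - 4:ny // 2 + 4] = True            # fully sample the k-space centre
    mask = np.repeat(line_mask[:, None], nx, axis=1)

    zero_filled = np.fft.ifft2(np.fft.ifftshift(kspace * mask))
    return np.abs(zero_filled), mask

if __name__ == "__main__":
    phantom = np.zeros((128, 128))
    phantom[40:90, 40:90] = 1.0                          # toy square "anatomy"
    recon, mask = undersample_and_reconstruct(phantom, acceleration=4)
    print(recon.shape, float(mask.mean()))               # aliased image, ~0.25-0.3 sampled
```

Replacing the zero-filled inverse FFT with a learned reconstructor (CNN, GAN or unrolled network) is where the surveyed methods differ.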
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Explainable AI (XAI) In Biomedical Signal and Image Processing: Promises and Challenges.\n \n \n \n \n\n\n \n Yang, G.; Rao, A.; Fernandez-Maloigne, C.; Calhoun, V.; and Menegaz, G.\n\n\n \n\n\n\n In 2022 IEEE International Conference on Image Processing (ICIP), pages 1531-1535, 10 2022. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"ExplainablePaper\n  \n \n \n \"ExplainableWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Explainable AI (XAI) In Biomedical Signal and Image Processing: Promises and Challenges},\n type = {inproceedings},\n year = {2022},\n pages = {1531-1535},\n issue = {2018},\n websites = {https://ieeexplore.ieee.org/document/9897629/},\n month = {10},\n publisher = {IEEE},\n day = {16},\n id = {9a7306cb-ec87-36cd-8d10-d0e5508f89b6},\n created = {2024-01-13T07:02:56.150Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:14:18.795Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {false},\n hidden = {false},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Yang, Guang and Rao, Arvind and Fernandez-Maloigne, Christine and Calhoun, Vince and Menegaz, Gloria},\n doi = {10.1109/ICIP46576.2022.9897629},\n booktitle = {2022 IEEE International Conference on Image Processing (ICIP)}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n 3D AGSE-VNet: an automatic brain tumor MRI data segmentation framework.\n \n \n \n \n\n\n \n Guan, X.; Yang, G.; Ye, J.; Yang, W.; Xu, X.; Jiang, W.; and Lai, X.\n\n\n \n\n\n\n BMC Medical Imaging, 22(1): 1-18. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"3DPaper\n  \n \n \n \"3DWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {3D AGSE-VNet: an automatic brain tumor MRI data segmentation framework},\n type = {article},\n year = {2022},\n keywords = {Automatic segmentation,Brain tumor,Deep learning,Magnetic resonance imaging,VNet},\n pages = {1-18},\n volume = {22},\n websites = {https://doi.org/10.1186/s12880-021-00728-8},\n publisher = {BioMed Central},\n id = {e32905d3-9f70-327d-9e44-2e8cbc6e64cd},\n created = {2024-01-13T07:02:56.588Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:18:23.493Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Background: Glioma is the most common brain malignant tumor, with a high morbidity rate and a mortality rate of more than three percent, which seriously endangers human health. The main method of acquiring brain tumors in the clinic is MRI. Segmentation of brain tumor regions from multi-modal MRI scan images is helpful for treatment inspection, post-diagnosis monitoring, and effect evaluation of patients. However, the common operation in clinical brain tumor segmentation is still manual segmentation, lead to its time-consuming and large performance difference between different operators, a consistent and accurate automatic segmentation method is urgently needed. With the continuous development of deep learning, researchers have designed many automatic segmentation algorithms; however, there are still some problems: (1) The research of segmentation algorithm mostly stays on the 2D plane, this will reduce the accuracy of 3D image feature extraction to a certain extent. (2) MRI images have gray-scale offset fields that make it difficult to divide the contours accurately. Methods: To meet the above challenges, we propose an automatic brain tumor MRI data segmentation framework which is called AGSE-VNet. In our study, the Squeeze and Excite (SE) module is added to each encoder, the Attention Guide Filter (AG) module is added to each decoder, using the channel relationship to automatically enhance the useful information in the channel to suppress the useless information, and use the attention mechanism to guide the edge information and remove the influence of irrelevant information such as noise. Results: We used the BraTS2020 challenge online verification tool to evaluate our approach. The focus of verification is that the Dice scores of the whole tumor, tumor core and enhanced tumor are 0.68, 0.85 and 0.70, respectively. Conclusion: Although MRI images have different intensities, AGSE-VNet is not affected by the size of the tumor, and can more accurately extract the features of the three regions, it has achieved impressive results and made outstanding contributions to the clinical diagnosis and treatment of brain tumor patients.},\n bibtype = {article},\n author = {Guan, Xi and Yang, Guang and Ye, Jianming and Yang, Weiji and Xu, Xiaomei and Jiang, Weiwei and Lai, Xiaobo},\n doi = {10.1186/s12880-021-00728-8},\n journal = {BMC Medical Imaging},\n number = {1}\n}
\n
\n\n\n
\n Background: Glioma is the most common malignant brain tumor, with a high morbidity rate and a mortality rate of more than three percent, and it seriously endangers human health. The main method of imaging brain tumors in the clinic is MRI. Segmentation of brain tumor regions from multi-modal MRI scans is helpful for treatment inspection, post-diagnosis monitoring, and evaluation of treatment effect. However, brain tumor segmentation in the clinic is still commonly performed manually, which is time-consuming and leads to large performance differences between operators; a consistent and accurate automatic segmentation method is therefore urgently needed. With the continuous development of deep learning, researchers have designed many automatic segmentation algorithms; however, some problems remain: (1) most segmentation research still operates on 2D planes, which reduces the accuracy of 3D image feature extraction to a certain extent; (2) MRI images contain gray-scale offset fields that make it difficult to delineate contours accurately. Methods: To meet these challenges, we propose an automatic brain tumor MRI segmentation framework called AGSE-VNet. In our study, a Squeeze-and-Excite (SE) module is added to each encoder and an Attention Guide Filter (AG) module to each decoder; the channel relationship is used to automatically enhance useful information and suppress useless information within each channel, and the attention mechanism is used to guide edge information and remove the influence of irrelevant information such as noise. Results: We used the BraTS2020 challenge online verification tool to evaluate our approach. The Dice scores for the whole tumor, tumor core and enhancing tumor are 0.68, 0.85 and 0.70, respectively. Conclusion: Although MRI images differ in intensity, AGSE-VNet is not affected by tumor size and extracts the features of the three regions more accurately; it achieves impressive results and can contribute to the clinical diagnosis and treatment of brain tumor patients.\n
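The abstract above attaches a Squeeze-and-Excite (SE) module to each encoder. For readers unfamiliar with the mechanism, below is a minimal PyTorch sketch of a generic 3D SE block (channel attention in the sense of Hu et al.); the reduction ratio, 3D pooling and class name are our assumptions, not necessarily the exact AGSE-VNet configuration.

```python
# Minimal PyTorch sketch of a generic 3D Squeeze-and-Excitation (SE) block,
# i.e. the channel-attention module the abstract attaches to each encoder.
# Reduction ratio, 3D pooling and naming are assumptions, not AGSE-VNet's exact variant.
import torch
import torch.nn as nn

class SEBlock(nn.Module):
    def __init__(self, channels, reduction=8):
        super().__init__()
        self.pool = nn.AdaptiveAvgPool3d(1)      # "squeeze": one descriptor per channel
        self.fc = nn.Sequential(                 # "excitation": channel re-weighting
            nn.Linear(channels, channels // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(channels // reduction, channels),
            nn.Sigmoid(),
        )

    def forward(self, x):                        # x: (N, C, D, H, W) volumetric features
        n, c = x.shape[:2]
        w = self.fc(self.pool(x).view(n, c)).view(n, c, 1, 1, 1)
        return x * w                             # rescale channels by learned attention

feat = torch.randn(1, 32, 8, 16, 16)
print(SEBlock(32)(feat).shape)                   # torch.Size([1, 32, 8, 16, 16])
```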
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Unsupervised Image Registration towards Enhancing Performance and Explainability in Cardiac and Brain Image Analysis.\n \n \n \n \n\n\n \n Wang, C.; Yang, G.; and Papanastasiou, G.\n\n\n \n\n\n\n Sensors, 22(6): 2125. 3 2022.\n \n\n\n\n
\n\n\n\n \n \n \"UnsupervisedPaper\n  \n \n \n \"UnsupervisedWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Unsupervised Image Registration towards Enhancing Performance and Explainability in Cardiac and Brain Image Analysis},\n type = {article},\n year = {2022},\n keywords = {deep learning,explainable deep learning,inverse-consistency,multi-modality image registration,unsupervised image registration},\n pages = {2125},\n volume = {22},\n websites = {https://www.mdpi.com/1424-8220/22/6/2125},\n month = {3},\n day = {9},\n id = {9558b6ce-d2b1-3c7f-957a-10819f0330df},\n created = {2024-01-13T07:02:56.653Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:14:19.153Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Magnetic Resonance Imaging (MRI) typically recruits multiple sequences (defined here as “modalities”). As each modality is designed to offer different anatomical and functional clinical information, there are evident disparities in the imaging content across modalities. Inter- and intra-modality affine and non-rigid image registration is an essential medical image analysis process in clinical imaging, as for example before imaging biomarkers need to be derived and clinically evaluated across different MRI modalities, time phases and slices. Although commonly needed in real clinical scenarios, affine and non-rigid image registration is not extensively investigated using a single unsupervised model architecture. In our work, we present an unsupervised deep learning registration methodology that can accurately model affine and non-rigid transformations, simultaneously. Moreover, inverse-consistency is a fundamental inter-modality registration property that is not considered in deep learning registration algorithms. To address inverse consistency, our methodology performs bi-directional cross-modality image synthesis to learn modality-invariant latent representations, and involves two factorised transformation networks (one per each encoder-decoder channel) and an inverse-consistency loss to learn topology-preserving anatomical transformations. Overall, our model (named “FIRE”) shows improved performances against the reference standard baseline method (i.e., Symmetric Normalization implemented using the ANTs toolbox) on multi-modality brain 2D and 3D MRI and intra-modality cardiac 4D MRI data experiments. We focus on explaining model-data components to enhance model explainability in medical image registration. On computational time experiments, we show that the FIRE model performs on a memory-saving mode, as it can inherently learn topology-preserving image registration directly in the training phase. We therefore demonstrate an efficient and versatile registration technique that can have merit in multi-modal image registrations in the clinical setting.},\n bibtype = {article},\n author = {Wang, Chengjia and Yang, Guang and Papanastasiou, Giorgos},\n doi = {10.3390/s22062125},\n journal = {Sensors},\n number = {6}\n}
\n
\n\n\n
\n Magnetic Resonance Imaging (MRI) typically recruits multiple sequences (defined here as “modalities”). As each modality is designed to offer different anatomical and functional clinical information, there are evident disparities in the imaging content across modalities. Inter- and intra-modality affine and non-rigid image registration is an essential medical image analysis process in clinical imaging, for example before imaging biomarkers are derived and clinically evaluated across different MRI modalities, time phases and slices. Although commonly needed in real clinical scenarios, affine and non-rigid image registration has not been extensively investigated using a single unsupervised model architecture. In our work, we present an unsupervised deep learning registration methodology that can accurately model affine and non-rigid transformations simultaneously. Moreover, inverse-consistency is a fundamental inter-modality registration property that is not considered in deep learning registration algorithms. To address inverse consistency, our methodology performs bi-directional cross-modality image synthesis to learn modality-invariant latent representations, and involves two factorised transformation networks (one per encoder-decoder channel) and an inverse-consistency loss to learn topology-preserving anatomical transformations. Overall, our model (named “FIRE”) shows improved performance against the reference standard baseline method (i.e., Symmetric Normalization implemented using the ANTs toolbox) on multi-modality brain 2D and 3D MRI and intra-modality cardiac 4D MRI data experiments. We focus on explaining model-data components to enhance model explainability in medical image registration. In computational time experiments, we show that the FIRE model runs in a memory-saving mode, as it can inherently learn topology-preserving image registration directly in the training phase. We therefore demonstrate an efficient and versatile registration technique that can have merit in multi-modal image registration in the clinical setting.\n
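FIRE's inverse-consistency loss encourages the forward and backward transformations to be mutual inverses. As a hedged illustration only (restricted to the affine case, and not the authors' implementation), composing the two predicted transforms and penalising their deviation from identity can be written as:

```python
# Hedged illustration (affine case only, not the FIRE implementation): an
# inverse-consistency penalty asks that composing the predicted forward and
# backward transforms recovers the identity transform.
import torch

def inverse_consistency_loss(A_fwd, A_bwd):
    """A_fwd, A_bwd: (N, 3, 3) homogeneous 2D affine matrices predicted for the
    moving->fixed and fixed->moving directions, respectively."""
    eye = torch.eye(3, device=A_fwd.device).expand_as(A_fwd)
    comp = torch.bmm(A_fwd, A_bwd)               # forward followed by backward
    return ((comp - eye) ** 2).mean()

# toy check: an exact inverse pair yields (numerically) zero loss
A = torch.tensor([[[1.1, 0.0, 2.0],
                   [0.0, 0.9, -1.0],
                   [0.0, 0.0, 1.0]]])
print(inverse_consistency_loss(A, torch.linalg.inv(A)).item())   # ~0.0
```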
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Position of the AI for Health Imaging (AI4HI) network on metadata models for imaging biobanks.\n \n \n \n \n\n\n \n Kondylakis, H.; Ciarrocchi, E.; Cerda-Alberich, L.; Chouvarda, I.; Fromont, L., A.; Garcia-Aznar, J., M.; Kalokyri, V.; Kosvyra, A.; Walker, D.; Yang, G.; and Neri, E.\n\n\n \n\n\n\n European Radiology Experimental, 6(1): 1-15. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"PositionPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Position of the AI for Health Imaging (AI4HI) network on metadata models for imaging biobanks},\n type = {article},\n year = {2022},\n keywords = {Artificial intelligence,Diagnostic imaging,Metadata,Radiation therapy,Radiomics},\n pages = {1-15},\n volume = {6},\n publisher = {European Radiology Experimental},\n id = {4a2bf0d2-1a1c-3e1b-a379-cab39673ae2a},\n created = {2024-01-13T07:02:56.659Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:19:00.870Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {A huge amount of imaging data is becoming available worldwide and an incredible range of possible improvements can be provided by artificial intelligence algorithms in clinical care for diagnosis and decision support. In this context, it has become essential to properly manage and handle these medical images and to define which metadata have to be considered, in order for the images to provide their full potential. Metadata are additional data associated with the images, which provide a complete description of the image acquisition, curation, analysis, and of the relevant clinical variables associated with the images. Currently, several data models are available to describe one or more subcategories of metadata, but a unique, common, and standard data model capable of fully representing the heterogeneity of medical metadata has not been yet developed. This paper reports the state of the art on metadata models for medical imaging, the current limitations and further developments, and describes the strategy adopted by the Horizon 2020 “AI for Health Imaging” projects, which are all dedicated to the creation of imaging biobanks.},\n bibtype = {article},\n author = {Kondylakis, Haridimos and Ciarrocchi, Esther and Cerda-Alberich, Leonor and Chouvarda, Ioanna and Fromont, Lauren A. and Garcia-Aznar, Jose Manuel and Kalokyri, Varvara and Kosvyra, Alexandra and Walker, Dawn and Yang, Guang and Neri, Emanuele},\n doi = {10.1186/s41747-022-00281-1},\n journal = {European Radiology Experimental},\n number = {1}\n}
\n
\n\n\n
\n A huge amount of imaging data is becoming available worldwide, and an incredible range of possible improvements can be provided by artificial intelligence algorithms in clinical care for diagnosis and decision support. In this context, it has become essential to properly manage and handle these medical images and to define which metadata have to be considered, in order for the images to provide their full potential. Metadata are additional data associated with the images, which provide a complete description of the image acquisition, curation, and analysis, and of the relevant clinical variables associated with the images. Currently, several data models are available to describe one or more subcategories of metadata, but a unique, common, and standard data model capable of fully representing the heterogeneity of medical metadata has not yet been developed. This paper reports the state of the art on metadata models for medical imaging, the current limitations and further developments, and describes the strategy adopted by the Horizon 2020 “AI for Health Imaging” projects, which are all dedicated to the creation of imaging biobanks.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Deep Learning-enabled Prostate Segmentation: Large Cohort Evaluation with Inter-Reader Variability Analysis.\n \n \n \n \n\n\n \n Liu, Y.; Qi, M.; Surawech, C.; Zheng, H.; Nguyen, D.; Yang, G.; Raman, S.; and Sung, K.\n\n\n \n\n\n\n In International Society for Magnetic Resonance in Medicine, pages 1-3, 2022. \n \n\n\n\n
\n\n\n\n \n \n \"DeepPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Deep Learning-enabled Prostate Segmentation: Large Cohort Evaluation with Inter-Reader Variability Analysis},\n type = {inproceedings},\n year = {2022},\n pages = {1-3},\n id = {b0f6a842-2c8d-3012-bc51-e77b73a50acc},\n created = {2024-01-13T07:02:56.735Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:52:47.547Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Liu, Yongkai and Qi, Miao and Surawech, Chuthaporn and Zheng, Haoxin and Nguyen, Dan and Yang, Guang and Raman, Steven and Sung, Kyung},\n booktitle = {International Society for Magnetic Resonance in Medicine}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Review of Data Types and Model Dimensionality for Cardiac DTI SMS-Related Artefact Removal.\n \n \n \n \n\n\n \n Tänzer, M.; Yook, S., H.; Ferreira, P.; Yang, G.; Rueckert, D.; and Nielles-Vallespin, S.\n\n\n \n\n\n\n Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), 13593 LNCS: 123-132. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"ReviewPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Review of Data Types and Model Dimensionality for Cardiac DTI SMS-Related Artefact Removal},\n type = {article},\n year = {2022},\n keywords = {Cardiac MRI,Deep learning,Diffusion tensor imaging,MRI},\n pages = {123-132},\n volume = {13593 LNCS},\n id = {77dfa9c5-cf58-3435-80c0-fa9cbf556137},\n created = {2024-01-13T07:02:56.740Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:19:39.841Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {As diffusion tensor imaging (DTI) gains popularity in cardiac imaging due to its unique ability to non-invasively assess the cardiac microstructure, deep learning-based Artificial Intelligence is becoming a crucial tool in mitigating some of its drawbacks, such as the long scan times. As it often happens in fast-paced research environments, a lot of emphasis has been put on showing the capability of deep learning while often not enough time has been spent investigating what input and architectural properties would benefit cardiac DTI acceleration the most. In this work, we compare the effect of several input types (magnitude images vs complex images), multiple dimensionalities (2D vs 3D operations), and multiple input types (single slice vs multi-slice) on the performance of a model trained to remove artefacts caused by a simultaneous multi-slice (SMS) acquisition. Despite our initial intuition, our experiments show that, for a fixed number of parameters, simpler 2D real-valued models outperform their more advanced 3D or complex counterparts. The best performance is although obtained by a real-valued model trained using both the magnitude and phase components of the acquired data. We believe this behaviour to be due to real-valued models making better use of the lower number of parameters, and to 3D models not being able to exploit the spatial information because of the low SMS acceleration factor used in our experiments.},\n bibtype = {article},\n author = {Tänzer, Michael and Yook, Sea Hee and Ferreira, Pedro and Yang, Guang and Rueckert, Daniel and Nielles-Vallespin, Sonia},\n doi = {10.1007/978-3-031-23443-9_12},\n journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}\n}
\n
\n\n\n
\n As diffusion tensor imaging (DTI) gains popularity in cardiac imaging due to its unique ability to non-invasively assess the cardiac microstructure, deep learning-based artificial intelligence is becoming a crucial tool in mitigating some of its drawbacks, such as the long scan times. As often happens in fast-paced research environments, much emphasis has been put on showing the capability of deep learning, while not enough time has been spent investigating which input and architectural properties would benefit cardiac DTI acceleration the most. In this work, we compare the effect of input type (magnitude images vs complex images), dimensionality (2D vs 3D operations), and single-slice vs multi-slice inputs on the performance of a model trained to remove artefacts caused by a simultaneous multi-slice (SMS) acquisition. Despite our initial intuition, our experiments show that, for a fixed number of parameters, simpler 2D real-valued models outperform their more advanced 3D or complex counterparts. The best performance is, however, obtained by a real-valued model trained using both the magnitude and phase components of the acquired data. We believe this behaviour is due to real-valued models making better use of the lower number of parameters, and to 3D models being unable to exploit the spatial information because of the low SMS acceleration factor used in our experiments.\n
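The comparison above hinges on how complex MR data are presented to a real-valued network. A minimal sketch of the two contrasted input representations, magnitude-only versus stacked real/imaginary channels, is given below; the shapes and helper names are our own illustrative choices, not code from the paper.

```python
# Illustrative only: two common ways of presenting complex MR data to a
# real-valued network, as contrasted in the paper - a magnitude-only channel
# versus stacking the real and imaginary parts as two channels.
import numpy as np

def to_magnitude_input(complex_img):
    """(H, W) complex -> (1, H, W) float32, magnitude only (phase discarded)."""
    return np.abs(complex_img)[None, ...].astype(np.float32)

def to_two_channel_input(complex_img):
    """(H, W) complex -> (2, H, W) float32, keeping magnitude and phase information."""
    return np.stack([complex_img.real, complex_img.imag]).astype(np.float32)

img = np.exp(1j * 0.3) * np.random.rand(64, 64)   # toy complex-valued image
print(to_magnitude_input(img).shape, to_two_channel_input(img).shape)  # (1, 64, 64) (2, 64, 64)
```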
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Unsupervised Tissue Segmentation via Deep Constrained Gaussian Network.\n \n \n \n \n\n\n \n Nan, Y.; Tang, P.; Zhang, G.; Zeng, C.; Liu, Z.; Gao, Z.; Zhang, H.; and Yang, G.\n\n\n \n\n\n\n IEEE Transactions on Medical Imaging, 41(12): 3799-3811. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"UnsupervisedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Unsupervised Tissue Segmentation via Deep Constrained Gaussian Network},\n type = {article},\n year = {2022},\n keywords = {Semantic segmentation,deep mixture models,tissue segmentation,unsupervised learning,unsupervised segmentation},\n pages = {3799-3811},\n volume = {41},\n publisher = {IEEE},\n id = {147d4461-b783-362f-9c3c-49172decb343},\n created = {2024-01-13T08:14:13.839Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:15:52.681Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n source_type = {JOUR},\n private_publication = {false},\n abstract = {Tissue segmentation is the mainstay of pathological examination, whereas the manual delineation is unduly burdensome. To assist this time-consuming and subjective manual step, researchers have devised methods to automatically segment structures in pathological images. Recently, automated machine and deep learning based methods dominate tissue segmentation research studies. However, most machine and deep learning based approaches are supervised and developed using a large number of training samples, in which the pixel-wise annotations are expensive and sometimes can be impossible to obtain. This paper introduces a novel unsupervised learning paradigm by integrating an end-to-end deep mixture model with a constrained indicator to acquire accurate semantic tissue segmentation. This constraint aims to centralise the components of deep mixture models during the calculation of the optimisation function. In so doing, the redundant or empty class issues, which are common in current unsupervised learning methods, can be greatly reduced. By validation on both public and in-house datasets, the proposed deep constrained Gaussian network achieves significantly (Wilcoxon signed-rank test) better performance (with the average Dice scores of 0.737 and 0.735, respectively) on tissue segmentation with improved stability and robustness, compared to other existing unsupervised segmentation approaches. Furthermore, the proposed method presents a similar performance (p-value >0.05) compared to the fully supervised U-Net.},\n bibtype = {article},\n author = {Nan, Yang and Tang, Peng and Zhang, Guyue and Zeng, Caihong and Liu, Zhihong and Gao, Zhifan and Zhang, Heye and Yang, Guang},\n doi = {10.1109/TMI.2022.3195123},\n journal = {IEEE Transactions on Medical Imaging},\n number = {12}\n}
\n
\n\n\n
\n Tissue segmentation is the mainstay of pathological examination, whereas the manual delineation is unduly burdensome. To assist this time-consuming and subjective manual step, researchers have devised methods to automatically segment structures in pathological images. Recently, automated machine and deep learning based methods dominate tissue segmentation research studies. However, most machine and deep learning based approaches are supervised and developed using a large number of training samples, in which the pixel-wise annotations are expensive and sometimes can be impossible to obtain. This paper introduces a novel unsupervised learning paradigm by integrating an end-to-end deep mixture model with a constrained indicator to acquire accurate semantic tissue segmentation. This constraint aims to centralise the components of deep mixture models during the calculation of the optimisation function. In so doing, the redundant or empty class issues, which are common in current unsupervised learning methods, can be greatly reduced. By validation on both public and in-house datasets, the proposed deep constrained Gaussian network achieves significantly (Wilcoxon signed-rank test) better performance (with the average Dice scores of 0.737 and 0.735, respectively) on tissue segmentation with improved stability and robustness, compared to other existing unsupervised segmentation approaches. Furthermore, the proposed method presents a similar performance (p-value >0.05) compared to the fully supervised U-Net.\n
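The method above couples a deep feature extractor with a constrained Gaussian mixture. Purely as an illustrative stand-in (not the authors' constrained indicator), the sketch below shows a Gaussian-mixture negative log-likelihood over pixel features with a simple usage-balance penalty that discourages empty or redundant components, the failure mode the abstract mentions; all symbols and the 0.1 weight are assumptions.

```python
# Hedged stand-in (not the authors' constrained indicator): a Gaussian-mixture
# negative log-likelihood over pixel features plus a simple usage-balance
# penalty that discourages empty or redundant components.
import torch

def constrained_gmm_loss(feats, mu, log_var, logit_pi, balance_weight=0.1, eps=1e-6):
    """feats: (P, D) pixel features; mu, log_var: (K, D); logit_pi: (K,) mixture logits."""
    log_pi = torch.log_softmax(logit_pi, dim=0)                         # (K,)
    diff = feats[:, None, :] - mu[None, :, :]                           # (P, K, D)
    log_gauss = -0.5 * ((diff ** 2) / log_var.exp() + log_var).sum(-1)  # up to an additive constant
    log_joint = log_pi[None, :] + log_gauss                             # (P, K)
    nll = -torch.logsumexp(log_joint, dim=1).mean()

    usage = torch.softmax(log_joint, dim=1).mean(0)                     # average responsibility per component
    neg_entropy = (usage * torch.log(usage + eps)).sum()                # high when usage collapses
    return nll + balance_weight * neg_entropy

feats = torch.randn(1024, 16)                                           # e.g. pixel embeddings
mu, log_var = torch.randn(4, 16), torch.zeros(4, 16)
print(constrained_gmm_loss(feats, mu, log_var, torch.zeros(4)).item())
```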
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Data Harmonisation for Information Fusion in Digital Healthcare: A State-of-the-Art Systematic Review, Meta-Analysis and Future Research Directions.\n \n \n \n \n\n\n \n Nan, Y.; Del Ser, J.; Walsh, S., S.; Schönlieb, C.; Roberts, M.; Selby, I.; Howard, K.; Owen, J.; Neville, J.; Guiot, J.; Ernst, B.; Pastor, A.; Alberich-Bayarri, A.; Menzel, M., I.; Walsh, S., S.; Vos, W.; Flerin, N.; Charbonnier, J., P.; van Rikxoort, E.; Chatterjee, A.; Woodruff, H.; Lambin, P.; Cerdá-Alberich, L.; Martí-Bonmatí, L.; Herrera, F.; and Yang, G.\n\n\n \n\n\n\n Information Fusion, 82(December 2021): 99-122. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"DataPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Data Harmonisation for Information Fusion in Digital Healthcare: A State-of-the-Art Systematic Review, Meta-Analysis and Future Research Directions},\n type = {article},\n year = {2022},\n keywords = {Information fusion,data harmonisation,data standardisation,domain adaptation,reproducibility},\n pages = {99-122},\n volume = {82},\n id = {4fc9dbe6-cee1-37a8-b28f-9dc3b7d7876a},\n created = {2024-01-13T08:14:14.103Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:18:33.158Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n source_type = {JOUR},\n private_publication = {false},\n abstract = {Removing the bias and variance of multicentre data has always been a challenge in large scale digital healthcare studies, which requires the ability to integrate clinical features extracted from data acquired by different scanners and protocols to improve stability and robustness. Previous studies have described various computational approaches to fuse single modality multicentre datasets. However, these surveys rarely focused on evaluation metrics and lacked a checklist for computational data harmonisation studies. In this systematic review, we summarise the computational data harmonisation approaches for multi-modality data in the digital healthcare field, including harmonisation strategies and evaluation metrics based on different theories. In addition, a comprehensive checklist that summarises common practices for data harmonisation studies is proposed to guide researchers to report their research findings more effectively. Last but not least, flowcharts presenting possible ways for methodology and metric selection are proposed and the limitations of different methods have been surveyed for future research.},\n bibtype = {article},\n author = {Nan, Yang and Del Ser, Javier and Walsh, Sean Simon and Schönlieb, Carola and Roberts, Michael and Selby, Ian and Howard, Kit and Owen, John and Neville, Jon and Guiot, Julien and Ernst, Benoit and Pastor, Ana and Alberich-Bayarri, Angel and Menzel, Marion I. and Walsh, Sean Simon and Vos, Wim and Flerin, Nina and Charbonnier, Jean Paul and van Rikxoort, Eva and Chatterjee, Avishek and Woodruff, Henry and Lambin, Philippe and Cerdá-Alberich, Leonor and Martí-Bonmatí, Luis and Herrera, Francisco and Yang, Guang},\n doi = {10.1016/j.inffus.2022.01.001},\n journal = {Information Fusion},\n number = {December 2021}\n}
\n
\n\n\n
\n Removing the bias and variance of multicentre data has always been a challenge in large scale digital healthcare studies, which requires the ability to integrate clinical features extracted from data acquired by different scanners and protocols to improve stability and robustness. Previous studies have described various computational approaches to fuse single modality multicentre datasets. However, these surveys rarely focused on evaluation metrics and lacked a checklist for computational data harmonisation studies. In this systematic review, we summarise the computational data harmonisation approaches for multi-modality data in the digital healthcare field, including harmonisation strategies and evaluation metrics based on different theories. In addition, a comprehensive checklist that summarises common practices for data harmonisation studies is proposed to guide researchers to report their research findings more effectively. Last but not least, flowcharts presenting possible ways for methodology and metric selection are proposed and the limitations of different methods have been surveyed for future research.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Synthetic Velocity Mapping Cardiac MRI Coupled with Automated Left Ventricle Segmentation.\n \n \n \n \n\n\n \n Xing, X.; Wu, Y.; Firmin, D.; Gatehouse, P.; and Yang, G.\n\n\n \n\n\n\n In SPIE Medical Imaging 2022, pages 103, 2022. \n \n\n\n\n
\n\n\n\n \n \n \"SyntheticPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Synthetic Velocity Mapping Cardiac MRI Coupled with Automated Left Ventricle Segmentation},\n type = {inproceedings},\n year = {2022},\n pages = {103},\n issue = {April 2022},\n id = {7e552b5d-1b05-39d0-8e29-4146352c9aef},\n created = {2024-01-13T08:14:16.324Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:14:29.078Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n source_type = {CONF},\n private_publication = {false},\n abstract = {Temporal patterns of cardiac motion provide important information for cardiac disease diagnosis. This pattern could be obtained by three-directional CINE multi-slice left ventricular myocardial velocity mapping (3Dir MVM), which is a cardiac MR technique providing magnitude and phase information of the myocardial motion simultaneously. However, long acquisition time limits the usage of this technique by causing breathing artifacts, while shortening the time causes low temporal resolution and may provide an inaccurate assessment of cardiac motion. In this study, we proposed a frame synthesis algorithm to increase the temporal resolution of 3Dir MVM data. Our algorithm is featured by 1) three attention-based encoders which accept magnitude images, phase images, and myocardium segmentation masks respectively as inputs; 2) three decoders that output the interpolated frames and corresponding myocardium segmentation results; and 3) loss functions highlighting myocardium pixels. Our algorithm can not only increase the temporal resolution 3Dir MVMs, but can also generates the myocardium segmentation results at the same time.},\n bibtype = {inproceedings},\n author = {Xing, Xiaodan and Wu, Yinzhe and Firmin, David and Gatehouse, Peter and Yang, Guang},\n doi = {10.1117/12.2611118},\n booktitle = {SPIE Medical Imaging 2022}\n}
\n
\n\n\n
\n Temporal patterns of cardiac motion provide important information for cardiac disease diagnosis. This pattern can be obtained by three-directional CINE multi-slice left ventricular myocardial velocity mapping (3Dir MVM), a cardiac MR technique that provides magnitude and phase information of the myocardial motion simultaneously. However, the long acquisition time limits the use of this technique by causing breathing artifacts, while shortening the acquisition lowers the temporal resolution and may provide an inaccurate assessment of cardiac motion. In this study, we propose a frame synthesis algorithm to increase the temporal resolution of 3Dir MVM data. Our algorithm features: 1) three attention-based encoders that accept magnitude images, phase images, and myocardium segmentation masks, respectively, as inputs; 2) three decoders that output the interpolated frames and the corresponding myocardium segmentation results; and 3) loss functions highlighting myocardium pixels. Our algorithm not only increases the temporal resolution of 3Dir MVMs, but also generates the myocardium segmentation results at the same time.\n
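The abstract mentions "loss functions highlighting myocardium pixels". A hedged sketch of one simple way to do this (ours, not necessarily the authors' loss) is an L1 interpolation loss up-weighted inside the myocardium mask:

```python
# Hedged sketch of a loss that "highlights myocardium pixels": an L1
# interpolation loss up-weighted inside the myocardium mask. The weighting
# scheme and value are ours, not necessarily the authors' formulation.
import torch

def myocardium_weighted_l1(pred, target, myo_mask, weight=5.0):
    """pred, target: (N, C, H, W) synthesised vs ground-truth frames;
    myo_mask: (N, 1, H, W) binary myocardium segmentation."""
    per_pixel = (pred - target).abs()
    weights = 1.0 + weight * myo_mask             # >1 inside the myocardium, 1 elsewhere
    return (per_pixel * weights).mean()

pred, gt = torch.randn(2, 3, 64, 64), torch.randn(2, 3, 64, 64)
mask = (torch.rand(2, 1, 64, 64) > 0.8).float()
print(myocardium_weighted_l1(pred, gt, mask).item())
```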
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Amelioration of Alzheimer’s disease pathology by mitophagy inducers identified via machine learning and a cross-species workflow.\n \n \n \n \n\n\n \n Xie, C.; Zhuang, X.; Niu, Z.; Ai, R.; Lautrup, S.; Zheng, S.; Jiang, Y.; Han, R.; Sen Gupta, T.; and Cao, S.\n\n\n \n\n\n\n Nature Biomedical Engineering. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"AmeliorationPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Amelioration of Alzheimer’s disease pathology by mitophagy inducers identified via machine learning and a cross-species workflow},\n type = {article},\n year = {2022},\n id = {1f5b13c4-f962-3be5-a8ab-e1b3489cc966},\n created = {2024-01-13T08:14:16.715Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:17:49.304Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {xie_amelioration_2022},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Xie, Chenglong and Zhuang, Xu-Xu and Niu, Zhangming and Ai, Ruixue and Lautrup, Sofie and Zheng, Shuangjia and Jiang, Yinghui and Han, Ruiyu and Sen Gupta, Tanima and Cao, Shuqin},\n journal = {Nature Biomedical Engineering}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Human Treelike Tubular Structure Segmentation: A Comprehensive Review and Future Perspectives.\n \n \n \n \n\n\n \n Li, H.; Tang, Z.; Nan, Y.; and Yang, G.\n\n\n \n\n\n\n Computers in Biology and Medicine, 151(PA): 106241. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"HumanPaper\n  \n \n \n \"HumanWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Human Treelike Tubular Structure Segmentation: A Comprehensive Review and Future Perspectives},\n type = {article},\n year = {2022},\n keywords = {Airways,Blood vessels,Medical imaging,Review,Segmentation,Treelike tubular structure},\n pages = {106241},\n volume = {151},\n websites = {https://doi.org/10.1016/j.compbiomed.2022.106241},\n publisher = {Elsevier Ltd},\n id = {70a4c340-6dcf-348d-a597-e11c19158719},\n created = {2024-01-13T08:14:16.890Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:15:24.323Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n source_type = {JOUR},\n private_publication = {false},\n abstract = {Various structures in human physiology follow a treelike morphology, which often expresses complexity at very fine scales. Examples of such structures are intrathoracic airways, retinal blood vessels, and hepatic blood vessels. Large collections of 2D and 3D images have been made available by medical imaging modalities such as magnetic resonance imaging (MRI), computed tomography (CT), Optical coherence tomography (OCT) and ultrasound in which the spatial arrangement can be observed. Segmentation of these structures in medical imaging is of great importance since the analysis of the structure provides insights into disease diagnosis, treatment planning, and prognosis. Manually labelling extensive data by radiologists is often time-consuming and error-prone. As a result, automated or semi-automated computational models have become a popular research field of medical imaging in the past two decades, and many have been developed to date. In this survey, we aim to provide a comprehensive review of currently publicly available datasets, segmentation algorithms, and evaluation metrics. In addition, current challenges and future research directions are discussed.},\n bibtype = {article},\n author = {Li, Hao and Tang, Zeyu and Nan, Yang and Yang, Guang},\n doi = {10.1016/j.compbiomed.2022.106241},\n journal = {Computers in Biology and Medicine},\n number = {PA}\n}
\n
\n\n\n
\n Various structures in human physiology follow a treelike morphology, which often expresses complexity at very fine scales. Examples of such structures are intrathoracic airways, retinal blood vessels, and hepatic blood vessels. Large collections of 2D and 3D images have been made available by medical imaging modalities such as magnetic resonance imaging (MRI), computed tomography (CT), Optical coherence tomography (OCT) and ultrasound in which the spatial arrangement can be observed. Segmentation of these structures in medical imaging is of great importance since the analysis of the structure provides insights into disease diagnosis, treatment planning, and prognosis. Manually labelling extensive data by radiologists is often time-consuming and error-prone. As a result, automated or semi-automated computational models have become a popular research field of medical imaging in the past two decades, and many have been developed to date. In this survey, we aim to provide a comprehensive review of currently publicly available datasets, segmentation algorithms, and evaluation metrics. In addition, current challenges and future research directions are discussed.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Data and Physics Driven Learning Models for Fast MRI -- Fundamentals and Methodologies from CNN, GAN to Attention and Transformers.\n \n \n \n \n\n\n \n Huang, J.; Fang, Y.; Nan, Y.; Wu, H.; Wu, Y.; Gao, Z.; Li, Y.; Wang, Z.; Lio, P.; Rueckert, D.; Eldar, Y., C.; and Yang, G.\n\n\n \n\n\n\n , (August). 2022.\n \n\n\n\n
\n\n\n\n \n \n \"DataPaper\n  \n \n \n \"DataWebsite\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Data and Physics Driven Learning Models for Fast MRI -- Fundamentals and Methodologies from CNN, GAN to Attention and Transformers},\n type = {article},\n year = {2022},\n websites = {http://arxiv.org/abs/2204.01706},\n id = {30d483b1-90c7-35f0-94b6-ac7dba0442a2},\n created = {2024-01-13T12:15:09.502Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T12:15:57.661Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Research studies have shown no qualms about using data driven deep learning models for downstream tasks in medical image analysis, e.g., anatomy segmentation and lesion detection, disease diagnosis and prognosis, and treatment planning. However, deep learning models are not the sovereign remedy for medical image analysis when the upstream imaging is not being conducted properly (with artefacts). This has been manifested in MRI studies, where the scanning is typically slow, prone to motion artefacts, with a relatively low signal to noise ratio, and poor spatial and/or temporal resolution. Recent studies have witnessed substantial growth in the development of deep learning techniques for propelling fast MRI. This article aims to (1) introduce the deep learning based data driven techniques for fast MRI including convolutional neural network and generative adversarial network based methods, (2) survey the attention and transformer based models for speeding up MRI reconstruction, and (3) detail the research in coupling physics and data driven models for MRI acceleration. Finally, we will demonstrate through a few clinical applications, explain the importance of data harmonisation and explainable models for such fast MRI techniques in multicentre and multi-scanner studies, and discuss common pitfalls in current research and recommendations for future research directions.},\n bibtype = {article},\n author = {Huang, Jiahao and Fang, Yingying and Nan, Yang and Wu, Huanjun and Wu, Yinzhe and Gao, Zhifan and Li, Yang and Wang, Zidong and Lio, Pietro and Rueckert, Daniel and Eldar, Yonina C. and Yang, Guang},\n number = {August}\n}
\n
\n\n\n
\n Research studies have shown no qualms about using data driven deep learning models for downstream tasks in medical image analysis, e.g., anatomy segmentation and lesion detection, disease diagnosis and prognosis, and treatment planning. However, deep learning models are not the sovereign remedy for medical image analysis when the upstream imaging is not being conducted properly (with artefacts). This has been manifested in MRI studies, where the scanning is typically slow, prone to motion artefacts, with a relatively low signal to noise ratio, and poor spatial and/or temporal resolution. Recent studies have witnessed substantial growth in the development of deep learning techniques for propelling fast MRI. This article aims to (1) introduce the deep learning based data driven techniques for fast MRI including convolutional neural network and generative adversarial network based methods, (2) survey the attention and transformer based models for speeding up MRI reconstruction, and (3) detail the research in coupling physics and data driven models for MRI acceleration. Finally, we will demonstrate through a few clinical applications, explain the importance of data harmonisation and explainable models for such fast MRI techniques in multicentre and multi-scanner studies, and discuss common pitfalls in current research and recommendations for future research directions.\n
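A recurring physics-driven building block in the models this article surveys is a data-consistency step that re-imposes the acquired k-space samples on the network output. The sketch below is a generic hard data-consistency operation in that spirit, not code from the article; the names and the single-coil Cartesian setting are assumptions.

```python
# Generic illustration (single-coil Cartesian case, not code from the article)
# of the hard data-consistency step used in many physics-guided fast-MRI
# networks: wherever a k-space sample was actually acquired, the network
# prediction is overwritten with the measured value.
import numpy as np

def data_consistency(pred_image, acquired_kspace, mask):
    """pred_image: (H, W) complex network output; acquired_kspace: (H, W) measured
    k-space; mask: (H, W) boolean, True where k-space was sampled."""
    pred_k = np.fft.fft2(pred_image)
    pred_k[mask] = acquired_kspace[mask]          # enforce the acquired measurements
    return np.fft.ifft2(pred_k)

img = np.random.rand(64, 64).astype(np.complex128)       # stand-in network output
k = np.fft.fft2(np.random.rand(64, 64))                   # stand-in acquisition
mask = np.random.rand(64, 64) > 0.7
out = data_consistency(img, k, mask)
print(np.allclose(np.fft.fft2(out)[mask], k[mask]))       # True: measurements preserved
```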
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2021\n \n \n (45)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Association between Left Ventricular Global Function Index and Outcomes in Patients with Dilated Cardiomyopathy.\n \n \n \n \n\n\n \n Liu, T.; Zhou, Z.; Bo, K.; Gao, Y.; Wang, H.; Wang, R.; Liu, W.; Chang, S.; Liu, Y.; and Sun, Y.\n\n\n \n\n\n\n Frontiers in Cardiovascular Medicine. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"AssociationPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Association between Left Ventricular Global Function Index and Outcomes in Patients with Dilated Cardiomyopathy},\n type = {article},\n year = {2021},\n id = {ac7a8f2b-c0e2-37f8-8672-816a49f7021e},\n created = {2024-01-13T05:46:20.142Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:11:37.971Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {liu_association_2021},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Liu, Tong and Zhou, Zhen and Bo, Kairui and Gao, Yifeng and Wang, Hui and Wang, Rui and Liu, Wei and Chang, Sanshuai and Liu, Yuanyuan and Sun, Yuqing},\n journal = {Frontiers in Cardiovascular Medicine}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Comparative Study of Radiomics and Deep-learning Based Methods for Pulmonary Nodule Malignancy Prediction in Low Dose CT Images.\n \n \n \n \n\n\n \n Astaraki, M.; Yang, G.; Zakko, Y.; Toma-Dasu, L.; Smedby, Ö.; and Wang, C.\n\n\n \n\n\n\n Frontiers in Oncology. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {A Comparative Study of Radiomics and Deep-learning Based Methods for Pulmonary Nodule Malignancy Prediction in Low Dose CT Images},\n type = {article},\n year = {2021},\n id = {495b08b3-f6cb-3492-8cd0-589fc580d300},\n created = {2024-01-13T05:46:20.618Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:14:04.667Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {astaraki_comparative_2021},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Astaraki, Mehdi and Yang, Guang and Zakko, Yousuf and Toma-Dasu, Luliana and Smedby, Örjan and Wang, Chunliang},\n journal = {Frontiers in Oncology}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multi-Channel U-Net (MCUNet) Based Fast and Automated Segmentation for the 3-Directional Multislice Cine Myocardial Velocity Mapping.\n \n \n \n \n\n\n \n Wu, Y.; Hatipoglu, S.; Alonso-Álvarez, D.; Gatehouse, P.; Firmin, D.; Keegan, J.; and Yang, G.\n\n\n \n\n\n\n In Society for Cardiovascular Magnetic Resonance (SCMR) 24th Annual Meeting, 2021. \n \n\n\n\n
\n\n\n\n \n \n \"Multi-ChannelPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Multi-Channel U-Net (MCUNet) Based Fast and Automated Segmentation for the 3-Directional Multislice Cine Myocardial Velocity Mapping},\n type = {inproceedings},\n year = {2021},\n id = {6df6289e-42dd-3252-8816-97d8d693fb60},\n created = {2024-01-13T05:46:21.378Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:18:17.999Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {wu_multi-channel_2021-1},\n source_type = {inproceedings},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Wu, Yinzhe and Hatipoglu, Suzan and Alonso-Álvarez, Diego and Gatehouse, Peter and Firmin, David and Keegan, Jennifer and Yang, Guang},\n booktitle = {Society for Cardiovascular Magnetic Resonance (SCMR) 24th Annual Meeting}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n ME-Net: Multi-Encoder Net Framework for Brain Tumor Segmentation.\n \n \n \n \n\n\n \n Zhang, W.; Yang, G.; Huang, H.; Yang, W.; Xu, X.; Liu, Y.; and Lai, X.\n\n\n \n\n\n\n International Journal of Imaging Systems and Technology. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"ME-Net:Paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {ME-Net: Multi-Encoder Net Framework for Brain Tumor Segmentation},\n type = {article},\n year = {2021},\n id = {40605ac0-4e0b-3674-8c94-8f9a46948b55},\n created = {2024-01-13T05:46:21.410Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:18:27.498Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {zhang_me-net_2021-1},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Zhang, Wenbo and Yang, Guang and Huang, He and Yang, Weiji and Xu, Xiaomei and Liu, Yongkai and Lai, Xiaobo},\n journal = {International Journal of Imaging Systems and Technology}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Atrial Scar Segmentation from 3D Late Gadolinium Enhanced Datasets: Effect of Time After Contrast Injection.\n \n \n \n \n\n\n \n Yang, G.; Chen, J.; Zhang, H.; Wage, R.; Allen, J.; Wong, T.; Mohiaddin, R.; Firmin, D.; and Keegan, J.\n\n\n \n\n\n\n In Society for Cardiovascular Magnetic Resonance (SCMR) 24th Annual Meeting, 2021. \n \n\n\n\n
\n\n\n\n \n \n \"AtrialPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 5 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Atrial Scar Segmentation from 3D Late Gadolinium Enhanced Datasets: Effect of Time After Contrast Injection},\n type = {inproceedings},\n year = {2021},\n id = {5682e34a-941f-31eb-b7c6-bdfc7c27bcf4},\n created = {2024-01-13T05:46:21.440Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:18:28.543Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {yang_atrial_2021-1},\n source_type = {inproceedings},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Yang, Guang and Chen, Jun and Zhang, Heye and Wage, Rick and Allen, Jack and Wong, Tom and Mohiaddin, Raad and Firmin, David and Keegan, Jennifer},\n booktitle = {Society for Cardiovascular Magnetic Resonance (SCMR) 24th Annual Meeting}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fast and Automated Segmentation for the Three-Directional Multi-slice Cine Myocardial Velocity Mapping.\n \n \n \n \n\n\n \n Wu, Y.; Hatipoglu, S.; Alonso-Álvarez, D.; Gatehouse, P.; Li, B.; Gao, Y.; Firmin, D.; Keegan, J.; and Yang, G.\n\n\n \n\n\n\n Diagnostics. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"FastPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 6 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Fast and Automated Segmentation for the Three-Directional Multi-slice Cine Myocardial Velocity Mapping},\n type = {article},\n year = {2021},\n id = {2c335cfc-6706-32f0-9aae-8215edd42262},\n created = {2024-01-13T05:46:21.930Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:19:22.517Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {wu_fast_2021},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Wu, Yinzhe and Hatipoglu, Suzan and Alonso-Álvarez, Diego and Gatehouse, Peter and Li, Binghuan and Gao, Yikai and Firmin, David and Keegan, Jennifer and Yang, Guang},\n journal = {Diagnostics}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Transfer Learning Enhanced Generative Adversarial Networks for Multi-Channel MRI Reconstruction.\n \n \n \n \n\n\n \n Lv, J.; Li, G.; Tong, X.; Chen, W.; Huang, J.; Wang, C.; and Yang, G.\n\n\n \n\n\n\n Computers in Biology and Medicine. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"TransferPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Transfer Learning Enhanced Generative Adversarial Networks for Multi-Channel MRI Reconstruction},\n type = {article},\n year = {2021},\n id = {5fe940c2-094b-37f2-b49f-85a0df95ce96},\n created = {2024-01-13T05:46:22.136Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:20:16.251Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {lv_transfer_2021},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Lv, Jun and Li, Guangyuan and Tong, Xiangrong and Chen, Weibo and Huang, Jiahao and Wang, Chengyan and Yang, Guang},\n journal = {Computers in Biology and Medicine}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n FIRE: Unsupervised Bi-directional Inter- and Intra-modality Registration Using Deep Networks.\n \n \n \n \n\n\n \n Wang, C.; Yang, G.; and Papanastasiou, G.\n\n\n \n\n\n\n In IEEE International Symposium on Computer-Based Medical Systems (CBMS 2021), 2021. \n \n\n\n\n
\n\n\n\n \n \n \"FIRE:Paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {FIRE: Unsupervised Bi-directional Inter- and Intra-modality Registration Using Deep Networks},\n type = {inproceedings},\n year = {2021},\n id = {c123e711-f550-3a61-9f87-34ba0de209f2},\n created = {2024-01-13T05:46:22.606Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:20:36.057Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {wang_fire_2021-1},\n source_type = {inproceedings},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Wang, Chengjia and Yang, Guang and Papanastasiou, Giorgos},\n booktitle = {IEEE International Symposium on Computer-Based Medical Systems (CBMS 2021)}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Recent Advances in Fibrosis and Scar Segmentation from Cardiac MRI: A State-of-the-Art Review and Future Perspectives.\n \n \n \n \n\n\n \n Wu, Y.; Tang, Z.; Li, B.; Firmin, D.; and Yang, G.\n\n\n \n\n\n\n Frontiers in Physiology. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"RecentPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Recent Advances in Fibrosis and Scar Segmentation from Cardiac MRI: A State-of-the-Art Review and Future Perspectives},\n type = {article},\n year = {2021},\n id = {39a3b880-60d5-3992-8aad-b81dbef6e96b},\n created = {2024-01-13T05:46:22.711Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:20:39.778Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {wu_recent_2021},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Wu, Yinzhe and Tang, Zeyu and Li, Binghuan and Firmin, David and Yang, Guang},\n journal = {Frontiers in Physiology}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Temporal Cue Guided Video Highlight Detection with Low-Rank Audio-Visual Fusion.\n \n \n \n \n\n\n \n Ye, Q.; Shen, X.; Gao, Y.; Wang, Z.; Bi, Q.; Li, P.; and Yang, G.\n\n\n \n\n\n\n In International Conference on Computer Vision (ICCV 2021), 2021. \n \n\n\n\n
\n\n\n\n \n \n \"TemporalPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Temporal Cue Guided Video Highlight Detection with Low-Rank Audio-Visual Fusion},\n type = {inproceedings},\n year = {2021},\n id = {c003a824-13b3-3c3c-9f79-10ef930061bb},\n created = {2024-01-13T05:46:22.970Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:03:03.547Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {ye_temporal_2021},\n source_type = {inproceedings},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Ye, Qinghao and Shen, Xiyue and Gao, Yuan and Wang, Zirui and Bi, Qi and Li, Ping and Yang, Guang},\n booktitle = {International Conference on Computer Vision (ICCV 2021)}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n MIASSR: An Approach for Medical Image Arbitrary Scale Super-Resolution.\n \n \n \n \n\n\n \n Zhu, J.; Tan, C.; Yang, J.; Yang*(Co-last), G.; and Lio*(Co-last), P.\n\n\n \n\n\n\n arXiv preprint arXiv:2105.10738. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"MIASSR:Paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {MIASSR: An Approach for Medical Image Arbitrary Scale Super-Resolution},\n type = {article},\n year = {2021},\n id = {ade820e1-c375-32fa-89b8-00623cdbfb68},\n created = {2024-01-13T05:46:28.271Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T12:15:39.492Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {zhu_miassr_2021-1},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Zhu, Jin and Tan, Chuan and Yang, Junwei and Yang*(Co-last), Guang and Lio*(Co-last), Pietro},\n journal = {arXiv preprint arXiv:2105.10738}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Textured-Based Deep Learning in Prostate Cancer Classification with 3T Multiparametric MRI: Comparison with PI-RADS-Based Classification.\n \n \n \n \n\n\n \n Liu, Y.; Zheng, H.; Liang, Z.; Miao, Q.; Brisbane, W.; Marks, L.; Raman, S.; Reiter, R.; Yang, G.; and Sung, K.\n\n\n \n\n\n\n Diagnostics. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"Textured-BasedPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Textured-Based Deep Learning in Prostate Cancer Classification with 3T Multiparametric MRI: Comparison with PI-RADS-Based Classification},\n type = {article},\n year = {2021},\n id = {c83ad130-bc0d-34b4-a49c-a13c9d55edc1},\n created = {2024-01-13T05:46:28.444Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:16:08.266Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {liu_textured-based_2021},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Liu, Yongkai and Zheng, Haoxin and Liang, Zhengrong and Miao, Qi and Brisbane, Wayne and Marks, Leonard and Raman, Steven and Reiter, Robert and Yang, Guang and Sung, Kyunghyun},\n journal = {Diagnostics}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Annealing Genetic GAN for Imbalanced Web Data Learning.\n \n \n \n \n\n\n \n Hao, J.; Wang, C.; Yang, G.; Gao, Z.; Zhang, J.; and Zhang, H.\n\n\n \n\n\n\n IEEE Transactions on Multimedia. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"AnnealingPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Annealing Genetic GAN for Imbalanced Web Data Learning},\n type = {article},\n year = {2021},\n id = {d503e87a-e0b4-37ac-8b60-d365b66f79b9},\n created = {2024-01-13T05:46:29.148Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:16:30.080Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {hao_annealing_2021},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Hao, Jingyu and Wang, Chengjia and Yang, Guang and Gao, Zhifan and Zhang, Jinglin and Zhang, Heye},\n journal = {IEEE Transactions on Multimedia}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Ageing and Alzheimer’s Disease: Application of Artificial Intelligence in Mechanistic Studies, Diagnosis, and Drug Development.\n \n \n \n \n\n\n \n Ai, R.; Jin, X.; Tang, B.; Yang, G.; Niu, Z.; and Fang, E., F.\n\n\n \n\n\n\n Artificial Intelligence in Medicine. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"ArtificialPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inbook{\n type = {inbook},\n year = {2021},\n id = {7bdae814-d4d1-3ae4-9231-fa21339521ff},\n created = {2024-01-13T05:46:29.423Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:04:51.770Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {ai_ageing_2021},\n source_type = {incollection},\n private_publication = {false},\n bibtype = {inbook},\n author = {Ai, Ruixue and Jin, Xurui and Tang, Bowen and Yang, Guang and Niu, Zhangming and Fang, Evandro F},\n chapter = {Ageing and Alzheimer’s Disease: Application of Artificial Intelligence in Mechanistic Studies, Diagnosis, and Drug Development},\n title = {Artificial Intelligence in Medicine}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Deep Learning Enables Prostate MRI Segmentation: A Large Cohort Evaluation with Inter-rater Variability Analysis.\n \n \n \n \n\n\n \n Liu, Y.; Miao, Q.; Surawech, C.; Zheng, H.; Nguyen, D.; Yang, G.; Raman, S.; and Sung, K.\n\n\n \n\n\n\n Frontiers in Oncology. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"DeepPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Deep Learning Enables Prostate MRI Segmentation: A Large Cohort Evaluation with Inter-rater Variability Analysis},\n type = {article},\n year = {2021},\n id = {1bb08e2a-7960-3673-8f2e-c2446eb9c1df},\n created = {2024-01-13T05:46:29.435Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:04:58.516Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {liu_deep_2021},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Liu, Yongkai and Miao, Qi and Surawech, Chuthaporn and Zheng, Haoxin and Nguyen, Dan and Yang, Guang and Raman, Steven and Sung, Kyunghyun},\n journal = {Frontiers in Oncology}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Global and regional reproducibility of phasic left ventricular myocardial velocities obtained by three-directional cine myocardial velocity mapping.\n \n \n \n\n\n \n Hatipoglu, S.; Keegan, J.; Alonso-Álvarez, D.; Wu, Y.; Yang, G.; Wage, R.; Firmin, D.; and Gatehouse, P.\n\n\n \n\n\n\n In Society for Cardiovascular Magnetic Resonance (SCMR) 24th Annual Meeting, 2021. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Global and regional reproducibility of phasic left ventricular myocardial velocities obtained by three-directional cine myocardial velocity mapping},\n type = {inproceedings},\n year = {2021},\n id = {693a22ce-da28-3193-b19c-64c8df853967},\n created = {2024-01-13T05:46:30.101Z},\n file_attached = {false},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:23:14.356Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {hatipoglu_global_2021},\n source_type = {inproceedings},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Hatipoglu, Suzan and Keegan, Jennifer and Alonso-Álvarez, Diego and Wu, Yinzhe and Yang, Guang and Wage, Rick and Firmin, David and Gatehouse, Peter},\n booktitle = {Society for Cardiovascular Magnetic Resonance (SCMR) 24th Annual Meeting}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n High-Resolution Pelvic MRI Reconstruction Using a Generative Adversarial Network with Attention and Cyclic Loss.\n \n \n \n \n\n\n \n Li, G.; Lv, J.; Tong, X.; Wang, C.; and Yang, G.\n\n\n \n\n\n\n IEEE Access. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"High-ResolutionPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {High-Resolution Pelvic MRI Reconstruction Using a Generative Adversarial Network with Attention and Cyclic Loss},\n type = {article},\n year = {2021},\n id = {ae0f4624-ece4-390b-97b3-6daff7cd858b},\n created = {2024-01-13T05:46:30.770Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:16:48.074Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {li_high-resolution_2021},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Li, Guangyuan and Lv, Jun and Tong, Xiangrong and Wang, Chengyan and Yang, Guang},\n journal = {IEEE Access}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Arbitrary Scale Super-Resolution for Medical Images.\n \n \n \n \n\n\n \n Zhu, J.; Tan, C.; Yang, J.; Yang, G.; and Lio’, P.\n\n\n \n\n\n\n International journal of neural systems, 31(10): 2150037. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"ArbitraryPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Arbitrary Scale Super-Resolution for Medical Images},\n type = {article},\n year = {2021},\n pages = {2150037},\n volume = {31},\n id = {6189cde0-4745-30b4-a35c-ca62dd2fad51},\n created = {2024-01-13T05:46:30.950Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:17:03.674Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {zhu_arbitrary_2021},\n source_type = {article},\n notes = {ISBN: 0129-0657<br/>Publisher: World Scientific},\n private_publication = {false},\n bibtype = {article},\n author = {Zhu, Jin and Tan, Chuan and Yang, Junwei and Yang, Guang and Lio’, Pietro},\n journal = {International journal of neural systems},\n number = {10}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Adaptive Hierarchical Dual Consistency for Semi-Supervised Left Atrium Segmentation on Cross-Domain Data.\n \n \n \n \n\n\n \n Chen, J.; Zhang, H.; Mohaiddin, R.; Wong, T.; Firmin, D.; Keegan, J.; and Yang, G.\n\n\n \n\n\n\n IEEE Transactions on Medical Imaging. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"AdaptivePaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Adaptive Hierarchical Dual Consistency for Semi-Supervised Left Atrium Segmentation on Cross-Domain Data},\n type = {article},\n year = {2021},\n id = {9ee43ab9-3ade-34dd-8a5e-b497cd86576c},\n created = {2024-01-13T05:46:31.053Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:17:17.453Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {chen_adaptive_2021},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Chen, Jun and Zhang, Heye and Mohaiddin, Raad and Wong, Tom and Firmin, David and Keegan, Jennifer and Yang, Guang},\n journal = {IEEE Transactions on Medical Imaging}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Which GAN? A comparative study of generative adversarial network-based fast MRI reconstruction.\n \n \n \n \n\n\n \n Lv, J.; Zhu, J.; and Yang, G.\n\n\n \n\n\n\n Philosophical Transactions of the Royal Society A: Mathematical, Physical and Engineering Sciences, 379(2200). 2021.\n \n\n\n\n
\n\n\n\n \n \n \"WhichPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Which GAN? A comparative study of generative adversarial network-based fast MRI reconstruction},\n type = {article},\n year = {2021},\n keywords = {deep learning,generative adversarial network,magnetic resonance imaging,reconstruction},\n volume = {379},\n id = {c9342991-7523-33d2-9872-606a6f4c62be},\n created = {2024-01-13T06:15:54.074Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:02:58.567Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Fast magnetic resonance imaging (MRI) is crucial for clinical applications that can alleviate motion artefacts and increase patient throughput. K-space undersampling is an obvious approach to accelerate MR acquisition. However, undersampling of k-space data can result in blurring and aliasing artefacts for the reconstructed images. Recently, several studies have been proposed to use deep learning-based data-driven models for MRI reconstruction and have obtained promising results. However, the comparison of these methods remains limited because the models have not been trained on the same datasets and the validation strategies may be different. The purpose of this work is to conduct a comparative study to investigate the generative adversarial network (GAN)-based models for MRI reconstruction. We reimplemented and benchmarked four widely used GAN-based architectures including DAGAN, ReconGAN, RefineGAN and KIGAN. These four frameworks were trained and tested on brain, knee and liver MRI images using twofold, fourfold and sixfold accelerations, respectively, with a random undersampling mask. Both quantitative evaluations and qualitative visualization have shown that the RefineGAN method has achieved superior performance in reconstruction with better accuracy and perceptual quality compared to other GAN-based methods. This article is part of the theme issue 'Synergistic tomographic image reconstruction: Part 1'.},\n bibtype = {article},\n author = {Lv, Jun and Zhu, Jin and Yang, Guang},\n doi = {10.1098/rsta.2020.0203},\n journal = {Philosophical Transactions of the Royal Society A: Mathematical, Physical and Engineering Sciences},\n number = {2200}\n}
\n
\n\n\n
\n Fast magnetic resonance imaging (MRI) is crucial for clinical applications because it can alleviate motion artefacts and increase patient throughput. K-space undersampling is an obvious approach to accelerate MR acquisition. However, undersampling of k-space data can result in blurring and aliasing artefacts for the reconstructed images. Recently, several studies have proposed deep learning-based data-driven models for MRI reconstruction and have obtained promising results. However, the comparison of these methods remains limited because the models have not been trained on the same datasets and the validation strategies may be different. The purpose of this work is to conduct a comparative study to investigate the generative adversarial network (GAN)-based models for MRI reconstruction. We reimplemented and benchmarked four widely used GAN-based architectures including DAGAN, ReconGAN, RefineGAN and KIGAN. These four frameworks were trained and tested on brain, knee and liver MRI images using twofold, fourfold and sixfold accelerations, respectively, with a random undersampling mask. Both quantitative evaluations and qualitative visualization have shown that the RefineGAN method has achieved superior performance in reconstruction with better accuracy and perceptual quality compared to other GAN-based methods. This article is part of the theme issue 'Synergistic tomographic image reconstruction: Part 1'.\n
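The random undersampling masks and acceleration factors mentioned in this abstract can be illustrated with a short, generic sketch (not the exact masks used in the study): a fraction of phase-encode lines is kept at random, the low-frequency lines around the k-space centre are always retained, and the zero-filled inverse FFT gives the aliased input that reconstruction networks start from.

# A minimal, generic sketch of random Cartesian k-space undersampling, as
# commonly used to simulate accelerated MRI acquisition (illustrative only).
import numpy as np

def random_undersampling_mask(n_lines, acceleration, n_center=16, seed=0):
    """Keep roughly n_lines/acceleration phase-encode lines, always retaining
    the n_center low-frequency lines around the k-space centre."""
    rng = np.random.default_rng(seed)
    mask = np.zeros(n_lines, dtype=bool)
    centre = n_lines // 2
    mask[centre - n_center // 2: centre + n_center // 2] = True
    n_extra = max(n_lines // acceleration - mask.sum(), 0)
    candidates = np.flatnonzero(~mask)
    mask[rng.choice(candidates, size=n_extra, replace=False)] = True
    return mask

def undersample(image, acceleration=4):
    """Return the zero-filled reconstruction of a randomly undersampled image."""
    kspace = np.fft.fftshift(np.fft.fft2(image))
    mask = random_undersampling_mask(image.shape[1], acceleration)
    kspace_us = kspace * mask[np.newaxis, :]          # drop phase-encode columns
    return np.abs(np.fft.ifft2(np.fft.ifftshift(kspace_us)))

# usage: aliased = undersample(np.random.rand(256, 256), acceleration=4)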
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Wavelet improved GAN for MRI reconstruction.\n \n \n \n \n\n\n \n Chen, Y.; Firmin, D.; and Yang, G.\n\n\n \n\n\n\n 2021.\n \n\n\n\n
\n\n\n\n \n \n \"WaveletPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@misc{\n title = {Wavelet improved GAN for MRI reconstruction},\n type = {misc},\n year = {2021},\n source = {SPIE Medical Imaging 2021},\n keywords = {compressed sensing,generative adversarial network,mri,wavelet packet decomposition},\n pages = {37},\n volume = {1159513},\n issue = {February},\n id = {4486d93b-5a8f-3731-9017-3114f2877841},\n created = {2024-01-13T06:15:54.120Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:14:18.351Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Background: Compressed sensing magnetic resonance imaging (CS-MRI) is an important technique of accel- erating the acquisition process of magnetic resonance (MR) images by undersampling. It has the potential of reducing MR scanning time and costs, thus minimising patient discomfort. Motivation: One of the successful CS-MRI techniques to recover the original image from undersampled images is generative adversarial network (GAN). However, GAN-based techniques suffer from three key limitations: training instability, slow convergence and input size constraints. Method and Result: In this study, we propose a novel GAN-based CS-MRI technique: WPD-DAGAN (Wavelet Packet Decomposition Improved de-aliaising GAN). We incorporate Wasserstein loss function and a novel structure based on wavelet packet decomposition (WPD) into the de-aliaising GAN (DAGAN) architecture, which is a well established GAN-based CS-MRI technique. We show that the proposed network architecture achieves a significant performance improvement over the state-of-the-art CS-MRI techniques.},\n bibtype = {misc},\n author = {Chen, Yutong and Firmin, David and Yang, Guang},\n doi = {10.1117/12.2581004}\n}
\n
\n\n\n
\n Background: Compressed sensing magnetic resonance imaging (CS-MRI) is an important technique for accelerating the acquisition process of magnetic resonance (MR) images by undersampling. It has the potential of reducing MR scanning time and costs, thus minimising patient discomfort. Motivation: One of the successful CS-MRI techniques to recover the original image from undersampled images is the generative adversarial network (GAN). However, GAN-based techniques suffer from three key limitations: training instability, slow convergence and input size constraints. Method and Result: In this study, we propose a novel GAN-based CS-MRI technique: WPD-DAGAN (Wavelet Packet Decomposition Improved de-aliasing GAN). We incorporate a Wasserstein loss function and a novel structure based on wavelet packet decomposition (WPD) into the de-aliasing GAN (DAGAN) architecture, which is a well-established GAN-based CS-MRI technique. We show that the proposed network architecture achieves a significant performance improvement over state-of-the-art CS-MRI techniques.\n
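For readers unfamiliar with the wavelet packet decomposition (WPD) that WPD-DAGAN builds on, a minimal PyWavelets sketch is shown below. It only illustrates the multi-band representation; the paper's network and Wasserstein training are not reproduced, and the wavelet choice and depth here are illustrative assumptions.

# A small illustration of 2-level wavelet packet decomposition (WPD) with
# PyWavelets: unlike the plain DWT, every subband is split again at each level.
import numpy as np
import pywt

image = np.random.rand(256, 256).astype(np.float32)   # stand-in for an MR slice

wp = pywt.WaveletPacket2D(data=image, wavelet='db2', mode='symmetric', maxlevel=2)

# Collect all 16 level-2 subbands (paths such as 'aa', 'ah', ..., 'dd').
subbands = {node.path: node.data for node in wp.get_level(2)}
print(sorted(subbands), subbands['aa'].shape)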
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fully automatic framework for comprehensive coronary artery calcium scores analysis on non-contrast cardiac-gated CT scan: Total and vessel-specific quantifications.\n \n \n \n \n\n\n \n Zhang, N.; Yang, G.; Zhang, W.; Wang, W.; Zhou, Z.; Zhang, H.; Xu, L.; and Chen, Y.\n\n\n \n\n\n\n European Journal of Radiology, 134(September 2020): 109420. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"FullyPaper\n  \n \n \n \"FullyWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Fully automatic framework for comprehensive coronary artery calcium scores analysis on non-contrast cardiac-gated CT scan: Total and vessel-specific quantifications},\n type = {article},\n year = {2021},\n keywords = {Calcium,Coronary artery disease,Deep learning,Tomo,Tomography,X-ray computed},\n pages = {109420},\n volume = {134},\n websites = {https://doi.org/10.1016/j.ejrad.2020.109420},\n publisher = {Elsevier B.V.},\n id = {07c2e3e1-33f8-3d66-a9c6-d72c2d5e6444},\n created = {2024-01-13T06:15:54.122Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:02:58.574Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Objectives: To develop a fully automatic multiview shape constraint framework for comprehensive coronary artery calcium scores (CACS) quantification via deep learning on nonenhanced cardiac CT images. Methods: In this retrospective single-centre study, a multi-task deep learning framework was proposed to detect and quantify coronary artery calcification from CT images collected between October 2018 and March 2019. A total of 232 non-contrast cardiac-gated CT scans were retrieved and studied (80 % for model training and 20 % for testing). CACS results of testing datasets (n = 46), including Agatston score, calcium volume score, calcium mass score, were calculated fully automatically and manually at total and vessel-specific levels, respectively. Results: No significant differences were found in CACS quantification obtained using automatic or manual methods at total and vessel-specific levels (Agatston score: automatic 535.3 vs. manual 542.0, P = 0.993; calcium volume score: automatic 454.2 vs. manual 460.6, P = 0.990; calcium mass score: automatic 128.9 vs. manual 129.5, P = 0.992). Compared to the ground truth, the number of calcified vessels can be accurate recognized automatically (total: automatic 107 vs. manual 102, P = 0.125; left main artery: automatic 15 vs. manual 14, P = 1.000; left ascending artery: automatic 37 vs. manual 37, P = 1.000; left circumflex artery: automatic 22 vs. manual 20, P = 0.625; right coronary artery: automatic 33 vs. manual 31, P = 0.500). At the patient's level, there was no statistic difference existed in the classification of Agatston scoring (P = 0.317) and the number of calcified vessels (P = 0.102) between the automatic and manual results. Conclusions: The proposed framework can achieve reliable and comprehensive quantification for the CACS, including the calcified extent and distribution indicators at both total and vessel-specific levels.},\n bibtype = {article},\n author = {Zhang, Nan and Yang, Guang and Zhang, Weiwei and Wang, Wenjing and Zhou, Zhen and Zhang, Heye and Xu, Lei and Chen, Yundai},\n doi = {10.1016/j.ejrad.2020.109420},\n journal = {European Journal of Radiology},\n number = {September 2020}\n}
\n
\n\n\n
\n Objectives: To develop a fully automatic multiview shape constraint framework for comprehensive coronary artery calcium scores (CACS) quantification via deep learning on nonenhanced cardiac CT images. Methods: In this retrospective single-centre study, a multi-task deep learning framework was proposed to detect and quantify coronary artery calcification from CT images collected between October 2018 and March 2019. A total of 232 non-contrast cardiac-gated CT scans were retrieved and studied (80 % for model training and 20 % for testing). CACS results of the testing datasets (n = 46), including Agatston score, calcium volume score, and calcium mass score, were calculated fully automatically and manually at total and vessel-specific levels, respectively. Results: No significant differences were found in CACS quantification obtained using automatic or manual methods at total and vessel-specific levels (Agatston score: automatic 535.3 vs. manual 542.0, P = 0.993; calcium volume score: automatic 454.2 vs. manual 460.6, P = 0.990; calcium mass score: automatic 128.9 vs. manual 129.5, P = 0.992). Compared to the ground truth, the number of calcified vessels can be accurately recognized automatically (total: automatic 107 vs. manual 102, P = 0.125; left main artery: automatic 15 vs. manual 14, P = 1.000; left ascending artery: automatic 37 vs. manual 37, P = 1.000; left circumflex artery: automatic 22 vs. manual 20, P = 0.625; right coronary artery: automatic 33 vs. manual 31, P = 0.500). At the patient level, there was no statistically significant difference in the classification of Agatston scoring (P = 0.317) or in the number of calcified vessels (P = 0.102) between the automatic and manual results. Conclusions: The proposed framework can achieve reliable and comprehensive quantification of the CACS, including the calcified extent and distribution indicators at both total and vessel-specific levels.\n
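For context, the conventional Agatston calculation that such frameworks report can be sketched in a few lines: each calcified lesion contributes its area times a density weight determined by its peak attenuation in HU. The sketch below is the textbook definition, not the paper's deep learning pipeline.

# Minimal per-lesion Agatston scoring sketch (standard weights: 130-199 HU -> 1,
# 200-299 -> 2, 300-399 -> 3, >=400 -> 4); detection/segmentation is not shown.
def agatston_weight(peak_hu):
    if peak_hu >= 400: return 4
    if peak_hu >= 300: return 3
    if peak_hu >= 200: return 2
    if peak_hu >= 130: return 1
    return 0

def agatston_score(lesions):
    """lesions: iterable of (area_mm2, peak_hu) pairs for one slice or vessel."""
    return sum(area * agatston_weight(hu) for area, hu in lesions)

# usage: agatston_score([(12.5, 210), (4.0, 450)])  -> 12.5*2 + 4.0*4 = 41.0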
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n FA-GAN: Fused attentive generative adversarial networks for MRI image super-resolution.\n \n \n \n \n\n\n \n Jiang, M.; Zhi, M.; Wei, L.; Yang, X.; Zhang, J.; Li, Y.; Wang, P.; Huang, J.; and Yang, G.\n\n\n \n\n\n\n Computerized Medical Imaging and Graphics, 92(July): 101969. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"FA-GAN:Paper\n  \n \n \n \"FA-GAN:Website\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {FA-GAN: Fused attentive generative adversarial networks for MRI image super-resolution},\n type = {article},\n year = {2021},\n keywords = {Attention,MRI,Mechanism,Super-resolution,Generative adversarial networks,A},\n pages = {101969},\n volume = {92},\n websites = {https://doi.org/10.1016/j.compmedimag.2021.101969},\n publisher = {Elsevier Ltd},\n id = {1ed4f56d-69b2-3aff-b2b7-c4854b2a8971},\n created = {2024-01-13T06:15:54.140Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:02:58.509Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {High-resolution magnetic resonance images can provide fine-grained anatomical information, but acquiring such data requires a long scanning time. In this paper, a framework called the Fused Attentive Generative Adversarial Networks(FA-GAN) is proposed to generate the super- resolution MR image from low-resolution magnetic resonance images, which can reduce the scanning time effectively but with high resolution MR images. In the framework of the FA-GAN, the local fusion feature block, consisting of different three-pass networks by using different convolution kernels, is proposed to extract image features at different scales. And the global feature fusion module, including the channel attention module, the self-attention module, and the fusion operation, is designed to enhance the important features of the MR image. Moreover, the spectral normalization process is introduced to make the discriminator network stable. 40 sets of 3D magnetic resonance images (each set of images contains 256 slices) are used to train the network, and 10 sets of images are used to test the proposed method. The experimental results show that the PSNR and SSIM values of the super-resolution magnetic resonance image generated by the proposed FA-GAN method are higher than the state-of-the-art reconstruction methods.},\n bibtype = {article},\n author = {Jiang, Mingfeng and Zhi, Minghao and Wei, Liying and Yang, Xiaocheng and Zhang, Jucheng and Li, Yongming and Wang, Pin and Huang, Jiahao and Yang, Guang},\n doi = {10.1016/j.compmedimag.2021.101969},\n journal = {Computerized Medical Imaging and Graphics},\n number = {July}\n}
\n
\n\n\n
\n High-resolution magnetic resonance images can provide fine-grained anatomical information, but acquiring such data requires a long scanning time. In this paper, a framework called the Fused Attentive Generative Adversarial Networks (FA-GAN) is proposed to generate super-resolution MR images from low-resolution magnetic resonance images, which can effectively reduce the scanning time while still providing high-resolution MR images. In the framework of the FA-GAN, the local fusion feature block, consisting of different three-pass networks using different convolution kernels, is proposed to extract image features at different scales. The global feature fusion module, including the channel attention module, the self-attention module, and the fusion operation, is designed to enhance the important features of the MR image. Moreover, the spectral normalization process is introduced to make the discriminator network stable. Forty sets of 3D magnetic resonance images (each set of images contains 256 slices) are used to train the network, and 10 sets of images are used to test the proposed method. The experimental results show that the PSNR and SSIM values of the super-resolution magnetic resonance images generated by the proposed FA-GAN method are higher than those of state-of-the-art reconstruction methods.\n
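The spectral normalization mentioned above is available directly in PyTorch; a minimal, hedged sketch of a spectrally normalized discriminator follows. The layer sizes are illustrative assumptions, not the FA-GAN architecture.

# A tiny GAN discriminator stabilised with spectral normalisation.
import torch
import torch.nn as nn
from torch.nn.utils import spectral_norm

class TinyDiscriminator(nn.Module):
    def __init__(self, in_ch=1):
        super().__init__()
        self.net = nn.Sequential(
            spectral_norm(nn.Conv2d(in_ch, 64, 4, stride=2, padding=1)),
            nn.LeakyReLU(0.2, inplace=True),
            spectral_norm(nn.Conv2d(64, 128, 4, stride=2, padding=1)),
            nn.LeakyReLU(0.2, inplace=True),
            spectral_norm(nn.Conv2d(128, 1, 4, stride=1, padding=0)),  # patch logits
        )

    def forward(self, x):
        return self.net(x)

# usage: logits = TinyDiscriminator()(torch.randn(2, 1, 64, 64))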
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n 3D PBV-Net: An automated prostate MRI data segmentation method.\n \n \n \n \n\n\n \n Jin, Y.; Yang, G.; Fang, Y.; Li, R.; Xu, X.; Liu, Y.; and Lai, X.\n\n\n \n\n\n\n Computers in Biology and Medicine, 128(November 2020): 104160. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"3DPaper\n  \n \n \n \"3DWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {3D PBV-Net: An automated prostate MRI data segmentation method},\n type = {article},\n year = {2021},\n keywords = {Enabling technology,Prostate cancer,MRI,Automated segmentation,Telehea,Telehealth care},\n pages = {104160},\n volume = {128},\n websites = {https://doi.org/10.1016/j.compbiomed.2020.104160},\n publisher = {Elsevier Ltd},\n id = {80d5da3b-a4b5-35d2-90fa-39e29487d229},\n created = {2024-01-13T06:15:54.163Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:02:58.623Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Prostate cancer is one of the most common deadly diseases in men worldwide, which is seriously affecting people's life and health. Reliable and automated segmentation of the prostate gland in MRI data is exceptionally critical for diagnosis and treatment planning of prostate cancer. Although many automated segmentation methods have emerged, including deep learning based approaches, segmentation performance is still poor due to the large variability of image appearance, anisotropic spatial resolution, and imaging interference. This study proposes an automated prostate MRI data segmentation approach using bicubic interpolation with improved 3D V-Net (dubbed 3D PBV-Net). Considering the low-frequency components in the prostate gland, the bicubic interpolation is applied to preprocess the MRI data. On this basis, a 3D PBV-Net is developed to perform prostate MRI data segmentation. To illustrate the effectiveness of our approach, we evaluate the proposed 3D PBV-Net on two clinical prostate MRI data datasets, i.e., PROMISE 12 and TPHOH, with the manual delineations available as the ground truth. Our approach generates promising segmentation results, which have achieved 97.65% and 98.29% of average accuracy, 0.9613 and 0.9765 of Dice metric, 3.120 mm and 0.9382 mm of Hausdorff distance, and average boundary distance of 1.708, 0.7950 on PROMISE 12 and TPHOH datasets, respectively. Our method has effectively improved the accuracy of automated segmentation of the prostate MRI data and is promising to meet the accuracy requirements for telehealth applications.},\n bibtype = {article},\n author = {Jin, Yao and Yang, Guang and Fang, Ying and Li, Ruipeng and Xu, Xiaomei and Liu, Yongkai and Lai, Xiaobo},\n doi = {10.1016/j.compbiomed.2020.104160},\n journal = {Computers in Biology and Medicine},\n number = {November 2020}\n}
\n
\n\n\n
\n Prostate cancer is one of the most common deadly diseases in men worldwide and seriously affects patients' lives and health. Reliable and automated segmentation of the prostate gland in MRI data is exceptionally critical for diagnosis and treatment planning of prostate cancer. Although many automated segmentation methods have emerged, including deep learning-based approaches, segmentation performance is still poor due to the large variability of image appearance, anisotropic spatial resolution, and imaging interference. This study proposes an automated prostate MRI data segmentation approach using bicubic interpolation with an improved 3D V-Net (dubbed 3D PBV-Net). Considering the low-frequency components in the prostate gland, bicubic interpolation is applied to preprocess the MRI data. On this basis, a 3D PBV-Net is developed to perform prostate MRI data segmentation. To illustrate the effectiveness of our approach, we evaluate the proposed 3D PBV-Net on two clinical prostate MRI datasets, i.e., PROMISE 12 and TPHOH, with the manual delineations available as the ground truth. Our approach generates promising segmentation results, achieving average accuracies of 97.65% and 98.29%, Dice metrics of 0.9613 and 0.9765, Hausdorff distances of 3.120 mm and 0.9382 mm, and average boundary distances of 1.708 and 0.7950 on the PROMISE 12 and TPHOH datasets, respectively. Our method has effectively improved the accuracy of automated segmentation of prostate MRI data and is promising to meet the accuracy requirements for telehealth applications.\n
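The bicubic preprocessing step can be illustrated with a short, hedged sketch that upsamples each axial slice in-plane before segmentation; the scale factor and slice-wise treatment are assumptions for illustration, not the exact 3D PBV-Net pipeline.

# Slice-wise bicubic upsampling of an MRI volume with PyTorch.
import torch
import torch.nn.functional as F

def bicubic_upsample_slices(volume, scale=2.0):
    """volume: (D, H, W) tensor; returns (D, H*scale, W*scale)."""
    x = volume.unsqueeze(1)                       # (D, 1, H, W): slices as a batch
    y = F.interpolate(x, scale_factor=scale, mode='bicubic', align_corners=False)
    return y.squeeze(1)

# usage: hi_res = bicubic_upsample_slices(torch.rand(32, 128, 128))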
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multi-task learning with Multi-view Weighted Fusion Attention for artery-specific calcification analysis.\n \n \n \n \n\n\n \n Zhang, W.; Yang, G.; Zhang, N.; Xu, L.; Wang, X.; Zhang, Y.; Zhang, H.; Del Ser, J.; and de Albuquerque, V., H., C.\n\n\n \n\n\n\n Information Fusion, 71(February): 64-76. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"Multi-taskPaper\n  \n \n \n \"Multi-taskWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Multi-task learning with Multi-view Weighted Fusion Attention for artery-specific calcification analysis},\n type = {article},\n year = {2021},\n keywords = {Artery-specific calcification analysis,Multi-view,Multi-task learning,Multi-view Weighted Fusion Attention,Multi-view learning},\n pages = {64-76},\n volume = {71},\n websites = {https://doi.org/10.1016/j.inffus.2021.01.009},\n publisher = {Elsevier B.V.},\n id = {a4950c29-54d4-3e64-a394-408f82809196},\n created = {2024-01-13T06:15:54.267Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:02:58.079Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {In general, artery-specific calcification analysis comprises the simultaneous calcification segmentation and quantification tasks. It can help provide a thorough assessment for calcification of different coronary arteries, and further allow for an efficient and rapid diagnosis of cardiovascular diseases (CVD). However, as a high-dimensional multi-type estimation problem, artery-specific calcification analysis has not been profoundly investigated due to the intractability of obtaining discriminative feature representations. In this work, we propose a Multi-task learning network with Multi-view Weighted Fusion Attention (MMWFAnet) to solve this challenging problem. The MMWFAnet first employs a Multi-view Weighted Fusion Attention (MWFA) module to extract discriminative feature representations by enhancing the collaboration of multiple views. Specifically, MWFA weights these views to improve multi-view learning for calcification features. Based on the fusion of these multiple views, the proposed approach takes advantage of multi-task learning to obtain accurate segmentation and quantification of artery-specific calcification simultaneously. We perform experimental studies on 676 non-contrast Computed Tomography scans, achieving state-of-the-art performance in terms of multiple evaluation metrics. These compelling results evince that the proposed MMWFAnet is capable of improving the effectivity and efficiency of clinical CVD diagnosis.},\n bibtype = {article},\n author = {Zhang, Weiwei and Yang, Guang and Zhang, Nan and Xu, Lei and Wang, Xiaoqing and Zhang, Yanping and Zhang, Heye and Del Ser, Javier and de Albuquerque, Victor Hugo C.},\n doi = {10.1016/j.inffus.2021.01.009},\n journal = {Information Fusion},\n number = {February}\n}
\n
\n\n\n
\n In general, artery-specific calcification analysis comprises the simultaneous calcification segmentation and quantification tasks. It can help provide a thorough assessment of calcification in different coronary arteries, and further allow for an efficient and rapid diagnosis of cardiovascular diseases (CVD). However, as a high-dimensional multi-type estimation problem, artery-specific calcification analysis has not been profoundly investigated due to the intractability of obtaining discriminative feature representations. In this work, we propose a Multi-task learning network with Multi-view Weighted Fusion Attention (MMWFAnet) to solve this challenging problem. The MMWFAnet first employs a Multi-view Weighted Fusion Attention (MWFA) module to extract discriminative feature representations by enhancing the collaboration of multiple views. Specifically, MWFA weights these views to improve multi-view learning for calcification features. Based on the fusion of these multiple views, the proposed approach takes advantage of multi-task learning to obtain accurate segmentation and quantification of artery-specific calcification simultaneously. We perform experimental studies on 676 non-contrast Computed Tomography scans, achieving state-of-the-art performance in terms of multiple evaluation metrics. These compelling results evince that the proposed MMWFAnet is capable of improving the effectiveness and efficiency of clinical CVD diagnosis.\n
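A generic sketch of fusing several view-specific feature maps with learned weights is given below; it conveys the idea of weighted multi-view fusion but is not the paper's MWFA module, and the module name and layer sizes are illustrative assumptions.

# Weighted fusion of multi-view features with learned attention weights.
import torch
import torch.nn as nn

class WeightedViewFusion(nn.Module):
    def __init__(self, n_views, channels):
        super().__init__()
        # One scalar logit per view, turned into a convex combination by softmax.
        self.view_logits = nn.Parameter(torch.zeros(n_views))
        self.project = nn.Conv2d(channels, channels, kernel_size=1)

    def forward(self, views):
        # views: list of n_views tensors, each of shape (N, C, H, W)
        w = torch.softmax(self.view_logits, dim=0)
        fused = sum(w[i] * v for i, v in enumerate(views))
        return self.project(fused)

# usage: out = WeightedViewFusion(3, 64)([torch.randn(2, 64, 32, 32) for _ in range(3)])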
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Myocardial extracellular volume fraction quantification in an animal model of the doxorubicin-induced myocardial fibrosis: A synthetic hematocrit method using 3T cardiac magnetic resonance.\n \n \n \n \n\n\n \n Zhou, Z.; Wang, R.; Wang, H.; Liu, Y.; Lu, D.; Sun, Z.; Yang, G.; and Xu, L.\n\n\n \n\n\n\n Quantitative Imaging in Medicine and Surgery, 11(2): 510-520. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"MyocardialPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Myocardial extracellular volume fraction quantification in an animal model of the doxorubicin-induced myocardial fibrosis: A synthetic hematocrit method using 3T cardiac magnetic resonance},\n type = {article},\n year = {2021},\n keywords = {Cardiac magnetic resonance imaging (CMR imaging),Collagen volume fraction (CVF),Correlation,Diffuse interstitial myocardial fibrosis,Extracellular volume fraction (ECV fraction)},\n pages = {510-520},\n volume = {11},\n id = {079f91c4-ba96-3961-a004-689b7394f53c},\n created = {2024-01-13T06:15:54.278Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:02:58.707Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Background: Visualization of diffuse myocardial fibrosis is challenging and mainly relies on histology. Cardiac magnetic resonance (CMR), which uses extracellular contrast agents, is a rapidly developing technique for measuring the extracellular volume (ECV). The objective of this study was to evaluate the feasibility of the synthetic myocardial ECV fraction based on 3.0 T CMR compared with the conventional ECV fraction. Methods: This study was approved by the local animal care and ethics committee. Fifteen beagle models with diffuse myocardial fibrosis, including 12 experimental and three control subjects, were generated by injecting doxorubicin 30 mg/m2 intravenously every three weeks for 24 weeks. Short-axis (SAX) and 4-chamber long-axis (LAX) T1 maps were acquired for both groups. The association between hematocrit (Hct) and native T1blood was derived from 9 non-contrast CMR T1 maps of 3 control beagles using regression analysis. Synthetic ECV was then calculated using the synthetic Hct and compared with conventional ECV at baseline and the 16th and 24th week after doxorubicin administration. The collagen volume fraction (CVF) value was measured on digital biopsy samples. Bland-Altman plots were used to analyze the agreement between conventional and synthetic ECV. Correlation analyses were performed to explore the association among conventional ECV, synthetic ECV, CVF, and left ventricular ejection fraction (LVEF). Results: The regression model synthetic Hct = 816.46 R1blood - 0.01 (R2=0.617; P=0.012) was used to predict the Hct from native T1blood values. The conventional and synthetic ECV fractions of experimental animals at the 16th and 24th week after modeling were significantly higher than those measured at the baseline (31.4%±2.2% and 36.3%±2.1% vs. 22.9%±1.7%; 29.9%±2.4% and 36.1%±2.6% vs. 22.0%±2.4%; all with P<0.05). Bland-Altman plots showed a bias (1.0%) between conventional and synthetic ECV with 95% limits of agreement of -2.5% to 4.4% in the per-subject analysis (n=21) and a bias (1.0%) between conventional and synthetic ECV with 95% limits of agreement of -2.4% to 4.3% in the per-segment analysis (n=294). Conventional and synthetic ECV were well correlated with CVF (r=0.937 and 0.925, all with P<0.001, n=10). 
Conclusions: Our study showed promising results for using synthetic ECV compared with the conventional ECV for providing accurate quantification of myocardial ECV without the need for blood sampling.},\n bibtype = {article},\n author = {Zhou, Zhen and Wang, Rui and Wang, Hui and Liu, Yi and Lu, Dongxu and Sun, Zhonghua and Yang, Guang and Xu, Lei},\n doi = {10.21037/QIMS-20-501},\n journal = {Quantitative Imaging in Medicine and Surgery},\n number = {2}\n}
\n
\n\n\n
\n Background: Visualization of diffuse myocardial fibrosis is challenging and mainly relies on histology. Cardiac magnetic resonance (CMR), which uses extracellular contrast agents, is a rapidly developing technique for measuring the extracellular volume (ECV). The objective of this study was to evaluate the feasibility of the synthetic myocardial ECV fraction based on 3.0 T CMR compared with the conventional ECV fraction. Methods: This study was approved by the local animal care and ethics committee. Fifteen beagle models with diffuse myocardial fibrosis, including 12 experimental and three control subjects, were generated by injecting doxorubicin 30 mg/m2 intravenously every three weeks for 24 weeks. Short-axis (SAX) and 4-chamber long-axis (LAX) T1 maps were acquired for both groups. The association between hematocrit (Hct) and native T1blood was derived from 9 non-contrast CMR T1 maps of 3 control beagles using regression analysis. Synthetic ECV was then calculated using the synthetic Hct and compared with conventional ECV at baseline and the 16th and 24th week after doxorubicin administration. The collagen volume fraction (CVF) value was measured on digital biopsy samples. Bland-Altman plots were used to analyze the agreement between conventional and synthetic ECV. Correlation analyses were performed to explore the association among conventional ECV, synthetic ECV, CVF, and left ventricular ejection fraction (LVEF). Results: The regression model synthetic Hct = 816.46 R1blood - 0.01 (R2=0.617; P=0.012) was used to predict the Hct from native T1blood values. The conventional and synthetic ECV fractions of experimental animals at the 16th and 24th week after modeling were significantly higher than those measured at the baseline (31.4%±2.2% and 36.3%±2.1% vs. 22.9%±1.7%; 29.9%±2.4% and 36.1%±2.6% vs. 22.0%±2.4%; all with P<0.05). Bland-Altman plots showed a bias (1.0%) between conventional and synthetic ECV with 95% limits of agreement of -2.5% to 4.4% in the per-subject analysis (n=21) and a bias (1.0%) between conventional and synthetic ECV with 95% limits of agreement of -2.4% to 4.3% in the per-segment analysis (n=294). Conventional and synthetic ECV were well correlated with CVF (r=0.937 and 0.925, all with P<0.001, n=10). Conclusions: Our study showed promising results for using synthetic ECV compared with the conventional ECV for providing accurate quantification of myocardial ECV without the need for blood sampling.\n
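The synthetic-haematocrit ECV calculation reported above can be written out explicitly. The regression coefficients (816.46 and -0.01) are those quoted in the abstract and are specific to that scanner and cohort; the T1 values in the usage lines are illustrative only.

# Synthetic Hct from native blood R1, then the standard ECV formula.
def synthetic_hct(t1_blood_native_ms):
    r1_blood = 1.0 / t1_blood_native_ms            # R1 in 1/ms
    return 816.46 * r1_blood - 0.01                # regression quoted in the abstract

def ecv(t1_myo_native, t1_myo_post, t1_blood_native, t1_blood_post, hct):
    """Standard extracellular volume fraction from pre/post-contrast T1 (ms)."""
    d_r1_myo = 1.0 / t1_myo_post - 1.0 / t1_myo_native
    d_r1_blood = 1.0 / t1_blood_post - 1.0 / t1_blood_native
    return (1.0 - hct) * d_r1_myo / d_r1_blood

hct = synthetic_hct(t1_blood_native_ms=1800.0)      # illustrative 3T blood T1
print(ecv(1200.0, 500.0, 1800.0, 350.0, hct))       # illustrative T1 values, ~0.28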
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n DiCyc: GAN-based deformation invariant cross-domain information fusion for medical image synthesis.\n \n \n \n \n\n\n \n Wang, C.; Yang, G.; Papanastasiou, G.; Tsaftaris, S., A.; Newby, D., E.; Gray, C.; Macnaught, G.; and MacGillivray, T., J.\n\n\n \n\n\n\n Information Fusion, 67(May 2020): 147-160. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"DiCyc:Paper\n  \n \n \n \"DiCyc:Website\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {DiCyc: GAN-based deformation invariant cross-domain information fusion for medical image synthesis},\n type = {article},\n year = {2021},\n keywords = {Information fusion,GAN,Image synthesis},\n pages = {147-160},\n volume = {67},\n websites = {https://doi.org/10.1016/j.inffus.2020.10.015},\n publisher = {Elsevier B.V.},\n id = {367c84aa-6246-3131-ab50-472a1c98362a},\n created = {2024-01-13T06:15:54.288Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:02:58.706Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Cycle-consistent generative adversarial network (CycleGAN) has been widely used for cross-domain medical image synthesis tasks particularly due to its ability to deal with unpaired data. However, most CycleGAN-based synthesis methods cannot achieve good alignment between the synthesized images and data from the source domain, even with additional image alignment losses. This is because the CycleGAN generator network can encode the relative deformations and noises associated to different domains. This can be detrimental for the downstream applications that rely on the synthesized images, such as generating pseudo-CT for PET-MR attenuation correction. In this paper, we present a deformation invariant cycle-consistency model that can filter out these domain-specific deformation. The deformation is globally parameterized by thin-plate-spline (TPS), and locally learned by modified deformable convolutional layers. Robustness to domain-specific deformations has been evaluated through experiments on multi-sequence brain MR data and multi-modality abdominal CT and MR data. Experiment results demonstrated that our method can achieve better alignment between the source and target data while maintaining superior image quality of signal compared to several state-of-the-art CycleGAN-based methods.},\n bibtype = {article},\n author = {Wang, Chengjia and Yang, Guang and Papanastasiou, Giorgos and Tsaftaris, Sotirios A. and Newby, David E. and Gray, Calum and Macnaught, Gillian and MacGillivray, Tom J.},\n doi = {10.1016/j.inffus.2020.10.015},\n journal = {Information Fusion},\n number = {May 2020}\n}
\n
\n\n\n
\n Cycle-consistent generative adversarial network (CycleGAN) has been widely used for cross-domain medical image synthesis tasks, particularly due to its ability to deal with unpaired data. However, most CycleGAN-based synthesis methods cannot achieve good alignment between the synthesized images and data from the source domain, even with additional image alignment losses. This is because the CycleGAN generator network can encode the relative deformations and noise associated with different domains. This can be detrimental for the downstream applications that rely on the synthesized images, such as generating pseudo-CT for PET-MR attenuation correction. In this paper, we present a deformation-invariant cycle-consistency model that can filter out these domain-specific deformations. The deformation is globally parameterized by a thin-plate spline (TPS) and locally learned by modified deformable convolutional layers. Robustness to domain-specific deformations has been evaluated through experiments on multi-sequence brain MR data and multi-modality abdominal CT and MR data. Experimental results demonstrated that our method can achieve better alignment between the source and target data while maintaining superior image quality compared to several state-of-the-art CycleGAN-based methods.\n
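As background for the cycle-consistency idea that DiCyc modifies, a minimal sketch of the standard L1 cycle loss is shown below; the paper's TPS-parameterized, deformation-invariant components are not reproduced here.

# L1 cycle-consistency loss for generators G: X->Y and F: Y->X.
import torch

def cycle_consistency_loss(x, y, G, F):
    """||F(G(x)) - x||_1 + ||G(F(y)) - y||_1 averaged over all elements."""
    return (F(G(x)) - x).abs().mean() + (G(F(y)) - y).abs().mean()

# usage with trivial stand-in generators (identity mappings):
identity = lambda t: t
loss = cycle_consistency_loss(torch.randn(2, 1, 32, 32), torch.randn(2, 1, 32, 32), identity, identity)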
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A 2-year investigation of the impact of the computed tomography–derived fractional flow reserve calculated using a deep learning algorithm on routine decision-making for coronary artery disease management.\n \n \n \n \n\n\n \n Liu, X.; Mo, X.; Zhang, H.; Yang, G.; Shi, C.; and Hau, W., K.\n\n\n \n\n\n\n European Radiology, 31(9): 7039-7046. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {A 2-year investigation of the impact of the computed tomography–derived fractional flow reserve calculated using a deep learning algorithm on routine decision-making for coronary artery disease management},\n type = {article},\n year = {2021},\n keywords = {Computed tomography angiography,Myocardial fractio,Coronary artery disease,Deep learning,Myocardial fractional flow reserve,Myocardial revascularisation},\n pages = {7039-7046},\n volume = {31},\n publisher = {European Radiology},\n id = {0e606535-4c97-3ad7-b96f-22ac4a5ecde2},\n created = {2024-01-13T06:15:54.334Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:02:58.131Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Objective: This study aims to investigate the safety and feasibility of using a deep learning algorithm to calculate computed tomography angiography–based fractional flow reserve (DL-FFRCT) as an alternative to invasive coronary angiography (ICA) in the selection of patients for coronary intervention. Materials and methods: Patients (N = 296) with symptomatic coronary artery disease identified by coronary computed tomography angiography (CTA) with stenosis over 50% were retrospectively enrolled from a single centre in this study. ICA-guided interventions were performed in patients at admission, and DL-FFRCT was conducted retrospectively. The influences on decision-making by using DL-FFRCT and the clinical outcome were compared to those of ICA-guided care for symptomatic CAD at the 2-year follow-up evaluation. Result: Two hundred forty-three patients were evaluated. Up to 72% of diagnostic ICA studies could have been avoided by using a DL-FFRCT value > 0.8 as a cut-off for intervention. A similar major adverse cardiovascular event (MACE) rate was observed in patients who underwent revascularisation with a DL-FFRCT value ≤ 0.8 (2.9%) compared to that of ICA-guided interventions (3.3%) (stented lesions with ICA stenosis > 75%) (p = 0.838). Conclusion: DL-FFRCT can reduce the need for diagnostic coronary angiography when identifying patients suitable for coronary intervention. A low MACE rate was found in a 2-year follow-up investigation. Key Points: • Seventy-two percent of diagnostic ICA studies could have been avoided by using a DL-FFRCT value > 0.8 as a cut-off for intervention. • Coronary artery stenting based on the diagnosis by using a 320-detector row CT scanner and a positive DL-FFRCT value could potentially be associated with a lower occurrence rate of major adverse cardiovascular events (2.9%) within the first 2 years. • A low event rate was found when intervention was performed in tandem lesions with haemodynamic significance based on DL-FFRCT < 0.8 as a cut-off value.},\n bibtype = {article},\n author = {Liu, Xin and Mo, Xukai and Zhang, Heye and Yang, Guang and Shi, Changzheng and Hau, William Kongtou},\n doi = {10.1007/s00330-021-07771-7},\n journal = {European Radiology},\n number = {9}\n}
\n Objective: This study aims to investigate the safety and feasibility of using a deep learning algorithm to calculate computed tomography angiography–based fractional flow reserve (DL-FFRCT) as an alternative to invasive coronary angiography (ICA) in the selection of patients for coronary intervention. Materials and methods: Patients (N = 296) with symptomatic coronary artery disease identified by coronary computed tomography angiography (CTA) with stenosis over 50% were retrospectively enrolled from a single centre in this study. ICA-guided interventions were performed in patients at admission, and DL-FFRCT was conducted retrospectively. The influences on decision-making by using DL-FFRCT and the clinical outcome were compared to those of ICA-guided care for symptomatic CAD at the 2-year follow-up evaluation. Result: Two hundred forty-three patients were evaluated. Up to 72% of diagnostic ICA studies could have been avoided by using a DL-FFRCT value > 0.8 as a cut-off for intervention. A similar major adverse cardiovascular event (MACE) rate was observed in patients who underwent revascularisation with a DL-FFRCT value ≤ 0.8 (2.9%) compared to that of ICA-guided interventions (3.3%) (stented lesions with ICA stenosis > 75%) (p = 0.838). Conclusion: DL-FFRCT can reduce the need for diagnostic coronary angiography when identifying patients suitable for coronary intervention. A low MACE rate was found in a 2-year follow-up investigation. Key Points: • Seventy-two percent of diagnostic ICA studies could have been avoided by using a DL-FFRCT value > 0.8 as a cut-off for intervention. • Coronary artery stenting based on the diagnosis by using a 320-detector row CT scanner and a positive DL-FFRCT value could potentially be associated with a lower occurrence rate of major adverse cardiovascular events (2.9%) within the first 2 years. • A low event rate was found when intervention was performed in tandem lesions with haemodynamic significance based on DL-FFRCT < 0.8 as a cut-off value.\n
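As a quick illustration of the decision rule described in this abstract, the sketch below computes the fraction of diagnostic ICA studies that would be deferred under a DL-FFRCT > 0.8 cut-off. It is a minimal numpy example on made-up values; the variable names and the toy data are assumptions, not figures from the study.

```python
import numpy as np

# Hypothetical per-patient DL-FFRCT values (toy data, not from the paper).
ffr_ct = np.array([0.92, 0.85, 0.78, 0.66, 0.81, 0.90, 0.74, 0.88])

# Decision rule from the abstract: values > 0.8 are treated as
# haemodynamically non-significant, so diagnostic ICA could be deferred.
defer_ica = ffr_ct > 0.8
print(f"ICA potentially avoidable in {defer_ica.mean():.0%} of patients")

# Patients at or below the cut-off would proceed to ICA-guided intervention.
proceed = ffr_ct <= 0.8
print(f"Referred for ICA: {proceed.sum()} of {ffr_ct.size}")
```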
Multitask Learning for Estimating Multitype Cardiac Indices in MRI and CT Based on Adversarial Reverse Mapping. Yu, C.; Gao, Z.; Zhang, W.; Yang, G.; Zhao, S.; Zhang, H.; Zhang, Y.; and Li, S. IEEE Transactions on Neural Networks and Learning Systems, 32(2): 493-506. 2021.
@article{\n title = {Multitask Learning for Estimating Multitype Cardiac Indices in MRI and CT Based on Adversarial Reverse Mapping},\n type = {article},\n year = {2021},\n keywords = {Adversarial training,multitask learning,multitype cardiac indices,reverse mapping},\n pages = {493-506},\n volume = {32},\n publisher = {IEEE},\n id = {2fc3d5eb-1495-3123-8514-ef83b76673a1},\n created = {2024-01-13T06:15:54.337Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:02:58.674Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {The estimation of multitype cardiac indices from cardiac magnetic resonance imaging (MRI) and computed tomography (CT) images attracts great attention because of its clinical potential for comprehensive function assessment. However, the most exiting model can only work in one imaging modality (MRI or CT) without transferable capability. In this article, we propose the multitask learning method with the reverse inferring for estimating multitype cardiac indices in MRI and CT. Different from the existing forward inferring methods, our method builds a reverse mapping network that maps the multitype cardiac indices to cardiac images. The task dependencies are then learned and shared to multitask learning networks using an adversarial training approach. Finally, we transfer the parameters learned from MRI to CT. A series of experiments were conducted in which we first optimized the performance of our framework via ten-fold cross-validation of over 2900 cardiac MRI images. Then, the fine-tuned network was run on an independent data set with 2360 cardiac CT images. The results of all the experiments conducted on the proposed adversarial reverse mapping show excellent performance in estimating multitype cardiac indices.},\n bibtype = {article},\n author = {Yu, Chengjin and Gao, Zhifan and Zhang, Weiwei and Yang, Guang and Zhao, Shu and Zhang, Heye and Zhang, Yanping and Li, Shuo},\n doi = {10.1109/TNNLS.2020.2984955},\n journal = {IEEE Transactions on Neural Networks and Learning Systems},\n number = {2}\n}
The estimation of multitype cardiac indices from cardiac magnetic resonance imaging (MRI) and computed tomography (CT) images attracts great attention because of its clinical potential for comprehensive function assessment. However, most existing models can only work in one imaging modality (MRI or CT) without transferable capability. In this article, we propose a multitask learning method with reverse inferring for estimating multitype cardiac indices in MRI and CT. Different from existing forward inferring methods, our method builds a reverse mapping network that maps the multitype cardiac indices to cardiac images. The task dependencies are then learned and shared with the multitask learning networks using an adversarial training approach. Finally, we transfer the parameters learned from MRI to CT. A series of experiments were conducted in which we first optimized the performance of our framework via ten-fold cross-validation on over 2900 cardiac MRI images. Then, the fine-tuned network was run on an independent data set with 2360 cardiac CT images. The results of all the experiments conducted on the proposed adversarial reverse mapping show excellent performance in estimating multitype cardiac indices.
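The reverse-mapping idea summarised above can be pictured as a generator that maps cardiac indices back to images while a discriminator enforces realism, with the dependencies learned this way shared to the multitask regressor. The PyTorch sketch below is only a schematic of that adversarial pairing, with invented layer sizes and toy tensors; it is not the authors' architecture.

```python
import torch
import torch.nn as nn

class ReverseMapper(nn.Module):
    """Maps a vector of cardiac indices to a (toy) 64x64 image."""
    def __init__(self, n_indices=11):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(n_indices, 256), nn.ReLU(),
            nn.Linear(256, 64 * 64), nn.Tanh())
    def forward(self, indices):
        return self.net(indices).view(-1, 1, 64, 64)

class Discriminator(nn.Module):
    """Scores whether an image looks like a real cardiac slice."""
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(
            nn.Flatten(), nn.Linear(64 * 64, 128), nn.LeakyReLU(0.2),
            nn.Linear(128, 1))
    def forward(self, image):
        return self.net(image)

# One adversarial step on random toy tensors.
gen, disc = ReverseMapper(), Discriminator()
indices = torch.randn(8, 11)        # batch of multitype cardiac indices
real = torch.randn(8, 1, 64, 64)    # batch of "real" images (placeholder)
bce = nn.BCEWithLogitsLoss()

fake = gen(indices)
d_loss = bce(disc(real), torch.ones(8, 1)) + bce(disc(fake.detach()), torch.zeros(8, 1))
g_loss = bce(disc(fake), torch.ones(8, 1))  # generator tries to fool the discriminator
print(d_loss.item(), g_loss.item())
```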
Automated multi-channel segmentation for the 4D myocardial velocity mapping cardiac MR. Wu, Y.; Hatipoglu, S.; Alonso-Álvarez, D.; Gatehouse, P.; Firmin, D.; Keegan, J.; and Yang, G. In Drukker, K.; and Mazurowski, M. A., editor(s), Medical Imaging 2021: Computer-Aided Diagnosis, pages 22, February 2021. SPIE.
@inproceedings{\n title = {Automated multi-channel segmentation for the 4D myocardial velocity mapping cardiac MR},\n type = {inproceedings},\n year = {2021},\n pages = {22},\n issue = {February},\n websites = {https://www.spiedigitallibrary.org/conference-proceedings-of-spie/11597/2580629/Automated-multi-channel-segmentation-for-the-4D-myocardial-velocity-mapping/10.1117/12.2580629.full},\n month = {2},\n publisher = {SPIE},\n day = {15},\n id = {e063c17f-76cd-3946-9963-5ea3b565b1fe},\n created = {2024-01-13T06:15:54.393Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:14:18.442Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Four-dimensional (4D) left ventricular myocardial velocity mapping (MVM) is a cardiac magnetic resonance (CMR) technique that allows assessment of cardiac motion in three orthogonal directions. Accurate and reproducible delineation of the myocardium is crucial for accurate analysis of peak systolic and diastolic myocardial velocities. In addition to the conventionally available magnitude CMR data, 4D MVM also acquires three velocity-encoded phase datasets which are used to generate velocity maps. These can be used to facilitate and improve myocardial delineation. Based on the success of deep learning in medical image processing, we propose a novel automated framework that improves the standard U-Net based methods on these CMR multi-channel data (magnitude and phase) by cross-channel fusion with attention module and shape information based post-processing to achieve accurate delineation of both epicardium and endocardium contours. To evaluate the results, we employ the widely used Dice scores and the quantification of myocardial longitudinal peak velocities. Our proposed network trained with multi-channel data shows enhanced performance compared to standard UNet based networks trained with single-channel data. Based on the results, our method provides compelling evidence for the design and application for the multi-channel image analysis of the 4D MVM CMR data.},\n bibtype = {inproceedings},\n author = {Wu, Yinzhe and Hatipoglu, Suzan and Alonso-Álvarez, Diego and Gatehouse, Peter and Firmin, David and Keegan, Jennifer and Yang, Guang},\n editor = {Drukker, Karen and Mazurowski, Maciej A.},\n doi = {10.1117/12.2580629},\n booktitle = {Medical Imaging 2021: Computer-Aided Diagnosis}\n}
\n Four-dimensional (4D) left ventricular myocardial velocity mapping (MVM) is a cardiac magnetic resonance (CMR) technique that allows assessment of cardiac motion in three orthogonal directions. Accurate and reproducible delineation of the myocardium is crucial for accurate analysis of peak systolic and diastolic myocardial velocities. In addition to the conventionally available magnitude CMR data, 4D MVM also acquires three velocity-encoded phase datasets which are used to generate velocity maps. These can be used to facilitate and improve myocardial delineation. Based on the success of deep learning in medical image processing, we propose a novel automated framework that improves the standard U-Net based methods on these CMR multi-channel data (magnitude and phase) by cross-channel fusion with attention module and shape information based post-processing to achieve accurate delineation of both epicardium and endocardium contours. To evaluate the results, we employ the widely used Dice scores and the quantification of myocardial longitudinal peak velocities. Our proposed network trained with multi-channel data shows enhanced performance compared to standard UNet based networks trained with single-channel data. Based on the results, our method provides compelling evidence for the design and application for the multi-channel image analysis of the 4D MVM CMR data.\n
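To make the cross-channel fusion idea concrete, the snippet below sketches a small attention-weighted fusion of the magnitude and velocity-encoded phase channels before they would enter a segmentation backbone. The module, its channel counts, and the attention form are illustrative assumptions, not the network described in the paper.

```python
import torch
import torch.nn as nn

class ChannelFusion(nn.Module):
    """Fuses the magnitude channel and three phase channels with learned attention weights."""
    def __init__(self, in_channels=4, features=16):
        super().__init__()
        self.embed = nn.Conv2d(in_channels, features, kernel_size=3, padding=1)
        # One attention weight per input channel, derived from global context.
        self.attn = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, in_channels, kernel_size=1),
            nn.Sigmoid())

    def forward(self, x):                # x: (batch, 4, H, W) = magnitude + 3 phase maps
        weights = self.attn(x)           # (batch, 4, 1, 1) attention per channel
        return self.embed(x * weights)   # re-weighted channels feed the segmentation backbone

fused = ChannelFusion()(torch.randn(2, 4, 96, 96))
print(fused.shape)  # torch.Size([2, 16, 96, 96])
```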
Texture-Based Deep Learning for Prostate Cancer Classification with Multiparametric MRI. Liu, Y.; Zheng, H.; Liang, Z.; Qi, M.; Brisbane, W.; Marks, L.; Raman, S.; Reiter, R.; Yang, G.; and Sung, K. In International Society for Magnetic Resonance in Medicine, pages 1-3, 2021.
@inproceedings{\n title = {Texture-Based Deep Learning for Prostate Cancer Classification with Multiparametric MRI},\n type = {inproceedings},\n year = {2021},\n keywords = {Magnetic resonance imaging,Magnetic resonance spectroscopy,Prostate cancer},\n pages = {1-3},\n id = {c3476c88-4ec4-3f15-a656-a636d908f997},\n created = {2024-01-13T06:15:54.538Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T11:51:49.893Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Liu, Yongkai and Zheng, Haoxin and Liang, Zhengrong and Qi, Miao and Brisbane, Wayne and Marks, Leonard and Raman, Steven and Reiter, Robert and Yang, Guang and Sung, Kyunghyun},\n booktitle = {International Society for Magnetic Resonance in Medicine}\n}
Industrial Cyber-Physical Systems-Based Cloud IoT Edge for Federated Heterogeneous Distillation. Wang, C.; Yang, G.; Papanastasiou, G.; Zhang, H.; Rodrigues, J. J.; and De Albuquerque, V. H. C. IEEE Transactions on Industrial Informatics, 17(8): 5511-5521. 2021.
@article{\n title = {Industrial Cyber-Physical Systems-Based Cloud IoT Edge for Federated Heterogeneous Distillation},\n type = {article},\n year = {2021},\n keywords = {Deep learning,Internet of Things (IoT),heterogeneous classifiers,knowledge distillation (KD),online learning},\n pages = {5511-5521},\n volume = {17},\n id = {066bd4ba-c52f-3f0d-8d2d-5fa52a2d2443},\n created = {2024-01-13T06:15:54.590Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:02:58.745Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Deep convoloutional networks have been widely deployed in modern cyber-physical systems performing different visual classification tasks. As the fog and edge devices have different computing capacity and perform different subtasks, models trained for one device may not be deployable on another. Knowledge distillation technique can effectively compress well trained convolutional neural networks into light-weight models suitable to different devices. However, due to privacy issue and transmission cost, manually annotated data for training the deep learning models are usually gradually collected and archived in different sites. Simply training a model on powerful cloud servers and compressing them for particular edge devices failed to use the distributed data stored at different sites. This offline training approach is also inefficient to deal with new data collected from the edge devices. To overcome these obstacles, in this article, we propose the heterogeneous brain storming (HBS) method for object recognition tasks in real-world Internet of Things (IoT) scenarios. Our method enables flexible bidirectional federated learning of heterogeneous models trained on distributed datasets with a new 'brain storming' mechanism and optimizable temperature parameters. In our comparison experiments, this HBS method outperformed multiple state-of-the-art single-model compression methods, as well as the newest multinetwork knowledge distillation methods with both homogeneous and heterogeneous classifiers. The ablation experiment results proved that the trainable temperature parameter into the conventional knowledge distillation loss can effectively ease the learning process of student networks in different methods. To the best of authors' knowledge, this is the first IoT-oriented method that allows asynchronous bidirectional heterogeneous knowledge distillation in deep networks.},\n bibtype = {article},\n author = {Wang, Chengjia and Yang, Guang and Papanastasiou, Giorgos and Zhang, Heye and Rodrigues, Joel J.P.C. and De Albuquerque, Victor Hugo C.},\n doi = {10.1109/TII.2020.3007407},\n journal = {IEEE Transactions on Industrial Informatics},\n number = {8}\n}
Deep convolutional networks have been widely deployed in modern cyber-physical systems performing different visual classification tasks. As fog and edge devices have different computing capacities and perform different subtasks, models trained for one device may not be deployable on another. Knowledge distillation can effectively compress well-trained convolutional neural networks into light-weight models suitable for different devices. However, due to privacy issues and transmission costs, manually annotated data for training deep learning models are usually gradually collected and archived at different sites. Simply training a model on powerful cloud servers and compressing it for particular edge devices fails to use the distributed data stored at different sites. This offline training approach is also inefficient at dealing with new data collected from the edge devices. To overcome these obstacles, in this article, we propose the heterogeneous brain storming (HBS) method for object recognition tasks in real-world Internet of Things (IoT) scenarios. Our method enables flexible bidirectional federated learning of heterogeneous models trained on distributed datasets with a new 'brain storming' mechanism and optimizable temperature parameters. In our comparison experiments, the HBS method outperformed multiple state-of-the-art single-model compression methods, as well as the newest multinetwork knowledge distillation methods with both homogeneous and heterogeneous classifiers. The ablation experiments showed that introducing a trainable temperature parameter into the conventional knowledge distillation loss can effectively ease the learning process of student networks across different methods. To the best of the authors' knowledge, this is the first IoT-oriented method that allows asynchronous bidirectional heterogeneous knowledge distillation in deep networks.
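Because the abstract highlights a trainable temperature in the distillation loss, a minimal sketch of that single ingredient is shown below: a standard softened-softmax knowledge-distillation term whose temperature is a learnable parameter. This is a generic illustration under our own assumptions, not the HBS training procedure itself.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class TemperatureKD(nn.Module):
    """Knowledge-distillation loss with a learnable (trainable) temperature."""
    def __init__(self, init_temperature=4.0):
        super().__init__()
        self.log_t = nn.Parameter(torch.log(torch.tensor(init_temperature)))

    def forward(self, student_logits, teacher_logits):
        t = self.log_t.exp()                         # keep the temperature positive
        soft_teacher = F.softmax(teacher_logits / t, dim=1)
        log_student = F.log_softmax(student_logits / t, dim=1)
        # Scaling by t**2 keeps gradient magnitudes comparable across temperatures.
        return F.kl_div(log_student, soft_teacher, reduction="batchmean") * t * t

kd = TemperatureKD()
loss = kd(torch.randn(8, 10), torch.randn(8, 10))
loss.backward()                                      # the temperature receives a gradient too
print(float(loss), float(kd.log_t.grad))
```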
Common pitfalls and recommendations for using machine learning to detect and prognosticate for COVID-19 using chest radiographs and CT scans. Roberts, M.; Driggs, D.; Thorpe, M.; Gilbey, J.; Yeung, M.; Ursprung, S.; Aviles-Rivero, A. I.; Etmann, C.; McCague, C.; Beer, L.; Weir-McCall, J. R.; Teng, Z.; Gkrania-Klotsas, E.; Ruggiero, A.; Korhonen, A.; Jefferson, E.; Ako, E.; Langs, G.; Gozaliasl, G.; Yang, G.; Prosch, H.; Preller, J.; Stanczuk, J.; Tang, J.; Hofmanninger, J.; Babar, J.; Sánchez, L. E.; Thillai, M.; Gonzalez, P. M.; Teare, P.; Zhu, X.; Patel, M.; Cafolla, C.; Azadbakht, H.; Jacob, J.; Lowe, J.; Zhang, K.; Bradley, K.; Wassin, M.; Holzer, M.; Ji, K.; Ortet, M. D.; Ai, T.; Walton, N.; Lio, P.; Stranks, S.; Shadbahr, T.; Lin, W.; Zha, Y.; Niu, Z.; Rudd, J. H.; Sala, E.; and Schönlieb, C. B. Nature Machine Intelligence, 3(3): 199-217. 2021.
@article{\n title = {Common pitfalls and recommendations for using machine learning to detect and prognosticate for COVID-19 using chest radiographs and CT scans},\n type = {article},\n year = {2021},\n pages = {199-217},\n volume = {3},\n id = {1e7f9170-24be-3892-9c28-30a22304c004},\n created = {2024-01-13T06:15:54.683Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:02:58.262Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Machine learning methods offer great promise for fast and accurate detection and prognostication of coronavirus disease 2019 (COVID-19) from standard-of-care chest radiographs (CXR) and chest computed tomography (CT) images. Many articles have been published in 2020 describing new machine learning-based models for both of these tasks, but it is unclear which are of potential clinical utility. In this systematic review, we consider all published papers and preprints, for the period from 1 January 2020 to 3 October 2020, which describe new machine learning models for the diagnosis or prognosis of COVID-19 from CXR or CT images. All manuscripts uploaded to bioRxiv, medRxiv and arXiv along with all entries in EMBASE and MEDLINE in this timeframe are considered. Our search identified 2,212 studies, of which 415 were included after initial screening and, after quality screening, 62 studies were included in this systematic review. Our review finds that none of the models identified are of potential clinical use due to methodological flaws and/or underlying biases. This is a major weakness, given the urgency with which validated COVID-19 models are needed. To address this, we give many recommendations which, if followed, will solve these issues and lead to higher-quality model development and well-documented manuscripts.},\n bibtype = {article},\n author = {Roberts, Michael and Driggs, Derek and Thorpe, Matthew and Gilbey, Julian and Yeung, Michael and Ursprung, Stephan and Aviles-Rivero, Angelica I. and Etmann, Christian and McCague, Cathal and Beer, Lucian and Weir-McCall, Jonathan R. and Teng, Zhongzhao and Gkrania-Klotsas, Effrossyni and Ruggiero, Alessandro and Korhonen, Anna and Jefferson, Emily and Ako, Emmanuel and Langs, Georg and Gozaliasl, Ghassem and Yang, Guang and Prosch, Helmut and Preller, Jacobus and Stanczuk, Jan and Tang, Jing and Hofmanninger, Johannes and Babar, Judith and Sánchez, Lorena Escudero and Thillai, Muhunthan and Gonzalez, Paula Martin and Teare, Philip and Zhu, Xiaoxiang and Patel, Mishal and Cafolla, Conor and Azadbakht, Hojjat and Jacob, Joseph and Lowe, Josh and Zhang, Kang and Bradley, Kyle and Wassin, Marcel and Holzer, Markus and Ji, Kangyu and Ortet, Maria Delgado and Ai, Tao and Walton, Nicholas and Lio, Pietro and Stranks, Samuel and Shadbahr, Tolou and Lin, Weizhe and Zha, Yunfei and Niu, Zhangming and Rudd, James H.F. and Sala, Evis and Schönlieb, Carola Bibiane},\n doi = {10.1038/s42256-021-00307-0},\n journal = {Nature Machine Intelligence},\n number = {3}\n}
\n Machine learning methods offer great promise for fast and accurate detection and prognostication of coronavirus disease 2019 (COVID-19) from standard-of-care chest radiographs (CXR) and chest computed tomography (CT) images. Many articles have been published in 2020 describing new machine learning-based models for both of these tasks, but it is unclear which are of potential clinical utility. In this systematic review, we consider all published papers and preprints, for the period from 1 January 2020 to 3 October 2020, which describe new machine learning models for the diagnosis or prognosis of COVID-19 from CXR or CT images. All manuscripts uploaded to bioRxiv, medRxiv and arXiv along with all entries in EMBASE and MEDLINE in this timeframe are considered. Our search identified 2,212 studies, of which 415 were included after initial screening and, after quality screening, 62 studies were included in this systematic review. Our review finds that none of the models identified are of potential clinical use due to methodological flaws and/or underlying biases. This is a major weakness, given the urgency with which validated COVID-19 models are needed. To address this, we give many recommendations which, if followed, will solve these issues and lead to higher-quality model development and well-documented manuscripts.\n
Multiparameter Synchronous Measurement with IVUS Images for Intelligently Diagnosing Coronary Cardiac Disease. Cao, Y.; Wang, Z.; Liu, Z.; Li, Y.; Xiao, X.; Sun, L.; Zhang, Y.; Hou, H.; Zhang, P.; and Yang, G. IEEE Transactions on Instrumentation and Measurement, 70: 1-10. 2021.
@article{\n title = {Multiparameter Synchronous Measurement with IVUS Images for Intelligently Diagnosing Coronary Cardiac Disease},\n type = {article},\n year = {2021},\n keywords = {Bland-Altman plot,Deeplab v3+,intravenous ultrasound,multiparameter measurement,vascular wall segmentation},\n pages = {1-10},\n volume = {70},\n publisher = {IEEE},\n id = {c4245c8f-2e95-3dcb-9bd0-bb4d7194e0f8},\n created = {2024-01-13T06:15:54.750Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:02:58.762Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Intravascular ultrasound (IVUS) can provide high-resolution cross-sectional images of coronary arteries, showing detailed information of the vascular lumen, tube wall, and athermanous plaques, which is helpful for the discovery or identification of early coronary atherosclerotic plaques. Multiple parameters extractable from the IVUS image can help the cardiologist analyze the pathology and assist in disease diagnosis and postoperative treatment. Typically, cardiologists manually label the intima and adventitia in the IVUS image, and obtain a limited number of parameters through the IVUS instrument, which is time consuming and labor-intensive. To assist the cardiologist in automatically obtaining more clinically relevant value parameters, a fully automatic IVUS multiparameter extraction framework is proposed. Based on the intima and adventitia obtained by DeepLab V3+, we propose a targeted noise reduction preprocessing framework adapted to IVUS. The framework implements the basic parameter extraction of IVUS through two newly proposed algorithms. And through the standard medical formula, the basic parameters are converted into 10 standard medical indicators. Standardized medical indicators are obtained by clinically relevant basic parameters. In terms of accuracy, this article used a clinical database obtained from Qilu Hospital of Shandong University and compared the results of the framework with the gold standard of cardiologists. The relative error of continuous IVUS main parameters between frames did not exceed 10.10%. The relative error of independent IVUS did not exceed 10.03%. Based on the distribution consistency of the parameters and the gold standard, a Bland-Altman plot of the parameters is proposed. It was verified that this distribution is basically in line with the gold standard of cardiologists. The algorithm in this article obtained a total of 10 parameters, far exceeding the parameters obtained by cardiologists and traditional IVUS machines. Its accuracy and speed can also meet the requirements of cardiologists for clinical diagnosis.},\n bibtype = {article},\n author = {Cao, Yankun and Wang, Ziqiao and Liu, Zhi and Li, Yujun and Xiao, Xiaoyan and Sun, Longkun and Zhang, Yang and Hou, Haixia and Zhang, Pengfei and Yang, Guang},\n doi = {10.1109/TIM.2020.3036067},\n journal = {IEEE Transactions on Instrumentation and Measurement}\n}
\n Intravascular ultrasound (IVUS) can provide high-resolution cross-sectional images of coronary arteries, showing detailed information of the vascular lumen, tube wall, and athermanous plaques, which is helpful for the discovery or identification of early coronary atherosclerotic plaques. Multiple parameters extractable from the IVUS image can help the cardiologist analyze the pathology and assist in disease diagnosis and postoperative treatment. Typically, cardiologists manually label the intima and adventitia in the IVUS image, and obtain a limited number of parameters through the IVUS instrument, which is time consuming and labor-intensive. To assist the cardiologist in automatically obtaining more clinically relevant value parameters, a fully automatic IVUS multiparameter extraction framework is proposed. Based on the intima and adventitia obtained by DeepLab V3+, we propose a targeted noise reduction preprocessing framework adapted to IVUS. The framework implements the basic parameter extraction of IVUS through two newly proposed algorithms. And through the standard medical formula, the basic parameters are converted into 10 standard medical indicators. Standardized medical indicators are obtained by clinically relevant basic parameters. In terms of accuracy, this article used a clinical database obtained from Qilu Hospital of Shandong University and compared the results of the framework with the gold standard of cardiologists. The relative error of continuous IVUS main parameters between frames did not exceed 10.10%. The relative error of independent IVUS did not exceed 10.03%. Based on the distribution consistency of the parameters and the gold standard, a Bland-Altman plot of the parameters is proposed. It was verified that this distribution is basically in line with the gold standard of cardiologists. The algorithm in this article obtained a total of 10 parameters, far exceeding the parameters obtained by cardiologists and traditional IVUS machines. Its accuracy and speed can also meet the requirements of cardiologists for clinical diagnosis.\n
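The abstract leans on relative errors and Bland-Altman agreement against the cardiologists' gold standard; the short sketch below shows how such an agreement plot is typically computed with numpy and matplotlib. The arrays are placeholder values, not data from the study.

```python
import numpy as np
import matplotlib.pyplot as plt

# Placeholder measurements of one IVUS parameter (e.g. lumen area, mm^2).
auto = np.array([10.2, 8.9, 12.4, 7.6, 9.8, 11.1])     # framework output
manual = np.array([10.0, 9.2, 12.1, 7.9, 9.5, 11.4])   # cardiologist gold standard

mean_ = (auto + manual) / 2
diff = auto - manual
bias = diff.mean()
loa = 1.96 * diff.std(ddof=1)                           # 95% limits of agreement

plt.scatter(mean_, diff)
for y in (bias, bias - loa, bias + loa):
    plt.axhline(y, linestyle="--")
plt.xlabel("Mean of automatic and manual measurement")
plt.ylabel("Difference (automatic - manual)")
plt.title("Bland-Altman agreement (toy data)")
plt.show()
```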
A Deep Multi-Task Learning Framework for Brain Tumor Segmentation. Huang, H.; Yang, G.; Zhang, W.; Xu, X.; Yang, W.; Jiang, W.; and Lai, X. Frontiers in Oncology, 11(June): 1-16. 2021.
@article{\n title = {A Deep Multi-Task Learning Framework for Brain Tumor Segmentation},\n type = {article},\n year = {2021},\n keywords = {automatic segmentation,automatic segmentation, brain tumor, deep multi-ta,brain tumor,deep multi-task learning framework,magnetic resonance imaging,multi-depth fusion module},\n pages = {1-16},\n volume = {11},\n id = {c9b1feaf-3015-35f6-954e-809c42f34426},\n created = {2024-01-13T06:15:54.952Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:02:58.253Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Glioma is the most common primary central nervous system tumor, accounting for about half of all intracranial primary tumors. As a non-invasive examination method, MRI has an extremely important guiding role in the clinical intervention of tumors. However, manually segmenting brain tumors from MRI requires a lot of time and energy for doctors, which affects the implementation of follow-up diagnosis and treatment plans. With the development of deep learning, medical image segmentation is gradually automated. However, brain tumors are easily confused with strokes and serious imbalances between classes make brain tumor segmentation one of the most difficult tasks in MRI segmentation. In order to solve these problems, we propose a deep multi-task learning framework and integrate a multi-depth fusion module in the framework to accurately segment brain tumors. In this framework, we have added a distance transform decoder based on the V-Net, which can make the segmentation contour generated by the mask decoder more accurate and reduce the generation of rough boundaries. In order to combine the different tasks of the two decoders, we weighted and added their corresponding loss functions, where the distance map prediction regularized the mask prediction. At the same time, the multi-depth fusion module in the encoder can enhance the ability of the network to extract features. The accuracy of the model will be evaluated online using the multispectral MRI records of the BraTS 2018, BraTS 2019, and BraTS 2020 datasets. This method obtains high-quality segmentation results, and the average Dice is as high as 78%. The experimental results show that this model has great potential in segmenting brain tumors automatically and accurately.},\n bibtype = {article},\n author = {Huang, He and Yang, Guang and Zhang, Wenbo and Xu, Xiaomei and Yang, Weiji and Jiang, Weiwei and Lai, Xiaobo},\n doi = {10.3389/fonc.2021.690244},\n journal = {Frontiers in Oncology},\n number = {June}\n}
\n Glioma is the most common primary central nervous system tumor, accounting for about half of all intracranial primary tumors. As a non-invasive examination method, MRI has an extremely important guiding role in the clinical intervention of tumors. However, manually segmenting brain tumors from MRI requires a lot of time and energy for doctors, which affects the implementation of follow-up diagnosis and treatment plans. With the development of deep learning, medical image segmentation is gradually automated. However, brain tumors are easily confused with strokes and serious imbalances between classes make brain tumor segmentation one of the most difficult tasks in MRI segmentation. In order to solve these problems, we propose a deep multi-task learning framework and integrate a multi-depth fusion module in the framework to accurately segment brain tumors. In this framework, we have added a distance transform decoder based on the V-Net, which can make the segmentation contour generated by the mask decoder more accurate and reduce the generation of rough boundaries. In order to combine the different tasks of the two decoders, we weighted and added their corresponding loss functions, where the distance map prediction regularized the mask prediction. At the same time, the multi-depth fusion module in the encoder can enhance the ability of the network to extract features. The accuracy of the model will be evaluated online using the multispectral MRI records of the BraTS 2018, BraTS 2019, and BraTS 2020 datasets. This method obtains high-quality segmentation results, and the average Dice is as high as 78%. The experimental results show that this model has great potential in segmenting brain tumors automatically and accurately.\n
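The key combination step described above, weighting a mask-decoder loss against a distance-map-decoder loss, can be written in a few lines. The sketch below uses a soft Dice term plus an L1 distance-map term with an assumed weighting, purely to illustrate the loss design rather than the paper's exact formulation.

```python
import torch
import torch.nn.functional as F

def soft_dice_loss(pred, target, eps=1e-6):
    """Soft Dice loss on probability maps in [0, 1]."""
    inter = (pred * target).sum()
    return 1 - (2 * inter + eps) / (pred.sum() + target.sum() + eps)

def multitask_loss(mask_pred, mask_gt, dist_pred, dist_gt, w_dist=0.5):
    """Mask segmentation loss regularised by a distance-transform regression term."""
    seg = soft_dice_loss(torch.sigmoid(mask_pred), mask_gt)
    dist = F.l1_loss(dist_pred, dist_gt)      # distance-map prediction branch
    return seg + w_dist * dist                # weighted sum of the two decoder losses

mask_pred, dist_pred = torch.randn(1, 1, 64, 64), torch.randn(1, 1, 64, 64)
mask_gt = (torch.rand(1, 1, 64, 64) > 0.7).float()
dist_gt = torch.rand(1, 1, 64, 64)
print(multitask_loss(mask_pred, mask_gt, dist_pred, dist_gt))
```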
Explainable AI for COVID-19 CT Classifiers: An initial comparison study. Ye, Q.; Xia, J.; and Yang, G. Proceedings - IEEE Symposium on Computer-Based Medical Systems, 2021-June: 521-526. 2021.
@article{\n title = {Explainable AI for COVID-19 CT Classifiers: An initial comparison study},\n type = {article},\n year = {2021},\n keywords = {COVID-19,CT,Classification,Deep Learning,Explainable AI},\n pages = {521-526},\n volume = {2021-June},\n id = {59515a2f-3fd7-3c31-afe7-589ed64f005a},\n created = {2024-01-13T06:15:54.953Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:02:58.831Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Artificial Intelligence (AI) has made leapfrogs in development across all the industrial sectors especially when deep learning has been introduced. Deep learning helps to learn the behaviour of an entity through methods of recognising and interpreting patterns. Despite its limitless potential, the mystery is how deep learning algorithms make a decision in the first place. Explainable AI (XAI) is the key to unlocking AI and the black-box for deep learning. XAI is an AI model that is programmed to explain its goals, logic, and decision making so that the end users can understand. The end users can be domain experts, regulatory agencies, managers and executive board members, data scientists, users that use AI, with or without awareness, or someone who is affected by the decisions of an AI model. Chest CT has emerged as a valuable tool for the clinical diagnostic and treatment management of the lung diseases associated with COVID-19. AI can support rapid evaluation of CT scans to differentiate COVID-19 findings from other lung diseases. However, how these AI tools or deep learning algorithms reach such a decision and which are the most influential features derived from these neural networks with typically deep layers are not clear. The aim of this study is to propose and develop XAI strategies for COVID-19 classification models with an investigation of comparison. The results demonstrate promising quantification and qualitative visualisations that can further enhance the clinician's understanding and decision making with more granular information from the results given by the learned XAI models.},\n bibtype = {article},\n author = {Ye, Qinghao and Xia, Jun and Yang, Guang},\n doi = {10.1109/CBMS52027.2021.00103},\n journal = {Proceedings - IEEE Symposium on Computer-Based Medical Systems}\n}
\n Artificial Intelligence (AI) has made leapfrogs in development across all the industrial sectors especially when deep learning has been introduced. Deep learning helps to learn the behaviour of an entity through methods of recognising and interpreting patterns. Despite its limitless potential, the mystery is how deep learning algorithms make a decision in the first place. Explainable AI (XAI) is the key to unlocking AI and the black-box for deep learning. XAI is an AI model that is programmed to explain its goals, logic, and decision making so that the end users can understand. The end users can be domain experts, regulatory agencies, managers and executive board members, data scientists, users that use AI, with or without awareness, or someone who is affected by the decisions of an AI model. Chest CT has emerged as a valuable tool for the clinical diagnostic and treatment management of the lung diseases associated with COVID-19. AI can support rapid evaluation of CT scans to differentiate COVID-19 findings from other lung diseases. However, how these AI tools or deep learning algorithms reach such a decision and which are the most influential features derived from these neural networks with typically deep layers are not clear. The aim of this study is to propose and develop XAI strategies for COVID-19 classification models with an investigation of comparison. The results demonstrate promising quantification and qualitative visualisations that can further enhance the clinician's understanding and decision making with more granular information from the results given by the learned XAI models.\n
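As one concrete example of the kind of post-hoc explanation such a comparison study examines, the sketch below computes a Grad-CAM style heatmap from the last convolutional stage of a small classifier. Grad-CAM is used here only as a representative XAI method; the toy model, input, and layer choice are assumptions for illustration.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

model = nn.Sequential(                      # tiny stand-in classifier, not the paper's model
    nn.Conv2d(1, 8, 3, padding=1), nn.ReLU(),
    nn.Conv2d(8, 8, 3, padding=1), nn.ReLU(),
    nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(8, 2))

feats = {}
def save_activation(module, inp, out):
    out.retain_grad()                       # keep gradients on the feature map
    feats["act"] = out
model[3].register_forward_hook(save_activation)     # hook on the last conv+ReLU output

x = torch.randn(1, 1, 64, 64)               # a toy CT slice
logits = model(x)
logits[0, 1].backward()                      # gradient of the "COVID-19" class score

act, grad = feats["act"], feats["act"].grad
weights = grad.mean(dim=(2, 3), keepdim=True)        # channel-wise importance
cam = F.relu((weights * act).sum(dim=1))              # Grad-CAM map, shape (1, H, W)
cam = cam / (cam.max() + 1e-8)
print(cam.shape, float(cam.max()))
```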
Can Clinical Symptoms and Laboratory Results Predict CT Abnormality? Initial Findings Using Novel Machine Learning Techniques in Children With COVID-19 Infections. Ma, H.; Ye, Q.; Ding, W.; Jiang, Y.; Wang, M.; Niu, Z.; Zhou, X.; Gao, Y.; Wang, C.; Menpes-Smith, W.; Fang, E. F.; Shao, J.; Xia, J.; and Yang, G. Frontiers in Medicine, 8(June): 1-10. 2021.
@article{\n title = {Can Clinical Symptoms and Laboratory Results Predict CT Abnormality? Initial Findings Using Novel Machine Learning Techniques in Children With COVID-19 Infections},\n type = {article},\n year = {2021},\n keywords = {COVID-19, decision trees, machine learning, RT-PCR,RT-PCR—polymerase chain reaction with reverse tran,artificial intelligence,decision trees,machine learning,pediatric},\n pages = {1-10},\n volume = {8},\n id = {88a3d812-c399-3ac6-8dcf-93d1c02115c0},\n created = {2024-01-13T06:15:55.036Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:02:58.271Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {The rapid spread of coronavirus 2019 disease (COVID-19) has manifested a global public health crisis, and chest CT has been proven to be a powerful tool for screening, triage, evaluation and prognosis in COVID-19 patients. However, CT is not only costly but also associated with an increased incidence of cancer, in particular for children. This study will question whether clinical symptoms and laboratory results can predict the CT outcomes for the pediatric patients with positive RT-PCR testing results in order to determine the necessity of CT for such a vulnerable group. Clinical data were collected from 244 consecutive pediatric patients (16 years of age and under) treated at Wuhan Children's Hospital with positive RT-PCR testing, and the chest CT were performed within 3 days of clinical data collection, from January 21 to March 8, 2020. This study was approved by the local ethics committee of Wuhan Children's Hospital. Advanced decision tree based machine learning models were developed for the prediction of CT outcomes. Results have shown that age, lymphocyte, neutrophils, ferritin and C-reactive protein are the most related clinical indicators for predicting CT outcomes for pediatric patients with positive RT-PCR testing. Our decision support system has managed to achieve an AUC of 0.84 with 0.82 accuracy and 0.84 sensitivity for predicting CT outcomes. Our model can effectively predict CT outcomes, and our findings have indicated that the use of CT should be reconsidered for pediatric patients, as it may not be indispensable.},\n bibtype = {article},\n author = {Ma, Huijing and Ye, Qinghao and Ding, Weiping and Jiang, Yinghui and Wang, Minhao and Niu, Zhangming and Zhou, Xi and Gao, Yuan and Wang, Chengjia and Menpes-Smith, Wade and Fang, Evandro Fei and Shao, Jianbo and Xia, Jun and Yang, Guang},\n doi = {10.3389/fmed.2021.699984},\n journal = {Frontiers in Medicine},\n number = {June}\n}
\n The rapid spread of coronavirus 2019 disease (COVID-19) has manifested a global public health crisis, and chest CT has been proven to be a powerful tool for screening, triage, evaluation and prognosis in COVID-19 patients. However, CT is not only costly but also associated with an increased incidence of cancer, in particular for children. This study will question whether clinical symptoms and laboratory results can predict the CT outcomes for the pediatric patients with positive RT-PCR testing results in order to determine the necessity of CT for such a vulnerable group. Clinical data were collected from 244 consecutive pediatric patients (16 years of age and under) treated at Wuhan Children's Hospital with positive RT-PCR testing, and the chest CT were performed within 3 days of clinical data collection, from January 21 to March 8, 2020. This study was approved by the local ethics committee of Wuhan Children's Hospital. Advanced decision tree based machine learning models were developed for the prediction of CT outcomes. Results have shown that age, lymphocyte, neutrophils, ferritin and C-reactive protein are the most related clinical indicators for predicting CT outcomes for pediatric patients with positive RT-PCR testing. Our decision support system has managed to achieve an AUC of 0.84 with 0.82 accuracy and 0.84 sensitivity for predicting CT outcomes. Our model can effectively predict CT outcomes, and our findings have indicated that the use of CT should be reconsidered for pediatric patients, as it may not be indispensable.\n
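For readers who want to see the shape of such a pipeline, the sketch below trains a plain decision tree on synthetic tabular features standing in for age, lymphocytes, neutrophils, ferritin and C-reactive protein, and reports AUC and accuracy. The data and the specific estimator are illustrative assumptions, not the models or results of the study.

```python
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score, accuracy_score

rng = np.random.default_rng(0)
# Synthetic stand-ins for age, lymphocyte, neutrophil, ferritin, C-reactive protein.
X = rng.normal(size=(300, 5))
# Synthetic label: CT abnormal yes/no, loosely driven by two of the features.
y = (X[:, 0] + 0.5 * X[:, 3] + rng.normal(scale=0.8, size=300) > 0).astype(int)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
clf = DecisionTreeClassifier(max_depth=3, random_state=0).fit(X_train, y_train)

probs = clf.predict_proba(X_test)[:, 1]
print("AUC:", roc_auc_score(y_test, probs))
print("Accuracy:", accuracy_score(y_test, clf.predict(X_test)))
```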
Machine learning for covid-19 diagnosis and prognostication: Lessons for amplifying the signal while reducing the noise. Driggs, D.; Selby, I.; Roberts, M.; Gkrania-Klotsas, E.; Rudd, J. H.; Yang, G.; Babar, J.; Sala, E.; and Schönlieb, C. B. Radiology: Artificial Intelligence, 3(4). 2021.
@article{\n title = {Machine learning for covid-19 diagnosis and prognostication: Lessons for amplifying the signal while reducing the noise},\n type = {article},\n year = {2021},\n volume = {3},\n id = {0c2cd610-d680-37d2-912f-b7a68548b22a},\n created = {2024-01-13T06:15:55.130Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:02:58.363Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n bibtype = {article},\n author = {Driggs, Derek and Selby, Ian and Roberts, Michael and Gkrania-Klotsas, Effrossyni and Rudd, James H.F. and Yang, Guang and Babar, Judith and Sala, Evis and Schönlieb, Carola Bibiane},\n doi = {10.1148/ryai.2021210011},\n journal = {Radiology: Artificial Intelligence},\n number = {4}\n}
Recent advances in artificial intelligence for cardiac imaging. Yang, G.; Zhang, H.; Firmin, D.; and Li, S. Computerized Medical Imaging and Graphics, 90. 2021.
@article{\n title = {Recent advances in artificial intelligence for cardiac imaging},\n type = {article},\n year = {2021},\n volume = {90},\n id = {f02789e4-8b2d-3afa-96ff-3ae73aa9888f},\n created = {2024-01-13T06:15:55.173Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:02:58.349Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n bibtype = {article},\n author = {Yang, Guang and Zhang, Heye and Firmin, David and Li, Shuo},\n doi = {10.1016/j.compmedimag.2021.101928},\n journal = {Computerized Medical Imaging and Graphics}\n}
Three-Dimensional Embedded Attentive RNN (3D-EAR) Segmentor for Left Ventricle Delineation from Myocardial Velocity Mapping. Kuang, M.; Wu, Y.; Alonso-Álvarez, D.; Firmin, D.; Keegan, J.; Gatehouse, P.; and Yang, G. In Functional Imaging and Modeling of the Heart. FIMH 2021. Lecture Notes in Computer Science, Volume 12738, pages 55-62. 2021.
@inbook{\n type = {inbook},\n year = {2021},\n pages = {55-62},\n volume = {12738},\n websites = {https://link.springer.com/10.1007/978-3-030-78710-3_6},\n id = {b1774d45-d9a2-3c0d-bf69-7aace471ae6e},\n created = {2024-01-13T06:15:55.229Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T11:51:49.797Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n bibtype = {inbook},\n author = {Kuang, Mengmeng and Wu, Yinzhe and Alonso-Álvarez, Diego and Firmin, David and Keegan, Jennifer and Gatehouse, Peter and Yang, Guang},\n doi = {10.1007/978-3-030-78710-3_6},\n chapter = {Three-Dimensional Embedded Attentive RNN (3D-EAR) Segmentor for Left Ventricle Delineation from Myocardial Velocity Mapping},\n title = {Functional Imaging and Modeling of the Heart. FIMH 2021. Lecture Notes in Computer Science}\n}
Association between right ventricular strain and outcomes in patients with dilated cardiomyopathy. Liu, T.; Gao, Y.; Wang, H.; Zhou, Z.; Wang, R.; Chang, S. S.; Liu, Y.; Sun, Y.; Rui, H.; Yang, G.; Firmin, D.; Dong, J.; and Xu, L. Heart, 107(15): 1233-1239. 2021.
@article{\n title = {Association between right ventricular strain and outcomes in patients with dilated cardiomyopathy},\n type = {article},\n year = {2021},\n keywords = {advanced cardiac imaging,cardiac imaging and diagnostics,cardiac magnetic resonance (CMR) imaging,heart failure,myocardial disease},\n pages = {1233-1239},\n volume = {107},\n id = {73fb66a0-8a64-3550-9cf9-07b832f4fbb0},\n created = {2024-01-13T06:15:56.165Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:24:52.921Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Objective To explore the association between three-dimensional (3D) cardiac magnetic resonance (CMR) feature tracking (FT) right ventricular peak global longitudinal strain (RVpGLS) and major adverse cardiovascular events (MACEs) in patients with stage C or D heart failure (HF) with non-ischaemic dilated cardiomyopathy (NIDCM) but without atrial fibrillation (AF). Methods Patients with dilated cardiomyopathy were enrolled in this prospective cohort study. Comprehensive clinical and biochemical analysis and CMR imaging were performed. All patients were followed up for MACEs. Results A total of 192 patients (age 53±14 years) were eligible for this study. A combination of cardiovascular death and cardiac transplantation occurred in 18 subjects during the median follow-up of 567 (311, 920) days. Brain natriuretic peptide, creatinine, left ventricular (LV) end-diastolic volume, LV end-systolic volume, right ventricular (RV) end-diastolic volume and RVpGLS from CMR were associated with the outcomes. The multivariate Cox regression model adjusting for traditional risk factors and CMR variables detected a significant association between RVpGLS and MACEs in patients with stage C or D HF with NIDCM without AF. Kaplan-Meier analysis based on RVpGLS cut-off value revealed that patients with RVpGLS <-8.5% showed more favourable clinical outcomes than those with RVpGLS ≥-8.5% (p=0.0037). Subanalysis found that this association remained unchanged. Conclusions RVpGLS-derived from 3D CMR FT is associated with a significant prognostic impact in patients with NIDCM with stage C or D HF and without AF.},\n bibtype = {article},\n author = {Liu, Tong and Gao, Yifeng and Wang, Hui and Zhou, Zhen and Wang, Rui and Chang, San Shuai and Liu, Yuanyuan and Sun, Yuqing and Rui, Hongliang and Yang, Guang and Firmin, David and Dong, Jianzeng and Xu, Lei},\n doi = {10.1136/heartjnl-2020-317949},\n journal = {Heart},\n number = {15}\n}
\n Objective To explore the association between three-dimensional (3D) cardiac magnetic resonance (CMR) feature tracking (FT) right ventricular peak global longitudinal strain (RVpGLS) and major adverse cardiovascular events (MACEs) in patients with stage C or D heart failure (HF) with non-ischaemic dilated cardiomyopathy (NIDCM) but without atrial fibrillation (AF). Methods Patients with dilated cardiomyopathy were enrolled in this prospective cohort study. Comprehensive clinical and biochemical analysis and CMR imaging were performed. All patients were followed up for MACEs. Results A total of 192 patients (age 53±14 years) were eligible for this study. A combination of cardiovascular death and cardiac transplantation occurred in 18 subjects during the median follow-up of 567 (311, 920) days. Brain natriuretic peptide, creatinine, left ventricular (LV) end-diastolic volume, LV end-systolic volume, right ventricular (RV) end-diastolic volume and RVpGLS from CMR were associated with the outcomes. The multivariate Cox regression model adjusting for traditional risk factors and CMR variables detected a significant association between RVpGLS and MACEs in patients with stage C or D HF with NIDCM without AF. Kaplan-Meier analysis based on RVpGLS cut-off value revealed that patients with RVpGLS <-8.5% showed more favourable clinical outcomes than those with RVpGLS ≥-8.5% (p=0.0037). Subanalysis found that this association remained unchanged. Conclusions RVpGLS-derived from 3D CMR FT is associated with a significant prognostic impact in patients with NIDCM with stage C or D HF and without AF.\n
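The survival analysis summarised above (a multivariable Cox model plus a Kaplan-Meier split at an RVpGLS cut-off of -8.5%) can be reproduced in outline with the lifelines package. The dataframe below is entirely fabricated and only shows the analysis pattern, not the study's data.

```python
import numpy as np
import pandas as pd
from lifelines import KaplanMeierFitter, CoxPHFitter

rng = np.random.default_rng(1)
n = 100
df = pd.DataFrame({
    "days": rng.integers(100, 1000, n),      # follow-up time
    "mace": rng.integers(0, 2, n),           # event indicator (death / transplantation)
    "rvpgls": rng.normal(-10, 3, n),         # RV peak global longitudinal strain (%)
    "lvedv": rng.normal(220, 40, n),         # LV end-diastolic volume (mL)
})

# Kaplan-Meier curve for the group below the cut-off used in the abstract (-8.5%).
low_strain = df["rvpgls"] < -8.5
kmf = KaplanMeierFitter()
kmf.fit(df.loc[low_strain, "days"], event_observed=df.loc[low_strain, "mace"],
        label="RVpGLS < -8.5%")
print(kmf.median_survival_time_)

# Multivariable Cox model adjusting for an additional covariate.
cph = CoxPHFitter().fit(df, duration_col="days", event_col="mace")
cph.print_summary()
```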
A mathematical model for predicting intracranial pressure based on noninvasively acquired PC-MRI parameters in communicating hydrocephalus. Long, J.; Sun, D.; Zhou, X.; Huang, X.; Hu, J.; Xia, J.; and Yang, G. Journal of Clinical Monitoring and Computing, 35(6): 1325-1332. 2021.
@article{\n title = {A mathematical model for predicting intracranial pressure based on noninvasively acquired PC-MRI parameters in communicating hydrocephalus},\n type = {article},\n year = {2021},\n keywords = {Cerebrospinal fluid parameters,Communicating hydrocephalus,Intracranial pressure,Levenberg–marquardt optimisation,Phase-contrast cine MRI},\n pages = {1325-1332},\n volume = {35},\n websites = {https://doi.org/10.1007/s10877-020-00598-5},\n publisher = {Springer Netherlands},\n id = {6f13e695-6b84-3c8b-84a5-c2737ebcee6e},\n created = {2024-01-13T06:15:56.172Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:24:51.158Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {To develop and validate a mathematical model for predicting intracranial pressure (ICP) noninvasively using phase-contrast cine MRI (PC-MRI). We performed a retrospective analysis of PC-MRI from patients with communicating hydrocephalus (n = 138). The patients were recruited from Shenzhen Second People’s Hospital between November 2017 and April 2020, and randomly allocated into training (n = 97) and independent validation (n = 41) groups. All participants underwent lumbar puncture and PC-MRI in order to evaluate ICP and cerebrospinal fluid (CSF) parameters (i.e., aqueduct diameter and flow velocity), respectively. A novel ICP-predicting model was then developed based on the nonlinear relationships between the CSF parameters, using the Levenberg–Marquardt and general global optimisation methods. There was no significant difference in baseline demographic characteristics between the training and independent validation groups. The accuracy of the model for predicting ICP was 0.899 in the training cohort (n = 97) and 0.861 in the independent validation cohort (n = 41). We obtained an ICP-predicting model that showed excellent performance in the noninvasive diagnosis of clinically significant communicating hydrocephalus.},\n bibtype = {article},\n author = {Long, Jia and Sun, Deshun and Zhou, Xi and Huang, Xianjian and Hu, Jiani and Xia, Jun and Yang, Guang},\n doi = {10.1007/s10877-020-00598-5},\n journal = {Journal of Clinical Monitoring and Computing},\n number = {6}\n}
\n To develop and validate a mathematical model for predicting intracranial pressure (ICP) noninvasively using phase-contrast cine MRI (PC-MRI). We performed a retrospective analysis of PC-MRI from patients with communicating hydrocephalus (n = 138). The patients were recruited from Shenzhen Second People’s Hospital between November 2017 and April 2020, and randomly allocated into training (n = 97) and independent validation (n = 41) groups. All participants underwent lumbar puncture and PC-MRI in order to evaluate ICP and cerebrospinal fluid (CSF) parameters (i.e., aqueduct diameter and flow velocity), respectively. A novel ICP-predicting model was then developed based on the nonlinear relationships between the CSF parameters, using the Levenberg–Marquardt and general global optimisation methods. There was no significant difference in baseline demographic characteristics between the training and independent validation groups. The accuracy of the model for predicting ICP was 0.899 in the training cohort (n = 97) and 0.861 in the independent validation cohort (n = 41). We obtained an ICP-predicting model that showed excellent performance in the noninvasive diagnosis of clinically significant communicating hydrocephalus.\n
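The fitting step named in the abstract, estimating a nonlinear relation between CSF flow parameters and invasively measured ICP with Levenberg-Marquardt optimisation, looks roughly like the scipy sketch below. The functional form and the synthetic data are assumptions chosen only to show the mechanics, not the paper's model.

```python
import numpy as np
from scipy.optimize import curve_fit

def icp_model(X, a, b, c):
    """Assumed nonlinear form linking aqueduct diameter and peak CSF velocity to ICP."""
    diameter, velocity = X
    return a * velocity * np.exp(b * diameter) + c

rng = np.random.default_rng(2)
diameter = rng.uniform(2.0, 6.0, 60)       # mm
velocity = rng.uniform(2.0, 12.0, 60)      # cm/s
# Synthetic "lumbar puncture" ICP values generated from the same form plus noise.
icp = 1.5 * velocity * np.exp(0.2 * diameter) + 5 + rng.normal(0, 1.0, 60)

# curve_fit uses Levenberg-Marquardt ('lm') when no bounds are given.
params, _ = curve_fit(icp_model, (diameter, velocity), icp, p0=(1.0, 0.1, 1.0), method="lm")
print("fitted a, b, c:", params)
print("predicted ICP for a new patient:", icp_model((4.0, 8.0), *params))
```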
Physiologically personalized coronary blood flow model to improve the estimation of non-invasive fractional flow reserve. Liu, X.; Xu, C.; Rao, S.; Zhang, Y.; Ghista, D.; Gao, Z.; and Yang, G. Medical Physics, 49(1): 583-597. 2021.
@article{\n title = {Physiologically personalized coronary blood flow model to improve the estimation of non-invasive fractional flow reserve},\n type = {article},\n year = {2021},\n pages = {583-597},\n volume = {49},\n id = {b4a66dec-eeb1-3e2c-b4a6-4f6128027dcf},\n created = {2024-01-13T08:14:16.710Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:17:10.657Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {liu_physiologically_2021},\n source_type = {article},\n private_publication = {false},\n abstract = {Purpose: Coronary outlet resistance is influenced by the quantification and distribution of resting coronary blood flow. It is crucial for a more physiologically accurate estimation of fractional flow reserve (FFR) derived from computed tomography angiography (CTA), referred to as FFRCT. This study presents a physiologically personalized (PP)-based coronary blood flow model involving the outlet boundary condition (BC) and a standardized outlet truncation strategy to estimate the outlet resistance and FFRCT. Methods: In this study, a total of 274 vessels were retrospectively collected from 221 patients who underwent coronary CTA and invasive FFR within 14 days. For FFRCT determination, we have employed a PP-based outlet BC model involving personalized physiological parameters and left ventricular mass (LVM) to quantify resting coronary blood flow. We evaluated the improvement achieved in the diagnostic performance of FFRCT by using the PP-based outlet BC model relative to the LVM-based model, with respect to the invasive FFR. Additionally, in order to evaluate the impact of the outlet truncation strategy on FFRCT, 68 vessels were randomly selected and analyzed independently by two operators, by using two different outlet truncation strategies at 1-month intervals. Results: The per-vessel diagnostic performance of the PP-based outlet BC model was improved, based on invasive FFR as reference, compared to the LVM-based model: (i) accuracy/sensitivity/specificity: 91.2%/90.4%/91.8% versus 86.5%/84.6%/87.6%, for the entire dataset of 274 vessels, (ii) accuracy/sensitivity/specificity: 88.7%/82.4%/90.4% versus 82.4%/ 76.5%/84.0%, for moderately stenosis lesions. The standardized outlet truncation strategy showed good repeatability with the Kappa coefficient of 0.908. Conclusions: It has been shown that our PP-based outlet BC model and standardized outlet truncation strategy can improve the diagnostic performance and repeatability of FFRCT.},\n bibtype = {article},\n author = {Liu, Xiujian and Xu, Chuangye and Rao, Simin and Zhang, Ye and Ghista, Dhanjoo and Gao, Zhifan and Yang, Guang},\n doi = {10.1002/mp.15363},\n journal = {Medical Physics},\n number = {1}\n}
Purpose: Coronary outlet resistance is influenced by the quantification and distribution of resting coronary blood flow. It is crucial for a more physiologically accurate estimation of fractional flow reserve (FFR) derived from computed tomography angiography (CTA), referred to as FFRCT. This study presents a physiologically personalized (PP)-based coronary blood flow model involving the outlet boundary condition (BC) and a standardized outlet truncation strategy to estimate the outlet resistance and FFRCT. Methods: In this study, a total of 274 vessels were retrospectively collected from 221 patients who underwent coronary CTA and invasive FFR within 14 days. For FFRCT determination, we employed a PP-based outlet BC model involving personalized physiological parameters and left ventricular mass (LVM) to quantify resting coronary blood flow. We evaluated the improvement in the diagnostic performance of FFRCT achieved by using the PP-based outlet BC model relative to the LVM-based model, with respect to the invasive FFR. Additionally, to evaluate the impact of the outlet truncation strategy on FFRCT, 68 vessels were randomly selected and analyzed independently by two operators using two different outlet truncation strategies at 1-month intervals. Results: The per-vessel diagnostic performance of the PP-based outlet BC model was improved, with invasive FFR as the reference, compared to the LVM-based model: (i) accuracy/sensitivity/specificity of 91.2%/90.4%/91.8% versus 86.5%/84.6%/87.6% for the entire dataset of 274 vessels; (ii) accuracy/sensitivity/specificity of 88.7%/82.4%/90.4% versus 82.4%/76.5%/84.0% for moderately stenosed lesions. The standardized outlet truncation strategy showed good repeatability, with a Kappa coefficient of 0.908. Conclusions: Our PP-based outlet BC model and standardized outlet truncation strategy can improve the diagnostic performance and repeatability of FFRCT.
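As a side note on the figures quoted above, accuracy, sensitivity and specificity follow their standard confusion-matrix definitions. The sketch below uses made-up per-vessel counts, chosen only so that the totals sum to 274 and roughly reproduce the reported percentages; they are not taken from the paper.

# Sketch of the standard per-vessel metrics against an invasive-FFR reference.
# The counts are illustrative, not the study data.
def diagnostic_metrics(tp, fp, tn, fn):
    accuracy = (tp + tn) / (tp + fp + tn + fn)
    sensitivity = tp / (tp + fn)   # true-positive rate
    specificity = tn / (tn + fp)   # true-negative rate
    return accuracy, sensitivity, specificity

acc, sen, spe = diagnostic_metrics(tp=94, fp=14, tn=156, fn=10)   # 274 vessels in total
print(f"accuracy={acc:.3f}, sensitivity={sen:.3f}, specificity={spe:.3f}")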
Incorporating Boundary Uncertainty into Loss Functions for Biomedical Image Segmentation. Yeung, M.; Yang, G.; Sala, E.; Schönlieb, C.; and Rundo, L. arXiv preprint arXiv:2111.00533. 2021.
@article{\n title = {Incorporating Boundary Uncertainty into Loss Functions for Biomedical Image Segmentation},\n type = {article},\n year = {2021},\n websites = {http://arxiv.org/abs/2111.00533},\n id = {f5a635b8-5761-3d07-bcf2-04767148be9f},\n created = {2024-01-13T08:14:16.952Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:15:15.601Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n source_type = {GEN},\n private_publication = {false},\n abstract = {Manual segmentation is used as the gold-standard for evaluating neural networks on automated image segmentation tasks. Due to considerable heterogeneity in shapes, colours and textures, demarcating object boundaries is particularly difficult in biomedical images, resulting in significant inter and intra-rater variability. Approaches, such as soft labelling and distance penalty term, apply a global transformation to the ground truth, redefining the loss function with respect to uncertainty. However, global operations are computationally expensive, and neither approach accurately reflects the uncertainty underlying manual annotation. In this paper, we propose the Boundary Uncertainty, which uses morphological operations to restrict soft labelling to object boundaries, providing an appropriate representation of uncertainty in ground truth labels, and may be adapted to enable robust model training where systematic manual segmentation errors are present. We incorporate Boundary Uncertainty with the Dice loss, achieving consistently improved performance across three well-validated biomedical imaging datasets compared to soft labelling and distance-weighted penalty. Boundary Uncertainty not only more accurately reflects the segmentation process, but it is also efficient, robust to segmentation errors and exhibits better generalisation.},\n bibtype = {article},\n author = {Yeung, Michael and Yang, Guang and Sala, Evis and Schönlieb, Carola-Bibiane and Rundo, Leonardo},\n journal = {arXiv preprint arXiv:2111.00533}\n}
Manual segmentation is used as the gold standard for evaluating neural networks on automated image segmentation tasks. Due to considerable heterogeneity in shapes, colours and textures, demarcating object boundaries is particularly difficult in biomedical images, resulting in significant inter- and intra-rater variability. Approaches such as soft labelling and distance penalty terms apply a global transformation to the ground truth, redefining the loss function with respect to uncertainty. However, global operations are computationally expensive, and neither approach accurately reflects the uncertainty underlying manual annotation. In this paper, we propose Boundary Uncertainty, which uses morphological operations to restrict soft labelling to object boundaries, providing an appropriate representation of uncertainty in ground-truth labels, and which may be adapted to enable robust model training where systematic manual segmentation errors are present. We incorporate Boundary Uncertainty with the Dice loss, achieving consistently improved performance across three well-validated biomedical imaging datasets compared to soft labelling and the distance-weighted penalty. Boundary Uncertainty not only more accurately reflects the segmentation process, but is also efficient, robust to segmentation errors, and exhibits better generalisation.
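A minimal sketch of the idea described in this abstract is given below, under the assumption that the boundary band is built with binary dilation and erosion and that the band width and soft value are free choices; the published method may differ in detail, so treat this as an illustration rather than the paper's implementation.

# Sketch: soften ground-truth labels only inside a morphological boundary band,
# then train with a soft Dice loss. Band width and the 0.5 value are assumptions.
import numpy as np
from scipy.ndimage import binary_dilation, binary_erosion

def boundary_soft_labels(mask, iterations=2, soft_value=0.5):
    band = binary_dilation(mask, iterations=iterations) ^ binary_erosion(mask, iterations=iterations)
    soft = mask.astype(float)
    soft[band] = soft_value
    return soft

def soft_dice_loss(pred, target, eps=1e-6):
    inter = (pred * target).sum()
    return 1.0 - (2.0 * inter + eps) / (pred.sum() + target.sum() + eps)

mask = np.zeros((64, 64), dtype=bool)
mask[20:44, 20:44] = True                                        # toy ground-truth object
target = boundary_soft_labels(mask)
pred = np.clip(mask + 0.1 * np.random.rand(64, 64), 0.0, 1.0)    # toy prediction
print("soft Dice loss:", soft_dice_loss(pred, target))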
A Parallel Imaging Coupled Generative Adversarial Network for Accelerated Multi-Channel MRI Reconstruction. Lv, J.; Wang, C.; and Yang, G. In International Society for Magnetic Resonance in Medicine, 2021.
@inproceedings{\n title = {A Parallel Imaging Coupled Generative Adversarial Network for Accelerated Multi-Channel MRI Reconstruction},\n type = {inproceedings},\n year = {2021},\n id = {af22d156-20b5-3f2d-8128-9bd0d47b5b30},\n created = {2024-01-13T08:14:17.096Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:16:47.380Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n source_type = {CONF},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Lv, Jun and Wang, Chengyan and Yang, Guang},\n booktitle = {International Society for Magnetic Resonance in Medicine}\n}
2020 (26)

In-vivo cardiac diffusion weighted image registration aided by AI semantic segmentation. Ferreira, P.; Martín, R.; Khalique, Z.; Scott, A.; Yang, G.; Nielles-Vallespin, S.; Pennell, D.; and Firmin, D. In International Society for Magnetic Resonance in Medicine, 2020.
@inproceedings{\n title = {In-vivo cardiac diffusion weighted image registration aided by AI semantic segmentation},\n type = {inproceedings},\n year = {2020},\n id = {f822577e-ef00-3615-a7a0-01c78a6ce574},\n created = {2024-01-13T05:46:21.056Z},\n file_attached = {false},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T12:10:01.709Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {ferreira_-vivo_2020},\n source_type = {inproceedings},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Ferreira, Pedro and Martı́n, Raquel and Khalique, Zohya and Scott, Andrew and Yang, Guang and Nielles-Vallespin, Sonia and Pennell, Dudley and Firmin, David},\n booktitle = {International Society for Magnetic Resonance in Medicine}\n}
Automating the ABCD Rule for Melanoma Detection: A Survey. Ali, A.; Li, J.; and Yang, G. IEEE Access, 83333-83346. 2020.
@article{\n title = {Automating the ABCD Rule for Melanoma Detection: A Survey},\n type = {article},\n year = {2020},\n pages = {83333-83346},\n id = {e26fd347-353f-33c6-843f-fb46959dbdc3},\n created = {2024-01-13T05:46:21.280Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:18:11.884Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {ali_automating_2020-1},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Ali, Abder-Rahman and Li, Jingpeng and Yang, Guang},\n journal = {IEEE Access}\n}
A Novel Fuzzy Multilayer Perceptron (F-MLP) for the Detection of Irregularity in Skin Lesion Border Using Dermoscopic Images. Ali, A.; Li, J.; Kanwal, S.; Yang, G.; Hussain, A.; and Jane O'Shea, S. Frontiers in Medicine. 2020.
@article{\n title = {A Novel Fuzzy Multilayer Perceptron (F-MLP) for the Detection of Irregularity in Skin Lesion Border Using Dermoscopic Images},\n type = {article},\n year = {2020},\n id = {82b608f0-310b-3f6e-b297-009baba16357},\n created = {2024-01-13T05:46:21.423Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:18:20.402Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {ali_novel_2020-1},\n source_type = {article},\n notes = {Publisher: Frontiers},\n private_publication = {false},\n bibtype = {article},\n author = {Ali, Abder-Rahman and Li, Jingpeng and Kanwal, Summrina and Yang, Guang and Hussain, Amir and Jane O'shea, Sally},\n journal = {Frontiers in Medicine}\n}
Systematic and Comprehensive Automated Ventricle Segmentation on Ventricle Images of the Elderly Patients: A Retrospective Study. Zhou, X.; Ye, Q.; Jiang, Y.; Wang, M.; Niu, Z.; Menpes-Smith, W.; Fang, E., F.; Liu, Z.; Xia, J.; and Yang, G. Frontiers in Aging Neuroscience. 2020.
@article{\n title = {Systematic and Comprehensive Automated Ventricle Segmentation on Ventricle Images of the Elderly Patients: A Retrospective Study},\n type = {article},\n year = {2020},\n id = {6eb8de11-cb26-3eaa-95f8-9326b63152d0},\n created = {2024-01-13T05:46:21.581Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:18:54.179Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {zhou_systematic_2020-1},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Zhou, Xi and Ye, Qinghao and Jiang, Yinghui and Wang, Minhao and Niu, Zhangming and Menpes-Smith, Wade and Fang, Evandro Fei and Liu, Zhi and Xia, Jun and Yang, Guang},\n journal = {Frontiers in Aging Neuroscience}\n}
Deep RetinaNet for Dynamic Left Ventricle Detection in Multiview Echocardiography Classification. Yang, M.; Xiao, X.; Liu, Z.; Sun, L.; Guo, W.; Cui, L.; Sun, D.; Zhang, P.; and Yang, G. Scientific Programming. 2020.
@article{\n title = {Deep RetinaNet for Dynamic Left Ventricle Detection in Multiview Echocardiography Classification},\n type = {article},\n year = {2020},\n id = {ce08b601-5684-309a-b4e4-e4579c38e0df},\n created = {2024-01-13T05:46:21.641Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:18:59.963Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {yang_deep_2020-1},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Yang, Meijun and Xiao, Xiao-Yan and Liu, Zhi and Sun, Longkun and Guo, Wei and Cui, Lizhen and Sun, Dianmin and Zhang, Pengfei and Yang, Guang},\n journal = {Scientific Programming}\n}
SARA-GAN: Self-Attention and Relative Average Discriminator Based Generative Adversarial Networks for Fast Compressed Sensing MRI Reconstruction. Yuan, Z.; Jiang, M.; Wang, Y.; Wei, B.; Li, Y.; Wang, P.; Menpes-Smith, W.; Niu, Z.; and Yang, G. Frontiers in Neuroinformatics. 2020.
@article{\n title = {SARA-GAN: Self-Attention and Relative Average Discriminator Based Generative Adversarial Networks for Fast Compressed Sensing MRI Reconstruction},\n type = {article},\n year = {2020},\n id = {7fa164dc-abba-3328-aefb-10e4f2f5aa72},\n created = {2024-01-13T05:46:21.835Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:19:18.682Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {yuan_sara-gan_2020},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Yuan, Zhenmou and Jiang, Mingfeng and Wang, Yaming and Wei, Bo and Li, Yongming and Wang, Pin and Menpes-Smith, Wade and Niu, Zhangming and Yang, Guang},\n journal = {Frontiers in Neuroinformatics}\n}
Comparison Study of Radiomics and Deep Learning Based Methods for Thyroid Nodules Classification Using Ultrasound Images. Wang, Y.; Yue, W.; Li, X.; Liu, S.; Guo, L.; Xu, H.; Zhang, H.; and Yang, G. IEEE Access, 52010-52017. 2020.
@article{\n title = {Comparison Study of Radiomics and Deep Learning Based Methods for Thyroid Nodules Classification Using Ultrasound Images},\n type = {article},\n year = {2020},\n pages = {52010-52017},\n id = {d2278f28-6638-3707-b7f9-7e173f659d9e},\n created = {2024-01-13T05:46:21.836Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:19:14.856Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {wang_comparison_2020},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Wang, Yongfeng and Yue, Wenwen and Li, Xiaolong and Liu, Shuyu and Guo, Lehang and Xu, Huixiong and Zhang, Heye and Yang, Guang},\n journal = {IEEE Access}\n}
Multimodal MRI to aid prediction of low-grade glioma growth characteristics. Howe, F.; Jones, T.; Rich, P.; Colman, J.; Yang, G.; Raschke, F.; Liang, V.; Denley, A.; and Barrick, T. In International Society for Magnetic Resonance in Medicine, 2020.
@inproceedings{\n title = {Multimodal MRI to aid prediction of low-grade glioma growth characteristics},\n type = {inproceedings},\n year = {2020},\n id = {3c05b816-21f9-3002-a5b6-455b5a3b0c9c},\n created = {2024-01-13T05:46:21.873Z},\n file_attached = {false},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T12:10:01.699Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {howe_multimodal_2020},\n source_type = {inproceedings},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Howe, Franklyn and Jones, Timothy and Rich, Philip and Colman, Jordan and Yang, Guang and Raschke, Felix and Liang, Venus and Denley, Alex and Barrick, Thomas},\n booktitle = {International Society for Magnetic Resonance in Medicine}\n}
MV-RAN: Multiview Recurrent Aggregation Network for Echocardiographic Sequences Segmentation and Full Cardiac Cycle Analysis. Li, M.; Wang, C.; Zhang, H.; and Yang, G. Computers in Biology and Medicine, 103728. 2020.
@article{\n title = {MV-RAN: Multiview Recurrent Aggregation Network for Echocardiographic Sequences Segmentation and Full Cardiac Cycle Analysis},\n type = {article},\n year = {2020},\n pages = {103728},\n id = {dede1a4e-e69a-304d-8278-533290061f6e},\n created = {2024-01-13T05:46:21.958Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:19:30.823Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {li_mv-ran_2020},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Li, Ming and Wang, Chengjia and Zhang, Heye and Yang, Guang},\n journal = {Computers in Biology and Medicine}\n}
Exploring Uncertainty Measures in Bayesian Deep Attentive Neural Networks for Prostate Zonal Segmentation. Liu, Y.; Yang, G.; Hosseiny, M.; Azadikhah, A.; Afshari Mirak, S.; Miao, Q.; Raman, S.; and Sung, K. IEEE Access, 151817-151828. 2020.
@article{\n title = {Exploring Uncertainty Measures in Bayesian Deep Attentive Neural Networks for Prostate Zonal Segmentation},\n type = {article},\n year = {2020},\n pages = {151817-151828},\n id = {2c7d8b72-f377-34fa-9082-f767b3d59023},\n created = {2024-01-13T05:46:22.110Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:20:20.051Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {liu_exploring_2020-1},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Liu, Yongkai and Yang, Guang and Hosseiny, Melina and Azadikhah, Afshin and Afshari Mirak, Sohrab and Miao, Qi and Raman, Steven and Sung, Kyunghyun},\n journal = {IEEE Access}\n}
Deep Attentive Wasserstein Generative Adversarial Networks for MRI Reconstruction with Recurrent Context-Awareness. Guo, Y.; Wang, C.; Zhang, H.; and Yang, G. In Medical Image Computing and Computer Assisted Intervention (MICCAI 2020), 2020.
@inproceedings{\n title = {Deep Attentive Wasserstein Generative Adversarial Networks for MRI Reconstruction with Recurrent Context-Awareness},\n type = {inproceedings},\n year = {2020},\n id = {91702e8b-0fc7-321e-b2d2-77b445041052},\n created = {2024-01-13T05:46:22.425Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:20:23.927Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {guo_deep_2020},\n source_type = {inproceedings},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Guo, Yifeng and Wang, Chengjia and Zhang, Heye and Yang, Guang},\n booktitle = {Medical Image Computing and Computer Assisted Intervention (MICCAI 2020)}\n}
Idiopathic Normal Pressure Hydrocephalus and Elderly Acquired Hydrocephalus: Evaluation with Cerebrospinal Fluid Flow and Ventricular Volume Parameters. He, W.; Zhou, X.; Long, J.; Xu, Q.; Huang, X.; Jiang, J.; Xia, J.; and Yang, G. Frontiers in Aging Neuroscience. 2020.
@article{\n title = {Idiopathic Normal Pressure Hydrocephalus and Elderly Acquired Hydrocephalus: Evaluation with Cerebrospinal Fluid Flow and Ventricular Volume Parameters},\n type = {article},\n year = {2020},\n id = {800c2117-fd8e-3825-a7d4-b73a8fbb7724},\n created = {2024-01-13T05:46:22.495Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:20:34.298Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {he_idiopathic_2020},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {He, Wen-Jie and Zhou, Xi and Long, Jia and Xu, Qi-Zhong and Huang, Xian-jian and Jiang, Jun and Xia, Jun and Yang, Guang},\n journal = {Frontiers in Aging Neuroscience}\n}
Weakly Supervised Deep Learning for COVID-19 Infection Detection and Classification from CT Images. Hu, S.; Gao, Y.; Niu, Z.; Jiang, Y.; Li, L.; Xiao, X.; Wang, M.; Fang, E., F.; Menpes-Smith, W.; Xia, J.; and others. IEEE Access, 118869-118883. 2020.
@article{\n title = {Weakly Supervised Deep Learning for COVID-19 Infection Detection and Classification from CT Images},\n type = {article},\n year = {2020},\n pages = {118869-118883},\n id = {0dafdd34-983d-3efd-8563-187f528d5f8f},\n created = {2024-01-13T05:46:23.188Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:21:23.373Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {hu_weakly_2020-1},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Hu, Shaoping and Gao, Yuan and Niu, Zhangming and Jiang, Yinghui and Li, Lao and Xiao, Xianglu and Wang, Minhao and Fang, Evandro Fei and Menpes-Smith, Wade and Xia, Jun and others, undefined},\n journal = {IEEE Access}\n}
Machine learning for COVID-19 detection and prognostication using chest radiographs and CT scans: a systematic methodological review. Roberts, M.; Driggs, D.; Thorpe, M.; Gilbey, J.; Yeung, M.; Ursprung, S.; Aviles-Rivero, A., I.; Etmann, C.; McCague, C.; Beer, L.; and others. arXiv preprint arXiv:2008.06388. 2020.
@article{\n title = {Machine learning for COVID-19 detection and prognostication using chest radiographs and CT scans: a systematic methodological review},\n type = {article},\n year = {2020},\n id = {3289a3de-8718-3a34-940b-fc91c8e94a88},\n created = {2024-01-13T05:46:23.313Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T12:15:27.383Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {roberts_machine_2020},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Roberts, Michael and Driggs, Derek and Thorpe, Matthew and Gilbey, Julian and Yeung, Michael and Ursprung, Stephan and Aviles-Rivero, Angelica I and Etmann, Christian and McCague, Cathal and Beer, Lucian and others, undefined},\n journal = {arXiv preprint arXiv:2008.06388}\n}
Simultaneous Left Atrium Anatomy and Scar Segmentations via Deep Learning in Multiview Information with Attention. Yang, G.; Chen, J.; Gao, Z.; Li, S.; Ni, H.; Angelini, E.; Wong, T.; Mohiaddin, R.; Nyktari, E.; Wage, R.; and others. Future Generation Computer Systems. 2020.
@article{\n title = {Simultaneous Left Atrium Anatomy and Scar Segmentations via Deep Learning in Multiview Information with Attention},\n type = {article},\n year = {2020},\n id = {0d0b8719-7321-32b0-9e17-7d4dfd452add},\n created = {2024-01-13T05:46:23.674Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:21:53.463Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {yang_simultaneous_2020-1},\n source_type = {article},\n notes = {Publisher: Elsevier},\n private_publication = {false},\n bibtype = {article},\n author = {Yang, Guang and Chen, Jun and Gao, Zhifan and Li, Shuo and Ni, Hao and Angelini, Elsa and Wong, Tom and Mohiaddin, Raad and Nyktari, Eva and Wage, Ricardo and others, undefined},\n journal = {Future Generation Computer Systems}\n}
Direct Quantification of Coronary Artery Stenosis through Hierarchical Attentive Multi-view Learning. Zhang, D.; Yang, G.; Zhao, S.; Zhang, Y.; Zhang, H.; Ghista, D.; and Li, S. IEEE Transactions on Medical Imaging. 2020.
@article{\n title = {Direct Quantification of Coronary Artery Stenosis through Hierarchical Attentive Multi-view Learning},\n type = {article},\n year = {2020},\n id = {1d1b6252-8fb0-30f6-9301-392b2669af93},\n created = {2024-01-13T05:46:24.289Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:22:23.739Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {zhang_direct_2020-1},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Zhang, Dong and Yang, Guang and Zhao, Shu and Zhang, Yangping and Zhang, Heye and Ghista, Dhanjoo and Li, Shuo},\n journal = {IEEE Transactions on Medical Imaging}\n}
SaliencyGAN: Deep Learning Semisupervised Salient Object Detection in the Fog of IoT. Wang, C.; Dong, S.; Zhao, X.; Papanastasiou, G.; Zhang, H.; and Yang, G. IEEE Transactions on Industrial Informatics, 16(4): 2667-2676. 2020.
@article{\n title = {SaliencyGAN: Deep Learning Semisupervised Salient Object Detection in the Fog of IoT},\n type = {article},\n year = {2020},\n keywords = {Convolutional neural networks (CNNs),Internet of Things (IoT),deep learning,generative adversarial network (GAN),salient object detection (SOD)},\n pages = {2667-2676},\n volume = {16},\n id = {2a3d44d4-384a-3d6a-a3b0-5c0be0fd88db},\n created = {2024-01-13T06:15:54.546Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:25:03.312Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {In modern Internet of Things (IoT), visual analysis and predictions are often performed by deep learning models. Salient object detection (SOD) is a fundamental preprocessing for these applications. Executing SOD on the fog devices is a challenging task due to the diversity of data and fog devices. To adopt convolutional neural networks (CNN) on fog-cloud infrastructures for SOD-based applications, we introduce a semisupervised adversarial learning method in this article. The proposed model, named as SaliencyGAN, is empowered by a novel concatenated generative adversarial network (GAN) framework with partially shared parameters. The backbone CNN can be chosen flexibly based on the specific devices and applications. In the meanwhile, our method uses both the labeled and unlabeled data from different problem domains for training. Using multiple popular benchmark datasets, we compared state-of-the-art baseline methods to our SaliencyGAN obtained with 10-100% labeled training data. SaliencyGAN gained performance comparable to the supervised baselines when the percentage of labeled data reached 30%, and outperformed the weakly supervised and unsupervised baselines. Furthermore, our ablation study shows that SaliencyGAN were more robust to the common 'mode missing' (or 'mode collapse') issue compared to the selected popular GAN models. The visualized ablation results have proved that SaliencyGAN learned a better estimation of data distributions. To the best of our knowledge, this is the first IoT-oriented semisupervised SOD method.},\n bibtype = {article},\n author = {Wang, Chengjia and Dong, Shizhou and Zhao, Xiaofeng and Papanastasiou, Giorgos and Zhang, Heye and Yang, Guang},\n doi = {10.1109/TII.2019.2945362},\n journal = {IEEE Transactions on Industrial Informatics},\n number = {4}\n}
In the modern Internet of Things (IoT), visual analysis and predictions are often performed by deep learning models. Salient object detection (SOD) is a fundamental preprocessing step for these applications. Executing SOD on fog devices is a challenging task due to the diversity of data and fog devices. To adopt convolutional neural networks (CNNs) on fog-cloud infrastructures for SOD-based applications, we introduce a semisupervised adversarial learning method in this article. The proposed model, named SaliencyGAN, is empowered by a novel concatenated generative adversarial network (GAN) framework with partially shared parameters. The backbone CNN can be chosen flexibly based on the specific devices and applications. Meanwhile, our method uses both labeled and unlabeled data from different problem domains for training. Using multiple popular benchmark datasets, we compared state-of-the-art baseline methods to our SaliencyGAN trained with 10-100% labeled training data. SaliencyGAN achieved performance comparable to the supervised baselines when the percentage of labeled data reached 30%, and outperformed the weakly supervised and unsupervised baselines. Furthermore, our ablation study shows that SaliencyGAN was more robust to the common 'mode missing' (or 'mode collapse') issue than the selected popular GAN models, and the visualised ablation results indicate that SaliencyGAN learned a better estimation of the data distributions. To the best of our knowledge, this is the first IoT-oriented semisupervised SOD method.
Annealing Genetic GAN for Minority Oversampling. Hao, J.; Wang, C.; Zhang, H.; and Yang, G. 31st British Machine Vision Conference, BMVC 2020, 1-12. 2020.
@article{\n title = {Annealing Genetic GAN for Minority Oversampling},\n type = {article},\n year = {2020},\n pages = {1-12},\n id = {72d2d0da-a872-3bcb-804c-b54249dca18b},\n created = {2024-01-13T06:15:54.662Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:02:58.742Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {The key to overcome class imbalance problems is to capture the distribution of minority class accurately. Generative Adversarial Networks (GANs) have shown some potentials to tackle class imbalance problems due to their capability of reproducing data distributions given ample training data samples. However, the scarce samples of one or more classes still pose a great challenge for GANs to learn accurate distributions for the minority classes. In this work, we propose an Annealing Genetic GAN (AGGAN) method, which aims to reproduce the distributions closest to the ones of the minority classes using only limited data samples. Our AGGAN renovates the training of GANs as an evolutionary process that incorporates the mechanism of simulated annealing. In particular, the generator uses different training strategies to generate multiple offspring and retain the best. Then, we use the Metropolis criterion in the simulated annealing to decide whether we should update the best offspring for the generator. As the Metropolis criterion allows a certain chance to accept the worse solutions, it enables our AGGAN steering away from the local optimum. According to both theoretical analysis and experimental studies on multiple imbalanced image datasets, we prove that the proposed training strategy can enable our AGGAN to reproduce the distributions of minority classes from scarce samples and provide an effective and robust solution for the class imbalance problem.},\n bibtype = {article},\n author = {Hao, Jingyu and Wang, Chengjia and Zhang, Heye and Yang, Guang},\n journal = {31st British Machine Vision Conference, BMVC 2020}\n}
The key to overcoming class imbalance problems is to capture the distribution of the minority class accurately. Generative Adversarial Networks (GANs) have shown potential for tackling class imbalance problems due to their capability of reproducing data distributions given ample training samples. However, the scarce samples of one or more classes still pose a great challenge for GANs to learn accurate distributions for the minority classes. In this work, we propose an Annealing Genetic GAN (AGGAN) method, which aims to reproduce the distributions closest to those of the minority classes using only limited data samples. Our AGGAN recasts the training of GANs as an evolutionary process that incorporates the mechanism of simulated annealing. In particular, the generator uses different training strategies to generate multiple offspring and retains the best. Then, we use the Metropolis criterion from simulated annealing to decide whether to update the best offspring for the generator. As the Metropolis criterion allows a certain chance of accepting worse solutions, it enables our AGGAN to steer away from local optima. Through both theoretical analysis and experimental studies on multiple imbalanced image datasets, we show that the proposed training strategy enables AGGAN to reproduce the distributions of minority classes from scarce samples and provides an effective and robust solution for the class imbalance problem.
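To make the acceptance step concrete, here is a minimal sketch of a Metropolis-style update under simulated annealing; the fitness score, temperature schedule and offspring generation are illustrative assumptions rather than the paper's exact procedure.

# Sketch of a Metropolis acceptance rule with annealing: a worse offspring can
# still replace the incumbent, with a probability that shrinks as T decreases.
import math
import random

def metropolis_accept(candidate, incumbent, temperature):
    # Higher fitness is better; always accept improvements.
    if candidate >= incumbent:
        return True
    return random.random() < math.exp((candidate - incumbent) / temperature)

best, temperature = 0.40, 1.0
for _ in range(20):
    offspring = best + random.uniform(-0.05, 0.10)   # stand-in for a trained offspring's score
    if metropolis_accept(offspring, best, temperature):
        best = offspring
    temperature *= 0.9                               # simple annealing schedule
print("final fitness:", round(best, 3))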
Automating in vivo cardiac diffusion tensor postprocessing with deep learning–based segmentation. Ferreira, P., F.; Martin, R., R.; Scott, A., D.; Khalique, Z.; Yang, G.; Nielles-Vallespin, S.; Pennell, D., J.; and Firmin, D., N. Magnetic Resonance in Medicine, 84(5): 2801-2814. 2020.
@article{\n title = {Automating in vivo cardiac diffusion tensor postprocessing with deep learning–based segmentation},\n type = {article},\n year = {2020},\n keywords = {cardiac,deep learning,diffusion tensor imaging,image processing,machine learning},\n pages = {2801-2814},\n volume = {84},\n id = {c5a5bd2e-66e2-367d-8c52-c7059c95d884},\n created = {2024-01-13T06:15:54.699Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:24:57.171Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Purpose: In this work we develop and validate a fully automated postprocessing framework for in vivo diffusion tensor cardiac magnetic resonance (DT-CMR) data powered by deep learning. Methods: A U-Net based convolutional neural network was developed and trained to segment the heart in short-axis DT-CMR images. This was used as the basis to automate and enhance several stages of the DT-CMR tensor calculation workflow, including image registration and removal of data corrupted with artifacts, and to segment the left ventricle. Previously collected and analyzed scans (348 healthy scans and 144 cardiomyopathy patient scans) were used to train and validate the U-Net. All data were acquired at 3 T with a STEAM-EPI sequence. The DT-CMR postprocessing and U-Net training/testing were performed with MATLAB and Python TensorFlow, respectively. Results: The U-Net achieved a median Dice coefficient of 0.93 [0.92, 0.94] for the segmentation of the left-ventricular myocardial region. The image registration of diffusion images improved with the U-Net segmentation (P <.0001), and the identification of corrupted images achieved an F1 score of 0.70 when compared with an experienced user. Finally, the resulting tensor measures showed good agreement between an experienced user and the fully automated method. Conclusion: The trained U-Net successfully automated the DT-CMR postprocessing, supporting real-time results and reducing human workload. The automatic segmentation of the heart improved image registration, resulting in improvements of the calculated DT parameters.},\n bibtype = {article},\n author = {Ferreira, Pedro F. and Martin, Raquel R. and Scott, Andrew D. and Khalique, Zohya and Yang, Guang and Nielles-Vallespin, Sonia and Pennell, Dudley J. and Firmin, David N.},\n doi = {10.1002/mrm.28294},\n journal = {Magnetic Resonance in Medicine},\n number = {5}\n}
Purpose: In this work we develop and validate a fully automated postprocessing framework for in vivo diffusion tensor cardiac magnetic resonance (DT-CMR) data powered by deep learning. Methods: A U-Net based convolutional neural network was developed and trained to segment the heart in short-axis DT-CMR images. This was used as the basis to automate and enhance several stages of the DT-CMR tensor calculation workflow, including image registration and removal of data corrupted with artifacts, and to segment the left ventricle. Previously collected and analyzed scans (348 healthy scans and 144 cardiomyopathy patient scans) were used to train and validate the U-Net. All data were acquired at 3 T with a STEAM-EPI sequence. The DT-CMR postprocessing and U-Net training/testing were performed with MATLAB and Python TensorFlow, respectively. Results: The U-Net achieved a median Dice coefficient of 0.93 [0.92, 0.94] for the segmentation of the left-ventricular myocardial region. The image registration of diffusion images improved with the U-Net segmentation (P <.0001), and the identification of corrupted images achieved an F1 score of 0.70 when compared with an experienced user. Finally, the resulting tensor measures showed good agreement between an experienced user and the fully automated method. Conclusion: The trained U-Net successfully automated the DT-CMR postprocessing, supporting real-time results and reducing human workload. The automatic segmentation of the heart improved image registration, resulting in improvements of the calculated DT parameters.
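The two headline numbers above are a Dice coefficient between segmentation masks and an F1 score for flagging corrupted frames. A small sketch of both metrics is given below; the masks and counts are toy values chosen for illustration, not the study data.

# Sketch of the two quoted figures of merit: Dice overlap between masks and the
# F1 score for corrupted-image detection. Inputs are toy values.
import numpy as np

def dice(pred_mask, ref_mask):
    inter = np.logical_and(pred_mask, ref_mask).sum()
    return 2.0 * inter / (pred_mask.sum() + ref_mask.sum())

def f1_score(tp, fp, fn):
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    return 2 * precision * recall / (precision + recall)

ref = np.zeros((32, 32), dtype=bool);  ref[8:24, 8:24] = True    # toy reference mask
pred = np.zeros((32, 32), dtype=bool); pred[9:25, 9:25] = True   # toy predicted mask
print("Dice:", round(dice(pred, ref), 3))
print("F1:", f1_score(tp=35, fp=15, fn=15))   # toy counts that happen to give 0.70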
A research agenda for ageing in China in the 21st century (2nd edition): Focusing on basic and translational research, long-term care, policy and social networks. Fang, E., F.; Xie, C.; Schenkel, J., A.; Wu, C.; Long, Q.; Cui, H.; Aman, Y.; Frank, J.; Liao, J.; Zou, H.; Wang, N., Y.; Wu, J.; Liu, X.; Li, T.; Fang, Y.; Niu, Z.; Yang, G.; Hong, J.; Wang, Q.; Chen, G.; Li, J.; Chen, H., Z.; Kang, L.; Su, H.; Gilmour, B., C.; Zhu, X.; Jiang, H.; He, N.; Tao, J.; Leng, S., X.; Tong, T.; and Woo, J. Ageing Research Reviews, 64(September). 2020.
@article{\n title = {A research agenda for ageing in China in the 21st century (2nd edition): Focusing on basic and translational research, long-term care, policy and social networks},\n type = {article},\n year = {2020},\n keywords = {Ageing policy,Dementia,Inflammageing,Oral ageing,Sexually transmitted diseases,Square dancing},\n volume = {64},\n id = {682f9820-bfcb-32f4-955e-3ccf3a46c22e},\n created = {2024-01-13T06:15:54.860Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:02:58.875Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {One of the key issues facing public healthcare is the global trend of an increasingly ageing society which continues to present policy makers and caregivers with formidable healthcare and socio-economic challenges. Ageing is the primary contributor to a broad spectrum of chronic disorders all associated with a lower quality of life in the elderly. In 2019, the Chinese population constituted 18 % of the world population, with 164.5 million Chinese citizens aged 65 and above (65+), and 26 million aged 80 or above (80+). China has become an ageing society, and as it continues to age it will continue to exacerbate the burden borne by current family and public healthcare systems. Major healthcare challenges involved with caring for the elderly in China include the management of chronic non-communicable diseases (CNCDs), physical frailty, neurodegenerative diseases, cardiovascular diseases, with emerging challenges such as providing sufficient dental care, combating the rising prevalence of sexually transmitted diseases among nursing home communities, providing support for increased incidences of immune diseases, and the growing necessity to provide palliative care for the elderly. At the governmental level, it is necessary to make long-term strategic plans to respond to the pressures of an ageing society, especially to establish a nationwide, affordable, annual health check system to facilitate early diagnosis and provide access to affordable treatments. China has begun work on several activities to address these issues including the recent completion of the of the Ten-year Health-Care Reform project, the implementation of the Healthy China 2030 Action Plan, and the opening of the National Clinical Research Center for Geriatric Disorders. There are also societal challenges, namely the shift from an extended family system in which the younger provide home care for their elderly family members, to the current trend in which young people are increasingly migrating towards major cities for work, increasing reliance on nursing homes to compensate, especially following the outcomes of the ‘one child policy’ and the ‘empty-nest elderly’ phenomenon. At the individual level, it is important to provide avenues for people to seek and improve their own knowledge of health and disease, to encourage them to seek medical check-ups to prevent/manage illness, and to find ways to promote modifiable health-related behaviors (social activity, exercise, healthy diets, reasonable diet supplements) to enable healthier, happier, longer, and more productive lives in the elderly. Finally, at the technological or treatment level, there is a focus on modern technologies to counteract the negative effects of ageing. 
Researchers are striving to produce drugs that can mimic the effects of ‘exercising more, eating less’, while other anti-ageing molecules from molecular gerontologists could help to improve ‘healthspan’ in the elderly. Machine learning, ‘Big Data’, and other novel technologies can also be used to monitor disease patterns at the population level and may be used to inform policy design in the future. Collectively, synergies across disciplines on policies, geriatric care, drug development, personal awareness, the use of big data, machine learning and personalized medicine will transform China into a country that enables the most for its elderly, maximizing and celebrating their longevity in the coming decades. This is the 2nd edition of the review paper (Fang EF et al., Ageing Re. Rev. 2015).},\n bibtype = {article},\n author = {Fang, Evandro F. and Xie, Chenglong and Schenkel, Joseph A. and Wu, Chenkai and Long, Qian and Cui, Honghua and Aman, Yahyah and Frank, Johannes and Liao, Jing and Zou, Huachun and Wang, Ninie Y. and Wu, Jing and Liu, Xiaoting and Li, Tao and Fang, Yuan and Niu, Zhangming and Yang, Guang and Hong, Jiangshui and Wang, Qian and Chen, Guobing and Li, Jun and Chen, Hou Zao and Kang, Lin and Su, Huanxing and Gilmour, Brian C. and Zhu, Xinqiang and Jiang, Hong and He, Na and Tao, Jun and Leng, Sean Xiao and Tong, Tanjun and Woo, Jean},\n doi = {10.1016/j.arr.2020.101174},\n journal = {Ageing Research Reviews},\n number = {September}\n}
One of the key issues facing public healthcare is the global trend of an increasingly ageing society which continues to present policy makers and caregivers with formidable healthcare and socio-economic challenges. Ageing is the primary contributor to a broad spectrum of chronic disorders all associated with a lower quality of life in the elderly. In 2019, the Chinese population constituted 18 % of the world population, with 164.5 million Chinese citizens aged 65 and above (65+), and 26 million aged 80 or above (80+). China has become an ageing society, and as it continues to age it will continue to exacerbate the burden borne by current family and public healthcare systems. Major healthcare challenges involved with caring for the elderly in China include the management of chronic non-communicable diseases (CNCDs), physical frailty, neurodegenerative diseases and cardiovascular diseases, with emerging challenges such as providing sufficient dental care, combating the rising prevalence of sexually transmitted diseases among nursing home communities, providing support for increased incidences of immune diseases, and the growing necessity to provide palliative care for the elderly. At the governmental level, it is necessary to make long-term strategic plans to respond to the pressures of an ageing society, especially to establish a nationwide, affordable, annual health check system to facilitate early diagnosis and provide access to affordable treatments. China has begun work on several activities to address these issues, including the recent completion of the Ten-year Health-Care Reform project, the implementation of the Healthy China 2030 Action Plan, and the opening of the National Clinical Research Center for Geriatric Disorders. There are also societal challenges, namely the shift from an extended family system in which the younger provide home care for their elderly family members, to the current trend in which young people are increasingly migrating towards major cities for work, increasing reliance on nursing homes to compensate, especially following the outcomes of the ‘one child policy’ and the ‘empty-nest elderly’ phenomenon. At the individual level, it is important to provide avenues for people to seek and improve their own knowledge of health and disease, to encourage them to seek medical check-ups to prevent/manage illness, and to find ways to promote modifiable health-related behaviors (social activity, exercise, healthy diets, reasonable diet supplements) to enable healthier, happier, longer, and more productive lives in the elderly. Finally, at the technological or treatment level, there is a focus on modern technologies to counteract the negative effects of ageing. Researchers are striving to produce drugs that can mimic the effects of ‘exercising more, eating less’, while other anti-ageing molecules from molecular gerontologists could help to improve ‘healthspan’ in the elderly. Machine learning, ‘Big Data’, and other novel technologies can also be used to monitor disease patterns at the population level and may be used to inform policy design in the future. Collectively, synergies across disciplines on policies, geriatric care, drug development, personal awareness, the use of big data, machine learning and personalized medicine will transform China into a country that enables the most for its elderly, maximizing and celebrating their longevity in the coming decades. This is the 2nd edition of the review paper (Fang EF et al., Ageing Re. Rev. 2015).
Atrial scar quantification via multi-scale CNN in the graph-cuts framework. Li, L.; Wu, F.; Yang, G.; Xu, L.; Wong, T.; Mohiaddin, R.; Firmin, D.; Keegan, J.; and Zhuang, X. Medical Image Analysis, 60. 2020.
@article{\n title = {Atrial scar quantification via multi-scale CNN in the graph-cuts framework},\n type = {article},\n year = {2020},\n keywords = {Atrial fibrillation,Left atrium,LGE MRI,Scar segme,Graph learning,Multi-scale CNN,Scar segmentation},\n volume = {60},\n publisher = {Elsevier B.V.},\n id = {3776320a-7c60-3ac7-a178-1d208a1251a6},\n created = {2024-01-13T06:15:55.039Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T07:02:58.882Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Late gadolinium enhancement magnetic resonance imaging (LGE MRI) appears to be a promising alternative for scar assessment in patients with atrial fibrillation (AF). Automating the quantification and analysis of atrial scars can be challenging due to the low image quality. In this work, we propose a fully automated method based on the graph-cuts framework, where the potentials of the graph are learned on a surface mesh of the left atrium (LA) using a multi-scale convolutional neural network (MS-CNN). For validation, we have included fifty-eight images with manual delineations. MS-CNN, which can efficiently incorporate both the local and global texture information of the images, has been shown to evidently improve the segmentation accuracy of the proposed graph-cuts based method. The segmentation could be further improved when the contribution between the t-link and n-link weights of the graph is balanced. The proposed method achieves a mean accuracy of 0.856 ± 0.033 and mean Dice score of 0.702 ± 0.071 for LA scar quantification. Compared to the conventional methods, which are based on the manual delineation of LA for initialization, our method is fully automatic and has demonstrated significantly better Dice score and accuracy (p < 0.01). The method is promising and can be potentially useful in diagnosis and prognosis of AF.},\n bibtype = {article},\n author = {Li, Lei and Wu, Fuping and Yang, Guang and Xu, Lingchao and Wong, Tom and Mohiaddin, Raad and Firmin, David and Keegan, Jennifer and Zhuang, Xiahai},\n doi = {10.1016/j.media.2019.101595},\n journal = {Medical Image Analysis}\n}
Late gadolinium enhancement magnetic resonance imaging (LGE MRI) appears to be a promising alternative for scar assessment in patients with atrial fibrillation (AF). Automating the quantification and analysis of atrial scars can be challenging due to the low image quality. In this work, we propose a fully automated method based on the graph-cuts framework, where the potentials of the graph are learned on a surface mesh of the left atrium (LA) using a multi-scale convolutional neural network (MS-CNN). For validation, we have included fifty-eight images with manual delineations. MS-CNN, which can efficiently incorporate both the local and global texture information of the images, has been shown to evidently improve the segmentation accuracy of the proposed graph-cuts based method. The segmentation could be further improved when the contribution between the t-link and n-link weights of the graph is balanced. The proposed method achieves a mean accuracy of 0.856 ± 0.033 and mean Dice score of 0.702 ± 0.071 for LA scar quantification. Compared to the conventional methods, which are based on the manual delineation of LA for initialization, our method is fully automatic and has demonstrated significantly better Dice score and accuracy (p < 0.01). The method is promising and can be potentially useful in diagnosis and prognosis of AF.
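For context on the t-link/n-link balance mentioned above, the energy minimised by a two-terminal graph cut is typically written in the standard unary-plus-pairwise form below. This is the general form only; the specific potentials in this paper are learned on the LA surface mesh by the MS-CNN.

% Standard graph-cut energy: t-links give the unary (data) term, n-links the
% pairwise (smoothness) term; \lambda balances their contributions.
\[
E(S) \;=\; \sum_{p \in \mathcal{P}} U_p(S_p)
\;+\; \lambda \sum_{(p,q) \in \mathcal{N}} V_{pq}(S_p, S_q)
\]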
The NAD+-mitophagy axis in healthy longevity and in artificial intelligence-based clinical applications. Aman, Y.; Frank, J.; Lautrup, S., H.; Matysek, A.; Niu, Z.; Yang, G.; Shi, L.; Bergersen, L., H.; Storm-Mathisen, J.; Rasmussen, L., J.; Bohr, V., A.; Nilsen, H.; Fang, E., F.; and others. Mechanisms of Ageing and Development, 185(November 2019): 111194. 2020.
@article{\n title = {The NAD+-mitophagy axis in healthy longevity and in artificial intelligence-based clinical applications},\n type = {article},\n year = {2020},\n keywords = {Age-related diseases,Ageing,Alzheimer's disease,Artificial intelligence,Mitophagy,NAD+},\n pages = {111194},\n volume = {185},\n websites = {https://doi.org/10.1016/j.mad.2019.111194},\n publisher = {Elsevier},\n id = {ffc232fa-5948-3833-ac2c-bcaade17422c},\n created = {2024-01-13T08:14:14.062Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:18:16.815Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {aman_nad-mitophagy_2019-1},\n source_type = {article},\n private_publication = {false},\n abstract = {Nicotinamide adenine dinucleotide (NAD+) is an important natural molecule involved in fundamental biological processes, including the TCA cycle, OXPHOS, β-oxidation, and is a co-factor for proteins promoting healthy longevity. NAD+ depletion is associated with the hallmarks of ageing and may contribute to a wide range of age-related diseases including metabolic disorders, cancer, and neurodegenerative diseases. One of the central pathways by which NAD+ promotes healthy ageing is through regulation of mitochondrial homeostasis via mitochondrial biogenesis and the clearance of damaged mitochondria via mitophagy. Here, we highlight the contribution of the NAD+-mitophagy axis to ageing and age-related diseases, and evaluate how boosting NAD+ levels may emerge as a promising therapeutic strategy to counter ageing as well as neurodegenerative diseases including Alzheimer's disease. The potential use of artificial intelligence to understand the roles and molecular mechanisms of the NAD+-mitophagy axis in ageing is discussed, including possible applications in drug target identification and validation, compound screening and lead compound discovery, biomarker development, as well as efficacy and safety assessment. Advances in our understanding of the molecular and cellular roles of NAD+ in mitophagy will lead to novel approaches for facilitating healthy mitochondrial homoeostasis that may serve as a promising therapeutic strategy to counter ageing-associated pathologies and/or accelerated ageing.},\n bibtype = {article},\n author = {Aman, Yahyah and Frank, Johannes and Lautrup, Sofie Hindkjær and Matysek, Adrian and Niu, Zhangming and Yang, Guang and Shi, Liu and Bergersen, Linda H. and Storm-Mathisen, Jon and Rasmussen, Lene J. and Bohr, Vilhelm A. and Nilsen, Hilde and Fang, Evandro F. and others, undefined},\n doi = {10.1016/j.mad.2019.111194},\n journal = {Mechanisms of Ageing and Development},\n number = {November 2019}\n}
\n
\n\n\n
\n Nicotinamide adenine dinucleotide (NAD+) is an important natural molecule involved in fundamental biological processes, including the TCA cycle, OXPHOS, β-oxidation, and is a co-factor for proteins promoting healthy longevity. NAD+ depletion is associated with the hallmarks of ageing and may contribute to a wide range of age-related diseases including metabolic disorders, cancer, and neurodegenerative diseases. One of the central pathways by which NAD+ promotes healthy ageing is through regulation of mitochondrial homeostasis via mitochondrial biogenesis and the clearance of damaged mitochondria via mitophagy. Here, we highlight the contribution of the NAD+-mitophagy axis to ageing and age-related diseases, and evaluate how boosting NAD+ levels may emerge as a promising therapeutic strategy to counter ageing as well as neurodegenerative diseases including Alzheimer's disease. The potential use of artificial intelligence to understand the roles and molecular mechanisms of the NAD+-mitophagy axis in ageing is discussed, including possible applications in drug target identification and validation, compound screening and lead compound discovery, biomarker development, as well as efficacy and safety assessment. Advances in our understanding of the molecular and cellular roles of NAD+ in mitophagy will lead to novel approaches for facilitating healthy mitochondrial homoeostasis that may serve as a promising therapeutic strategy to counter ageing-associated pathologies and/or accelerated ageing.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Salient Object Detection in the Distributed Cloud-Edge Intelligent Network.\n \n \n \n \n\n\n \n Gao, Z.; Dong, S.; Sun, S.; Wang, X.; Yang, G.; Wu, W.; Li, S.; Zhang, H.; and Albuquerque, V., H., C.\n\n\n \n\n\n\n IEEE Network. 2020.\n \n\n\n\n
\n
@article{\n title = {Salient Object Detection in the Distributed Cloud-Edge Intelligent Network},\n type = {article},\n year = {2020},\n id = {513ba656-976a-3682-8e12-8b7ff25d1676},\n created = {2024-01-13T08:14:16.390Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:15:05.701Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n source_type = {JOUR},\n private_publication = {false},\n bibtype = {article},\n author = {Gao, Zhifan and Dong, Shizhou and Sun, Shanhui and Wang, Xin and Yang, Guang and Wu, Wanqing and Li, Shuo and Zhang, Heye and Albuquerque, Victor Hugo C},\n journal = {IEEE Network}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A machine learning approach to automatic detection of irregularity in skin lesion border using dermoscopic images.\n \n \n \n \n\n\n \n Ali, A.; Li, J.; Yang, G.; and O’Shea, S., J.\n\n\n \n\n\n\n PeerJ Computer Science. 2020.\n \n\n\n\n
\n
@article{\n title = {A machine learning approach to automatic detection of irregularity in skin lesion border using dermoscopic images},\n type = {article},\n year = {2020},\n id = {74b8f943-f265-38c4-b09d-ecc1e5716a48},\n created = {2024-01-13T08:14:16.394Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:15:01.351Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {ali_machine_2020-1},\n source_type = {JOUR},\n private_publication = {false},\n bibtype = {article},\n author = {Ali, Abder-Rahman and Li, Jingpeng and Yang, Guang and O’Shea, Sally Jane},\n journal = {PeerJ Computer Science}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Catheter ablation vs. thoracoscopic surgical ablation in long-standing persistent atrial fibrillation: CASA-AF randomized controlled trial.\n \n \n \n \n\n\n \n Haldar, S.; Khan, H., R.; Boyalla, V.; Kralj-Hans, I.; Jones, S.; Lord, J.; Onyimadu, O.; Satishkumar, A.; Bahrami, T.; De Souza, A.; and others\n\n\n \n\n\n\n European Heart Journal. 2020.\n \n\n\n\n
\n
@article{\n title = {Catheter ablation vs. thoracoscopic surgical ablation in long-standing persistent atrial fibrillation: CASA-AF randomized controlled trial},\n type = {article},\n year = {2020},\n id = {e8cf774b-5892-30df-bf3f-d6f2305c43e6},\n created = {2024-01-13T08:14:16.893Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:18:04.608Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {haldar_catheter_2020},\n source_type = {JOUR},\n private_publication = {false},\n bibtype = {article},\n author = {Haldar, Shouvik and Khan, Habib Rehman and Boyalla, Vennela and Kralj-Hans, Ines and Jones, Simon and Lord, Joanne and Onyimadu, Oluchukwu and Satishkumar, Anitha and Bahrami, Toufan and De Souza, Anthony and others, undefined},\n journal = {European Heart Journal}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n USR-Net: A Simple Unsupervised Single-Image Super-Resolution Method for Late Gadolinium Enhancement CMR.\n \n \n \n\n\n \n Zhu, J.; Yang, G.; Wong, T.; Mohiaddin, R.; Firmin, D.; Keegan, J.; and Lio, P.\n\n\n \n\n\n\n In International Society for Magnetic Resonance in Medicine, 2020. \n \n\n\n\n
\n
@inproceedings{\n title = {USR-Net: A Simple Unsupervised Single-Image Super-Resolution Method for Late Gadolinium Enhancement CMR},\n type = {inproceedings},\n year = {2020},\n id = {f20f02f5-05d5-3b74-8ed1-cb1caf5e9810},\n created = {2024-01-13T12:10:01.125Z},\n file_attached = {false},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T12:10:01.125Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n source_type = {CONF},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Zhu, Jin and Yang, Guang and Wong, Tom and Mohiaddin, Raad and Firmin, David and Keegan, Jennifer and Lio, Pietro},\n booktitle = {International Society for Magnetic Resonance in Medicine}\n}
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2019\n \n \n (18)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n A ROI Focused Multi-Scale Super-Resolution Method for the Diffusion Tensor Cardiac Magnetic Resonance.\n \n \n \n\n\n \n Zhu, J.; Yang, G.; Ferreira, P.; Scott, A.; Nielles-Vallespin, S.; Keegan, J.; Pennell, D.; Lio, P.; and Firmin, D.\n\n\n \n\n\n\n In International Society for Magnetic Resonance in Medicine, 2019. \n \n\n\n\n
\n
@inproceedings{\n title = {A ROI Focused Multi-Scale Super-Resolution Method for the Diffusion Tensor Cardiac Magnetic Resonance},\n type = {inproceedings},\n year = {2019},\n id = {0645a9d1-faf5-3515-b724-87c70bdb0c3a},\n created = {2024-01-13T05:46:20.970Z},\n file_attached = {false},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T12:10:01.687Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {zhu_roi_2019},\n source_type = {inproceedings},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Zhu, Jin and Yang, Guang and Ferreira, Pedro and Scott, Andrew and Nielles-Vallespin, Sonia and Keegan, Jennifer and Pennell, Dudley and Lio, Pietro and Firmin, David},\n booktitle = {International Society for Magnetic Resonance in Medicine}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Deep Learning Based Approach to Skin Lesion Border Extraction With a Novel Edge Detector in Dermoscopy Images.\n \n \n \n \n\n\n \n Ali, A.; Li, J.; O'Shea, S., J.; Yang, G.; Trappenberg, T.; and Ye, X.\n\n\n \n\n\n\n In IEEE International Joint Conference on Neural Networks, 2019. \n \n\n\n\n
\n
@inproceedings{\n title = {A Deep Learning Based Approach to Skin Lesion Border Extraction With a Novel Edge Detector in Dermoscopy Images},\n type = {inproceedings},\n year = {2019},\n id = {2666d112-3f6b-364d-aa33-225487c62a72},\n created = {2024-01-13T05:46:21.037Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:18:08.909Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {ali_deep_2019-1},\n source_type = {inproceedings},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Ali, Abder-Rahman and Li, Jingpeng and O'Shea, Sally Jane and Yang, Guang and Trappenberg, Thomas and Ye, Xujiong},\n booktitle = {IEEE International Joint Conference on Neural Networks}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Direct Quantification for Coronary Artery Stenosis Using Multiview Learning.\n \n \n \n \n\n\n \n Zhang, D.; Yang, G.; Zhao, S.; Zhang, Y.; Zhang, H.; and Li, S.\n\n\n \n\n\n\n In Medical Image Computing and Computer Assisted Intervention (MICCAI 2019), 2019. \n \n\n\n\n
\n
@inproceedings{\n title = {Direct Quantification for Coronary Artery Stenosis Using Multiview Learning},\n type = {inproceedings},\n year = {2019},\n id = {0ffbd17d-495f-3bdf-b850-793201bf5be1},\n created = {2024-01-13T05:46:21.304Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:18:14.103Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {zhang_direct_2019-1},\n source_type = {inproceedings},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Zhang, Dong and Yang, Guang and Zhao, Shu and Zhang, Yanping and Zhang, Heye and Li, Shuo},\n booktitle = {Medical Image Computing and Computer Assisted Intervention (MICCAI 2019)}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Automatic Prostate Zonal Segmentation Using Fully Convolutional Network with Feature Pyramid Attention.\n \n \n \n \n\n\n \n Liu, Y.; Yang, G.; Mirak, S., A.; Hosseiny, M.; Azadikhah, A.; Zhong, X.; Reiter, R.; Lee, Y.; Raman, S.; and Sung, K.\n\n\n \n\n\n\n IEEE Access,163626-163632. 2019.\n \n\n\n\n
\n
@article{\n title = {Automatic Prostate Zonal Segmentation Using Fully Convolutional Network with Feature Pyramid Attention},\n type = {article},\n year = {2019},\n pages = {163626-163632},\n id = {923ed8c6-af8f-3f18-a087-345aa9b42d89},\n created = {2024-01-13T05:46:21.548Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:18:51.294Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {liu_automatic_2019},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Liu, Yongkai and Yang, Guang and Mirak, Sohrab Afshari and Hosseiny, Melina and Azadikhah, Afshin and Zhong, Xinran and Reiter, Robert and Lee, Yeejin and Raman, Steven and Sung, Kyunghyun},\n journal = {IEEE Access}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Discriminative Consistent Domain Generation for Semi-supervised Learning.\n \n \n \n \n\n\n \n Chen, J.; Zhang, H.; Zhang, Y.; Zhao, S.; Mohiaddin, R.; Wong, T.; Firmin, D.; Yang, G.; and Keegan, J.\n\n\n \n\n\n\n In Medical Image Computing and Computer Assisted Intervention (MICCAI 2019), 2019. \n \n\n\n\n
\n
@inproceedings{\n title = {Discriminative Consistent Domain Generation for Semi-supervised Learning},\n type = {inproceedings},\n year = {2019},\n id = {427772bd-bbc4-34fc-bfd4-87135372307a},\n created = {2024-01-13T05:46:22.349Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:20:27.136Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {chen_discriminative_2019-1},\n source_type = {inproceedings},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Chen, Jun and Zhang, Heye and Zhang, Yanping and Zhao, Shu and Mohiaddin, Raad and Wong, Tom and Firmin, David and Yang, Guang and Keegan, Jennifer},\n booktitle = {Medical Image Computing and Computer Assisted Intervention (MICCAI 2019)}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Evaluation of Algorithms for Multi-Modality Whole Heart Segmentation: An Open-Access Grand Challenge.\n \n \n \n \n\n\n \n Zhuang, X.; Li, L.; Payer, C.; Stern, D.; Urschler, M.; Heinrich, M., P.; Oster, J.; Wang, C.; Smedby, O.; Bian, C.; and others\n\n\n \n\n\n\n Medical Image Analysis. 2019.\n \n\n\n\n
\n
@article{\n title = {Evaluation of Algorithms for Multi-Modality Whole Heart Segmentation: An Open-Access Grand Challenge},\n type = {article},\n year = {2019},\n id = {56db9efa-4f78-3832-98f8-6e42c4e01506},\n created = {2024-01-13T05:46:23.117Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:21:28.218Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {zhuang_evaluation_2019},\n source_type = {article},\n private_publication = {false},\n bibtype = {article},\n author = {Zhuang, Xiahai and Li, Lei and Payer, Christian and Stern, Darko and Urschler, Martin and Heinrich, Mattias P and Oster, Julien and Wang, Chunliang and Smedby, Orjan and Bian, Cheng and others, undefined},\n journal = {Medical Image Analysis}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n A Deep Learning Based Left Atrium Anatomy Segmentation and Scar Delineation in 3D Late Gadolinium Enhanced CMR Images.\n \n \n \n\n\n \n Yang, G.; Chen, J.; Gao, Z.; Ni, H.; Angelini, E.; Wong, T.; Mohiaddin, R.; Nyktari, E.; Wage, R.; Xu, L.; and others\n\n\n \n\n\n\n In International Society for Magnetic Resonance in Medicine, 2019. \n \n\n\n\n
\n
@inproceedings{\n title = {A Deep Learning Based Left Atrium Anatomy Segmentation and Scar Delineation in 3D Late Gadolinium Enhanced CMR Images},\n type = {inproceedings},\n year = {2019},\n id = {aa5c234d-6dc0-36ef-a1ab-22234b582056},\n created = {2024-01-13T05:46:23.817Z},\n file_attached = {false},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T12:10:01.695Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {yang_deep_2019},\n source_type = {inproceedings},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Yang, Guang and Chen, Jun and Gao, Zhifan and Ni, Hao and Angelini, Elsa and Wong, Tom and Mohiaddin, Raad and Nyktari, Eva and Wage, Ricardo and Xu, Lei and others, undefined},\n booktitle = {International Society for Magnetic Resonance in Medicine}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Two-Stage U-Net Model for 3D Multi-class Segmentation on Full-Resolution Cardiac Data.\n \n \n \n \n\n\n \n Wang, C.; MacGillivray, T.; Macnaught, G.; Yang, G.; and Newby, D.\n\n\n \n\n\n\n In International Workshop on Statistical Atlases and Computational Models of the Heart, pages 191-199, 2019. Springer\n \n\n\n\n
\n
@inproceedings{\n title = {A Two-Stage U-Net Model for 3D Multi-class Segmentation on Full-Resolution Cardiac Data},\n type = {inproceedings},\n year = {2019},\n pages = {191-199},\n publisher = {Springer},\n id = {1c2e2d40-5c61-3314-a943-6e485619d4b2},\n created = {2024-01-13T05:46:28.485Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:22:26.285Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {wang_two-stage_2018-1},\n source_type = {inproceedings},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Wang, Chengjia and MacGillivray, Tom and Macnaught, Gillian and Yang, Guang and Newby, David},\n booktitle = {International Workshop on Statistical Atlases and Computational Models of the Heart}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Tissue-type mapping of gliomas.\n \n \n \n \n\n\n \n Raschke, F.; Barrick, T., R.; Jones, T., L.; Yang, G.; Ye, X.; and Howe, F., A.\n\n\n \n\n\n\n NeuroImage: Clinical, 21(July 2018): 101648. 2019.\n \n\n\n\n
\n
@article{\n title = {Tissue-type mapping of gliomas},\n type = {article},\n year = {2019},\n keywords = {Glioma,Magnetic resonance spectroscopy (MRS),Multimodal MRI,Nosologic imaging,Pattern recognition},\n pages = {101648},\n volume = {21},\n websites = {https://doi.org/10.1016/j.nicl.2018.101648},\n publisher = {Elsevier},\n id = {262a215c-aba9-35e4-b424-262cb2c2b8c0},\n created = {2024-01-13T06:15:55.531Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:23:29.302Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Purpose: To develop a statistical method of combining multimodal MRI (mMRI) of adult glial brain tumours to generate tissue heterogeneity maps that indicate tumour grade and infiltration margins. Materials and methods: We performed a retrospective analysis of mMRI from patients with histological diagnosis of glioma (n = 25). 1 H Magnetic Resonance Spectroscopic Imaging (MRSI) was used to label regions of “pure” low- or high-grade tumour across image types. Normal brain and oedema characteristics were defined from healthy controls (n = 10) and brain metastasis patients (n = 10) respectively. Probability density distributions (PDD) for each tissue type were extracted from intensity normalised proton density and T 2 -weighted images, and p and q diffusion maps. Superpixel segmentation and Bayesian inference was used to produce whole-brain tissue-type maps. Results: Total lesion volumes derived automatically from tissue-type maps correlated with those from manual delineation (p < 0.001, r = 0.87). Large high-grade volumes were determined in all grade III & IV (n = 16) tumours, in grade II gemistocytic rich astrocytomas (n = 3) and one astrocytoma with a histological diagnosis of grade II. For patients with known outcome (n = 20), patients with survival time < 2 years (3 grade II, 2 grade III and 10 grade IV) had a high-grade volume significantly greater than zero (Wilcoxon signed rank p < 0.0001) and also significantly greater high grade volume than the 5 grade II patients with survival >2 years (Mann Witney p = 0.0001). Regions classified from mMRI as oedema had non-tumour-like 1 H MRS characteristics. Conclusions: 1 H MRSI can label tumour tissue types to enable development of a mMRI tissue type mapping algorithm, with potential to aid management of patients with glial tumours.},\n bibtype = {article},\n author = {Raschke, Felix and Barrick, Thomas R. and Jones, Timothy L. and Yang, Guang and Ye, Xujiong and Howe, Franklyn A.},\n doi = {10.1016/j.nicl.2018.101648},\n journal = {NeuroImage: Clinical},\n number = {July 2018}\n}
\n
\n\n\n
\n Purpose: To develop a statistical method of combining multimodal MRI (mMRI) of adult glial brain tumours to generate tissue heterogeneity maps that indicate tumour grade and infiltration margins. Materials and methods: We performed a retrospective analysis of mMRI from patients with a histological diagnosis of glioma (n = 25). 1H Magnetic Resonance Spectroscopic Imaging (MRSI) was used to label regions of “pure” low- or high-grade tumour across image types. Normal brain and oedema characteristics were defined from healthy controls (n = 10) and brain metastasis patients (n = 10), respectively. Probability density distributions (PDD) for each tissue type were extracted from intensity-normalised proton density and T2-weighted images, and p and q diffusion maps. Superpixel segmentation and Bayesian inference were used to produce whole-brain tissue-type maps. Results: Total lesion volumes derived automatically from tissue-type maps correlated with those from manual delineation (p < 0.001, r = 0.87). Large high-grade volumes were determined in all grade III & IV (n = 16) tumours, in grade II gemistocytic rich astrocytomas (n = 3) and in one astrocytoma with a histological diagnosis of grade II. For patients with known outcome (n = 20), patients with survival time < 2 years (3 grade II, 2 grade III and 10 grade IV) had a high-grade volume significantly greater than zero (Wilcoxon signed rank p < 0.0001) and also a significantly greater high-grade volume than the 5 grade II patients with survival > 2 years (Mann-Whitney p = 0.0001). Regions classified from mMRI as oedema had non-tumour-like 1H MRS characteristics. Conclusions: 1H MRSI can label tumour tissue types to enable development of an mMRI tissue-type mapping algorithm, with potential to aid management of patients with glial tumours.\n
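The superpixel-plus-Bayesian-inference step summarised above can be sketched roughly as follows. This is a schematic only: the paper estimates probability density distributions from MRSI-labelled regions, whereas here each tissue class is assumed to follow a single multivariate Gaussian over per-superpixel mean feature vectors, and all function and variable names are illustrative.

import numpy as np
from scipy.stats import multivariate_normal

def fit_tissue_models(features_by_class):
    # features_by_class: dict mapping tissue label -> (N, D) array of feature vectors
    # (e.g. normalised PD, T2-weighted, p and q diffusion values of labelled voxels).
    return {c: multivariate_normal(f.mean(axis=0), np.cov(f, rowvar=False))
            for c, f in features_by_class.items()}

def classify_superpixels(superpixel_feats, models, priors):
    # superpixel_feats: (M, D) mean feature vector per superpixel.
    # Returns the maximum a posteriori tissue label for each superpixel.
    labels = list(models)
    log_post = np.stack([np.log(priors[c]) + models[c].logpdf(superpixel_feats)
                         for c in labels], axis=1)
    return np.array(labels)[np.argmax(log_post, axis=1)]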
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Automatic skin lesion segmentation by coupling deep fully convolutional networks and shallow network with textons.\n \n \n \n \n\n\n \n Zhang, L.; Yang, G.; and Ye, X.\n\n\n \n\n\n\n Journal of Medical Imaging, 6(02): 1. 2019.\n \n\n\n\n
\n
@article{\n title = {Automatic skin lesion segmentation by coupling deep fully convolutional networks and shallow network with textons},\n type = {article},\n year = {2019},\n keywords = {15,19,2018,2019,29,accepted for publication mar,fully convolutional networks,melanoma,paper 18210rr received sep,published online apr,skin lesion segmentation,textons},\n pages = {1},\n volume = {6},\n id = {d1e5e722-24cd-3b43-90b5-a73af1054919},\n created = {2024-01-13T06:15:55.837Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:24:26.101Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Segmentation of skin lesions is an important step in computer-aided diagnosis of melanoma; it is also a very challenging task due to fuzzy lesion boundaries and heterogeneous lesion textures. We present a fully automatic method for skin lesion segmentation based on deep fully convolutional networks (FCNs). We investigate a shallow encoding network to model clinically valuable prior knowledge, in which spatial filters simulating simple cell receptive fields function in the primary visual cortex (V1) is considered. An effective fusing strategy using skip connections and convolution operators is then leveraged to couple prior knowledge encoded via shallow network with hierarchical data-driven features learned from the FCNs for detailed segmentation of the skin lesions. To our best knowledge, this is the first time the domain-specific hand craft features have been built into a deep network trained in an end-to-end manner for skin lesion segmentation. The method has been evaluated on both ISBI 2016 and ISBI 2017 skin lesion challenge datasets. We provide comparative evidence to demonstrate that our newly designed network can gain accuracy for lesion segmentation by coupling the prior knowledge encoded by the shallow network with the deep FCNs. Our method is robust without the need for data augmentation or comprehensive parameter tuning, and the experimental results show great promise of the method with effective model generalization compared to other state-of-the-art-methods.},\n bibtype = {article},\n author = {Zhang, Lei and Yang, Guang and Ye, Xujiong},\n doi = {10.1117/1.jmi.6.2.024001},\n journal = {Journal of Medical Imaging},\n number = {02}\n}
\n
\n\n\n
\n Segmentation of skin lesions is an important step in computer-aided diagnosis of melanoma; it is also a very challenging task due to fuzzy lesion boundaries and heterogeneous lesion textures. We present a fully automatic method for skin lesion segmentation based on deep fully convolutional networks (FCNs). We investigate a shallow encoding network to model clinically valuable prior knowledge, in which spatial filters simulating the receptive fields of simple cells in the primary visual cortex (V1) are considered. An effective fusing strategy using skip connections and convolution operators is then leveraged to couple the prior knowledge encoded by the shallow network with the hierarchical data-driven features learned by the FCNs for detailed segmentation of the skin lesions. To the best of our knowledge, this is the first time domain-specific hand-crafted features have been built into a deep network trained in an end-to-end manner for skin lesion segmentation. The method has been evaluated on both the ISBI 2016 and ISBI 2017 skin lesion challenge datasets. We provide comparative evidence to demonstrate that our newly designed network gains accuracy for lesion segmentation by coupling the prior knowledge encoded by the shallow network with the deep FCNs. Our method is robust without the need for data augmentation or comprehensive parameter tuning, and the experimental results show great promise for the method, with effective model generalization compared to other state-of-the-art methods.\n
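A minimal PyTorch sketch of the kind of shallow/deep coupling described above is given below. It is an illustration under stated assumptions, not the paper's network: the "shallow" branch here is a single fixed convolution standing in for the V1-like prior-knowledge filters, deep_fcn is any user-supplied FCN assumed to preserve spatial resolution, and the fusion is concatenation followed by a 1x1 convolution.

import torch
import torch.nn as nn

class FusedLesionSegmenter(nn.Module):
    def __init__(self, deep_fcn, deep_channels, n_filters=8):
        super().__init__()
        self.deep_fcn = deep_fcn                                  # deep, data-driven branch
        self.shallow = nn.Conv2d(3, n_filters, 7, padding=3, bias=False)
        for p in self.shallow.parameters():                       # fixed "prior knowledge" filters
            p.requires_grad = False
        self.fuse = nn.Conv2d(deep_channels + n_filters, 1, 1)    # 1x1 convolution fusion

    def forward(self, x):                                         # x: (B, 3, H, W)
        deep_feat = self.deep_fcn(x)                              # (B, deep_channels, H, W) assumed
        shallow_feat = torch.relu(self.shallow(x))                # (B, n_filters, H, W)
        fused = torch.cat([deep_feat, shallow_feat], dim=1)       # skip-connection style coupling
        return torch.sigmoid(self.fuse(fused))                    # per-pixel lesion probability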
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Lesion focused super-resolution.\n \n \n \n \n\n\n \n Zhu, J.; Yang, G.; and Lio, P.\n\n\n \n\n\n\n In SPIE Medical Imaging 2019, pages 56, 2019. \n \n\n\n\n
\n
@inproceedings{\n title = {Lesion focused super-resolution},\n type = {inproceedings},\n year = {2019},\n pages = {56},\n issue = {March 2019},\n id = {28d1960b-838b-3adb-ba4f-e3d51726b6c9},\n created = {2024-01-13T06:15:55.983Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:14:18.428Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Super-resolution (SR) for image enhancement has great importance in medical image applications. Broadly speaking, there are two types of SR, one requires multiple low resolution (LR) images from different views of the same object to be reconstructed to the high resolution (HR) output, and the other one relies on the learning from a large amount of training datasets, i.e., LR-HR pairs. In real clinical environment, acquiring images from multi-views is expensive and sometimes infeasible. In this paper, we present a novel Generative Adversarial Networks (GAN) based learning framework to achieve SR from its LR version. By performing simulation based studies on the Multimodal Brain Tumor Segmentation Challenge (BraTS) datasets, we demonstrate the efficacy of our method in application of brain tumor MRI enhancement. Compared to bilinear interpolation and other state-of-the-art SR methods, our model is lesion focused, which is not only resulted in better perceptual image quality without blurring, but also more efficient and directly benefit for the following clinical tasks, e.g., lesion detection and abnormality enhancement. Therefore, we can envisage the application of our SR method to boost image spatial resolution while maintaining crucial diagnostic information for further clinical tasks.},\n bibtype = {inproceedings},\n author = {Zhu, Jin and Yang, Guang and Lio, Pietro},\n doi = {10.1117/12.2512576},\n booktitle = {SPIE Medical Imaging 2019}\n}
\n
\n\n\n
\n Super-resolution (SR) for image enhancement is of great importance in medical image applications. Broadly speaking, there are two types of SR: one requires multiple low-resolution (LR) images from different views of the same object to be reconstructed into the high-resolution (HR) output, while the other relies on learning from a large amount of training data, i.e., LR-HR pairs. In a real clinical environment, acquiring images from multiple views is expensive and sometimes infeasible. In this paper, we present a novel Generative Adversarial Network (GAN) based learning framework to achieve SR from a single LR input. By performing simulation-based studies on the Multimodal Brain Tumor Segmentation Challenge (BraTS) datasets, we demonstrate the efficacy of our method for brain tumor MRI enhancement. Compared to bilinear interpolation and other state-of-the-art SR methods, our model is lesion focused, which not only results in better perceptual image quality without blurring, but is also more efficient and directly benefits subsequent clinical tasks, e.g., lesion detection and abnormality enhancement. We therefore envisage the application of our SR method to boost image spatial resolution while maintaining crucial diagnostic information for further clinical tasks.\n
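The "lesion focused" objective sketched in this abstract can be illustrated, very loosely, by a generator loss that up-weights the pixel error inside the lesion region and adds an adversarial term. This is a toy formulation rather than the paper's loss; the mask input, the weighting scheme, and the coefficient values are assumptions.

import torch
import torch.nn.functional as F

def lesion_focused_generator_loss(sr, hr, lesion_mask, disc_fake,
                                  adv_weight=1e-3, roi_weight=10.0):
    # sr, hr: (B, 1, H, W) super-resolved and ground-truth HR images.
    # lesion_mask: (B, 1, H, W) binary mask of the lesion region.
    # disc_fake: discriminator logits for the super-resolved images.
    pixel_weights = 1.0 + roi_weight * lesion_mask                      # emphasise the lesion ROI
    pixel_loss = (pixel_weights * (sr - hr) ** 2).mean()                # ROI-weighted MSE
    adv_loss = F.binary_cross_entropy_with_logits(disc_fake,
                                                  torch.ones_like(disc_fake))
    return pixel_loss + adv_weight * adv_loss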
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n How Can We Make Gan Perform Better in Single Medical Image Super-Resolution? A Lesion Focused Multi-Scale Approach.\n \n \n \n \n\n\n \n Zhu, J.; Yang, G.; and Lio, P.\n\n\n \n\n\n\n In 2019 IEEE 16th International Symposium on Biomedical Imaging (ISBI 2019), pages 1669-1673, 4 2019. IEEE\n \n\n\n\n
\n
@inproceedings{\n title = {How Can We Make Gan Perform Better in Single Medical Image Super-Resolution? A Lesion Focused Multi-Scale Approach},\n type = {inproceedings},\n year = {2019},\n pages = {1669-1673},\n issue = {Isbi},\n websites = {https://ieeexplore.ieee.org/document/8759517/},\n month = {4},\n publisher = {IEEE},\n id = {63dd6e9f-0b08-3a0a-909d-159291fb2e1f},\n created = {2024-01-13T06:15:56.000Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:14:18.243Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Zhu, Jin and Yang, Guang and Lio, Pietro},\n doi = {10.1109/ISBI.2019.8759517},\n booktitle = {2019 IEEE 16th International Symposium on Biomedical Imaging (ISBI 2019)}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n TPSDicyc: Improved Deformation Invariant Cross-domain Medical Image Synthesis.\n \n \n \n \n\n\n \n Wang, C.; Papanastasiou, G.; Tsaftaris, S.; Yang, G.; Gray, C.; Newby, D.; Macnaught, G.; and MacGillivray, T.\n\n\n \n\n\n\n Volume 11905 LNCS Springer International Publishing, 2019.\n \n\n\n\n
\n
@book{\n title = {TPSDicyc: Improved Deformation Invariant Cross-domain Medical Image Synthesis},\n type = {book},\n year = {2019},\n source = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},\n pages = {245-254},\n volume = {11905 LNCS},\n websites = {http://dx.doi.org/10.1007/978-3-030-33843-5_23},\n publisher = {Springer International Publishing},\n id = {69d8c83f-f52b-37fd-b07a-838cee3d4eb9},\n created = {2024-01-13T06:15:56.027Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:24:34.819Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Cycle-consistent generative adversarial network (CycleGAN) has been widely used for cross-domain medical image systhesis tasks particularly due to its ability to deal with unpaired data. However, most CycleGAN-based synthesis methods can not achieve good alignment between the synthesized images and data from the source domain, even with additional image alignment losses. This is because the CycleGAN generator network can encode the relative deformations and noises associated to different domains. This can be detrimental for the downstream applications that rely on the synthesized images, such as generating pseudo-CT for PET-MR attenuation correction. In this paper, we present a deformation invariant model based on the deformation-invariant CycleGAN (DicycleGAN) architecture and the spatial transformation network (STN) using thin-plate-spline (TPS). The proposed method can be trained with unpaired and unaligned data, and generate synthesised images aligned with the source data. Robustness to the presence of relative deformations between data from the source and target domain has been evaluated through experiments on multi-sequence brain MR data and multi-modality abdominal CT and MR data. Experiment results demonstrated that our method can achieve better alignment between the source and target data while maintaining superior image quality of signal compared to several state-of-the-art CycleGAN-based methods.},\n bibtype = {book},\n author = {Wang, Chengjia and Papanastasiou, Giorgos and Tsaftaris, Sotirios and Yang, Guang and Gray, Calum and Newby, David and Macnaught, Gillian and MacGillivray, Tom},\n doi = {10.1007/978-3-030-33843-5_23}\n}
\n
\n\n\n
\n Cycle-consistent generative adversarial network (CycleGAN) has been widely used for cross-domain medical image synthesis tasks, particularly because of its ability to deal with unpaired data. However, most CycleGAN-based synthesis methods cannot achieve good alignment between the synthesized images and data from the source domain, even with additional image alignment losses. This is because the CycleGAN generator network can encode the relative deformations and noise associated with different domains. This can be detrimental for downstream applications that rely on the synthesized images, such as generating pseudo-CT for PET-MR attenuation correction. In this paper, we present a deformation-invariant model based on the deformation-invariant CycleGAN (DicycleGAN) architecture and a spatial transformation network (STN) using thin-plate splines (TPS). The proposed method can be trained with unpaired and unaligned data and generates synthesised images aligned with the source data. Robustness to relative deformations between data from the source and target domains has been evaluated through experiments on multi-sequence brain MR data and multi-modality abdominal CT and MR data. The experimental results demonstrate that our method achieves better alignment between the source and target data while maintaining superior image quality compared with several state-of-the-art CycleGAN-based methods.\n
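For context, the cycle-consistency term that CycleGAN-based synthesis models such as the one above build on can be written in a couple of lines. The paper's actual contribution (the TPS-based spatial transformation that makes synthesis deformation invariant) is not reproduced here, and the tensor and generator names are illustrative.

import torch

def cycle_consistency_loss(real_a, rec_a, real_b, rec_b):
    # rec_a = G_BA(G_AB(real_a)) and rec_b = G_AB(G_BA(real_b)) for generators G_AB, G_BA;
    # the L1 reconstruction error encourages each translation cycle to return to its input.
    return (rec_a - real_a).abs().mean() + (rec_b - real_b).abs().mean()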
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n 3D U2-Net: A 3D Universal U-Net for Multi-domain Medical Image Segmentation.\n \n \n \n \n\n\n \n Huang, C.; Han, H.; Yao, Q.; Zhu, S.; and Zhou, K., S.\n\n\n \n\n\n\n Volume 1 Springer International Publishing, 2019.\n \n\n\n\n
\n
@book{\n title = {3D U2-Net: A 3D Universal U-Net for Multi-domain Medical Image Segmentation},\n type = {book},\n year = {2019},\n source = {Proceeding of the International Conference on Medical Image Computing and Computer Assisted Interventions},\n keywords = {Multi-domain learning,Segmentation,Universal model},\n pages = {291-299},\n volume = {1},\n websites = {http://dx.doi.org/10.1007/978-3-030-32245-8_33},\n publisher = {Springer International Publishing},\n id = {dc353a48-a502-307f-a646-8928f5bbee35},\n created = {2024-01-13T06:15:56.027Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:24:48.304Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Fully convolutional neural networks like U-Net have been the state-of-the-art methods in medical image segmentation. Practically, a network is highly specialized and trained separately for each segmenta- tion task. Instead of a collection of multiple models, it is highly desirable to learn a universal data representation for different tasks, ideally a sin- gle model with the addition of a minimal number of parameters steered to each task. Inspired by the recent success of multi-domain learning in image classification, for the first time we explore a promising univer- sal architecture that handles multiple medical segmentation tasks and is extendable for new tasks, regardless of different organs and imaging modalities. Our 3D Universal U-Net (3D U2-Net) is built upon sepa- rable convolution, assuming that images from different domains have domain-specific spatial correlations which can be probed with channel- wise convolution while also share cross-channel correlations which can be modeled with pointwise convolution. We evaluate the 3D U2-Net on five organ segmentation datasets. Experimental results show that this uni- versal network is capable of competing with traditional models in terms of segmentation accuracy, while requiring only about 1% of the param- eters. Additionally, we observe that the architecture can be easily and effectively adapted to a new domain without sacrificing performance in the domains used to learn the shared parameterization of the universal network. We put the code of 3D U2-Net into public domain (https:// github.com/huangmozhilv/u2net},\n bibtype = {book},\n author = {Huang, Chao and Han, Hu and Yao, Qingsong and Zhu, Shankan and Zhou, Kevin S.},\n doi = {10.1007/978-3-030-32245-8}\n}
\n
\n\n\n
\n Fully convolutional neural networks like U-Net have been the state-of-the-art methods in medical image segmentation. Practically, a network is highly specialized and trained separately for each segmentation task. Instead of a collection of multiple models, it is highly desirable to learn a universal data representation for different tasks, ideally a single model with the addition of a minimal number of parameters steered to each task. Inspired by the recent success of multi-domain learning in image classification, for the first time we explore a promising universal architecture that handles multiple medical segmentation tasks and is extendable for new tasks, regardless of different organs and imaging modalities. Our 3D Universal U-Net (3D U2-Net) is built upon separable convolution, assuming that images from different domains have domain-specific spatial correlations which can be probed with channel-wise convolution, while also sharing cross-channel correlations which can be modeled with pointwise convolution. We evaluate the 3D U2-Net on five organ segmentation datasets. Experimental results show that this universal network is capable of competing with traditional models in terms of segmentation accuracy, while requiring only about 1% of the parameters. Additionally, we observe that the architecture can be easily and effectively adapted to a new domain without sacrificing performance in the domains used to learn the shared parameterization of the universal network. We put the code of 3D U2-Net into the public domain (https://github.com/huangmozhilv/u2net).\n
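The separable-convolution building block this abstract refers to (channel-wise spatial filtering followed by pointwise cross-channel mixing) can be sketched in a few lines of PyTorch. This is a generic depthwise-separable 3D convolution rather than the released 3D U2-Net code; in the actual model the depthwise part would be the domain-specific component.

import torch
import torch.nn as nn

class SeparableConv3d(nn.Module):
    def __init__(self, in_ch, out_ch, kernel_size=3, padding=1):
        super().__init__()
        # Channel-wise (depthwise) convolution: one spatial filter per input channel.
        self.depthwise = nn.Conv3d(in_ch, in_ch, kernel_size, padding=padding, groups=in_ch)
        # Pointwise (1x1x1) convolution: mixes information across channels.
        self.pointwise = nn.Conv3d(in_ch, out_ch, kernel_size=1)

    def forward(self, x):                        # x: (B, in_ch, D, H, W)
        return self.pointwise(self.depthwise(x))

# Example: SeparableConv3d(16, 32)(torch.randn(1, 16, 32, 64, 64)) -> (1, 32, 32, 64, 64)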
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Deep learning for diagnosis of chronic myocardial infarction on nonenhanced cardiac cine MRI.\n \n \n \n \n\n\n \n Zhang, N.; Yang, G.; Gao, Z.; Xu, C.; Zhang, Y.; Shi, R.; Keegan, J.; Xu, L.; Zhang, H.; Fan, Z.; and Firmin, D.\n\n\n \n\n\n\n Radiology, 291(3): 606-607. 2019.\n \n\n\n\n
\n
@article{\n title = {Deep learning for diagnosis of chronic myocardial infarction on nonenhanced cardiac cine MRI},\n type = {article},\n year = {2019},\n pages = {606-607},\n volume = {291},\n id = {e6589659-cac8-3f48-97c1-b43290215ca5},\n created = {2024-01-13T06:15:56.173Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:24:44.795Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Background: Renal impairment is common in patients with coronary artery disease and, if severe, late gadolinium enhancement (LGE) imaging for myocardial infarction (MI) evaluation cannot be performed. Purpose: To develop a fully automatic framework for chronic MI delineation via deep learning on non-contrast material-enhanced cardiac cine MRI. Materials and Methods: In this retrospective single-center study, a deep learning model was developed to extract motion features from the left ventricle and delineate MI regions on nonenhanced cardiac cine MRI collected between October 2015 and March 2017. Patients with chronic MI, as well as healthy control patients, had both nonenhanced cardiac cine (25 phases per cardiac cycle) and LGE MRI examinations. Eighty percent of MRI examinations were used for the training data set and 20% for the independent testing data set. Chronic MI regions on LGE MRI were defined as ground truth. Diagnostic performance was assessed by analysis of the area under the receiver operating characteristic curve (AUC). MI area and MI area percentage from nonenhanced cardiac cine and LGE MRI were compared by using the Pearson correlation, paired t test, and Bland-Altman analysis. Results: Study participants included 212 patients with chronic MI (men, 171; age, 57.2 years 6 12.5) and 87 healthy control patients (men, 42; age, 43.3 years 6 15.5). Using the full cardiac cine MRI, the per-segment sensitivity and specificity for detecting chronic MI in the independent test set was 89.8% and 99.1%, respectively, with an AUC of 0.94. There were no differences between nonenhanced cardiac cine and LGE MRI analyses in number of MI segments (114 vs 127, respectively; P = .38), per-patient MI area (6.2 cm2 6 2.8 vs 5.5 cm2 6 2.3, respectively; P = .27; correlation coefficient, r = 0.88), and MI area percentage (21.5% 6 17.3 vs 18.5% 6 15.4; P = .17; correlation coefficient, r = 0.89). Conclusion: The proposed deep learning framework on nonenhanced cardiac cine MRI enables the confirmation (presence), detection (position), and delineation (transmurality and size) of chronic myocardial infarction. However, future larger-scale multicenter studies are required for a full validation.},\n bibtype = {article},\n author = {Zhang, Nan and Yang, Guang and Gao, Zhifan and Xu, Chenchu and Zhang, Yanping and Shi, Rui and Keegan, Jennifer and Xu, Lei and Zhang, Heye and Fan, Zhanming and Firmin, David},\n doi = {10.1148/radiol.2019182304},\n journal = {Radiology},\n number = {3}\n}
\n
\n\n\n
\n Background: Renal impairment is common in patients with coronary artery disease and, if severe, late gadolinium enhancement (LGE) imaging for myocardial infarction (MI) evaluation cannot be performed. Purpose: To develop a fully automatic framework for chronic MI delineation via deep learning on non-contrast material-enhanced cardiac cine MRI. Materials and Methods: In this retrospective single-center study, a deep learning model was developed to extract motion features from the left ventricle and delineate MI regions on nonenhanced cardiac cine MRI collected between October 2015 and March 2017. Patients with chronic MI, as well as healthy control patients, had both nonenhanced cardiac cine (25 phases per cardiac cycle) and LGE MRI examinations. Eighty percent of MRI examinations were used for the training data set and 20% for the independent testing data set. Chronic MI regions on LGE MRI were defined as ground truth. Diagnostic performance was assessed by analysis of the area under the receiver operating characteristic curve (AUC). MI area and MI area percentage from nonenhanced cardiac cine and LGE MRI were compared by using the Pearson correlation, paired t test, and Bland-Altman analysis. Results: Study participants included 212 patients with chronic MI (men, 171; age, 57.2 years ± 12.5) and 87 healthy control patients (men, 42; age, 43.3 years ± 15.5). Using the full cardiac cine MRI, the per-segment sensitivity and specificity for detecting chronic MI in the independent test set were 89.8% and 99.1%, respectively, with an AUC of 0.94. There were no differences between nonenhanced cardiac cine and LGE MRI analyses in the number of MI segments (114 vs 127, respectively; P = .38), per-patient MI area (6.2 cm2 ± 2.8 vs 5.5 cm2 ± 2.3, respectively; P = .27; correlation coefficient, r = 0.88), or MI area percentage (21.5% ± 17.3 vs 18.5% ± 15.4; P = .17; correlation coefficient, r = 0.89). Conclusion: The proposed deep learning framework on nonenhanced cardiac cine MRI enables the confirmation (presence), detection (position), and delineation (transmurality and size) of chronic myocardial infarction. However, future larger-scale multicenter studies are required for full validation.\n
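The diagnostic and agreement statistics reported above (AUC, Pearson correlation, paired t test, Bland-Altman analysis) are standard computations; a small sketch of how such numbers are typically obtained is shown below, with all array names assumed for illustration rather than taken from the study.

import numpy as np
from scipy import stats
from sklearn.metrics import roc_auc_score

def diagnostic_summary(y_true, y_score, mi_area_cine, mi_area_lge):
    # y_true: per-segment ground-truth labels (1 = infarcted); y_score: model scores.
    auc = roc_auc_score(y_true, y_score)
    r, _ = stats.pearsonr(mi_area_cine, mi_area_lge)          # agreement in MI area
    _, p_paired = stats.ttest_rel(mi_area_cine, mi_area_lge)  # paired t test
    diff = np.asarray(mi_area_cine) - np.asarray(mi_area_lge)
    loa = (diff.mean() - 1.96 * diff.std(),
           diff.mean() + 1.96 * diff.std())                   # Bland-Altman limits of agreement
    return auc, r, p_paired, loa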
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Two-Stage U-Net Model for 3D Multi-class Segmentation on Full-Resolution Cardiac Data.\n \n \n \n \n\n\n \n Wang, C.; MacGillivray, T.; Macnaught, G.; Yang, G.; and Newby, D.\n\n\n \n\n\n\n International Workshop on Statistical Atlases and Computational Models of the Heart, pages 191-199. Springer, Cham, 2019.\n \n\n\n\n
\n
@inbook{\n type = {inbook},\n year = {2019},\n pages = {191-199},\n websites = {http://link.springer.com/10.1007/978-3-030-12029-0_21},\n publisher = {Springer, Cham},\n id = {7931f6a1-5b23-3e6c-9576-b403806c032a},\n created = {2024-01-13T08:14:14.324Z},\n file_attached = {false},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T12:37:43.899Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n source_type = {CONF},\n private_publication = {false},\n bibtype = {inbook},\n author = {Wang, Chengjia and MacGillivray, Tom and Macnaught, Gillian and Yang, Guang and Newby, David},\n doi = {10.1007/978-3-030-12029-0_21},\n chapter = {A Two-Stage U-Net Model for 3D Multi-class Segmentation on Full-Resolution Cardiac Data},\n title = {International Workshop on Statistical Atlases and Computational Models of the Heart}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Atrial scar segmentation via potential learning in the graph-cut framework.\n \n \n \n \n\n\n \n Li, L.; Yang, G.; Wu, F.; Wong, T.; Mohiaddin, R.; Firmin, D.; Keegan, J.; Xu, L.; and Zhuang, X.\n\n\n \n\n\n\n In International Workshop on Statistical Atlases and Computational Models of the Heart, pages 152-160, 2019. Springer, Cham\n \n\n\n\n
\n
@inproceedings{\n title = {Atrial scar segmentation via potential learning in the graph-cut framework},\n type = {inproceedings},\n year = {2019},\n pages = {152-160},\n publisher = {Springer, Cham},\n id = {c3e15d87-d19a-3692-84d3-b2c3d2b95486},\n created = {2024-01-13T08:14:16.718Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:17:13.993Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {li_atrial_2019-3},\n source_type = {CONF},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Li, Lei and Yang, Guang and Wu, Fuping and Wong, Tom and Mohiaddin, Raad and Firmin, David and Keegan, Jenny and Xu, Lingchao and Zhuang, Xiahai},\n booktitle = {International Workshop on Statistical Atlases and Computational Models of the Heart}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n A Single-Image Super-Resolution Method for Late Gadolinium Enhancement CMR.\n \n \n \n\n\n \n Zhu, J.; Yang, G.; Wong, T.; Mohiaddin, R.; Firmin, D.; Keegan, J.; and Lio, P.\n\n\n \n\n\n\n In International Society for Magnetic Resonance in Medicine, 2019. \n \n\n\n\n
\n
@inproceedings{\n title = {A Single-Image Super-Resolution Method for Late Gadolinium Enhancement CMR},\n type = {inproceedings},\n year = {2019},\n id = {a960c3b6-29ae-395b-b65d-1dc1acce403a},\n created = {2024-01-13T12:10:01.116Z},\n file_attached = {false},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T12:10:01.116Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n source_type = {CONF},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Zhu, Jin and Yang, Guang and Wong, Tom and Mohiaddin, Raad and Firmin, David and Keegan, Jennifer and Lio, Pietro},\n booktitle = {International Society for Magnetic Resonance in Medicine}\n}
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2018\n \n \n (21)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Bayesian VoxDRN: A Probabilistic Deep Voxelwise Dilated Residual Network for Whole Heart Segmentation from 3D MR Images.\n \n \n \n \n\n\n \n Shi, Z.; Zeng, G.; Zhang, L.; Zhuang, X.; Li, L.; Yang, G.; and Zheng, G.\n\n\n \n\n\n\n In Medical Image Computing and Computer Assisted Intervention (MICCAI 2018), 2018. \n \n\n\n\n
\n
@inproceedings{\n title = {Bayesian VoxDRN: A Probabilistic Deep Voxelwise Dilated Residual Network for Whole Heart Segmentation from 3D MR Images},\n type = {inproceedings},\n year = {2018},\n id = {12ceae2a-a891-32e8-888d-f3f259e29fe8},\n created = {2024-01-13T05:46:20.792Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:18:07.581Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {shi_bayesian_2018-1},\n source_type = {inproceedings},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Shi, Zenglin and Zeng, Guodong and Zhang, Le and Zhuang, Xiahai and Li, Lei and Yang, Guang and Zheng, Guoyan},\n booktitle = {Medical Image Computing and Computer Assisted Intervention (MICCAI 2018)}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Deep U-Net Reconstruction for Undersampled Spiral Diffusion Tensor Cardiovascular Magnetic Resonance.\n \n \n \n\n\n \n Luk YH, A.; Yang, G.; Ferreira, P.; Nielles-Vallespin, S.; Gorodezky, M.; Khalique, Z.; Pennell, D.; Firmin, D.; and Scott, A.\n\n\n \n\n\n\n In International Society for Magnetic Resonance in Medicine Workshop on Machine Learning, Part II, 2018. \n \n\n\n\n
\n
@inproceedings{\n title = {Deep U-Net Reconstruction for Undersampled Spiral Diffusion Tensor Cardiovascular Magnetic Resonance},\n type = {inproceedings},\n year = {2018},\n id = {565106b5-cb67-3697-92cd-2f0c9e1a4326},\n created = {2024-01-13T05:46:20.965Z},\n file_attached = {false},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T12:10:01.680Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {luk_yh_deep_2018},\n source_type = {inproceedings},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Luk YH, Augustine and Yang, Guang and Ferreira, Pedro and Nielles-Vallespin, Sonia and Gorodezky, Margarita and Khalique, Zohya and Pennell, Dudley and Firmin, David and Scott, Andrew},\n booktitle = {International Society for Magnetic Resonance in Medicine Workshop on Machine Learning, Part II}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Automatic myocardial segmentation of cardiovascular diffusion tensor images with a convolutional neural network.\n \n \n \n\n\n \n Ferreira, P.; Khalique, Z.; Scott, A.; Yang, G.; Nielles-Vallespin, S.; Pennell, D.; and Firmin, D.\n\n\n \n\n\n\n In International Society for Magnetic Resonance in Medicine Workshop on Machine Learning, Part II, 2018. \n \n\n\n\n
\n
@inproceedings{\n title = {Automatic myocardial segmentation of cardiovascular diffusion tensor images with a convolutional neural network},\n type = {inproceedings},\n year = {2018},\n id = {72645b84-50c8-30c8-bd54-d3bc41cb72f4},\n created = {2024-01-13T05:46:21.075Z},\n file_attached = {false},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T12:10:01.804Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {ferreira_automatic_2018-1},\n source_type = {inproceedings},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Ferreira, Pedro and Khalique, Zohya and Scott, Andrew and Yang, Guang and Nielles-Vallespin, Sonia and Pennell, Dudley and Firmin, David},\n booktitle = {International Society for Magnetic Resonance in Medicine Workshop on Machine Learning, Part II}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Deep Learning Using TensorLayer.\n \n \n \n\n\n \n Dong, H.; Guo, Y.; and Yang, G.\n\n\n \n\n\n\n ISBN 978-7-121-32622-6, Publishing House of Electronics Industry, 2018.\n \n\n\n\n
\n
@book{\n title = {Deep Learning Using TensorLayer},\n type = {book},\n year = {2018},\n publisher = {ISBN 978-7-121-32622-6, Publishing House of Electronics Industry},\n id = {da4b3c86-1f04-34a5-bc94-340fd405e168},\n created = {2024-01-13T05:46:21.279Z},\n file_attached = {false},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T12:15:09.918Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {dong_deep_2018-1},\n source_type = {book},\n private_publication = {false},\n bibtype = {book},\n author = {Dong, Hao and Guo, Yike and Yang, Guang}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multiview Sequential Learning and Dilated Residual Learning for a Fully Automatic Delineation of the Left Atrium and Pulmonary Veins from Late Gadolinium-Enhanced Cardiac MRI Images.\n \n \n \n \n\n\n \n Yang, G.; Chen, J.; Gao, Z.; Zhang, H.; Ni, H.; Angelini, E.; Mohiaddin, R.; Wong, T.; Keegan, J.; and Firmin, D.\n\n\n \n\n\n\n In The 40th IEEE International Engineering in Medicine and Biology Conference, 2018. \n \n\n\n\n
\n
@inproceedings{\n title = {Multiview Sequential Learning and Dilated Residual Learning for a Fully Automatic Delineation of the Left Atrium and Pulmonary Veins from Late Gadolinium-Enhanced Cardiac MRI Images},\n type = {inproceedings},\n year = {2018},\n id = {4e32e511-d30e-3bd0-b5a6-a6f8c6439418},\n created = {2024-01-13T05:46:21.615Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:19:01.850Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {yang_multiview_2018-1},\n source_type = {inproceedings},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Yang, Guang and Chen, Jun and Gao, Zhifan and Zhang, Heye and Ni, Hao and Angelini, Elsa and Mohiaddin, Raad and Wong, Tom and Keegan, Jennifer and Firmin, David},\n booktitle = {The 40th IEEE International Engineering in Medicine and Biology Conference}\n}
Holistic and Deep Feature Pyramids for Saliency Detection. Dong, S.; Gao, Z.; Sun, S.; Wang, X.; Li, M.; Zhang, H.; Yang, G.; Liu, H.; and Li, S. In British Machine Vision Conference, 2018.
\n\n\n\n \n \n \"HolisticPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Holistic and Deep Feature Pyramids for Saliency Detection},\n type = {inproceedings},\n year = {2018},\n id = {23f3ba69-b01c-330d-baaa-218ce31c6f5d},\n created = {2024-01-13T05:46:21.828Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:19:12.380Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {dong_holistic_2018-1},\n source_type = {inproceedings},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Dong, Shizhong and Gao, Zhifan and Sun, Shanhui and Wang, Xin and Li, Ming and Zhang, Heye and Yang, Guang and Liu, Huafeng and Li, Shuo},\n booktitle = {British Machine Vision Conference}\n}
Review for "Guide to Medical Image Analysis: Methods and Algorithms". Yang, G. Technical Report, 2018.
\n\n\n\n \n \n \"ReviewWebsite\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@techreport{\n title = {Review for "Guide to Medical Image Analysis: Methods and Algorithms"},\n type = {techreport},\n year = {2018},\n websites = {http://www.iapr.org/docs/newsletter-2018},\n id = {ef4ce083-8f2a-3465-ae4f-1269f1a58b11},\n created = {2024-01-13T05:46:21.849Z},\n file_attached = {false},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T12:15:09.929Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {yang_review_2018},\n source_type = {book},\n notes = {Issue: http://www.iapr.org/docs/newsletter-2018<br/>Publication Title: January 2018 Issue of the International Association of Pattern Recognition Newsletter, Page 32-34},\n private_publication = {false},\n bibtype = {techreport},\n author = {Yang, Guang}\n}
Deep Learning intra-image and inter-images features for Co-saliency detection. Li, M.; Dong, S.; Zhang, K.; Gao, Z.; Wu, X.; Zhang, H.; Yang, G.; and Li, S. In British Machine Vision Conference, 2018.
\n\n\n\n \n \n \"DeepPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Deep Learning intra-image and inter-images features for Co-saliency detection},\n type = {inproceedings},\n year = {2018},\n id = {0b787fee-ce1c-340e-9d7a-8ff3d50e6b6e},\n created = {2024-01-13T05:46:21.967Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:19:47.946Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {li_deep_2018-1},\n source_type = {inproceedings},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Li, Min and Dong, Shizhou and Zhang, Kun and Gao, Zhifan and Wu, Xi and Zhang, Heye and Yang, Guang and Li, Shuo},\n booktitle = {British Machine Vision Conference}\n}
Stochastic Deep Compressive Sensing for the Reconstruction of Diffusion Tensor Cardiac MRI. Schlemper, J.; Yang, G.; Ferreira, P.; Scott, A.; McGill, L.; Khalique, Z.; Gorodezky, M.; Roehl, M.; Keegan, J.; Pennell, D.; and others. In International Conference on Medical Image Computing and Computer-Assisted Intervention, pages 295-303, Springer, 2018.
\n\n\n\n \n \n \"StochasticPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Stochastic Deep Compressive Sensing for the Reconstruction of Diffusion Tensor Cardiac MRI},\n type = {inproceedings},\n year = {2018},\n pages = {295-303},\n publisher = {Springer},\n id = {440113cb-8f1a-3c43-8e46-2061fcd673f5},\n created = {2024-01-13T05:46:22.983Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:21:15.770Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {schlemper_stochastic_2018},\n source_type = {inproceedings},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Schlemper, Jo and Yang, Guang and Ferreira, Pedro and Scott, Andrew and McGill, Laura-Ann and Khalique, Zohya and Gorodezky, Margarita and Roehl, Malte and Keegan, Jennifer and Pennell, Dudley and others, undefined},\n booktitle = {International Conference on Medical Image Computing and Computer-Assisted Intervention}\n}
DAGAN: Deep de-aliasing generative adversarial networks for fast compressed sensing MRI reconstruction. Yang, G.; Yu, S.; Dong, H.; Slabaugh, G.; Dragotti, P., L.; Ye, X.; Liu, F.; Arridge, S.; Keegan, J.; Guo, Y.; and others. IEEE Transactions on Medical Imaging, 37(6): 1310-1321, 2018.
\n\n\n\n \n \n \"DAGAN:Paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {DAGAN: Deep de-aliasing generative adversarial networks for fast compressed sensing MRI reconstruction},\n type = {article},\n year = {2018},\n pages = {1310-1321},\n volume = {37},\n id = {8b864e46-ff91-3232-9810-b0b5cfc864b6},\n created = {2024-01-13T05:46:23.632Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:21:44.476Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {yang_dagan_2018},\n source_type = {article},\n notes = {Publisher: IEEE},\n private_publication = {false},\n bibtype = {article},\n author = {Yang, Guang and Yu, Simiao and Dong, Hao and Slabaugh, Greg and Dragotti, Pier Luigi and Ye, Xujiong and Liu, Fangde and Arridge, Simon and Keegan, Jennifer and Guo, Yike and others, undefined},\n journal = {IEEE Transactions on Medical Imaging},\n number = {6}\n}
Adversarial and Perceptual Refinement for Compressed Sensing MRI Reconstruction. Seitzer, M.; Yang, G.; Schlemper, J.; Oktay, O.; Wuerfl, T.; Christlein, V.; Wong, T.; Mohiaddin, R.; Firmin, D.; Keegan, J.; and others. In Medical Image Computing and Computer Assisted Intervention (MICCAI 2018), 2018.
\n\n\n\n \n \n \"AdversarialPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Adversarial and Perceptual Refinement for Compressed Sensing MRI Reconstruction},\n type = {inproceedings},\n year = {2018},\n id = {2c3198b2-609a-37fd-8e37-dd835cec6f4d},\n created = {2024-01-13T05:46:29.268Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:16:33.979Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {seitzer_adversarial_2018},\n source_type = {inproceedings},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Seitzer, Maximilian and Yang, Guang and Schlemper, Jo and Oktay, Ozan and Wuerfl, Tobias and Christlein, Vincent and Wong, Tom and Mohiaddin, Raad and Firmin, David and Keegan, Jennifer and others, undefined},\n booktitle = {Medical Image Computing and Computer Assisted Intervention (MICCAI 2018)}\n}
Left Atrial Scarring Segmentation from Delayed-Enhancement Cardiac MRI Images: A Deep Learning Approach. Yang, G.; Zhuang, X.; Khan, H.; Nyktari, E.; Haldar, S.; Li, L.; Wage, R.; Ye, X.; Slabaugh, G.; Mohiaddin, R.; Wong, T.; Keegan, J.; and Firmin, D. Cardiovascular Imaging and Image Analysis, 109-130, 2018.
\n\n\n\n \n \n \"LeftPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Left Atrial Scarring Segmentation from Delayed-Enhancement Cardiac MRI Images: A Deep Learning Approach},\n type = {article},\n year = {2018},\n pages = {109-130},\n id = {f6a6d20f-0caa-3ca9-b5e3-b63d675819a3},\n created = {2024-01-13T06:15:53.746Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:23:50.786Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {The late gadolinium-enhanced (LGE) MRI technique is a well-validated method for fibrosis detection in the myocardium. With this technique, the altered wash-in and wash-out contrast agent kinetics in ?brotic and healthy myocardium results in scar tissue being seen with high or enhanced signal relative to normal tissue which is nulled. Recently, great progress on LGE MRI has resulted in improved visualization of fibrosis in the left atrium (LA). This provides valuable information for treatment planning, image-based procedure guidance, and clinical management in patients with atrial fibrillation (AF). Nevertheless, precise and objective atrial scarring segmentation is required for accurate assessment of AF patients using LGE MRI. This is a very challenging task, not only because of the limited quality and resolution of the LGE MRI images acquired in AF but also due to the thinner wall and unpredictable morphology of the LA. Accurate and reliable segmentation of the anatomical structure of the LA myocardium is a prerequisite for accurate atrial scarring segmentation. Most current studies rely on manual segmentation of the anatomical structures, which is very labor-intensive and subject to inter- and intra-observer variability. The subsequent atrial scarring segmentation is normally based on unsupervised learning methods, for example, using thresholding, histogram analysis, clustering and graph-cut based approaches, which have variable accuracy. In this study, we present a fully automated multi-atlas propagation based whole heart segmentation method to derive the anatomical structure of the LA myocardium and pulmonary veins. This is followed by a supervised deep learning method for atrial scarring segmentation. Twenty clinical LGE MRI scans from longstanding persistent AF patients were entered into this study retrospectively. We have demonstrated that our fully automatic method can achieve accurate and reliable atrial scarring segmentation compared to manual delineated ground truth.},\n bibtype = {article},\n author = {Yang, Guang and Zhuang, Xiahai and Khan, Habib and Nyktari, Eva and Haldar, Shouvik and Li, Lei and Wage, Rick and Ye, Xujiong and Slabaugh, Greg and Mohiaddin, Raad and Wong, Tom and Keegan, Jennifer and Firmin, David},\n doi = {10.1201/9780429441493-6},\n journal = {Cardiovascular Imaging and Image Analysis}\n}
The late gadolinium-enhanced (LGE) MRI technique is a well-validated method for fibrosis detection in the myocardium. With this technique, the altered wash-in and wash-out contrast agent kinetics in fibrotic and healthy myocardium result in scar tissue being seen with high or enhanced signal relative to normal tissue, which is nulled. Recently, great progress on LGE MRI has resulted in improved visualization of fibrosis in the left atrium (LA). This provides valuable information for treatment planning, image-based procedure guidance, and clinical management in patients with atrial fibrillation (AF). Nevertheless, precise and objective atrial scarring segmentation is required for accurate assessment of AF patients using LGE MRI. This is a very challenging task, not only because of the limited quality and resolution of the LGE MRI images acquired in AF but also due to the thinner wall and unpredictable morphology of the LA. Accurate and reliable segmentation of the anatomical structure of the LA myocardium is a prerequisite for accurate atrial scarring segmentation. Most current studies rely on manual segmentation of the anatomical structures, which is very labor-intensive and subject to inter- and intra-observer variability. The subsequent atrial scarring segmentation is normally based on unsupervised learning methods, for example thresholding, histogram analysis, clustering and graph-cut based approaches, which have variable accuracy. In this study, we present a fully automated multi-atlas propagation based whole heart segmentation method to derive the anatomical structure of the LA myocardium and pulmonary veins. This is followed by a supervised deep learning method for atrial scarring segmentation. Twenty clinical LGE MRI scans from longstanding persistent AF patients were entered into this study retrospectively. We have demonstrated that our fully automatic method can achieve accurate and reliable atrial scarring segmentation compared to manually delineated ground truth.
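The supervised scar-classification step described above can be illustrated with a small, hypothetical sketch. This is not the chapter's actual pipeline: the superpixel_features descriptor, the toy intensity data and the SVM choice are assumptions made here purely for illustration.

import numpy as np
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report

def superpixel_features(intensities):
    # Hypothetical per-super-pixel descriptor: simple LGE intensity statistics.
    return np.array([intensities.mean(), intensities.std(),
                     np.percentile(intensities, 75),
                     np.percentile(intensities, 95)])

rng = np.random.default_rng(0)
# Toy stand-in data: 400 super-pixels, each with 200 intensity samples drawn
# around either a "healthy wall" or an "enhanced scar" mean.
X = np.vstack([superpixel_features(rng.normal(loc=m, scale=10.0, size=200))
               for m in rng.choice([100.0, 160.0], size=400)])
y = (X[:, 0] > 130.0).astype(int)          # 1 = scar-like super-pixel, 0 = healthy

X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.3, random_state=0)
clf = SVC(kernel="rbf", class_weight="balanced").fit(X_tr, y_tr)
print(classification_report(y_te, clf.predict(X_te)))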
Multiview two-task recursive attention model for left atrium and atrial scars segmentation. Chen, J.; Yang, G.; Gao, Z.; Ni, H.; Angelini, E.; Mohiaddin, R.; Wong, T.; Zhang, Y.; Du, X.; Zhang, H.; Keegan, J.; and Firmin, D. Lecture Notes in Computer Science, volume 11071, pages 455-463, Springer International Publishing, 2018.
\n\n\n\n \n \n \"MultiviewPaper\n  \n \n \n \"MultiviewWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@book{\n title = {Multiview two-task recursive attention model for left atrium and atrial scars segmentation},\n type = {book},\n year = {2018},\n source = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},\n pages = {455-463},\n volume = {11071 LNCS},\n websites = {http://dx.doi.org/10.1007/978-3-030-00934-2_51},\n publisher = {Springer International Publishing},\n id = {713383c1-4c68-39f1-b7c7-12f49b63540d},\n created = {2024-01-13T06:15:53.756Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:24:04.828Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Late Gadolinium Enhanced Cardiac MRI (LGE-CMRI) for detecting atrial scars in atrial fibrillation (AF) patients has recently emerged as a promising technique to stratify patients, guide ablation therapy and predict treatment success. Visualisation and quantification of scar tissues require a segmentation of both the left atrium (LA) and the high intensity scar regions from LGE-CMRI images. These two segmentation tasks are challenging due to the cancelling of healthy tissue signal, low signal-to-noise ratio and often limited image quality in these patients. Most approaches require manual supervision and/or a second bright-blood MRI acquisition for anatomical segmentation. Segmenting both the LA anatomy and the scar tissues automatically from a single LGE-CMRI acquisition is highly in demand. In this study, we proposed a novel fully automated multiview two-task (MVTT) recursive attention model working directly on LGE-CMRI images that combines a sequential learning and a dilated residual learning to segment the LA (including attached pulmonary veins) and delineate the atrial scars simultaneously via an innovative attention model. Compared to other state-of-the-art methods, the proposed MVTT achieves compelling improvement, enabling to generate a patient-specific anatomical and atrial scar assessment model.},\n bibtype = {book},\n author = {Chen, Jun and Yang, Guang and Gao, Zhifan and Ni, Hao and Angelini, Elsa and Mohiaddin, Raad and Wong, Tom and Zhang, Yanping and Du, Xiuquan and Zhang, Heye and Keegan, Jennifer and Firmin, David},\n doi = {10.1007/978-3-030-00934-2_51}\n}
Late Gadolinium Enhanced Cardiac MRI (LGE-CMRI) for detecting atrial scars in atrial fibrillation (AF) patients has recently emerged as a promising technique to stratify patients, guide ablation therapy and predict treatment success. Visualisation and quantification of scar tissue require a segmentation of both the left atrium (LA) and the high-intensity scar regions from LGE-CMRI images. These two segmentation tasks are challenging due to the cancelling of healthy tissue signal, low signal-to-noise ratio and often limited image quality in these patients. Most approaches require manual supervision and/or a second bright-blood MRI acquisition for anatomical segmentation. Segmenting both the LA anatomy and the scar tissue automatically from a single LGE-CMRI acquisition is highly in demand. In this study, we propose a novel fully automated multiview two-task (MVTT) recursive attention model working directly on LGE-CMRI images that combines sequential learning and dilated residual learning to segment the LA (including attached pulmonary veins) and delineate the atrial scars simultaneously via an innovative attention model. Compared to other state-of-the-art methods, the proposed MVTT achieves a compelling improvement, enabling the generation of a patient-specific anatomical and atrial scar assessment model.
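As a rough illustration of the "dilated residual learning" component mentioned in the abstract, here is a generic dilated residual block in PyTorch. It is a sketch of the building-block idea only, not the authors' MVTT architecture; the channel count and dilation rate are arbitrary choices for the example.

import torch
import torch.nn as nn

class DilatedResidualBlock(nn.Module):
    def __init__(self, channels: int, dilation: int = 2):
        super().__init__()
        # Dilated convolutions enlarge the receptive field without pooling.
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3,
                               padding=dilation, dilation=dilation)
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3,
                               padding=dilation, dilation=dilation)
        self.bn1 = nn.BatchNorm2d(channels)
        self.bn2 = nn.BatchNorm2d(channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        residual = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return self.relu(out + residual)   # identity shortcut

if __name__ == "__main__":
    block = DilatedResidualBlock(channels=16, dilation=2)
    print(block(torch.randn(1, 16, 64, 64)).shape)   # torch.Size([1, 16, 64, 64])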
Generating Magnetic Resonance Spectroscopy Imaging Data of Brain Tumours from Linear, Non-linear and Deep Learning Models. Olliverre, N.; Yang, G.; Slabaugh, G.; Reyes-Aldasoro, C., C.; and Alonso, E. In Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), volume 11037, pages 130-138, Springer International Publishing, 2018.
\n\n\n\n \n \n \"LecturePaper\n  \n \n \n \"LectureWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inbook{\n type = {inbook},\n year = {2018},\n pages = {130-138},\n volume = {11037 LNCS},\n websites = {http://dx.doi.org/10.1007/978-3-030-00536-8_14,https://link.springer.com/10.1007/978-3-030-00536-8_14},\n publisher = {Springer International Publishing},\n id = {b008968c-b697-3a73-ae9a-c9bd389264f0},\n created = {2024-01-13T06:15:53.902Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:14:18.513Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n bibtype = {inbook},\n author = {Olliverre, Nathan and Yang, Guang and Slabaugh, Gregory and Reyes-Aldasoro, Constantino Carlos and Alonso, Eduardo},\n doi = {10.1007/978-3-030-00536-8_14},\n chapter = {Generating Magnetic Resonance Spectroscopy Imaging Data of Brain Tumours from Linear, Non-linear and Deep Learning Models},\n title = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}\n}
Supervised learning based multimodal MRI brain tumour segmentation using texture features from supervoxels. Soltaninejad, M.; Yang, G.; Lambrou, T.; Allinson, N.; Jones, T., L.; Barrick, T., R.; Howe, F., A.; and Ye, X. Computer Methods and Programs in Biomedicine, 157: 69-84, 2018.
\n\n\n\n \n \n \"SupervisedPaper\n  \n \n \n \"SupervisedWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Supervised learning based multimodal MRI brain tumour segmentation using texture features from supervoxels},\n type = {article},\n year = {2018},\n keywords = {Brain tumour segmentation,Diffusion tensor imaging,Multimodal MRI,Random forests,Supervoxel,Textons},\n pages = {69-84},\n volume = {157},\n websites = {https://doi.org/10.1016/j.cmpb.2018.01.003},\n publisher = {Elsevier B.V.},\n id = {02300617-5860-3be4-8776-6b083054a0d2},\n created = {2024-01-13T06:15:53.918Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:24:12.975Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Background: Accurate segmentation of brain tumour in magnetic resonance images (MRI) is a difficult task due to various tumour types. Using information and features from multimodal MRI including structural MRI and isotropic (p) and anisotropic (q) components derived from the diffusion tensor imaging (DTI) may result in a more accurate analysis of brain images. Methods: We propose a novel 3D supervoxel based learning method for segmentation of tumour in multimodal MRI brain images (conventional MRI and DTI). Supervoxels are generated using the information across the multimodal MRI dataset. For each supervoxel, a variety of features including histograms of texton descriptor, calculated using a set of Gabor filters with different sizes and orientations, and first order intensity statistical features are extracted. Those features are fed into a random forests (RF) classifier to classify each supervoxel into tumour core, oedema or healthy brain tissue. Results: The method is evaluated on two datasets: 1) Our clinical dataset: 11 multimodal images of patients and 2) BRATS 2013 clinical dataset: 30 multimodal images. For our clinical dataset, the average detection sensitivity of tumour (including tumour core and oedema) using multimodal MRI is 86% with balanced error rate (BER) 7%; while the Dice score for automatic tumour segmentation against ground truth is 0.84. The corresponding results of the BRATS 2013 dataset are 96%, 2% and 0.89, respectively. Conclusion: The method demonstrates promising results in the segmentation of brain tumour. Adding features from multimodal MRI images can largely increase the segmentation accuracy. The method provides a close match to expert delineation across all tumour grades, leading to a faster and more reproducible method of brain tumour detection and delineation to aid patient management.},\n bibtype = {article},\n author = {Soltaninejad, Mohammadreza and Yang, Guang and Lambrou, Tryphon and Allinson, Nigel and Jones, Timothy L. and Barrick, Thomas R. and Howe, Franklyn A. and Ye, Xujiong},\n doi = {10.1016/j.cmpb.2018.01.003},\n journal = {Computer Methods and Programs in Biomedicine}\n}
Background: Accurate segmentation of brain tumour in magnetic resonance images (MRI) is a difficult task due to various tumour types. Using information and features from multimodal MRI including structural MRI and isotropic (p) and anisotropic (q) components derived from the diffusion tensor imaging (DTI) may result in a more accurate analysis of brain images. Methods: We propose a novel 3D supervoxel based learning method for segmentation of tumour in multimodal MRI brain images (conventional MRI and DTI). Supervoxels are generated using the information across the multimodal MRI dataset. For each supervoxel, a variety of features including histograms of texton descriptor, calculated using a set of Gabor filters with different sizes and orientations, and first order intensity statistical features are extracted. Those features are fed into a random forests (RF) classifier to classify each supervoxel into tumour core, oedema or healthy brain tissue. Results: The method is evaluated on two datasets: 1) Our clinical dataset: 11 multimodal images of patients and 2) BRATS 2013 clinical dataset: 30 multimodal images. For our clinical dataset, the average detection sensitivity of tumour (including tumour core and oedema) using multimodal MRI is 86% with balanced error rate (BER) 7%; while the Dice score for automatic tumour segmentation against ground truth is 0.84. The corresponding results of the BRATS 2013 dataset are 96%, 2% and 0.89, respectively. Conclusion: The method demonstrates promising results in the segmentation of brain tumour. Adding features from multimodal MRI images can largely increase the segmentation accuracy. The method provides a close match to expert delineation across all tumour grades, leading to a faster and more reproducible method of brain tumour detection and delineation to aid patient management.
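A minimal sketch of the supervoxel-plus-random-forest idea follows, assuming a recent scikit-image and scikit-learn. The toy volume, the two-number intensity descriptor and the threshold-derived training labels are stand-ins for the paper's Gabor/texton histogram features and expert-annotated ground truth.

import numpy as np
from skimage.segmentation import slic
from sklearn.ensemble import RandomForestClassifier

volume = np.random.rand(32, 64, 64)                  # toy stand-in for an MRI volume
labels = slic(volume, n_segments=300, compactness=0.1,
              channel_axis=None)                     # supervoxel id per voxel

ids = np.unique(labels)
features = np.array([[volume[labels == i].mean(),
                      volume[labels == i].std()] for i in ids])

# Toy training labels (tumour-like vs. healthy); in practice these would come
# from expert ground truth mapped onto each supervoxel.
y = (features[:, 0] > 0.5).astype(int)

rf = RandomForestClassifier(n_estimators=100, random_state=0).fit(features, y)
voxelwise_prediction = rf.predict(features)[np.searchsorted(ids, labels)]
print(voxelwise_prediction.shape)                    # same shape as `labels`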
Fully automatic segmentation and objective assessment of atrial scars for long-standing persistent atrial fibrillation patients using late gadolinium-enhanced MRI. Yang, G.; Zhuang, X.; Khan, H.; Haldar, S.; Nyktari, E.; Li, L.; Wage, R.; Ye, X.; Slabaugh, G.; Mohiaddin, R.; Wong, T.; Keegan, J.; and Firmin, D. Medical Physics, 45(4): 1562-1576, 2018.
\n\n\n\n \n \n \"FullyPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Fully automatic segmentation and objective assessment of atrial scars for long-standing persistent atrial fibrillation patients using late gadolinium-enhanced MRI},\n type = {article},\n year = {2018},\n keywords = {atrial fibrillation,cardiovascular magnetic resonance imaging,late gadolinium-enhanced MRI,medical image segmentation,whole heart segmentation},\n pages = {1562-1576},\n volume = {45},\n id = {6109a8fd-0be8-3a27-af87-d0af9facf25b},\n created = {2024-01-13T06:15:55.651Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:23:34.578Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Purpose: Atrial fibrillation (AF) is the most common heart rhythm disorder and causes considerable morbidity and mortality, resulting in a large public health burden that is increasing as the population ages. It is associated with atrial fibrosis, the amount and distribution of which can be used to stratify patients and to guide subsequent electrophysiology ablation treatment. Atrial fibrosis may be assessed noninvasively using late gadolinium-enhanced (LGE) magnetic resonance imaging (MRI) where scar tissue is visualized as a region of signal enhancement. However, manual segmentation of the heart chambers and of the atrial scar tissue is time consuming and subject to interoperator variability, particularly as image quality in AF is often poor. In this study, we propose a novel fully automatic pipeline to achieve accurate and objective segmentation of the heart (from MRI Roadmap data) and of scar tissue within the heart (from LGE MRI data) acquired in patients with AF. Methods: Our fully automatic pipeline uniquely combines: (a) a multiatlas-based whole heart segmentation (MA-WHS) to determine the cardiac anatomy from an MRI Roadmap acquisition which is then mapped to LGE MRI, and (b) a super-pixel and supervised learning based approach to delineate the distribution and extent of atrial scarring in LGE MRI. We compared the accuracy of the automatic analysis to manual ground truth segmentations in 37 patients with persistent long-standing AF. Results: Both our MA-WHS and atrial scarring segmentations showed accurate delineations of cardiac anatomy (mean Dice = 89%) and atrial scarring (mean Dice = 79%), respectively, compared to the established ground truth from manual segmentation. In addition, compared to the ground truth, we obtained 88% segmentation accuracy, with 90% sensitivity and 79% specificity. Receiver operating characteristic analysis achieved an average area under the curve of 0.91. Conclusion: Compared with previously studied methods with manual interventions, our innovative pipeline demonstrated comparable results, but was computed fully automatically. The proposed segmentation methods allow LGE MRI to be used as an objective assessment tool for localization, visualization, and quantitation of atrial scarring and to guide ablation treatment.},\n bibtype = {article},\n author = {Yang, Guang and Zhuang, Xiahai and Khan, Habib and Haldar, Shouvik and Nyktari, Eva and Li, Lei and Wage, Ricardo and Ye, Xujiong and Slabaugh, Greg and Mohiaddin, Raad and Wong, Tom and Keegan, Jennifer and Firmin, David},\n doi = {10.1002/mp.12832},\n journal = {Medical Physics},\n number = {4}\n}
Purpose: Atrial fibrillation (AF) is the most common heart rhythm disorder and causes considerable morbidity and mortality, resulting in a large public health burden that is increasing as the population ages. It is associated with atrial fibrosis, the amount and distribution of which can be used to stratify patients and to guide subsequent electrophysiology ablation treatment. Atrial fibrosis may be assessed noninvasively using late gadolinium-enhanced (LGE) magnetic resonance imaging (MRI) where scar tissue is visualized as a region of signal enhancement. However, manual segmentation of the heart chambers and of the atrial scar tissue is time consuming and subject to interoperator variability, particularly as image quality in AF is often poor. In this study, we propose a novel fully automatic pipeline to achieve accurate and objective segmentation of the heart (from MRI Roadmap data) and of scar tissue within the heart (from LGE MRI data) acquired in patients with AF. Methods: Our fully automatic pipeline uniquely combines: (a) a multiatlas-based whole heart segmentation (MA-WHS) to determine the cardiac anatomy from an MRI Roadmap acquisition which is then mapped to LGE MRI, and (b) a super-pixel and supervised learning based approach to delineate the distribution and extent of atrial scarring in LGE MRI. We compared the accuracy of the automatic analysis to manual ground truth segmentations in 37 patients with persistent long-standing AF. Results: Both our MA-WHS and atrial scarring segmentations showed accurate delineations of cardiac anatomy (mean Dice = 89%) and atrial scarring (mean Dice = 79%), respectively, compared to the established ground truth from manual segmentation. In addition, compared to the ground truth, we obtained 88% segmentation accuracy, with 90% sensitivity and 79% specificity. Receiver operating characteristic analysis achieved an average area under the curve of 0.91. Conclusion: Compared with previously studied methods with manual interventions, our innovative pipeline demonstrated comparable results, but was computed fully automatically. The proposed segmentation methods allow LGE MRI to be used as an objective assessment tool for localization, visualization, and quantitation of atrial scarring and to guide ablation treatment.
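The Dice, sensitivity and specificity figures quoted above are standard binary-mask overlap measures. The generic snippet below shows how such metrics are typically computed from a predicted and a ground-truth mask; it is illustrative evaluation code, not taken from the paper's own analysis scripts.

import numpy as np

def dice(pred: np.ndarray, truth: np.ndarray) -> float:
    pred, truth = pred.astype(bool), truth.astype(bool)
    intersection = np.logical_and(pred, truth).sum()
    return 2.0 * intersection / (pred.sum() + truth.sum())

def sensitivity_specificity(pred: np.ndarray, truth: np.ndarray):
    pred, truth = pred.astype(bool), truth.astype(bool)
    tp = np.logical_and(pred, truth).sum()
    tn = np.logical_and(~pred, ~truth).sum()
    fp = np.logical_and(pred, ~truth).sum()
    fn = np.logical_and(~pred, truth).sum()
    return tp / (tp + fn), tn / (tn + fp)

# Toy masks: ground truth and a slightly shifted prediction
truth = np.zeros((64, 64), dtype=bool); truth[20:40, 20:40] = True
pred = np.zeros_like(truth);            pred[22:42, 22:42] = True
print(dice(pred, truth), *sensitivity_specificity(pred, truth))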
Identifying the Best Machine Learning Algorithms for Brain Tumor Segmentation, Progression Assessment, and Overall Survival Prediction in the BRATS Challenge. Bakas, S.; Reyes, M.; Jakab, A.; Bauer, S.; Rempfler, M.; Crimi, A.; Shinohara, R., T.; Berger, C.; Ha, S., M.; Rozycki, M.; and others. arXiv preprint arXiv:1811.02629, 2018.
\n\n\n\n \n \n \"IdentifyingPaper\n  \n \n \n \"IdentifyingWebsite\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Identifying the Best Machine Learning Algorithms for Brain Tumor Segmentation, Progression Assessment, and Overall Survival Prediction in the BRATS Challenge},\n type = {article},\n year = {2018},\n websites = {http://arxiv.org/abs/1811.02629},\n id = {9220cb01-73b4-3bb5-b1b2-7aac710a23e9},\n created = {2024-01-13T06:15:56.440Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:14:18.970Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Gliomas are the most common primary brain malignancies, with different degrees of aggressiveness, variable prognosis and various heterogeneous histologic sub-regions, i.e., peritumoral edematous/invaded tissue, necrotic core, active and non-enhancing core. This intrinsic heterogeneity is also portrayed in their radio-phenotype, as their sub-regions are depicted by varying intensity profiles disseminated across multi-parametric magnetic resonance imaging (mpMRI) scans, reflecting varying biological properties. Their heterogeneous shape, extent, and location are some of the factors that make these tumors difficult to resect, and in some cases inoperable. The amount of resected tumor is a factor also considered in longitudinal scans, when evaluating the apparent tumor for potential diagnosis of progression. Furthermore, there is mounting evidence that accurate segmentation of the various tumor sub-regions can offer the basis for quantitative image analysis towards prediction of patient overall survival. This study assesses the state-of-the-art machine learning (ML) methods used for brain tumor image analysis in mpMRI scans, during the last seven instances of the International Brain Tumor Segmentation (BraTS) challenge, i.e., 2012-2018. Specifically, we focus on i) evaluating segmentations of the various glioma sub-regions in pre-operative mpMRI scans, ii) assessing potential tumor progression by virtue of longitudinal growth of tumor sub-regions, beyond use of the RECIST/RANO criteria, and iii) predicting the overall survival from pre-operative mpMRI scans of patients that underwent gross total resection. Finally, we investigate the challenge of identifying the best ML algorithms for each of these tasks, considering that apart from being diverse on each instance of the challenge, the multi-institutional mpMRI BraTS dataset has also been a continuously evolving/growing dataset.},\n bibtype = {article},\n author = {Bakas, Spyridon and Reyes, Mauricio and Jakab, Andras and Bauer, Stefan and Rempfler, Markus and Crimi, Alessandro and Shinohara, Russell Takeshi and Berger, Christoph and Ha, Sung Min and Rozycki, Martin and Prastawa, Marcel and Alberts, Esther and Lipkova, Jana and Freymann, John and Kirby, Justin and Bilello, Michel and Fathallah-Shaykh, Hassan and Wiest, Roland and Kirschke, Jan and Wiestler, Benedikt and Colen, Rivka and Kotrotsou, Aikaterini and Lamontagne, Pamela and Marcus, Daniel and Milchenko, Mikhail and Nazeri, Arash and Weber, Marc-Andre and Mahajan, Abhishek and Baid, Ujjwal and Gerstner, Elizabeth and Kwon, Dongjin and Acharya, Gagan and Agarwal, Manu and Alam, Mahbubul and Albiol, Alberto and Albiol, Antonio and Albiol, Francisco J. and Alex, Varghese and Allinson, Nigel and Amorim, Pedro H. A. 
and Amrutkar, Abhijit and Anand, Ganesh and Andermatt, Simon and Arbel, Tal and Arbelaez, Pablo and Avery, Aaron and Azmat, Muneeza and B., Pranjal and Bai, W and Banerjee, Subhashis and Barth, Bill and Batchelder, Thomas and Batmanghelich, Kayhan and Battistella, Enzo and Beers, Andrew and Belyaev, Mikhail and Bendszus, Martin and Benson, Eze and Bernal, Jose and Bharath, Halandur Nagaraja and Biros, George and Bisdas, Sotirios and Brown, James and Cabezas, Mariano and Cao, Shilei and Cardoso, Jorge M. and Carver, Eric N and Casamitjana, Adrià and Castillo, Laura Silvana and Catà, Marcel and Cattin, Philippe and Cerigues, Albert and Chagas, Vinicius S. and Chandra, Siddhartha and Chang, Yi-Ju and Chang, Shiyu and Chang, Ken and Chazalon, Joseph and Chen, Shengcong and Chen, Wei and Chen, Jefferson W and Chen, Zhaolin and Cheng, Kun and Choudhury, Ahana Roy and Chylla, Roger and Clérigues, Albert and Colleman, Steven and Colmeiro, Ramiro German Rodriguez and Combalia, Marc and Costa, Anthony and Cui, Xiaomeng and Dai, Zhenzhen and Dai, Lutao and Daza, Laura Alexandra and Deutsch, Eric and Ding, Changxing and Dong, Chao and Dong, Shidu and Dudzik, Wojciech and Eaton-Rosen, Zach and Egan, Gary and Escudero, Guilherme and Estienne, Théo and Everson, Richard and Fabrizio, Jonathan and Fan, Yong and Fang, Longwei and Feng, Xue and Ferrante, Enzo and Fidon, Lucas and Fischer, Martin and French, Andrew P. and Fridman, Naomi and Fu, Huan and Fuentes, David and Gao, Yaozong and Gates, Evan and Gering, David and Gholami, Amir and Gierke, Willi and Glocker, Ben and Gong, Mingming and González-Villá, Sandra and Grosges, T. and Guan, Yuanfang and Guo, Sheng and Gupta, Sudeep and Han, Woo-Sup and Han, Il Song and Harmuth, Konstantin and He, Huiguang and Hernández-Sabaté, Aura and Herrmann, Evelyn and Himthani, Naveen and Hsu, Winston and Hsu, Cheyu and Hu, Xiaojun and Hu, Xiaobin and Hu, Yan and Hu, Yifan and Hua, Rui and Huang, Teng-Yi and Huang, Weilin and Van Huffel, Sabine and Huo, Quan and HV, Vivek and Iftekharuddin, Khan M. and Isensee, Fabian and Islam, Mobarakol and Jackson, Aaron S. and Jambawalikar, Sachin R. and Jesson, Andrew and Jian, Weijian and Jin, Peter and Jose, V Jeya Maria and Jungo, Alain and Kainz, B and Kamnitsas, Konstantinos and Kao, Po-Yu and Karnawat, Ayush and Kellermeier, Thomas and Kermi, Adel and Keutzer, Kurt and Khadir, Mohamed Tarek and Khened, Mahendra and Kickingereder, Philipp and Kim, Geena and King, Nik and Knapp, Haley and Knecht, Urspeter and Kohli, Lisa and Kong, Deren and Kong, Xiangmao and Koppers, Simon and Kori, Avinash and Krishnamurthi, Ganapathy and Krivov, Egor and Kumar, Piyush and Kushibar, Kaisar and Lachinov, Dmitrii and Lambrou, Tryphon and Lee, Joon and Lee, Chengen and Lee, Yuehchou and Lee, M and Lefkovits, Szidonia and Lefkovits, Laszlo and Levitt, James and Li, Tengfei and Li, Hongwei and Li, Wenqi and Li, Hongyang and Li, Xiaochuan and Li, Yuexiang and Li, Heng and Li, Zhenye and Li, Xiaoyu and Li, Zeju and Li, XiaoGang and Li, Wenqi and Lin, Zheng-Shen and Lin, Fengming and Lio, Pietro and Liu, Chang and Liu, Boqiang and Liu, Xiang and Liu, Mingyuan and Liu, Ju and Liu, Luyan and Llado, Xavier and Lopez, Marc Moreno and Lorenzo, Pablo Ribalta and Lu, Zhentai and Luo, Lin and Luo, Zhigang and Ma, Jun and Ma, Kai and Mackie, Thomas and Madabushi, Anant and Mahmoudi, Issam and Maier-Hein, Klaus H. and Maji, Pradipta and Mammen, CP and Mang, Andreas and Manjunath, B. S. 
and Marcinkiewicz, Michal and McDonagh, S and McKenna, Stephen and McKinley, Richard and Mehl, Miriam and Mehta, Sachin and Mehta, Raghav and Meier, Raphael and Meinel, Christoph and Merhof, Dorit and Meyer, Craig and Miller, Robert and Mitra, Sushmita and Moiyadi, Aliasgar and Molina-Garcia, David and Monteiro, Miguel A. B. and Mrukwa, Grzegorz and Myronenko, Andriy and Nalepa, Jakub and Ngo, Thuyen and Nie, Dong and Ning, Holly and Niu, Chen and Nuechterlein, Nicholas K and Oermann, Eric and Oliveira, Arlindo and Oliveira, Diego D. C. and Oliver, Arnau and Osman, Alexander F. I. and Ou, Yu-Nian and Ourselin, Sebastien and Paragios, Nikos and Park, Moo Sung and Paschke, Brad and Pauloski, J. Gregory and Pawar, Kamlesh and Pawlowski, Nick and Pei, Linmin and Peng, Suting and Pereira, Silvio M. and Perez-Beteta, Julian and Perez-Garcia, Victor M. and Pezold, Simon and Pham, Bao and Phophalia, Ashish and Piella, Gemma and Pillai, G. N. and Piraud, Marie and Pisov, Maxim and Popli, Anmol and Pound, Michael P. and Pourreza, Reza and Prasanna, Prateek and Prkovska, Vesna and Pridmore, Tony P. and Puch, Santi and Puybareau, Élodie and Qian, Buyue and Qiao, Xu and Rajchl, Martin and Rane, Swapnil and Rebsamen, Michael and Ren, Hongliang and Ren, Xuhua and Revanuru, Karthik and Rezaei, Mina and Rippel, Oliver and Rivera, Luis Carlos and Robert, Charlotte and Rosen, Bruce and Rueckert, Daniel and Safwan, Mohammed and Salem, Mostafa and Salvi, Joaquim and Sanchez, Irina and Sánchez, Irina and Santos, Heitor M. and Sartor, Emmett and Schellingerhout, Dawid and Scheufele, Klaudius and Scott, Matthew R. and Scussel, Artur A. and Sedlar, Sara and Serrano-Rubio, Juan Pablo and Shah, N. Jon and Shah, Nameetha and Shaikh, Mazhar and Shankar, B. Uma and Shboul, Zeina and Shen, Haipeng and Shen, Dinggang and Shen, Linlin and Shen, Haocheng and Shenoy, Varun and Shi, Feng and Shin, Hyung Eun and Shu, Hai and Sima, Diana and Sinclair, M and Smedby, Orjan and Snyder, James M. and Soltaninejad, Mohammadreza and Song, Guidong and Soni, Mehul and Stawiaski, Jean and Subramanian, Shashank and Sun, Li and Sun, Roger and Sun, Jiawei and Sun, Kay and Sun, Yu and Sun, Guoxia and Sun, Shuang and Suter, Yannick R and Szilagyi, Laszlo and Talbar, Sanjay and Tao, Dacheng and Tao, Dacheng and Teng, Zhongzhao and Thakur, Siddhesh and Thakur, Meenakshi H and Tharakan, Sameer and Tiwari, Pallavi and Tochon, Guillaume and Tran, Tuan and Tsai, Yuhsiang M. and Tseng, Kuan-Lun and Tuan, Tran Anh and Turlapov, Vadim and Tustison, Nicholas and Vakalopoulou, Maria and Valverde, Sergi and Vanguri, Rami and Vasiliev, Evgeny and Ventura, Jonathan and Vera, Luis and Vercauteren, Tom and Verrastro, C. A. and Vidyaratne, Lasitha and Vilaplana, Veronica and Vivekanandan, Ajeet and Wang, Guotai and Wang, Qian and Wang, Chiatse J. 
and Wang, Weichung and Wang, Duo and Wang, Ruixuan and Wang, Yuanyuan and Wang, Chunliang and Wang, Guotai and Wen, Ning and Wen, Xin and Weninger, Leon and Wick, Wolfgang and Wu, Shaocheng and Wu, Qiang and Wu, Yihong and Xia, Yong and Xu, Yanwu and Xu, Xiaowen and Xu, Peiyuan and Yang, Tsai-Ling and Yang, Xiaoping and Yang, Hao-Yu and Yang, Junlin and Yang, Haojin and Yang, Guang and Yao, Hongdou and Ye, Xujiong and Yin, Changchang and Young-Moxon, Brett and Yu, Jinhua and Yue, Xiangyu and Zhang, Songtao and Zhang, Angela and Zhang, Kun and Zhang, Xuejie and Zhang, Lichi and Zhang, Xiaoyue and Zhang, Yazhuo and Zhang, Lei and Zhang, Jianguo and Zhang, Xiang and Zhang, Tianhao and Zhao, Sicheng and Zhao, Yu and Zhao, Xiaomei and Zhao, Liang and Zheng, Yefeng and Zhong, Liming and Zhou, Chenhong and Zhou, Xiaobing and Zhou, Fan and Zhu, Hongtu and Zhu, Jin and Zhuge, Ying and Zong, Weiwei and Kalpathy-Cramer, Jayashree and Farahani, Keyvan and Davatzikos, Christos and van Leemput, Koen and Menze, Bjoern},\n journal = {arXiv preprint arXiv:1811.02629}\n}
Gliomas are the most common primary brain malignancies, with different degrees of aggressiveness, variable prognosis and various heterogeneous histologic sub-regions, i.e., peritumoral edematous/invaded tissue, necrotic core, active and non-enhancing core. This intrinsic heterogeneity is also portrayed in their radio-phenotype, as their sub-regions are depicted by varying intensity profiles disseminated across multi-parametric magnetic resonance imaging (mpMRI) scans, reflecting varying biological properties. Their heterogeneous shape, extent, and location are some of the factors that make these tumors difficult to resect, and in some cases inoperable. The amount of resected tumor is a factor also considered in longitudinal scans, when evaluating the apparent tumor for potential diagnosis of progression. Furthermore, there is mounting evidence that accurate segmentation of the various tumor sub-regions can offer the basis for quantitative image analysis towards prediction of patient overall survival. This study assesses the state-of-the-art machine learning (ML) methods used for brain tumor image analysis in mpMRI scans, during the last seven instances of the International Brain Tumor Segmentation (BraTS) challenge, i.e., 2012-2018. Specifically, we focus on i) evaluating segmentations of the various glioma sub-regions in pre-operative mpMRI scans, ii) assessing potential tumor progression by virtue of longitudinal growth of tumor sub-regions, beyond use of the RECIST/RANO criteria, and iii) predicting the overall survival from pre-operative mpMRI scans of patients that underwent gross total resection. Finally, we investigate the challenge of identifying the best ML algorithms for each of these tasks, considering that apart from being diverse on each instance of the challenge, the multi-institutional mpMRI BraTS dataset has also been a continuously evolving/growing dataset.
MRI Brain Tumor Segmentation and Patient Survival Prediction Using Random Forests and Fully Convolutional Networks. Soltaninejad, M.; Zhang, L.; Lambrou, T.; Yang, G.; Allinson, N.; and Ye, X. In Medical Image Computing and Computer Assisted Intervention MICCAI 2017 Brainlesion Workshop, volume 10670 LNCS, pages 204-215, Springer, Cham, 2018.
\n\n\n\n \n \n \"MRIPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{\n title = {MRI Brain Tumor Segmentation and Patient Survival Prediction Using Random Forests and Fully Convolutional Networks},\n type = {inproceedings},\n year = {2018},\n keywords = {Brain tumor segmentation,Deep learning,Fully convolutional networks,MRI,Random forest,Texton},\n pages = {204-215},\n volume = {10670 LNCS},\n publisher = {Springer, Cham},\n id = {5543dc80-9515-3709-be80-8f3c460dc9b0},\n created = {2024-01-13T08:14:13.837Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:16:55.699Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n source_type = {CONF},\n private_publication = {false},\n abstract = {In this paper, we propose a learning based method for automated segmentation of brain tumor in multimodal MRI images, which incorporates two sets of machine-learned and hand-crafted features. Fully convolutional networks (FCN) forms the machine-learned features and texton based histograms are considered as hand-crafted features. Random forest (RF) is used to classify the MRI image voxels into normal brain tissues and different parts of tumors. The volumetric features from the segmented tumor tissues and patient age applying to an RF is used to predict the survival time. The method was evaluated on MICCAI-BRATS 2017 challenge dataset. The mean Dice overlap measures for segmentation of validation dataset are 0.86, 0.78 and 0.66 for whole tumor, core and enhancing tumor, respectively. The validation Hausdorff values are 7.61, 8.70 and 3.76. For the survival prediction task, the classification accuracy, pairwise mean square error and Spearman rank are 0.485, 198749 and 0.334, respectively.},\n bibtype = {inproceedings},\n author = {Soltaninejad, Mohammadreza and Zhang, Lei and Lambrou, Tryphon and Yang, Guang and Allinson, Nigel and Ye, Xujiong},\n doi = {10.1007/978-3-319-75238-9_18},\n booktitle = {Medical Image Computing and Computer Assisted Intervention MICCAI 2017 Brainlesion Workshop}\n}
In this paper, we propose a learning-based method for automated segmentation of brain tumor in multimodal MRI images, which incorporates two sets of machine-learned and hand-crafted features. Fully convolutional networks (FCN) provide the machine-learned features, and texton-based histograms are used as hand-crafted features. A random forest (RF) is used to classify the MRI image voxels into normal brain tissues and different parts of tumors. The volumetric features from the segmented tumor tissues, together with patient age, are fed into an RF to predict the survival time. The method was evaluated on the MICCAI-BRATS 2017 challenge dataset. The mean Dice overlap measures for segmentation of the validation dataset are 0.86, 0.78 and 0.66 for whole tumor, core and enhancing tumor, respectively. The validation Hausdorff values are 7.61, 8.70 and 3.76. For the survival prediction task, the classification accuracy, pairwise mean square error and Spearman rank are 0.485, 198749 and 0.334, respectively.
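The fusion of machine-learned and hand-crafted per-voxel features described above can be sketched generically as feature concatenation followed by a random-forest classifier. The array shapes, the Dirichlet-sampled stand-in for FCN class probabilities and the random texton histograms below are illustrative assumptions, not the paper's implementation.

import numpy as np
from sklearn.ensemble import RandomForestClassifier

n_voxels, n_classes, n_texton_bins = 5000, 4, 16
fcn_probs = np.random.dirichlet(np.ones(n_classes), size=n_voxels)   # machine-learned features (stand-in)
texton_hist = np.random.rand(n_voxels, n_texton_bins)                # hand-crafted features (stand-in)
X = np.hstack([fcn_probs, texton_hist])                              # fused per-voxel feature vector
y = np.random.randint(0, n_classes, size=n_voxels)                   # toy tissue labels

rf = RandomForestClassifier(n_estimators=50, random_state=0).fit(X, y)
print(rf.predict(X[:5]))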
Atrial Fibrosis Quantification Based on Maximum Likelihood Estimator of Multivariate Images. Wu, F.; Yang, G.; Li, L.; Xu, L.; Wong, T.; Mohiaddin, R.; Firmin, D.; Keegan, J.; and Zhuang, X. In Medical Image Computing and Computer Assisted Intervention (MICCAI 2018), volume 11073 LNCS, pages 604-612, Springer International Publishing, 2018.
\n\n\n\n \n \n \"AtrialPaper\n  \n \n \n \"AtrialWebsite\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Atrial Fibrosis Quantification Based on Maximum Likelihood Estimator of Multivariate Images},\n type = {inproceedings},\n year = {2018},\n pages = {604-612},\n volume = {11073 LNCS},\n websites = {http://dx.doi.org/10.1007/978-3-030-00937-3_69},\n publisher = {Springer International Publishing},\n id = {047bb17f-bf09-38c3-be4f-702aaff8adfa},\n created = {2024-01-13T08:14:14.165Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:18:35.734Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n source_type = {CONF},\n private_publication = {false},\n abstract = {We present a fully-automated segmentation and quantification of the left atrial (LA) fibrosis and scars combining two cardiac MRIs, one is the target late gadolinium-enhanced (LGE) image, and the other is an anatomical MRI from the same acquisition session. We formulate the joint distribution of images using a multivariate mixture model (MvMM), and employ the maximum likelihood estimator (MLE) for texture classification of the images simultaneously. The MvMM can also embed transformations assigned to the images to correct the misregistration. The iterated conditional mode algorithm is adopted for optimization. This method first extracts the anatomical shape of the LA, and then estimates a prior probability map. It projects the resulting segmentation onto the LA surface, for quantification and analysis of scarring. We applied the proposed method to 36 clinical data sets and obtained promising results (Accuracy: 0.809±150, Dice: 0.556±187). We compared the method with the conventional algorithms and showed an evidently and statistically better performance (p < 0.03).},\n bibtype = {inproceedings},\n author = {Wu, Fuping and Yang, Guang and Li, Lei and Xu, Lingchao and Wong, Tom and Mohiaddin, Raad and Firmin, David and Keegan, Jennifer and Zhuang, Xiahai and Yang, Guang and Wong, Tom and Mohiaddin, Raad and Firmin, David and Keegan, Jennifer and Xu, Lingchao and Zhuang, Xiahai},\n doi = {10.1007/978-3-030-00937-3_69},\n booktitle = {Medical Image Computing and Computer Assisted Intervention (MICCAI 2018)}\n}
We present a fully-automated segmentation and quantification of the left atrial (LA) fibrosis and scars combining two cardiac MRIs: one is the target late gadolinium-enhanced (LGE) image, and the other is an anatomical MRI from the same acquisition session. We formulate the joint distribution of images using a multivariate mixture model (MvMM), and employ the maximum likelihood estimator (MLE) for texture classification of the images simultaneously. The MvMM can also embed transformations assigned to the images to correct the misregistration. The iterated conditional mode algorithm is adopted for optimization. This method first extracts the anatomical shape of the LA, and then estimates a prior probability map. It projects the resulting segmentation onto the LA surface, for quantification and analysis of scarring. We applied the proposed method to 36 clinical data sets and obtained promising results (Accuracy: 0.809±150, Dice: 0.556±187). We compared the method with the conventional algorithms and showed an evidently and statistically better performance (p < 0.03).
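As a heavily simplified stand-in for the maximum-likelihood mixture idea above, the snippet below fits a univariate two-component Gaussian mixture to toy LGE intensities and labels the brighter component as scar. The actual method models multivariate images jointly and estimates registration corrections via iterated conditional modes, none of which is attempted here; the intensity values are invented for the example.

import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.default_rng(0)
intensities = np.concatenate([rng.normal(100, 10, 900),    # toy healthy wall intensities
                              rng.normal(170, 15, 100)])   # toy enhanced scar intensities

gmm = GaussianMixture(n_components=2, random_state=0).fit(intensities.reshape(-1, 1))
scar_component = int(np.argmax(gmm.means_.ravel()))        # brighter mixture component
is_scar = gmm.predict(intensities.reshape(-1, 1)) == scar_component
print(f"estimated scar fraction: {is_scar.mean():.2f}")    # roughly 0.10 on this toy data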
\n\n\n
\n\n\n
Statistical Atlases and Computational Models of the Heart. ACDC and MMWHS Challenges: 8th International Workshop, STACOM 2017, Held in Conjunction with MICCAI 2017, Quebec City, Canada, September 10-14, 2017, Revised Selected Papers.
Pop, M.; Sermesant, M.; Jodoin, P.; Lalande, A.; Zhuang, X.; Yang, G.; Young, A.; and Bernard, O.
Volume 10663. Springer, 2018.

The Deep Poincaré Map: A Novel Approach for Left Ventricle Segmentation.
Mo, Y.; Liu, F.; McIlwraith, D.; Yang, G.; Zhang, J.; He, T.; and Guo, Y.
Lecture Notes in Computer Science, volume 11073 LNCS, pages 561-568. Springer International Publishing, 2018. doi: 10.1007/978-3-030-00937-3_64.
Abstract: Precise segmentation of the left ventricle (LV) in cardiac MRI images is a prerequisite for the quantitative measurement of heart function. However, this task is challenging due to the limited availability of labeled data and motion artifacts in cardiac imaging. In this work, we present an iterative segmentation algorithm for LV delineation. By coupling deep learning with a novel dynamics-based labeling scheme, a policy model is learned that guides an agent over the image, tracing out the boundary of the ROI and using the magnitude difference of the Poincaré map as a stopping criterion. Our method is evaluated on two datasets, the Sunnybrook Cardiac Dataset (SCD) and data from the STACOM 2011 LV segmentation challenge, and outperforms previous work on many metrics. To demonstrate transferability, we also report encouraging results on the STACOM 2011 data using a model trained on the SCD dataset.

2017 (9)

Tissue type mapping of gliomas using multimodal MRI.
Raschke, F.; Barrick, T. R.; Yang, G.; Jones, T. L.; Ye, X.; and Howe, F. A.
In International Society for Magnetic Resonance in Medicine (ISMRM) 21st Annual Meeting, 2017. International Society for Magnetic Resonance in Medicine.

Automatic Brain Tumor Detection and Segmentation Using U-Net Based Fully Convolutional Networks.
Dong, H.; Yang, G.; Liu, F.; Mo, Y.; and Guo, Y.
In Annual Conference on Medical Image Understanding and Analysis, Communications in Computer and Information Science, 2017.

Segmenting Atrial Fibrosis from Late Gadolinium-Enhanced Cardiac MRI by Deep-Learned Features with Stacked Sparse Auto-Encoders.
Yang, G.; Zhuang, X.; Khan, H.; Haldar, S.; Nyktari, E.; Ye, X.; Slabaugh, G.; Wong, T.; Mohiaddin, R.; Keegan, J.; et al.
In Annual Conference on Medical Image Understanding and Analysis, Communications in Computer and Information Science, pages 195-206, 2017. Springer, Cham.

Deep De-Aliasing for Fast Compressive Sensing MRI.
Yu, S.; Dong, H.; Yang, G.; Slabaugh, G.; Dragotti, P. L.; Ye, X.; Liu, F.; Arridge, S.; Keegan, J.; Firmin, D.; et al.
arXiv preprint arXiv:1705.07137, 2017.

Differentiation of pre-ablation and post-ablation late gadolinium-enhanced cardiac MRI scans of longstanding persistent atrial fibrillation patients.
Yang, G.; Zhuang, X.; Khan, H.; Haldar, S.; Nyktari, E.; Li, L.; Ye, X.; Slabaugh, G.; Wong, T.; Mohiaddin, R.; Keegan, J.; and Firmin, D.
Medical Imaging 2017: Computer-Aided Diagnosis, volume 10134, page 101340O, March 2017. doi: 10.1117/12.2250910.
Abstract: Late Gadolinium-Enhanced Cardiac MRI (LGE CMRI) is an emerging non-invasive technique to image and quantify pre-ablation native and post-ablation atrial scarring. Previous studies have reported that enhanced image intensities of the atrial scarring in LGE CMRI inversely correlate with the left atrial endocardial voltage obtained invasively by electro-anatomical mapping. However, the reported reproducibility of using LGE CMRI to identify and quantify atrial scarring is variable. This may be due to two reasons: first, delineation of the left atrium (LA) and pulmonary veins (PVs) anatomy generally relies on manual operation that is highly subjective, which can substantially affect the subsequent atrial scarring segmentation; second, simple intensity-based image features may not be sensitive enough to detect subtle changes in atrial scarring. In this study, we hypothesized that texture analysis can provide reliable image features for LGE CMRI, subject to accurate and objective delineation of the heart anatomy based on a fully-automated whole heart segmentation (WHS) method. We tested the extracted texture features on the task of differentiating between pre-ablation and post-ablation LGE CMRI studies in longstanding persistent atrial fibrillation patients, who often have extensive native scarring that is difficult to distinguish from post-ablation scarring. Quantification results showed that our method is capable of solving this classification task, and we envisage further deployment of this texture-analysis-based method for other clinical problems using LGE CMRI.

Pairwise mixture model for unmixing partial volume effect in multi-voxel MR spectroscopy of brain tumour patients.
Olliverre, N.; Asad, M.; Yang, G.; Howe, F.; and Slabaugh, G.
Medical Imaging 2017: Computer-Aided Diagnosis, volume 10134, page 101341R, March 2017. doi: 10.1117/12.2255026.
Abstract: Multi-Voxel Magnetic Resonance Spectroscopy (MV-MRS) provides an important and insightful technique for examining the chemical composition of brain tissue, making it an attractive imaging modality for the examination of brain tumours. MRS is, however, affected by the Partial Volume Effect (PVE), where the signals of multiple tissue types can be found within a single voxel, posing an obstacle to interpretation of the data. The PVE results from the low resolution achieved in MV-MRS images, which is related to the signal-to-noise ratio (SNR). To counteract the PVE, this paper proposes a novel Pairwise Mixture Model (PMM) that extends a recently reported Signal Mixture Model (SMM) for representing the MV-MRS signal as normal, low-grade or high-grade tissue types. Inspired by the Conditional Random Field (CRF) and its continuous variant, the PMM incorporates the surrounding voxel neighbourhood into an optimisation problem whose solution provides an estimate of a set of coefficients. The estimated coefficients represent the amount of each tissue type (normal, low or high grade) found within a voxel, and can be visualised as a nosological rendering using a coloured grid representing the MV-MRS image overlaid on a structural image such as an MRI. Experimental results show an accuracy of 92.69% in classifying patient tumours as either low or high grade, compared against the histopathology for each patient. Relative to the 91.96% achieved by the SMM, the proposed PMM demonstrates the importance of incorporating spatial coherence into the estimation, as well as its potential clinical utility.

Multi-atlas propagation based left atrium segmentation coupled with super-voxel based pulmonary veins delineation in late gadolinium-enhanced cardiac MRI.
Yang, G.; Zhuang, X.; Khan, H.; Haldar, S.; Nyktari, E.; Li, L.; Ye, X.; Slabaugh, G.; Wong, T.; Mohiaddin, R.; Keegan, J.; and Firmin, D.
Medical Imaging 2017: Image Processing, volume 10133, page 1013313, February 2017. doi: 10.1117/12.2250926.
Abstract: Late Gadolinium-Enhanced Cardiac MRI (LGE CMRI) is a non-invasive technique which has shown promise in detecting native and post-ablation atrial scarring. To visualize the scarring, a precise segmentation of the left atrium (LA) and pulmonary veins (PVs) anatomy is performed as a first step, usually from an ECG-gated CMRI roadmap acquisition, and the enhanced scar regions from the LGE CMRI images are superimposed. The anatomy of the LA and in particular the PVs is highly variable, and manual segmentation is labor intensive and highly subjective. In this paper, we developed a multi-atlas propagation based whole heart segmentation (WHS) to delineate the LA and PVs from ECG-gated CMRI roadmap scans. While this captures the anatomy of the atrium well, the PVs anatomy is less easily visualized, so the process is augmented by semi-automated manual strokes for PVs identification in the registered LGE CMRI data. This allows us to extract more accurate anatomy than the fully automated WHS. Both qualitative visualization and quantitative assessment against manually segmented ground truth showed that our method is efficient and effective, with an overall mean Dice score of 0.91.

A fully automatic deep learning method for atrial scarring segmentation from late gadolinium-enhanced MRI images.
Yang, G.; Zhuang, X.; Khan, H.; Haldar, S.; Nyktari, E.; Ye, X.; Slabaugh, G.; Wong, T.; Mohiaddin, R.; Keegan, J.; et al.
In 2017 IEEE 14th International Symposium on Biomedical Imaging (ISBI), pages 844-848, 2017. IEEE.

Automated brain tumour detection and segmentation using superpixel-based extremely randomized trees in FLAIR MRI.
Soltaninejad, M.; Yang, G.; Lambrou, T.; Allinson, N.; Jones, T. L.; Barrick, T. R.; Howe, F. A.; and Ye, X.
International Journal of Computer Assisted Radiology and Surgery, 12(2): 183-203, 2017. Springer.

2016 (6)

Supervised Partial Volume Effect Unmixing for Brain Tumor Characterization using Multi-voxel MR Spectroscopic Imaging.
Asad, M.; Yang, G.; and Slabaugh, G.
In 2016 IEEE 13th International Symposium on Biomedical Imaging (ISBI), pages 436-439, 2016. IEEE.

Super-Resolved Enhancement of a Single Image and Its Application in Cardiac MRI.
Yang, G.; Ye, X.; Slabaugh, G.; Keegan, J.; Mohiaddin, R.; and Firmin, D.
In International Conference on Image and Signal Processing, pages 179-190, 2016. Springer International Publishing.

Single-Image Super-Resolution and Its Application in Cardiac MRI: A Feasibility Study.
Yang, G.; Ye, X.; Slabaugh, G.; Keegan, J.; Mohiaddin, R.; and Firmin, D.
In 2016 IEEE 13th International Symposium on Biomedical Imaging (ISBI), page 1, 2016. IEEE.

On the averaging of cardiac diffusion tensor MRI data: The effect of distance function selection.
Giannakidis, A.; Melkus, G.; Yang, G.; and Gullberg, G. T.
Physics in Medicine and Biology, 61(21): 7765-7786, 2016. IOP Publishing. doi: 10.1088/0031-9155/61/21/7765.
Abstract: Diffusion tensor magnetic resonance imaging (DT-MRI) allows a unique insight into the microstructure of highly-directional tissues. The selection of the most appropriate distance function for the space of diffusion tensors is crucial in enhancing the clinical application of this imaging modality. Both linear and nonlinear metrics have been proposed in the literature over the years, and the debate on the most appropriate DT-MRI distance function is still ongoing. In this paper, we present a framework to compare the Euclidean, affine-invariant Riemannian and log-Euclidean metrics using actual high-resolution DT-MRI rat heart data. We employed temporal averaging, at the diffusion tensor level, of three consecutive and identically-acquired DT-MRI datasets from each of five rat hearts as a means to rectify the background noise-induced loss of myocyte directional regularity; this procedure is applied here for the first time in the context of tensor distance function selection. Compared with previous studies that used a different concrete application to juxtapose the various DT-MRI distance functions, this work is unique in that it combined the following: (i) metrics were judged by quantitative, rather than qualitative, criteria; (ii) the comparison tools were unbiased; (iii) a longitudinal comparison operation was used on a same-voxel basis. The statistical analyses showed that the three DT-MRI distance functions tend to provide equivalent results. Hence, we conclude that the tensor manifold for cardiac DT-MRI studies is a curved space of almost zero curvature. The signal-to-noise ratio dependence of the operations was investigated through simulations. Finally, the 'swelling effect' that follows Euclidean averaging was found to be too unimportant to be worth consideration.

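For readers unfamiliar with the metrics compared in the abstract above, the following is a minimal illustrative sketch, in Python with NumPy, of Euclidean versus log-Euclidean averaging of symmetric positive-definite diffusion tensors. It is not the paper's implementation, and the two example tensors are invented for the illustration.

# Illustrative sketch only: Euclidean vs log-Euclidean averaging of
# symmetric positive-definite (SPD) diffusion tensors. Not the paper's code;
# the example tensors below are arbitrary.
import numpy as np

def spd_log(t):
    # Matrix logarithm of an SPD matrix via eigendecomposition.
    w, v = np.linalg.eigh(t)
    return v @ np.diag(np.log(w)) @ v.T

def spd_exp(t):
    # Matrix exponential of a symmetric matrix via eigendecomposition.
    w, v = np.linalg.eigh(t)
    return v @ np.diag(np.exp(w)) @ v.T

def euclidean_mean(tensors):
    # Arithmetic mean taken component-wise in tensor space.
    return np.mean(tensors, axis=0)

def log_euclidean_mean(tensors):
    # Average the matrix logarithms, then map back with the matrix exponential.
    return spd_exp(np.mean([spd_log(t) for t in tensors], axis=0))

# Two synthetic 3x3 diffusion tensors (values in mm^2/s, made up for the example).
d1 = np.diag([1.7e-3, 0.4e-3, 0.3e-3])
d2 = np.diag([0.5e-3, 1.5e-3, 0.4e-3])

print(euclidean_mean([d1, d2]))
print(log_euclidean_mean([d1, d2]))

Because the log-Euclidean mean averages in the log domain, it avoids the tensor "swelling" that can accompany component-wise Euclidean averaging, which is the effect the study assesses.
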
Morphometric model for discrimination between glioblastoma multiforme and solitary metastasis using three-dimensional shape analysis.
Yang, G.; Jones, T. L.; Howe, F. A.; and Barrick, T. R.
Magnetic Resonance in Medicine, 75(6): 2505-2516, 2016. Wiley.

Combined self-learning based single-image super-resolution and dual-tree complex wavelet transform denoising for medical images.
Yang, G.; Ye, X.; Slabaugh, G.; Keegan, J.; Mohiaddin, R.; and Firmin, D.
Medical Imaging 2016: Image Processing, volume 9784, page 97840L, March 2016. International Society for Optics and Photonics. doi: 10.1117/12.2207440.
Abstract: In this paper, we propose a novel self-learning based single-image super-resolution (SR) method, which is coupled with dual-tree complex wavelet transform (DTCWT) based denoising to better recover high-resolution (HR) medical images. Unlike previous methods, this self-learning based SR approach enables us to reconstruct HR medical images from a single low-resolution (LR) image without extra training on HR image datasets in advance. The relationships between the given image and its scaled-down versions are modeled using support vector regression with sparse coding and dictionary learning, without explicitly assuming reoccurrence or self-similarity across image scales. In addition, we perform DTCWT based denoising to initialize the HR images at each scale instead of simple bicubic interpolation. We evaluate our method on a variety of medical images. Both quantitative and qualitative results show that the proposed approach outperforms bicubic interpolation and state-of-the-art single-image SR methods while effectively removing noise.

2015 (3)

Discrete Wavelet Transform-Based Whole-Spectral and Subspectral Analysis for Improved Brain Tumor Clustering Using Single Voxel MR Spectroscopy.
Yang, G.; Nawaz, T.; Barrick, T. R.; Howe, F. A.; and Slabaugh, G.
IEEE Transactions on Biomedical Engineering, 62(12): 2860-2866, 2015. IEEE.

An image analysis approach to MRI brain tumour grading.
Soltaninejad, M.; Ye, X.; Yang, G.; Allinson, N.; and Lambrou, T.
Oncology News, 9(6): 204-207, 2015.

Analysing MRI Data to Determine Tumour Type.
Yang, G.; Barrick, T. R.; Howe, F. A.; and Jones, T. L.
WO Patent WO/2015/040,434, April 2015.

2014 (4)

Brain tumor classification using the diffusion tensor image segmentation (D-SEG) technique.
Jones, T. L.; Byrnes, T. J.; Yang, G.; Howe, F. A.; Bell, B. A.; and Barrick, T. R.
Neuro-Oncology, 17(3): 466-476, 2014. Oxford University Press. doi: 10.1093/neuonc/nou159.
Abstract: BACKGROUND: There is an increasing demand for noninvasive brain tumor biomarkers to guide surgery and subsequent oncotherapy. We present a novel whole-brain diffusion tensor imaging (DTI) segmentation (D-SEG) to delineate tumor volumes of interest (VOIs) for subsequent classification of tumor type. D-SEG uses isotropic (p) and anisotropic (q) components of the diffusion tensor to segment regions with similar diffusion characteristics. METHODS: DTI scans were acquired from 95 patients with low- and high-grade glioma, metastases, and meningioma and from 29 healthy subjects. D-SEG uses k-means clustering of the 2D (p,q) space to generate segments with different isotropic and anisotropic diffusion characteristics. RESULTS: Our results are visualized using a novel RGB color scheme incorporating p, q and T2-weighted information within each segment. The volumetric contribution of each segment to gray matter, white matter, and cerebrospinal fluid spaces was used to generate healthy tissue D-SEG spectra. Tumor VOIs were extracted using a semiautomated flood-filling technique and D-SEG spectra were computed within the VOI. Classification of tumor type using D-SEG spectra was performed using support vector machines. D-SEG was computationally fast and stable and delineated regions of healthy tissue from tumor and edema. D-SEG spectra were consistent for each tumor type, with constituent diffusion characteristics potentially reflecting regional differences in tissue microstructure. Support vector machines classified tumor type with an overall accuracy of 94.7%, providing better classification than previously reported. CONCLUSIONS: D-SEG presents a user-friendly, semiautomated biomarker that may provide a valuable adjunct in noninvasive brain tumor diagnosis and treatment planning.

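As a rough illustration of the clustering step described in the abstract above (grouping voxels by their isotropic p and anisotropic q diffusion components), here is a minimal Python sketch using scikit-learn k-means on synthetic (p, q) pairs. The data, cluster count, and feature values are assumptions made for the example, not the study's settings.

# Illustrative sketch only: k-means clustering of voxels in a 2D (p, q)
# diffusion feature space, loosely in the spirit of D-SEG. Synthetic data;
# the cluster count and feature values are assumptions, not the study's.
import numpy as np
from sklearn.cluster import KMeans

rng = np.random.default_rng(0)

# Fake per-voxel features: columns are p (isotropic) and q (anisotropic) components.
pq = np.vstack([
    rng.normal([1.0, 0.2], 0.05, size=(200, 2)),  # one synthetic tissue cluster
    rng.normal([0.7, 0.5], 0.05, size=(200, 2)),  # another
    rng.normal([0.8, 0.3], 0.05, size=(200, 2)),  # and a third
])

kmeans = KMeans(n_clusters=3, n_init=10, random_state=0).fit(pq)
labels = kmeans.labels_            # segment label assigned to each voxel
centres = kmeans.cluster_centers_  # (p, q) centroid of each segment
print(centres)
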
Classification of brain tumour 1H MR spectra: Extracting features by metabolite quantification or nonlinear manifold learning?
Yang, G.; Raschke, F.; Barrick, T. R.; and Howe, F. A.
In 2014 IEEE 11th International Symposium on Biomedical Imaging (ISBI), pages 1039-1042, 2014. IEEE.

\n \n\n \n \n \n \n \n \n Brain tumour grading in different MRI protocols using SVM on statistical features.\n \n \n \n \n\n\n \n Soltaninejad, M.; Ye, X.; Yang, G.; Allinson, N.; and Lambrou, T.\n\n\n \n\n\n\n Medical Image Understanding and Analysis,259-264. 2014.\n \n\n\n\n
\n\n\n\n \n \n \"BrainPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{\n title = {Brain tumour grading in different MRI protocols using SVM on statistical features},\n type = {article},\n year = {2014},\n keywords = {brain tumour grading,mri images,pattern recognition,superpixel segmentation,svm classification},\n pages = {259-264},\n id = {e2fc6e0e-6fd8-35fa-91ad-026fb3ebfb30},\n created = {2024-01-13T06:15:55.682Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:23:06.791Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {In this paper a feasibility study of brain MRI data set classification, using ROIs which have been segmented either manually or through a superpixel based method in conjunction with statistical pattern recognition methods, is presented. In our study, 471 extracted ROIs from 21 Brain MRI datasets are used, in order to establish which features distinguish better between three grading classes. Thirty-eight statistical measurements were collected from the ROIs. We found by using the Leave-One-Out method that the combination of the features from the 1st and 2nd order statistics achieved high classification accuracy in pair-wise grading comparisons.},\n bibtype = {article},\n author = {Soltaninejad, M and Ye, X and Yang, G and Allinson, N and Lambrou, T},\n journal = {Medical Image Understanding and Analysis}\n}
\n
\n\n\n
\n In this paper a feasibility study of brain MRI data set classification, using ROIs which have been segmented either manually or through a superpixel based method in conjunction with statistical pattern recognition methods, is presented. In our study, 471 extracted ROIs from 21 Brain MRI datasets are used, in order to establish which features distinguish better between three grading classes. Thirty-eight statistical measurements were collected from the ROIs. We found by using the Leave-One-Out method that the combination of the features from the 1st and 2nd order statistics achieved high classification accuracy in pair-wise grading comparisons.\n
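\n As a rough illustration of the pipeline this abstract outlines (first- and second-order statistical features per ROI, SVM grading, leave-one-out evaluation), the sketch below uses a handful of first-order statistics plus scikit-image GLCM properties as stand-ins for the paper's thirty-eight unspecified measurements; it is an assumption-laden sketch, not the published code.\n

# Hypothetical reconstruction of the grading pipeline: per-ROI features,
# linear SVM, leave-one-out accuracy. Feature choices are assumptions.
import numpy as np
from scipy import stats
from skimage.feature import graycomatrix, graycoprops
from sklearn.svm import SVC
from sklearn.model_selection import LeaveOneOut, cross_val_score

def roi_features(roi):
    """First-order statistics plus a few GLCM (second-order) texture measures."""
    intensities = roi.ravel().astype(float)
    first_order = [intensities.mean(), intensities.std(),
                   stats.skew(intensities), stats.kurtosis(intensities)]
    # Quantise intensities to 32 grey levels for the co-occurrence matrix.
    bins = np.linspace(roi.min(), roi.max(), 32)
    quantised = (np.digitize(roi, bins) - 1).astype(np.uint8)
    glcm = graycomatrix(quantised, distances=[1], angles=[0],
                        levels=32, symmetric=True, normed=True)
    second_order = [graycoprops(glcm, prop)[0, 0]
                    for prop in ("contrast", "homogeneity", "energy", "correlation")]
    return np.array(first_order + second_order)

def leave_one_out_grading(rois, grades):
    """Leave-one-out SVM accuracy over per-ROI feature vectors."""
    X = np.vstack([roi_features(r) for r in rois])
    y = np.asarray(grades)
    scores = cross_val_score(SVC(kernel="linear"), X, y, cv=LeaveOneOut())
    return scores.mean()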
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Discrimination between glioblastoma multiforme and solitary metastasis using morphological features derived from the p:q tensor decomposition of diffusion tensor imaging.\n \n \n \n \n\n\n \n Yang, G.; Jones, T., L.; Barrick, T., R.; and Howe, F., A.\n\n\n \n\n\n\n NMR in Biomedicine, 27(9): 1103-1111. 2014.\n \n\n\n\n
\n\n\n\n \n \n \"DiscriminationPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Discrimination between glioblastoma multiforme and solitary metastasis using morphological features derived from the p:q tensor decomposition of diffusion tensor imaging},\n type = {article},\n year = {2014},\n pages = {1103-1111},\n volume = {27},\n id = {4761084c-8e43-3e18-afe5-c023d85ca690},\n created = {2024-01-13T08:14:17.096Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:15:26.531Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {yang_discrimination_2014},\n source_type = {JOUR},\n private_publication = {false},\n bibtype = {article},\n author = {Yang, Guang and Jones, Timothy L and Barrick, Thomas R and Howe, Franklyn A},\n journal = {NMR in Biomedicine},\n number = {9}\n}
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2013\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Nonlinear Laplacian Eigenmaps Dimension Reduction of in-vivo Magnetic Resonance Spectroscopic Imaging Analysis.\n \n \n \n\n\n \n Yang, G.; Raschke, F.; Barrick, T., R.; and Howe, F., A.\n\n\n \n\n\n\n In International Society for Magnetic Resonance in Medicine (ISMRM) 21st Annual Meeting, pages 1967, 2013. International Society for Magnetic Resonance in Medicine\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Nonlinear Laplacian Eigenmaps Dimension Reduction of in-vivo Magnetic Resonance Spectroscopic Imaging Analysis},\n type = {inproceedings},\n year = {2013},\n pages = {1967},\n publisher = {International Society for Magnetic Resonance in Medicine},\n id = {a887bd39-99e0-37d1-8cbc-ee56b666e1c9},\n created = {2024-01-13T05:46:23.121Z},\n file_attached = {false},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T12:10:01.863Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {yang_nonlinear_2013},\n source_type = {inproceedings},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Yang, Guang and Raschke, Felix and Barrick, Thomas R and Howe, Franklyn A},\n booktitle = {International Society for Magnetic Resonance in Medicine (ISMRM) 21st Annual Meeting}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Numerical Methods for Coupled Reconstruction and Registration in Digital Breast Tomosynthesis.\n \n \n \n \n\n\n \n Yang, G.; Hipwell, J., H.; Hawkes, D., J.; and Arridge, S., R.\n\n\n \n\n\n\n The Annals of the BMVA, 2013(9): 1-38. 2013.\n \n\n\n\n
\n\n\n\n \n \n \"NumericalPaper\n  \n \n \n \"NumericalWebsite\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Numerical Methods for Coupled Reconstruction and Registration in Digital Breast Tomosynthesis},\n type = {article},\n year = {2013},\n pages = {1-38},\n volume = {2013},\n websites = {http://arxiv.org/abs/1307.6008},\n id = {aee8a00a-ab92-3bbc-aac4-eb64314c4361},\n created = {2024-01-13T06:15:55.728Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:14:18.166Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Digital Breast Tomosynthesis (DBT) provides an insight into the fine details of normal fibroglandular tissues and abnormal lesions by reconstructing a pseudo-3D image of the breast. In this respect, DBT overcomes a major limitation of conventional X-ray mammography by reducing the confounding effects caused by the superposition of breast tissue. In a breast cancer screening or diagnostic context, a radiologist is interested in detecting change, which might be indicative of malignant disease. To help automate this task image registration is required to establish spatial correspondence between time points. Typically, images, such as MRI or CT, are first reconstructed and then registered. This approach can be effective if reconstructing using a complete set of data. However, for ill-posed, limited-angle problems such as DBT, estimating the deformation is complicated by the significant artefacts associated with the reconstruction, leading to severe inaccuracies in the registration. This paper presents a mathematical framework, which couples the two tasks and jointly estimates both image intensities and the parameters of a transformation. We evaluate our methods using various computational digital phantoms, uncompressed breast MR images, and in-vivo DBT simulations. Firstly, we compare both iterative and simultaneous methods to the conventional, sequential method using an affine transformation model. We show that jointly estimating image intensities and parametric transformations gives superior results with respect to reconstruction fidelity and registration accuracy. Also, we incorporate a non-rigid B-spline transformation model into our simultaneous method. The results demonstrate a visually plausible recovery of the deformation with preservation of the reconstruction fidelity.},\n bibtype = {article},\n author = {Yang, Guang and Hipwell, John H. and Hawkes, David J. and Arridge, Simon R.},\n journal = {The Annals of the BMVA},\n number = {9}\n}
\n
\n\n\n
\n Digital Breast Tomosynthesis (DBT) provides an insight into the fine details of normal fibroglandular tissues and abnormal lesions by reconstructing a pseudo-3D image of the breast. In this respect, DBT overcomes a major limitation of conventional X-ray mammography by reducing the confounding effects caused by the superposition of breast tissue. In a breast cancer screening or diagnostic context, a radiologist is interested in detecting change, which might be indicative of malignant disease. To help automate this task image registration is required to establish spatial correspondence between time points. Typically, images, such as MRI or CT, are first reconstructed and then registered. This approach can be effective if reconstructing using a complete set of data. However, for ill-posed, limited-angle problems such as DBT, estimating the deformation is complicated by the significant artefacts associated with the reconstruction, leading to severe inaccuracies in the registration. This paper presents a mathematical framework, which couples the two tasks and jointly estimates both image intensities and the parameters of a transformation. We evaluate our methods using various computational digital phantoms, uncompressed breast MR images, and in-vivo DBT simulations. Firstly, we compare both iterative and simultaneous methods to the conventional, sequential method using an affine transformation model. We show that jointly estimating image intensities and parametric transformations gives superior results with respect to reconstruction fidelity and registration accuracy. Also, we incorporate a non-rigid B-spline transformation model into our simultaneous method. The results demonstrate a visually plausible recovery of the deformation with preservation of the reconstruction fidelity.\n
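\n The abstract contrasts sequential, iterative, and simultaneous treatments of reconstruction and registration. The sketch below illustrates only the iterative (alternating) flavour in a toy 2D affine setting; the forward projector A, its adjoint At, the step size, and the Powell registration step are placeholders chosen for brevity, not the operators or optimisers used in the paper.\n

# Toy alternating scheme: gradient steps on each image's data term,
# interleaved with re-fitting an affine transform linking the two images.
# A and At are user-supplied forward/adjoint projector callables (assumed).
import numpy as np
from scipy.ndimage import affine_transform
from scipy.optimize import minimize

def warp(image, params):
    """Apply a 2D affine transform parameterised by a flattened 2x2 matrix and offset."""
    return affine_transform(image, params[:4].reshape(2, 2), offset=params[4:6], order=1)

def joint_cost(x1, x2, params, A, y1, y2, lam):
    """Coupled objective: data fidelity for both acquisitions plus a similarity term."""
    fidelity = np.sum((A(x1) - y1) ** 2) + np.sum((A(x2) - y2) ** 2)
    return fidelity + lam * np.sum((warp(x1, params) - x2) ** 2)

def alternate(x1, x2, params, A, At, y1, y2, n_outer=10, step=1e-3):
    """Alternate simple reconstruction updates with a registration refit."""
    for _ in range(n_outer):
        x1 = x1 - step * At(A(x1) - y1)          # reconstruction step, image 1
        x2 = x2 - step * At(A(x2) - y2)          # reconstruction step, image 2
        result = minimize(lambda p: np.sum((warp(x1, p) - x2) ** 2),
                          params, method="Powell")   # registration step
        params = result.x
    return x1, x2, params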
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2012\n \n \n (3)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Numerical Approaches for Solving the Combined Reconstruction and Registration of Digital Breast Tomosynthesis.\n \n \n \n \n\n\n \n Yang, G.\n\n\n \n\n\n\n Ph.D. Thesis, 2012.\n \n\n\n\n
\n\n\n\n \n \n \"NumericalPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@phdthesis{\n title = {Numerical Approaches for Solving the Combined Reconstruction and Registration of Digital Breast Tomosynthesis},\n type = {phdthesis},\n year = {2012},\n institution = {UCL (University College London)},\n id = {ae5ffd7f-1b4d-34d9-8c46-eab9891feb99},\n created = {2024-01-13T05:46:20.830Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:18:01.869Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {yang_numerical_2012-1},\n source_type = {phdthesis},\n user_context = {PhD Thesis},\n private_publication = {false},\n bibtype = {phdthesis},\n author = {Yang, Guang}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Nonlinear Least Squares Method for Solving the Joint Reconstruction and Registration Problem in Digital Breast Tomosynthesis.\n \n \n \n \n\n\n \n Yang, G.; Hipwell, J., H.; Hawkes, D., J.; and Arridge, S., R.\n\n\n \n\n\n\n In Medical Image Understanding and Analysis, pages 87-92, 2012. The British Machine Vision Association\n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {A Nonlinear Least Squares Method for Solving the Joint Reconstruction and Registration Problem in Digital Breast Tomosynthesis},\n type = {inproceedings},\n year = {2012},\n pages = {87-92},\n publisher = {The British Machine Vision Association},\n id = {6f6b1dda-45da-3ef6-84a5-f5ae818fb476},\n created = {2024-01-13T05:46:21.530Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:18:35.642Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {yang_nonlinear_2012-1},\n source_type = {inproceedings},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Yang, Guang and Hipwell, John H and Hawkes, David J and Arridge, Simon R},\n booktitle = {Medical Image Understanding and Analysis}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Joint registration and limited-angle reconstruction of digital breast tomosynthesis.\n \n \n \n \n\n\n \n Yang, G.; Hipwell, J., H.; Tanner, C.; Hawkes, D., J.; and Arridge, S., R.\n\n\n \n\n\n\n In International Workshop on Digital Mammography, pages 713-720, 2012. Springer Berlin Heidelberg\n \n\n\n\n
\n\n\n\n \n \n \"JointPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Joint registration and limited-angle reconstruction of digital breast tomosynthesis},\n type = {inproceedings},\n year = {2012},\n pages = {713-720},\n publisher = {Springer Berlin Heidelberg},\n id = {68dd6aa5-3004-33d0-b7b9-acdbe9be4be3},\n created = {2024-01-13T08:14:16.700Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:16:58.678Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {yang_joint_2012},\n source_type = {CONF},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Yang, Guang and Hipwell, John H and Tanner, Christine and Hawkes, David J and Arridge, Simon R},\n booktitle = {International Workshop on Digital Mammography}\n}
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2011\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Alternating Reconstruction and Registration for Digital Breast Tomosynthesis.\n \n \n \n\n\n \n Yang, G.; Hipwell, J., H.; Tanner, C.; Hawkes, D., J.; and Arridge, S., R.\n\n\n \n\n\n\n In Proceedings of the UK Radiological Congress 2011, pages 62-63, 2011. The British Institute of Radiology\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Alternating Reconstruction and Registration for Digital Breast Tomosynthesis},\n type = {inproceedings},\n year = {2011},\n pages = {62-63},\n publisher = {The British Institute of Radiology},\n id = {e268a4cf-1db7-34ae-8769-800751da1d19},\n created = {2024-01-13T05:46:21.782Z},\n file_attached = {false},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T12:10:01.404Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {yang_alternating_2011},\n source_type = {inproceedings},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Yang, Guang and Hipwell, John H and Tanner, C and Hawkes, David J and Arridge, Simon R},\n booktitle = {Proceedings of the UK Radiological Congress 2011}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Unconstrained Simultaneous Scheme to Fully Couple Reconstruction and Registration for Digital Breast Tomosynthesis: A Feasible Study.\n \n \n \n \n\n\n \n Yang, G.; Hipwell, J., H.; Hawkes, D., J.; and Arridge, S., R.\n\n\n \n\n\n\n Medical Image Computing and Computer Assisted Intervention MICCAI 2011 Workshop on Breast Image Analysis,25-32. 2011.\n \n\n\n\n
\n\n\n\n \n \n \"UnconstrainedPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{\n title = {Unconstrained Simultaneous Scheme to Fully Couple Reconstruction and Registration for Digital Breast Tomosynthesis: A Feasible Study},\n type = {article},\n year = {2011},\n pages = {25-32},\n id = {67f2d48e-a00a-39c2-be0e-eb9b78b6b05b},\n created = {2024-01-13T06:15:55.724Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T06:22:59.961Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n private_publication = {false},\n abstract = {Digital breast tomosynthesis (DBT) provides a pseudo-3D reconstruction which addresses the limitation of superimposition of dense fibro-glandular tissue associated with conventional mammography. Registration of temporal DBT volumes searches for the optimum deformation to transform two observed images of the same object into a common reference frame. This aligns the two images via minimising an objective function that calculates the similarity between the two datasets. In this paper, we present a novel algorithm which combines reconstruction of a pair of temporal DBT acquisitions with their simultaneous registration. We approach this nonlinear inverse problem using a generic unconstrained optimisation scheme. To evaluate the performance of our method we use 2D and 3D software phantoms and demonstrate that this simultaneous approach has comparable results to performing these tasks sequentially or iteratively w.r.t both the reconstruction fidelity and the registration accuracy.},\n bibtype = {article},\n author = {Yang, Guang and Hipwell, John H and Hawkes, David J and Arridge, Simon R},\n journal = {Medical Image Computing and Computer Assisted Intervention MICCAI 2011 Workshop on Breast Image Analysis}\n}
\n
\n\n\n
\n Digital breast tomosynthesis (DBT) provides a pseudo-3D reconstruction which addresses the limitation of superimposition of dense fibro-glandular tissue associated with conventional mammography. Registration of temporal DBT volumes searches for the optimum deformation to transform two observed images of the same object into a common reference frame. This aligns the two images via minimising an objective function that calculates the similarity between the two datasets. In this paper, we present a novel algorithm which combines reconstruction of a pair of temporal DBT acquisitions with their simultaneous registration. We approach this nonlinear inverse problem using a generic unconstrained optimisation scheme. To evaluate the performance of our method we use 2D and 3D software phantoms and demonstrate that this simultaneous approach has comparable results to performing these tasks sequentially or iteratively w.r.t both the reconstruction fidelity and the registration accuracy.\n
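\n For the fully simultaneous scheme described here, one way to realise a generic unconstrained optimisation is to stack both image estimates and the affine parameters into a single unknown vector and hand the combined objective to an off-the-shelf optimiser. The sketch below does this with SciPy's L-BFGS-B purely for illustration; the operator A, the coupling weight, and the 2D affine model are assumptions, and numerical gradients make it practical only for very small phantoms.\n

# Simultaneous (all-at-once) toy formulation: unknowns z = [x1, x2, affine params].
# A is an assumed forward projector callable; shapes and weights are illustrative.
import numpy as np
from scipy.ndimage import affine_transform
from scipy.optimize import minimize

def reconstruct_and_register(A, y1, y2, shape, lam=1.0):
    n = int(np.prod(shape))

    def unpack(z):
        return z[:n].reshape(shape), z[n:2 * n].reshape(shape), z[2 * n:]

    def objective(z):
        x1, x2, params = unpack(z)
        warped = affine_transform(x1, params[:4].reshape(2, 2),
                                  offset=params[4:6], order=1)
        return (np.sum((A(x1) - y1) ** 2) + np.sum((A(x2) - y2) ** 2)
                + lam * np.sum((warped - x2) ** 2))

    # Identity affine as the starting transform; zero images as starting estimates.
    z0 = np.concatenate([np.zeros(2 * n), np.array([1.0, 0, 0, 1.0, 0, 0])])
    result = minimize(objective, z0, method="L-BFGS-B")
    return unpack(result.x)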
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2010\n \n \n (3)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Collaboration of Reconstruction and Registration for Digital Tomosynthesis Application.\n \n \n \n\n\n \n Yang, G.\n\n\n \n\n\n\n In 2010 Annual CIMST Meeting, 2010. Eidgenössische Technische Hochschule Zürich (ETH Zürich)\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Collaboration of Reconstruction and Registration for Digital Tomosynthesis Application},\n type = {inproceedings},\n year = {2010},\n publisher = {Eidgenössische Technische Hochschule Zürich (ETH Zürich)},\n id = {d28d0e64-6d57-39c8-b23b-c4e4c72c87b5},\n created = {2024-01-13T05:46:21.241Z},\n file_attached = {false},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T12:10:01.540Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {yang_collaboration_2010-1},\n source_type = {inproceedings},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Yang, Guang},\n booktitle = {2010 Annual CIMST Meeting}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Combined reconstruction and registration of digital breast tomosynthesis: Sequential method versus iterative method.\n \n \n \n \n\n\n \n Yang, G.; Hipwell, J., H.; Clarkson, M., J.; Tanner, C.; Mertzanidou, T.; Gunn, S.; Ourselin, S.; Hawkes, D., J.; and Arridge, S., R.\n\n\n \n\n\n\n In Medical Image Understanding and Analysis, pages P27-1, 2010. The British Machine Vision Association\n \n\n\n\n
\n\n\n\n \n \n \"CombinedPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Combined reconstruction and registration of digital breast tomosynthesis: Sequential method versus iterative method},\n type = {inproceedings},\n year = {2010},\n pages = {P27-1},\n publisher = {The British Machine Vision Association},\n id = {b4024b2d-2580-3ee4-b3fc-c78e85739979},\n created = {2024-01-13T08:14:14.051Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:18:10.153Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {yang_combined_2010-1},\n source_type = {CONF},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Yang, Guang and Hipwell, John H and Clarkson, M J and Tanner, C and Mertzanidou, Thomy and Gunn, Spencer and Ourselin, Sebastien and Hawkes, David J and Arridge, Simon R},\n booktitle = {Medical Image Understanding and Analysis}\n}
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Combined reconstruction and registration of digital breast tomosynthesis.\n \n \n \n \n\n\n \n Yang, G.; Hipwell, J., H.; Clarkson, M., J.; Tanner, C.; Mertzanidou, T.; Gunn, S.; Ourselin, S.; Hawkes, D., J.; and Arridge, S., R.\n\n\n \n\n\n\n In International Workshop on Digital Mammography, pages 760-768, 2010. Springer, Berlin, Heidelberg\n \n\n\n\n
\n\n\n\n \n \n \"CombinedPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{\n title = {Combined reconstruction and registration of digital breast tomosynthesis},\n type = {inproceedings},\n year = {2010},\n pages = {760-768},\n publisher = {Springer, Berlin, Heidelberg},\n id = {727db291-2fb0-3110-a243-af558b69b2d5},\n created = {2024-01-13T08:14:16.543Z},\n file_attached = {true},\n profile_id = {d5b9f595-4104-33d2-b640-e84dee1b7ded},\n last_modified = {2024-01-13T08:17:03.057Z},\n read = {false},\n starred = {false},\n authored = {true},\n confirmed = {true},\n hidden = {false},\n citation_key = {yang_combined_2010},\n source_type = {CONF},\n private_publication = {false},\n bibtype = {inproceedings},\n author = {Yang, Guang and Hipwell, John H and Clarkson, Matthew J and Tanner, Christine and Mertzanidou, Thomy and Gunn, Spencer and Ourselin, Sebastien and Hawkes, David J and Arridge, Simon R},\n booktitle = {International Workshop on Digital Mammography}\n}
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n\n\n\n
\n\n\n \n\n \n \n \n \n\n
\n"}; document.write(bibbase_data.data);