Attention U-Net: Learning Where to Look for the Pancreas. Oktay, O., Schlemper, J., Le Folgoc, L., Lee, M., Heinrich, M., Misawa, K., Mori, K., McDonagh, S., Hammerla, N. Y., Kainz, B., Glocker, B., & Rueckert, D. arXiv:1804.03999 [cs], May 2018. Paper Website abstract bibtex We propose a novel attention gate (AG) model for medical imaging that automatically learns to focus on target structures of varying shapes and sizes. Models trained with AGs implicitly learn to suppress irrelevant regions in an input image while highlighting salient features useful for a specific task. This enables us to eliminate the necessity of using explicit external tissue/organ localisation modules of cascaded convolutional neural networks (CNNs). AGs can be easily integrated into standard CNN architectures such as the U-Net model with minimal computational overhead while increasing the model sensitivity and prediction accuracy. The proposed Attention U-Net architecture is evaluated on two large CT abdominal datasets for multi-class image segmentation. Experimental results show that AGs consistently improve the prediction performance of U-Net across different datasets and training sizes while preserving computational efficiency. The code for the proposed architecture is publicly available.
@article{oktayAttentionUNetLearning2018,
  title = {Attention {U-Net}: Learning Where to Look for the Pancreas},
  type = {article},
  year = {2018},
  keywords = {Computer Science - Computer Vision and Pattern Recognition},
  websites = {http://arxiv.org/abs/1804.03999},
  month = may,
  id = {7227242e-bfa6-343e-a301-794c63175863},
  created = {2022-03-28T09:45:05.973Z},
  accessed = {2022-03-27},
  file_attached = {true},
  profile_id = {235249c2-3ed4-314a-b309-b1ea0330f5d9},
  group_id = {1ff583c0-be37-34fa-9c04-73c69437d354},
  last_modified = {2022-03-30T07:23:30.129Z},
  read = {false},
  starred = {false},
  authored = {false},
  confirmed = {true},
  hidden = {false},
  citation_key = {oktayAttentionUNetLearning2018},
  source_type = {article},
  short_title = {Attention {U-Net}},
  notes = {arXiv: 1804.03999},
  private_publication = {false},
  abstract = {We propose a novel attention gate (AG) model for medical imaging that automatically learns to focus on target structures of varying shapes and sizes. Models trained with AGs implicitly learn to suppress irrelevant regions in an input image while highlighting salient features useful for a specific task. This enables us to eliminate the necessity of using explicit external tissue/organ localisation modules of cascaded convolutional neural networks (CNNs). AGs can be easily integrated into standard CNN architectures such as the U-Net model with minimal computational overhead while increasing the model sensitivity and prediction accuracy. The proposed Attention U-Net architecture is evaluated on two large CT abdominal datasets for multi-class image segmentation. Experimental results show that AGs consistently improve the prediction performance of U-Net across different datasets and training sizes while preserving computational efficiency. The code for the proposed architecture is publicly available.},
  bibtype = {article},
  author = {Oktay, Ozan and Schlemper, Jo and Le Folgoc, Loic and Lee, Matthew and Heinrich, Mattias and Misawa, Kazunari and Mori, Kensaku and McDonagh, Steven and Hammerla, Nils Y. and Kainz, Bernhard and Glocker, Ben and Rueckert, Daniel},
  journal = {arXiv:1804.03999 [cs]},
  eprint = {1804.03999},
  archiveprefix = {arXiv},
  primaryclass = {cs.CV}
}
Downloads: 0
{"_id":"i8z5chn4fh6YLadm9","bibbaseid":"oktay-schlemper-folgoc-lee-heinrich-misawa-mori-mcdonagh-etal-attentionunetlearningwheretolookforthepancreas-2018","authorIDs":[],"author_short":["Oktay, O.","Schlemper, J.","Folgoc, L., L.","Lee, M.","Heinrich, M.","Misawa, K.","Mori, K.","McDonagh, S.","Hammerla, N., Y.","Kainz, B.","Glocker, B.","Rueckert, D."],"bibdata":{"title":"Attention U-Net: Learning Where to Look for the Pancreas","type":"article","year":"2018","keywords":"Computer Science - Computer Vision and Pattern Rec","websites":"http://arxiv.org/abs/1804.03999","month":"5","id":"7227242e-bfa6-343e-a301-794c63175863","created":"2022-03-28T09:45:05.973Z","accessed":"2022-03-27","file_attached":"true","profile_id":"235249c2-3ed4-314a-b309-b1ea0330f5d9","group_id":"1ff583c0-be37-34fa-9c04-73c69437d354","last_modified":"2022-03-30T07:23:30.129Z","read":false,"starred":false,"authored":false,"confirmed":"true","hidden":false,"citation_key":"oktayAttentionUNetLearning2018","source_type":"article","short_title":"Attention U-Net","notes":"arXiv: 1804.03999","private_publication":false,"abstract":"We propose a novel attention gate (AG) model for medical imaging that automatically learns to focus on target structures of varying shapes and sizes. Models trained with AGs implicitly learn to suppress irrelevant regions in an input image while highlighting salient features useful for a specific task. This enables us to eliminate the necessity of using explicit external tissue/organ localisation modules of cascaded convolutional neural networks (CNNs). AGs can be easily integrated into standard CNN architectures such as the U-Net model with minimal computational overhead while increasing the model sensitivity and prediction accuracy. The proposed Attention U-Net architecture is evaluated on two large CT abdominal datasets for multi-class image segmentation. 
Experimental results show that AGs consistently improve the prediction performance of U-Net across different datasets and training sizes while preserving computational efficiency. The code for the proposed architecture is publicly available.","bibtype":"article","author":"Oktay, Ozan and Schlemper, Jo and Folgoc, Loic Le and Lee, Matthew and Heinrich, Mattias and Misawa, Kazunari and Mori, Kensaku and McDonagh, Steven and Hammerla, Nils Y and Kainz, Bernhard and Glocker, Ben and Rueckert, Daniel","journal":"arXiv:1804.03999 [cs]","bibtex":"@article{\n title = {Attention U-Net: Learning Where to Look for the Pancreas},\n type = {article},\n year = {2018},\n keywords = {Computer Science - Computer Vision and Pattern Rec},\n websites = {http://arxiv.org/abs/1804.03999},\n month = {5},\n id = {7227242e-bfa6-343e-a301-794c63175863},\n created = {2022-03-28T09:45:05.973Z},\n accessed = {2022-03-27},\n file_attached = {true},\n profile_id = {235249c2-3ed4-314a-b309-b1ea0330f5d9},\n group_id = {1ff583c0-be37-34fa-9c04-73c69437d354},\n last_modified = {2022-03-30T07:23:30.129Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {true},\n hidden = {false},\n citation_key = {oktayAttentionUNetLearning2018},\n source_type = {article},\n short_title = {Attention U-Net},\n notes = {arXiv: 1804.03999},\n private_publication = {false},\n abstract = {We propose a novel attention gate (AG) model for medical imaging that automatically learns to focus on target structures of varying shapes and sizes. Models trained with AGs implicitly learn to suppress irrelevant regions in an input image while highlighting salient features useful for a specific task. This enables us to eliminate the necessity of using explicit external tissue/organ localisation modules of cascaded convolutional neural networks (CNNs). 
AGs can be easily integrated into standard CNN architectures such as the U-Net model with minimal computational overhead while increasing the model sensitivity and prediction accuracy. The proposed Attention U-Net architecture is evaluated on two large CT abdominal datasets for multi-class image segmentation. Experimental results show that AGs consistently improve the prediction performance of U-Net across different datasets and training sizes while preserving computational efficiency. The code for the proposed architecture is publicly available.},\n bibtype = {article},\n author = {Oktay, Ozan and Schlemper, Jo and Folgoc, Loic Le and Lee, Matthew and Heinrich, Mattias and Misawa, Kazunari and Mori, Kensaku and McDonagh, Steven and Hammerla, Nils Y and Kainz, Bernhard and Glocker, Ben and Rueckert, Daniel},\n journal = {arXiv:1804.03999 [cs]}\n}","author_short":["Oktay, O.","Schlemper, J.","Folgoc, L., L.","Lee, M.","Heinrich, M.","Misawa, K.","Mori, K.","McDonagh, S.","Hammerla, N., Y.","Kainz, B.","Glocker, B.","Rueckert, D."],"urls":{"Paper":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c/file/67a0e388-4a0a-22ef-01c6-312bcb0a73da/Oktay_et_al___2018___Attention_U_Net_Learning_Where_to_Look_for_the_Pa.pdf.pdf","Website":"http://arxiv.org/abs/1804.03999"},"biburl":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c","bibbaseid":"oktay-schlemper-folgoc-lee-heinrich-misawa-mori-mcdonagh-etal-attentionunetlearningwheretolookforthepancreas-2018","role":"author","keyword":["Computer Science - Computer Vision and Pattern Rec"],"metadata":{"authorlinks":{}},"downloads":0},"bibtype":"article","biburl":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c","creationDate":"2020-05-25T10:44:59.024Z","downloads":0,"keywords":["computer science - computer vision and pattern 
rec"],"search_terms":["attention","net","learning","look","pancreas","oktay","schlemper","folgoc","lee","heinrich","misawa","mori","mcdonagh","hammerla","kainz","glocker","rueckert"],"title":"Attention U-Net: Learning Where to Look for the Pancreas","year":2018,"dataSources":["95W8SWCJxBDir7TB5","ya2CyA73rpZseyrZ8","2252seNhipfTmjEBQ"]}