Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning. Szegedy, C., Ioffe, S., Vanhoucke, V., & Alemi, A. A. In Proceedings of the Thirty-First AAAI Conference on Artificial Intelligence (AAAI'17), pages 4278–4284, San Francisco, California, USA, February 2017. AAAI Press.

Abstract: Very deep convolutional networks have been central to the largest advances in image recognition performance in recent years. One example is the Inception architecture that has been shown to achieve very good performance at relatively low computational cost. Recently, the introduction of residual connections in conjunction with a more traditional architecture has yielded state-of-the-art performance in the 2015 ILSVRC challenge; its performance was similar to the latest generation Inception-v3 network. This raises the question: Are there any benefits to combining Inception architectures with residual connections? Here we give clear empirical evidence that training with residual connections accelerates the training of Inception networks significantly. There is also some evidence of residual Inception networks outperforming similarly expensive Inception networks without residual connections by a thin margin. We also present several new streamlined architectures for both residual and non-residual Inception networks. These variations improve the single-frame recognition performance on the ILSVRC 2012 classification task significantly. We further demonstrate how proper activation scaling stabilizes the training of very wide residual Inception networks. With an ensemble of three residual and one Inception-v4 networks, we achieve 3.08% top-5 error on the test set of the ImageNet classification (CLS) challenge.
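The activation scaling mentioned in the abstract amounts to multiplying the output of a residual branch by a small constant before adding it to the shortcut; the paper reports that factors in roughly the 0.1–0.3 range stabilize training when the networks become very wide. A minimal PyTorch sketch of that idea follows; the `branch` module and the 0.2 factor are illustrative stand-ins, not the paper's exact Inception-ResNet blocks:

import torch
import torch.nn as nn

class ScaledResidual(nn.Module):
    # Wraps an arbitrary residual branch and scales its output by a small
    # constant before the shortcut addition. The paper reports factors
    # around 0.1-0.3 stabilize very wide residual Inception networks.
    def __init__(self, branch: nn.Module, scale: float = 0.2):
        super().__init__()
        self.branch = branch  # hypothetical stand-in for an Inception-ResNet conv branch
        self.scale = scale

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x + self.scale * self.branch(x)

# Toy usage: a 1x1-conv branch on a 32-channel feature map; shape is preserved.
block = ScaledResidual(nn.Conv2d(32, 32, kernel_size=1), scale=0.2)
out = block(torch.randn(1, 32, 8, 8))  # -> (1, 32, 8, 8)

Without the scaling (scale = 1.0), the authors report that residual variants with very large filter counts could become unstable early in training.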
@inproceedings{szegedy_inception-v4_2017,
address = {San Francisco, California, USA},
series = {{AAAI}'17},
title = {Inception-v4, {Inception-ResNet} and the impact of residual connections on learning},
url = {https://dl.acm.org/doi/10.5555/3298023.3298188},
doi = {10.5555/3298023.3298188},
abstract = {Very deep convolutional networks have been central to the largest advances in image recognition performance in recent years. One example is the Inception architecture that has been shown to achieve very good performance at relatively low computational cost. Recently, the introduction of residual connections in conjunction with a more traditional architecture has yielded state-of-the-art performance in the 2015 ILSVRC challenge; its performance was similar to the latest generation Inception-v3 network. This raises the question: Are there any benefits to combining Inception architectures with residual connections? Here we give clear empirical evidence that training with residual connections accelerates the training of Inception networks significantly. There is also some evidence of residual Inception networks outperforming similarly expensive Inception networks without residual connections by a thin margin. We also present several new streamlined architectures for both residual and non-residual Inception networks. These variations improve the single-frame recognition performance on the ILSVRC 2012 classification task significantly. We further demonstrate how proper activation scaling stabilizes the training of very wide residual Inception networks. With an ensemble of three residual and one Inception-v4 networks, we achieve 3.08\% top-5 error on the test set of the ImageNet classification (CLS) challenge.},
language = {en},
urldate = {2023-06-15},
booktitle = {Proceedings of the {Thirty}-{First} {AAAI} {Conference} on {Artificial} {Intelligence}},
publisher = {AAAI Press},
author = {Szegedy, Christian and Ioffe, Sergey and Vanhoucke, Vincent and Alemi, Alexander A.},
month = feb,
year = {2017},
pages = {4278--4284},
}
{"_id":"dKYZs2hk2r48oyYFJ","bibbaseid":"szegedy-ioffe-vanhoucke-alemi-inceptionv4inceptionresnetandtheimpactofresidualconnectionsonlearning-2017","authorIDs":[],"author_short":["Szegedy, C.","Ioffe, S.","Vanhoucke, V.","Alemi, A. A."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","address":"San Francisco, California, USA","series":"AAAI'17","title":"Inception-v4, inception-ResNet and the impact of residual connections on learning","url":"https://dl.acm.org/doi/10.5555/3298023.3298188","doi":"10.5555/3298023.3298188","abstract":"Very deep convolutional networks have been central to the largest advances in image recognition performance in recent years. One example is the Inception architecture that has been shown to achieve very good performance at relatively low computational cost. Recently, the introduction of residual connections in conjunction with a more traditional architecture has yielded state-of-the-art performance in the 2015 ILSVRC challenge; its performance was similar to the latest generation Inception-v3 network. This raises the question: Are there any benefits to combining Inception architectures with residual connections? Here we give clear empirical evidence that training with residual connections accelerates the training of Inception networks significantly. There is also some evidence of residual Inception networks outperforming similarly expensive Inception networks without residual connections by a thin margin. We also present several new streamlined architectures for both residual and non-residual Inception networks. These variations improve the single-frame recognition performance on the ILSVRC 2012 classification task significantly. We further demonstrate how proper activation scaling stabilizes the training of very wide residual Inception networks. With an ensemble of three residual and one Inception-v4 networks, we achieve 3.08% top-5 error on the test set of the ImageNet classification (CLS) challenge.","language":"en","urldate":"2023-06-15","booktitle":"Proceedings of the Thirty-First AAAI Conference on Artificial Intelligence","publisher":"AAAI Press","author":[{"propositions":[],"lastnames":["Szegedy"],"firstnames":["Christian"],"suffixes":[]},{"propositions":[],"lastnames":["Ioffe"],"firstnames":["Sergey"],"suffixes":[]},{"propositions":[],"lastnames":["Vanhoucke"],"firstnames":["Vincent"],"suffixes":[]},{"propositions":[],"lastnames":["Alemi"],"firstnames":["Alexander","A."],"suffixes":[]}],"month":"February","year":"2017","note":"rate: 5","keywords":"#AAAI\\textgreater17, #CNN, /readed, ❤️, ⭐⭐⭐⭐⭐, 🚩","pages":"4278–4284","bibtex":"@inproceedings{szegedy_inception-v4_2017,\n\taddress = {San Francisco, California, USA},\n\tseries = {{AAAI}'17},\n\ttitle = {Inception-v4, inception-{ResNet} and the impact of residual connections on learning},\n\turl = {https://dl.acm.org/doi/10.5555/3298023.3298188},\n\tdoi = {10.5555/3298023.3298188},\n\tabstract = {Very deep convolutional networks have been central to the largest advances in image recognition performance in recent years. One example is the Inception architecture that has been shown to achieve very good performance at relatively low computational cost. Recently, the introduction of residual connections in conjunction with a more traditional architecture has yielded state-of-the-art performance in the 2015 ILSVRC challenge; its performance was similar to the latest generation Inception-v3 network. 
This raises the question: Are there any benefits to combining Inception architectures with residual connections? Here we give clear empirical evidence that training with residual connections accelerates the training of Inception networks significantly. There is also some evidence of residual Inception networks outperforming similarly expensive Inception networks without residual connections by a thin margin. We also present several new streamlined architectures for both residual and non-residual Inception networks. These variations improve the single-frame recognition performance on the ILSVRC 2012 classification task significantly. We further demonstrate how proper activation scaling stabilizes the training of very wide residual Inception networks. With an ensemble of three residual and one Inception-v4 networks, we achieve 3.08\\% top-5 error on the test set of the ImageNet classification (CLS) challenge.},\n\tlanguage = {en},\n\turldate = {2023-06-15},\n\tbooktitle = {Proceedings of the {Thirty}-{First} {AAAI} {Conference} on {Artificial} {Intelligence}},\n\tpublisher = {AAAI Press},\n\tauthor = {Szegedy, Christian and Ioffe, Sergey and Vanhoucke, Vincent and Alemi, Alexander A.},\n\tmonth = feb,\n\tyear = {2017},\n\tnote = {rate: 5},\n\tkeywords = {\\#AAAI{\\textgreater}17, \\#CNN, /readed, ❤️, ⭐⭐⭐⭐⭐, 🚩},\n\tpages = {4278--4284},\n}\n\n\n\n","author_short":["Szegedy, C.","Ioffe, S.","Vanhoucke, V.","Alemi, A. A."],"key":"szegedy_inception-v4_2017","id":"szegedy_inception-v4_2017","bibbaseid":"szegedy-ioffe-vanhoucke-alemi-inceptionv4inceptionresnetandtheimpactofresidualconnectionsonlearning-2017","role":"author","urls":{"Paper":"https://dl.acm.org/doi/10.5555/3298023.3298188"},"keyword":["#AAAI\\textgreater17","#CNN","/readed","❤️","⭐⭐⭐⭐⭐","🚩"],"metadata":{"authorlinks":{}},"downloads":0,"html":""},"bibtype":"inproceedings","biburl":"https://bibbase.org/zotero/zzhenry2012","creationDate":"2020-03-03T14:08:15.112Z","downloads":0,"keywords":["#aaai\\textgreater17","#cnn","/readed","❤️","⭐⭐⭐⭐⭐","🚩"],"search_terms":["inception","inception","resnet","impact","residual","connections","learning","szegedy","ioffe","vanhoucke","alemi"],"title":"Inception-v4, inception-ResNet and the impact of residual connections on learning","year":2017,"dataSources":["jAxurbvLP8q5LTdLa","XFrKPG99s5t3W7xuW","nZHrFJKyxKKDaWYM8"]}