Spatial transformer networks. Jaderberg, M., Simonyan, K., Zisserman, A., & Kavukcuoglu, K. Advances in Neural Information Processing Systems, 2015-Janua:2017-2025, 2015.
Abstract: Convolutional Neural Networks define an exceptionally powerful class of models, but are still limited by the lack of ability to be spatially invariant to the input data in a computationally and parameter efficient manner. In this work we introduce a new learnable module, the Spatial Transformer, which explicitly allows the spatial manipulation of data within the network. This differentiable module can be inserted into existing convolutional architectures, giving neural networks the ability to actively spatially transform feature maps, conditional on the feature map itself, without any extra training supervision or modification to the optimisation process. We show that the use of spatial transformers results in models which learn invariance to translation, scale, rotation and more generic warping, resulting in state-of-the-art performance on several benchmarks, and for a number of classes of transformations.
@article{jaderberg-simonyan-zisserman-kavukcuoglu-spatialtransformernetworks-2015,
title = {Spatial transformer networks},
year = {2015},
pages = {2017-2025},
volume = {2015-Janua},
abstract = {Convolutional Neural Networks define an exceptionally powerful class of models, but are still limited by the lack of ability to be spatially invariant to the input data in a computationally and parameter efficient manner. In this work we introduce a new learnable module, the Spatial Transformer, which explicitly allows the spatial manipulation of data within the network. This differentiable module can be inserted into existing convolutional architectures, giving neural networks the ability to actively spatially transform feature maps, conditional on the feature map itself, without any extra training supervision or modification to the optimisation process. We show that the use of spatial transformers results in models which learn invariance to translation, scale, rotation and more generic warping, resulting in state-of-the-art performance on several benchmarks, and for a number of classes of transformations.},
author = {Jaderberg, Max and Simonyan, Karen and Zisserman, Andrew and Kavukcuoglu, Koray},
journal = {Advances in Neural Information Processing Systems}
}
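For readers who want to see the idea from the abstract in code, below is a minimal sketch of a spatial transformer layer, assuming PyTorch: a small localization network regresses the six parameters of a 2D affine transform from the feature map itself, and torch.nn.functional.affine_grid / grid_sample play the role of the grid generator and differentiable (bilinear) sampler. The localization architecture, layer sizes, and the 28x28 single-channel input are illustrative assumptions, not the exact configuration from the paper.

# Minimal spatial-transformer sketch (assumes PyTorch with affine_grid/grid_sample).
import torch
import torch.nn as nn
import torch.nn.functional as F

class SpatialTransformer(nn.Module):
    def __init__(self, in_channels: int):
        super().__init__()
        # Localization network: predicts affine parameters conditioned on the input.
        self.localization = nn.Sequential(
            nn.Conv2d(in_channels, 8, kernel_size=7),
            nn.MaxPool2d(2), nn.ReLU(True),
            nn.Conv2d(8, 10, kernel_size=5),
            nn.MaxPool2d(2), nn.ReLU(True),
            nn.AdaptiveAvgPool2d(3),
        )
        self.fc_loc = nn.Sequential(
            nn.Linear(10 * 3 * 3, 32), nn.ReLU(True),
            nn.Linear(32, 6),
        )
        # Initialise the regression layer to the identity transform,
        # so training starts from "no warp".
        self.fc_loc[2].weight.data.zero_()
        self.fc_loc[2].bias.data.copy_(
            torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # 1) Regress 2x3 affine parameters theta from the feature map.
        xs = self.localization(x)
        theta = self.fc_loc(xs.flatten(1)).view(-1, 2, 3)
        # 2) Generate the sampling grid for the predicted transform.
        grid = F.affine_grid(theta, x.size(), align_corners=False)
        # 3) Differentiably sample the input at the grid points (bilinear).
        return F.grid_sample(x, grid, align_corners=False)

# Usage: warp a batch of single-channel 28x28 inputs (e.g. distorted MNIST digits).
stn = SpatialTransformer(in_channels=1)
out = stn(torch.randn(4, 1, 28, 28))
print(out.shape)  # torch.Size([4, 1, 28, 28])

Because every step above is differentiable, gradients flow through the sampler into the localization network, so the module can be dropped into an existing CNN and trained end to end with no extra supervision, as the abstract describes.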
{"_id":"78fR56HXZu8hbATGh","bibbaseid":"jaderberg-simonyan-zisserman-kavukcuoglu-spatialtransformernetworks-2015","downloads":0,"creationDate":"2017-06-08T12:53:07.152Z","title":"Spatial transformer networks","author_short":["Jaderberg, M.","Simonyan, K.","Zisserman, A.","Kavukcuoglu, K."],"year":2015,"bibtype":"article","biburl":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c","bibdata":{"title":"Spatial transformer networks","type":"article","year":"2015","pages":"2017-2025","volume":"2015-Janua","id":"1836ca8a-16a9-3cdb-85c8-4255c414124e","created":"2022-01-13T07:21:01.610Z","file_attached":"true","profile_id":"bfbbf840-4c42-3914-a463-19024f50b30c","group_id":"1ff583c0-be37-34fa-9c04-73c69437d354","last_modified":"2022-01-13T07:21:08.480Z","read":false,"starred":false,"authored":false,"confirmed":"true","hidden":false,"folder_uuids":"d54ba66b-a8cf-41de-8e2d-c3256f322e07","private_publication":false,"abstract":"Convolutional Neural Networks define an exceptionally powerful class of models, but are still limited by the lack of ability to be spatially invariant to the input data in a computationally and parameter efficient manner.In this work we introduce a new learnable module, the Spatial Transformer, which explicitly allows the spatial manipulation of data within the network. This differentiable module can be inserted into existing convolutional architectures, giving neural networks the ability to actively spatially transform feature maps, conditional on the feature map itself, without any extra training supervision or modification to the optimisation process. We show that the use of spatial transformers results in models which learn invariance to translation, scale, rotation and more generic warping, resulting in state-of-the-art performance on several benchmarks, and for a number of classes of transformations.","bibtype":"article","author":"Jaderberg, Max and Simonyan, Karen and Zisserman, Andrew and Kavukcuoglu, Koray","journal":"Advances in Neural Information Processing Systems","bibtex":"@article{\n title = {Spatial transformer networks},\n type = {article},\n year = {2015},\n pages = {2017-2025},\n volume = {2015-Janua},\n id = {1836ca8a-16a9-3cdb-85c8-4255c414124e},\n created = {2022-01-13T07:21:01.610Z},\n file_attached = {true},\n profile_id = {bfbbf840-4c42-3914-a463-19024f50b30c},\n group_id = {1ff583c0-be37-34fa-9c04-73c69437d354},\n last_modified = {2022-01-13T07:21:08.480Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {true},\n hidden = {false},\n folder_uuids = {d54ba66b-a8cf-41de-8e2d-c3256f322e07},\n private_publication = {false},\n abstract = {Convolutional Neural Networks define an exceptionally powerful class of models, but are still limited by the lack of ability to be spatially invariant to the input data in a computationally and parameter efficient manner.In this work we introduce a new learnable module, the Spatial Transformer, which explicitly allows the spatial manipulation of data within the network. This differentiable module can be inserted into existing convolutional architectures, giving neural networks the ability to actively spatially transform feature maps, conditional on the feature map itself, without any extra training supervision or modification to the optimisation process. 
We show that the use of spatial transformers results in models which learn invariance to translation, scale, rotation and more generic warping, resulting in state-of-the-art performance on several benchmarks, and for a number of classes of transformations.},\n bibtype = {article},\n author = {Jaderberg, Max and Simonyan, Karen and Zisserman, Andrew and Kavukcuoglu, Koray},\n journal = {Advances in Neural Information Processing Systems}\n}","author_short":["Jaderberg, M.","Simonyan, K.","Zisserman, A.","Kavukcuoglu, K."],"urls":{"Paper":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c/file/1e2d8258-543e-883d-67a2-539dc5aa9a66/150602025.pdf.pdf"},"biburl":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c","bibbaseid":"jaderberg-simonyan-zisserman-kavukcuoglu-spatialtransformernetworks-2015","role":"author","metadata":{"authorlinks":{}},"downloads":0},"search_terms":["spatial","transformer","networks","jaderberg","simonyan","zisserman","kavukcuoglu"],"keywords":[],"authorIDs":[],"dataSources":["bzxc3uBcwMv3h47xE","ya2CyA73rpZseyrZ8","am4cuScX42bDPukDn","KZFesWbmGy4yc4ZLC","ovn29uG6Mbp3JWCRR","2252seNhipfTmjEBQ"]}