CSNN: An Augmented Spiking based Framework with Perceptron-Inception. Xu, Q., Qi, Y., Yu, H., Shen, J., Tang, H., & Pan, G. In Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence, pages 1646–1652, Stockholm, Sweden, July 2018. International Joint Conferences on Artificial Intelligence Organization. doi: 10.24963/ijcai.2018/228

Abstract: Spiking Neural Networks (SNNs) represent and transmit information as spikes, which is considered more biologically realistic and computationally powerful than traditional Artificial Neural Networks. Spiking neurons encode useful temporal information and are highly robust to noise. However, the feature extraction ability of typical SNNs is limited by their shallow structures. This paper focuses on improving the feature extraction ability of SNNs by exploiting the powerful feature extraction of Convolutional Neural Networks (CNNs), which extract abstract features through their convolutional feature maps. We propose a CNN-SNN (CSNN) model that combines the feature learning ability of CNNs with the cognition ability of SNNs. The CSNN model learns encoded spatiotemporal representations of images in an event-driven way. We evaluate CSNN on MNIST and its variants, examining its learning capabilities, encoding mechanisms, robustness to noisy stimuli, and classification performance. The results show that CSNN performs well compared to other cognitive models while using significantly fewer neurons and training samples. Our work brings more biological realism into modern image classification models, with the hope that these models can inform how the brain performs this high-level vision task.
@inproceedings{xu_csnn_2018,
address = {Stockholm, Sweden},
title = {{CSNN}: {An} {Augmented} {Spiking} based {Framework} with {Perceptron}-{Inception}},
isbn = {978-0-9992411-2-7},
shorttitle = {{CSNN}},
url = {https://www.ijcai.org/proceedings/2018/228},
doi = {10.24963/ijcai.2018/228},
abstract = {Spiking Neural Networks (SNNs) represent and transmit information in spikes, which is considered more biologically realistic and computationally powerful than the traditional Artificial Neural Networks. The spiking neurons encode useful temporal information and possess highly anti-noise property. The feature extraction ability of typical SNNs is limited by shallow structures. This paper focuses on improving the feature extraction ability of SNNs in virtue of powerful feature extraction ability of Convolutional Neural Networks (CNNs). CNNs can extract abstract features resorting to the structure of the convolutional feature maps. We propose a CNN-SNN (CSNN) model to combine feature learning ability of CNNs with cognition ability of SNNs. The CSNN model learns the encoded spatiotemporal representations of images in an event-driven way. We evaluate the CSNN model on the MNIST and its variants, including learning capabilities, encoding mechanisms, robustness to noisy stimuli and its classification performance. The results show that CSNN behaves well compared to other cognitive models with significantly fewer neurons and training samples. Our work brings more biological realism into modern image classification models, with the hope that these models can inform how the brain performs this high-level vision task.},
language = {en},
urldate = {2022-01-19},
booktitle = {Proceedings of the {Twenty}-{Seventh} {International} {Joint} {Conference} on {Artificial} {Intelligence}},
publisher = {International Joint Conferences on Artificial Intelligence Organization},
author = {Xu, Qi and Qi, Yu and Yu, Hang and Shen, Jiangrong and Tang, Huajin and Pan, Gang},
month = jul,
year = {2018},
pages = {1646--1652},
}
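
The abstract describes a two-stage pipeline: convolutional feature extraction followed by a spiking stage that operates on event-driven, spatiotemporal (spike-train) representations. The sketch below is not the paper's CSNN (it omits the perceptron/inception components and the paper's training procedure); it is only a minimal NumPy illustration of the general CNN-to-SNN idea, under the assumption of Poisson rate coding of feature activations and a leaky integrate-and-fire (LIF) readout with random, untrained weights.

# Hedged sketch: NOT the paper's CSNN implementation. It only illustrates the
# generic pipeline the abstract describes -- convolutional feature extraction,
# then a spiking stage -- using Poisson rate coding and LIF neurons as
# placeholder (assumed) choices.
import numpy as np

rng = np.random.default_rng(0)

def conv_features(image, kernels):
    """Valid 2-D convolution + ReLU for each kernel; a stand-in CNN layer."""
    H, W = image.shape
    kh, kw = kernels.shape[1:]
    out = np.zeros((len(kernels), H - kh + 1, W - kw + 1))
    for k, kern in enumerate(kernels):
        for i in range(out.shape[1]):
            for j in range(out.shape[2]):
                out[k, i, j] = np.sum(image[i:i + kh, j:j + kw] * kern)
    return np.maximum(out, 0.0)

def poisson_encode(features, steps=50, max_rate=0.5):
    """Map normalised feature values to Bernoulli spike trains (rate coding)."""
    f = features.ravel()
    f = f / (f.max() + 1e-8)
    probs = f * max_rate
    return rng.random((steps, f.size)) < probs   # shape: (time, n_inputs)

def lif_readout(spikes, weights, tau=20.0, v_th=1.0):
    """LIF output neurons; the predicted class is the one that spikes most."""
    n_out = weights.shape[1]
    v = np.zeros(n_out)
    counts = np.zeros(n_out, dtype=int)
    for t in range(spikes.shape[0]):
        v += -v / tau + spikes[t].astype(float) @ weights
        fired = v >= v_th
        counts += fired
        v[fired] = 0.0                            # reset after a spike
    return counts

# Toy usage on a random "image"; all weights here are random stand-ins.
image = rng.random((28, 28))
kernels = rng.standard_normal((4, 5, 5))
feats = conv_features(image, kernels)
spike_train = poisson_encode(feats)
weights = rng.standard_normal((spike_train.shape[1], 10)) * 0.05
print("spike counts per class:", lif_readout(spike_train, weights))

In the paper both the convolutional features and the spiking classifier are learned; the random weights above only demonstrate how continuous feature maps can be turned into event-driven spike trains and read out by counting spikes.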
{"_id":"9Fe8p4qHpGFnjupEB","bibbaseid":"xu-qi-yu-shen-tang-pan-csnnanaugmentedspikingbasedframeworkwithperceptroninception-2018","author_short":["Xu, Q.","Qi, Y.","Yu, H.","Shen, J.","Tang, H.","Pan, G."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","address":"Stockholm, Sweden","title":"CSNN: An Augmented Spiking based Framework with Perceptron-Inception","isbn":"978-0-9992411-2-7","shorttitle":"CSNN","url":"https://www.ijcai.org/proceedings/2018/228","doi":"10.24963/ijcai.2018/228","abstract":"Spiking Neural Networks (SNNs) represent and transmit information in spikes, which is considered more biologically realistic and computationally powerful than the traditional Artificial Neural Networks. The spiking neurons encode useful temporal information and possess highly anti-noise property. The feature extraction ability of typical SNNs is limited by shallow structures. This paper focuses on improving the feature extraction ability of SNNs in virtue of powerful feature extraction ability of Convolutional Neural Networks (CNNs). CNNs can extract abstract features resorting to the structure of the convolutional feature maps. We propose a CNN-SNN (CSNN) model to combine feature learning ability of CNNs with cognition ability of SNNs. The CSNN model learns the encoded spatiotemporal representations of images in an event-driven way. We evaluate the CSNN model on the MNIST and its variants, including learning capabilities, encoding mechanisms, robustness to noisy stimuli and its classification performance. The results show that CSNN behaves well compared to other cognitive models with significantly fewer neurons and training samples. Our work brings more biological realism into modern image classification models, with the hope that these models can inform how the brain performs this highlevel vision task.","language":"en","urldate":"2022-01-19","booktitle":"Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence","publisher":"International Joint Conferences on Artificial Intelligence Organization","author":[{"propositions":[],"lastnames":["Xu"],"firstnames":["Qi"],"suffixes":[]},{"propositions":[],"lastnames":["Qi"],"firstnames":["Yu"],"suffixes":[]},{"propositions":[],"lastnames":["Yu"],"firstnames":["Hang"],"suffixes":[]},{"propositions":[],"lastnames":["Shen"],"firstnames":["Jiangrong"],"suffixes":[]},{"propositions":[],"lastnames":["Tang"],"firstnames":["Huajin"],"suffixes":[]},{"propositions":[],"lastnames":["Pan"],"firstnames":["Gang"],"suffixes":[]}],"month":"July","year":"2018","keywords":"/unread","pages":"1646–1652","bibtex":"@inproceedings{xu_csnn_2018,\n\taddress = {Stockholm, Sweden},\n\ttitle = {{CSNN}: {An} {Augmented} {Spiking} based {Framework} with {Perceptron}-{Inception}},\n\tisbn = {978-0-9992411-2-7},\n\tshorttitle = {{CSNN}},\n\turl = {https://www.ijcai.org/proceedings/2018/228},\n\tdoi = {10.24963/ijcai.2018/228},\n\tabstract = {Spiking Neural Networks (SNNs) represent and transmit information in spikes, which is considered more biologically realistic and computationally powerful than the traditional Artificial Neural Networks. The spiking neurons encode useful temporal information and possess highly anti-noise property. The feature extraction ability of typical SNNs is limited by shallow structures. This paper focuses on improving the feature extraction ability of SNNs in virtue of powerful feature extraction ability of Convolutional Neural Networks (CNNs). 
CNNs can extract abstract features resorting to the structure of the convolutional feature maps. We propose a CNN-SNN (CSNN) model to combine feature learning ability of CNNs with cognition ability of SNNs. The CSNN model learns the encoded spatiotemporal representations of images in an event-driven way. We evaluate the CSNN model on the MNIST and its variants, including learning capabilities, encoding mechanisms, robustness to noisy stimuli and its classification performance. The results show that CSNN behaves well compared to other cognitive models with significantly fewer neurons and training samples. Our work brings more biological realism into modern image classification models, with the hope that these models can inform how the brain performs this highlevel vision task.},\n\tlanguage = {en},\n\turldate = {2022-01-19},\n\tbooktitle = {Proceedings of the {Twenty}-{Seventh} {International} {Joint} {Conference} on {Artificial} {Intelligence}},\n\tpublisher = {International Joint Conferences on Artificial Intelligence Organization},\n\tauthor = {Xu, Qi and Qi, Yu and Yu, Hang and Shen, Jiangrong and Tang, Huajin and Pan, Gang},\n\tmonth = jul,\n\tyear = {2018},\n\tkeywords = {/unread},\n\tpages = {1646--1652},\n}\n\n","author_short":["Xu, Q.","Qi, Y.","Yu, H.","Shen, J.","Tang, H.","Pan, G."],"key":"xu_csnn_2018","id":"xu_csnn_2018","bibbaseid":"xu-qi-yu-shen-tang-pan-csnnanaugmentedspikingbasedframeworkwithperceptroninception-2018","role":"author","urls":{"Paper":"https://www.ijcai.org/proceedings/2018/228"},"keyword":["/unread"],"metadata":{"authorlinks":{}},"html":""},"bibtype":"inproceedings","biburl":"https://bibbase.org/zotero/victorjhu","dataSources":["CmHEoydhafhbkXXt5"],"keywords":["/unread"],"search_terms":["csnn","augmented","spiking","based","framework","perceptron","inception","xu","qi","yu","shen","tang","pan"],"title":"CSNN: An Augmented Spiking based Framework with Perceptron-Inception","year":2018}