Characterizing Adversarial Examples Based on Spatial Consistency Information for Semantic Segmentation. Xiao, C., Deng, R., Li, B., Yu, F., Liu, M., & Song, D. In Ferrari, V., Hebert, M., Sminchisescu, C., & Weiss, Y., editors, Computer Vision – ECCV 2018, volume 11214 of Lecture Notes in Computer Science, pages 220–237, Cham, 2018. Springer International Publishing. doi: 10.1007/978-3-030-01249-6_14

Abstract: Deep Neural Networks (DNNs) have been widely applied in various recognition tasks. However, recently DNNs have been shown to be vulnerable against adversarial examples, which can mislead DNNs to make arbitrary incorrect predictions. While adversarial examples are well studied in classification tasks, other learning problems may have different properties. For instance, semantic segmentation requires additional components such as dilated convolutions and multiscale processing. In this paper, we aim to characterize adversarial examples based on spatial context information in semantic segmentation. We observe that spatial consistency information can be potentially leveraged to detect adversarial examples robustly even when a strong adaptive attacker has access to the model and detection strategies. We also show that adversarial examples based on attacks considered within the paper barely transfer among models, even though transferability is common in classification. Our observations shed new light on developing adversarial attacks and defenses to better understand the vulnerabilities of DNNs.
@inproceedings{xiao_characterizing_2018,
address = {Cham},
title = {Characterizing {Adversarial} {Examples} {Based} on {Spatial} {Consistency} {Information} for {Semantic} {Segmentation}},
volume = {11214},
isbn = {978-3-030-01248-9 978-3-030-01249-6},
url = {http://link.springer.com/10.1007/978-3-030-01249-6_14},
doi = {10.1007/978-3-030-01249-6_14},
abstract = {Deep Neural Networks (DNNs) have been widely applied in various recognition tasks. However, recently DNNs have been shown to be vulnerable against adversarial examples, which can mislead DNNs to make arbitrary incorrect predictions. While adversarial examples are well studied in classification tasks, other learning problems may have different properties. For instance, semantic segmentation requires additional components such as dilated convolutions and multiscale processing. In this paper, we aim to characterize adversarial examples based on spatial context information in semantic segmentation. We observe that spatial consistency information can be potentially leveraged to detect adversarial examples robustly even when a strong adaptive attacker has access to the model and detection strategies. We also show that adversarial examples based on attacks considered within the paper barely transfer among models, even though transferability is common in classification. Our observations shed new light on developing adversarial attacks and defenses to better understand the vulnerabilities of DNNs.},
language = {en},
urldate = {2020-10-13},
booktitle = {Computer {Vision} – {ECCV} 2018},
publisher = {Springer International Publishing},
author = {Xiao, Chaowei and Deng, Ruizhi and Li, Bo and Yu, Fisher and Liu, Mingyan and Song, Dawn},
editor = {Ferrari, Vittorio and Hebert, Martial and Sminchisescu, Cristian and Weiss, Yair},
year = {2018},
series = {Lecture Notes in Computer Science},
keywords = {Computer Science - Computer Vision and Pattern Recognition, Computer Science - Cryptography and Security, Computer Science - Machine Learning},
pages = {220--237},
}