Visual Explanations for Convolutional Neural Networks via Latent Traversal of Generative Adversarial Networks (Student Abstract). Dravid, A. & Katsaggelos, A. K. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 36, pages 12939–12940, jun, 2022. Paper doi abstract bibtex Lack of explainability in artificial intelligence, specifically deep neural networks, remains a bottleneck for implementing models in practice. Popular techniques such as Gradient-weighted Class Activation Mapping (Grad-CAM) provide a coarse map of salient features in an image, which rarely tells the whole story of what a convolutional neural network (CNN) learned. Using COVID-19 chest X-rays, we present a method for interpreting what a CNN has learned by utilizing Generative Adversarial Networks (GANs). Our GAN framework disentangles lung structure from COVID-19 features. Using this GAN, we can visualize the transition of a pair of COVID negative lungs in a chest radiograph to a COVID positive pair by interpolating in the latent space of the GAN, which provides fine-grained visualization of how the CNN responds to varying features within the lungs.
@inproceedings{Amil2022,
  author    = {Dravid, Amil and Katsaggelos, Aggelos K.},
  title     = {Visual Explanations for Convolutional Neural Networks via Latent Traversal of Generative Adversarial Networks (Student Abstract)},
  booktitle = {Proceedings of the {AAAI} Conference on Artificial Intelligence},
  volume    = {36},
  number    = {11},
  pages     = {12939--12940},
  month     = jun,
  year      = {2022},
  issn      = {2374-3468},
  isbn      = {1577358767},
  doi       = {10.1609/aaai.v36i11.21606},
  url       = {https://ojs.aaai.org/index.php/AAAI/article/view/21606},
  abstract  = {Lack of explainability in artificial intelligence, specifically deep neural networks, remains a bottleneck for implementing models in practice. Popular techniques such as Gradient-weighted Class Activation Mapping (Grad-CAM) provide a coarse map of salient features in an image, which rarely tells the whole story of what a convolutional neural network(CNN) learned. Using COVID-19 chest X-rays, we present a method for interpreting what a CNN has learned by utilizing Generative Adversarial Networks (GANs). Our GAN framework disentangles lung structure from COVID-19 features. Using this GAN, we can visualize the transition of a pair of COVID negative lungs in a chest radiograph to a COVID positive pair by interpolating in the latent space of the GAN, which provides fine-grained visualization of how the CNN responds to varying features within the lungs.},
}
Downloads: 0
{"_id":"3ciLmZvrjCZ9yLte5","bibbaseid":"dravid-katsaggelos-visualexplanationsforconvolutionalneuralnetworksvialatenttraversalofgenerativeadversarialnetworksstudentabstract-2022","author_short":["Dravid, A.","Katsaggelos, A. K."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","abstract":"Lack of explainability in artificial intelligence, specifically deep neural networks, remains a bottleneck for implementing models in practice. Popular techniques such as Gradient-weighted Class Activation Mapping (Grad-CAM) provide a coarse map of salient features in an image, which rarely tells the whole story of what a convolutional neural network(CNN) learned. Using COVID-19 chest X-rays, we present a method for interpreting what a CNN has learned by utilizing Generative Adversarial Networks (GANs). Our GAN framework disentangles lung structure from COVID-19 features. Using this GAN, we can visualize the transition of a pair of COVID negative lungs in a chest radiograph to a COVID positive pair by interpolating in the latent space of the GAN, which provides fine-grained visualization of how the CNN responds to varying features within the lungs.","author":[{"propositions":[],"lastnames":["Dravid"],"firstnames":["Amil"],"suffixes":[]},{"propositions":[],"lastnames":["Katsaggelos"],"firstnames":["Aggelos","K."],"suffixes":[]}],"booktitle":"Proceedings of the AAAI Conference on Artificial Intelligence","doi":"10.1609/aaai.v36i11.21606","isbn":"1577358767","issn":"2374-3468","month":"jun","number":"11","pages":"12939–12940","title":"Visual Explanations for Convolutional Neural Networks via Latent Traversal of Generative Adversarial Networks (Student Abstract)","url":"https://ojs.aaai.org/index.php/AAAI/article/view/21606","volume":"36","year":"2022","bibtex":"@inproceedings{Amil2022,\nabstract = {Lack of explainability in artificial intelligence, specifically deep neural networks, remains a bottleneck for implementing models in practice. 
Popular techniques such as Gradient-weighted Class Activation Mapping (Grad-CAM) provide a coarse map of salient features in an image, which rarely tells the whole story of what a convolutional neural network(CNN) learned. Using COVID-19 chest X-rays, we present a method for interpreting what a CNN has learned by utilizing Generative Adversarial Networks (GANs). Our GAN framework disentangles lung structure from COVID-19 features. Using this GAN, we can visualize the transition of a pair of COVID negative lungs in a chest radiograph to a COVID positive pair by interpolating in the latent space of the GAN, which provides fine-grained visualization of how the CNN responds to varying features within the lungs.},\nauthor = {Dravid, Amil and Katsaggelos, Aggelos K.},\nbooktitle = {Proceedings of the AAAI Conference on Artificial Intelligence},\ndoi = {10.1609/aaai.v36i11.21606},\nisbn = {1577358767},\nissn = {2374-3468},\nmonth = {jun},\nnumber = {11},\npages = {12939--12940},\ntitle = {{Visual Explanations for Convolutional Neural Networks via Latent Traversal of Generative Adversarial Networks (Student Abstract)}},\nurl = {https://ojs.aaai.org/index.php/AAAI/article/view/21606},\nvolume = {36},\nyear = {2022}\n}\n","author_short":["Dravid, A.","Katsaggelos, A. 
K."],"key":"Amil2022","id":"Amil2022","bibbaseid":"dravid-katsaggelos-visualexplanationsforconvolutionalneuralnetworksvialatenttraversalofgenerativeadversarialnetworksstudentabstract-2022","role":"author","urls":{"Paper":"https://ojs.aaai.org/index.php/AAAI/article/view/21606"},"metadata":{"authorlinks":{}}},"bibtype":"inproceedings","biburl":"https://sites.northwestern.edu/ivpl/files/2023/06/IVPL_Updated_publications-1.bib","dataSources":["KTWAakbPXLGfYseXn","ePKPjG8C6yvpk4mEK","ya2CyA73rpZseyrZ8","zFPgsTDAW8aDnb5iN","E6Bth2QB5BYjBMZE7","nbnEjsN7MJhurAK9x","PNQZj6FjzoxxJk4Yi","7FpDWDGJ4KgpDiGfB","bod9ms4MQJHuJgPpp","QR9t5P2cLdJuzhfzK","D8k2SxfC5dKNRFgro","7Dwzbxq93HWrJEhT6","qhF8zxmGcJfvtdeAg","fvDEHD49E2ZRwE3fb","H7crv8NWhZup4d4by","DHqokWsryttGh7pJE","vRJd4wNg9HpoZSMHD","sYxQ6pxFgA59JRhxi","w2WahSbYrbcCKBDsC","XasdXLL99y5rygCmq","3gkSihZQRfAD2KBo3","t5XMbyZbtPBo4wBGS","bEpHM2CtrwW2qE8FP","teJzFLHexaz5AQW5z"],"keywords":[],"search_terms":["visual","explanations","convolutional","neural","networks","via","latent","traversal","generative","adversarial","networks","student","abstract","dravid","katsaggelos"],"title":"Visual Explanations for Convolutional Neural Networks via Latent Traversal of Generative Adversarial Networks (Student Abstract)","year":2022}