Learning loss for active learning. Yoo, D. & Kweon, I. S. 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June, 2019. Conference Name: 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) ISBN: 9781728132938 Place: Long Beach, CA, USA Publisher: IEEE Read_Status: New Read_Status_Date: 2025-04-17T15:17:33.844Z Pinned_Collections: KK47UFN3
Paper doi abstract bibtex The performance of deep neural networks improves with more annotated data. The problem is that the budget for annotation is limited. One solution to this is active learning, where a model asks human to annotate data that it perceived as uncertain. A variety of recent methods have been proposed to apply active learning to deep networks but most of them are either designed specific for their target tasks or computationally inefficient for large networks. In this paper, we propose a novel active learning method that is simple but task-agnostic, and works efficiently with the deep networks. We attach a small parametric module, named ``loss prediction module,'' to a target network, and learn it to predict target losses of unlabeled inputs. Then, this module can suggest data that the target model is likely to produce a wrong prediction. This method is task-agnostic as networks are learned from a single loss regardless of target tasks. We rigorously validate our method through image classification, object detection, and human pose estimation, with the recent network architectures. The results demonstrate that our method consistently outperforms the previous methods over the tasks.
@inproceedings{yoo_learning_2019,
title = {Learning Loss for Active Learning},
copyright = {https://doi.org/10.15223/policy-029},
url = {https://ieeexplore.ieee.org/document/8954021/},
doi = {10.1109/CVPR.2019.00018},
abstract = {The performance of deep neural networks improves with more annotated data. The problem is that the budget for annotation is limited. One solution to this is active learning, where a model asks human to annotate data that it perceived as uncertain. A variety of recent methods have been proposed to apply active learning to deep networks but most of them are either designed specific for their target tasks or computationally inefficient for large networks. In this paper, we propose a novel active learning method that is simple but task-agnostic, and works efficiently with the deep networks. We attach a small parametric module, named ``loss prediction module,'' to a target network, and learn it to predict target losses of unlabeled inputs. Then, this module can suggest data that the target model is likely to produce a wrong prediction. This method is task-agnostic as networks are learned from a single loss regardless of target tasks. We rigorously validate our method through image classification, object detection, and human pose estimation, with the recent network architectures. The results demonstrate that our method consistently outperforms the previous methods over the tasks.},
language = {en},
urldate = {2025-04-17},
booktitle = {2019 {IEEE}/{CVF} Conference on Computer Vision and Pattern Recognition ({CVPR})},
author = {Yoo, Donggeun and Kweon, In So},
month = jun,
year = {2019},
publisher = {IEEE},
address = {Long Beach, CA, USA},
isbn = {9781728132938},
pages = {93--102},
note = {Read\_Status: New
Read\_Status\_Date: 2025-04-17T15:17:33.844Z
Pinned\_Collections: KK47UFN3},
}
Downloads: 0
{"_id":"JK5D2uou3MH4N3eK7","bibbaseid":"yoo-kweon-learninglossforactivelearning-2019","author_short":["Yoo, D.","Kweon, I. S."],"bibdata":{"bibtype":"article","type":"article","title":"Learning loss for active learning","copyright":"https://doi.org/10.15223/policy-029","url":"https://ieeexplore.ieee.org/document/8954021/","doi":"10.1109/CVPR.2019.00018","abstract":"The performance of deep neural networks improves with more annotated data. The problem is that the budget for annotation is limited. One solution to this is active learning, where a model asks human to annotate data that it perceived as uncertain. A variety of recent methods have been proposed to apply active learning to deep networks but most of them are either designed specific for their target tasks or computationally inefficient for large networks. In this paper, we propose a novel active learning method that is simple but task-agnostic, and works efficiently with the deep networks. We attach a small parametric module, named ``loss prediction module,'' to a target network, and learn it to predict target losses of unlabeled inputs. Then, this module can suggest data that the target model is likely to produce a wrong prediction. This method is task-agnostic as networks are learned from a single loss regardless of target tasks. We rigorously validate our method through image classification, object detection, and human pose estimation, with the recent network architectures. 
The results demonstrate that our method consistently outperforms the previous methods over the tasks.","language":"en","urldate":"2025-04-17","journal":"2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)","author":[{"propositions":[],"lastnames":["Yoo"],"firstnames":["Donggeun"],"suffixes":[]},{"propositions":[],"lastnames":["Kweon"],"firstnames":["In","So"],"suffixes":[]}],"month":"June","year":"2019","note":"Conference Name: 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) ISBN: 9781728132938 Place: Long Beach, CA, USA Publisher: IEEE Read_Status: New Read_Status_Date: 2025-04-17T15:17:33.844Z Pinned_Collections: KK47UFN3","pages":"93–102","bibtex":"@article{yoo_learning_2019,\n\ttitle = {Learning loss for active learning},\n\tcopyright = {https://doi.org/10.15223/policy-029},\n\turl = {https://ieeexplore.ieee.org/document/8954021/},\n\tdoi = {10.1109/CVPR.2019.00018},\n\tabstract = {The performance of deep neural networks improves with more annotated data. The problem is that the budget for annotation is limited. One solution to this is active learning, where a model asks human to annotate data that it perceived as uncertain. A variety of recent methods have been proposed to apply active learning to deep networks but most of them are either designed specific for their target tasks or computationally inefficient for large networks. In this paper, we propose a novel active learning method that is simple but task-agnostic, and works efficiently with the deep networks. We attach a small parametric module, named ``loss prediction module,'' to a target network, and learn it to predict target losses of unlabeled inputs. Then, this module can suggest data that the target model is likely to produce a wrong prediction. This method is task-agnostic as networks are learned from a single loss regardless of target tasks. 
We rigorously validate our method through image classification, object detection, and human pose estimation, with the recent network architectures. The results demonstrate that our method consistently outperforms the previous methods over the tasks.},\n\tlanguage = {en},\n\turldate = {2025-04-17},\n\tjournal = {2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},\n\tauthor = {Yoo, Donggeun and Kweon, In So},\n\tmonth = jun,\n\tyear = {2019},\n\tnote = {Conference Name: 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)\nISBN: 9781728132938\nPlace: Long Beach, CA, USA\nPublisher: IEEE\nRead\\_Status: New\nRead\\_Status\\_Date: 2025-04-17T15:17:33.844Z\nPinned\\_Collections: KK47UFN3},\n\tpages = {93--102},\n}\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","author_short":["Yoo, D.","Kweon, I. S."],"key":"yoo_learning_2019","id":"yoo_learning_2019","bibbaseid":"yoo-kweon-learninglossforactivelearning-2019","role":"author","urls":{"Paper":"https://ieeexplore.ieee.org/document/8954021/"},"metadata":{"authorlinks":{}},"html":""},"bibtype":"article","biburl":"https://bibbase.org/zotero/fsimonetta","dataSources":["nZHrFJKyxKKDaWYM8","pzyFFGWvxG2bs63zP"],"keywords":[],"search_terms":["learning","loss","active","learning","yoo","kweon"],"title":"Learning loss for active learning","year":2019}