MAGPIE: Multi-Task Media-Bias Analysis Generalization for Pre-Trained Identification of Expressions. Horych, T., Wessel, M., Wahle, J. P., Ruas, T., Waßmuth, J., Greiner-Petter, A., Aizawa, A., Gipp, B., & Spinde, T. March 2024. arXiv:2403.07910 [cs]. Paper: http://arxiv.org/abs/2403.07910

Abstract: Media bias detection poses a complex, multifaceted problem traditionally tackled using single-task models and small in-domain datasets, consequently lacking generalizability. To address this, we introduce MAGPIE, the first large-scale multi-task pre-training approach explicitly tailored for media bias detection. To enable pre-training at scale, we present Large Bias Mixture (LBM), a compilation of 59 bias-related tasks. MAGPIE outperforms previous approaches in media bias detection on the Bias Annotation By Experts (BABE) dataset, with a relative improvement of 3.3% F1-score. MAGPIE also performs better than previous models on 5 out of 8 tasks in the Media Bias Identification Benchmark (MBIB). Using a RoBERTa encoder, MAGPIE needs only 15% of finetuning steps compared to single-task approaches. Our evaluation shows, for instance, that tasks like sentiment and emotionality boost all learning, all tasks enhance fake news detection, and scaling tasks leads to the best results. MAGPIE confirms that MTL is a promising approach for addressing media bias detection, enhancing the accuracy and efficiency of existing models. Furthermore, LBM is the first available resource collection focused on media bias MTL.
@misc{horych_magpie_2024-1,
title = {{MAGPIE}: {Multi}-{Task} {Media}-{Bias} {Analysis} {Generalization} for {Pre}-{Trained} {Identification} of {Expressions}},
shorttitle = {{MAGPIE}},
url = {http://arxiv.org/abs/2403.07910},
abstract = {Media bias detection poses a complex, multifaceted problem traditionally tackled using single-task models and small in-domain datasets, consequently lacking generalizability. To address this, we introduce MAGPIE, the first large-scale multi-task pre-training approach explicitly tailored for media bias detection. To enable pre-training at scale, we present Large Bias Mixture (LBM), a compilation of 59 bias-related tasks. MAGPIE outperforms previous approaches in media bias detection on the Bias Annotation By Experts (BABE) dataset, with a relative improvement of 3.3\% F1-score. MAGPIE also performs better than previous models on 5 out of 8 tasks in the Media Bias Identification Benchmark (MBIB). Using a RoBERTa encoder, MAGPIE needs only 15\% of finetuning steps compared to single-task approaches. Our evaluation shows, for instance, that tasks like sentiment and emotionality boost all learning, all tasks enhance fake news detection, and scaling tasks leads to the best results. MAGPIE confirms that MTL is a promising approach for addressing media bias detection, enhancing the accuracy and efficiency of existing models. Furthermore, LBM is the first available resource collection focused on media bias MTL.},
urldate = {2024-03-25},
publisher = {arXiv},
author = {Horych, Tomáš and Wessel, Martin and Wahle, Jan Philip and Ruas, Terry and Waßmuth, Jerome and Greiner-Petter, André and Aizawa, Akiko and Gipp, Bela and Spinde, Timo},
month = mar,
year = {2024},
note = {arXiv:2403.07910 [cs]},
keywords = {!tr, !tr\_author, Computer Science - Computation and Language, Computer Science - Computers and Society, nlp\_media\_bias},
}
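The abstract describes multi-task learning over many bias-related tasks with a single RoBERTa encoder. As a rough illustration of that general pattern (not the authors' actual implementation), the sketch below shares one encoder across several hypothetical tasks, each with its own classification head; task names, label counts, and the example sentence are assumptions for demonstration only.

```python
# Illustrative sketch of a shared-encoder multi-task setup, the pattern the
# abstract describes. NOT the MAGPIE code: task names, head sizes, and the
# example input are hypothetical.
import torch.nn as nn
from transformers import RobertaModel, RobertaTokenizerFast


class MultiTaskBiasModel(nn.Module):
    def __init__(self, task_num_labels, model_name="roberta-base"):
        super().__init__()
        # One RoBERTa encoder shared across all tasks.
        self.encoder = RobertaModel.from_pretrained(model_name)
        hidden = self.encoder.config.hidden_size
        # One lightweight classification head per task.
        self.heads = nn.ModuleDict(
            {task: nn.Linear(hidden, n) for task, n in task_num_labels.items()}
        )

    def forward(self, task, input_ids, attention_mask):
        out = self.encoder(input_ids=input_ids, attention_mask=attention_mask)
        # Sentence-level classification from the [CLS]-position representation.
        cls = out.last_hidden_state[:, 0]
        return self.heads[task](cls)


# Hypothetical task subset; the LBM collection itself spans 59 bias-related tasks.
tasks = {"media_bias": 2, "sentiment": 3, "fake_news": 2}
model = MultiTaskBiasModel(tasks)
tok = RobertaTokenizerFast.from_pretrained("roberta-base")

batch = tok(["The senator's reckless plan will ruin the economy."],
            return_tensors="pt", padding=True, truncation=True)
logits = model("media_bias", batch["input_ids"], batch["attention_mask"])
```

In this kind of setup, pre-training would cycle batches from the different tasks through the shared encoder before fine-tuning on the target media-bias task; the scheduling and loss-weighting details are specific to the paper and not reproduced here.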
{"_id":"jpkZXfXKHJ7Ya6BRR","bibbaseid":"horych-wessel-wahle-ruas-wamuth-greinerpetter-aizawa-gipp-etal-magpiemultitaskmediabiasanalysisgeneralizationforpretrainedidentificationofexpressions-2024","author_short":["Horych, T.","Wessel, M.","Wahle, J. P.","Ruas, T.","Waßmuth, J.","Greiner-Petter, A.","Aizawa, A.","Gipp, B.","Spinde, T."],"bibdata":{"bibtype":"misc","type":"misc","title":"MAGPIE: Multi-Task Media-Bias Analysis Generalization for Pre-Trained Identification of Expressions","shorttitle":"MAGPIE","url":"http://arxiv.org/abs/2403.07910","abstract":"Media bias detection poses a complex, multifaceted problem traditionally tackled using single-task models and small in-domain datasets, consequently lacking generalizability. To address this, we introduce MAGPIE, the first large-scale multi-task pre-training approach explicitly tailored for media bias detection. To enable pre-training at scale, we present Large Bias Mixture (LBM), a compilation of 59 bias-related tasks. MAGPIE outperforms previous approaches in media bias detection on the Bias Annotation By Experts (BABE) dataset, with a relative improvement of 3.3% F1-score. MAGPIE also performs better than previous models on 5 out of 8 tasks in the Media Bias Identification Benchmark (MBIB). Using a RoBERTa encoder, MAGPIE needs only 15% of finetuning steps compared to single-task approaches. Our evaluation shows, for instance, that tasks like sentiment and emotionality boost all learning, all tasks enhance fake news detection, and scaling tasks leads to the best results. MAGPIE confirms that MTL is a promising approach for addressing media bias detection, enhancing the accuracy and efficiency of existing models. Furthermore, LBM is the first available resource collection focused on media bias MTL.","urldate":"2024-03-25","publisher":"arXiv","author":[{"propositions":[],"lastnames":["Horych"],"firstnames":["Tomáš"],"suffixes":[]},{"propositions":[],"lastnames":["Wessel"],"firstnames":["Martin"],"suffixes":[]},{"propositions":[],"lastnames":["Wahle"],"firstnames":["Jan","Philip"],"suffixes":[]},{"propositions":[],"lastnames":["Ruas"],"firstnames":["Terry"],"suffixes":[]},{"propositions":[],"lastnames":["Waßmuth"],"firstnames":["Jerome"],"suffixes":[]},{"propositions":[],"lastnames":["Greiner-Petter"],"firstnames":["André"],"suffixes":[]},{"propositions":[],"lastnames":["Aizawa"],"firstnames":["Akiko"],"suffixes":[]},{"propositions":[],"lastnames":["Gipp"],"firstnames":["Bela"],"suffixes":[]},{"propositions":[],"lastnames":["Spinde"],"firstnames":["Timo"],"suffixes":[]}],"month":"March","year":"2024","note":"arXiv:2403.07910 [cs]","keywords":"!tr, !tr_author, Computer Science - Computation and Language, Computer Science - Computers and Society, nlp_media_bias","bibtex":"@misc{horych_magpie_2024-1,\n\ttitle = {{MAGPIE}: {Multi}-{Task} {Media}-{Bias} {Analysis} {Generalization} for {Pre}-{Trained} {Identification} of {Expressions}},\n\tshorttitle = {{MAGPIE}},\n\turl = {http://arxiv.org/abs/2403.07910},\n\tabstract = {Media bias detection poses a complex, multifaceted problem traditionally tackled using single-task models and small in-domain datasets, consequently lacking generalizability. To address this, we introduce MAGPIE, the first large-scale multi-task pre-training approach explicitly tailored for media bias detection. To enable pre-training at scale, we present Large Bias Mixture (LBM), a compilation of 59 bias-related tasks. 
MAGPIE outperforms previous approaches in media bias detection on the Bias Annotation By Experts (BABE) dataset, with a relative improvement of 3.3\\% F1-score. MAGPIE also performs better than previous models on 5 out of 8 tasks in the Media Bias Identification Benchmark (MBIB). Using a RoBERTa encoder, MAGPIE needs only 15\\% of finetuning steps compared to single-task approaches. Our evaluation shows, for instance, that tasks like sentiment and emotionality boost all learning, all tasks enhance fake news detection, and scaling tasks leads to the best results. MAGPIE confirms that MTL is a promising approach for addressing media bias detection, enhancing the accuracy and efficiency of existing models. Furthermore, LBM is the first available resource collection focused on media bias MTL.},\n\turldate = {2024-03-25},\n\tpublisher = {arXiv},\n\tauthor = {Horych, Tomáš and Wessel, Martin and Wahle, Jan Philip and Ruas, Terry and Waßmuth, Jerome and Greiner-Petter, André and Aizawa, Akiko and Gipp, Bela and Spinde, Timo},\n\tmonth = mar,\n\tyear = {2024},\n\tnote = {arXiv:2403.07910 [cs]},\n\tkeywords = {!tr, !tr\\_author, Computer Science - Computation and Language, Computer Science - Computers and Society, nlp\\_media\\_bias},\n}\n\n","author_short":["Horych, T.","Wessel, M.","Wahle, J. P.","Ruas, T.","Waßmuth, J.","Greiner-Petter, A.","Aizawa, A.","Gipp, B.","Spinde, T."],"key":"horych_magpie_2024-1","id":"horych_magpie_2024-1","bibbaseid":"horych-wessel-wahle-ruas-wamuth-greinerpetter-aizawa-gipp-etal-magpiemultitaskmediabiasanalysisgeneralizationforpretrainedidentificationofexpressions-2024","role":"author","urls":{"Paper":"http://arxiv.org/abs/2403.07910"},"keyword":["!tr","!tr_author","Computer Science - Computation and Language","Computer Science - Computers and Society","nlp_media_bias"],"metadata":{"authorlinks":{}}},"bibtype":"misc","biburl":"https://api.zotero.org/groups/2503580/items?key=9bMVo5bWhsSJ7a6YWgBxjXpk&format=bibtex&limit=100","dataSources":["aJH3D6QaHCDgg2JGg","Zp98Nuv7ftsXLefzT","kHqqD8pzLteJJWS2X"],"keywords":["!tr","!tr_author","computer science - computation and language","computer science - computers and society","nlp_media_bias"],"search_terms":["magpie","multi","task","media","bias","analysis","generalization","pre","trained","identification","expressions","horych","wessel","wahle","ruas","waßmuth","greiner-petter","aizawa","gipp","spinde"],"title":"MAGPIE: Multi-Task Media-Bias Analysis Generalization for Pre-Trained Identification of Expressions","year":2024}