MAGPIE: Multi-Task Analysis of Media-Bias Generalization with Pre-Trained Identification of Expressions. Horych, T., Wessel, M. P., Wahle, J. P., Ruas, T., Waßmuth, J., Greiner-Petter, A., Aizawa, A., Gipp, B., & Spinde, T. In Proc. Joint Int. Conf. Computational Linguistics, Language Resources and Evaluation (LREC-COLING), pages 10903–10920, Torino, Italy, May 2024. ELRA and ICCL. Core Rank B.

Abstract: Media bias detection poses a complex, multifaceted problem traditionally tackled using single-task models and small in-domain datasets, consequently lacking generalizability. To address this, we introduce MAGPIE, a large-scale multi-task pre-training approach explicitly tailored for media bias detection. To enable large-scale pre-training, we construct Large Bias Mixture (LBM), a compilation of 59 bias-related tasks. MAGPIE outperforms previous approaches in media bias detection on the Bias Annotation By Experts (BABE) dataset, with a relative improvement of 3.3% F1-score. Furthermore, using a RoBERTa encoder, we show that MAGPIE needs only 15% of fine-tuning steps compared to single-task approaches. We provide insight into task learning interference and show that sentiment analysis and emotion detection help learning of all other tasks, and scaling the number of tasks leads to the best results. MAGPIE confirms that MTL is a promising approach for addressing media bias detection, enhancing the accuracy and efficiency of existing models. Furthermore, LBM is the first available resource collection focused on media bias MTL.
@inproceedings{BibbaseHorychWWR24,
address = {Torino, Italy},
title = {{MAGPIE}: {Multi}-{Task} {Analysis} of {Media}-{Bias} {Generalization} with {Pre}-{Trained} {Identification} of {Expressions}},
shorttitle = {{MAGPIE}},
url = {https://aclanthology.org/2024.lrec-main.952},
abstract = {Media bias detection poses a complex, multifaceted problem traditionally tackled using single-task models and small in-domain datasets, consequently lacking generalizability. To address this, we introduce MAGPIE, a large-scale multi-task pre-training approach explicitly tailored for media bias detection. To enable large-scale pre-training, we construct Large Bias Mixture (LBM), a compilation of 59 bias-related tasks. MAGPIE outperforms previous approaches in media bias detection on the Bias Annotation By Experts (BABE) dataset, with a relative improvement of 3.3\% F1-score. Furthermore, using a RoBERTa encoder, we show that MAGPIE needs only 15\% of fine-tuning steps compared to single-task approaches. We provide insight into task learning interference and show that sentiment analysis and emotion detection help learning of all other tasks, and scaling the number of tasks leads to the best results. MAGPIE confirms that MTL is a promising approach for addressing media bias detection, enhancing the accuracy and efficiency of existing models. Furthermore, LBM is the first available resource collection focused on media bias MTL.},
urldate = {2024-05-23},
booktitle = {Proc. {Joint} {Int}. {Conf}. {Computational} {Linguistics}, {Language} {Resources} and {Evaluation} ({LREC}-{COLING})},
publisher = {ELRA and ICCL},
author = {Horych, Tomáš and Wessel, Martin Paul and Wahle, Jan Philip and Ruas, Terry and Waßmuth, Jerome and Greiner-Petter, André and Aizawa, Akiko and Gipp, Bela and Spinde, Timo},
month = may,
year = {2024},
note = {Core Rank B},
pages = {10903--10920},
}
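
The abstract describes multi-task learning with a shared RoBERTa encoder pre-trained on many bias-related tasks. The following is a minimal illustrative sketch of that kind of setup, not the authors' released implementation: a single RoBERTa encoder shared across several task-specific classification heads. The task names and label counts below are placeholders standing in for the 59 LBM tasks.

# Minimal sketch of a shared-encoder multi-task setup (assumption: one
# linear classification head per task on top of a RoBERTa encoder).
# Illustrative only; task names and label counts are hypothetical.
import torch.nn as nn
from transformers import RobertaModel, RobertaTokenizer


class MultiTaskRoberta(nn.Module):
    def __init__(self, task_num_labels: dict, model_name: str = "roberta-base"):
        super().__init__()
        self.encoder = RobertaModel.from_pretrained(model_name)
        hidden = self.encoder.config.hidden_size
        # One lightweight head per task; the encoder parameters are shared.
        self.heads = nn.ModuleDict(
            {task: nn.Linear(hidden, n) for task, n in task_num_labels.items()}
        )

    def forward(self, task: str, input_ids, attention_mask):
        out = self.encoder(input_ids=input_ids, attention_mask=attention_mask)
        cls = out.last_hidden_state[:, 0]  # first-token representation
        return self.heads[task](cls)


if __name__ == "__main__":
    # Placeholder task inventory (hypothetical names and label counts).
    tasks = {"media_bias": 2, "sentiment": 3, "emotion": 6}
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
    model = MultiTaskRoberta(tasks)

    batch = tokenizer(
        ["The senator's reckless plan stunned experts."],
        return_tensors="pt", padding=True, truncation=True,
    )
    logits = model("media_bias", batch["input_ids"], batch["attention_mask"])
    print(logits.shape)  # torch.Size([1, 2])

In a pre-training loop, batches from the different tasks would be interleaved so that all heads and the shared encoder are updated together; after pre-training, the encoder is fine-tuned on the target media-bias task.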
{"_id":"d5JsatvGuBh6N5rRZ","bibbaseid":"horych-wessel-wahle-ruas-wamuth-greinerpetter-aizawa-gipp-etal-magpiemultitaskanalysisofmediabiasgeneralizationwithpretrainedidentificationofexpressions-2024","author_short":["Horych, T.","Wessel, M. P.","Wahle, J. P.","Ruas, T.","Waßmuth, J.","Greiner-Petter, A.","Aizawa, A.","Gipp, B.","Spinde, T."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","address":"Torino, Italy","title":"MAGPIE: Multi-Task Analysis of Media-Bias Generalization with Pre-Trained Identification of Expressions","shorttitle":"MAGPIE","url":"https://aclanthology.org/2024.lrec-main.952","abstract":"Media bias detection poses a complex, multifaceted problem traditionally tackled using single-task models and small in-domain datasets, consequently lacking generalizability. To address this, we introduce MAGPIE, a large-scale multi-task pre-training approach explicitly tailored for media bias detection. To enable large-scale pre-training, we construct Large Bias Mixture (LBM), a compilation of 59 bias-related tasks. MAGPIE outperforms previous approaches in media bias detection on the Bias Annotation By Experts (BABE) dataset, with a relative improvement of 3.3% F1-score. Furthermore, using a RoBERTa encoder, we show that MAGPIE needs only 15% of fine-tuning steps compared to single-task approaches. We provide insight into task learning interference and show that sentiment analysis and emotion detection help learning of all other tasks, and scaling the number of tasks leads to the best results. MAGPIE confirms that MTL is a promising approach for addressing media bias detection, enhancing the accuracy and efficiency of existing models. Furthermore, LBM is the first available resource collection focused on media bias MTL.","urldate":"2024-05-23","booktitle":"Proc. Joint Int. Conf. Computational Linguistics, Language Resources and Evaluation (LREC-COLING)","publisher":"ELRA and ICCL","author":[{"propositions":[],"lastnames":["Horych"],"firstnames":["Tomáš"],"suffixes":[]},{"propositions":[],"lastnames":["Wessel"],"firstnames":["Martin","Paul"],"suffixes":[]},{"propositions":[],"lastnames":["Wahle"],"firstnames":["Jan","Philip"],"suffixes":[]},{"propositions":[],"lastnames":["Ruas"],"firstnames":["Terry"],"suffixes":[]},{"propositions":[],"lastnames":["Waßmuth"],"firstnames":["Jerome"],"suffixes":[]},{"propositions":[],"lastnames":["Greiner-Petter"],"firstnames":["André"],"suffixes":[]},{"propositions":[],"lastnames":["Aizawa"],"firstnames":["Akiko"],"suffixes":[]},{"propositions":[],"lastnames":["Gipp"],"firstnames":["Bela"],"suffixes":[]},{"propositions":[],"lastnames":["Spinde"],"firstnames":["Timo"],"suffixes":[]}],"month":"May","year":"2024","note":"Core Rank B","pages":"10903–10920","bibtex":"@inproceedings{BibbaseHorychWWR24,\n\taddress = {Torino, Italy},\n\ttitle = {{MAGPIE}: {Multi}-{Task} {Analysis} of {Media}-{Bias} {Generalization} with {Pre}-{Trained} {Identification} of {Expressions}},\n\tshorttitle = {{MAGPIE}},\n\turl = {https://aclanthology.org/2024.lrec-main.952},\n\tabstract = {Media bias detection poses a complex, multifaceted problem traditionally tackled using single-task models and small in-domain datasets, consequently lacking generalizability. To address this, we introduce MAGPIE, a large-scale multi-task pre-training approach explicitly tailored for media bias detection. To enable large-scale pre-training, we construct Large Bias Mixture (LBM), a compilation of 59 bias-related tasks. 
MAGPIE outperforms previous approaches in media bias detection on the Bias Annotation By Experts (BABE) dataset, with a relative improvement of 3.3\\% F1-score. Furthermore, using a RoBERTa encoder, we show that MAGPIE needs only 15\\% of fine-tuning steps compared to single-task approaches. We provide insight into task learning interference and show that sentiment analysis and emotion detection help learning of all other tasks, and scaling the number of tasks leads to the best results. MAGPIE confirms that MTL is a promising approach for addressing media bias detection, enhancing the accuracy and efficiency of existing models. Furthermore, LBM is the first available resource collection focused on media bias MTL.},\n\turldate = {2024-05-23},\n\tbooktitle = {Proc. {Joint} {Int}. {Conf}. {Computational} {Linguistics}, {Language} {Resources} and {Evaluation} ({LREC}-{COLING})},\n\tpublisher = {ELRA and ICCL},\n\tauthor = {Horych, Tomáš and Wessel, Martin Paul and Wahle, Jan Philip and Ruas, Terry and Waßmuth, Jerome and Greiner-Petter, André and Aizawa, Akiko and Gipp, Bela and Spinde, Timo},\n\tmonth = may,\n\tyear = {2024},\n\tnote = {Core Rank B},\n\tpages = {10903--10920},\n}\n\n","author_short":["Horych, T.","Wessel, M. P.","Wahle, J. P.","Ruas, T.","Waßmuth, J.","Greiner-Petter, A.","Aizawa, A.","Gipp, B.","Spinde, T."],"key":"BibbaseHorychWWR24","id":"BibbaseHorychWWR24","bibbaseid":"horych-wessel-wahle-ruas-wamuth-greinerpetter-aizawa-gipp-etal-magpiemultitaskanalysisofmediabiasgeneralizationwithpretrainedidentificationofexpressions-2024","role":"author","urls":{"Paper":"https://aclanthology.org/2024.lrec-main.952"},"metadata":{"authorlinks":{}}},"bibtype":"inproceedings","biburl":"https://api.zotero.org/users/7689706/collections/IBJGRWZX/items?key=R0b523dc3oYLxTGap1H4YXgd&format=bibtex&limit=100","dataSources":["wZtCXbB8M6GYSQHMx","aJH3D6QaHCDgg2JGg","kHqqD8pzLteJJWS2X","Zp98Nuv7ftsXLefzT"],"keywords":[],"search_terms":["magpie","multi","task","analysis","media","bias","generalization","pre","trained","identification","expressions","horych","wessel","wahle","ruas","waßmuth","greiner-petter","aizawa","gipp","spinde"],"title":"MAGPIE: Multi-Task Analysis of Media-Bias Generalization with Pre-Trained Identification of Expressions","year":2024}