Curating Cyberbullying Datasets: a Human-AI Collaborative Approach. Gomez, C. E., Sztainberg, M. O., & Trana, R. E. International Journal of Bullying Prevention, 4(1):35–46, 2022. doi:10.1007/s42380-021-00114-6

Abstract: Cyberbullying is the use of digital communication tools and spaces to inflict physical, mental, or emotional distress. This serious form of aggression is frequently targeted at, but not limited to, vulnerable populations. A common problem when creating machine learning models to identify cyberbullying is the availability of accurately annotated, reliable, relevant, and diverse datasets. Datasets intended to train models for cyberbullying detection are typically annotated by human participants, which can introduce the following issues: (1) annotator bias, (2) incorrect annotation due to language and cultural barriers, and (3) the inherent subjectivity of the task can naturally create multiple valid labels for a given comment. The result can be a potentially inadequate dataset with one or more of these overlapping issues. We propose two machine learning approaches to identify and filter unambiguous comments in a cyberbullying dataset of roughly 19,000 comments collected from YouTube that was initially annotated using Amazon Mechanical Turk (AMT). Using consensus filtering methods, comments were classified as unambiguous when an agreement occurred between the AMT workers' majority label and the unanimous algorithmic filtering label. Comments identified as unambiguous were extracted and used to curate new datasets. We then used an artificial neural network to test for performance on these datasets. Compared to the original dataset, the classifier exhibits a large improvement in performance on modified versions of the dataset and can yield insight into the type of data that is consistently classified as bullying or non-bullying. This annotation approach can be expanded from cyberbullying datasets onto any classification corpus that has a similar complexity in scope.
@article{gst22,
abstract = {Cyberbullying is the use of digital communication tools and spaces to inflict physical, mental, or emotional distress. This serious form of aggression is frequently targeted at, but not limited to, vulnerable populations. A common problem when creating machine learning models to identify cyberbullying is the availability of accurately annotated, reliable, relevant, and diverse datasets. Datasets intended to train models for cyberbullying detection are typically annotated by human participants, which can introduce the following issues: (1) annotator bias, (2) incorrect annotation due to language and cultural barriers, and (3) the inherent subjectivity of the task can naturally create multiple valid labels for a given comment. The result can be a potentially inadequate dataset with one or more of these overlapping issues. We propose two machine learning approaches to identify and filter unambiguous comments in a cyberbullying dataset of roughly 19,000 comments collected from YouTube that was initially annotated using Amazon Mechanical Turk (AMT). Using consensus filtering methods, comments were classified as unambiguous when an agreement occurred between the AMT workers' majority label and the unanimous algorithmic filtering label. Comments identified as unambiguous were extracted and used to curate new datasets. We then used an artificial neural network to test for performance on these datasets. Compared to the original dataset, the classifier exhibits a large improvement in performance on modified versions of the dataset and can yield insight into the type of data that is consistently classified as bullying or non-bullying. This annotation approach can be expanded from cyberbullying datasets onto any classification corpus that has a similar complexity in scope.},
author = {Gomez, Christopher E. and Sztainberg, Marcelo O. and Trana, Rachel E.},
doi = {10.1007/s42380-021-00114-6},
issn = {2523-3661},
journal = {International Journal of Bullying Prevention},
keywords = {Consensus filtering,Cyberbullying,Data annotation,Machine learning,Supervised learning,YouTube},
number = {1},
pages = {35--46},
title = {{Curating Cyberbullying Datasets: a Human-AI Collaborative Approach}},
url = {https://doi.org/10.1007/s42380-021-00114-6},
volume = {4},
year = {2022}
}
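The consensus-filtering rule summarized in the abstract (a comment is treated as unambiguous only when the AMT workers' majority label matches a label that all algorithmic filters agree on) can be illustrated with a minimal sketch. The helper names, data layout, and two-filter setup below are illustrative assumptions, not the authors' implementation.

```python
# Sketch of the consensus-filtering criterion described in the abstract.
# Assumed inputs: per-comment label lists from AMT annotators and from
# several algorithmic filters (names and layout are hypothetical).
from collections import Counter

def majority_label(labels):
    """Most common label among the AMT annotators for one comment."""
    return Counter(labels).most_common(1)[0][0]

def unanimous_label(labels):
    """Shared label if all algorithmic filters agree, otherwise None."""
    return labels[0] if len(set(labels)) == 1 else None

def is_unambiguous(amt_labels, filter_labels):
    """Keep a comment only when the AMT majority label equals the
    unanimous algorithmic filtering label."""
    algo = unanimous_label(filter_labels)
    return algo is not None and algo == majority_label(amt_labels)

# Hypothetical examples: three AMT workers, two algorithmic filters.
print(is_unambiguous(["bullying", "bullying", "non-bullying"],
                     ["bullying", "bullying"]))        # True  -> kept
print(is_unambiguous(["bullying", "non-bullying", "non-bullying"],
                     ["bullying", "bullying"]))        # False -> filtered out
```

Comments passing this check would form the curated subsets on which the paper reports improved classifier performance.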