Dialect prejudice predicts AI decisions about people's character, employability, and criminality. Hofmann, V., Kalluri, P. R., Jurafsky, D., & King, S. March, 2024. arXiv:2403.00742 [cs]Paper doi abstract bibtex Hundreds of millions of people now interact with language models, with uses ranging from serving as a writing aid to informing hiring decisions. Yet these language models are known to perpetuate systematic racial prejudices, making their judgments biased in problematic ways about groups like African Americans. While prior research has focused on overt racism in language models, social scientists have argued that racism with a more subtle character has developed over time. It is unknown whether this covert racism manifests in language models. Here, we demonstrate that language models embody covert racism in the form of dialect prejudice: we extend research showing that Americans hold raciolinguistic stereotypes about speakers of African American English and find that language models have the same prejudice, exhibiting covert stereotypes that are more negative than any human stereotypes about African Americans ever experimentally recorded, although closest to the ones from before the civil rights movement. By contrast, the language models' overt stereotypes about African Americans are much more positive. We demonstrate that dialect prejudice has the potential for harmful consequences by asking language models to make hypothetical decisions about people, based only on how they speak. Language models are more likely to suggest that speakers of African American English be assigned less prestigious jobs, be convicted of crimes, and be sentenced to death. Finally, we show that existing methods for alleviating racial bias in language models such as human feedback training do not mitigate the dialect prejudice, but can exacerbate the discrepancy between covert and overt stereotypes, by teaching language models to superficially conceal the racism that they maintain on a deeper level. Our findings have far-reaching implications for the fair and safe employment of language technology.
@misc{hofmann_dialect_2024,
title = {Dialect prejudice predicts {AI} decisions about people's character, employability, and criminality},
url = {http://arxiv.org/abs/2403.00742},
doi = {10.48550/arXiv.2403.00742},
abstract = {Hundreds of millions of people now interact with language models, with uses ranging from serving as a writing aid to informing hiring decisions. Yet these language models are known to perpetuate systematic racial prejudices, making their judgments biased in problematic ways about groups like African Americans. While prior research has focused on overt racism in language models, social scientists have argued that racism with a more subtle character has developed over time. It is unknown whether this covert racism manifests in language models. Here, we demonstrate that language models embody covert racism in the form of dialect prejudice: we extend research showing that Americans hold raciolinguistic stereotypes about speakers of African American English and find that language models have the same prejudice, exhibiting covert stereotypes that are more negative than any human stereotypes about African Americans ever experimentally recorded, although closest to the ones from before the civil rights movement. By contrast, the language models' overt stereotypes about African Americans are much more positive. We demonstrate that dialect prejudice has the potential for harmful consequences by asking language models to make hypothetical decisions about people, based only on how they speak. Language models are more likely to suggest that speakers of African American English be assigned less prestigious jobs, be convicted of crimes, and be sentenced to death. Finally, we show that existing methods for alleviating racial bias in language models such as human feedback training do not mitigate the dialect prejudice, but can exacerbate the discrepancy between covert and overt stereotypes, by teaching language models to superficially conceal the racism that they maintain on a deeper level. Our findings have far-reaching implications for the fair and safe employment of language technology.},
urldate = {2024-03-10},
publisher = {arXiv},
author = {Hofmann, Valentin and Kalluri, Pratyusha Ria and Jurafsky, Dan and King, Sharese},
month = mar,
year = {2024},
note = {arXiv:2403.00742 [cs]},
}
Downloads: 0
{"_id":"QK3fDgXnSNuJwErzn","bibbaseid":"hofmann-kalluri-jurafsky-king-dialectprejudicepredictsaidecisionsaboutpeoplescharacteremployabilityandcriminality-2024","author_short":["Hofmann, V.","Kalluri, P. R.","Jurafsky, D.","King, S."],"bibdata":{"bibtype":"misc","type":"misc","title":"Dialect prejudice predicts AI decisions about people's character, employability, and criminality","url":"http://arxiv.org/abs/2403.00742","doi":"10.48550/arXiv.2403.00742","abstract":"Hundreds of millions of people now interact with language models, with uses ranging from serving as a writing aid to informing hiring decisions. Yet these language models are known to perpetuate systematic racial prejudices, making their judgments biased in problematic ways about groups like African Americans. While prior research has focused on overt racism in language models, social scientists have argued that racism with a more subtle character has developed over time. It is unknown whether this covert racism manifests in language models. Here, we demonstrate that language models embody covert racism in the form of dialect prejudice: we extend research showing that Americans hold raciolinguistic stereotypes about speakers of African American English and find that language models have the same prejudice, exhibiting covert stereotypes that are more negative than any human stereotypes about African Americans ever experimentally recorded, although closest to the ones from before the civil rights movement. By contrast, the language models' overt stereotypes about African Americans are much more positive. We demonstrate that dialect prejudice has the potential for harmful consequences by asking language models to make hypothetical decisions about people, based only on how they speak. Language models are more likely to suggest that speakers of African American English be assigned less prestigious jobs, be convicted of crimes, and be sentenced to death. Finally, we show that existing methods for alleviating racial bias in language models such as human feedback training do not mitigate the dialect prejudice, but can exacerbate the discrepancy between covert and overt stereotypes, by teaching language models to superficially conceal the racism that they maintain on a deeper level. Our findings have far-reaching implications for the fair and safe employment of language technology.","urldate":"2024-03-10","publisher":"arXiv","author":[{"propositions":[],"lastnames":["Hofmann"],"firstnames":["Valentin"],"suffixes":[]},{"propositions":[],"lastnames":["Kalluri"],"firstnames":["Pratyusha","Ria"],"suffixes":[]},{"propositions":[],"lastnames":["Jurafsky"],"firstnames":["Dan"],"suffixes":[]},{"propositions":[],"lastnames":["King"],"firstnames":["Sharese"],"suffixes":[]}],"month":"March","year":"2024","note":"arXiv:2403.00742 [cs]","bibtex":"@misc{hofmann_dialect_2024,\n\ttitle = {Dialect prejudice predicts {AI} decisions about people's character, employability, and criminality},\n\turl = {http://arxiv.org/abs/2403.00742},\n\tdoi = {10.48550/arXiv.2403.00742},\n\tabstract = {Hundreds of millions of people now interact with language models, with uses ranging from serving as a writing aid to informing hiring decisions. Yet these language models are known to perpetuate systematic racial prejudices, making their judgments biased in problematic ways about groups like African Americans. While prior research has focused on overt racism in language models, social scientists have argued that racism with a more subtle character has developed over time. It is unknown whether this covert racism manifests in language models. Here, we demonstrate that language models embody covert racism in the form of dialect prejudice: we extend research showing that Americans hold raciolinguistic stereotypes about speakers of African American English and find that language models have the same prejudice, exhibiting covert stereotypes that are more negative than any human stereotypes about African Americans ever experimentally recorded, although closest to the ones from before the civil rights movement. By contrast, the language models' overt stereotypes about African Americans are much more positive. We demonstrate that dialect prejudice has the potential for harmful consequences by asking language models to make hypothetical decisions about people, based only on how they speak. Language models are more likely to suggest that speakers of African American English be assigned less prestigious jobs, be convicted of crimes, and be sentenced to death. Finally, we show that existing methods for alleviating racial bias in language models such as human feedback training do not mitigate the dialect prejudice, but can exacerbate the discrepancy between covert and overt stereotypes, by teaching language models to superficially conceal the racism that they maintain on a deeper level. Our findings have far-reaching implications for the fair and safe employment of language technology.},\n\turldate = {2024-03-10},\n\tpublisher = {arXiv},\n\tauthor = {Hofmann, Valentin and Kalluri, Pratyusha Ria and Jurafsky, Dan and King, Sharese},\n\tmonth = mar,\n\tyear = {2024},\n\tnote = {arXiv:2403.00742 [cs]},\n}\n\n","author_short":["Hofmann, V.","Kalluri, P. R.","Jurafsky, D.","King, S."],"key":"hofmann_dialect_2024","id":"hofmann_dialect_2024","bibbaseid":"hofmann-kalluri-jurafsky-king-dialectprejudicepredictsaidecisionsaboutpeoplescharacteremployabilityandcriminality-2024","role":"author","urls":{"Paper":"http://arxiv.org/abs/2403.00742"},"metadata":{"authorlinks":{}},"downloads":0},"bibtype":"misc","biburl":"https://bibbase.org/zotero/andreasmartin","dataSources":["jurZeGzSpYdkQ8rm4"],"keywords":[],"search_terms":["dialect","prejudice","predicts","decisions","people","character","employability","criminality","hofmann","kalluri","jurafsky","king"],"title":"Dialect prejudice predicts AI decisions about people's character, employability, and criminality","year":2024}