\n \n \n
\n
\n\n \n \n \n \n \n \n Margaret Mitchell on Ethical AI \\textbar Senate Subcommittee on Privacy, Technology, and the Law.\n \n \n \n \n\n\n \n Algorithmic Justice League\n\n\n \n\n\n\n October 2024.\n
\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@misc{algorithmic_justice_league_margaret_2024,\n\ttitle = {Margaret {Mitchell} on {Ethical} {AI} {\\textbar} {Senate} {Subcommittee} on {Privacy}, {Technology}, and the {Law}},\n\turl = {https://www.youtube.com/watch?v=rU_M3GDrjUg},\n\tabstract = {🎥 Watch AJL Research Collaborator Margaret Mitchell's testimony on ethical AI before the Senate Subcommittee on Privacy, Technology, and the Law. As a leading AI ethics researcher, she delivered one of the most penetrating analyses of AI challenges today, along with concrete paths forward.},\n\turldate = {2025-06-30},\n\tauthor = {{Algorithmic Justice League}},\n\tmonth = oct,\n\tyear = {2024},\n}\n\n\n\n
\n
\n\n\n
\n 🎥 Watch AJL Research Collaborator Margaret Mitchell's testimony on ethical AI before the Senate Subcommittee on Privacy, Technology, and the Law. As a leading AI ethics researcher, she delivered one of the most penetrating analyses of AI challenges today, along with concrete paths forward.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n The Race To Regulate AI.\n \n \n \n \n\n\n \n CNBC\n\n\n \n\n\n\n April 2024.\n
\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@misc{cnbc_race_2024,\n\ttitle = {The {Race} {To} {Regulate} {AI}},\n\turl = {https://www.youtube.com/watch?v=5aOkIauvsow},\n\tabstract = {Some businesses using new artificial intelligence tools have reported big gains in labor productivity. These AI assistants, backed by some of the biggest names in tech, could someday change how work gets done in the U.S. As the technology shuffles up white-collar work in the U.S., some policymakers are pitching ideas like 32-hour work weeks and robot taxes. Meanwhile, other countries are banning high-risk uses of AI in sectors like education and},\n\turldate = {2025-06-30},\n\tauthor = {{CNBC}},\n\tmonth = apr,\n\tyear = {2024},\n}\n\n\n\n\n\n\n\n
\n
\n\n\n
\n Some businesses using new artificial intelligence tools have reported big gains in labor productivity. These AI assistants, backed by some of the biggest names in tech, could someday change how work gets done in the U.S. As the technology shuffles up white-collar work in the U.S., some policymakers are pitching ideas like 32-hour work weeks and robot taxes. Meanwhile, other countries are banning high-risk uses of AI in sectors like education and\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Are You Giving Up Privacy for Convenience? Dr. Joy Buolamwini on Facial Recognition at GHC 2023.\n \n \n \n \n\n\n \n Algorithmic Justice League\n\n\n \n\n\n\n November 2024.\n
\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@misc{algorithmic_justice_league_are_2024,\n\ttitle = {Are {You} {Giving} {Up} {Privacy} for {Convenience}? {Dr}. {Joy} {Buolamwini} on {Facial} {Recognition} at {GHC} 2023},\n\tshorttitle = {Are {You} {Giving} {Up} {Privacy} for {Convenience}?},\n\turl = {https://www.youtube.com/watch?v=B5fjEQMD4fg},\n\tabstract = {Are you flying home for the most traveled weekend of the year? Listen to this conversation between Dr. Joy Buolamwini, Artist-in-Chief and Founder of the Algorithmic Justice League, and Brenda Darden Wilkerson, CEO of AnitaB.org, at the Grace Hopper Celebration 2023. Dr. Joy explains how "convenience shackles," like facial recognition technology in airports, influence us to give up our face data. The expansion of FRTs has increasingl},\n\turldate = {2025-06-30},\n\tauthor = {{Algorithmic Justice League}},\n\tmonth = nov,\n\tyear = {2024},\n}\n\n\n\n
\n
\n\n\n
\n Are you flying home for the most traveled weekend of the year? Listen to this conversation between Dr. Joy Buolamwini, Artist-in-Chief and Founder of the Algorithmic Justice League, and Brenda Darden Wilkerson, CEO of AnitaB.org, at the Grace Hopper Celebration 2023. Dr. Joy explains how \"convenience shackles,\" like facial recognition technology in airports, influence us to give up our face data. The expansion of FRTs has increasingl\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Training AI takes heavy toll on Kenyans working for $2 an hour \textbar 60 Minutes.\n \n \n \n \n\n\n \n 60 Minutes\n\n\n \n\n\n\n November 2024.\n
\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@misc{60_minutes_training_2024,\n\ttitle = {Training {AI} takes heavy toll on {Kenyans} working for \\$2 an hour {\\textbar} 60 {Minutes}},\n\turl = {https://www.youtube.com/watch?v=qZS50KXjAX0},\n\tabstract = {Digital workers in Kenya had to sift through horrific online content to train AI, but say they were underpaid, overworked, and got inadequate mental health support. So they’re fighting back.},\n\turldate = {2025-06-30},\n\tauthor = {{60 Minutes}},\n\tmonth = nov,\n\tyear = {2024},\n}\n\n\n\n
\n
\n\n\n
\n Digital workers in Kenya had to sift through horrific online content to train AI, but say they were underpaid, overworked, and got inadequate mental health support. So they’re fighting back.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n 'Overlooked' data workers who train AI speak out about harsh conditions.\n \n \n \n \n\n\n \n ABC News\n\n\n \n\n\n\n May 2024.\n
\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@misc{abc_news_overlooked_2024,\n\ttitle = {'{Overlooked}' data workers who train {AI} speak out about harsh conditions},\n\turl = {https://www.youtube.com/watch?v=IAxd1aC2XK4},\n\tabstract = {AI has reshaped everything from medical diagnoses, to wedding vows, to stock market gains, but the technology wouldn’t be possible without gig workers across the globe.},\n\turldate = {2025-06-30},\n\tauthor = {{ABC News}},\n\tmonth = may,\n\tyear = {2024},\n}\n\n\n\n
\n
\n\n\n
\n AI has reshaped everything from medical diagnoses, to wedding vows, to stock market gains, but the technology wouldn’t be possible without gig workers across the globe.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Can clean energy handle the AI boom?\n \n \n \n \n\n\n \n Vox\n\n\n \n\n\n\n October 2024.\n
\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@misc{vox_can_2024,\n\ttitle = {Can clean energy handle the {AI} boom?},\n\turl = {https://www.youtube.com/watch?v=YGfJeH5HRDQ},\n\tabstract = {How our digital lives are impacting our climate goals. \n\nThis video is presented by Klaviyo. Klaviyo has no editorial influence on our work, but their support makes videos like these possible.},\n\turldate = {2025-06-30},\n\tauthor = {{Vox}},\n\tmonth = oct,\n\tyear = {2024},\n}\n\n\n\n
\n
\n\n\n
\n How our digital lives are impacting our climate goals. This video is presented by Klaviyo. Klaviyo has no editorial influence on our work, but their support makes videos like these possible.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n AI and the energy required to power it fuel new climate concerns.\n \n \n \n \n\n\n \n PBS NewsHour\n\n\n \n\n\n\n July 2024.\n
\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@misc{pbs_newshour_ai_2024,\n\ttitle = {{AI} and the energy required to power it fuel new climate concerns},\n\turl = {https://www.youtube.com/watch?v=VOezW-b_mD8},\n\tabstract = {Google announced this week it is well behind on a pledge to all but eliminate its net carbon emissions by 2030. The company’s greenhouse gas outflow has increased in recent years mainly due to artificial intelligence and the energy required to power it. The AI arms race has experts worried about its climate consequences for energy and water. Economics correspondent Paul Solman reports.},\n\turldate = {2025-06-30},\n\tauthor = {{PBS NewsHour}},\n\tmonth = jul,\n\tyear = {2024},\n}\n\n\n\n
\n
\n\n\n
\n Google announced this week it is well behind on a pledge to all but eliminate its net carbon emissions by 2030. The company’s greenhouse gas outflow has increased in recent years mainly due to artificial intelligence and the energy required to power it. The AI arms race has experts worried about its climate consequences for energy and water. Economics correspondent Paul Solman reports.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n AI Ethics \\textbar Ethics Defined.\n \n \n \n \n\n\n \n McCombs School of Business\n\n\n \n\n\n\n September 2024.\n
\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@misc{mccombs_school_of_business_ai_2024,\n\ttitle = {{AI} {Ethics} {\\textbar} {Ethics} {Defined}},\n\turl = {https://www.youtube.com/watch?v=6yDr7CWLJ8c},\n\tabstract = {AI ethics focuses on ensuring that AI is developed and deployed responsibly, promoting fairness, transparency, accountability, and societal well-being while minimizing harm.},\n\turldate = {2025-06-30},\n\tauthor = {{McCombs School of Business}},\n\tmonth = sep,\n\tyear = {2024},\n}\n\n\n\n
\n
\n\n\n
\n AI ethics focuses on ensuring that AI is developed and deployed responsibly, promoting fairness, transparency, accountability, and societal well-being while minimizing harm.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Can ChatGPT Be Addictive? A Call to Examine the Shift from Support to Dependence in AI Conversational Large Language Models.\n \n \n \n \n\n\n \n Yankouskaya, A.; Liebherr, M.; and Ali, R.\n\n\n \n\n\n\n September 2024.\n
\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@misc{yankouskaya_can_2024,\n\taddress = {Rochester, NY},\n\ttype = {{SSRN} {Scholarly} {Paper}},\n\ttitle = {Can {ChatGPT} {Be} {Addictive}? {A} {Call} to {Examine} the {Shift} from {Support} to {Dependence} in {AI} {Conversational} {Large} {Language} {Models}},\n\tshorttitle = {Can {ChatGPT} {Be} {Addictive}?},\n\turl = {https://papers.ssrn.com/abstract=4972612},\n\tdoi = {10.2139/ssrn.4972612},\n\tabstract = {[PLEASE note the old title of this article was "ChatGPT Addiction: From Support to Dependence in AI Large Language Models"] The rapid rise of ChatGPT has introduced a transformative tool that enhances productivity, communication, and task automation across industries. However, concerns are emerging regarding the addictive potential of AI large language models (LLMs). This paper explores how ChatGPT fosters dependency through key features such as personalised responses, emotional validation, and continuous engagement. By offering instant gratification and adaptive dialogue, ChatGPT may blur the line between AI and human interaction, creating pseudosocial bonds that can replace genuine human relationships. Additionally, its ability to streamline decision-making and boost productivity may lead to over-reliance, reducing users' critical thinking skills and contributing to compulsive usage patterns. These behavioural tendencies align with known features of addiction, such as increased tolerance and conflict with daily life priorities. This viewpoint paper highlights the need for further research into the psychological and social impacts of prolonged interaction with AI tools like ChatGPT.},\n\tlanguage = {en},\n\turldate = {2025-02-13},\n\tpublisher = {Social Science Research Network},\n\tauthor = {Yankouskaya, Ala and Liebherr, Magnus and Ali, Raian},\n\tmonth = sep,\n\tyear = {2024},\n}\n\n\n\n\n\n\n\n\n\n\n\n
\n
\n\n\n
\n [PLEASE note the old title of this article was \"ChatGPT Addiction: From Support to Dependence in AI Large Language Models\"] The rapid rise of ChatGPT has introduced a transformative tool that enhances productivity, communication, and task automation across industries. However, concerns are emerging regarding the addictive potential of AI large language models (LLMs). This paper explores how ChatGPT fosters dependency through key features such as personalised responses, emotional validation, and continuous engagement. By offering instant gratification and adaptive dialogue, ChatGPT may blur the line between AI and human interaction, creating pseudosocial bonds that can replace genuine human relationships. Additionally, its ability to streamline decision-making and boost productivity may lead to over-reliance, reducing users' critical thinking skills and contributing to compulsive usage patterns. These behavioural tendencies align with known features of addiction, such as increased tolerance and conflict with daily life priorities. This viewpoint paper highlights the need for further research into the psychological and social impacts of prolonged interaction with AI tools like ChatGPT.\n
\n\n\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n 23. Cake-Making Analogy for Setting Generative AI Guidelines/Ethics.\n \n \n \n \n\n\n \n Bali, M.\n\n\n \n\n\n\n
Teaching and Generative AI: Pedagogical Possibilities and Productive Tensions,237–245. January 2024.\n
\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{bali_23_2024,\n\ttitle = {23. {Cake}-{Making} {Analogy} for {Setting} {Generative} {AI} {Guidelines}/{Ethics}},\n\turl = {https://digitalcommons.usu.edu/teachingai/19},\n\tdoi = {10.26079/e204-acc5},\n\tjournal = {Teaching and Generative AI: Pedagogical Possibilities and Productive Tensions},\n\tauthor = {Bali, Maha},\n\tmonth = jan,\n\tyear = {2024},\n\tpages = {237--245},\n}\n\n\n\n\n\n\n\n\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Belief, Inference, and the Self-Conscious Mind, by Eric Marcus.\n \n \n \n \n\n\n \n Singh, K.\n\n\n \n\n\n\n
Mind, 133(532): 1145–1151. October 2024.\n
\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{singh_belief_2024,\n\ttitle = {Belief, {Inference}, and the {Self}-{Conscious} {Mind}, by {Eric} {Marcus}},\n\tvolume = {133},\n\tissn = {0026-4423},\n\turl = {https://doi.org/10.1093/mind/fzac061},\n\tdoi = {10.1093/mind/fzac061},\n\tabstract = {Eric Marcus’ Belief, Inference, and the Self-Conscious Mind is an ambitious book: it attempts to do a lot in just 152 pages. And, impressively, it largely succeeds. Over the course of the book, with remarkable skill and efficiency, Marcus defends novel accounts of belief, inference, and the unity of the rational mind. Along the way, he makes strong cases for a variety of important theses about belief and inference, such as that we necessarily have non-inferential self-knowledge of what we believe, and that the standardly accepted distinction between dispositional and occurrent belief is spurious.},\n\tnumber = {532},\n\turldate = {2025-02-24},\n\tjournal = {Mind},\n\tauthor = {Singh, Keshav},\n\tmonth = oct,\n\tyear = {2024},\n\tpages = {1145--1151},\n}\n\n\n\n
\n
\n\n\n
\n Eric Marcus’ Belief, Inference, and the Self-Conscious Mind is an ambitious book: it attempts to do a lot in just 152 pages. And, impressively, it largely succeeds. Over the course of the book, with remarkable skill and efficiency, Marcus defends novel accounts of belief, inference, and the unity of the rational mind. Along the way, he makes strong cases for a variety of important theses about belief and inference, such as that we necessarily have non-inferential self-knowledge of what we believe, and that the standardly accepted distinction between dispositional and occurrent belief is spurious.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Publishers are selling papers to train AIs — and making millions of dollars.\n \n \n \n \n\n\n \n Kwon, D.\n\n\n \n\n\n\n
Nature. December 2024.\n
Bandiera_abtest: a Cg_type: News Publisher: Nature Publishing Group Subject_term: Publishing\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n \n \n\n\n\n
\n
@article{kwon_publishers_2024,\n\ttitle = {Publishers are selling papers to train {AIs} — and making millions of dollars},\n\tcopyright = {2024 Springer Nature Limited},\n\tissn = {1476-4687},\n\turl = {https://www.nature.com/articles/d41586-024-04018-5},\n\tdoi = {10.1038/d41586-024-04018-5},\n\tabstract = {Generative-AI models require massive amounts of data — scholarly publishers are licensing their content to train them.},\n\tlanguage = {en},\n\turldate = {2024-12-10},\n\tjournal = {Nature},\n\tauthor = {Kwon, Diana},\n\tmonth = dec,\n\tyear = {2024},\n\tnote = {Bandiera\\_abtest: a\nCg\\_type: News\nPublisher: Nature Publishing Group\nSubject\\_term: Publishing},\n\tkeywords = {Publishing},\n}\n\n\n\n\n\n\n\n
\n
\n\n\n
\n Generative-AI models require massive amounts of data — scholarly publishers are licensing their content to train them.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n An AI Assistant to Streamline the Research Process.\n \n \n \n \n\n\n \n Ratmelia, B.\n\n\n \n\n\n\n . November 2024.\n
Company: Annual Reviews Distributor: Annual Reviews Institution: Annual Reviews Label: Annual Reviews Publisher: Katina Magazine\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{ratmelia_ai_2024,\n\ttitle = {An {AI} {Assistant} to {Streamline} the {Research} {Process}},\n\turl = {https://katinamagazine.org/content/article/resource-reviews/2024/an-ai-assistant-to-streamline-the-research-process},\n\tdoi = {10.1146/katina-111024-2},\n\tabstract = {With a couple of standout features in a cluttered interface, this AI research assistant may be a helpful adjunct to traditional research methods for some users},\n\tlanguage = {en},\n\turldate = {2024-12-05},\n\tjournal = {Katina Magazine},\n\tauthor = {Ratmelia, Bella},\n\tmonth = nov,\n\tyear = {2024},\n\tnote = {Company: Annual Reviews\nDistributor: Annual Reviews\nInstitution: Annual Reviews\nLabel: Annual Reviews\nPublisher: Katina Magazine},\n}\n\n\n\n
\n
\n\n\n
\n With a couple of standout features in a cluttered interface, this AI research assistant may be a helpful adjunct to traditional research methods for some users\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n How Indigenous engineers are using AI to preserve their culture.\n \n \n \n \n\n\n \n \n\n\n \n\n\n\n November 2024.\n
\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@misc{noauthor_how_2024,\n\ttitle = {How {Indigenous} engineers are using {AI} to preserve their culture},\n\turl = {https://www.nbcnews.com/tech/innovation/indigenous-engineers-are-using-ai-preserve-culture-rcna176012},\n\tabstract = {Indigenous languages are rapidly disappearing, and AI could help preserve them, according to Indigenous technologists.},\n\tlanguage = {en},\n\turldate = {2024-12-02},\n\tjournal = {NBC News},\n\tmonth = nov,\n\tyear = {2024},\n}\n\n\n\n\n\n\n\n\n\n\n\n
\n
\n\n\n
\n Indigenous languages are rapidly disappearing, and AI could help preserve them, according to Indigenous technologists.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n AI’s computing gap: academics lack access to powerful chips needed for research.\n \n \n \n \n\n\n \n Kudiabor, H.\n\n\n \n\n\n\n
Nature. November 2024.\n
Bandiera_abtest: a Cg_type: News Publisher: Nature Publishing Group Subject_term: Research management, Mathematics and computing, Machine learning\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{kudiabor_ais_2024,\n\ttitle = {{AI}’s computing gap: academics lack access to powerful chips needed for research},\n\tcopyright = {2024 Springer Nature Limited},\n\tshorttitle = {{AI}’s computing gap},\n\turl = {https://www.nature.com/articles/d41586-024-03792-6},\n\tdoi = {10.1038/d41586-024-03792-6},\n\tabstract = {Survey highlights disparity between academic and industry scientists’ access to computing power needed to train machine-learning models.},\n\tlanguage = {en},\n\turldate = {2024-11-22},\n\tjournal = {Nature},\n\tauthor = {Kudiabor, Helena},\n\tmonth = nov,\n\tyear = {2024},\n\tnote = {Bandiera\\_abtest: a\nCg\\_type: News\nPublisher: Nature Publishing Group\nSubject\\_term: Research management, Mathematics and computing, Machine learning},\n\tkeywords = {Machine learning, Mathematics and computing, Research management},\n}\n\n\n\n
\n
\n\n\n
\n Survey highlights disparity between academic and industry scientists’ access to computing power needed to train machine-learning models.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n The line: AI and the future of personhood.\n \n \n \n\n\n \n Boyle, J.\n\n\n \n\n\n\n The MIT Press, Cambridge, 2024.\n
\n\n
\n\n
\n\n
\n\n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n \n \n \n \n\n\n\n
\n
@book{boyle_line_2024,\n\taddress = {Cambridge},\n\ttitle = {The line: {AI} and the future of personhood},\n\tisbn = {978-0-262-37967-0},\n\tshorttitle = {The line},\n\tabstract = {How AI will challenge our ideas about personhood. Chatbots like ChatGPT have challenged human exceptionalism: we are no longer the only beings capable of generating language and ideas fluently. But is ChatGPT conscious Or is it merely engaging in sophisticated mimicry And what happens in the future if the claims to consciousness are more credible. In "The Line, " James Boyle explores what these changes might do to our concept of personhood, to "the line" we believe separates our species from the rest of the world, but also separates "persons" with legal rights from objects. The personhood wars -- over the rights of corporations, animals, over the question of when life begins and ends -- have always been contentious. We've even denied the personhood of members of our own species. How will those old fights affect the new ones, and vice versa Boyle pursues those questions across a dizzying array of fields. He discusses moral philosophy and science fiction, transgenic species, nonhuman animals, the surprising history of corporate personality, and AI itself. Engaging with empathy and anthropomorphism, courtroom battles on behalf of chimps, and doom-laden projections about the threat of AI, The Line offers fascinating and thoughtful answers to questions about our future that are arriving sooner than we think.},\n\tlanguage = {eng},\n\tpublisher = {The MIT Press},\n\tauthor = {Boyle, James},\n\tyear = {2024},\n\tkeywords = {Artificial intelligence, Moral and ethical aspects},\n}\n\n\n\n\n\n\n\n
\n
\n\n\n
\n How AI will challenge our ideas about personhood. Chatbots like ChatGPT have challenged human exceptionalism: we are no longer the only beings capable of generating language and ideas fluently. But is ChatGPT conscious Or is it merely engaging in sophisticated mimicry And what happens in the future if the claims to consciousness are more credible. In \"The Line, \" James Boyle explores what these changes might do to our concept of personhood, to \"the line\" we believe separates our species from the rest of the world, but also separates \"persons\" with legal rights from objects. The personhood wars – over the rights of corporations, animals, over the question of when life begins and ends – have always been contentious. We've even denied the personhood of members of our own species. How will those old fights affect the new ones, and vice versa Boyle pursues those questions across a dizzying array of fields. He discusses moral philosophy and science fiction, transgenic species, nonhuman animals, the surprising history of corporate personality, and AI itself. Engaging with empathy and anthropomorphism, courtroom battles on behalf of chimps, and doom-laden projections about the threat of AI, The Line offers fascinating and thoughtful answers to questions about our future that are arriving sooner than we think.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Opinion \\textbar AI May Ruin the University as We Know It.\n \n \n \n \n\n\n \n \n\n\n \n\n\n\n October 2024.\n
Section: The Review\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@misc{noauthor_opinion_2024,\n\ttitle = {Opinion {\\textbar} {AI} {May} {Ruin} the {University} as {We} {Know} {It}},\n\turl = {https://www.chronicle.com/article/ai-may-ruin-the-university-as-we-know-it},\n\tabstract = {The existential threat of the newest wave of ed-tech.},\n\tlanguage = {en},\n\turldate = {2024-10-31},\n\tjournal = {The Chronicle of Higher Education},\n\tmonth = oct,\n\tyear = {2024},\n\tnote = {Section: The Review},\n}\n\n\n\n\n\n\n\n
\n
\n\n\n
\n The existential threat of the newest wave of ed-tech.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Scientific papers that mention AI get a citation boost.\n \n \n \n \n\n\n \n Lenharo, M.\n\n\n \n\n\n\n
Nature. October 2024.\n
Bandiera_abtest: a Cg_type: News Publisher: Nature Publishing Group Subject_term: Computer science, Machine learning, Scientific community, Publishing\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{lenharo_scientific_2024,\n\ttitle = {Scientific papers that mention {AI} get a citation boost},\n\tcopyright = {2024 Springer Nature Limited},\n\turl = {https://www.nature.com/articles/d41586-024-03355-9},\n\tdoi = {10.1038/d41586-024-03355-9},\n\tabstract = {An analysis of tens of millions of papers shows which fields have embraced AI tools with enthusiasm — and which have been slower.},\n\tlanguage = {en},\n\turldate = {2024-10-23},\n\tjournal = {Nature},\n\tauthor = {Lenharo, Mariana},\n\tmonth = oct,\n\tyear = {2024},\n\tnote = {Bandiera\\_abtest: a\nCg\\_type: News\nPublisher: Nature Publishing Group\nSubject\\_term: Computer science, Machine learning, Scientific community, Publishing},\n\tkeywords = {Computer science, Machine learning, Publishing, Scientific community},\n}\n\n\n\n\n\n\n\n
\n
\n\n\n
\n An analysis of tens of millions of papers shows which fields have embraced AI tools with enthusiasm — and which have been slower.\n
\n\n\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Epistemic Injustice in Generative AI.\n \n \n \n \n\n\n \n Kay, J.; Kasirzadeh, A.; and Mohamed, S.\n\n\n \n\n\n\n
Proceedings of the AAAI/ACM Conference on AI, Ethics, and Society, 7: 684–697. October 2024.\n
\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{kay_epistemic_2024,\n\ttitle = {Epistemic {Injustice} in {Generative} {AI}},\n\tvolume = {7},\n\tcopyright = {Copyright (c) 2024 Association for the Advancement of Artificial Intelligence},\n\turl = {https://ojs.aaai.org/index.php/AIES/article/view/31671},\n\tabstract = {This paper investigates how generative AI can potentially undermine the integrity of collective knowledge and the processes we rely on to acquire, assess, and trust information, posing a significant threat to our knowledge ecosystem and democratic discourse. Grounded in social and political philosophy, we introduce the concept of generative algorithmic epistemic injustice. We identify four key dimensions of this phenomenon: amplified and manipulative testimonial injustice, along with hermeneutical ignorance and access injustice. We illustrate each dimension with real-world examples that reveal how generative AI can produce or amplify misinformation, perpetuate representational harm, and create epistemic inequities, particularly in multilingual contexts. By highlighting these injustices, we aim to inform the development of epistemically just generative AI systems, proposing strategies for resistance, system design principles, and two approaches that leverage generative AI to foster a more equitable information ecosystem, thereby safeguarding democratic values and the integrity of knowledge production.},\n\tlanguage = {en},\n\turldate = {2024-10-21},\n\tjournal = {Proceedings of the AAAI/ACM Conference on AI, Ethics, and Society},\n\tauthor = {Kay, Jackie and Kasirzadeh, Atoosa and Mohamed, Shakir},\n\tmonth = oct,\n\tyear = {2024},\n\tpages = {684--697},\n}\n\n\n\n\n\n\n\n
\n
\n\n\n
\n This paper investigates how generative AI can potentially undermine the integrity of collective knowledge and the processes we rely on to acquire, assess, and trust information, posing a significant threat to our knowledge ecosystem and democratic discourse. Grounded in social and political philosophy, we introduce the concept of generative algorithmic epistemic injustice. We identify four key dimensions of this phenomenon: amplified and manipulative testimonial injustice, along with hermeneutical ignorance and access injustice. We illustrate each dimension with real-world examples that reveal how generative AI can produce or amplify misinformation, perpetuate representational harm, and create epistemic inequities, particularly in multilingual contexts. By highlighting these injustices, we aim to inform the development of epistemically just generative AI systems, proposing strategies for resistance, system design principles, and two approaches that leverage generative AI to foster a more equitable information ecosystem, thereby safeguarding democratic values and the integrity of knowledge production.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Why A.I. Isn’t Going to Make Art.\n \n \n \n \n\n\n \n Chiang, T.\n\n\n \n\n\n\n
The New Yorker. August 2024.\n
Section: the weekend essay\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{chiang_why_2024,\n\ttitle = {Why {A}.{I}. {Isn}’t {Going} to {Make} {Art}},\n\tissn = {0028-792X},\n\turl = {https://www.newyorker.com/culture/the-weekend-essay/why-ai-isnt-going-to-make-art},\n\tabstract = {To create a novel or a painting, an artist makes choices that are fundamentally alien to artificial intelligence.},\n\tlanguage = {en-US},\n\turldate = {2024-10-17},\n\tjournal = {The New Yorker},\n\tauthor = {Chiang, Ted},\n\tmonth = aug,\n\tyear = {2024},\n\tnote = {Section: the weekend essay},\n\tkeywords = {art, artificial intelligence (a.i.), artists, audio, automation, creativity},\n}\n\n\n\n\n\n\n\n
\n
\n\n\n
\n To create a novel or a painting, an artist makes choices that are fundamentally alien to artificial intelligence.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n How Colleges Are Reimagining Learning in an AI World.\n \n \n \n \n\n\n \n \n\n\n \n\n\n\n October 2024.\n
Section: News\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@misc{noauthor_how_2024,\n\ttitle = {How {Colleges} {Are} {Reimagining} {Learning} in an {AI} {World}},\n\turl = {https://www.chronicle.com/article/the-future-is-hybrid},\n\tabstract = {They shift to working with AI, not around it.},\n\tlanguage = {en},\n\turldate = {2024-10-17},\n\tjournal = {The Chronicle of Higher Education},\n\tmonth = oct,\n\tyear = {2024},\n\tnote = {Section: News},\n}\n\n\n\n
\n
\n\n\n
\n They shift to working with AI, not around it.\n
\n\n\n
\n\n\n \n\n\n
\n
\n\n \n \n \n \n \n \n Students Are Less Able and Less Willing to Read. Professors Are Stymied.\n \n \n \n \n\n\n \n McMurtrie, B.\n\n\n \n\n\n\n May 2024.\n
\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n \n \n \n \n\n\n\n
\n
@misc{mcmurtrie_students_2024,\n\ttitle = {Students {Are} {Less} {Able} and {Less} {Willing} to {Read}. {Professors} {Are} {Stymied}.},\n\turl = {https://www.chronicle.com/article/is-this-the-end-of-reading},\n\turldate = {2024-10-10},\n\tauthor = {McMurtrie, Beth},\n\tmonth = may,\n\tyear = {2024},\n\tkeywords = {Reading, Teaching with AI},\n}\n\n\n\n\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Justification and Roadmap for Artificial Intelligence (AI) Literacy Courses in Higher Education.\n \n \n \n \n\n\n \n Hazari, S.\n\n\n \n\n\n\n
Journal of Educational Research and Practice, 14(1). April 2024.\n
\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n \n \n\n\n\n
\n
@article{hazari_justification_2024,\n\ttitle = {Justification and {Roadmap} for {Artificial} {Intelligence} ({AI}) {Literacy} {Courses} in {Higher} {Education}},\n\tvolume = {14},\n\tissn = {2167-8693},\n\turl = {https://scholarworks.waldenu.edu/jerap/vol14/iss1/7},\n\tdoi = {10.5590/JERAP.2024.14.1.07},\n\tnumber = {1},\n\tjournal = {Journal of Educational Research and Practice},\n\tauthor = {Hazari, Sunil},\n\tmonth = apr,\n\tyear = {2024},\n\tkeywords = {AI Literacy},\n}\n\n\n\n\n\n\n\n\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Has your paper been used to train an AI model? Almost certainly.\n \n \n \n \n\n\n \n Gibney, E.\n\n\n \n\n\n\n
Nature, 632(8026): 715–716. August 2024.\n
Bandiera_abtest: a Cg_type: News Publisher: Nature Publishing Group Subject_term: Machine learning, Databases\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n \n \n \n \n\n\n\n
\n
@article{gibney_has_2024,\n\ttitle = {Has your paper been used to train an {AI} model? {Almost} certainly},\n\tvolume = {632},\n\tcopyright = {2024 Springer Nature Limited},\n\tshorttitle = {Has your paper been used to train an {AI} model?},\n\turl = {https://www.nature.com/articles/d41586-024-02599-9},\n\tdoi = {10.1038/d41586-024-02599-9},\n\tabstract = {Artificial-intelligence developers are buying access to valuable data sets that contain research papers — raising uncomfortable questions about copyright.},\n\tlanguage = {en},\n\tnumber = {8026},\n\turldate = {2024-10-09},\n\tjournal = {Nature},\n\tauthor = {Gibney, Elizabeth},\n\tmonth = aug,\n\tyear = {2024},\n\tnote = {Bandiera\\_abtest: a\nCg\\_type: News\nPublisher: Nature Publishing Group\nSubject\\_term: Machine learning, Databases},\n\tkeywords = {Databases, Machine learning},\n\tpages = {715--716},\n}\n\n\n\n\n\n\n\n
\n
\n\n\n
\n Artificial-intelligence developers are buying access to valuable data sets that contain research papers — raising uncomfortable questions about copyright.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Colleges Begin to Reimagine Learning in an AI World.\n \n \n \n \n\n\n \n \n\n\n \n\n\n\n October 2024.\n
Section: News\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@misc{noauthor_colleges_2024,\n\ttitle = {Colleges {Begin} to {Reimagine} {Learning} in an {AI} {World}},\n\turl = {https://www.chronicle.com/article/the-future-is-hybrid},\n\tabstract = {They shift to working with AI, not around it.},\n\tlanguage = {en},\n\turldate = {2024-10-04},\n\tjournal = {The Chronicle of Higher Education},\n\tmonth = oct,\n\tyear = {2024},\n\tnote = {Section: News},\n}\n\n\n\n\n\n\n\n\n\n\n\n
\n
\n\n\n
\n They shift to working with AI, not around it.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Exploring the Impact of Generative Artificial Intelligence on Higher Education Students’ Utilization of Library Resources : A Critical Examination.\n \n \n \n \n\n\n \n Meakin, L.\n\n\n \n\n\n\n
Information Technology and Libraries, 43(3). September 2024.\n
Number: 3\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{meakin_exploring_2024,\n\ttitle = {Exploring the {Impact} of {Generative} {Artificial} {Intelligence} on {Higher} {Education} {Students}’ {Utilization} of {Library} {Resources} : {A} {Critical} {Examination}},\n\tvolume = {43},\n\tcopyright = {Copyright (c) 2024 Lynsey Meakin},\n\tissn = {2163-5226},\n\tshorttitle = {Exploring the {Impact} of {Generative} {Artificial} {Intelligence} on {Higher} {Education} {Students}’ {Utilization} of {Library} {Resources}},\n\turl = {https://ital.corejournals.org/index.php/ital/article/view/17246},\n\tdoi = {10.5860/ital.v43i3.17246},\n\tabstract = {In the field of higher education, generative artificial intelligence (GenAI) has become a revolutionary influence, shaping how students access and use library resources. This study explores the intricate balance of both positive and negative effects that GenAI might have on the academic library experience for higher education (HE) students. The key aspects of enhanced discovery and retrieval, personalization and engagement, streamlined research processes, and digital literacy and information evaluation potentially offered through using generative AI will be considered. These prospective advantages to HE students offered by using GenAI will be examined through the theoretical framework of the Technological Acceptance Model (TAM) introduced by Davis et al. in 1986, which suggests that perceived usefulness and perceived ease of use are key factors in determining user acceptance and utilization of technology. 
The adoption of GenAI by higher education students will be analyzed from this viewpoint before assessing its impact on their use of library resources.},\n\tlanguage = {en},\n\tnumber = {3},\n\turldate = {2024-10-01},\n\tjournal = {Information Technology and Libraries},\n\tauthor = {Meakin, Lynsey},\n\tmonth = sep,\n\tyear = {2024},\n\tnote = {Number: 3},\n\tkeywords = {Generative Artificial Intelligence, HE students, Library resources, Technology Acceptance Model (TAM)},\n}\n\n\n\n\n\n\n\n
\n
\n\n\n
\n In the field of higher education, generative artificial intelligence (GenAI) has become a revolutionary influence, shaping how students access and use library resources. This study explores the intricate balance of both positive and negative effects that GenAI might have on the academic library experience for higher education (HE) students. The key aspects of enhanced discovery and retrieval, personalization and engagement, streamlined research processes, and digital literacy and information evaluation potentially offered through using generative AI will be considered. These prospective advantages to HE students offered by using GenAI will be examined through the theoretical framework of the Technological Acceptance Model (TAM) introduced by Davis et al. in 1986, which suggests that perceived usefulness and perceived ease of use are key factors in determining user acceptance and utilization of technology. The adoption of GenAI by higher education students will be analyzed from this viewpoint before assessing its impact on their use of library resources.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n The Jack in the Black Box: Teaching College Students to Use ChatGPT Critically.\n \n \n \n \n\n\n \n Wan, S.\n\n\n \n\n\n\n
Information Technology and Libraries, 43(3). September 2024.\n
Number: 3\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{wan_jack_2024,\n\ttitle = {The {Jack} in the {Black} {Box}: {Teaching} {College} {Students} to {Use} {ChatGPT} {Critically}},\n\tvolume = {43},\n\tcopyright = {Copyright (c) 2024 Shu Wan},\n\tissn = {2163-5226},\n\tshorttitle = {The {Jack} in the {Black} {Box}},\n\turl = {https://ital.corejournals.org/index.php/ital/article/view/17234},\n\tdoi = {10.5860/ital.v43i3.17234},\n\tabstract = {This essay reviews the design and deployment of a critical generative AI and information literacy assignment along with its inspirations for instructional librarians in American colleges today.},\n\tlanguage = {en},\n\tnumber = {3},\n\turldate = {2024-10-01},\n\tjournal = {Information Technology and Libraries},\n\tauthor = {Wan, Shu},\n\tmonth = sep,\n\tyear = {2024},\n\tnote = {Number: 3},\n}\n\n\n\n\n\n\n\n
\n
\n\n\n
\n This essay reviews the design and deployment of a critical generative AI and information literacy assignment along with its inspirations for instructional librarians in American colleges today.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Southern Maine police are testing new AI software to write their police reports.\n \n \n \n \n\n\n \n \n\n\n \n\n\n\n September 2024.\n
\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@misc{noauthor_southern_2024,\n\ttitle = {Southern {Maine} police are testing new {AI} software to write their police reports},\n\turl = {https://www.pressherald.com/2024/09/29/southern-maine-police-are-testing-new-ai-software-to-write-their-police-reports/},\n\tabstract = {Both the Cumberland County Sheriff's Office and Portland Police Department have tried out the tech, which is raising questions about whether it's cutting-edge or cutting corners.},\n\turldate = {2024-09-30},\n\tjournal = {Press Herald},\n\tmonth = sep,\n\tyear = {2024},\n}\n\n\n\n
\n
\n\n\n
\n Both the Cumberland County Sheriff's Office and Portland Police Department have tried out the tech, which is raising questions about whether it's cutting-edge or cutting corners.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n The lost data: how AI systems censor LGBTQ+ content in the name of safety.\n \n \n \n \n\n\n \n Chen, S.\n\n\n \n\n\n\n
Nature Computational Science, 4(9): 629–632. September 2024.\n
Publisher: Nature Publishing Group\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{chen_lost_2024,\n\ttitle = {The lost data: how {AI} systems censor {LGBTQ}+ content in the name of safety},\n\tvolume = {4},\n\tcopyright = {2024 Springer Nature America, Inc.},\n\tissn = {2662-8457},\n\tshorttitle = {The lost data},\n\turl = {https://www.nature.com/articles/s43588-024-00695-4},\n\tdoi = {10.1038/s43588-024-00695-4},\n\tabstract = {Many AI companies implement safety systems to protect users from offensive or inaccurate content. Though well intentioned, these filters can exacerbate existing inequalities, and data shows that they have disproportionately removed LGBTQ+ content.},\n\tlanguage = {en},\n\tnumber = {9},\n\turldate = {2024-09-26},\n\tjournal = {Nature Computational Science},\n\tauthor = {Chen, Sophia},\n\tmonth = sep,\n\tyear = {2024},\n\tnote = {Publisher: Nature Publishing Group},\n\tkeywords = {Computational science, Computer science, Ethics},\n\tpages = {629--632},\n}\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
\n
\n\n\n
\n Many AI companies implement safety systems to protect users from offensive or inaccurate content. Though well intentioned, these filters can exacerbate existing inequalities, and data shows that they have disproportionately removed LGBTQ+ content.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n OpenAI Unveils New ChatGPT That Can Reason Through Math and Science.\n \n \n \n \n\n\n \n Metz, C.\n\n\n \n\n\n\n
The New York Times. September 2024.\n
\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{metz_openai_2024,\n\tchapter = {Technology},\n\ttitle = {{OpenAI} {Unveils} {New} {ChatGPT} {That} {Can} {Reason} {Through} {Math} and {Science}},\n\tissn = {0362-4331},\n\turl = {https://www.nytimes.com/2024/09/12/technology/openai-chatgpt-math.html},\n\tabstract = {Driven by new technology called OpenAI o1, the chatbot can test various strategies and try to identify mistakes as it tackles complex tasks.},\n\tlanguage = {en-US},\n\turldate = {2024-09-13},\n\tjournal = {The New York Times},\n\tauthor = {Metz, Cade},\n\tmonth = sep,\n\tyear = {2024},\n\tkeywords = {Artificial Intelligence, ChatGPT, Computers and the Internet, Mathematics, OpenAI Labs, Research},\n}\n\n\n\n
\n
\n\n\n
\n Driven by new technology called OpenAI o1, the chatbot can test various strategies and try to identify mistakes as it tackles complex tasks.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Generative AI Can Harm Learning.\n \n \n \n \n\n\n \n Bastani, H.; Bastani, O.; Sungu, A.; Ge, H.; Kabakcı, Ö.; and Mariman, R.\n\n\n \n\n\n\n July 2024.\n
\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@misc{bastani_generative_2024,\n\taddress = {Rochester, NY},\n\ttype = {{SSRN} {Scholarly} {Paper}},\n\ttitle = {Generative {AI} {Can} {Harm} {Learning}},\n\turl = {https://papers.ssrn.com/abstract=4895486},\n\tdoi = {10.2139/ssrn.4895486},\n\tabstract = {Generative artificial intelligence (AI) is poised to revolutionize how humans work, and has already demonstrated promise in significantly improving human productivity. However, a key remaining question is how generative AI affects learning, namely, how humans acquire new skills as they perform tasks. This kind of skill learning is critical to long-term productivity gains, especially in domains where generative AI is fallible and human experts must check its outputs. We study the impact of generative AI, specifically OpenAI's GPT-4, on human learning in the context of math classes at a high school. In a field experiment involving nearly a thousand students, we have deployed and evaluated two GPT based tutors, one that mimics a standard ChatGPT interface (called GPT Base) and one with prompts designed to safeguard learning (called GPT Tutor). These tutors comprise about 15\\% of the curriculum in each of three grades. Consistent with prior work, our results show that access to GPT-4 significantly improves performance (48\\% improvement for GPT Base and 127\\% for GPT Tutor). However, we additionally find that when access is subsequently taken away, students actually perform worse than those who never had access (17\\% reduction for GPT Base). That is, access to GPT-4 can harm educational outcomes. These negative learning effects are largely mitigated by the safeguards included in GPT Tutor. Our results suggest that students attempt to use GPT-4 as a "crutch" during practice problem sessions, and when successful, perform worse on their own. Thus, to maintain long-term productivity, we must be cautious when deploying generative AI to ensure humans continue to learn critical skills. 
* HB, OB, and AS contributed equally},\n\tlanguage = {en},\n\turldate = {2024-09-13},\n\tauthor = {Bastani, Hamsa and Bastani, Osbert and Sungu, Alp and Ge, Haosen and Kabakcı, Özge and Mariman, Rei},\n\tmonth = jul,\n\tyear = {2024},\n\tkeywords = {Education, Generative AI, Human Capital Development, Human-AI Collaboration, Large Language Models},\n}\n\n\n\n\n\n\n\n\n\n\n\n
\n
\n\n\n
\n Generative artificial intelligence (AI) is poised to revolutionize how humans work, and has already demonstrated promise in significantly improving human productivity. However, a key remaining question is how generative AI affects learning, namely, how humans acquire new skills as they perform tasks. This kind of skill learning is critical to long-term productivity gains, especially in domains where generative AI is fallible and human experts must check its outputs. We study the impact of generative AI, specifically OpenAI's GPT-4, on human learning in the context of math classes at a high school. In a field experiment involving nearly a thousand students, we have deployed and evaluated two GPT based tutors, one that mimics a standard ChatGPT interface (called GPT Base) and one with prompts designed to safeguard learning (called GPT Tutor). These tutors comprise about 15% of the curriculum in each of three grades. Consistent with prior work, our results show that access to GPT-4 significantly improves performance (48% improvement for GPT Base and 127% for GPT Tutor). However, we additionally find that when access is subsequently taken away, students actually perform worse than those who never had access (17% reduction for GPT Base). That is, access to GPT-4 can harm educational outcomes. These negative learning effects are largely mitigated by the safeguards included in GPT Tutor. Our results suggest that students attempt to use GPT-4 as a \"crutch\" during practice problem sessions, and when successful, perform worse on their own. Thus, to maintain long-term productivity, we must be cautious when deploying generative AI to ensure humans continue to learn critical skills. * HB, OB, and AS contributed equally\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n “I Don't Want to Be Taught and Graded by a Robot”: Student-Teacher Relations in the Age of Generative AI.\n \n \n \n \n\n\n \n Frazee, J. P.; Goldberg, D.; Hauze, S.; Mohamed, A.; Ro, C.; and Sobo, E.\n\n\n \n\n\n\n June 2024.\n
\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@misc{frazee_i_2024,\n\ttitle = {“{I} {Don}'t {Want} to {Be} {Taught} and {Graded} by a {Robot}”: {Student}-{Teacher} {Relations} in the {Age} of {Generative} {AI}},\n\tshorttitle = {“{I} {Don}'t {Want} to {Be} {Taught} and {Graded} by a {Robot}”},\n\turl = {https://www.anthropology-news.org/articles/i-dont-want-to-be-taught-and-graded-by-a-robot-student-teacher-relations-in-the-age-of-generative-ai/},\n\tabstract = {Generative artificial intelligence (GenAI) is reshaping student-teacher relations in higher education in both exciting and worrying ways.},\n\tlanguage = {en-US},\n\turldate = {2024-08-15},\n\tjournal = {Anthropology News},\n\tauthor = {Frazee, James P. and Goldberg, David and Hauze, Sean and Mohamed, Abir and Ro, Colin and Sobo, Elisa},\n\tmonth = jun,\n\tyear = {2024},\n}\n\n\n\n\n\n\n\n\n\n\n\n
\n
\n\n\n
\n Generative artificial intelligence (GenAI) is reshaping student-teacher relations in higher education in both exciting and worrying ways.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n AI in Society: The Cognitive Leap Theory.\n \n \n \n \n\n\n \n \n\n\n \n\n\n\n August 2024.\n
\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@misc{noauthor_ai_2024,\n\ttitle = {{AI} in {Society}: {The} {Cognitive} {Leap} {Theory}},\n\tshorttitle = {{AI} in {Society}},\n\turl = {https://aiczar.blogspot.com/2024/08/the-cognitive-leap-theory.html},\n\turldate = {2024-08-09},\n\tjournal = {AI in Society},\n\tmonth = aug,\n\tyear = {2024},\n\tkeywords = {Learning, Teaching with AI, Theory},\n}\n\n\n\n\n\n\n\n\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n An academic publisher has struck an AI data deal with Microsoft – without their authors’ knowledge.\n \n \n \n \n\n\n \n Potter, W.\n\n\n \n\n\n\n July 2024.\n
\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@misc{potter_academic_2024,\n\ttitle = {An academic publisher has struck an {AI} data deal with {Microsoft} – without their authors’ knowledge},\n\turl = {http://theconversation.com/an-academic-publisher-has-struck-an-ai-data-deal-with-microsoft-without-their-authors-knowledge-235203},\n\tabstract = {After news, art and music, generative AI has found a new resource to mine: academia},\n\tlanguage = {en-US},\n\turldate = {2024-08-07},\n\tjournal = {The Conversation},\n\tauthor = {Potter, Wellett},\n\tmonth = jul,\n\tyear = {2024},\n}\n\n\n\n\n\n\n\n
\n
\n\n\n
\n After news, art and music, generative AI has found a new resource to mine: academia\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n 10 reasons why AI may be overrated.\n \n \n \n \n\n\n \n Rosalsky, G.\n\n\n \n\n\n\n
NPR. August 2024.\n
\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{rosalsky_10_2024,\n\tchapter = {Newsletter},\n\ttitle = {10 reasons why {AI} may be overrated},\n\turl = {https://www.npr.org/sections/planet-money/2024/08/06/g-s1-15245/10-reasons-why-ai-may-be-overrated-artificial-intelligence},\n\tabstract = {A list of reasons why generative AI may be overhyped.},\n\tlanguage = {en},\n\turldate = {2024-08-07},\n\tjournal = {NPR},\n\tauthor = {Rosalsky, Greg},\n\tmonth = aug,\n\tyear = {2024},\n}\n\n\n\n
\n
\n\n\n
\n A list of reasons why generative AI may be overhyped.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n The political preferences of LLMs.\n \n \n \n \n\n\n \n Rozado, D.\n\n\n \n\n\n\n
PLOS ONE, 19(7): e0306621. July 2024.\n
Publisher: Public Library of Science\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{rozado_political_2024,\n\ttitle = {The political preferences of {LLMs}},\n\tvolume = {19},\n\tissn = {1932-6203},\n\turl = {https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0306621},\n\tdoi = {10.1371/journal.pone.0306621},\n\tabstract = {I report here a comprehensive analysis about the political preferences embedded in Large Language Models (LLMs). Namely, I administer 11 political orientation tests, designed to identify the political preferences of the test taker, to 24 state-of-the-art conversational LLMs, both closed and open source. When probed with questions/statements with political connotations, most conversational LLMs tend to generate responses that are diagnosed by most political test instruments as manifesting preferences for left-of-center viewpoints. This does not appear to be the case for five additional base (i.e. foundation) models upon which LLMs optimized for conversation with humans are built. However, the weak performance of the base models at coherently answering the tests’ questions makes this subset of results inconclusive. Finally, I demonstrate that LLMs can be steered towards specific locations in the political spectrum through Supervised Fine-Tuning (SFT) with only modest amounts of politically aligned data, suggesting SFT’s potential to embed political orientation in LLMs. With LLMs beginning to partially displace traditional information sources like search engines and Wikipedia, the societal implications of political biases embedded in LLMs are substantial.},\n\tlanguage = {en},\n\tnumber = {7},\n\turldate = {2024-08-06},\n\tjournal = {PLOS ONE},\n\tauthor = {Rozado, David},\n\tmonth = jul,\n\tyear = {2024},\n\tnote = {Publisher: Public Library of Science},\n\tkeywords = {Culture, Human learning, Information retrieval, Language, Online encyclopedias, Political parties, User interfaces, Verbal communication},\n\tpages = {e0306621},\n}\n\n\n\n\n\n\n\n\n\n\n\n
\n
\n\n\n
\n I report here a comprehensive analysis about the political preferences embedded in Large Language Models (LLMs). Namely, I administer 11 political orientation tests, designed to identify the political preferences of the test taker, to 24 state-of-the-art conversational LLMs, both closed and open source. When probed with questions/statements with political connotations, most conversational LLMs tend to generate responses that are diagnosed by most political test instruments as manifesting preferences for left-of-center viewpoints. This does not appear to be the case for five additional base (i.e. foundation) models upon which LLMs optimized for conversation with humans are built. However, the weak performance of the base models at coherently answering the tests’ questions makes this subset of results inconclusive. Finally, I demonstrate that LLMs can be steered towards specific locations in the political spectrum through Supervised Fine-Tuning (SFT) with only modest amounts of politically aligned data, suggesting SFT’s potential to embed political orientation in LLMs. With LLMs beginning to partially displace traditional information sources like search engines and Wikipedia, the societal implications of political biases embedded in LLMs are substantial.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n AI is complicating plagiarism. How should scientists respond?.\n \n \n \n \n\n\n \n Kwon, D.\n\n\n \n\n\n\n
Nature. July 2024.\n
Bandiera_abtest: a Cg_type: News Feature Publisher: Nature Publishing Group Subject_term: Machine learning, Scientific community, Policy\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{kwon_ai_2024,\n\ttitle = {{AI} is complicating plagiarism. {How} should scientists respond?},\n\tcopyright = {2024 Springer Nature Limited},\n\turl = {https://www.nature.com/articles/d41586-024-02371-z},\n\tdoi = {10.1038/d41586-024-02371-z},\n\tabstract = {The explosive uptake of generative artificial intelligence in writing is raising difficult questions about when use of the technology should be allowed.},\n\tlanguage = {en},\n\turldate = {2024-07-31},\n\tjournal = {Nature},\n\tauthor = {Kwon, Diana},\n\tmonth = jul,\n\tyear = {2024},\n\tnote = {Bandiera\\_abtest: a\nCg\\_type: News Feature\nPublisher: Nature Publishing Group\nSubject\\_term: Machine learning, Scientific community, Policy},\n\tkeywords = {Machine learning, Policy, Scientific community},\n}\n\n\n\n
\n
\n\n\n
\n The explosive uptake of generative artificial intelligence in writing is raising difficult questions about when use of the technology should be allowed.\n
\n\n\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Teaching and Generative AI: Pedagogical Possibilities and Productive Tensions.\n \n \n \n \n\n\n \n Buyserie, B.; and Thurston, T.\n\n\n \n\n\n\n Utah State University, January 2024.\n
\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@book{buyserie_teaching_2024,\n\ttitle = {Teaching and {Generative} {AI}: {Pedagogical} {Possibilities} and {Productive} {Tensions}},\n\tshorttitle = {Teaching and {Generative} {AI}},\n\turl = {https://digitalcommons.usu.edu/teachingai/1},\n\tpublisher = {Utah State University},\n\tauthor = {Buyserie, Beth and Thurston, Travis},\n\tmonth = jan,\n\tyear = {2024},\n}\n\n\n\n\n\n\n\n\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n HE Generative AI Literacy Definition.\n \n \n \n \n\n\n \n Attewell, S.\n\n\n \n\n\n\n July 2024.\n
\n\n
\n\n
\n\n
\n\n \n \n
Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@misc{attewell_he_2024,\n\ttitle = {{HE} {Generative} {AI} {Literacy} {Definition}},\n\turl = {https://nationalcentreforai.jiscinvolve.org/wp/2024/07/23/he-generative-ai-literacy-definition/},\n\tabstract = {AI literacy is essential for navigating the rapidly evolving landscape of generative AI (GenAI). We have framed our approach around three fundamental areas—Terms, Tools, and Tasks— to create a comprehensive approach to understanding and applying GenAI effectively. Adopting this model should ensure that staff members are not only equipped with a theoretical understanding of how […]},\n\tlanguage = {en-GB},\n\turldate = {2024-07-31},\n\tjournal = {Artificial intelligence},\n\tauthor = {Attewell, Sue},\n\tmonth = jul,\n\tyear = {2024},\n}\n\n\n\n
\n
\n\n\n
\n AI literacy is essential for navigating the rapidly evolving landscape of generative AI (GenAI). We have framed our approach around three fundamental areas—Terms, Tools, and Tasks— to create a comprehensive approach to understanding and applying GenAI effectively. Adopting this model should ensure that staff members are not only equipped with a theoretical understanding of how […]\n
\n\n\n
\n\n\n\n\n\n