2024 (5)

‘There are some things that I would never ask Alexa’ – privacy work, contextual integrity, and smart speaker assistants. Brause, S. R.; and Blank, G. Information, Communication & Society, 27(1): 182–197. January 2024.

@article{brause_there_2024,
  title = {‘{There} are some things that {I} would never ask {Alexa}’ – privacy work, contextual integrity, and smart speaker assistants},
  volume = {27},
  issn = {1369-118X, 1468-4462},
  url = {https://www.tandfonline.com/doi/full/10.1080/1369118X.2023.2193241},
  doi = {10.1080/1369118X.2023.2193241},
  language = {en},
  number = {1},
  urldate = {2024-04-25},
  journal = {Information, Communication \& Society},
  author = {Brause, Saba Rebecca and Blank, Grant},
  month = jan,
  year = {2024},
  pages = {182--197},
}

Beware: Processing of Personal Data—Informed Consent Through Risk Communication. Seiling, L.; Gsenger, R.; Mulugeta, F.; Henningsen, M.; Mischau, L.; and Schirmbeck, M. IEEE Transactions on Professional Communication, 67(1): 4–25. March 2024.

@article{seiling_beware_2024,
  title = {Beware: {Processing} of {Personal} {Data}—{Informed} {Consent} {Through} {Risk} {Communication}},
  volume = {67},
  issn = {1558-1500},
  shorttitle = {Beware},
  url = {https://ieeexplore.ieee.org/abstract/document/10472565},
  doi = {10.1109/TPC.2024.3361328},
  abstract = {Background: The General Data Protection Regulation (GDPR) has been applicable since May 2018 and aims to further harmonize data protection law in the European Union. Processing personal data based on individuals’ consent is lawful under the GDPR only if such consent meets certain requirements and is “informed,” in particular. However, complex privacy notice design and individual cognitive limitations challenge data subjects’ ability to make elaborate consent decisions. Risk-based communication may address these issues. Literature review: Most research focuses on isolated aspects of risk in processing personal data, such as the actors involved, specific events leading to risk formation, or distinctive (context-dependent) consequences. We propose a model combining these approaches as the basis for context-independent risk communication. Research questions: 1. What are relevant information categories for risk communication in the processing of personal data online? 2. Which potentially adverse consequences can arise from specific events in the processing of personal data online? 3. How can consequences in the processing of personal data be avoided or mitigated? Research methodology: The GDPR was examined through a systematic qualitative content analysis. The results inform the analysis of 32 interviews with privacy, data protection, and information security experts from academia, Non-Governmental Organizations, the public, and the private sector. Results: Risk-relevant information categories, specific consequences, and relations between them are identified, along with strategies for risk mitigation. The study concludes with a specified framework for perceived risk in processing personal data. Conclusion: The results provide controllers, regulatory bodies, data subjects, and experts in the field of professional communication with information on risk formation in personal data processing. Based on our analysis, we propose information categories for risk communication, which expand the current regulatory information requirements.},
  number = {1},
  urldate = {2024-03-18},
  journal = {IEEE Transactions on Professional Communication},
  author = {Seiling, Lukas and Gsenger, Rita and Mulugeta, Filmona and Henningsen, Marte and Mischau, Lena and Schirmbeck, Marie},
  month = mar,
  year = {2024},
  keywords = {Data processing, Data protection, Europe, General Data Protection Regulation, Information security, Interviews, Regulation, Systematics, general data protection regulation (GDPR), informed consent, privacy notice, risk communication, risk model},
  pages = {4--25},
}

Integrating Differential Privacy and Contextual Integrity. Benthall, S.; and Cummings, R. In Proceedings of the Symposium on Computer Science and Law (CSLAW '24), pages 9–15, New York, NY, USA, March 2024. Association for Computing Machinery.

@inproceedings{benthall_integrating_2024,
  address = {New York, NY, USA},
  series = {{CSLAW} '24},
  title = {Integrating {Differential} {Privacy} and {Contextual} {Integrity}},
  isbn = {9798400703331},
  url = {https://dl.acm.org/doi/10.1145/3614407.3643702},
  doi = {10.1145/3614407.3643702},
  urldate = {2024-03-15},
  booktitle = {Proceedings of the {Symposium} on {Computer} {Science} and {Law}},
  publisher = {Association for Computing Machinery},
  author = {Benthall, Sebastian and Cummings, Rachel},
  month = mar,
  year = {2024},
  pages = {9--15},
}

Remember the Human: A Systematic Review of Ethical Considerations in Reddit Research. Fiesler, C.; Zimmer, M.; Proferes, N.; Gilbert, S.; and Jones, N. Proceedings of the ACM on Human-Computer Interaction, 8(GROUP): 1–33. February 2024.

@article{fiesler_remember_2024,
  title = {Remember the {Human}: {A} {Systematic} {Review} of {Ethical} {Considerations} in {Reddit} {Research}},
  volume = {8},
  issn = {2573-0142},
  shorttitle = {Remember the {Human}},
  url = {https://dl.acm.org/doi/10.1145/3633070},
  doi = {10.1145/3633070},
  abstract = {Reddit is one of the world's most prominent social media platforms, and also a valuable source of data for internet researchers. However, working with this kind of data also presents novel ethical complications for researchers, including issues around privacy, vulnerable populations, and unintended consequences. This paper describes an analysis of 134 papers that rely on Reddit data while also including some discussion of ethical implications and/or considerations by the researchers. Our analysis of these papers reveals common ethical issues and ethically motivated methodological decisions, as described by the researchers themselves, while also exposing some gaps for further ethical contemplation for researchers relying on Reddit data. Based on these findings, we close with a set of recommendations for ethically-informed methods and reflection for researchers working with social data.},
  language = {en},
  number = {GROUP},
  urldate = {2024-02-24},
  journal = {Proceedings of the ACM on Human-Computer Interaction},
  author = {Fiesler, Casey and Zimmer, Michael and Proferes, Nicholas and Gilbert, Sarah and Jones, Naiyan},
  month = feb,
  year = {2024},
  pages = {1--33},
}

Protecting Privacy in Indian Schools: Regulating AI-based Technologies' Design, Development and Deployment. Bajpai, H. Ph.D. Thesis, Durham University, 2024.

@phdthesis{bajpai_protecting_2024,
  type = {Doctoral},
  title = {Protecting {Privacy} in {Indian} {Schools}: {Regulating} {AI}-based {Technologies}' {Design}, {Development} and {Deployment}},
  shorttitle = {Protecting {Privacy} in {Indian} {Schools}},
  url = {http://etheses.dur.ac.uk/15340/},
  abstract = {Education is one of the priority areas for the Indian government, where Artificial Intelligence (AI) technologies are touted to bring digital transformation. Several Indian states have also started deploying facial recognition-enabled CCTV cameras, emotion recognition technologies, fingerprint scanners, and Radio frequency identification tags in their schools to provide personalised recommendations, ensure student security, and predict the drop-out rate of students but also provide 360-degree information of a student. Further, Integrating Aadhaar (digital identity card that works on biometric data) across AI technologies and learning and management systems (LMS) renders schools a ‘panopticon’.

Certain technologies or systems like Aadhaar, CCTV cameras, GPS Systems, RFID tags, and learning management systems are used primarily for continuous data collection, storage, and retention purposes. Though they cannot be termed AI technologies per se, they are fundamental for designing and developing AI systems like facial, fingerprint, and emotion recognition technologies. The large amount of student data collected speedily through the former technologies is used to create an algorithm for the latter-stated AI systems. Once algorithms are processed using machine learning (ML) techniques, they learn correlations between multiple datasets predicting each student’s identity, decisions, grades, learning growth, tendency to drop out, and other behavioural characteristics. Such autonomous and repetitive collection, processing, storage, and retention of student data without effective data protection legislation endangers student privacy.

The algorithmic predictions by AI technologies are an avatar of the data fed into the system. An AI technology is as good as the person collecting the data, processing it for a relevant and valuable output, and regularly evaluating the inputs going inside an AI model. An AI model can produce inaccurate predictions if the person overlooks any relevant data. However, the state, school administrations and parents’ belief in AI technologies as a panacea to student security and educational development overlooks the context in which ‘data practices’ are conducted. A right to privacy in an AI age is inextricably connected to data practices where data gets ‘cooked’. Thus, data protection legislation operating without understanding and regulating such data practices will remain ineffective in safeguarding privacy.

The thesis undergoes interdisciplinary research that enables a better understanding of the interplay of data practices of AI technologies with social practices of an Indian school, which the present Indian data protection legislation overlooks, endangering students’ privacy from designing and developing to deploying stages of an AI model. The thesis recommends the Indian legislature frame better legislation equipped for the AI/ML age and the Indian judiciary on evaluating the legality and reasonability of designing, developing, and deploying such technologies in schools.},
  urldate = {2024-02-04},
  school = {Durham University},
  author = {BAJPAI, HARSH},
  year = {2024},
}

2022 (1)

Sins of omission: Critical informatics perspectives on privacy in e‐learning systems in higher education. Paris, B.; Reynolds, R.; and McGowan, C. Journal of the Association for Information Science and Technology, 73(5): 708–725. May 2022.

@article{paris_sins_2022,
  title = {Sins of omission: {Critical} informatics perspectives on privacy in e‐learning systems in higher education},
  volume = {73},
  issn = {2330-1635, 2330-1643},
  shorttitle = {Sins of omission},
  url = {https://asistdl.onlinelibrary.wiley.com/doi/10.1002/asi.24575},
  doi = {10.1002/asi.24575},
  abstract = {The COVID‐19 pandemic emptied classrooms across the globe and pushed administrators, students, educators, and parents into an uneasy alliance with online learning systems already committing serious privacy and intellectual property violations, and actively promoted the precarity of educational labor. In this article, we use methods and theories derived from critical informatics to examine Rutgers University's deployment of seven online learning platforms commonly used in higher education to uncover five themes that result from the deployment of corporate learning platforms. We conclude by suggesting ways ahead to meaningfully address the structural power and vulnerabilities extended by higher education's use of these platforms.},
  language = {en},
  number = {5},
  urldate = {2024-01-22},
  journal = {Journal of the Association for Information Science and Technology},
  author = {Paris, Britt and Reynolds, Rebecca and McGowan, Catherine},
  month = may,
  year = {2022},
  pages = {708--725},
}