2024 (9)

GoldCoin: Grounding Large Language Models in Privacy Laws via Contextual Integrity Theory.
Fan, W.; Li, H.; Deng, Z.; Wang, W.; and Song, Y.
June 2024. arXiv:2406.11149 [cs].

@misc{fan_goldcoin_2024,
  title = {{GoldCoin}: {Grounding} {Large} {Language} {Models} in {Privacy} {Laws} via {Contextual} {Integrity} {Theory}},
  shorttitle = {{GoldCoin}},
  url = {http://arxiv.org/abs/2406.11149},
  urldate = {2024-06-27},
  publisher = {arXiv},
  author = {Fan, Wei and Li, Haoran and Deng, Zheye and Wang, Weiqi and Song, Yangqiu},
  month = jun,
  year = {2024},
  note = {arXiv:2406.11149 [cs]},
  keywords = {Computer Science - Computation and Language, Computer Science - Cryptography and Security},
}

Abstract: Privacy issues arise prominently during the inappropriate transmission of information between entities. Existing research primarily studies privacy by exploring various privacy attacks, defenses, and evaluations within narrowly predefined patterns, while neglecting that privacy is not an isolated, context-free concept limited to traditionally sensitive data (e.g., social security numbers), but intertwined with intricate social contexts that complicate the identification and analysis of potential privacy violations. The advent of Large Language Models (LLMs) offers unprecedented opportunities for incorporating the nuanced scenarios outlined in privacy laws to tackle these complex privacy issues. However, the scarcity of open-source relevant case studies restricts the efficiency of LLMs in aligning with specific legal statutes. To address this challenge, we introduce a novel framework, GoldCoin, designed to efficiently ground LLMs in privacy laws for judicial assessing privacy violations. Our framework leverages the theory of contextual integrity as a bridge, creating numerous synthetic scenarios grounded in relevant privacy statutes (e.g., HIPAA), to assist LLMs in comprehending the complex contexts for identifying privacy risks in the real world. Extensive experimental results demonstrate that GoldCoin markedly enhances LLMs' capabilities in recognizing privacy risks across real court cases, surpassing the baselines on different judicial tasks.

Understanding Contextual Expectations for Sharing Wearables' Data: Insights from a Vignette Study.
Bourgeus, A.; Vandercruysse, L.; and Verhulst, N.
Computers in Human Behavior Reports, 100443. June 2024.

@article{bourgeus_understanding_2024,
  title = {Understanding {Contextual} {Expectations} for {Sharing} {Wearables}' {Data}: {Insights} from a {Vignette} {Study}},
  issn = {2451-9588},
  shorttitle = {Understanding {Contextual} {Expectations} for {Sharing} {Wearables}' {Data}},
  url = {https://www.sciencedirect.com/science/article/pii/S2451958824000769},
  doi = {10.1016/j.chbr.2024.100443},
  urldate = {2024-06-27},
  journal = {Computers in Human Behavior Reports},
  author = {Bourgeus, August and Vandercruysse, Laurens and Verhulst, Nanouk},
  month = jun,
  year = {2024},
  keywords = {Privacy, agency, contextual integrity, wearables},
  pages = {100443},
}

Abstract: People are increasingly open to sharing personal data collected by wearables, while concerns have emerged on how companies, governments and organisations process this data. This paper applies Nissenbaum’s theory of contextual integrity to explore the perceived appropriateness of information flows linked to wearables. A vignette study was conducted (N= 500) to examine the influence of the type of data shared, its purpose, and the sender, on the appropriateness of different wearables’ information flow scenarios. Results revealed a significant impact of information type, sharing purpose, and sender on the perceived appropriateness of data sharing. Notably, data collected for research purposes or to develop new functionalities was deemed most appropriate, while data used for advertising was viewed unfavourably. Further, the user-controlled sharing received higher appropriateness ratings. This research underscores the need for meaningful consent in data sharing and suggests that manufacturers of wearable devices should utilise user agency to supplement information flow automation based on societal and contextual privacy norms.

Air Gap: Protecting Privacy-Conscious Conversational Agents.
Bagdasaryan, E.; Yi, R.; Ghalebikesabi, S.; Kairouz, P.; Gruteser, M.; Oh, S.; Balle, B.; and Ramage, D.
May 2024. arXiv:2405.05175 [cs].

@misc{bagdasaryan_air_2024,
  title = {Air {Gap}: {Protecting} {Privacy}-{Conscious} {Conversational} {Agents}},
  shorttitle = {Air {Gap}},
  url = {http://arxiv.org/abs/2405.05175},
  urldate = {2024-05-12},
  publisher = {arXiv},
  author = {Bagdasaryan, Eugene and Yi, Ren and Ghalebikesabi, Sahra and Kairouz, Peter and Gruteser, Marco and Oh, Sewoong and Balle, Borja and Ramage, Daniel},
  month = may,
  year = {2024},
  note = {arXiv:2405.05175 [cs]},
  keywords = {Computer Science - Computation and Language, Computer Science - Cryptography and Security, Computer Science - Machine Learning},
}

Abstract: The growing use of large language model (LLM)-based conversational agents to manage sensitive user data raises significant privacy concerns. While these agents excel at understanding and acting on context, this capability can be exploited by malicious actors. We introduce a novel threat model where adversarial third-party apps manipulate the context of interaction to trick LLM-based agents into revealing private information not relevant to the task at hand. Grounded in the framework of contextual integrity, we introduce AirGapAgent, a privacy-conscious agent designed to prevent unintended data leakage by restricting the agent's access to only the data necessary for a specific task. Extensive experiments using Gemini, GPT, and Mistral models as agents validate our approach's effectiveness in mitigating this form of context hijacking while maintaining core agent functionality. For example, we show that a single-query context hijacking attack on a Gemini Ultra agent reduces its ability to protect user data from 94% to 45%, while an AirGapAgent achieves 97% protection, rendering the same attack ineffective.

A Roadmap for Applying the Contextual Integrity Framework in Qualitative Privacy Research.
Kumar, P. C.; Zimmer, M.; and Vitak, J.
Proceedings of the ACM on Human-Computer Interaction, 8(CSCW1): 219:1–219:29. April 2024.

@article{kumar_roadmap_2024,
  title = {A {Roadmap} for {Applying} the {Contextual} {Integrity} {Framework} in {Qualitative} {Privacy} {Research}},
  volume = {8},
  url = {https://dl.acm.org/doi/10.1145/3653710},
  doi = {10.1145/3653710},
  number = {CSCW1},
  urldate = {2024-04-29},
  journal = {Proceedings of the ACM on Human-Computer Interaction},
  author = {Kumar, Priya C. and Zimmer, Michael and Vitak, Jessica},
  month = apr,
  year = {2024},
  keywords = {contextual integrity, information flows, methodology, privacy, qualitative data analysis, theory},
  pages = {219:1--219:29},
}

Abstract: Privacy is an important topic in HCI and social computing research, and the theory of contextual integrity (CI) is increasingly used to understand how sociotechnical systems-and the new kinds of information flows they introduce-can violate privacy. In empirical research, CI can serve as a conceptual framework for explaining the contextual nature of privacy as well as an analytical framework for evaluating privacy attitudes and behaviors. Analytical applications of CI in HCI primarily employ quantitative methods to identify appropriate information flows but rarely engage with the full CI framework to evaluate such flows. In this paper, we present a roadmap to guide HCI and social computing researchers on how to apply the full CI framework to qualitative projects. To help researchers envision what such an analysis can look like, each step includes an example analysis using interview data from projects on privacy and fitness tracking. We conclude by discussing how harnessing the full CI framework can address critiques of CI and identify opportunities for further theory development.

‘There are some things that I would never ask Alexa’ – privacy work, contextual integrity, and smart speaker assistants.
Brause, S. R.; and Blank, G.
Information, Communication & Society, 27(1): 182–197. January 2024.

@article{brause_there_2024,
  title = {‘{There} are some things that {I} would never ask {Alexa}’ – privacy work, contextual integrity, and smart speaker assistants},
  volume = {27},
  issn = {1369-118X, 1468-4462},
  url = {https://www.tandfonline.com/doi/full/10.1080/1369118X.2023.2193241},
  doi = {10.1080/1369118X.2023.2193241},
  language = {en},
  number = {1},
  urldate = {2024-04-25},
  journal = {Information, Communication \& Society},
  author = {Brause, Saba Rebecca and Blank, Grant},
  month = jan,
  year = {2024},
  pages = {182--197},
}

Beware: Processing of Personal Data—Informed Consent Through Risk Communication.
Seiling, L.; Gsenger, R.; Mulugeta, F.; Henningsen, M.; Mischau, L.; and Schirmbeck, M.
IEEE Transactions on Professional Communication, 67(1): 4–25. March 2024.

@article{seiling_beware_2024,
  title = {Beware: {Processing} of {Personal} {Data}—{Informed} {Consent} {Through} {Risk} {Communication}},
  volume = {67},
  issn = {1558-1500},
  shorttitle = {Beware},
  url = {https://ieeexplore.ieee.org/abstract/document/10472565},
  doi = {10.1109/TPC.2024.3361328},
  number = {1},
  urldate = {2024-03-18},
  journal = {IEEE Transactions on Professional Communication},
  author = {Seiling, Lukas and Gsenger, Rita and Mulugeta, Filmona and Henningsen, Marte and Mischau, Lena and Schirmbeck, Marie},
  month = mar,
  year = {2024},
  keywords = {Data processing, Data protection, Europe, General Data Protection Regulation, Information security, Interviews, Regulation, Systematics, general data protection regulation (GDPR), informed consent, privacy notice, risk communication, risk model},
  pages = {4--25},
}

Abstract: Background: The General Data Protection Regulation (GDPR) has been applicable since May 2018 and aims to further harmonize data protection law in the European Union. Processing personal data based on individuals’ consent is lawful under the GDPR only if such consent meets certain requirements and is “informed,” in particular. However, complex privacy notice design and individual cognitive limitations challenge data subjects’ ability to make elaborate consent decisions. Risk-based communication may address these issues. Literature review: Most research focuses on isolated aspects of risk in processing personal data, such as the actors involved, specific events leading to risk formation, or distinctive (context-dependent) consequences. We propose a model combining these approaches as the basis for context-independent risk communication. Research questions: 1. What are relevant information categories for risk communication in the processing of personal data online? 2. Which potentially adverse consequences can arise from specific events in the processing of personal data online? 3. How can consequences in the processing of personal data be avoided or mitigated? Research methodology: The GDPR was examined through a systematic qualitative content analysis. The results inform the analysis of 32 interviews with privacy, data protection, and information security experts from academia, Non-Governmental Organizations, the public, and the private sector. Results: Risk-relevant information categories, specific consequences, and relations between them are identified, along with strategies for risk mitigation. The study concludes with a specified framework for perceived risk in processing personal data. Conclusion: The results provide controllers, regulatory bodies, data subjects, and experts in the field of professional communication with information on risk formation in personal data processing. Based on our analysis, we propose information categories for risk communication, which expand the current regulatory information requirements.

Integrating Differential Privacy and Contextual Integrity.
Benthall, S.; and Cummings, R.
In Proceedings of the Symposium on Computer Science and Law (CSLAW '24), pages 9–15, New York, NY, USA, March 2024. Association for Computing Machinery.

@inproceedings{benthall_integrating_2024,
  address = {New York, NY, USA},
  series = {{CSLAW} '24},
  title = {Integrating {Differential} {Privacy} and {Contextual} {Integrity}},
  isbn = {9798400703331},
  url = {https://dl.acm.org/doi/10.1145/3614407.3643702},
  doi = {10.1145/3614407.3643702},
  urldate = {2024-03-15},
  booktitle = {Proceedings of the {Symposium} on {Computer} {Science} and {Law}},
  publisher = {Association for Computing Machinery},
  author = {Benthall, Sebastian and Cummings, Rachel},
  month = mar,
  year = {2024},
  pages = {9--15},
}

Remember the Human: A Systematic Review of Ethical Considerations in Reddit Research.
Fiesler, C.; Zimmer, M.; Proferes, N.; Gilbert, S.; and Jones, N.
Proceedings of the ACM on Human-Computer Interaction, 8(GROUP): 1–33. February 2024.

@article{fiesler_remember_2024,
  title = {Remember the {Human}: {A} {Systematic} {Review} of {Ethical} {Considerations} in {Reddit} {Research}},
  volume = {8},
  issn = {2573-0142},
  shorttitle = {Remember the {Human}},
  url = {https://dl.acm.org/doi/10.1145/3633070},
  doi = {10.1145/3633070},
  language = {en},
  number = {GROUP},
  urldate = {2024-02-24},
  journal = {Proceedings of the ACM on Human-Computer Interaction},
  author = {Fiesler, Casey and Zimmer, Michael and Proferes, Nicholas and Gilbert, Sarah and Jones, Naiyan},
  month = feb,
  year = {2024},
  pages = {1--33},
}

Abstract: Reddit is one of the world's most prominent social media platforms, and also a valuable source of data for internet researchers. However, working with this kind of data also presents novel ethical complications for researchers, including issues around privacy, vulnerable populations, and unintended consequences. This paper describes an analysis of 134 papers that rely on Reddit data while also including some discussion of ethical implications and/or considerations by the researchers. Our analysis of these papers reveals common ethical issues and ethically motivated methodological decisions, as described by the researchers themselves, while also exposing some gaps for further ethical contemplation for researchers relying on Reddit data. Based on these findings, we close with a set of recommendations for ethically-informed methods and reflection for researchers working with social data.

Protecting Privacy in Indian Schools: Regulating AI-based Technologies' Design, Development and Deployment.
Bajpai, H.
Ph.D. Thesis, Durham University, 2024.

@phdthesis{bajpai_protecting_2024,
  type = {Doctoral},
  title = {Protecting {Privacy} in {Indian} {Schools}: {Regulating} {AI}-based {Technologies}' {Design}, {Development} and {Deployment}},
  shorttitle = {Protecting {Privacy} in {Indian} {Schools}},
  url = {http://etheses.dur.ac.uk/15340/},
  urldate = {2024-02-04},
  school = {Durham University},
  author = {Bajpai, Harsh},
  year = {2024},
}

Abstract: Education is one of the priority areas for the Indian government, where Artificial Intelligence (AI) technologies are touted to bring digital transformation. Several Indian states have also started deploying facial recognition-enabled CCTV cameras, emotion recognition technologies, fingerprint scanners, and Radio frequency identification tags in their schools to provide personalised recommendations, ensure student security, and predict the drop-out rate of students but also provide 360-degree information of a student. Further, Integrating Aadhaar (digital identity card that works on biometric data) across AI technologies and learning and management systems (LMS) renders schools a ‘panopticon’. Certain technologies or systems like Aadhaar, CCTV cameras, GPS Systems, RFID tags, and learning management systems are used primarily for continuous data collection, storage, and retention purposes. Though they cannot be termed AI technologies per se, they are fundamental for designing and developing AI systems like facial, fingerprint, and emotion recognition technologies. The large amount of student data collected speedily through the former technologies is used to create an algorithm for the latter-stated AI systems. Once algorithms are processed using machine learning (ML) techniques, they learn correlations between multiple datasets predicting each student’s identity, decisions, grades, learning growth, tendency to drop out, and other behavioural characteristics. Such autonomous and repetitive collection, processing, storage, and retention of student data without effective data protection legislation endangers student privacy. The algorithmic predictions by AI technologies are an avatar of the data fed into the system. An AI technology is as good as the person collecting the data, processing it for a relevant and valuable output, and regularly evaluating the inputs going inside an AI model. An AI model can produce inaccurate predictions if the person overlooks any relevant data. However, the state, school administrations and parents’ belief in AI technologies as a panacea to student security and educational development overlooks the context in which ‘data practices’ are conducted. A right to privacy in an AI age is inextricably connected to data practices where data gets ‘cooked’. Thus, data protection legislation operating without understanding and regulating such data practices will remain ineffective in safeguarding privacy. The thesis undergoes interdisciplinary research that enables a better understanding of the interplay of data practices of AI technologies with social practices of an Indian school, which the present Indian data protection legislation overlooks, endangering students’ privacy from designing and developing to deploying stages of an AI model. The thesis recommends the Indian legislature frame better legislation equipped for the AI/ML age and the Indian judiciary on evaluating the legality and reasonability of designing, developing, and deploying such technologies in schools.

2023 (13)

Rethinking Artificial Intelligence: Algorithmic Bias and Ethical Issues | Rage Against the Artificial Intelligence? Understanding Contextuality of Algorithm Aversion and Appreciation.
Oomen, T.; Gonçalves, J.; and Mols, A.
International Journal of Communication, 18(0): 25. December 2023.

@article{oomen_rethinking_2023,
  title = {Rethinking {Artificial} {Intelligence}: {Algorithmic} {Bias} and {Ethical} {Issues}{\textbar} {Rage} {Against} the {Artificial} {Intelligence}? {Understanding} {Contextuality} of {Algorithm} {Aversion} and {Appreciation}},
  volume = {18},
  issn = {1932-8036},
  shorttitle = {Rethinking {Artificial} {Intelligence}},
  url = {https://ijoc.org/index.php/ijoc/article/view/20809},
  language = {en},
  number = {0},
  urldate = {2024-01-04},
  journal = {International Journal of Communication},
  author = {Oomen, Tessa and Gonçalves, João and Mols, Anouk},
  month = dec,
  year = {2023},
  keywords = {Contextual Integrity, algorithm appreciation, algorithm aversion, artificial intelligence, mixed methods, public perceptions},
  pages = {25},
}

Abstract: People tend to be hesitant toward algorithmic tools, and this aversion potentially affects how innovations in artificial intelligence (AI) are effectively implemented. Explanatory mechanisms for aversion are based on individual or structural issues but often lack reflection on real-world contexts. Our study addresses this gap through a mixed-method approach, analyzing seven cases of AI deployment and their public reception on social media and in news articles. Using the Contextual Integrity framework, we argue that most often it is not the AI technology that is perceived as problematic, but that processes related to transparency, consent, and lack of influence by individuals raise aversion. Future research into aversion should acknowledge that technologies cannot be extricated from their contexts if they aim to understand public perceptions of AI innovation.

Beyond The Text: Analysis of Privacy Statements through Syntactic and Semantic Role Labeling.
Shvartzshanider, Y.; Balashankar, A.; Wies, T.; and Subramanian, L.
In Preoțiuc-Pietro, D.; Goanta, C.; Chalkidis, I.; Barrett, L.; Spanakis, G.; and Aletras, N., editors, Proceedings of the Natural Legal Language Processing Workshop 2023, pages 85–98, Singapore, December 2023. Association for Computational Linguistics.

@inproceedings{shvartzshanider_beyond_2023,
  address = {Singapore},
  title = {Beyond {The} {Text}: {Analysis} of {Privacy} {Statements} through {Syntactic} and {Semantic} {Role} {Labeling}},
  shorttitle = {Beyond {The} {Text}},
  url = {https://aclanthology.org/2023.nllp-1.10},
  urldate = {2023-12-09},
  booktitle = {Proceedings of the {Natural} {Legal} {Language} {Processing} {Workshop} 2023},
  publisher = {Association for Computational Linguistics},
  author = {Shvartzshanider, Yan and Balashankar, Ananth and Wies, Thomas and Subramanian, Lakshminarayanan},
  editor = {Preoțiuc-Pietro, Daniel and Goanta, Catalina and Chalkidis, Ilias and Barrett, Leslie and Spanakis, Gerasimos (Jerry) and Aletras, Nikolaos},
  month = dec,
  year = {2023},
  pages = {85--98},
}

Abstract: This paper formulates a new task of extracting privacy parameters from a privacy policy, through the lens of Contextual Integrity (CI), an established social theory framework for reasoning about privacy norms. Through extensive experiments, we further show that incorporating CI-based domain-specific knowledge into a BERT-based SRL model results in the highest precision and recall, achieving an F1 score of 84%. With our work, we would like to motivate new research in building NLP applications for the privacy domain.

Digital Pathology Scanners and Contextual Integrity.
Sorell, T.; and Li, R. Z.
Digital Society, 2(3): 56. December 2023.

@article{sorell_digital_2023,
  title = {Digital {Pathology} {Scanners} and {Contextual} {Integrity}},
  volume = {2},
  issn = {2731-4650, 2731-4669},
  url = {https://link.springer.com/10.1007/s44206-023-00085-9},
  doi = {10.1007/s44206-023-00085-9},
  language = {en},
  number = {3},
  urldate = {2023-12-16},
  journal = {Digital Society},
  author = {Sorell, Tom and Li, Ricky Z.},
  month = dec,
  year = {2023},
  pages = {56},
}

Abstract: We aim to bring both digital pathology in general and computational pathology in particular within the scope of Helen Nissenbaum’s theory of appropriate information transfer as contextual integrity. In Section 1, the main lines of the theory of contextual integrity are introduced, and reasons are given why it is not properly speaking a theory of privacy, but rather a theory of morally permissible information transfer in general. Then the theory is applied to uses of digitised pathology images for (a) patient-by-patient analysis (Section 2); and (b) computational pathology (Sections 3 and 4). Although big data exercises involving personal data are sometimes seen by Nissenbaum and colleagues as particular threats to existing data-sharing norms and other social norms, we claim that patient-by-patient digital pathology is riskier, at least in forms it has taken during the pandemic. At the end, we consider some risks in computational pathology that are due to the interaction between health institutions, particularly in the public sector, and commercial algorithm developers.

Understanding Privacy in Virtual Reality Classrooms: A Contextual Integrity Perspective.
Brehm, K.; and Shvartzhnaider, Y.
IEEE Security & Privacy, 2–11. 2023.

@article{brehm_understanding_2023,
  title = {Understanding {Privacy} in {Virtual} {Reality} {Classrooms}: {A} {Contextual} {Integrity} {Perspective}},
  issn = {1540-7993, 1558-4046},
  shorttitle = {Understanding {Privacy} in {Virtual} {Reality} {Classrooms}},
  url = {https://ieeexplore.ieee.org/document/10352441/},
  doi = {10.1109/MSEC.2023.3336802},
  urldate = {2023-12-12},
  journal = {IEEE Security \& Privacy},
  author = {Brehm, Karoline and Shvartzhnaider, Yan},
  year = {2023},
  pages = {2--11},
}

When PETs misbehave: A Contextual Integrity analysis.
Balsa, E.; and Shvartzshnaider, Y.
December 2023. arXiv:2312.02509.

@article{balsa_when_2023,
  title = {When {PETs} misbehave: {A} {Contextual} {Integrity} analysis},
  copyright = {arXiv.org perpetual, non-exclusive license},
  shorttitle = {When {PETs} misbehave},
  url = {https://arxiv.org/abs/2312.02509},
  doi = {10.48550/ARXIV.2312.02509},
  urldate = {2023-12-09},
  author = {Balsa, Ero and Shvartzshnaider, Yan},
  month = dec,
  year = {2023},
  keywords = {Computers and Society (cs.CY), Cryptography and Security (cs.CR), FOS: Computer and information sciences, Information Theory (cs.IT)},
}

Abstract: Privacy enhancing technologies, or PETs, have been hailed as a promising means to protect privacy without compromising on the functionality of digital services. At the same time, and partly because they may encode a narrow conceptualization of privacy as confidentiality that is popular among policymakers, engineers and the public, PETs risk being co-opted to promote privacy-invasive practices. In this paper, we resort to the theory of Contextual Integrity to explain how privacy technologies may be misused to erode privacy. To illustrate, we consider three PETs and scenarios: anonymous credentials for age verification, client-side scanning for illegal content detection, and homomorphic encryption for machine learning model training. Using the theory of Contextual Integrity, we reason about the notion of privacy that these PETs encode, and show that CI enables us to identify and reason about the limitations of PETs and their misuse, and which may ultimately lead to privacy violations.

Private attributes: The meanings and mechanisms of “privacy-preserving” adtech.
McGuigan, L.; Sivan-Sevilla, I.; Parham, P.; and Shvartzshnaider, Y.
New Media & Society, 14614448231213267. November 2023.

@article{mcguigan_private_2023,
  title = {Private attributes: {The} meanings and mechanisms of “privacy-preserving” adtech},
  issn = {1461-4448, 1461-7315},
  shorttitle = {Private attributes},
  url = {http://journals.sagepub.com/doi/10.1177/14614448231213267},
  doi = {10.1177/14614448231213267},
  language = {en},
  urldate = {2023-12-02},
  journal = {New Media \& Society},
  author = {McGuigan, Lee and Sivan-Sevilla, Ido and Parham, Patrick and Shvartzshnaider, Yan},
  month = nov,
  year = {2023},
  pages = {14614448231213267},
}

Abstract: This study analyzes the meanings and technical mechanisms of privacy that leading advertising technology (adtech) companies are deploying under the banner of “privacy-preserving” adtech. We analyze this discourse by examining documents wherein Meta, Google, and Apple each propose to provide advertising attribution services—which aim to measure and optimize advertising effectiveness—while “solving” some of the privacy problems associated with online ad attribution. We find that these solutions define privacy primarily as anonymity, as limiting access to individuals’ information, and as the prevention of third-party tracking. We critique these proposals by drawing on the theory of privacy as contextual integrity. Overall, we argue that these attribution solutions not only fail to achieve meaningful privacy but also leverage privacy rhetoric to advance commercial interests.

Privacy in the Age of Neurotechnology: Investigating Public Attitudes towards Brain Data Collection and Use.
Kablo, E.; and Arias-Cabarcos, P.
In Proceedings of the 2023 ACM SIGSAC Conference on Computer and Communications Security (CCS '23), pages 225–238, New York, NY, USA, November 2023. Association for Computing Machinery.

@inproceedings{kablo_privacy_2023,
  address = {New York, NY, USA},
  series = {{CCS} '23},
  title = {Privacy in the {Age} of {Neurotechnology}: {Investigating} {Public} {Attitudes} towards {Brain} {Data} {Collection} and {Use}},
  isbn = {9798400700507},
  shorttitle = {Privacy in the {Age} of {Neurotechnology}},
  url = {https://dl.acm.org/doi/10.1145/3576915.3623164},
  doi = {10.1145/3576915.3623164},
  urldate = {2023-11-26},
  booktitle = {Proceedings of the 2023 {ACM} {SIGSAC} {Conference} on {Computer} and {Communications} {Security}},
  publisher = {Association for Computing Machinery},
  author = {Kablo, Emiram and Arias-Cabarcos, Patricia},
  month = nov,
  year = {2023},
  keywords = {brain data, contextual integrity, neuroprivacy, neurotechnology, user study},
  pages = {225--238},
}

Abstract: Brain Computer Interfaces (BCIs) are expanding beyond the medical realm into entertainment, wellness, and marketing. However, as consumer neurotechnology becomes more popular, privacy concerns arise due to the sensitive nature of brainwave data and its potential commodification. Attacks on privacy have been demonstrated and AI advancements in brain-to-speech and brain-to-image decoding pose a new unique set of risks. In this space, we contribute with the first user study (n=287) to understand people's neuroprivacy expectations and awareness of neurotechnology implications. Our analysis shows that, while users are interested in the technology, privacy is a critical issue for acceptability. The results underscore the importance of consent and the need for implementing effective transparency about neurodata sharing. Our insights provide a ground to analyse the gap in current privacy protection mechanisms, adding to the debate on how to design privacy-respecting neurotechnology.
\n \n\n \n \n \n \n \n \n Automating Governing Knowledge Commons and Contextual Integrity (GKC-CI) Privacy Policy Annotations with Large Language Models.\n \n \n \n \n\n\n \n Chanenson, J.; Pickering, M.; and Apthorpe, N.\n\n\n \n\n\n\n November 2023.\n arXiv:2311.02192 [cs]\n\n\n\n
\n\n\n\n \n \n \"AutomatingPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n\n\n\n
\n
@misc{chanenson_automating_2023,\n\ttitle = {Automating {Governing} {Knowledge} {Commons} and {Contextual} {Integrity} ({GKC}-{CI}) {Privacy} {Policy} {Annotations} with {Large} {Language} {Models}},\n\turl = {http://arxiv.org/abs/2311.02192},\n\tabstract = {Identifying contextual integrity (CI) and governing knowledge commons (GKC) parameters in privacy policy texts can facilitate normative privacy analysis. However, GKC-CI annotation has heretofore required manual or crowdsourced effort. This paper demonstrates that high-accuracy GKC-CI parameter annotation of privacy policies can be performed automatically using large language models. We fine-tune 18 open-source and proprietary models on 21,588 GKC-CI annotations from 16 ground truth privacy policies. Our best-performing model (fine-tuned GPT-3.5 Turbo with prompt engineering) has an accuracy of 86\\%, exceeding the performance of prior crowdsourcing approaches despite the complexity of privacy policy texts and the nuance of the GKC-CI annotation task. We apply our best-performing model to privacy policies from 164 popular online services, demonstrating the effectiveness of scaling GKC-CI annotation for data exploration. We make all annotated policies as well as the training data and scripts needed to fine-tune our best-performing model publicly available for future research.},\n\turldate = {2023-11-07},\n\tpublisher = {arXiv},\n\tauthor = {Chanenson, Jake and Pickering, Madison and Apthorpe, Noah},\n\tmonth = nov,\n\tyear = {2023},\n\tnote = {arXiv:2311.02192 [cs]},\n\tkeywords = {Computer Science - Computation and Language, Computer Science - Computers and Society, Computer Science - Machine Learning},\n}\n\n
\n
\n\n\n
\n Identifying contextual integrity (CI) and governing knowledge commons (GKC) parameters in privacy policy texts can facilitate normative privacy analysis. However, GKC-CI annotation has heretofore required manual or crowdsourced effort. This paper demonstrates that high-accuracy GKC-CI parameter annotation of privacy policies can be performed automatically using large language models. We fine-tune 18 open-source and proprietary models on 21,588 GKC-CI annotations from 16 ground truth privacy policies. Our best-performing model (fine-tuned GPT-3.5 Turbo with prompt engineering) has an accuracy of 86%, exceeding the performance of prior crowdsourcing approaches despite the complexity of privacy policy texts and the nuance of the GKC-CI annotation task. We apply our best-performing model to privacy policies from 164 popular online services, demonstrating the effectiveness of scaling GKC-CI annotation for data exploration. We make all annotated policies as well as the training data and scripts needed to fine-tune our best-performing model publicly available for future research.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Balancing Caution and the Need for Change: The General Contextual Integrity Approach.\n \n \n \n \n\n\n \n O’Neill, E.\n\n\n \n\n\n\n Philosophy & Technology, 36(4): 68. October 2023.\n \n\n\n\n
\n\n\n\n \n \n \"BalancingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{oneill_balancing_2023,\n\ttitle = {Balancing {Caution} and the {Need} for {Change}: {The} {General} {Contextual} {Integrity} {Approach}},\n\tvolume = {36},\n\tissn = {2210-5441},\n\tshorttitle = {Balancing {Caution} and the {Need} for {Change}},\n\turl = {https://doi.org/10.1007/s13347-023-00671-2},\n\tdoi = {10.1007/s13347-023-00671-2},\n\tabstract = {In this reply to van de Poel’s (Philosophy \\& Technology, 35(3), 82, 2022) commentary on O’Neill (Philosophy \\& Technology, 35(79), 2022), I discuss two worries about the general contextual integrity approach to evaluating technological change. First, I address van de Poel’s concern that the general contextual integrity approach will not supply the right guidance in cases where morally problematic technological change poses no threat to contextual integrity. Second, I elaborate on how the approach supplies mechanisms for balancing caution with the need for change.},\n\tlanguage = {en},\n\tnumber = {4},\n\turldate = {2023-11-06},\n\tjournal = {Philosophy \\& Technology},\n\tauthor = {O’Neill, Elizabeth},\n\tmonth = oct,\n\tyear = {2023},\n\tkeywords = {Contextual integrity, Ethics of technology, Norm change, Socially disruptive technologies, Technological change, Value change},\n\tpages = {68},\n}\n\n
\n
\n\n\n
\n In this reply to van de Poel’s (Philosophy & Technology, 35(3), 82, 2022) commentary on O’Neill (Philosophy & Technology, 35(79), 2022), I discuss two worries about the general contextual integrity approach to evaluating technological change. First, I address van de Poel’s concern that the general contextual integrity approach will not supply the right guidance in cases where morally problematic technological change poses no threat to contextual integrity. Second, I elaborate on how the approach supplies mechanisms for balancing caution with the need for change.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Datafication and algorithmization of education: How do parents and students evaluate the appropriateness of learning analytics?.\n \n \n \n \n\n\n \n Martens, M.; De Wolf, R.; and De Marez, L.\n\n\n \n\n\n\n Education and Information Technologies. August 2023.\n \n\n\n\n
\n\n\n\n \n \n \"DataficationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{martens_datafication_2023,\n\ttitle = {Datafication and algorithmization of education: {How} do parents and students evaluate the appropriateness of learning analytics?},\n\tissn = {1573-7608},\n\tshorttitle = {Datafication and algorithmization of education},\n\turl = {https://doi.org/10.1007/s10639-023-12124-6},\n\tdoi = {10.1007/s10639-023-12124-6},\n\tabstract = {Algorithmic systems such as Learning Analytics (LA) are driving the datafication and algorithmization of education. In this research, we focus on the appropriateness of LA systems from the perspective of parents and students in secondary education. Anchored in the contextual integrity framework (Nissenbaum, Washington Law Review, 79, 41, 2004), we conducted two survey studies (Nstudents=277, Nparents=1013) in Flanders to investigate how they evaluate the appropriateness of the data flows in LA systems, and how both populations differ in their evaluations. The results show that the most-used student-centered LA are perceived less appropriate than the less-used teacher-centered LA by both students and parents. The usage of personal characteristics in LA is perceived as least appropriate, in contrast to coarser class characteristics. Sharing insights of LA with institutions that are part of the traditional educational context, such as the school, is seen as the most appropriate, and more appropriate than sharing it with learning platforms or third parties (e.g., Big Tech). Overall, we found that parents evaluated the different elements of the dataflows embedded in LA as less appropriate than students. In the discussion, we argue that educational institutions should include the evaluation of both parents and students to further manage expectations and construct shared norms and practices when implementing LA in education.},\n\tlanguage = {en},\n\turldate = {2023-08-25},\n\tjournal = {Education and Information Technologies},\n\tauthor = {Martens, Marijn and De Wolf, Ralf and De Marez, Lieven},\n\tmonth = aug,\n\tyear = {2023},\n\tkeywords = {Appropriateness, Contextual integrity, Learning analytics, Parents, Students, Survey},\n}\n\n
\n
\n\n\n
\n Algorithmic systems such as Learning Analytics (LA) are driving the datafication and algorithmization of education. In this research, we focus on the appropriateness of LA systems from the perspective of parents and students in secondary education. Anchored in the contextual integrity framework (Nissenbaum, Washington Law Review, 79, 41, 2004), we conducted two survey studies (N students = 277, N parents = 1013) in Flanders to investigate how they evaluate the appropriateness of the data flows in LA systems, and how both populations differ in their evaluations. The results show that the most-used student-centered LA are perceived less appropriate than the less-used teacher-centered LA by both students and parents. The usage of personal characteristics in LA is perceived as least appropriate, in contrast to coarser class characteristics. Sharing insights of LA with institutions that are part of the traditional educational context, such as the school, is seen as the most appropriate, and more appropriate than sharing it with learning platforms or third parties (e.g., Big Tech). Overall, we found that parents evaluated the different elements of the dataflows embedded in LA as less appropriate than students. In the discussion, we argue that educational institutions should include the evaluation of both parents and students to further manage expectations and construct shared norms and practices when implementing LA in education.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Surveillance and the future of work: exploring employees’ attitudes toward monitoring in a post-COVID workplace.\n \n \n \n \n\n\n \n Vitak, J.; and Zimmer, M.\n\n\n \n\n\n\n Journal of Computer-Mediated Communication, 28(4): zmad007. June 2023.\n \n\n\n\n
\n\n\n\n \n \n \"SurveillancePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{vitak_surveillance_2023,\n\ttitle = {Surveillance and the future of work: exploring employees’ attitudes toward monitoring in a post-{COVID} workplace},\n\tvolume = {28},\n\tissn = {1083-6101},\n\tshorttitle = {Surveillance and the future of work},\n\turl = {https://academic.oup.com/jcmc/article/doi/10.1093/jcmc/zmad007/7210235},\n\tdoi = {10.1093/jcmc/zmad007},\n\tabstract = {Abstract \n            The future of work increasingly focuses on the collection and analysis of worker data to monitor communication, ensure productivity, reduce security threats, and assist in decision-making. The COVID-19 pandemic increased employer reliance on these technologies; however, the blurring of home and work boundaries meant these monitoring tools might also surveil private spaces. To explore workers’ attitudes toward increased monitoring practices, we present findings from a factorial vignette survey of 645 U.S. adults who worked from home during the early months of the pandemic. Using the theory of privacy as contextual integrity to guide the survey design and analysis, we unpack the types of workplace surveillance practices that violate privacy norms and consider attitudinal differences between male and female workers. Our findings highlight that the acceptability of workplace surveillance practices is highly contextual, and that reductions in privacy and autonomy at work may further exacerbate power imbalances, especially for vulnerable employees.},\n\tlanguage = {en},\n\tnumber = {4},\n\turldate = {2023-07-03},\n\tjournal = {Journal of Computer-Mediated Communication},\n\tauthor = {Vitak, Jessica and Zimmer, Michael},\n\teditor = {Baym, Nancy and Ellison, Nicole},\n\tmonth = jun,\n\tyear = {2023},\n\tpages = {zmad007},\n}\n\n
\n
\n\n\n
\n The future of work increasingly focuses on the collection and analysis of worker data to monitor communication, ensure productivity, reduce security threats, and assist in decision-making. The COVID-19 pandemic increased employer reliance on these technologies; however, the blurring of home and work boundaries meant these monitoring tools might also surveil private spaces. To explore workers’ attitudes toward increased monitoring practices, we present findings from a factorial vignette survey of 645 U.S. adults who worked from home during the early months of the pandemic. Using the theory of privacy as contextual integrity to guide the survey design and analysis, we unpack the types of workplace surveillance practices that violate privacy norms and consider attitudinal differences between male and female workers. Our findings highlight that the acceptability of workplace surveillance practices is highly contextual, and that reductions in privacy and autonomy at work may further exacerbate power imbalances, especially for vulnerable employees.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Comparative Audit of Privacy Policies from Healthcare Organizations in USA, UK and India.\n \n \n \n \n\n\n \n Balde, G.; Singh, A.; Ganguly, N.; and Mondal, M.\n\n\n \n\n\n\n June 2023.\n arXiv:2306.11557 [cs]\n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n\n\n\n
\n
@misc{balde_comparative_2023,\n\ttitle = {A {Comparative} {Audit} of {Privacy} {Policies} from {Healthcare} {Organizations} in {USA}, {UK} and {India}},\n\turl = {http://arxiv.org/abs/2306.11557},\n\tabstract = {Data privacy in healthcare is of paramount importance (and thus regulated using laws like HIPAA) due to the highly sensitive nature of patient data. To that end, healthcare organizations mention how they collect/process/store/share this data (i.e., data practices) via their privacy policies. Thus there is a need to audit these policies and check compliance with respective laws. This paper addresses this need and presents a large-scale data-driven study to audit privacy policies from healthcare organizations in three countries -- USA, UK, and India. We developed a three-stage novel {\\textbackslash}textit\\{workflow\\} for our audit. First, we collected the privacy policies of thousands of healthcare organizations in these countries and cleaned this privacy policy data using a clustering-based mixed-method technique. We identified data practices regarding users' private medical data (medical history) and site privacy (cookie, logs) in these policies. Second, we adopted a summarization-based technique to uncover exact broad data practices across countries and notice important differences. Finally, we evaluated the cross-country data practices using the lens of legal compliance (with legal expert feedback) and grounded in the theory of Contextual Integrity (CI). Alarmingly, we identified six themes of non-alignment (observed in 21.8{\\textbackslash}\\% of data practices studied in India) pointed out by our legal experts. Furthermore, there are four {\\textbackslash}textit\\{potential violations\\} according to case verdicts from Indian Courts as pointed out by our legal experts. We conclude this paper by discussing the utility of our auditing workflow and the implication of our findings for different stakeholders.},\n\turldate = {2023-06-24},\n\tpublisher = {arXiv},\n\tauthor = {Balde, Gunjan and Singh, Aryendra and Ganguly, Niloy and Mondal, Mainack},\n\tmonth = jun,\n\tyear = {2023},\n\tnote = {arXiv:2306.11557 [cs]},\n\tkeywords = {Computer Science - Computers and Society, Computer Science - Cryptography and Security, Computer Science - Human-Computer Interaction},\n}\n\n
\n
\n\n\n
\n Data privacy in healthcare is of paramount importance (and thus regulated using laws like HIPAA) due to the highly sensitive nature of patient data. To that end, healthcare organizations mention how they collect/process/store/share this data (i.e., data practices) via their privacy policies. Thus there is a need to audit these policies and check compliance with respective laws. This paper addresses this need and presents a large-scale data-driven study to audit privacy policies from healthcare organizations in three countries – USA, UK, and India. We developed a three-stage novel workflow for our audit. First, we collected the privacy policies of thousands of healthcare organizations in these countries and cleaned this privacy policy data using a clustering-based mixed-method technique. We identified data practices regarding users' private medical data (medical history) and site privacy (cookie, logs) in these policies. Second, we adopted a summarization-based technique to uncover exact broad data practices across countries and notice important differences. Finally, we evaluated the cross-country data practices using the lens of legal compliance (with legal expert feedback) and grounded in the theory of Contextual Integrity (CI). Alarmingly, we identified six themes of non-alignment (observed in 21.8% of data practices studied in India) pointed out by our legal experts. Furthermore, there are four potential violations according to case verdicts from Indian Courts as pointed out by our legal experts. We conclude this paper by discussing the utility of our auditing workflow and the implication of our findings for different stakeholders.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Justice-Centered Pathways to Combat Surveillance Realism in Informal CS Learning.\n \n \n \n \n\n\n \n Ruppert, J.\n\n\n \n\n\n\n Ph.D. Thesis, 2023.\n \n\n\n\n
\n\n\n\n \n \n \"Justice-CenteredPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@phdthesis{ruppert_justice-centered_2023,\n\ttitle = {Justice-{Centered} {Pathways} to {Combat} {Surveillance} {Realism} in {Informal} {CS} {Learning}},\n\turl = {https://www.proquest.com/openview/ec0bc759a3955b3982d045f51290f8ba/1?pq-origsite=gscholar&cbl=18750&diss=y},\n\tlanguage = {en},\n\turldate = {2023-05-22},\n\tauthor = {Ruppert, Janet},\n\tyear = {2023},\n}\n
\n
\n\n\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2022\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Sins of omission: Critical informatics perspectives on privacy in e‐learning systems in higher education.\n \n \n \n \n\n\n \n Paris, B.; Reynolds, R.; and McGowan, C.\n\n\n \n\n\n\n Journal of the Association for Information Science and Technology, 73(5): 708–725. May 2022.\n \n\n\n\n
\n\n\n\n \n \n \"SinsPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{paris_sins_2022,\n\ttitle = {Sins of omission: {Critical} informatics perspectives on privacy in e‐learning systems in higher education},\n\tvolume = {73},\n\tissn = {2330-1635, 2330-1643},\n\tshorttitle = {Sins of omission},\n\turl = {https://asistdl.onlinelibrary.wiley.com/doi/10.1002/asi.24575},\n\tdoi = {10.1002/asi.24575},\n\tabstract = {Abstract \n            The COVID‐19 pandemic emptied classrooms across the globe and pushed administrators, students, educators, and parents into an uneasy alliance with online learning systems already committing serious privacy and intellectual property violations, and actively promoted the precarity of educational labor. In this article, we use methods and theories derived from critical informatics to examine Rutgers University's deployment of seven online learning platforms commonly used in higher education to uncover five themes that result from the deployment of corporate learning platforms. We conclude by suggesting ways ahead to meaningfully address the structural power and vulnerabilities extended by higher education's use of these platforms.},\n\tlanguage = {en},\n\tnumber = {5},\n\turldate = {2024-01-22},\n\tjournal = {Journal of the Association for Information Science and Technology},\n\tauthor = {Paris, Britt and Reynolds, Rebecca and McGowan, Catherine},\n\tmonth = may,\n\tyear = {2022},\n\tpages = {708--725},\n}\n\n
\n
\n\n\n
\n The COVID‐19 pandemic emptied classrooms across the globe and pushed administrators, students, educators, and parents into an uneasy alliance with online learning systems already committing serious privacy and intellectual property violations, and actively promoted the precarity of educational labor. In this article, we use methods and theories derived from critical informatics to examine Rutgers University's deployment of seven online learning platforms commonly used in higher education to uncover five themes that result from the deployment of corporate learning platforms. We conclude by suggesting ways ahead to meaningfully address the structural power and vulnerabilities extended by higher education's use of these platforms.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2003\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Privacy and the Question of Technology.\n \n \n \n \n\n\n \n Austin, L.\n\n\n \n\n\n\n Law and Philosophy, 22(2): 119–166. 2003.\n \n\n\n\n
\n\n\n\n \n \n \"PrivacyPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{austin_privacy_2003,\n\ttitle = {Privacy and the {Question} of {Technology}},\n\tvolume = {22},\n\turl = {https://heinonline.org/HOL/P?h=hein.journals/lwphil22&i=119},\n\tlanguage = {eng},\n\tnumber = {2},\n\turldate = {2024-05-25},\n\tjournal = {Law and Philosophy},\n\tauthor = {Austin, Lisa},\n\tyear = {2003},\n\tpages = {119--166},\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n n.d.\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Who’s Listening? Analyzing Privacy Preferences in Multi-User Smart Personal Assistants Settings.\n \n \n \n \n\n\n \n \n\n\n \n\n\n\n \n \n\n\n\n
\n\n\n\n \n \n \"Who’sPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{noauthor_whos_nodate,\n\ttitle = {Who’s {Listening}? {Analyzing} {Privacy} {Preferences} in {Multi}-{User} {Smart} {Personal} {Assistants} {Settings}},\n\turl = {https://www.usenix.org/conference/soups2024/presentation/carreira-listening-poster},\n\turldate = {2024-08-15},\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n\n\n\n
\n\n\n \n\n \n \n \n \n\n
\n"}; document.write(bibbase_data.data);