Voice Assistants' Accountability through Explanatory Dialogues. Alizadeh, F., Tolmie, P., Lee, M., Wintersberger, P., Pins, D., & Stevens, G. In Proceedings of the 6th ACM Conference on Conversational User Interfaces, of CUI '24, pages 1–12, New York, NY, USA, July, 2024. Association for Computing Machinery.
Paper doi abstract bibtex As voice assistants (VAs) become more advanced leveraging Large Language Models (LLMs) and natural language processing, their potential for accountable behavior expands. Yet, the long-term situational effectiveness of VAs’ accounts when errors occur remains unclear. In our 19-month exploratory study with 19 households, we investigated the impact of an Alexa feature that allows users to inquire about the reasons behind its actions. Our findings indicate that Alexa's accounts are often single, decontextualized responses that led to users’ alternative repair strategies over the long term, such as turning off the device, rather than initiating a dialogue about what went wrong. Through role-playing workshops, we demonstrate that VA interactions should facilitate explanatory dialogues as dynamic exchanges that consider a range of speech acts, recognizing users’ emotional states and the context of interaction. We conclude by discussing the implications of our findings for the design of accountable VAs.
@inproceedings{alizadeh_voice_2024,
  address   = {New York, NY, USA},
  series    = {{CUI} '24},
  title     = {Voice {Assistants}' {Accountability} through {Explanatory} {Dialogues}},
  isbn      = {9798400705113},
  doi       = {10.1145/3640794.3665557},
  abstract  = {As voice assistants (VAs) become more advanced leveraging Large Language Models (LLMs) and natural language processing, their potential for accountable behavior expands. Yet, the long-term situational effectiveness of VAs’ accounts when errors occur remains unclear. In our 19-month exploratory study with 19 households, we investigated the impact of an Alexa feature that allows users to inquire about the reasons behind its actions. Our findings indicate that Alexa's accounts are often single, decontextualized responses that led to users’ alternative repair strategies over the long term, such as turning off the device, rather than initiating a dialogue about what went wrong. Through role-playing workshops, we demonstrate that VA interactions should facilitate explanatory dialogues as dynamic exchanges that consider a range of speech acts, recognizing users’ emotional states and the context of interaction. We conclude by discussing the implications of our findings for the design of accountable VAs.},
  booktitle = {Proceedings of the 6th {ACM} {Conference} on {Conversational} {User} {Interfaces}},
  publisher = {Association for Computing Machinery},
  author    = {Alizadeh, Fatemeh and Tolmie, Peter and Lee, Minha and Wintersberger, Philipp and Pins, Dominik and Stevens, Gunnar},
  month     = jul,
  year      = {2024},
  pages     = {1--12},
}
Downloads: 0
{"_id":"exjsAhvXDhPZgCsZF","bibbaseid":"alizadeh-tolmie-lee-wintersberger-pins-stevens-voiceassistantsaccountabilitythroughexplanatorydialogues-2024","author_short":["Alizadeh, F.","Tolmie, P.","Lee, M.","Wintersberger, P.","Pins, D.","Stevens, G."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","address":"New York, NY, USA","series":"CUI '24","title":"Voice Assistants' Accountability through Explanatory Dialogues","isbn":"9798400705113","url":"https://doi.org/10.1145/3640794.3665557","doi":"10.1145/3640794.3665557","abstract":"As voice assistants (VAs) become more advanced leveraging Large Language Models (LLMs) and natural language processing, their potential for accountable behavior expands. Yet, the long-term situational effectiveness of VAs’ accounts when errors occur remains unclear. In our 19-month exploratory study with 19 households, we investigated the impact of an Alexa feature that allows users to inquire about the reasons behind its actions. Our findings indicate that Alexa's accounts are often single, decontextualized responses that led to users’ alternative repair strategies over the long term, such as turning off the device, rather than initiating a dialogue about what went wrong. Through role-playing workshops, we demonstrate that VA interactions should facilitate explanatory dialogues as dynamic exchanges that consider a range of speech acts, recognizing users’ emotional states and the context of interaction. 
We conclude by discussing the implications of our findings for the design of accountable VAs.","urldate":"2024-07-11","booktitle":"Proceedings of the 6th ACM Conference on Conversational User Interfaces","publisher":"Association for Computing Machinery","author":[{"propositions":[],"lastnames":["Alizadeh"],"firstnames":["Fatemeh"],"suffixes":[]},{"propositions":[],"lastnames":["Tolmie"],"firstnames":["Peter"],"suffixes":[]},{"propositions":[],"lastnames":["Lee"],"firstnames":["Minha"],"suffixes":[]},{"propositions":[],"lastnames":["Wintersberger"],"firstnames":["Philipp"],"suffixes":[]},{"propositions":[],"lastnames":["Pins"],"firstnames":["Dominik"],"suffixes":[]},{"propositions":[],"lastnames":["Stevens"],"firstnames":["Gunnar"],"suffixes":[]}],"month":"July","year":"2024","pages":"1–12","bibtex":"@inproceedings{alizadeh_voice_2024,\n\taddress = {New York, NY, USA},\n\tseries = {{CUI} '24},\n\ttitle = {Voice {Assistants}' {Accountability} through {Explanatory} {Dialogues}},\n\tisbn = {9798400705113},\n\turl = {https://doi.org/10.1145/3640794.3665557},\n\tdoi = {10.1145/3640794.3665557},\n\tabstract = {As voice assistants (VAs) become more advanced leveraging Large Language Models (LLMs) and natural language processing, their potential for accountable behavior expands. Yet, the long-term situational effectiveness of VAs’ accounts when errors occur remains unclear. In our 19-month exploratory study with 19 households, we investigated the impact of an Alexa feature that allows users to inquire about the reasons behind its actions. Our findings indicate that Alexa's accounts are often single, decontextualized responses that led to users’ alternative repair strategies over the long term, such as turning off the device, rather than initiating a dialogue about what went wrong. 
Through role-playing workshops, we demonstrate that VA interactions should facilitate explanatory dialogues as dynamic exchanges that consider a range of speech acts, recognizing users’ emotional states and the context of interaction. We conclude by discussing the implications of our findings for the design of accountable VAs.},\n\turldate = {2024-07-11},\n\tbooktitle = {Proceedings of the 6th {ACM} {Conference} on {Conversational} {User} {Interfaces}},\n\tpublisher = {Association for Computing Machinery},\n\tauthor = {Alizadeh, Fatemeh and Tolmie, Peter and Lee, Minha and Wintersberger, Philipp and Pins, Dominik and Stevens, Gunnar},\n\tmonth = jul,\n\tyear = {2024},\n\tpages = {1--12},\n}\n\n\n\n\n\n\n\n","author_short":["Alizadeh, F.","Tolmie, P.","Lee, M.","Wintersberger, P.","Pins, D.","Stevens, G."],"key":"alizadeh_voice_2024","id":"alizadeh_voice_2024","bibbaseid":"alizadeh-tolmie-lee-wintersberger-pins-stevens-voiceassistantsaccountabilitythroughexplanatorydialogues-2024","role":"author","urls":{"Paper":"https://doi.org/10.1145/3640794.3665557"},"metadata":{"authorlinks":{}}},"bibtype":"inproceedings","biburl":"https://bibbase.org/zotero-group/WinemeCSCW/2912317","dataSources":["2aZf3F3Kjs6p26m56"],"keywords":[],"search_terms":["voice","assistants","accountability","through","explanatory","dialogues","alizadeh","tolmie","lee","wintersberger","pins","stevens"],"title":"Voice Assistants' Accountability through Explanatory Dialogues","year":2024}