Humanizing chatbots: The effects of visual, identity and conversational cues on humanness perceptions. Go, E. & Sundar, S. S. Computers in Human Behavior, 97(June 2018):304–316, 2019. Publisher: Elsevier
Abstract: Chatbots are replacing human agents in a number of domains, from online tutoring to customer-service to even cognitive therapy. But, they are often machine-like in their interactions. What can we do to humanize chatbots? Should they necessarily be driven by human operators for them to be considered human? Or, will an anthropomorphic visual cue on the interface and/or a high-level of contingent message exchanges provide humanness to automated chatbots? We explored these questions with a 2 (anthropomorphic visual cues: high vs. low anthropomorphism) × 2 (message interactivity: high vs. low message interactivity) × 2 (identity cue: chat-bot vs. human) between-subjects experiment (N = 141) in which participants interacted with a chat agent on an e-commerce site about choosing a digital camera to purchase. Our findings show that a high level of message interactivity compensates for the impersonal nature of a chatbot that is low on anthropomorphic visual cues. Moreover, identifying the agent as human raises user expectations for interactivity. Theoretical as well as practical implications of these findings are discussed.
@article{go_humanizing_2019,
	author    = {Go, Eun and Sundar, S. Shyam},
	title     = {Humanizing chatbots: {The} effects of visual, identity and conversational cues on humanness perceptions},
	journal   = {Computers in Human Behavior},
	volume    = {97},
	pages     = {304--316},
	year      = {2019},
	publisher = {Elsevier},
	issn      = {0747-5632},
	doi       = {10.1016/j.chb.2019.01.020},
	abstract  = {Chatbots are replacing human agents in a number of domains, from online tutoring to customer-service to even cognitive therapy. But, they are often machine-like in their interactions. What can we do to humanize chatbots? Should they necessarily be driven by human operators for them to be considered human? Or, will an anthropomorphic visual cue on the interface and/or a high-level of contingent message exchanges provide humanness to automated chatbots? We explored these questions with a 2 (anthropomorphic visual cues: high vs. low anthropomorphism) × 2 (message interactivity: high vs. low message interactivity) × 2 (identity cue: chat-bot vs. human) between-subjects experiment (N = 141) in which participants interacted with a chat agent on an e-commerce site about choosing a digital camera to purchase. Our findings show that a high level of message interactivity compensates for the impersonal nature of a chatbot that is low on anthropomorphic visual cues. Moreover, identifying the agent as human raises user expectations for interactivity. Theoretical as well as practical implications of these findings are discussed.},
	keywords  = {Anthropomorphic visual cue, Compensation effect, Expectancy violation effect, Identity cue, Message interactivity, Online chat agents},
}
Downloads: 0
{"_id":"SMk4ohFe2mYpJAxmW","bibbaseid":"go-sundar-humanizingchatbotstheeffectsofvisualidentityandconversationalcuesonhumannessperceptions-2019","author_short":["Go, E.","Sundar, S. S."],"bibdata":{"bibtype":"article","type":"article","title":"Humanizing chatbots: The effects of visual, identity and conversational cues on humanness perceptions","volume":"97","issn":"07475632","url":"https://doi.org/10.1016/j.chb.2019.01.020","doi":"10.1016/j.chb.2019.01.020","abstract":"Chatbots are replacing human agents in a number of domains, from online tutoring to customer-service to even cognitive therapy. But, they are often machine-like in their interactions. What can we do to humanize chatbots? Should they necessarily be driven by human operators for them to be considered human? Or, will an anthropomorphic visual cue on the interface and/or a high-level of contingent message exchanges provide humanness to automated chatbots? We explored these questions with a 2 (anthropomorphic visual cues: high vs. low anthropomorphism) × 2 (message interactivity: high vs. low message interactivity) × 2 (identity cue: chat-bot vs. human) between-subjects experiment (N = 141) in which participants interacted with a chat agent on an e-commerce site about choosing a digital camera to purchase. Our findings show that a high level of message interactivity compensates for the impersonal nature of a chatbot that is low on anthropomorphic visual cues. Moreover, identifying the agent as human raises user expectations for interactivity. 
Theoretical as well as practical implications of these findings are discussed.","number":"June 2018","journal":"Computers in Human Behavior","author":[{"propositions":[],"lastnames":["Go"],"firstnames":["Eun"],"suffixes":[]},{"propositions":[],"lastnames":["Sundar"],"firstnames":["S.","Shyam"],"suffixes":[]}],"year":"2019","note":"Publisher: Elsevier","keywords":"Anthropomorphic visual cue, Compensation effect, Expectancy violation effect, Identity cue, Message interactivity, Online chat agents","pages":"304–316","bibtex":"@article{go_humanizing_2019,\n\ttitle = {Humanizing chatbots: {The} effects of visual, identity and conversational cues on humanness perceptions},\n\tvolume = {97},\n\tissn = {07475632},\n\turl = {https://doi.org/10.1016/j.chb.2019.01.020},\n\tdoi = {10.1016/j.chb.2019.01.020},\n\tabstract = {Chatbots are replacing human agents in a number of domains, from online tutoring to customer-service to even cognitive therapy. But, they are often machine-like in their interactions. What can we do to humanize chatbots? Should they necessarily be driven by human operators for them to be considered human? Or, will an anthropomorphic visual cue on the interface and/or a high-level of contingent message exchanges provide humanness to automated chatbots? We explored these questions with a 2 (anthropomorphic visual cues: high vs. low anthropomorphism) × 2 (message interactivity: high vs. low message interactivity) × 2 (identity cue: chat-bot vs. human) between-subjects experiment (N = 141) in which participants interacted with a chat agent on an e-commerce site about choosing a digital camera to purchase. Our findings show that a high level of message interactivity compensates for the impersonal nature of a chatbot that is low on anthropomorphic visual cues. Moreover, identifying the agent as human raises user expectations for interactivity. 
Theoretical as well as practical implications of these findings are discussed.},\n\tnumber = {June 2018},\n\tjournal = {Computers in Human Behavior},\n\tauthor = {Go, Eun and Sundar, S. Shyam},\n\tyear = {2019},\n\tnote = {Publisher: Elsevier},\n\tkeywords = {Anthropomorphic visual cue, Compensation effect, Expectancy violation effect, Identity cue, Message interactivity, Online chat agents},\n\tpages = {304--316},\n}\n\n\n\n","author_short":["Go, E.","Sundar, S. S."],"key":"go_humanizing_2019","id":"go_humanizing_2019","bibbaseid":"go-sundar-humanizingchatbotstheeffectsofvisualidentityandconversationalcuesonhumannessperceptions-2019","role":"author","urls":{"Paper":"https://doi.org/10.1016/j.chb.2019.01.020"},"keyword":["Anthropomorphic visual cue","Compensation effect","Expectancy violation effect","Identity cue","Message interactivity","Online chat agents"],"metadata":{"authorlinks":{}},"html":""},"bibtype":"article","biburl":"https://bibbase.org/zotero/jumoga","dataSources":["NWfDysABGA323queN"],"keywords":["anthropomorphic visual cue","compensation effect","expectancy violation effect","identity cue","message interactivity","online chat agents"],"search_terms":["humanizing","chatbots","effects","visual","identity","conversational","cues","humanness","perceptions","go","sundar"],"title":"Humanizing chatbots: The effects of visual, identity and conversational cues on humanness perceptions","year":2019}