Augmenting Abstract Meaning Representation for Human-Robot Dialogue. Bonial, C., Donatelli, L., Lukin, S. M., Tratz, S., Artstein, R., Traum, D., & Voss, C. R. In Proceedings of the First International Workshop on Designing Meaning Representations (DMR), pages 199–210, Florence, Italy, August, 2019. Association for Computational Linguistics.
Paper abstract bibtex We detail refinements made to Abstract Meaning Representation (AMR) that make the representation more suitable for supporting a situated dialogue system, where a human remotely controls a robot for purposes of search and rescue and reconnaissance. We propose 36 augmented AMRs that capture speech acts, tense and aspect, and spatial information. This linguistic information is vital for representing important distinctions, for example whether the robot has moved, is moving, or will move. We evaluate two existing AMR parsers for their performance on dialogue data. We also outline a model for graph-to-graph conversion, in which output from AMR parsers is converted into our refined AMRs. The design scheme presented here, though task-specific, is extendable for broad coverage of speech acts using AMR in future task-independent work.
@inproceedings{bonial_augmenting_2019,
address = {Florence, Italy},
title = {Augmenting {Abstract} {Meaning} {Representation} for {Human}-{Robot} {Dialogue}},
url = {https://www.aclweb.org/anthology/W19-3322},
abstract = {We detail refinements made to Abstract Meaning Representation (AMR) that make the representation more suitable for supporting a situated dialogue system, where a human remotely controls a robot for purposes of search and rescue and reconnaissance. We propose 36 augmented AMRs that capture speech acts, tense and aspect, and spatial information. This linguistic information is vital for representing important distinctions, for example whether the robot has moved, is moving, or will move. We evaluate two existing AMR parsers for their performance on dialogue data. We also outline a model for graph-to-graph conversion, in which output from AMR parsers is converted into our refined AMRs. The design scheme presented here, though task-specific, is extendable for broad coverage of speech acts using AMR in future task-independent work.},
booktitle = {Proceedings of the {First} {International} {Workshop} on {Designing} {Meaning} {Representations} ({DMR})},
publisher = {Association for Computational Linguistics},
author = {Bonial, Claire and Donatelli, Lucia and Lukin, Stephanie M. and Tratz, Stephen and Artstein, Ron and Traum, David and Voss, Clare R.},
month = aug,
year = {2019},
keywords = {ARL, DoD, UARC, Virtual Humans},
pages = {199--210},
}
Downloads: 0
{"_id":"SfJMfLN7rwpR9eMNJ","bibbaseid":"bonial-donatelli-lukin-tratz-artstein-traum-voss-augmentingabstractmeaningrepresentationforhumanrobotdialogue-2019","author_short":["Bonial, C.","Donatelli, L.","Lukin, S. M.","Tratz, S.","Artstein, R.","Traum, D.","Voss, C. R."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","address":"Florence, Italy","title":"Augmenting Abstract Meaning Representation for Human-Robot Dialogue","url":"https://www.aclweb.org/anthology/W19-3322","abstract":"We detail refinements made to Abstract Meaning Representation (AMR) that make the representation more suitable for supporting a situated dialogue system, where a human remotely controls a robot for purposes of search and rescue and reconnaissance. We propose 36 augmented AMRs that capture speech acts, tense and aspect, and spatial information. This linguistic information is vital for representing important distinctions, for example whether the robot has moved, is moving, or will move. We evaluate two existing AMR parsers for their performance on dialogue data. We also outline a model for graph-to-graph conversion, in which output from AMR parsers is converted into our refined AMRs. 
The design scheme presented here, though task-specific, is extendable for broad coverage of speech acts using AMR in future task-independent work.","booktitle":"Proceedings of the First International Workshop on Designing Meaning Representations (DMR)","publisher":"Association for Computational Linguistics","author":[{"propositions":[],"lastnames":["Bonial"],"firstnames":["Claire"],"suffixes":[]},{"propositions":[],"lastnames":["Donatelli"],"firstnames":["Lucia"],"suffixes":[]},{"propositions":[],"lastnames":["Lukin"],"firstnames":["Stephanie","M."],"suffixes":[]},{"propositions":[],"lastnames":["Tratz"],"firstnames":["Stephen"],"suffixes":[]},{"propositions":[],"lastnames":["Artstein"],"firstnames":["Ron"],"suffixes":[]},{"propositions":[],"lastnames":["Traum"],"firstnames":["David"],"suffixes":[]},{"propositions":[],"lastnames":["Voss"],"firstnames":["Clare","R."],"suffixes":[]}],"month":"August","year":"2019","keywords":"ARL, DoD, UARC, Virtual Humans","pages":"199–210","bibtex":"@inproceedings{bonial_augmenting_2019,\n\taddress = {Florence, Italy},\n\ttitle = {Augmenting {Abstract} {Meaning} {Representation} for {Human}-{Robot} {Dialogue}},\n\turl = {https://www.aclweb.org/anthology/W19-3322},\n\tabstract = {We detail refinements made to Abstract Meaning Representation (AMR) that make the representation more suitable for supporting a situated dialogue system, where a human remotely controls a robot for purposes of search and rescue and reconnaissance. We propose 36 augmented AMRs that capture speech acts, tense and aspect, and spatial information. This linguistic information is vital for representing important distinctions, for example whether the robot has moved, is moving, or will move. We evaluate two existing AMR parsers for their performance on dialogue data. We also outline a model for graph-to-graph conversion, in which output from AMR parsers is converted into our refined AMRs. 
The design scheme presented here, though task-specific, is extendable for broad coverage of speech acts using AMR in future task-independent work.},\n\tbooktitle = {Proceedings of the {First} {International} {Workshop} on {Designing} {Meaning} {Representations} ({DMR})},\n\tpublisher = {Association for Computational Linguistics},\n\tauthor = {Bonial, Claire and Donatelli, Lucia and Lukin, Stephanie M. and Tratz, Stephen and Artstein, Ron and Traum, David and Voss, Clare R.},\n\tmonth = aug,\n\tyear = {2019},\n\tkeywords = {ARL, DoD, UARC, Virtual Humans},\n\tpages = {199--210},\n}\n\n","author_short":["Bonial, C.","Donatelli, L.","Lukin, S. M.","Tratz, S.","Artstein, R.","Traum, D.","Voss, C. R."],"key":"bonial_augmenting_2019","id":"bonial_augmenting_2019","bibbaseid":"bonial-donatelli-lukin-tratz-artstein-traum-voss-augmentingabstractmeaningrepresentationforhumanrobotdialogue-2019","role":"author","urls":{"Paper":"https://www.aclweb.org/anthology/W19-3322"},"keyword":["ARL","DoD","UARC","Virtual Humans"],"metadata":{"authorlinks":{}},"html":""},"bibtype":"inproceedings","biburl":"https://api.zotero.org/users/6976806/collections/GUMH2QKL/items?key=ipCS99jY9KwOteQbpfAW7VKn&format=bibtex&limit=100","dataSources":["Z4B8L2qnYQgDdZhbe","jjKsXjebLR7xyJojc"],"keywords":["arl","dod","uarc","virtual humans"],"search_terms":["augmenting","abstract","meaning","representation","human","robot","dialogue","bonial","donatelli","lukin","tratz","artstein","traum","voss"],"title":"Augmenting Abstract Meaning Representation for Human-Robot Dialogue","year":2019}