Toward Understanding Natural Language Directions. Kollar, T., Tellex, S., Roy, D., & Roy, N.
Toward Understanding Natural Language Directions — [pdf] Paper · abstract · bibtex
Speaking using unconstrained natural language is an intuitive and flexible way for humans to interact with robots. Understanding this kind of linguistic input is challenging because diverse words and phrases must be mapped into structures that the robot can understand, and elements in those structures must be grounded in an uncertain environment. We present a system that follows natural language directions by extracting a sequence of spatial description clauses from the linguistic input and then infers the most probable path through the environment given only information about the environmental geometry and detected visible objects. We use a probabilistic graphical model that factors into three key components. The first component grounds landmark phrases such as "the computers" in the perceptual frame of the robot by exploiting co-occurrence statistics from a database of tagged images such as Flickr. Second, a spatial reasoning component judges how well spatial relations such as "past the computers" describe a path. Finally, verb phrases such as "turn right" are modeled according to the amount of change in orientation in the path. Our system follows 60% of the directions in our corpus to within 15 meters of the true destination, significantly outperforming other approaches.
@article{kollar2010directions,
 title = {Toward Understanding Natural Language Directions},
 type = {article},
 keywords = {spatial language,direction understanding,route instructions},
 id = {af8bfbcb-b77e-3b81-90f6-5aea58d86e6b},
 created = {2017-09-01T15:53:37.422Z},
 file_attached = {true},
 profile_id = {80da7853-f7b7-36a9-8e4c-d7ddb2d9e538},
 group_id = {a2333ea3-15a4-3d40-8d36-f0d9590ca926},
 last_modified = {2017-09-01T15:53:37.548Z},
 read = {false},
 starred = {false},
 authored = {false},
 confirmed = {false},
 hidden = {false},
 abstract = {Speaking using unconstrained natural language is an intuitive and flexible way for humans to interact with robots. Understanding this kind of linguistic input is challenging because diverse words and phrases must be mapped into structures that the robot can understand, and elements in those structures must be grounded in an uncertain environment. We present a system that follows natural language directions by extracting a sequence of spatial description clauses from the linguistic input and then infers the most probable path through the environment given only information about the environmental geometry and detected visible objects. We use a probabilistic graphical model that factors into three key components. The first component grounds landmark phrases such as ``the computers'' in the perceptual frame of the robot by exploiting co-occurrence statistics from a database of tagged images such as Flickr. Second, a spatial reasoning component judges how well spatial relations such as ``past the computers'' describe a path. Finally, verb phrases such as ``turn right'' are modeled according to the amount of change in orientation in the path. Our system follows 60\% of the directions in our corpus to within 15 meters of the true destination, significantly outperforming other approaches.},
 internal-note = {review: entry is missing the year and venue fields required for @article; source export does not state them — confirm and add before use},
 bibtype = {article},
 author = {Kollar, Thomas and Tellex, Stefanie and Roy, Deb and Roy, Nicholas}
}
Downloads: 0