A constrained navigation framework for individual and collaborative exploration of 3D environments. Wernert, E. A. In Visualization '99, 1999. Indiana University.
We approach the problem of exploring a virtual space by exploiting positional and camera-model constraints on navigation to provide extra assistance that focuses the user's explorational wanderings on the task objectives. Our specific design incorporates not only task-based constraints on the viewer's location, gaze, and viewing parameters, but also a personal "guide" that serves two important functions: keeping the user oriented in the navigation space, and "pointing" to interesting subject areas as they are approached. The guide's cues may be ignored by continuing in motion, but if the user stops, the gaze shifts automatically toward whatever the guide was interested in. This design has the serendipitous feature that it automatically incorporates a nested collaborative paradigm simply by allowing any given viewer to be seen as the "guide" of one or more viewers following behind; the leading automated guide (we tend to select a guide dog for this avatar) can remind the leading live human guide of interesting sites to point out, while each real human collaborator down the chain has some choices about whether to follow the local leader's hints. We have chosen VRML as our initial development medium primarily because of its portability, and we have implemented a variety of natural modes for leading and collaborating, including ways for collaborators to attach to and detach from a particular leader.
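
The abstract describes a concrete interaction rule: while the viewer keeps moving, the guide's cues are only hints, but once the viewer stops, the gaze automatically reorients toward the guide's current point of interest. Below is a minimal, hypothetical sketch of that rule in Python; it is not the authors' VRML implementation, and the planar position constraint, class name, and parameters are assumptions made purely for illustration.

# Minimal, hypothetical sketch (not the authors' VRML implementation) of the
# guided-navigation rule described in the abstract: position is confined to a
# navigation surface, the guide marks a point of interest, and when the viewer
# stops moving the gaze eases toward that point. All names and the planar
# constraint are assumptions for illustration only.
from dataclasses import dataclass
import math

def normalize(v):
    """Scale a 3-vector to unit length (returned unchanged if zero)."""
    n = math.sqrt(sum(c * c for c in v))
    return tuple(c / n for c in v) if n > 0.0 else v

def lerp(a, b, t):
    """Component-wise linear interpolation between two 3-vectors."""
    return tuple(x + (y - x) * t for x, y in zip(a, b))

@dataclass
class GuidedCamera:
    position: tuple = (0.0, 0.0, 0.0)
    gaze: tuple = (0.0, 0.0, -1.0)           # current viewing direction
    guide_target: tuple = (0.0, 0.0, -10.0)  # point the guide is "pointing" at
    turn_rate: float = 2.0                   # how quickly the gaze eases over

    def update(self, user_velocity, dt):
        # Positional constraint: motion is clamped to a navigation surface
        # (the y = 0 plane stands in for the task-based constraint manifold).
        vx, _, vz = user_velocity
        self.position = (self.position[0] + vx * dt, 0.0, self.position[2] + vz * dt)

        if vx != 0.0 or vz != 0.0:
            # While the user keeps moving, the guide's cue is only a hint:
            # the gaze simply follows the direction of travel.
            self.gaze = normalize((vx, 0.0, vz))
        else:
            # When the user stops, the gaze drifts automatically toward
            # whatever the guide is currently interested in.
            to_target = normalize(tuple(t - p for t, p in zip(self.guide_target, self.position)))
            step = min(1.0, self.turn_rate * dt)
            self.gaze = normalize(lerp(self.gaze, to_target, step))

# Usage: move once, then stop; the camera turns toward the guide's target.
cam = GuidedCamera(guide_target=(5.0, 0.0, -5.0))
cam.update(user_velocity=(1.0, 0.0, 0.0), dt=0.1)        # moving: gaze follows motion
for _ in range(30):
    cam.update(user_velocity=(0.0, 0.0, 0.0), dt=0.1)    # stopped: gaze eases to target
print(cam.gaze)

Easing the gaze with a rate-limited interpolation rather than snapping it keeps the reorientation unobtrusive, which is in the spirit of the "extra assistance" the abstract describes.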
@inproceedings{Wernert1999,
 title = {A constrained navigation framework for individual and collaborative exploration of 3D environments},
 type = {inproceedings},
 year = {1999},
 publisher = {Indiana University},
 abstract = {We approach the problem of exploring a virtual space by exploiting positional and camera-model constraints on navigation to provide extra assistance that focuses the user's explorational wanderings on the task objectives. Our specific design incorporates not only task-based constraints on the viewer's location, gaze, and viewing parameters, but also a personal "guide" that serves two important functions: keeping the user oriented in the navigation space, and "pointing" to interesting subject areas as they are approached. The guide's cues may be ignored by continuing in motion, but if the user stops, the gaze shifts automatically toward whatever the guide was interested in. This design has the serendipitous feature that it automatically incorporates a nested collaborative paradigm simply by allowing any given viewer to be seen as the "guide" of one or more viewers following behind; the leading automated guide (we tend to select a guide dog for this avatar) can remind the leading live human guide of interesting sites to point out, while each real human collaborator down the chain has some choices about whether to follow the local leader's hints. We have chosen VRML as our initial development medium primarily because of its portability, and we have implemented a variety of natural modes for leading and collaborating, including ways for collaborators to attach to and detach from a particular leader.},
 author = {Wernert, Eric Andrew},
 doi = {10.1109/VISUAL.1999.809893},
 booktitle = {Visualization '99}
}
