Publications (generated by bibbase.org)

You can embed this list in an existing web page by copying and pasting any of the following snippets.

JavaScript (easiest):

<script src="https://bibbase.org/show?bib=https%3A%2F%2Fbibbase.org%2Fzotero-mypublications%2Fgirdhar&jsonp=1"></script>

PHP:

<?php
$contents = file_get_contents("https://bibbase.org/show?bib=https%3A%2F%2Fbibbase.org%2Fzotero-mypublications%2Fgirdhar&jsonp=1");
print_r($contents);
?>

iFrame (not recommended):

<iframe src="https://bibbase.org/show?bib=https%3A%2F%2Fbibbase.org%2Fzotero-mypublications%2Fgirdhar&jsonp=1"></iframe>

For more details see the documentation.
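If you would rather work with the underlying BibTeX records than the rendered widget, a minimal sketch along the following lines should work. It assumes the bib URL above (https://bibbase.org/zotero-mypublications/girdhar) serves the raw .bib file and that the third-party bibtexparser package is installed; both are assumptions, not something the snippets above guarantee.

import urllib.request

import bibtexparser  # third-party; assumed installed (pip install bibtexparser)

# Assumed to serve the raw .bib file behind this page (see the bib= parameter above).
BIB_URL = "https://bibbase.org/zotero-mypublications/girdhar"

with urllib.request.urlopen(BIB_URL) as resp:
    db = bibtexparser.loads(resp.read().decode("utf-8"))

# Print each entry's year and title, mirroring the year-grouped list below.
for entry in db.entries:
    print(entry.get("year", "n.d."), "|", entry.get("title", "untitled"))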
2024 (5)

Spatiotemporal Topic Modeling Reveals Storm-Driven Advection and Stirring Control Plankton Community Variability in an Open Ocean Eddy.
San Soucie, J.; Girdhar, Y.; Johnson, L.; Peacock, E. E.; Shalapyonok, A.; and Sosik, H. M.
[Submitted] Journal of Geophysical Research: Oceans. 2024.

@article{soucie_spatiotemporal_2024,
  title = {Spatiotemporal {Topic} {Modeling} {Reveals} {Storm}-{Driven} {Advection} and {Stirring} {Control} {Plankton} {Community} {Variability} in an {Open} {Ocean} {Eddy}},
  copyright = {All rights reserved},
  journal = {[Submitted] Journal of Geophysical Research: Oceans},
  author = {San Soucie, John and Girdhar, Yogesh and Johnson, Leah and Peacock, Emily E. and Shalapyonok, Alexei and Sosik, Heidi M.},
  year = {2024},
}

Finding the optimal exploration-exploitation trade-off online through Bayesian risk estimation and minimization.
Jamieson, S.; How, J. P.; and Girdhar, Y.
Artificial Intelligence, 104096. February 2024. doi:10.1016/j.artint.2024.104096.

@article{jamieson_finding_2024,
  title = {Finding the optimal exploration-exploitation trade-off online through {Bayesian} risk estimation and minimization},
  copyright = {All rights reserved},
  issn = {00043702},
  url = {https://linkinghub.elsevier.com/retrieve/pii/S0004370224000328},
  doi = {10.1016/j.artint.2024.104096},
  language = {en},
  urldate = {2024-02-29},
  journal = {Artificial Intelligence},
  author = {Jamieson, Stewart and How, Jonathan P. and Girdhar, Yogesh},
  month = feb,
  year = {2024},
  pages = {104096},
}

Underwater Dome-Port Camera Calibration: Modeling of Refraction and Offset through N-Sphere Camera Model.
Roznere, M.; Pediredla, A. K.; Lensgraf, S. E.; Girdhar, Y.; and Quattrini Li, A.
In IEEE International Conference on Robotics and Automation (ICRA), 2024.

@inproceedings{roznere_underwater_2024,
  title = {Underwater {Dome}-{Port} {Camera} {Calibration}: {Modeling} of {Refraction} and {Offset} through {N}-{Sphere} {Camera} {Model}},
  copyright = {All rights reserved},
  booktitle = {{IEEE} {International} {Conference} on {Robotics} and {Automation} ({ICRA})},
  author = {Roznere, Monika and Pediredla, Adithya K. and Lensgraf, Samuel E. and Girdhar, Yogesh and Quattrini Li, Alberto},
  year = {2024},
}

ReefGlider: A highly maneuverable vectored buoyancy engine based underwater robot.
Macauley, K.; Cai, L.; Adamczyk, P.; and Girdhar, Y.
In IEEE International Conference on Robotics and Automation (ICRA), 2024.

@inproceedings{macauley_reefglider:_2024,
  title = {{ReefGlider}: {A} highly maneuverable vectored buoyancy engine based underwater robot},
  copyright = {All rights reserved},
  booktitle = {{IEEE} {International} {Conference} on {Robotics} and {Automation} ({ICRA})},
  author = {Macauley, Kevin and Cai, Levi and Adamczyk, Peter and Girdhar, Yogesh},
  year = {2024},
}

Discovering Biological Hotspots with a Passively Listening AUV.
McCammon, S.; Jamieson, S.; Mooney, T. A.; and Girdhar, Y.
In IEEE International Conference on Robotics and Automation (ICRA), 2024.

@inproceedings{mccammon_discovering_2024,
  title = {Discovering {Biological} {Hotspots} with a {Passively} {Listening} {AUV}},
  copyright = {All rights reserved},
  booktitle = {{IEEE} {International} {Conference} on {Robotics} and {Automation} ({ICRA})},
  author = {McCammon, Seth and Jamieson, Stewart and Mooney, T. Aran and Girdhar, Yogesh},
  year = {2024},
  pages = {7},
}

2023 (10)

Streaming Gaussian Dirichlet Random Fields for Spatial Predictions of High Dimensional Categorical Observations.
San Soucie, J. E.; Sosik, H. M.; and Girdhar, Y.
In 18th International Symposium on Experimental Robotics (ISER), Chiang Mai, Thailand, November 2023.

@inproceedings{SoucieSGDRF2023,
  address = {Chiang Mai, Thailand},
  title = {Streaming {Gaussian} {Dirichlet} {Random} {Fields} for {Spatial} {Predictions} of {High} {Dimensional} {Categorical} {Observations}},
  copyright = {All rights reserved},
  abstract = {We present the Streaming Gaussian Dirichlet Random Field (S-GDRF) model, a novel approach for modeling a stream of spatiotemporally distributed, sparse, high-dimensional categorical observations. The proposed approach efficiently learns global and local patterns in spatiotemporal data, allowing for fast inference and querying with a bounded time complexity. Using a high-resolution data series of plankton images classified with a neural network, we demonstrate the ability of the approach to make more accurate predictions compared to a Variational Gaussian Process (VGP), and to learn a predictive distribution of observations from streaming categorical data. S-GDRFs open the door to enabling efficient informative path planning over high-dimensional categorical observations, which until now has not been feasible.},
  booktitle = {18th {International} {Symposium} on {Experimental} {Robotics} ({ISER})},
  author = {San Soucie, John E. and Sosik, Heidi M. and Girdhar, Yogesh},
  month = nov,
  year = {2023},
}

CUREE: a curious underwater robot for ecosystem exploration.
Girdhar, Y.; McGuire, N.; Cai, L.; Jamieson, S.; McCammon, S.; Claus, B.; San Soucie, J. E.; Todd, J. E.; and Mooney, T. A.
In 2023 IEEE International Conference on Robotics and Automation (ICRA), pages 11411–11417, London, United Kingdom, May 2023. IEEE. doi:10.1109/ICRA48891.2023.10161282.

@inproceedings{Girdhar2023,
  address = {London, United Kingdom},
  title = {{CUREE}: a curious underwater robot for ecosystem exploration},
  copyright = {All rights reserved},
  isbn = {9798350323658},
  shorttitle = {{CUREE}},
  doi = {10.1109/ICRA48891.2023.10161282},
  urldate = {2023-08-20},
  booktitle = {2023 {IEEE} {International} {Conference} on {Robotics} and {Automation} ({ICRA})},
  publisher = {IEEE},
  author = {Girdhar, Yogesh and McGuire, Nathan and Cai, Levi and Jamieson, Stewart and McCammon, Seth and Claus, Brian and San Soucie, John E. and Todd, Jessica E. and Mooney, T. Aran},
  month = may,
  year = {2023},
  pages = {11411--11417},
}

Short-term habituation of the longfin squid (Doryteuthis pealeii) to pile driving sound.
Jézéquel, Y.; Jandial, P.; Cones, S. F.; Ferguson, S.; Aoki, N.; Girdhar, Y.; and Mooney, T. A.
ICES Journal of Marine Science, fsad157. October 2023. doi:10.1093/icesjms/fsad157.

@article{jezequel_short-term_2023,
  title = {Short-term habituation of the longfin squid ({Doryteuthis} pealeii) to pile driving sound},
  copyright = {All rights reserved},
  issn = {1054-3139, 1095-9289},
  url = {https://academic.oup.com/icesjms/advance-article/doi/10.1093/icesjms/fsad157/7329472},
  doi = {10.1093/icesjms/fsad157},
  abstract = {Offshore windfarms are a key renewable solution to help supply global energy needs. However, implementation has its challenges, including intense pile driving sound produced during constructions, which can affect marine life at the individual level, yet impacts at the group level remain poorly studied. Here, we exposed groups of longfin squid (Doryteuthis pealeii) in cages at multiple distances from consecutive pile driving events and sought to quantify responses at both individual and group levels. Pile driving induced short-term alarm responses at sound levels (in zero-peak) of 112–123 dB re 1 µm s$^{-2}$ that were similar to those measured at kilometre scale from offshore windfarm constructions. The rate of individual alarm responses quickly decreased both within and across consecutive pile driving events, a result consistent with previous laboratory studies. Despite observing dramatic behavioural changes in response to initial pile driving sound, there were no significant differences in squid shoaling areas before and during exposure, showing no disruption of squid collective behaviours. Our results demonstrate rapid habituation of squid to pile driving sound, showing minimal effects on this ecologically and commercially key taxon. However, future work is now needed to assess responses of wild squid shoals in the vicinity of offshore windfarm constructions.},
  language = {en},
  journal = {ICES Journal of Marine Science},
  author = {Jézéquel, Y. and Jandial, P. and Cones, S. F. and Ferguson, S. and Aoki, N. and Girdhar, Y. and Mooney, T. A.},
  editor = {Secor, David},
  month = oct,
  year = {2023},
  pages = {fsad157},
}

Semitransparent tactile surface sensor and a method of sensing an interaction with an object using the semitransparent tactile surface sensor.
Hogan, F.; Jenkin, M.; Dudek, G. L.; Girdhar, Y.; Rezaei-Shoshtari, S.; and Meger, D.
U.S. Patent 11656759. May 2023.

@patent{francois_hogan_semitransparent_2023,
  title = {Semitransparent tactile surface sensor and a method of sensing an interaction with an object using the semitransparent tactile surface sensor},
  copyright = {All rights reserved},
  abstract = {A method of sensing an interaction with an object using a semitransparent tactile surface (STS) sensor having an image sensor and a semitransparent membrane includes capturing, by the image sensor, a stream of images; separating the stream of images into a visual stream including light traveling through the semitransparent membrane and a tactile stream including light reflecting off of the semitransparent membrane; and processing the visual stream and tactile stream through a multimodal deep neural network.},
  nationality = {US},
  number = {11656759},
  author = {{Francois Hogan} and {Michael Jenkin} and {Gregory Lewis Dudek} and {Yogesh Girdhar} and {Sahand Rezaei-Shoshtari} and {David Meger}},
  month = may,
  year = {2023},
}

Robot Goes Fishing: Rapid, High-Resolution Biological Hotspot Mapping in Coral Reefs with Vision-Guided Autonomous Underwater Vehicles.
Yang, D.; Cai, L.; Jamieson, S.; and Girdhar, Y.
In IEEE Computer Vision and Pattern Recognition (CVPR) CV4Animals Workshop, 2023. arXiv:2305.02330 [cs].

@inproceedings{yang_robot_2023,
  title = {Robot {Goes} {Fishing}: {Rapid}, {High}-{Resolution} {Biological} {Hotspot} {Mapping} in {Coral} {Reefs} with {Vision}-{Guided} {Autonomous} {Underwater} {Vehicles}},
  copyright = {All rights reserved},
  shorttitle = {Robot {Goes} {Fishing}},
  url = {http://arxiv.org/abs/2305.02330},
  abstract = {Coral reefs are fast-changing and complex ecosystems that are crucial to monitor and study. Biological hotspot detection can help coral reef managers prioritize limited resources for monitoring and intervention tasks. Here, we explore the use of autonomous underwater vehicles (AUVs) with cameras, coupled with visual detectors and photogrammetry, to map and identify these hotspots. This approach can provide high spatial resolution information in fast feedback cycles. To the best of our knowledge, we present one of the first attempts at using an AUV to gather visually-observed, fine-grain biological hotspot maps in concert with topography of a coral reef. Our hotspot maps correlate with rugosity, an established proxy metric for coral reef biodiversity and abundance, as well as with our visual inspections of the 3D reconstruction. We also investigate issues of scaling this approach when applied to new reefs by using these visual detectors pre-trained on large public datasets.},
  booktitle = {{IEEE} {Computer} {Vision} and {Pattern} {Recognition} ({CVPR}) {CV4Animals} {Workshop}},
  publisher = {arXiv},
  author = {Yang, Daniel and Cai, Levi and Jamieson, Stewart and Girdhar, Yogesh},
  year = {2023},
  note = {arXiv:2305.02330 [cs]},
}

Toward a New Era of Coral Reef Monitoring.
Apprill, A.; Girdhar, Y.; Mooney, T. A.; Hansel, C. M.; Long, M. H.; Liu, Y.; Zhang, W. G.; Kapit, J.; Hughen, K.; Coogan, J.; and Greene, A.
Environmental Science & Technology, 57(13): 5117–5124. April 2023. doi:10.1021/acs.est.2c05369.

@article{apprill_toward_2023,
  title = {Toward a {New} {Era} of {Coral} {Reef} {Monitoring}},
  volume = {57},
  copyright = {All rights reserved},
  issn = {0013-936X, 1520-5851},
  url = {https://pubs.acs.org/doi/10.1021/acs.est.2c05369},
  doi = {10.1021/acs.est.2c05369},
  language = {en},
  number = {13},
  journal = {Environmental Science \& Technology},
  author = {Apprill, Amy and Girdhar, Yogesh and Mooney, T. Aran and Hansel, Colleen M. and Long, Matthew H. and Liu, Yaqin and Zhang, W. Gordon and Kapit, Jason and Hughen, Konrad and Coogan, Jeff and Greene, Austin},
  month = apr,
  year = {2023},
  pages = {5117--5124},
}

DeepSeeColor: Realtime Adaptive Color Correction for Autonomous Underwater Vehicles via Deep Learning Methods.
Jamieson, S.; How, J. P.; and Girdhar, Y.
In 2023 IEEE International Conference on Robotics and Automation (ICRA), pages 3095–3101, London, United Kingdom, May 2023. IEEE. doi:10.1109/ICRA48891.2023.10160477.

@inproceedings{Jamieson2023,
  address = {London, United Kingdom},
  title = {{DeepSeeColor}: {Realtime} {Adaptive} {Color} {Correction} for {Autonomous} {Underwater} {Vehicles} via {Deep} {Learning} {Methods}},
  copyright = {All rights reserved},
  isbn = {9798350323658},
  shorttitle = {{DeepSeeColor}},
  url = {https://ieeexplore.ieee.org/document/10160477/},
  doi = {10.1109/ICRA48891.2023.10160477},
  booktitle = {2023 {IEEE} {International} {Conference} on {Robotics} and {Automation} ({ICRA})},
  publisher = {IEEE},
  author = {Jamieson, Stewart and How, Jonathan P. and Girdhar, Yogesh},
  month = may,
  year = {2023},
  pages = {3095--3101},
}

Semi-supervised Visual Tracking of Marine Animals Using Autonomous Underwater Vehicles.
Cai, L.; McGuire, N. E.; Hanlon, R.; Mooney, T. A.; and Girdhar, Y.
International Journal of Computer Vision, 131(6): 1406–1427. June 2023. doi:10.1007/s11263-023-01762-5.

@article{Cai2023,
  title = {Semi-supervised {Visual} {Tracking} of {Marine} {Animals} {Using} {Autonomous} {Underwater} {Vehicles}},
  volume = {131},
  copyright = {All rights reserved},
  issn = {0920-5691, 1573-1405},
  url = {https://link.springer.com/10.1007/s11263-023-01762-5},
  doi = {10.1007/s11263-023-01762-5},
  abstract = {In-situ visual observations of marine organisms is crucial to developing behavioural understandings and their relations to their surrounding ecosystem. Typically, these observations are collected via divers, tags, and remotely-operated or human-piloted vehicles. Recently, however, autonomous underwater vehicles equipped with cameras and embedded computers with GPU capabilities are being developed for a variety of applications, and in particular, can be used to supplement these existing data collection mechanisms where human operation or tags are more difficult. Existing approaches have focused on using fully-supervised tracking methods, but labelled data for many underwater species are severely lacking. Semi-supervised trackers may offer alternative tracking solutions because they require less data than fully-supervised counterparts. However, because there are not existing realistic underwater tracking datasets, the performance of semi-supervised tracking algorithms in the marine domain is not well understood. To better evaluate their performance and utility, in this paper we provide (1) a novel dataset specific to marine animals located at http://warp.whoi.edu/vmat/, (2) an evaluation of state-of-the-art semi-supervised algorithms in the context of underwater animal tracking, and (3) an evaluation of real-world performance through demonstrations using a semi-supervised algorithm on-board an autonomous underwater vehicle to track marine animals in the wild.},
  language = {en},
  number = {6},
  journal = {International Journal of Computer Vision},
  author = {Cai, Levi and McGuire, Nathan E. and Hanlon, Roger and Mooney, T. Aran and Girdhar, Yogesh},
  month = jun,
  year = {2023},
  pages = {1406--1427},
}

Computer Vision Applications in Underwater Robotics and Oceanography.
Islam, M. J.; Quattrini Li, A.; Girdhar, Y.; and Rekleitis, I.
In Computer Vision: Challenges, Trends, and Opportunities. 2023.

@incollection{islam_computer_2023,
  title = {Computer {Vision} {Applications} in {Underwater} {Robotics} and {Oceanography}},
  copyright = {All rights reserved},
  booktitle = {Computer {Vision}: {Challenges}, {Trends}, and {Opportunities}},
  author = {Islam, Md. Jahidul and Quattrini Li, Alberto and Girdhar, Yogesh and Rekleitis, Ioannis},
  year = {2023},
}

CATAIN: An underwater camera system for studying settlement in fouling communities at high temporal resolution.
Meyer-Kaiser, K. S.; Schrage, K. R.; Suman, S.; Bailey, J.; and Girdhar, Y.
Limnology and Oceanography: Methods, 21(6): 345–355. June 2023. doi:10.1002/lom3.10550.

@article{meyer2023,
  title = {{CATAIN}: {An} underwater camera system for studying settlement in fouling communities at high temporal resolution},
  volume = {21},
  copyright = {All rights reserved},
  issn = {1541-5856, 1541-5856},
  url = {https://aslopubs.onlinelibrary.wiley.com/doi/10.1002/lom3.10550},
  doi = {10.1002/lom3.10550},
  language = {en},
  number = {6},
  urldate = {2023-08-20},
  journal = {Limnology and Oceanography: Methods},
  author = {Meyer-Kaiser, Kirstin S. and Schrage, Kharis R. and Suman, Stefano and Bailey, John and Girdhar, Yogesh},
  month = jun,
  year = {2023},
  pages = {345--355},
}

2022 (4)

Adaptive Online Sampling of Periodic Processes with Application to Coral Reef Acoustic Abundance Monitoring.
McCammon, S.; Aoki, N.; Mooney, T. A.; and Girdhar, Y.
In 2022 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 11671–11678, October 2022. IEEE. doi:10.1109/IROS47612.2022.9982217.

@inproceedings{Mccammon2022,
  title = {Adaptive {Online} {Sampling} of {Periodic} {Processes} with {Application} to {Coral} {Reef} {Acoustic} {Abundance} {Monitoring}},
  copyright = {All rights reserved},
  isbn = {978-1-66547-927-1},
  url = {https://ieeexplore.ieee.org/document/9982217/},
  doi = {10.1109/IROS47612.2022.9982217},
  abstract = {In this paper, we present an approach that enables long-term monitoring of biological activity on coral reefs by extending mission time and adaptively focusing sensing resources on high-value periods. Coral reefs are one of the most biodiverse ecosystems on the planet; yet they are also among the most imperiled: facing bleaching, ecological community collapses due to global climate change, and degradation from human activities. Our proposed method improves the ability of scientists to monitor biological activity and abundance using passive acoustic sensors. We accomplish this by extracting periodicities from the observed abundance, and using them to predict future abundance. This predictive model is then used with a Monte Carlo Tree Search planning algorithm to schedule sampling at periods of high biological activity, and power down the sensor during periods of low activity. In simulated experiments using long-term acoustic datasets collected in the US Virgin Islands, our adaptive Online Sensor Scheduling algorithm is able to double the lifetime of a sensor while simultaneously increasing the average observed acoustic activity by 21\%.},
  booktitle = {2022 {IEEE}/{RSJ} {International} {Conference} on {Intelligent} {Robots} and {Systems} ({IROS})},
  publisher = {IEEE},
  author = {McCammon, Seth and Aoki, Nadege and Mooney, T. Aran and Girdhar, Yogesh},
  month = oct,
  year = {2022},
  pages = {11671--11678},
}

Aquaculture monitoring system and method.
Girdhar, Y.
Patent, 2022.

@patent{Girdhar2022,
  title = {Aquaculture monitoring system and method},
  copyright = {All rights reserved},
  author = {Girdhar, Yogesh},
  year = {2022},
  pages = {35},
}

[poster] Rapid and Comprehensive Coral Reef Monitoring through Semantic Mapping of the Reef Benthos.
Jamieson, S.; Becker, C.; Apprill, A.; Mooney, T. A.; and Girdhar, Y.
In International Coral Reef Symposium (ICRS), 2022.

@inproceedings{Jamieson,
  title = {[poster] {Rapid} and {Comprehensive} {Coral} {Reef} {Monitoring} through {Semantic} {Mapping} of the {Reef} {Benthos}},
  copyright = {All rights reserved},
  booktitle = {International {Coral} {Reef} {Symposium} ({ICRS})},
  author = {Jamieson, Stewart and Becker, Cynthia and Apprill, Amy and Mooney, T. Aran and Girdhar, Yogesh},
  year = {2022},
}

[poster] Evaluating semi-supervised, in-situ visual tracking methods for marine animals.
Cai, L.; Hanlon, R.; and Girdhar, Y.
In Ocean Sciences Meeting (OSM), 2022.

@inproceedings{Cai2022a,
  title = {[poster] {Evaluating} semi-supervised, in-situ visual tracking methods for marine animals},
  copyright = {All rights reserved},
  booktitle = {Ocean {Sciences} {Meeting} ({OSM})},
  author = {Cai, Levi and Hanlon, Roger and Girdhar, Yogesh},
  year = {2022},
}

2021 (7)

Optimizing Cellular Networks via Continuously Moving Base Stations on Road Networks.
Girdhar, Y.; Rivkin, D.; Wu, D.; Jenkin, M.; Liu, X.; and Dudek, G.
In Proceedings - IEEE International Conference on Robotics and Automation, volume 2021-May, pages 4020–4025, 2021. Institute of Electrical and Electronics Engineers Inc. ISSN: 10504729. doi:10.1109/ICRA48506.2021.9561052.

@inproceedings{girdhar_optimizing_2021,
  title = {Optimizing {Cellular} {Networks} via {Continuously} {Moving} {Base} {Stations} on {Road} {Networks}},
  volume = {2021-May},
  copyright = {All rights reserved},
  isbn = {978-1-72819-077-8},
  doi = {10.1109/ICRA48506.2021.9561052},
  abstract = {Although existing cellular network base stations are typically immobile, the recent development of small form factor base stations and self driving cars has enabled the possibility of deploying a team of continuously moving base stations that can reorganize the network infrastructure to adapt to changing network traffic usage patterns. Given such a system of mobile base stations (MBSes) that can freely move on the road, how should their path be planned in an effort to optimize the experience of the users? This paper addresses this question by modeling the problem as a Markov Decision Process where the actions correspond to the MBSes deciding which direction to go at traffic intersections; states correspond to the position of MBSes; and rewards correspond to minimization of packet loss in the network. A Monte Carlo Tree Search (MCTS)-based anytime algorithm that produces path plans for multiple base stations while optimizing expected packet loss is proposed. Simulated experiments in the city of Verdun, QC, Canada with varying user equipment (UE) densities and random initial conditions show that the proposed approach consistently outperforms myopic planners, and is able to achieve near-optimal performance.},
  booktitle = {Proceedings - {IEEE} {International} {Conference} on {Robotics} and {Automation}},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  author = {Girdhar, Yogesh and Rivkin, Dmitriy and Wu, Di and Jenkin, Michael and Liu, Xue and Dudek, Gregory},
  year = {2021},
  note = {ISSN: 10504729},
  pages = {4020--4025},
}

Seeing Through your Skin: Recognizing Objects with a Novel Visuotactile Sensor.
Hogan, F. R.; Jenkin, M.; Rezaei-Shoshtari, S.; Girdhar, Y.; Meger, D.; and Dudek, G.
In 2021 IEEE Winter Conference on Applications of Computer Vision (WACV), pages 1217–1226, January 2021. IEEE. arXiv:2011.09552. doi:10.1109/WACV48630.2021.00126.

@inproceedings{Hogan2020,
  title = {Seeing {Through} your {Skin}: {Recognizing} {Objects} with a {Novel} {Visuotactile} {Sensor}},
  copyright = {All rights reserved},
  isbn = {978-1-66540-477-8},
  url = {http://arxiv.org/abs/2011.09552},
  doi = {10.1109/WACV48630.2021.00126},
  abstract = {We introduce a new class of vision-based sensor and associated algorithmic processes that combine visual imaging with high-resolution tactile sensing, all in a uniform hardware and computational architecture. We demonstrate the sensor's efficacy for both multi-modal object recognition and metrology. Object recognition is typically formulated as an unimodal task, but by combining two sensor modalities we show that we can achieve several significant performance improvements. This sensor, named the See-Through-your-Skin sensor (STS), is designed to provide rich multi-modal sensing of contact surfaces. Inspired by recent developments in optical tactile sensing technology, we address a key missing feature of these sensors: the ability to capture a visual perspective of the region beyond the contact surface. Whereas optical tactile sensors are typically opaque, we present a sensor with a semitransparent skin that has the dual capabilities of acting as a tactile sensor and/or as a visual camera depending on its internal lighting conditions. This paper details the design of the sensor, showcases its dual sensing capabilities, and presents a deep learning architecture that fuses vision and touch. We validate the ability of the sensor to classify household objects, recognize fine textures, and infer their physical properties both through numerical simulations and experiments with a smart countertop prototype.},
  booktitle = {2021 {IEEE} {Winter} {Conference} on {Applications} of {Computer} {Vision} ({WACV})},
  publisher = {IEEE},
  author = {Hogan, Francois Robert and Jenkin, Michael and Rezaei-Shoshtari, Sahand and Girdhar, Yogesh and Meger, David and Dudek, Gregory},
  month = jan,
  year = {2021},
  note = {arXiv: 2011.09552},
  pages = {1217--1226},
}

Semitransparent tactile surface sensor and a method of sensing an interaction with an object using the semitransparent tactile surface sensor.
Hogan, F.; Jenkin, M.; Dudek, G. L.; Girdhar, Y.; Rezaei-Shoshtari, S.; and Meger, D.
Patent, 2021.

@patent{Hogan2021,
  title = {Semitransparent tactile surface sensor and a method of sensing an interaction with an object using the semitransparent tactile surface sensor},
  copyright = {All rights reserved},
  author = {Hogan, Francois and Jenkin, Michael and Dudek, Gregory Lewis and Girdhar, Yogesh and Rezaei-Shoshtari, Sahand and Meger, David},
  year = {2021},
  pages = {24},
}

Assessment of attraction and avoidance behaviors of fish in response to the proximity of transiting underwater vehicles.
Campbell, M. D.; Huddleston, A.; Somerton, D.; Clarke, M. E.; Wakefield, W.; Murawski, S.; Taylor, C.; Singh, H.; Girdhar, Y.; and Yoklavich, M.
Fishery Bulletin, 119(4): 216–230. November 2021. doi:10.7755/FB.119.4.2.

@article{Campbell2021,
  title = {Assessment of attraction and avoidance behaviors of fish in response to the proximity of transiting underwater vehicles},
  volume = {119},
  copyright = {All rights reserved},
  issn = {00900656},
  url = {https://spo.nmfs.noaa.gov/content/fishery-bulletin/assessment-attraction-and-avoidance-behaviors-fish-response-proximity},
  doi = {10.7755/FB.119.4.2},
  abstract = {Underwater vehicles have many advantages for sampling fishes; however, estimates can be biased by behavioral responses of organisms. In this experiment a test bed was configured to assess fish response to sampling vehicles and to lend inference to potential avoidance or attraction bias. Fish exhibited various response patterns to the vehicles that allowed for gross classification of species into behavioral guilds. The rigor and persistence of the corresponding responses varied by vehicle, vehicle range and altitude, transect number, and habitat complexity. The effect of each variable is dependent on the behavioral guild of interest, but range was the most consistent predictor of changes in abundance regardless of vehicle. Vehicles that transected at higher relative altitudes off the seafloor and at slower speeds elicited less strong behavioral responses. The test-bed approach allowed for assessment of far-field responses that are important but cannot be observed from the perspective of the sampling vehicle. Despite the success of estimating the behavioral response, calibrating the effect against known densities of fish was not possible in the experiment. However, the method is a robust way for future investigations to develop species-specific response functions for gear calibration and to assist in the proper calculation of fish abundance and density.},
  number = {4},
  journal = {Fishery Bulletin},
  author = {Campbell, Matthew D. and Huddleston, Ariane and Somerton, David and Clarke, M. Elizabeth and Wakefield, Waldo and Murawski, Steve and Taylor, Chris and Singh, Hanumant and Girdhar, Yogesh and Yoklavich, Mary},
  month = nov,
  year = {2021},
  pages = {216--230},
}

Multi-Robot Distributed Semantic Mapping in Unfamiliar Environments through Online Matching of Learned Representations.
Jamieson, S.; Fathian, K.; Khosoussi, K.; How, J. P.; and Girdhar, Y.
In 2021 IEEE International Conference on Robotics and Automation (ICRA), pages 8587–8593, May 2021. IEEE. doi:10.1109/ICRA48506.2021.9561934.

@inproceedings{Jamieson2021,
  title = {Multi-{Robot} {Distributed} {Semantic} {Mapping} in {Unfamiliar} {Environments} through {Online} {Matching} of {Learned} {Representations}},
  copyright = {All rights reserved},
  isbn = {978-1-72819-077-8},
  url = {https://arxiv.org/abs/2103.14805},
  doi = {10.1109/ICRA48506.2021.9561934},
  booktitle = {2021 {IEEE} {International} {Conference} on {Robotics} and {Automation} ({ICRA})},
  publisher = {IEEE},
  author = {Jamieson, Stewart and Fathian, Kaveh and Khosoussi, Kasra and How, Jonathan P. and Girdhar, Yogesh},
  month = may,
  year = {2021},
  pages = {8587--8593},
}

[poster] Evaluation of Semi-supervised Visual Object Tracking Methods For Fully Autonomous In-situ, Tagless Tracking of Marine Animals.
Cai, L.; Hanlon, R.; and Girdhar, Y.
In CV4Animals: Computer Vision for Animal Behavior Tracking and Modeling (CVPR Workshop), Virtual, 2021.

@inproceedings{Cai2021,
  address = {Virtual},
  title = {[poster] {Evaluation} of {Semi}-supervised {Visual} {Object} {Tracking} {Methods} {For} {Fully} {Autonomous} {In}-situ, {Tagless} {Tracking} of {Marine} {Animals}},
  copyright = {All rights reserved},
  booktitle = {{CV4Animals}: {Computer} {Vision} for {Animal} {Behavior} {Tracking} and {Modeling} ({CVPR} {Workshop})},
  author = {Cai, Levi and Hanlon, Roger and Girdhar, Yogesh},
  year = {2021},
}

[poster] Communicating Efficiently to Enable Human-Multi-Robot Collaboration in Space Exploration.
Jamieson, S.; Todd, J. E.; How, J. P.; and Girdhar, Y.
In Proceedings of SpaceCHI: Human-Computer Interaction for Space Exploration (SpaceCHI '21), 2021. Association for Computing Machinery.

@inproceedings{Jamieson2021a,
  title = {[poster] {Communicating} {Efficiently} to {Enable} {Human}-{Multi}-{Robot} {Collaboration} in {Space} {Exploration}},
  copyright = {All rights reserved},
  booktitle = {Proceedings of {SpaceCHI}: {Human}-{Computer} {Interaction} for {Space} {Exploration} ({SpaceCHI} '21)},
  publisher = {Association for Computing Machinery},
  author = {Jamieson, Stewart and Todd, Jessica E. and How, Jonathan P. and Girdhar, Yogesh},
  year = {2021},
}

2020 (2)

Active Reward Learning for Co-Robotic Vision Based Exploration in Bandwidth Limited Environments.
Jamieson, S.; How, J. P.; and Girdhar, Y.
In 2020 IEEE International Conference on Robotics and Automation (ICRA), pages 1806–1812, May 2020. IEEE. doi:10.1109/ICRA40945.2020.9196922.

@inproceedings{Jamieson2020,
  title = {Active {Reward} {Learning} for {Co}-{Robotic} {Vision} {Based} {Exploration} in {Bandwidth} {Limited} {Environments}},
  copyright = {All rights reserved},
  isbn = {978-1-72817-395-5},
  url = {https://arxiv.org/abs/2003.05016},
  doi = {10.1109/ICRA40945.2020.9196922},
  booktitle = {2020 {IEEE} {International} {Conference} on {Robotics} and {Automation} ({ICRA})},
  publisher = {IEEE},
  author = {Jamieson, Stewart and How, Jonathan P. and Girdhar, Yogesh},
  month = may,
  year = {2020},
  pages = {1806--1812},
}

Gaussian-Dirichlet Random Fields for Inference over High Dimensional Categorical Observations.
San Soucie, J. E.; Sosik, H. M.; and Girdhar, Y.
In 2020 IEEE International Conference on Robotics and Automation (ICRA), pages 2924–2931, May 2020. IEEE. doi:10.1109/ICRA40945.2020.9196713.

@inproceedings{Soucie2020,
  title = {Gaussian-{Dirichlet} {Random} {Fields} for {Inference} over {High} {Dimensional} {Categorical} {Observations}},
  copyright = {All rights reserved},
  isbn = {978-1-72817-395-5},
  url = {https://arxiv.org/abs/2003.12120},
  doi = {10.1109/ICRA40945.2020.9196713},
  abstract = {We propose a generative model for the spatio-temporal distribution of high dimensional categorical observations. These are commonly produced by robots equipped with an imaging sensor such as a camera, paired with an image classifier, potentially producing observations over thousands of categories. The proposed approach combines the use of Dirichlet distributions to model sparse co-occurrence relations between the observed categories using a latent variable, and Gaussian processes to model the latent variable's spatio-temporal distribution. Experiments in this paper show that the resulting model is able to efficiently and accurately approximate the temporal distribution of high dimensional categorical measurements such as taxonomic observations of microscopic organisms in the ocean, even in unobserved (held out) locations, far from other samples. This work's primary motivation is to enable deployment of informative path planning techniques over high dimensional categorical fields, which until now have been limited to scalar or low dimensional vector observations.},
  booktitle = {2020 {IEEE} {International} {Conference} on {Robotics} and {Automation} ({ICRA})},
  publisher = {IEEE},
  author = {San Soucie, John E. and Sosik, Heidi M. and Girdhar, Yogesh},
  month = may,
  year = {2020},
  pages = {2924--2931},
}

2019 (4)

Streaming Scene Maps for Co-Robotic Exploration in Bandwidth Limited Environments.
Girdhar, Y.; Cai, L.; Jamieson, S.; McGuire, N.; Flaspohler, G.; Suman, S.; and Claus, B.
In 2019 International Conference on Robotics and Automation (ICRA), pages 7940–7946, May 2019. IEEE. arXiv:1903.03214. doi:10.1109/ICRA.2019.8794132.

@inproceedings{Girdhar2019,
  title = {Streaming {Scene} {Maps} for {Co}-{Robotic} {Exploration} in {Bandwidth} {Limited} {Environments}},
  copyright = {All rights reserved},
  isbn = {978-1-5386-6027-0},
  url = {http://arxiv.org/abs/1903.03214},
  doi = {10.1109/ICRA.2019.8794132},
  abstract = {This paper proposes a bandwidth tunable technique for real-time probabilistic scene modeling and mapping to enable co-robotic exploration in communication constrained environments such as the deep sea. The parameters of the system enable the user to characterize the scene complexity represented by the map, which in turn determines the bandwidth requirements. The approach is demonstrated using an underwater robot that learns an unsupervised scene model of the environment and then uses this scene model to communicate the spatial distribution of various high-level semantic scene constructs to a human operator. Preliminary experiments in an artificially constructed tank environment as well as simulated missions over a 10m$\times$10m coral reef using real data show the tunability of the maps to different bandwidth constraints and science interests. To our knowledge this is the first paper to quantify how the free parameters of the unsupervised scene model impact both the scientific utility of and bandwidth required to communicate the resulting scene model.},
  booktitle = {2019 {International} {Conference} on {Robotics} and {Automation} ({ICRA})},
  publisher = {IEEE},
  author = {Girdhar, Yogesh and Cai, Levi and Jamieson, Stewart and McGuire, Nathan and Flaspohler, Genevieve and Suman, Stefano and Claus, Brian},
  month = may,
  year = {2019},
  note = {arXiv: 1903.03214},
  pages = {7940--7946},
}

\n \n\n \n \n \n \n \n \n Information-Guided Robotic Maximum Seek-and-Sample in Partially Observable Continuous Environments.\n \n \n \n \n\n\n \n Flaspohler, G.; Preston, V.; Michel, A. P. M.; Girdhar, Y.; and Roy, N.\n\n\n \n\n\n\n IEEE Robotics and Automation Letters, 4(4): 3782–3789. October 2019.\n \n\n\n\n
@article{Flaspohler2019,
  title = {Information-{Guided} {Robotic} {Maximum} {Seek}-and-{Sample} in {Partially} {Observable} {Continuous} {Environments}},
  volume = {4},
  copyright = {All rights reserved},
  issn = {2377-3766},
  url = {https://ieeexplore.ieee.org/document/8767964/},
  doi = {10.1109/LRA.2019.2929997},
  number = {4},
  journal = {IEEE Robotics and Automation Letters},
  author = {Flaspohler, Genevieve and Preston, Victoria and Michel, Anna Pauline Miranda and Girdhar, Yogesh and Roy, Nicholas},
  month = oct,
  year = {2019},
  pages = {3782--3789},
}
[poster] WARPAUV: A low-cost, vision-guided AUV for robotics research.
McGuire, N.; Cai, L.; Belani, M.; Claus, B.; and Girdhar, Y.
In Northeast Robotics Colloquium, Philadelphia, PA, 2019.
@inproceedings{McGuire2019,
  address = {Philadelphia, PA},
  title = {[poster] {WARPAUV}: {A} low-cost, vision-guided {AUV} for robotics research},
  copyright = {All rights reserved},
  booktitle = {Northeast {Robotics} {Colloquium}},
  author = {McGuire, Nathan and Cai, Levi and Belani, Manyu and Claus, Brian and Girdhar, Yogesh},
  year = {2019},
}
[poster] Thinking like a scientist: Gaussian-Dirichlet Random Fields for Spatiotemporal Categorical Inference.
San Soucie, J. E.; Sosik, H. M.; and Girdhar, Y.
In Northeast Robotics Colloquium, 2019.
@inproceedings{SanSoucie2019,
  title = {[poster] {Thinking} like a scientist: {Gaussian}-{Dirichlet} {Random} {Fields} for {Spatiotemporal} {Categorical} {Inference}},
  copyright = {All rights reserved},
  booktitle = {Northeast {Robotics} {Colloquium}},
  author = {San Soucie, J.E. and Sosik, H.M. and Girdhar, Y.},
  year = {2019},
}
2018 (3)
Near-optimal Irrevocable Sample Selection for Periodic Data Streams with Applications to Marine Robotics.
Flaspohler, G.; Roy, N.; and Girdhar, Y.
In 2018 IEEE International Conference on Robotics and Automation (ICRA), pages 1–8, May 2018. IEEE.
@inproceedings{Flaspohler2018,
  title = {Near-optimal {Irrevocable} {Sample} {Selection} for {Periodic} {Data} {Streams} with {Applications} to {Marine} {Robotics}},
  copyright = {All rights reserved},
  isbn = {978-1-5386-3081-5},
  url = {https://ieeexplore.ieee.org/document/8460709/},
  doi = {10.1109/ICRA.2018.8460709},
  booktitle = {2018 {IEEE} {International} {Conference} on {Robotics} and {Automation} ({ICRA})},
  publisher = {IEEE},
  author = {Flaspohler, Genevieve and Roy, Nicholas and Girdhar, Yogesh},
  month = may,
  year = {2018},
  pages = {1--8},
}
Approximate Distributed Spatiotemporal Topic Models for Multi-Robot Terrain Characterization.
Doherty, K.; Flaspohler, G.; Roy, N.; and Girdhar, Y.
In 2018 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 3730–3737, October 2018. IEEE.
@inproceedings{Doherty2018,
  title = {Approximate {Distributed} {Spatiotemporal} {Topic} {Models} for {Multi}-{Robot} {Terrain} {Characterization}},
  copyright = {All rights reserved},
  isbn = {978-1-5386-8094-0},
  url = {https://ieeexplore.ieee.org/document/8594442/},
  doi = {10.1109/IROS.2018.8594442},
  booktitle = {2018 {IEEE}/{RSJ} {International} {Conference} on {Intelligent} {Robots} and {Systems} ({IROS})},
  publisher = {IEEE},
  author = {Doherty, Kevin and Flaspohler, Genevieve and Roy, Nicholas and Girdhar, Yogesh},
  month = oct,
  year = {2018},
  pages = {3730--3737},
}
Abstract: Unsupervised learning techniques, such as Bayesian topic models, are capable of discovering latent structure directly from raw data. These unsupervised models can endow robots with the ability to learn from their observations without human supervision, and then use the learned models for tasks such as autonomous exploration, adaptive sampling, or surveillance. This paper extends single-robot topic models to the domain of multiple robots. The main difficulty of this extension lies in achieving and maintaining global consensus among the unsupervised models learned locally by each robot. This is especially challenging for multi-robot teams operating in communication-constrained environments, such as marine robots. We present a novel approach for multi-robot distributed learning in which each robot maintains a local topic model to categorize its observations and model parameters are shared to achieve global consensus. We apply a combinatorial optimization procedure that combines local robot topic distributions into a globally consistent model based on topic similarity, which we find mitigates topic drift when compared to a baseline approach that matches topics naïvely. We evaluate our methods experimentally by demonstrating multi-robot underwater terrain characterization using simulated missions on real seabed imagery. Our proposed method achieves similar model quality under bandwidth constraints to that achieved by models that continuously communicate, despite requiring less than one percent of the data transmission needed for continuous communication.
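For intuition, one simple way to realize the combinatorial matching step described above is a minimum-cost assignment between two robots' topic-word distributions. The sketch below uses the Hungarian algorithm with Jensen-Shannon distance as the cost; the function name and the choice of distance are assumptions for illustration, not the paper's exact procedure.

import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import jensenshannon

def match_topics(phi_a, phi_b):
    """Align robot B's topics to robot A's by minimizing total
    Jensen-Shannon distance between topic-word distributions.
    phi_a, phi_b: (K, V) arrays whose rows are distributions."""
    K = phi_a.shape[0]
    cost = np.array([[jensenshannon(phi_a[i], phi_b[j])
                      for j in range(K)] for i in range(K)])
    rows, cols = linear_sum_assignment(cost)
    return cols  # cols[i] = index of B's topic matched to A's topic i

After matching, robot B would relabel its topics via the returned permutation before merging counts, which is the kind of alignment that mitigates the topic drift the abstract mentions.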
Detection of unanticipated faults for autonomous underwater vehicles using online topic models.
Raanan, B.; Bellingham, J.; Zhang, Y.; Kemp, M.; Kieft, B.; Singh, H.; and Girdhar, Y.
Journal of Field Robotics, 35(5): 705–716. August 2018.
@article{Raanan2017,
  title = {Detection of unanticipated faults for autonomous underwater vehicles using online topic models},
  volume = {35},
  copyright = {All rights reserved},
  issn = {15564959},
  url = {http://doi.wiley.com/10.1002/rob.21771},
  doi = {10.1002/rob.21771},
  number = {5},
  journal = {Journal of Field Robotics},
  author = {Raanan, Ben-Yair and Bellingham, James and Zhang, Yanwu and Kemp, Mathieu and Kieft, Brian and Singh, Hanumant and Girdhar, Yogesh},
  month = aug,
  year = {2018},
  pages = {705--716},
}
2017 (7)
Learning seasonal phytoplankton communities with topic models.
Kalmbach, A.; Sosik, H.; Dudek, G.; and Girdhar, Y.
arXiv, 2017.
@misc{Kalmbach2017a,
  title = {Learning seasonal phytoplankton communities with topic models},
  copyright = {All rights reserved},
  author = {Kalmbach, A. and Sosik, H.M. and Dudek, G. and Girdhar, Y.},
  year = {2017},
  note = {Publication Title: arXiv},
}
Abstract: In this work we develop and demonstrate a probabilistic generative model for phytoplankton communities. The proposed model takes counts of a set of phytoplankton taxa in a timeseries as its training data, and models communities by learning sparse co-occurrence structure between the taxa. Our model is probabilistic, where communities are represented by probability distributions over the species, and each time-step is represented by a probability distribution over the communities. The proposed approach uses a non-parametric, spatiotemporal topic model to encourage the communities to form an interpretable representation of the data, without making strong assumptions about the communities. We demonstrate the quality and interpretability of our method by its ability to improve the performance of a simplistic regression model. We show that simple linear regression is sufficient to predict the community distribution learned by our method, and therefore the taxon distributions, from a set of naively chosen environment variables. In contrast, a similar regression model is insufficient to predict the taxon distributions directly or through PCA with the same level of accuracy.
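A minimal sketch of the regression check described above, using synthetic stand-in arrays (all shapes and data here are assumptions for illustration): the learned per-time-step community mixture is predicted from environment variables with ordinary linear regression, and multiplying predicted mixtures by the community-taxon matrix recovers implied taxon distributions.

import numpy as np
from sklearn.linear_model import LinearRegression

rng = np.random.default_rng(0)
T, D, K, V = 200, 4, 5, 30          # time steps, env vars, communities, taxa
env = rng.random((T, D))            # placeholder environmental variables
theta = rng.dirichlet(np.ones(K), size=T)    # placeholder community mixtures
phi = rng.dirichlet(np.ones(V), size=K)      # placeholder community-taxon dists

reg = LinearRegression().fit(env, theta)     # env -> community mixture
taxa_hat = reg.predict(env) @ phi            # implied taxon distributions
print(taxa_hat.shape)                        # (T, V); note the linear
# predictions are not constrained to the simplex, which is fine for
# the kind of rough predictability check the abstract describes.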
[poster] Learning Seasonal Phytoplankton Communities with Topic Models.
Kalmbach, A.; Sosik, H. M.; Dudek, G.; and Girdhar, Y.
In OCEANS MTS/IEEE, pages 7, Anchorage, 2017.
@inproceedings{Kalmbach2017b,
  address = {Anchorage},
  title = {[poster] {Learning} {Seasonal} {Phytoplankton} {Communities} with {Topic} {Models}},
  copyright = {All rights reserved},
  url = {https://arxiv.org/abs/1711.09013},
  booktitle = {{OCEANS} {MTS}/{IEEE}},
  author = {Kalmbach, Arnold and Sosik, Heidi M. and Dudek, Gregory and Girdhar, Yogesh},
  year = {2017},
  pages = {7},
}
Feature discovery and visualization of robot mission data using convolutional autoencoders and Bayesian nonparametric topic models.
Flaspohler, G.; Roy, N.; and Girdhar, Y.
In 2017 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 1–8, September 2017. IEEE.
@inproceedings{Flaspohler2017,
  title = {Feature discovery and visualization of robot mission data using convolutional autoencoders and {Bayesian} nonparametric topic models},
  copyright = {All rights reserved},
  isbn = {978-1-5386-2682-5},
  url = {http://ieeexplore.ieee.org/document/8202130/},
  doi = {10.1109/IROS.2017.8202130},
  booktitle = {2017 {IEEE}/{RSJ} {International} {Conference} on {Intelligent} {Robots} and {Systems} ({IROS})},
  publisher = {IEEE},
  author = {Flaspohler, Genevieve and Roy, Nicholas and Girdhar, Yogesh},
  month = sep,
  year = {2017},
  pages = {1--8},
}
[abstract] A convolutional neural network based approach for classifying delphinidae vocal repertoire.
Flaspohler, G. E.; Silva, T.; Mooney, T. A.; and Girdhar, Y.
The Journal of the Acoustical Society of America, 141(5): 3864–3864. May 2017.
@article{Flaspohler2017a,
  title = {[abstract] {A} convolutional neural network based approach for classifying delphinidae vocal repertoire},
  volume = {141},
  copyright = {All rights reserved},
  issn = {0001-4966},
  url = {http://asa.scitation.org/doi/10.1121/1.4988632},
  doi = {10.1121/1.4988632},
  number = {5},
  journal = {The Journal of the Acoustical Society of America},
  author = {Flaspohler, Genevieve E. and Silva, Tammy and Mooney, T Aran and Girdhar, Yogesh},
  month = may,
  year = {2017},
  pages = {3864--3864},
}
[poster] Unsupervised Spatial-Semantic Maps for Human-Robot Collaboration in Communication-Constrained Environments.
Doherty, K.; and Girdhar, Y.
In Intelligent Robots and Systems (IROS) (Abstract-Only / Poster track), pages 1, 2017.
@inproceedings{Doherty2017,
  title = {[poster] {Unsupervised} {Spatial}-{Semantic} {Maps} for {Human}-{Robot} {Collaboration} in {Communication}-{Constrained} {Environments}},
  copyright = {All rights reserved},
  booktitle = {Intelligent {Robots} and {Systems} ({IROS}) ({Abstract}-{Only} / {Poster} track)},
  author = {Doherty, Kevin and Girdhar, Yogesh},
  year = {2017},
  pages = {1},
}
[poster] Vision based real-time fish detection using convolutional neural network.
Sung, M.; Yu, S.; and Girdhar, Y.
In OCEANS 2017 - Aberdeen, pages 1–6, Aberdeen, June 2017. IEEE.
@inproceedings{Sung2017,
  address = {Aberdeen},
  title = {[poster] {Vision} based real-time fish detection using convolutional neural network},
  copyright = {All rights reserved},
  isbn = {978-1-5090-5278-3},
  url = {http://ieeexplore.ieee.org/document/8084889/},
  doi = {10.1109/OCEANSE.2017.8084889},
  booktitle = {{OCEANS} 2017 - {Aberdeen}},
  publisher = {IEEE},
  author = {Sung, Minsung and Yu, Son-Cheol and Girdhar, Yogesh},
  month = jun,
  year = {2017},
  pages = {1--6},
}
Phytoplankton hotspot prediction with an unsupervised spatial community model.
Kalmbach, A.; Girdhar, Y.; Sosik, H. M.; and Dudek, G.
In 2017 IEEE International Conference on Robotics and Automation (ICRA), pages 4906–4913, May 2017. IEEE.
@inproceedings{Kalmbach2017,
  title = {Phytoplankton hotspot prediction with an unsupervised spatial community model},
  copyright = {All rights reserved},
  isbn = {978-1-5090-4633-1},
  url = {https://arxiv.org/abs/1703.07309},
  doi = {10.1109/ICRA.2017.7989568},
  booktitle = {2017 {IEEE} {International} {Conference} on {Robotics} and {Automation} ({ICRA})},
  publisher = {IEEE},
  author = {Kalmbach, Arnold and Girdhar, Yogesh and Sosik, Heidi M. and Dudek, Gregory},
  month = may,
  year = {2017},
  pages = {4906--4913},
}
Abstract: Many interesting natural phenomena are sparsely distributed and discrete. Locating the hotspots of such sparsely distributed phenomena is often difficult because their density gradient is likely to be very noisy. We present a novel approach to this search problem, where we model the co-occurrence relations between a robot's observations with a Bayesian nonparametric topic model. This approach makes it possible to produce a robust estimate of the spatial distribution of the target, even in the absence of direct target observations. We apply the proposed approach to the problem of finding the spatial locations of the hotspots of a specific phytoplankton taxon in the ocean. We use classified image data from Imaging FlowCytobot (IFCB), which automatically measures individual microscopic cells and colonies of cells. Given these individual taxon-specific observations, we learn a phytoplankton community model that characterizes the co-occurrence relations between taxa. We present experiments with simulated robot missions drawn from real observation data collected during a research cruise traversing the US Atlantic coast. Our results show that the proposed approach outperforms nearest neighbor and k-means based methods for predicting the spatial distribution of hotspots from in-situ observations.
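The key mechanism above, inferring a sparse target's probability from its co-occurring taxa, can be sketched as follows for a fixed, already-learned topic-taxon matrix. The EM-style mixture update below is a stand-in for the paper's Bayesian nonparametric inference and is an assumption, not their algorithm.

import numpy as np

def target_probability(counts, phi, target, alpha=0.1, n_iter=50):
    """Estimate p(target taxon) at a location from counts of the
    *other* taxa, under a fixed topic-taxon matrix phi (K x V).
    Simple EM for a multinomial mixture with fixed components."""
    K, V = phi.shape
    theta = np.full(K, 1.0 / K)            # local community mixture
    obs = counts.astype(float).copy()
    obs[target] = 0.0                      # the target itself is unobserved
    for _ in range(n_iter):
        r = theta[:, None] * phi           # (K, V) topic responsibilities
        r /= r.sum(axis=0, keepdims=True) + 1e-12
        theta = (r * obs).sum(axis=1) + alpha
        theta /= theta.sum()
    return float(theta @ phi[:, target])   # implied target probability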
2016 (9)
Anomaly detection in unstructured environments using Bayesian nonparametric scene modeling.
Girdhar, Y.; Cho, W.; Campbell, M.; Pineda, J.; Clarke, E.; and Singh, H.
In 2016 IEEE International Conference on Robotics and Automation (ICRA), pages 2651–2656, May 2016. IEEE. arXiv: 1509.07979.
@inproceedings{Girdhar2015,
  title = {Anomaly detection in unstructured environments using {Bayesian} nonparametric scene modeling},
  copyright = {All rights reserved},
  isbn = {978-1-4673-8026-3},
  url = {http://ieeexplore.ieee.org/document/7487424/},
  doi = {10.1109/ICRA.2016.7487424},
  booktitle = {2016 {IEEE} {International} {Conference} on {Robotics} and {Automation} ({ICRA})},
  publisher = {IEEE},
  author = {Girdhar, Yogesh and Cho, Walter and Campbell, Matthew and Pineda, Jesus and Clarke, Elizabeth and Singh, Hanumant},
  month = may,
  year = {2016},
  note = {arXiv: 1509.07979},
  pages = {2651--2656},
}
Abstract: This paper explores the use of a Bayesian non-parametric topic modeling technique for the purpose of anomaly detection in video data. We present results from two experiments. The first experiment shows that the proposed technique is automatically able to characterize the underlying terrain and detect anomalous flora in image data collected by an underwater robot. The second experiment shows that the same technique can be used on images from a static camera in a dynamic unstructured environment. The second dataset consists of video data from a static seafloor camera, capturing images of a busy coral reef. The proposed technique was able to detect all three instances of an underwater vehicle passing in front of the camera, amongst many other observations of fishes, debris, lighting changes due to surface waves, and benthic flora.
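A minimal sketch of one way to score anomalies in this framework, assuming per-frame visual-word counts and an already-fit topic model: frames whose words are poorly predicted (high perplexity) get flagged. The variable names and thresholding are illustrative assumptions.

import numpy as np

def frame_perplexity(word_counts, theta, phi):
    """Perplexity of one frame's visual words under a topic model.
    theta: (K,) topic mixture for the frame; phi: (K, V) topic-word
    distributions. High perplexity flags anomalous observations."""
    p_words = theta @ phi                  # (V,) predictive word probs
    n = word_counts.sum()
    log_lik = (word_counts * np.log(p_words + 1e-12)).sum()
    return float(np.exp(-log_lik / max(n, 1)))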
Subsea Fauna Enumeration Using Vision-Based Marine Robots.
Koreitem, K.; Girdhar, Y.; Cho, W.; Singh, H.; Pineda, J.; and Dudek, G.
In 2016 13th Conference on Computer and Robot Vision (CRV), pages 101–108, June 2016. IEEE.
@inproceedings{Koreitem2016,
  title = {Subsea {Fauna} {Enumeration} {Using} {Vision}-{Based} {Marine} {Robots}},
  copyright = {All rights reserved},
  isbn = {978-1-5090-2491-9},
  url = {http://ieeexplore.ieee.org/document/7801509/},
  doi = {10.1109/CRV.2016.53},
  booktitle = {2016 13th {Conference} on {Computer} and {Robot} {Vision} ({CRV})},
  publisher = {IEEE},
  author = {Koreitem, Karim and Girdhar, Yogesh and Cho, Walter and Singh, Hanumant and Pineda, Jesus and Dudek, Gregory},
  month = jun,
  year = {2016},
  pages = {101--108},
}
[poster] Unsupervised Lifelong Learning for a Curious Underwater Exploration Robot.
Girdhar, Y.; and Singh, H.
In ICRA 2016 Workshop: AI for Long-term Autonomy, pages 4, 2016.
@inproceedings{Girdhar2016,
  title = {[poster] {Unsupervised} {Lifelong} {Learning} for a {Curious} {Underwater} {Exploration} {Robot}},
  copyright = {All rights reserved},
  booktitle = {{ICRA} 2016 {Workshop}: {AI} for {Long}-term {Autonomy}},
  author = {Girdhar, Yogesh and Singh, Hanumant},
  year = {2016},
  pages = {4},
}
[poster] Automatic fault diagnosis for autonomous underwater vehicles using online topic models.
Raanan, B.; Bellingham, J. G.; Zhang, Y.; Kemp, M.; Kieft, B.; Singh, H.; and Girdhar, Y.
In OCEANS 2016 MTS/IEEE Monterey, pages 1–6, September 2016. IEEE.
@inproceedings{Raanan2016,
  title = {[poster] {Automatic} fault diagnosis for autonomous underwater vehicles using online topic models},
  copyright = {All rights reserved},
  isbn = {978-1-5090-1537-5},
  url = {http://ieeexplore.ieee.org/document/7761139/},
  doi = {10.1109/OCEANS.2016.7761139},
  booktitle = {{OCEANS} 2016 {MTS}/{IEEE} {Monterey}},
  publisher = {IEEE},
  author = {Raanan, Ben-Yair and Bellingham, James G. and Zhang, Yanwu and Kemp, Mathieu and Kieft, Brian and Singh, Hanumant and Girdhar, Yogesh},
  month = sep,
  year = {2016},
  pages = {1--6},
}
Abstract: As the capabilities of autonomous underwater vehicles (AUVs) improve, the missions become longer, riskier, and more complex. For AUVs to succeed in complex missions, they must be reliable in the face of subsystem failure and environmental challenges. In practice, fault detection activities carried out by most AUVs employ a rule-based emergency abort system that is triggered by specific events. AUVs equipped with the ability to diagnose faults and reason about mitigation actions in real time could improve their survivability and increase the value of individual deployments by replanning their mission in response to failures. In this paper, we focus on AUV autonomy as it pertains to self-perception and health monitoring and argue that automatic classification of state-sensor data represents an important enabling capability. We apply an online Bayesian nonparametric topic modeling technique to state-sensor data in order to automatically characterize the performance patterns of an AUV, then demonstrate how, in combination with operator-supplied semantic labels, these patterns can be used for fault detection and diagnosis by means of a nearest-neighbor classifier. The method is applied in post-processing to diagnose faults that led to the temporary loss of the Monterey Bay Aquarium Research Institute's Tethys long-range AUV in two separate deployments. Our results show that the method is able to accurately identify and characterize patterns that correspond to various states of the AUV, and classify faults with a high probability of detection and no false detections.
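The final classification step described above, nearest neighbors over topic-mixture summaries with operator-supplied labels, might look like the following sketch; the data here are synthetic stand-ins, not MBARI vehicle logs.

import numpy as np
from sklearn.neighbors import KNeighborsClassifier

# Stand-in data: each row is a topic-mixture summary of the vehicle's
# state-sensor stream over a time window; labels come from an operator.
X = np.random.dirichlet(np.ones(8), size=300)     # hypothetical mixtures
y = np.random.choice(["nominal", "fault"], size=300)

clf = KNeighborsClassifier(n_neighbors=5).fit(X, y)
print(clf.predict(X[:3]))      # classify new windows by nearest patterns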
Towards persistent cooperative marine robotics.
Claus, B.; Kinsey, J.; and Girdhar, Y.
In 2016 IEEE/OES Autonomous Underwater Vehicles (AUV), pages 416–422, November 2016. IEEE.
@inproceedings{Claus2016,
  title = {Towards persistent cooperative marine robotics},
  copyright = {All rights reserved},
  isbn = {978-1-5090-2442-1},
  url = {http://ieeexplore.ieee.org/document/7778706/},
  doi = {10.1109/AUV.2016.7778706},
  booktitle = {2016 {IEEE}/{OES} {Autonomous} {Underwater} {Vehicles} ({AUV})},
  publisher = {IEEE},
  author = {Claus, Brian and Kinsey, James and Girdhar, Yogesh},
  month = nov,
  year = {2016},
  pages = {416--422},
}
Abstract: This work describes an ongoing effort to derive methods for collectively directing the trajectories, velocities, communication rates, and sampling rates of a heterogeneous group of vehicles according to the required navigational accuracy, energy consumption, communication performance, and observational goals. These methods are being experimentally validated through field trials during the Summer and Fall of 2016. Initial results demonstrate the utility of using fine-scale regional oceanographic models as a tool to locate features of interest; inform the spatial extents, bandwidth, and power usage of both satellite and acoustic communication methods; and provide data on the performance and energy usage of the acoustically aided and dead-reckoned navigation methods.
Learning deep-sea substrate types with visual topic models.
Kalmbach, A.; Hoeberechts, M.; Albu, A. B.; Glotin, H.; Paris, S.; and Girdhar, Y.
In 2016 IEEE Winter Conference on Applications of Computer Vision (WACV), pages 1–9, March 2016. IEEE.
@inproceedings{Kalmbach2016,
  title = {Learning deep-sea substrate types with visual topic models},
  copyright = {All rights reserved},
  isbn = {978-1-5090-0641-0},
  url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=7477600},
  doi = {10.1109/WACV.2016.7477600},
  booktitle = {2016 {IEEE} {Winter} {Conference} on {Applications} of {Computer} {Vision} ({WACV})},
  publisher = {IEEE},
  author = {Kalmbach, Arnold and Hoeberechts, Maia and Albu, Alexandra Branzan and Glotin, Herve and Paris, Sebastien and Girdhar, Yogesh},
  month = mar,
  year = {2016},
  pages = {1--9},
}
Control, localization and human interaction with an autonomous lighter-than-air performer.
St-Onge, D.; Brèches, P.; Sharf, I.; Reeves, N.; Rekleitis, I.; Abouzakhm, P.; Girdhar, Y.; Harmat, A.; Dudek, G.; and Giguère, P.
Robotics and Autonomous Systems, 88: 165–186. February 2016.
@article{St-Onge2016,
  title = {Control, localization and human interaction with an autonomous lighter-than-air performer},
  volume = {88},
  copyright = {All rights reserved},
  issn = {09218890},
  url = {https://linkinghub.elsevier.com/retrieve/pii/S0921889016306674},
  doi = {10.1016/j.robot.2016.10.013},
  journal = {Robotics and Autonomous Systems},
  author = {St-Onge, David and Brèches, Pierre-Yves and Sharf, Inna and Reeves, Nicolas and Rekleitis, Ioannis and Abouzakhm, Patrick and Girdhar, Yogesh and Harmat, Adam and Dudek, Gregory and Giguère, Philippe},
  month = feb,
  year = {2016},
  pages = {165--186},
}
Modeling curiosity in a mobile robot for long-term autonomous exploration and monitoring.
Girdhar, Y.; and Dudek, G.
Autonomous Robots, 40(7): 1267–1278. October 2016.
@article{Girdhar2015a,
  title = {Modeling curiosity in a mobile robot for long-term autonomous exploration and monitoring},
  volume = {40},
  copyright = {All rights reserved},
  issn = {0929-5593},
  url = {http://link.springer.com/10.1007/s10514-015-9500-x},
  doi = {10.1007/s10514-015-9500-x},
  number = {7},
  journal = {Autonomous Robots},
  author = {Girdhar, Yogesh and Dudek, Gregory},
  month = oct,
  year = {2016},
  pages = {1267--1278},
}
Abstract: This paper presents a novel approach to modeling curiosity in a mobile robot, which is useful for monitoring and adaptive data collection tasks, especially in the context of long-term autonomous missions where pre-programmed missions are likely to have limited utility. We use a realtime topic modeling technique to build a semantic perception model of the environment, which we then use to plan a path through the locations in the world with high semantic information content. The life-long learning behavior of the proposed perception model makes it suitable for long-term exploration missions. We validate the approach using simulated exploration experiments on aerial and underwater data, and demonstrate an implementation on the Aqua underwater robot in a variety of scenarios. We find that the proposed exploration paths, biased towards locations with high topic perplexity, produce better terrain models with high discriminative power. Moreover, we show that the proposed algorithm, implemented on the Aqua robot, is able to do tasks such as coral reef inspection, diver following, and sea floor exploration, without any prior training or preparation.
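A minimal sketch of the perplexity bias at the heart of the planner described above, under assumed inputs (per-cell topic mixtures theta, topic-word distributions phi, and per-cell word counts; all names are illustrative): the robot greedily steps to the candidate cell whose observations the current model predicts worst.

import numpy as np

def pick_next_cell(candidates, theta, phi, word_counts):
    """Greedy curiosity step (illustrative sketch): move to the candidate
    cell whose visual words have the highest perplexity under the current
    topic model. theta: (C, K) per-cell mixtures; phi: (K, V);
    word_counts: (C, V)."""
    scores = []
    for c in candidates:
        p = theta[c] @ phi                           # predictive word dist
        n = word_counts[c].sum()
        log_lik = (word_counts[c] * np.log(p + 1e-12)).sum()
        scores.append(np.exp(-log_lik / max(n, 1)))  # perplexity
    return candidates[int(np.argmax(scores))]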
A crab swarm at an ecological hotspot: patchiness and population density from AUV observations at a coastal, tropical seamount.
Pineda, J.; Cho, W.; Starczak, V.; Govindarajan, A. F.; Guzman, H. M.; Girdhar, Y.; Holleman, R. C.; Churchill, J.; Singh, H.; and Ralston, D. K.
PeerJ, 4: e1770. April 2016.
@article{Pineda2015,
  title = {A crab swarm at an ecological hotspot: patchiness and population density from {AUV} observations at a coastal, tropical seamount},
  volume = {4},
  copyright = {All rights reserved},
  issn = {2167-8359},
  url = {https://peerj.com/articles/1770},
  doi = {10.7717/peerj.1770},
  journal = {PeerJ},
  author = {Pineda, Jesús and Cho, Walter and Starczak, Victoria and Govindarajan, Annette F. and Guzman, Héctor M. and Girdhar, Yogesh and Holleman, Rusty C and Churchill, James and Singh, Hanumant and Ralston, David K},
  month = apr,
  year = {2016},
  pages = {e1770},
}
Abstract: A research cruise to Hannibal Bank, a seamount and an ecological hotspot in the coastal eastern tropical Pacific Ocean off Panama, explored the zonation, biodiversity, and the ecological processes that contribute to the seamount's elevated biomass. Here we describe the spatial structure of a benthic anomuran red crab population, using submarine video and autonomous underwater vehicle (AUV) photographs. High density aggregations and a swarm of red crabs were associated with a dense turbid layer 4–10 m above the bottom. The high density aggregations were constrained to 355–385 m water depth over the Northwest flank of the seamount, although the crabs also occurred at lower densities in shallower waters (∼280 m) and in another location of the seamount. The crab aggregations occurred in hypoxic water, with oxygen levels of 0.04 ml/l. Barcoding of Hannibal red crabs, and pelagic red crabs sampled in a mass stranding event in 2015 at a beach in San Diego, California, USA, revealed that the Panamanian and the Californian crabs are likely the same species, Pleuroncodes planipes, and these findings represent an extension of the southern end-range of this species. Measurements along a 1.6 km transect revealed three high density aggregations, with the highest density up to 78 crabs/m², and that the crabs were patchily distributed. Crab density peaked in the middle of the patch, a density structure similar to that of swarming insects.
2015 (3)
Gibbs Sampling Strategies for Semantic Perception of Streaming Video Data.
Girdhar, Y.; and Dudek, G.
ArXiv e-prints, 7 pages, 2015. arXiv: 1509.03242.
@article{Girdhar2015Gibbs,
  title = {Gibbs {Sampling} {Strategies} for {Semantic} {Perception} of {Streaming} {Video} {Data}},
  copyright = {All rights reserved},
  url = {http://arxiv.org/abs/1509.03242},
  journal = {ArXiv e-prints},
  author = {Girdhar, Yogesh and Dudek, Gregory},
  year = {2015},
  note = {arXiv: 1509.03242},
  pages = {7},
}
Abstract: Topic modeling of streaming sensor data can be used for high level perception of the environment by a mobile robot. In this paper we compare various Gibbs sampling strategies for topic modeling of streaming spatiotemporal data, such as video captured by a mobile robot. Compared to previous work on online topic modeling, such as o-LDA and incremental LDA, we show that the proposed technique results in lower online and final perplexity, given the realtime constraints.
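For reference, the basic collapsed Gibbs update that the compared strategies build on looks like the sketch below. This is a generic LDA-style sweep, not ROST itself: the contribution discussed above lies in which tokens get revisited and when under realtime constraints, and in the spatiotemporal neighborhoods, neither of which this sketch captures.

import numpy as np

def gibbs_sweep(z, words, doc_of, n_dk, n_kw, n_k, alpha, beta, rng):
    """One pass of collapsed Gibbs sampling over all tokens.
    z[i]: current topic of token i; words[i]: its word id;
    doc_of[i]: its document (or spatiotemporal cell) id.
    n_dk, n_kw, n_k: doc-topic, topic-word, and topic count tables."""
    K, V = n_kw.shape
    for i in range(len(z)):
        w, d, k = words[i], doc_of[i], z[i]
        n_dk[d, k] -= 1; n_kw[k, w] -= 1; n_k[k] -= 1    # remove token i
        p = (n_dk[d] + alpha) * (n_kw[:, w] + beta) / (n_k + V * beta)
        k = rng.choice(K, p=p / p.sum())                  # resample topic
        z[i] = k
        n_dk[d, k] += 1; n_kw[k, w] += 1; n_k[k] += 1    # add token back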
Gaining Insight Into Films via Topic Modeling & Visualization.
Rabinovich, M.; and Girdhar, Y.
Parsons Journal for Information Mapping, 7(1): 1–8. 2015.
@article{Rabinovich2015,
  title = {Gaining {Insight} {Into} {Films} via {Topic} {Modeling} \& {Visualization}},
  volume = {7},
  copyright = {All rights reserved},
  url = {http://misharabinovich.com/soyummy.html},
  number = {1},
  journal = {Parsons Journal for Information Mapping},
  author = {Rabinovich, Misha and Girdhar, Yogesh},
  year = {2015},
  pages = {1--8},
}
[abstract] Towards Quantifying Avoidance and Attraction of Reef Fish to Mobile Video Survey Platforms.
Campbell, M.; Frappier, A.; Somerton, D.; Clarke, M. E.; Murawski, S.; Taylor, J. C.; Wakefield, W. W.; Singh, H.; Jacques, D. A.; Ebert, E.; Girdhar, Y.; Fruh, E.; Taylor, J.; and Lembke, C.
In 145th Annual Meeting of the American Fisheries Society, 2015.
@inproceedings{Campbell2015,
  title = {[abstract] {Towards} {Quantifying} {Avoidance} and {Attraction} of {Reef} {Fish} to {Mobile} {Video} {Survey} {Platforms}},
  copyright = {All rights reserved},
  booktitle = {145th {Annual} {Meeting} of the {American} {Fisheries} {Society}},
  author = {Campbell, Matthew and Frappier, Ariane and Somerton, David and Clarke, M. Elizabeth and Murawski, Steven and Taylor, J. Christopher and Wakefield, W. Waldo and Singh, Hanumant and Jacques, Dale A. and Ebert, Erik and Girdhar, Yogesh and Fruh, Erica and Taylor, Jeremy and Lembke, Chad},
  year = {2015},
}
2014 (4)
ROST: Realtime Online Spatiotemporal Topic Modeling.
Girdhar, Y.
2014.
@misc{Girdhar,
  title = {{ROST}: {Realtime} {Online} {Spatiotemporal} {Topic} {Modeling}},
  copyright = {All rights reserved},
  url = {http://cim.mcgill.ca/~yogesh/rost/},
  author = {Girdhar, Yogesh},
  year = {2014},
}
Unsupervised Semantic Perception, Summarization, and Autonomous Exploration for Robots in Unstructured Environments.
Girdhar, Y.
Ph.D. Thesis, McGill University, 2014.
@phdthesis{Girdhar2014,
  title = {Unsupervised {Semantic} {Perception}, {Summarization}, and {Autonomous} {Exploration} for {Robots} in {Unstructured} {Environments}},
  copyright = {All rights reserved},
  url = {http://digitool.library.mcgill.ca:80/R/-?func=dbin-jump-full&object_id=129641&silo_library=GEN01},
  school = {McGill University},
  author = {Girdhar, Yogesh},
  year = {2014},
}
Abstract: This thesis explores the challenges involved in building autonomous exploration and monitoring systems, and makes contributions on four fronts: describing the semantic context of the collected data, summarizing this information, deciding where to collect this data, and making optimal online irrevocable decisions for physical sample collection. Making high level decisions based on the environmental context of a robot's location requires that we first describe what is being observed in a semantic space with a higher level of abstraction than the low level sensor reading. ROST, a realtime online spatiotemporal topic modeling technique that we develop in this thesis, solves the problem of obtaining such high level descriptors. Topics in this case represent the latent causes (such as objects and terrains) which produce these observations. ROST extends previous work on topic modeling by efficiently taking into account the spatiotemporal context of an observation, and using a novel Gibbs sampling technique to refine the topic label assignment in realtime, making it suitable for processing streaming sensor data such as video and audio observed by a robot. Our experiments suggest that taking into account the spatiotemporal context of observations results in better topic labels that have higher mutual information with ground truth labels, compared to topic modeling without taking into account the spatiotemporal context. Moreover we show that the perplexity of the online topic model using the proposed Gibbs sampler is competitive with a batch Gibbs sampler. Given a scene descriptor such as bag-of-words, location, or topic distribution, the thesis then proposes a novel online summarization algorithm, which unlike previous techniques focuses on building a navigation summary containing all the surprising scenes observed by the robot. We argue that the summaries produced by the algorithm (called extremum summaries) are ideal for monitoring and inspection tasks, where the goal is to maintain a small set of images that is representative of the diversity of what has been observed. Although computation of an optimal summary, even in the batch case, is NP-hard, we empirically show that the approximate online algorithm presented in the thesis produces summaries with cost that is statistically indistinguishable from batch summaries, while running on natural datasets. Cost was measured as the distance of the farthest sample from a sample in the summary. Collecting data from an environment to build a topic model or a summary requires a robot to traverse this environment. If the geographic size of this region of interest is small then we can simply use any space filling curve to plan this path. However, for larger areas this might not be possible, and hence we propose an information theoretic exploration technique which biases the path towards locations with high information gain in topic space. The resulting topic models were empirically shown to perform better than topic models learned with other competing exploration algorithms, such as free space exploration. Performance was measured in terms of mutual information with ground truth labels, and mutual information with topic labels computed in batch mode with complete knowledge of the environment. Many exploration robots are required to collect samples and perform some chemical or physical analysis. Often such a task requires making irrevocable decisions on whether to select the current sample or not. This thesis presents a novel formulation of this task as an instance of the secretary hiring problem. We examine several existing variants of this problem, and present an optimal solution to a new variant of the secretary hiring problem, where the goal is to maximize the probability of identifying the top K samples online and irrevocably. Together, the contributions of this thesis are a step towards developing fully autonomous robotic agents that can be used in collaboration with humans to explore dangerous unknown environments.
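The irrevocable-selection result mentioned above generalizes the classic secretary problem. As a point of reference, the K=1 base case with the familiar 1/e stopping rule is sketched below; the thesis's top-K variant is more involved and is not reproduced here.

import numpy as np

def secretary_select(scores):
    """Classic 1/e stopping rule: observe the first n/e samples without
    committing, then irrevocably accept the first later sample that
    beats everything seen so far. Returns the index of the selection."""
    n = len(scores)
    cutoff = int(n / np.e)
    best_seen = max(scores[:cutoff], default=float("-inf"))
    for i in range(cutoff, n):
        if scores[i] > best_seen:
            return i
    return n - 1  # forced to take the last sample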
Exploring Underwater Environments with Curiosity.
Girdhar, Y.; and Dudek, G.
In 2014 Canadian Conference on Computer and Robot Vision, pages 104–110, Montreal, May 2014. IEEE.
@inproceedings{Girdhar2014CRV,
  address = {Montreal},
  title = {Exploring {Underwater} {Environments} with {Curiosity}},
  copyright = {All rights reserved},
  isbn = {978-1-4799-4337-1},
  url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6816831},
  doi = {10.1109/CRV.2014.22},
  urldate = {2014-06-10},
  booktitle = {2014 {Canadian} {Conference} on {Computer} and {Robot} {Vision}},
  publisher = {IEEE},
  author = {Girdhar, Yogesh and Dudek, Gregory},
  month = may,
  year = {2014},
  pages = {104--110},
}
Curiosity based exploration for learning terrain models.
Girdhar, Y.; Whitney, D.; and Dudek, G.
In 2014 IEEE International Conference on Robotics and Automation (ICRA), pages 578–584, May 2014. IEEE. arXiv: 1310.6767.
@inproceedings{Girdhar2014ICRA,
  title = {Curiosity based exploration for learning terrain models},
  copyright = {All rights reserved},
  isbn = {978-1-4799-3685-4},
  url = {http://ieeexplore.ieee.org/document/6906913/},
  doi = {10.1109/ICRA.2014.6906913},
  urldate = {2014-11-23},
  booktitle = {2014 {IEEE} {International} {Conference} on {Robotics} and {Automation} ({ICRA})},
  publisher = {IEEE},
  author = {Girdhar, Yogesh and Whitney, David and Dudek, Gregory},
  month = may,
  year = {2014},
  note = {arXiv: 1310.6767; Genre: Robotics},
  pages = {578--584},
}
\n
\n\n\n
\n We present a robotic exploration technique in which the goal is to learn a visual model and be able to distinguish between different terrains and other visual components in an unknown environment. We use ROST, a realtime online spatiotemporal topic modeling framework, to model these terrains from the observations made by the robot, and then use an information-theoretic path planning technique to define the exploration path. We conduct experiments with aerial-view and underwater datasets containing millions of observations and varying path lengths, and find that paths biased towards locations with high topic perplexity produce better terrain models with high discriminative power, especially for paths of length close to the diameter of the world.\n
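As a rough illustration of the path-planning idea in this abstract, the following Python sketch biases a grid explorer toward neighboring cells whose (stubbed) topic distributions have high perplexity. It is not the paper's planner; topic_dist, the grid, and the weighting scheme are hypothetical stand-ins.

import math
import random

def perplexity(p):
    """Perplexity 2^H(p) of a discrete distribution p."""
    h = -sum(pi * math.log2(pi) for pi in p if pi > 0)
    return 2.0 ** h

def topic_dist(cell, k=4):
    # stub: in a real system this would come from the online topic model
    w = [random.random() for _ in range(k)]
    s = sum(w)
    return [x / s for x in w]

def next_cell(pos, grid_size):
    x, y = pos
    moves = [(x + dx, y + dy)
             for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1))
             if 0 <= x + dx < grid_size and 0 <= y + dy < grid_size]
    # weight candidate moves by the perplexity of their topic distribution
    scores = [perplexity(topic_dist(m)) for m in moves]
    return random.choices(moves, weights=scores)[0]

if __name__ == "__main__":
    pos, path = (0, 0), [(0, 0)]
    for _ in range(20):
        pos = next_cell(pos, grid_size=8)
        path.append(pos)
    print(path)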
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2013\n \n \n (5)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Unsupervised environment recognition and modeling using sound sensing.\n \n \n \n \n\n\n \n Kalmbach, A.; Girdhar, Y.; and Dudek, G.\n\n\n \n\n\n\n In 2013 IEEE International Conference on Robotics and Automation, pages 2699–2704, Karlsruhe, Germany, May 2013. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"UnsupervisedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Kalmbach2013,\n\taddress = {Karlsruhe, Germany},\n\ttitle = {Unsupervised environment recognition and modeling using sound sensing},\n\tcopyright = {All rights reserved},\n\tisbn = {978-1-4673-5643-5},\n\turl = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6630948},\n\tdoi = {10.1109/ICRA.2013.6630948},\n\turldate = {2014-06-11},\n\tbooktitle = {2013 {IEEE} {International} {Conference} on {Robotics} and {Automation}},\n\tpublisher = {IEEE},\n\tauthor = {Kalmbach, Arnold and Girdhar, Yogesh and Dudek, Gregory},\n\tmonth = may,\n\tyear = {2013},\n\tpages = {2699--2704},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n [demonstration] Topic Modeling for Robots.\n \n \n \n\n\n \n Girdhar, Y.; and Dudek, G.\n\n\n \n\n\n\n In Twenty-seventh Conference on Neural Information Processing Systems, Lake Tahoe, Nevada, 2013. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Girdhar2013,\n\taddress = {Lake Tahoe, Nevada},\n\ttitle = {[demonstration] {Topic} {Modeling} for {Robots}},\n\tcopyright = {All rights reserved},\n\tbooktitle = {Twenty-seventh {Conference} on {Neural} {Information} {Processing} {Systems}},\n\tauthor = {Girdhar, Yogesh and Dudek, Gregory},\n\tyear = {2013},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Autonomous Adaptive Underwater Exploration using Online Topic Modeling.\n \n \n \n \n\n\n \n Girdhar, Y.; Giguère, P.; and Dudek, G.\n\n\n \n\n\n\n In Desai, J. P.; Dudek, G.; Khatib, O.; and Kumar, V., editor(s), Experimental Robotics, pages 789–802. Springer International Publishing, 2013.\n \n\n\n\n
\n\n\n\n \n \n \"AutonomousPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{Girdhar2013a,\n\ttitle = {Autonomous {Adaptive} {Underwater} {Exploration} using {Online} {Topic} {Modeling}},\n\tcopyright = {All rights reserved},\n\tisbn = {978-3-319-00064-0},\n\turl = {http://dx.doi.org/10.1007/978-3-319-00065-7_53},\n\tbooktitle = {Experimental {Robotics}},\n\tpublisher = {Springer International Publishing},\n\tauthor = {Girdhar, Yogesh and Giguère, Philippe and Dudek, Gregory},\n\teditor = {Desai, Jaydev P. and Dudek, Gregory and Khatib, Oussama and Kumar, Vijay},\n\tyear = {2013},\n\tdoi = {10.1007/978-3-319-00065-7_53},\n\tpages = {789--802},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Autonomous adaptive exploration using realtime online spatiotemporal topic modeling.\n \n \n \n \n\n\n \n Girdhar, Y.; Giguere, P.; and Dudek, G.\n\n\n \n\n\n\n The International Journal of Robotics Research, 33(4): 645–657. November 2013.\n \n\n\n\n
\n\n\n\n \n \n \"AutonomousPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Girdhar2013IJRR,\n\ttitle = {Autonomous adaptive exploration using realtime online spatiotemporal topic modeling},\n\tvolume = {33},\n\tcopyright = {All rights reserved},\n\tissn = {0278-3649},\n\turl = {http://ijr.sagepub.com/cgi/doi/10.1177/0278364913507325},\n\tdoi = {10.1177/0278364913507325},\n\tabstract = {Exploration of dangerous environments, such as underwater coral reefs and shipwrecks, is a difficult and potentially life threatening tasks for humans, which naturally makes the use of an autonomous robotic system very appealing. Exploration through the use of an autonomous agent can find uses in many different scenarios.This paper presents such an autonomous system, which is capable of autonomous exploration, and shows its use in a series of experiments to collect image data in challenging underwater marine environments. We presents novel contributions on three fronts. First, we present an online topic-modeling based technique to describe what is being observed using a low dimensional semantic descriptor. This descriptor attempts to be invariant to observations of different corals belonging to the same species, or observations of similar types rocks observed from different viewpoints. Second, we use the topic descriptor to compute the surprise score of the current observation. This is done by maintaining an online summary of observations thus far, and then computing the surprise score as the distance of the current observation to the summary, in the topic space. Finally, we present a novel control strategy for an underwater robot thats allows for intelligent traversal; hovering over surprising observations, and swimming quickly over previously seen corals and rocks.},\n\tnumber = {4},\n\turldate = {2014-04-29},\n\tjournal = {The International Journal of Robotics Research},\n\tauthor = {Girdhar, Yogesh and Giguere, Philippe and Dudek, Gregory},\n\tmonth = nov,\n\tyear = {2013},\n\tpages = {645--657},\n}\n\n
\n
\n\n\n
\n Exploration of dangerous environments, such as underwater coral reefs and shipwrecks, is a difficult and potentially life-threatening task for humans, which naturally makes the use of an autonomous robotic system very appealing. Exploration through the use of an autonomous agent can find uses in many different scenarios. This paper presents such an autonomous system, which is capable of autonomous exploration, and shows its use in a series of experiments to collect image data in challenging underwater marine environments. We present novel contributions on three fronts. First, we present an online topic-modeling based technique to describe what is being observed using a low-dimensional semantic descriptor. This descriptor attempts to be invariant across observations of different corals belonging to the same species, or observations of similar types of rocks seen from different viewpoints. Second, we use the topic descriptor to compute the surprise score of the current observation. This is done by maintaining an online summary of the observations thus far, and then computing the surprise score as the distance of the current observation from the summary, in topic space. Finally, we present a novel control strategy for an underwater robot that allows for intelligent traversal: hovering over surprising observations, and swimming quickly over previously seen corals and rocks.\n
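The surprise-driven traversal described in the last two contributions can be sketched as follows: keep a small summary of topic distributions, score each new observation by its distance to the nearest summary sample, and slow down when the score is high. This is a hedged toy version; the threshold, the L1 distance, and the speed mapping are invented for illustration and are not the paper's.

def l1(p, q):
    return sum(abs(a - b) for a, b in zip(p, q))

class SurpriseController:
    def __init__(self, threshold=0.5, max_summary=10):
        self.summary = []
        self.threshold = threshold
        self.max_summary = max_summary

    def surprise(self, obs):
        # distance of the current topic distribution to the summary
        if not self.summary:
            return float("inf")
        return min(l1(obs, s) for s in self.summary)

    def step(self, obs):
        s = self.surprise(obs)
        if s > self.threshold and len(self.summary) < self.max_summary:
            self.summary.append(obs)
        # hover (slow) over surprising scenes, swim fast over familiar ones
        speed = 0.1 if s > self.threshold else 1.0
        return speed, s

if __name__ == "__main__":
    ctl = SurpriseController()
    for obs in ([0.9, 0.1, 0.0], [0.8, 0.2, 0.0], [0.1, 0.1, 0.8]):
        print(ctl.step(obs))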
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Wide-Speed Autopilot System for a Swimming Hexapod Robot.\n \n \n \n \n\n\n \n Giguere, P.; Girdhar, Y.; and Dudek, G.\n\n\n \n\n\n\n In 2013 International Conference on Computer and Robot Vision, pages 9–15, May 2013. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Wide-SpeedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Giguere2013,\n\ttitle = {Wide-{Speed} {Autopilot} {System} for a {Swimming} {Hexapod} {Robot}},\n\tcopyright = {All rights reserved},\n\tisbn = {978-0-7695-4983-5},\n\turl = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6569178},\n\tdoi = {10.1109/CRV.2013.13},\n\tabstract = {For underwater swimming robots, which use the unconventional method of oscillating flippers for propulsion and control, being able to move stably at various velocities is challenging. This stable motion facilitates navigation, avoids blurring the images taken by a camera motion, and enables longterm observations of specific locations. Previous experiments with our swimming robot Aqua have shown that its autopilot system must adapt the control parameters as a function of speed. The reason is that the dynamics of both the robot and the thrusting system vary widely as a function of the overall velocity of the robot. In this paper, we present the results of manually tuning a stable autopilot system for this Aqua swimming robot. We employed a well-known technique called gain scheduling to allow for stable operation for velocities ranging from 0 to 40 cm=s, in real open sea conditions. Thus, our platform is now suitable for vision-based navigation in low light conditions as well as for extended observation through station-keeping. The results presented here are also a proof-of-concept that agile and reactive autonomous hovering is possible for flipper-based propulsion system.},\n\turldate = {2014-07-11},\n\tbooktitle = {2013 {International} {Conference} on {Computer} and {Robot} {Vision}},\n\tpublisher = {IEEE},\n\tauthor = {Giguere, Philippe and Girdhar, Yogesh and Dudek, Gregory},\n\tmonth = may,\n\tyear = {2013},\n\tpages = {9--15},\n}\n\n
\n
\n\n\n
\n For underwater swimming robots, which use the unconventional method of oscillating flippers for propulsion and control, moving stably at various velocities is challenging. Stable motion facilitates navigation, prevents motion blur in the images taken by the camera, and enables long-term observation of specific locations. Previous experiments with our swimming robot Aqua have shown that its autopilot system must adapt its control parameters as a function of speed, because the dynamics of both the robot and the thrusting system vary widely with the overall velocity of the robot. In this paper, we present the results of manually tuning a stable autopilot system for the Aqua swimming robot. We employed a well-known technique called gain scheduling to allow for stable operation at velocities ranging from 0 to 40 cm/s, in real open-sea conditions. Our platform is thus now suitable for vision-based navigation in low-light conditions as well as for extended observation through station-keeping. The results presented here are also a proof of concept that agile and reactive autonomous hovering is possible with a flipper-based propulsion system.\n
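The gain-scheduling approach mentioned in the abstract can be sketched as a speed-indexed lookup of PID gains with linear interpolation between operating points. The table values below are made up for illustration; only the overall mechanism reflects the paper.

import bisect

SPEEDS = [0.0, 0.1, 0.2, 0.4]          # m/s operating points (illustrative)
GAINS = {                               # (Kp, Ki, Kd) at each operating point
    0.0: (2.0, 0.10, 0.5),
    0.1: (1.6, 0.08, 0.4),
    0.2: (1.2, 0.06, 0.3),
    0.4: (0.8, 0.04, 0.2),
}

def scheduled_gains(v):
    """Linearly interpolate PID gains for the current speed v."""
    v = min(max(v, SPEEDS[0]), SPEEDS[-1])
    i = bisect.bisect_right(SPEEDS, v)
    if i >= len(SPEEDS):
        return GAINS[SPEEDS[-1]]
    lo, hi = SPEEDS[i - 1], SPEEDS[i]
    t = 0.0 if hi == lo else (v - lo) / (hi - lo)
    return tuple(a + t * (b - a) for a, b in zip(GAINS[lo], GAINS[hi]))

if __name__ == "__main__":
    for v in (0.05, 0.15, 0.3):
        print(v, scheduled_gains(v))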
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2012\n \n \n (5)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Efficient on-line data summarization using extremum summaries.\n \n \n \n \n\n\n \n Girdhar, Y.; and Dudek, G.\n\n\n \n\n\n\n In 2012 IEEE International Conference on Robotics and Automation, pages 3490–3496, Saint Paul, MN, May 2012. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"EfficientPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Girdhar:ICRA:2012,\n\taddress = {Saint Paul, MN},\n\ttitle = {Efficient on-line data summarization using extremum summaries},\n\tcopyright = {All rights reserved},\n\tisbn = {978-1-4673-1405-3},\n\turl = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6224657},\n\tdoi = {10.1109/ICRA.2012.6224657},\n\tabstract = {We are interested in the task of online summarization of the data observed by a mobile robot, with the goal that these summaries could be then be used for applications such as surveillance, identifying samples to be collected by a planetary rover, and site inspections to detect anomalies. In this paper, we pose the summarization problem as an instance of the well known k-center problem, where the goal is to identify k observations so that the maximum distance of any observation from a summary sample is minimized. We focus on the online version of the summarization problem, which requires that the decision to add an incoming observation to the summary be made instantaneously. Moreover, we add the constraint that only a finite number of observed samples can be saved at any time, which allows for applications where the selection of a sample is linked to a physical action such as rock sample collection by a planetary rover. We show that the proposed online algorithm has performance comparable to the offline algorithm when used with real world data.},\n\turldate = {2014-07-11},\n\tbooktitle = {2012 {IEEE} {International} {Conference} on {Robotics} and {Automation}},\n\tpublisher = {IEEE},\n\tauthor = {Girdhar, Yogesh and Dudek, Gregory},\n\tmonth = may,\n\tyear = {2012},\n\tpages = {3490--3496},\n}\n\n
\n
\n\n\n
\n We are interested in the task of online summarization of the data observed by a mobile robot, with the goal that these summaries could then be used for applications such as surveillance, identifying samples to be collected by a planetary rover, and site inspections to detect anomalies. In this paper, we pose the summarization problem as an instance of the well-known k-center problem, where the goal is to identify k observations so that the maximum distance of any observation from a summary sample is minimized. We focus on the online version of the summarization problem, which requires that the decision to add an incoming observation to the summary be made instantaneously. Moreover, we add the constraint that only a finite number of observed samples can be saved at any time, which allows for applications where the selection of a sample is linked to a physical action, such as rock sample collection by a planetary rover. We show that the proposed online algorithm has performance comparable to the offline algorithm when used with real-world data.\n
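For intuition, here is a bounded-memory online k-center sketch in the classic "doubling" style: admit an observation only if it is farther than the current radius from every summary sample, and coarsen the summary when the budget of k samples overflows. This mirrors the problem setting of the paper but is not necessarily its exact algorithm; the initial radius r0 is an assumed tuning parameter.

def dist(p, q):
    return sum((a - b) ** 2 for a, b in zip(p, q)) ** 0.5

def online_k_center(stream, k, r0=0.1):
    centers, r = [], r0
    for x in stream:
        # admit x only if it is far from every current summary sample
        if all(dist(x, c) > r for c in centers):
            centers.append(x)
        # over budget: double the radius and merge nearby centers
        while len(centers) > k:
            r *= 2.0
            kept = []
            for c in centers:
                if all(dist(c, c2) > r for c2 in kept):
                    kept.append(c)
            centers = kept
    return centers, r

if __name__ == "__main__":
    import random
    pts = [(random.random(), random.random()) for _ in range(500)]
    summary, radius = online_k_center(pts, k=8)
    print(len(summary), radius)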
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Monitoring Marine Environments using a Team of Heterogeneous Robots.\n \n \n \n\n\n \n Girdhar, Y.; Xu, A.; Shkurti, F.; Camilo, J.; Higuera, G.; Meghjani, M.; Giguere, P.; Rekleitis, I.; and Dudek, G.\n\n\n \n\n\n\n In RSS 2012 Workshop on Robotics for Environmental Monitoring, pages 4, 2012. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Girdhar:WREM:2012,\n\ttitle = {Monitoring {Marine} {Environments} using a {Team} of {Heterogeneous} {Robots}},\n\tcopyright = {All rights reserved},\n\tbooktitle = {{RSS} 2012 {Workshop} on {Robotics} for {Environmental} {Monitoring}},\n\tauthor = {Girdhar, Yogesh and Xu, Anqi and Shkurti, Florian and Camilo, Juan and Higuera, Gamboa and Meghjani, Malika and Giguere, Philippe and Rekleitis, Ioannis and Dudek, Gregory},\n\tyear = {2012},\n\tpages = {4},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n [presentation] Realtime Online Spatiotemporal Topics for Navigation Summaries.\n \n \n \n\n\n \n Girdhar, Y.; Adam, R.; and Dudek, G.\n\n\n \n\n\n\n In 7th Annual Machine Learning Symposium, New York, 2012. The New York Academy of Sciences\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Girdhar2012,\n\taddress = {New York},\n\ttitle = {[presentation] {Realtime} {Online} {Spatiotemporal} {Topics} for {Navigation} {Summaries}},\n\tcopyright = {All rights reserved},\n\tbooktitle = {7th {Annual} {Machine} {Learning} {Symposium}},\n\tpublisher = {The New York Academy of Sciences},\n\tauthor = {Girdhar, Yogesh and Adam, Raheem and Dudek, Gregory},\n\tyear = {2012},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multi-domain monitoring of marine environments using a heterogeneous robot team.\n \n \n \n \n\n\n \n Shkurti, F.; Xu, A.; Meghjani, M.; Gamboa Higuera, J. C.; Girdhar, Y.; Giguere, P.; Dey, B. B.; Li, J.; Kalmbach, A.; Prahacs, C.; Turgeon, K.; Rekleitis, I.; and Dudek, G.\n\n\n \n\n\n\n In 2012 IEEE/RSJ International Conference on Intelligent Robots and Systems, pages 1747–1753, October 2012. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Multi-domainPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Shkurti2012,\n\ttitle = {Multi-domain monitoring of marine environments using a heterogeneous robot team},\n\tcopyright = {All rights reserved},\n\tisbn = {978-1-4673-1736-8},\n\turl = {http://ieeexplore.ieee.org/document/6385685/},\n\tdoi = {10.1109/IROS.2012.6385685},\n\tbooktitle = {2012 {IEEE}/{RSJ} {International} {Conference} on {Intelligent} {Robots} and {Systems}},\n\tpublisher = {IEEE},\n\tauthor = {Shkurti, Florian and Xu, Anqi and Meghjani, Malika and Gamboa Higuera, Juan Camilo and Girdhar, Yogesh and Giguere, Philippe and Dey, Bir Bikram and Li, Jimmy and Kalmbach, Arnold and Prahacs, Chris and Turgeon, Katrine and Rekleitis, Ioannis and Dudek, Gregory},\n\tmonth = oct,\n\tyear = {2012},\n\tpages = {1747--1753},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Autonomous Adaptive Underwater Exploration using Online Topic Modelling.\n \n \n \n \n\n\n \n Girdhar, Y.; Giguère, P.; and Dudek, G.\n\n\n \n\n\n\n In International Symposium on Experimental Robotics (ISER), 2012. \n \n\n\n\n
\n\n\n\n \n \n \"AutonomousPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Girdhar:ISER:2012,\n\ttitle = {Autonomous {Adaptive} {Underwater} {Exploration} using {Online} {Topic} {Modelling}},\n\tcopyright = {All rights reserved},\n\turl = {http://www.cim.mcgill.ca/~yogesh/iser2012},\n\tbooktitle = {International {Symposium} on {Experimental} {Robotics} ({ISER})},\n\tauthor = {Girdhar, Yogesh and Giguère, Philippe and Dudek, Gregory},\n\tyear = {2012},\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2011\n \n \n (5)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n MARE: Marine Autonomous Robotic Explorer.\n \n \n \n\n\n \n Girdhar, Y.; Xu, A.; Dey, B. B.; Meghjani, M.; Shkurti, F.; Rekleitis, I.; and Dudek, G.\n\n\n \n\n\n\n In Proceedings of the 2011 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 5048 – 5053, San Francisco, USA, September 2011. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Girdhar:IROS:2011,\n\taddress = {San Francisco, USA},\n\ttitle = {{MARE}: {Marine} {Autonomous} {Robotic} {Explorer}},\n\tcopyright = {All rights reserved},\n\tdoi = {10.1109/IROS.2011.6094914},\n\tabstract = {We present MARE, an autonomous airboat robot that is suitable for exploration-oriented tasks, such as inspection of coral reefs and shallow seabeds. The combination of this platform's particular mechanical properties and its powerful software framework enables it to function in a multitude of potential capacities, including autonomous surveillance, mapping, and search operations. In this paper we describe two different exploration strategies and their implementation using the MARE platform. First, we discuss the application of an efficient coverage algorithm, for the purpose of achieving systematic exploration of a known and bounded environment. Second, we present an exploration strategy driven by surprise, which steers the robot on a path that might lead to potentially surprising observations.},\n\tbooktitle = {Proceedings of the 2011 {IEEE}/{RSJ} {International} {Conference} on {Intelligent} {Robots} and {Systems} ({IROS})},\n\tauthor = {Girdhar, Yogesh and Xu, Anqi and Dey, Bir Bikram and Meghjani, Malika and Shkurti, Florian and Rekleitis, Ioannis and Dudek, Gregory},\n\tmonth = sep,\n\tyear = {2011},\n\tpages = {5048 -- 5053},\n}\n\n
\n
\n\n\n
\n We present MARE, an autonomous airboat robot that is suitable for exploration-oriented tasks, such as inspection of coral reefs and shallow seabeds. The combination of this platform's particular mechanical properties and its powerful software framework enables it to function in a multitude of potential capacities, including autonomous surveillance, mapping, and search operations. In this paper we describe two different exploration strategies and their implementation using the MARE platform. First, we discuss the application of an efficient coverage algorithm, for the purpose of achieving systematic exploration of a known and bounded environment. Second, we present an exploration strategy driven by surprise, which steers the robot on a path that might lead to potentially surprising observations.\n
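The first (systematic coverage) strategy can be sketched as boustrophedon, or "lawnmower", waypoints over a known bounded rectangle, as below. The spacing would be set by the sensor footprint; the paper's coverage algorithm may differ in detail.

def lawnmower(width, height, spacing):
    """Waypoints for a back-and-forth sweep of a width x height rectangle."""
    waypoints, y, left_to_right = [], 0.0, True
    while y <= height:
        row = [(0.0, y), (width, y)]
        waypoints.extend(row if left_to_right else row[::-1])
        left_to_right = not left_to_right
        y += spacing
    return waypoints

if __name__ == "__main__":
    for wp in lawnmower(width=100.0, height=40.0, spacing=10.0):
        print(wp)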
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Online Visual Vocabularies.\n \n \n \n\n\n \n Girdhar, Y.; and Dudek, G.\n\n\n \n\n\n\n In CRV '11: Proceedings of the 2011 Canadian Conference on Computer and Robot Vision, pages 191–196, May 2011. IEEE Computer Society\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Girdhar:CRV:2011,\n\ttitle = {Online {Visual} {Vocabularies}},\n\tcopyright = {All rights reserved},\n\tdoi = {10.1109/CRV.2011.32},\n\tabstract = {he idea of an online visual vocabulary is proposed. In contrast to the accepted strategy of generating vocabularies offline, using the k-means clustering over all the features extracted form all the images in a dataset, an online vocabulary is dynamic and evolves iteratively over time as new observations are made. Hence, it is much more suitable for online robotic applications, such as exploration, landmark detection, and SLAM, where the future is unknown. We present two different strategies for building online vocabularies. The first strategy produces a vocabulary, which optimizes the k-centres objective of minimizing the maximum distance of a a feature from the closest vocabulary word. The second strategy produces a vocabulary by randomly sampling from the current vocabulary and the features in the current observation. We show that both the algorithms are able to produce distance matrices which have positive rank correlation with distance matrices computed using an offline k-means vocabulary. We discover that the online random vocabulary is consistently effective at approximating the behaviour of the offline k-means vocabulary, at least for the moderate sized datasets we examine.},\n\tbooktitle = {{CRV} '11: {Proceedings} of the 2011 {Canadian} {Conference} on {Computer} and {Robot} {Vision}},\n\tpublisher = {IEEE Computer Society},\n\tauthor = {Girdhar, Yogesh and Dudek, Gregory},\n\tmonth = may,\n\tyear = {2011},\n\tpages = {191--196},\n}\n\n
\n
\n\n\n
\n The idea of an online visual vocabulary is proposed. In contrast to the accepted strategy of generating vocabularies offline, using k-means clustering over all the features extracted from all the images in a dataset, an online vocabulary is dynamic and evolves iteratively over time as new observations are made. Hence, it is much more suitable for online robotic applications, such as exploration, landmark detection, and SLAM, where the future is unknown. We present two different strategies for building online vocabularies. The first strategy produces a vocabulary which optimizes the k-centres objective of minimizing the maximum distance of a feature from the closest vocabulary word. The second strategy produces a vocabulary by randomly sampling from the current vocabulary and the features in the current observation. We show that both algorithms are able to produce distance matrices which have positive rank correlation with distance matrices computed using an offline k-means vocabulary. We find that the online random vocabulary is consistently effective at approximating the behaviour of the offline k-means vocabulary, at least for the moderately sized datasets we examine.\n
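The second (random sampling) strategy resembles classic reservoir sampling over the feature stream, sketched below. This is an assumption about the mechanism, not a transcription of the paper's algorithm.

import random

def online_random_vocabulary(features, vocab_size):
    """Maintain a uniform random sample of all features seen so far."""
    vocab, n = [], 0
    for f in features:
        n += 1
        if len(vocab) < vocab_size:
            vocab.append(f)
        else:
            # incoming feature replaces a random word with prob vocab_size/n
            j = random.randrange(n)
            if j < vocab_size:
                vocab[j] = f
    return vocab

if __name__ == "__main__":
    stream = ((random.gauss(0, 1), random.gauss(0, 1)) for _ in range(10000))
    print(len(online_random_vocabulary(stream, vocab_size=64)))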
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A surprising problem in navigation.\n \n \n \n \n\n\n \n Girdhar, Y.; and Dudek, G.\n\n\n \n\n\n\n In Harris, L. R; and Jenkin, M. R M, editor(s), Vision in 3D Environments, pages 228–252. Cambridge University Press, Cambridge, 2011.\n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{Girdhar:V3D:2011,\n\taddress = {Cambridge},\n\ttitle = {A surprising problem in navigation},\n\tcopyright = {All rights reserved},\n\turl = {http://ebooks.cambridge.org/ref/id/CBO9780511736261A114},\n\tbooktitle = {Vision in {3D} {Environments}},\n\tpublisher = {Cambridge University Press},\n\tauthor = {Girdhar, Yogesh and Dudek, Gregory},\n\teditor = {Harris, Laurence R and Jenkin, Michael R M},\n\tyear = {2011},\n\tdoi = {10.1017/CBO9780511736261.011},\n\tpages = {228--252},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n [poster] SoYummy: real-time temporal semantic compression to further the Synopticon.\n \n \n \n\n\n \n Rabinovich, M.; and Girdhar, Y.\n\n\n \n\n\n\n In Subtle Technologies Festival, Toronto, 2011. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Rabinovich2011,\n\taddress = {Toronto},\n\ttitle = {[poster] {SoYummy}: real-time temporal semantic compression to further the {Synopticon}},\n\tcopyright = {All rights reserved},\n\tbooktitle = {Subtle {Technologies} {Festival}},\n\tauthor = {Rabinovich, Michael and Girdhar, Yogesh},\n\tyear = {2011},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Offline Navigation Summaries.\n \n \n \n\n\n \n Girdhar, Y.; and Dudek, G.\n\n\n \n\n\n\n In IEEE International Conference on Robotics and Automation (ICRA), pages 5769–5775, May 2011. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Girdhar:ICRA:2011,\n\ttitle = {Offline {Navigation} {Summaries}},\n\tcopyright = {All rights reserved},\n\tdoi = {10.1109/ICRA.2011.5980094},\n\tabstract = {In this paper we focus on the task of summarizing observations made by a mobile robot on a trajectory. A navigation summary is the synopsis of these observations. We pose the problem of generating navigation summaries as a sampling problem. The goal is to select a few samples from the set of all observations, which are characteristic of the environment, and capture its mean properties and surprises. We define the surprise score of an observation as its distance to the closest sample in the summary. Hence, an ideal summary is defined to have a low mean and a low max surprise score, measured over all the observations. We present three different strategies for solving this sampling problem. Of these, we show that the kCover sampling algorithm produces summaries with low mean and max surprise scores; even in the presence of noise. These results are demonstrated on datasets acquired in different robotics context.},\n\tbooktitle = {{IEEE} {International} {Conference} on {Robotics} and {Automation} ({ICRA})},\n\tauthor = {Girdhar, Yogesh and Dudek, Gregory},\n\tmonth = may,\n\tyear = {2011},\n\tpages = {5769--5775},\n}\n\n
\n
\n\n\n
\n In this paper we focus on the task of summarizing the observations made by a mobile robot on a trajectory. A navigation summary is the synopsis of these observations. We pose the problem of generating navigation summaries as a sampling problem. The goal is to select a few samples from the set of all observations which are characteristic of the environment, and which capture its mean properties and its surprises. We define the surprise score of an observation as its distance to the closest sample in the summary. Hence, an ideal summary is defined to have a low mean and a low maximum surprise score, measured over all the observations. We present three different strategies for solving this sampling problem. Of these, we show that the kCover sampling algorithm produces summaries with low mean and max surprise scores, even in the presence of noise. These results are demonstrated on datasets acquired in different robotics contexts.\n
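As a stand-in for kCover, whose details are in the paper, the following sketch uses the standard greedy farthest-point (Gonzalez) heuristic, a classical 2-approximation for k-center that directly drives down the max surprise score defined above.

def dist(p, q):
    return sum((a - b) ** 2 for a, b in zip(p, q)) ** 0.5

def farthest_point_summary(points, k):
    summary = [points[0]]
    while len(summary) < k:
        # add the observation with the largest surprise score
        far = max(points, key=lambda p: min(dist(p, s) for s in summary))
        summary.append(far)
    return summary

def max_surprise(points, summary):
    return max(min(dist(p, s) for s in summary) for p in points)

if __name__ == "__main__":
    import random
    pts = [(random.random(), random.random()) for _ in range(200)]
    s = farthest_point_summary(pts, k=6)
    print(round(max_surprise(pts, s), 3))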
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2010\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n ONSUM: A System for Generating Online Navigation Summaries.\n \n \n \n\n\n \n Girdhar, Y.; and Dudek, G.\n\n\n \n\n\n\n In Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems, (IROS), pages 746–751, October 2010. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Girdhar:IROS:2010,\n\ttitle = {{ONSUM}: {A} {System} for {Generating} {Online} {Navigation} {Summaries}},\n\tcopyright = {All rights reserved},\n\tdoi = {10.1109/IROS.2010.5650315},\n\tabstract = {We propose an algorithm for generating naviga- tion summaries. Navigation summaries are a specialization of video summaries, where the focus is on video collected by a mobile robot, on a specified trajectory. We are interested in finding a few images that epitomize the visual experience of a robot as it traverse a terrain. This paper presents a novel approach to generating summaries in form of a set of images, where the decision to include the image in the summary set is made online. Our focus is on the case where the number of observations is infinite or unknown, but the size of the desired summary is known. Our strategy is to consider the images in the summary set as the prior hypothesis of the appearance of the world, and then use the idea of Bayesian Surprise to compute the novelty of an observed image. If the novelty is above a threshold, then we accept the image. We discuss different criterion for setting this threshold. Online nature of our approach allows for several interesting applications such as coral reef inspection, surveying, and surveillance.},\n\tbooktitle = {Proceedings of the {IEEE}/{RSJ} {International} {Conference} on {Intelligent} {Robots} and {Systems}, ({IROS})},\n\tauthor = {Girdhar, Yogesh and Dudek, Gregory},\n\tmonth = oct,\n\tyear = {2010},\n\tpages = {746--751},\n}\n\n
\n
\n\n\n
\n We propose an algorithm for generating navigation summaries. Navigation summaries are a specialization of video summaries, where the focus is on video collected by a mobile robot on a specified trajectory. We are interested in finding a few images that epitomize the visual experience of a robot as it traverses a terrain. This paper presents a novel approach to generating summaries in the form of a set of images, where the decision to include an image in the summary set is made online. Our focus is on the case where the number of observations is infinite or unknown, but the size of the desired summary is known. Our strategy is to consider the images in the summary set as the prior hypothesis of the appearance of the world, and then use the idea of Bayesian Surprise to compute the novelty of an observed image. If the novelty is above a threshold, then we accept the image. We discuss different criteria for setting this threshold. The online nature of our approach allows for several interesting applications such as coral reef inspection, surveying, and surveillance.\n
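A minimal sketch of the threshold-based acceptance rule: true Bayesian surprise is the KL divergence between posterior and prior over model parameters; as a simple proxy, the sketch below measures KL between the categorical visual-word distributions implied by the appearance-model counts before and after a new image, and admits the image when this exceeds a threshold. All constants are illustrative, not the paper's.

import math

def kl(p, q):
    return sum(pi * math.log(pi / qi) for pi, qi in zip(p, q) if pi > 0)

def normalize(counts):
    s = float(sum(counts))
    return [c / s for c in counts]

def summarize(images, vocab_size, threshold=0.02):
    counts = [1.0] * vocab_size            # Dirichlet pseudo-counts (prior)
    summary = []
    for img in images:                     # img: list of visual-word ids
        prior = normalize(counts)
        post = counts[:]
        for w in img:
            post[w] += 1
        surprise = kl(normalize(post), prior)
        if surprise > threshold:           # novel enough: keep the image
            summary.append(img)
            counts = post                  # the summary becomes the new prior
    return summary

if __name__ == "__main__":
    import random
    imgs = [[random.randrange(20) for _ in range(30)] for _ in range(50)]
    print(len(summarize(imgs, vocab_size=20)))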
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Online Navigation Summaries.\n \n \n \n\n\n \n Girdhar, Y.; and Dudek, G.\n\n\n \n\n\n\n In IEEE International Conference on Robotics and Automation (ICRA), pages 5035–5040, May 2010. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Girdhar:ICRA:2010,\n\ttitle = {Online {Navigation} {Summaries}},\n\tcopyright = {All rights reserved},\n\tdoi = {10.1109/ROBOT.2010.5509464},\n\tabstract = {Our objective is to find a small set of images that summarize a robot's visual experience along a path. We present a novel on-line algorithm for this task. This algorithm is based on a new extension to the classical Secretaries Problem. We also present an extension to the idea of Bayesian Surprise, which we then use to measure the fitness of an image as a summary image.},\n\tbooktitle = {{IEEE} {International} {Conference} on {Robotics} and {Automation} ({ICRA})},\n\tauthor = {Girdhar, Yogesh and Dudek, Gregory},\n\tmonth = may,\n\tyear = {2010},\n\tpages = {5035--5040},\n}\n\n
\n
\n\n\n
\n Our objective is to find a small set of images that summarize a robot's visual experience along a path. We present a novel online algorithm for this task. This algorithm is based on a new extension to the classical Secretary Problem. We also present an extension to the idea of Bayesian Surprise, which we then use to measure the fitness of an image as a summary image.\n
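For background on the secretary-style selection this paper extends, here is the classical 1/e stopping rule: observe the first n/e candidates without committing, then accept the first candidate better than everything seen so far. The paper's extension differs; this simulation only shows the classical baseline.

import math
import random

def secretary_pick(scores):
    n = len(scores)
    cutoff = max(1, int(n / math.e))
    best_seen = max(scores[:cutoff])       # observation phase, no commitment
    for i in range(cutoff, n):
        if scores[i] > best_seen:          # first candidate beating the phase
            return i
    return n - 1                           # forced to take the last candidate

if __name__ == "__main__":
    wins, trials, n = 0, 10000, 50
    for _ in range(trials):
        scores = random.sample(range(1000), n)
        if scores[secretary_pick(scores)] == max(scores):
            wins += 1
    print("picked the best ~%.2f of the time (theory ~0.37)" % (wins / trials))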
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2009\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Optimal Online Data Sampling or How to Hire the Best Secretaries.\n \n \n \n\n\n \n Girdhar, Y.; and Dudek, G.\n\n\n \n\n\n\n In CRV '09: Proceedings of the 2009 Canadian Conference on Computer and Robot Vision, pages 292–298, Kelowna, British Columbia, May 2009. IEEE Computer Society\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Girdhar:CRV:2009,\n\taddress = {Kelowna, British Columbia},\n\ttitle = {Optimal {Online} {Data} {Sampling} or {How} to {Hire} the {Best} {Secretaries}},\n\tcopyright = {All rights reserved},\n\tdoi = {10.1109/CRV.2009.30},\n\tabstract = {In this paper we present a framework for automatically generating a visual synopsis of the images observed as a robot follows a trajectory. These ``navigation summaries'' are a set of images that are meant to capture the notable visual events observed by the robot independent of task- specific constraints. These small set of images character- ize the visual appearance of the trajectory of a robot. In our effort to create such summaries we use several different functional criteria while choosing among candidate images and then combine these into a single measurement, tuned to make our decision process approximate the selections that might be made by human observers. Our approach also takes into account the location information of the photos to make sure that the summary images are well distributed in the world. The tuning with respect to human performance is attained by collecting human performance data on a similar task using a web-based interface and setting the parameters of our automated system to match the behaviour of the hu- man observers.},\n\tbooktitle = {{CRV} '09: {Proceedings} of the 2009 {Canadian} {Conference} on {Computer} and {Robot} {Vision}},\n\tpublisher = {IEEE Computer Society},\n\tauthor = {Girdhar, Yogesh and Dudek, Gregory},\n\tmonth = may,\n\tyear = {2009},\n\tpages = {292--298},\n}\n\n
\n
\n\n\n
\n In this paper we present a framework for automatically generating a visual synopsis of the images observed as a robot follows a trajectory. These ``navigation summaries'' are a set of images that are meant to capture the notable visual events observed by the robot, independent of task-specific constraints. This small set of images characterizes the visual appearance of the robot's trajectory. In our effort to create such summaries, we use several different functional criteria while choosing among candidate images and then combine these into a single measurement, tuned to make our decision process approximate the selections that might be made by human observers. Our approach also takes into account the location information of the photos, to make sure that the summary images are well distributed in the world. The tuning with respect to human performance is attained by collecting human performance data on a similar task using a web-based interface, and setting the parameters of our automated system to match the behaviour of the human observers.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2008\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Enabling autonomous capabilities in underwater robotics.\n \n \n \n \n\n\n \n Sattar, J.; Dudek, G.; Chiu, O.; Rekleitis, I.; Giguere, P.; Mills, A.; Plamondon, N.; Prahacs, C.; Girdhar, Y.; Nahon, M.; and Lobos, J.\n\n\n \n\n\n\n In 2008 IEEE/RSJ International Conference on Intelligent Robots and Systems, pages 3628–3634, Nice, France, September 2008. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"EnablingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Sattar:IROS:2008,\n\taddress = {Nice, France},\n\ttitle = {Enabling autonomous capabilities in underwater robotics},\n\tcopyright = {All rights reserved},\n\tisbn = {978-1-4244-2057-5},\n\turl = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4651158},\n\tdoi = {10.1109/IROS.2008.4651158},\n\tbooktitle = {2008 {IEEE}/{RSJ} {International} {Conference} on {Intelligent} {Robots} and {Systems}},\n\tpublisher = {IEEE},\n\tauthor = {Sattar, Junaed and Dudek, Gregory and Chiu, Olivia and Rekleitis, Ioannis and Giguere, P. and Mills, Alec and Plamondon, Nicolas and Prahacs, Chris and Girdhar, Yogesh and Nahon, Meyer and Lobos, J.-P.},\n\tmonth = sep,\n\tyear = {2008},\n\tpages = {3628--3634},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Vision Based Mobile Robot Pose Estimation and Mapping.\n \n \n \n\n\n \n Girdhar, Y. A\n\n\n \n\n\n\n Technical Report McGill University, 2008.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@techreport{Girdhar2008,\n\ttitle = {Vision {Based} {Mobile} {Robot} {Pose} {Estimation} and {Mapping}},\n\tcopyright = {All rights reserved},\n\tabstract = {The problem of localization and pose estimation, are closely related to generic object recognition and even to ego-motion estimation, but they differ in several important ways. This paper discusses the ideas required to build a fully autonomous robot systems, in which sensing, localization, and specifically vision-based localization can be expected to be critical generic components. We discuss large scale state-of-the-art localization systems; the fundamental algorithmic mechanisms for uncertainty modeling used by these localization systems; vision-based modeling of positions and objects; and classic approaches for computing usable features for recognition systems.},\n\tinstitution = {McGill University},\n\tauthor = {Girdhar, Yogesh A},\n\tyear = {2008},\n\tpages = {1--14},\n}\n
\n
\n\n\n
\n The problems of localization and pose estimation are closely related to generic object recognition and even to ego-motion estimation, but they differ in several important ways. This paper discusses the ideas required to build fully autonomous robot systems, in which sensing, localization, and specifically vision-based localization can be expected to be critical generic components. We discuss large-scale state-of-the-art localization systems; the fundamental algorithmic mechanisms for uncertainty modeling used by these localization systems; vision-based modeling of positions and objects; and classic approaches for computing usable features for recognition systems.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2005\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n [poster] Efficient sampling of protein folding pathways using HMMSTR and probabilistic roadmaps.\n \n \n \n\n\n \n Girdhar, Y.; Bystroff, C.; and Akella, S.\n\n\n \n\n\n\n In Computational Systems Bioinformatics Conference, 2005. Workshops and Poster Abstracts. IEEE, pages 222–223, August 2005. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{Girdhar:CSB:2005,\n\ttitle = {[poster] {Efficient} sampling of protein folding pathways using {HMMSTR} and probabilistic roadmaps},\n\tcopyright = {All rights reserved},\n\tdoi = {10.1109/CSBW.2005.59},\n\tabstract = {We present a method for constructing thousands of compact protein conformations from fragments and then connecting these structures to form a network of physically plausible folding pathways. This is the first attempt to merge the previous successes in fragment assembly methods with probabilistic roadmap (PRM) methods. Previous PRM methods have used the knowledge of the true structure to sample conformational space. Our method uses only the amino acid sequence to bias the conformational sampling. Conformational sampling is done using HMMSTR, a hidden Markov model for local sequence-structure correlations. We then build a PRM graph and find paths that have the the lowest energy climb. We find that favored folding pathways exist, corresponding to deep valleys in the energy landscape. We describe the pathways for three small proteins with different secondary structure content in the context of a folding funnel model.},\n\tbooktitle = {Computational {Systems} {Bioinformatics} {Conference}, 2005. {Workshops} and {Poster} {Abstracts}. {IEEE}},\n\tauthor = {Girdhar, Yogesh and Bystroff, Chris and Akella, Srinivas},\n\tmonth = aug,\n\tyear = {2005},\n\tpages = {222--223},\n}\n\n
\n
\n\n\n
\n We present a method for constructing thousands of compact protein conformations from fragments and then connecting these structures to form a network of physically plausible folding pathways. This is the first attempt to merge the previous successes of fragment assembly methods with probabilistic roadmap (PRM) methods. Previous PRM methods have used knowledge of the true structure to sample conformational space. Our method uses only the amino acid sequence to bias the conformational sampling. Conformational sampling is done using HMMSTR, a hidden Markov model for local sequence-structure correlations. We then build a PRM graph and find paths that have the lowest energy climb. We find that favored folding pathways exist, corresponding to deep valleys in the energy landscape. We describe the pathways for three small proteins with different secondary structure content in the context of a folding funnel model.\n
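The "lowest energy climb" search can be read as a bottleneck shortest-path problem on the roadmap: find the path whose highest-energy conformation is as low as possible. The sketch below solves this with a modified Dijkstra on a toy graph; the node energies and edges are invented placeholders, not HMMSTR-sampled conformations.

import heapq

def lowest_energy_climb(adj, energy, start, goal):
    """Path from start to goal minimizing the maximum node energy visited."""
    best = {start: energy[start]}
    heap = [(energy[start], start, [start])]
    while heap:
        peak, u, path = heapq.heappop(heap)
        if u == goal:
            return peak, path
        for v in adj[u]:
            new_peak = max(peak, energy[v])
            if new_peak < best.get(v, float("inf")):
                best[v] = new_peak
                heapq.heappush(heap, (new_peak, v, path + [v]))
    return None

if __name__ == "__main__":
    # toy roadmap: unfolded state U, intermediates A and B, native state N
    adj = {"U": ["A", "B"], "A": ["N"], "B": ["N"], "N": []}
    energy = {"U": 5.0, "A": 9.0, "B": 6.5, "N": 1.0}
    print(lowest_energy_climb(adj, energy, "U", "N"))  # via B, peak 6.5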
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Efficient Sampling of Protein Folding Funnels using HMMSTR, and Pathway Generation using Probabilistic Roadmaps.\n \n \n \n\n\n \n Girdhar, Y.\n\n\n \n\n\n\n Ph.D. Thesis, Rensselaer Polytechnic Institute, April 2005.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@phdthesis{Girdhar:2005:MSThesis,\n\ttitle = {Efficient {Sampling} of {Protein} {Folding} {Funnels} using {HMMSTR}, and {Pathway} {Generation} using {Probabilistic} {Roadmaps}},\n\tcopyright = {All rights reserved},\n\tabstract = {Classical techniques for simulating molecular motion such as Molecular Dynamics and Monte Carlo simulations only generate one pathway at a time and have extremely high computational cost. We present a biologically significant and effi- cient way to predict protein folding pathways using HMMSTR and Probabilistic Roadmaps (PRM), with several order of magnitudes lower computational cost. We show how to perform unbiased sampling of the folding funnel to generate a PRM graph for protein chains of up to 36 residues. This biologically based sampling is achieved by enforcing protein-like local structures using HMMSTR, a hidden Markov model for local sequence-structure prediction, thereby significantly reducing the size of the conformational space. We also show that there exist favored folding pathways (highways) that proteins take to reach their native fold or other compact folded states. We evalute our approach with three different proteins: a 36 residue long subdomain from Chicken Villin Headpiece -- 1VII(36), a 16-residue long β-hairpin from Protein G -- 2GB1(16), and a 28 residue long Fbp28Ww Domain from Mus Musculus -- 1E0L(28).},\n\tschool = {Rensselaer Polytechnic Institute},\n\tauthor = {Girdhar, Yogesh},\n\tmonth = apr,\n\tyear = {2005},\n}\n\n
\n
\n\n\n
\n Classical techniques for simulating molecular motion, such as Molecular Dynamics and Monte Carlo simulations, generate only one pathway at a time and have extremely high computational cost. We present a biologically significant and efficient way to predict protein folding pathways using HMMSTR and Probabilistic Roadmaps (PRM), at several orders of magnitude lower computational cost. We show how to perform unbiased sampling of the folding funnel to generate a PRM graph for protein chains of up to 36 residues. This biologically based sampling is achieved by enforcing protein-like local structures using HMMSTR, a hidden Markov model for local sequence-structure prediction, thereby significantly reducing the size of the conformational space. We also show that there exist favored folding pathways (highways) that proteins take to reach their native fold or other compact folded states. We evaluate our approach with three different proteins: a 36-residue subdomain from Chicken Villin Headpiece – 1VII(36), a 16-residue β-hairpin from Protein G – 2GB1(16), and a 28-residue Fbp28WW domain from Mus Musculus – 1E0L(28).\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n undefined\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Exploring the sampling properties of ROV, AUV and Towed Vehicles.\n \n \n \n\n\n \n Campbell, M. D.; Frappier, A.; Somerton, D.; Wakefield, W.; Clarke, E.; Murawski, S.; Taylor, C.; Singh, H.; Girdhar, Y.; and Yoklavich, M.\n\n\n \n\n\n\n Technical Report .\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@techreport{Campbell,\n\ttitle = {Exploring the sampling properties of {ROV}, {AUV} and {Towed} {Vehicles}},\n\tcopyright = {All rights reserved},\n\tauthor = {Campbell, Matthew D. and Frappier, Ariane and Somerton, David and Wakefield, Waldo and Clarke, Elizabeth and Murawski, Steve and Taylor, Chris and Singh, Hanumant and Girdhar, Yogesh and Yoklavich, Mary},\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n\n\n\n
\n\n\n \n\n \n \n \n \n\n
\n"}; document.write(bibbase_data.data);