var bibbase_data = {"data":"\"Loading..\"\n\n
\n\n \n\n \n\n \n \n\n \n\n \n \n\n \n\n \n
Generated by bibbase.org
You can create a new website with this list, or embed it in an existing web page by copying and pasting any of the following snippets.
JavaScript (easiest)

<script src="https://bibbase.org/show?bib=https://raw.githubusercontent.com/plai-group/bibliography/master/group_publications.bib&jsonp=1&theme=dividers&group0=year&group1=type&folding=0&filter=support:LwLL"></script>
PHP

<?php
$contents = file_get_contents("https://bibbase.org/show?bib=https://raw.githubusercontent.com/plai-group/bibliography/master/group_publications.bib&jsonp=1&theme=dividers&group0=year&group1=type&folding=0&filter=support:LwLL");
print_r($contents);
?>
iFrame (not recommended)

<iframe src="https://bibbase.org/show?bib=https://raw.githubusercontent.com/plai-group/bibliography/master/group_publications.bib&jsonp=1&theme=dividers&group0=year&group1=type&folding=0&filter=support:LwLL"></iframe>
For more details, see the documentation.

2022 (1)

inproceedings (1)
Enhancing Few-Shot Image Classification With Unlabelled Examples.
Bateni, P.; Barber, J.; van de Meent, J.; and Wood, F.
In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), pages 2796-2805, January 2022.
\n\n\n\n \n \n \"Enhancing arxiv\n  \n \n \n \"Enhancing paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 5 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Bateni_2022_WACV,
  author    = {Bateni, Peyman and Barber, Jarred and van de Meent, Jan-Willem and Wood, Frank},
  title     = {Enhancing Few-Shot Image Classification With Unlabelled Examples},
  booktitle = {Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)},
  month     = {January},
  year      = {2022},
  pages     = {2796-2805},
  url_ArXiv = {https://arxiv.org/abs/2006.12245},
  url_Paper = {https://ieeexplore.ieee.org/document/9706775},
  support   = {D3M,LwLL},
  abstract  = {We develop a transductive meta-learning method that uses unlabelled instances to improve few-shot image classification performance. Our approach combines a regularized Mahalanobis-distance-based soft k-means clustering procedure with a modified state of the art neural adaptive feature extractor to achieve improved test-time classification accuracy using unlabelled data. We evaluate our method on transductive few-shot learning tasks, in which the goal is to jointly predict labels for query (test) examples given a set of support (training) examples. We achieve state of the art performance on the Meta-Dataset, mini-ImageNet and tiered-ImageNet benchmarks.}
}
We develop a transductive meta-learning method that uses unlabelled instances to improve few-shot image classification performance. Our approach combines a regularized Mahalanobis-distance-based soft k-means clustering procedure with a modified state of the art neural adaptive feature extractor to achieve improved test-time classification accuracy using unlabelled data. We evaluate our method on transductive few-shot learning tasks, in which the goal is to jointly predict labels for query (test) examples given a set of support (training) examples. We achieve state of the art performance on the Meta-Dataset, mini-ImageNet and tiered-ImageNet benchmarks.
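To make the clustering step in this abstract concrete, here is a minimal numpy sketch of soft k-means under a Mahalanobis metric. It is an illustration under simplifying assumptions, not the authors' code: a single regularized task-level covariance stands in for the paper's regularized class-covariance estimates, the neural adaptive feature extractor is skipped, and names such as reg and n_iters are invented for the example.

import numpy as np

def soft_kmeans_mahalanobis(support_feats, support_labels, query_feats,
                            n_iters=5, reg=1.0):
    """Transductive refinement sketch: soft k-means over query features
    under a Mahalanobis metric, with class means initialized from the
    labelled support set. Simplified illustration only."""
    classes = np.unique(support_labels)
    d = support_feats.shape[1]
    # Shared task covariance, regularized so the inverse always exists.
    prec = np.linalg.inv(np.cov(support_feats.T) + reg * np.eye(d))
    mus = np.stack([support_feats[support_labels == c].mean(0) for c in classes])
    for _ in range(n_iters):
        diff = query_feats[:, None, :] - mus[None, :, :]       # (nq, nc, d)
        dists = np.einsum('qcd,de,qce->qc', diff, prec, diff)  # squared Mahalanobis
        resp = np.exp(-0.5 * dists)
        resp /= resp.sum(1, keepdims=True)                     # soft assignments
        for k, c in enumerate(classes):
            # Update each mean using labelled points plus soft-labelled queries.
            w = resp[:, k]
            mask = support_labels == c
            mus[k] = (support_feats[mask].sum(0) + w @ query_feats) / (mask.sum() + w.sum())
    return classes[resp.argmax(1)]

With, say, 5 classes and 64-dimensional features, support_feats is (n_support, 64) and the function returns one predicted class per query row.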
2020 (2)

inproceedings (2)
Attention for Inference Compilation.
Harvey, W.; Munk, A.; Baydin, A. G.; Bergholm, A.; and Wood, F.
In The Second International Conference on Probabilistic Programming (PROBPROG), 2020.
\n\n\n\n \n \n \"Attention paper\n  \n \n \n \"Attention arxiv\n  \n \n \n \"Attention poster\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 10 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{HAR-20,
  title = {Attention for Inference Compilation},
  author = {Harvey, W and Munk, A and Baydin, AG and Bergholm, A and Wood, F},
  booktitle = {The second International Conference on Probabilistic Programming (PROBPROG)},
  year = {2020},
  archiveprefix = {arXiv},
  eprint = {1910.11961},
  support = {D3M,LwLL},
  url_Paper = {https://arxiv.org/pdf/1910.11961.pdf},
  url_ArXiv = {https://arxiv.org/abs/1910.11961},
  url_Poster = {https://github.com/plai-group/bibliography/blob/master/presentations_posters/PROBPROG2020_HAR.pdf},
  abstract = {We present a new approach to automatic amortized inference in universal probabilistic programs which improves performance compared to current methods. Our approach is a variation of inference compilation (IC) which leverages deep neural networks to approximate a posterior distribution over latent variables in a probabilistic program. A challenge with existing IC network architectures is that they can fail to model long-range dependencies between latent variables. To address this, we introduce an attention mechanism that attends to the most salient variables previously sampled in the execution of a probabilistic program. We demonstrate that the addition of attention allows the proposal distributions to better match the true posterior, enhancing inference about latent variables in simulators.}
}
We present a new approach to automatic amortized inference in universal probabilistic programs which improves performance compared to current methods. Our approach is a variation of inference compilation (IC) which leverages deep neural networks to approximate a posterior distribution over latent variables in a probabilistic program. A challenge with existing IC network architectures is that they can fail to model long-range dependencies between latent variables. To address this, we introduce an attention mechanism that attends to the most salient variables previously sampled in the execution of a probabilistic program. We demonstrate that the addition of attention allows the proposal distributions to better match the true posterior, enhancing inference about latent variables in simulators.
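The mechanism described here attends over embeddings of previously sampled latent variables. The sketch below shows only the standard scaled dot-product attention building block in numpy; how samples and addresses are embedded, and the rest of the inference-compilation network, are assumptions elided from this illustration and not the paper's architecture.

import numpy as np

def attend(query, keys, values):
    """Scaled dot-product attention over previously sampled latent
    variables: `query` encodes the current sampling site, and each row of
    `keys`/`values` encodes one earlier sample in the program execution.
    Generic sketch; the embeddings themselves are model-specific."""
    scores = keys @ query / np.sqrt(query.shape[0])  # (n_prev,)
    weights = np.exp(scores - scores.max())
    weights /= weights.sum()                         # softmax over prior samples
    return weights @ values                          # context for the proposal net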
Improved Few-Shot Visual Classification.
Bateni, P.; Goyal, R.; Masrani, V.; Wood, F.; and Sigal, L.
In Conference on Computer Vision and Pattern Recognition (CVPR), 2020.
\n\n\n\n \n \n \"Improved link\n  \n \n \n \"Improved paper\n  \n \n \n \"Improved arxiv\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 10 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{BAT-20,
  author = {{Bateni}, Peyman and {Goyal}, Raghav and {Masrani}, Vaden and {Wood}, Frank and {Sigal}, Leonid},
  title = {Improved Few-Shot Visual Classification},
  booktitle = {Conference on Computer Vision and Pattern Recognition (CVPR)},
  keywords = {LwLL, Computer Science - Computer Vision and Pattern Recognition},
  year = {2020},
  eid = {arXiv:1912.03432},
  archivePrefix = {arXiv},
  eprint = {1912.03432},
  support = {D3M,LwLL},
  url_Link = {https://openaccess.thecvf.com/content_CVPR_2020/html/Bateni_Improved_Few-Shot_Visual_Classification_CVPR_2020_paper.html},
  url_Paper = {http://openaccess.thecvf.com/content_CVPR_2020/papers/Bateni_Improved_Few-Shot_Visual_Classification_CVPR_2020_paper.pdf},
  url_ArXiv = {https://arxiv.org/abs/1912.03432},
  abstract = {Few-shot learning is a fundamental task in computer vision that carries the promise of alleviating the need for exhaustively labeled data. Most few-shot learning approaches to date have focused on progressively more complex neural feature extractors and classifier adaptation strategies, as well as the refinement of the task definition itself. In this paper, we explore the hypothesis that a simple class-covariance-based distance metric, namely the Mahalanobis distance, adopted into a state of the art few-shot learning approach (CNAPS) can, in and of itself, lead to a significant performance improvement. We also discover that it is possible to learn adaptive feature extractors that allow useful estimation of the high dimensional feature covariances required by this metric from surprisingly few samples. The result of our work is a new "Simple CNAPS" architecture which has up to 9.2% fewer trainable parameters than CNAPS and performs up to 6.1% better than state of the art on the standard few-shot image classification benchmark dataset.}
}
Few-shot learning is a fundamental task in computer vision that carries the promise of alleviating the need for exhaustively labeled data. Most few-shot learning approaches to date have focused on progressively more complex neural feature extractors and classifier adaptation strategies, as well as the refinement of the task definition itself. In this paper, we explore the hypothesis that a simple class-covariance-based distance metric, namely the Mahalanobis distance, adopted into a state of the art few-shot learning approach (CNAPS) can, in and of itself, lead to a significant performance improvement. We also discover that it is possible to learn adaptive feature extractors that allow useful estimation of the high dimensional feature covariances required by this metric from surprisingly few samples. The result of our work is a new "Simple CNAPS" architecture which has up to 9.2% fewer trainable parameters than CNAPS and performs up to 6.1% better than state of the art on the standard few-shot image classification benchmark dataset.
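The class-covariance-based metric this abstract refers to is the squared Mahalanobis distance. Below is a minimal numpy sketch of classifying with it, assuming per-class means and covariances have already been estimated from adapted features; the paper's specific blending of class- and task-level covariance estimates is omitted, and reg is an invented stand-in for that regularization.

import numpy as np

def mahalanobis_classify(query_feats, class_means, class_covs, reg=1.0):
    """Assign each query to the class with the smallest squared Mahalanobis
    distance (x - mu)^T inv(Sigma) (x - mu). Illustrative sketch; `reg * I`
    keeps few-shot covariance estimates invertible."""
    d = query_feats.shape[1]
    dists = []
    for mu, cov in zip(class_means, class_covs):
        prec = np.linalg.inv(cov + reg * np.eye(d))
        diff = query_feats - mu                          # (n_query, d)
        dists.append(np.einsum('nd,de,ne->n', diff, prec, diff))
    return np.argmin(np.stack(dists, axis=1), axis=1)    # class index per query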
2019 (1)

inproceedings (1)
Near-Optimal Glimpse Sequences for Improved Hard Attention Neural Network Training.
Harvey, W.; Teng, M.; and Wood, F.
In NeurIPS Workshop on Bayesian Deep Learning, 2019.
\n\n\n\n \n \n \"Near-Optimal paper\n  \n \n \n \"Near-Optimal arxiv\n  \n \n \n \"Near-Optimal poster\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{HAR-19,
  title = {Near-Optimal Glimpse Sequences for Improved Hard Attention Neural Network Training},
  author = {Harvey, William and Teng, Michael and Wood, Frank},
  booktitle = {NeurIPS Workshop on Bayesian Deep Learning},
  year = {2019},
  support = {D3M,LwLL},
  archiveprefix = {arXiv},
  eprint = {1906.05462},
  url_Paper = {http://bayesiandeeplearning.org/2019/papers/38.pdf},
  url_ArXiv = {https://arxiv.org/abs/1906.05462},
  url_Poster = {https://github.com/plai-group/bibliography/blob/master/presentations_posters/HAR-19.pdf},
  abstract = {We introduce the use of Bayesian optimal experimental design techniques for generating glimpse sequences to use in semi-supervised training of hard attention networks. Hard attention holds the promise of greater energy efficiency and superior inference performance. Employing such networks for image classification usually involves choosing a sequence of glimpse locations from a stochastic policy. As the outputs of observations are typically non-differentiable with respect to their glimpse locations, unsupervised gradient learning of such a policy requires REINFORCE-style updates. Also, the only reward signal is the final classification accuracy. For these reasons hard attention networks, despite their promise, have not achieved the wide adoption that soft attention networks have and, in many practical settings, are difficult to train. We find that our method for semi-supervised training makes it easier and faster to train hard attention networks and correspondingly could make them practical to consider in situations where they were not before.}
}
We introduce the use of Bayesian optimal experimental design techniques for generating glimpse sequences to use in semi-supervised training of hard attention networks. Hard attention holds the promise of greater energy efficiency and superior inference performance. Employing such networks for image classification usually involves choosing a sequence of glimpse locations from a stochastic policy. As the outputs of observations are typically non-differentiable with respect to their glimpse locations, unsupervised gradient learning of such a policy requires REINFORCE-style updates. Also, the only reward signal is the final classification accuracy. For these reasons hard attention networks, despite their promise, have not achieved the wide adoption that soft attention networks have and, in many practical settings, are difficult to train. We find that our method for semi-supervised training makes it easier and faster to train hard attention networks and correspondingly could make them practical to consider in situations where they were not before.
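Bayesian optimal experimental design, as invoked in this abstract, amounts to choosing at each step the glimpse location with the highest expected information gain about the label. Here is a toy discrete sketch of that selection rule; the likelihoods table is a hypothetical stand-in for the model, whereas the paper works with images and neural networks rather than an explicit table.

import numpy as np

def entropy(p):
    """Shannon entropy along the last axis, safe at p = 0."""
    p = np.clip(p, 1e-12, 1.0)
    return -(p * np.log(p)).sum(-1)

def best_glimpse(prior, likelihoods):
    """Pick the glimpse location maximizing expected information gain about
    the label. Toy model: prior[y] = p(y) and
    likelihoods[l, o, y] = p(observation o at location l | label y)."""
    n_loc = likelihoods.shape[0]
    eig = np.empty(n_loc)
    for l in range(n_loc):
        joint = likelihoods[l] * prior               # (n_obs, n_lab)
        p_obs = joint.sum(axis=1)                    # marginal p(o)
        post = joint / np.clip(p_obs[:, None], 1e-12, None)
        # EIG = prior entropy - expected posterior entropy over outcomes.
        eig[l] = entropy(prior) - (p_obs * entropy(post)).sum()
    return int(eig.argmax())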