Generated by bibbase.org.
2018 (3)

Batched Large-scale Bayesian Optimization in High-dimensional Spaces. Wang, Z.; Gehring, C.; Kohli, P.; and Jegelka, S. In International Conference on Artificial Intelligence and Statistics (AISTATS), 2018.

@InProceedings{wang18aistats,
  author    = {Zi Wang and Clement Gehring and Pushmeet Kohli and Stefanie Jegelka},
  title     = {Batched Large-scale Bayesian Optimization in High-dimensional Spaces},
  booktitle = {International Conference on Artificial Intelligence and Statistics (AISTATS)},
  year      = 2018,
  url_pdf   = {https://arxiv.org/abs/1706.01445}
}

Structured Optimal Transport. Alvarez-Melis, D.; Jaakkola, T.; and Jegelka, S. In International Conference on Artificial Intelligence and Statistics (AISTATS), 2018.

@InProceedings{alvarezM18,
  author    = {D. Alvarez-Melis and T. Jaakkola and S. Jegelka},
  title     = {Structured Optimal Transport},
  booktitle = {International Conference on Artificial Intelligence and Statistics (AISTATS)},
  year      = 2018,
  url_pdf   = {https://arxiv.org/abs/1712.06199}
}

Streaming Non-monotone Submodular Maximization: Personalized Video Summarization on the Fly. Mirzasoleiman, B.; Jegelka, S.; and Krause, A. In AAAI Conference on Artificial Intelligence (AAAI), 2018.

@InProceedings{mirzasoleiman18,
  author    = {Baharan Mirzasoleiman and Stefanie Jegelka and Andreas Krause},
  title     = {Streaming Non-monotone Submodular Maximization: Personalized Video Summarization on the Fly},
  booktitle = {AAAI Conference on Artificial Intelligence (AAAI)},
  url_pdf   = {https://arxiv.org/abs/1706.03583},
  year      = 2018
}

2017 (35)

Causal Effect Inference with Deep Latent-Variable Models. Louizos, C.; Shalit, U.; Mooij, J.; Sontag, D.; Zemel, R. S.; and Welling, M. In Proceedings of the 31st International Conference on Neural Information Processing Systems (NIPS), NIPS'17, 2017.

@inproceedings{LouizosEtAl_nips17,
  author    = {Christos Louizos and Uri Shalit and Joris Mooij and David Sontag and Richard S. Zemel and Max Welling},
  title     = {Causal Effect Inference with Deep Latent-Variable Models},
  booktitle = {Proceedings of the 31st International Conference on Neural Information Processing Systems (NIPS)},
  series    = {NIPS'17},
  year      = {2017},
  keywords  = {Machine learning, Causal inference, Deep learning},
  url_Paper = {https://arxiv.org/pdf/1705.08821.pdf},
  abstract  = {Learning individual-level causal effects from observational data, such as inferring the most effective medication for a specific patient, is a problem of growing importance for policy makers. The most important aspect of inferring causal effects from observational data is the handling of confounders, factors that affect both an intervention and its outcome. A carefully designed observational study attempts to measure all important confounders. However, even if one does not have direct access to all confounders, there may exist noisy and uncertain measurement of proxies for confounders. We build on recent advances in latent variable modelling to simultaneously estimate the unknown latent space summarizing the confounders and the causal effect. Our method is based on Variational Autoencoders (VAE) which follow the causal structure of inference with proxies. We show our method is significantly more robust than existing methods, and matches the state-of-the-art on previous benchmarks focused on individual treatment effects.}
}

Estimating individual treatment effect: generalization bounds and algorithms. Shalit, U.; Johansson, F. D.; and Sontag, D. In Proceedings of the 34th International Conference on Machine Learning (ICML), pages 3076–3085, 2017.

@inproceedings{ShalitEtAl_icml17,
  author    = {Uri Shalit and Fredrik D. Johansson and David Sontag},
  title     = {Estimating individual treatment effect: generalization bounds and algorithms},
  booktitle = {Proceedings of the 34th International Conference on Machine Learning (ICML)},
  pages     = {3076--3085},
  year      = {2017},
  keywords  = {Machine learning, Causal inference, Deep learning},
  url_Paper = {http://arxiv.org/pdf/1606.03976.pdf},
  abstract  = {There is intense interest in applying machine learning to problems of causal inference in fields such as healthcare, economics and education. In particular, individual-level causal inference has important applications such as precision medicine. We give a new theoretical analysis and family of algorithms for predicting individual treatment effect (ITE) from observational data, under the assumption known as strong ignorability. The algorithms learn a "balanced" representation such that the induced treated and control distributions look similar. We give a novel, simple and intuitive generalization-error bound showing that the expected ITE estimation error of a representation is bounded by a sum of the standard generalization-error of that representation and the distance between the treated and control distributions induced by the representation. We use Integral Probability Metrics to measure distances between distributions, deriving explicit bounds for the Wasserstein and Maximum Mean Discrepancy (MMD) distances. Experiments on real and simulated data show the new algorithms match or outperform the state-of-the-art.}
}

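The Shalit, Johansson, and Sontag abstract above bounds ITE estimation error by the distance between the treated and control representation distributions, measured with an integral probability metric such as the MMD. As a reading aid only, here is a minimal NumPy sketch of a biased RBF-kernel squared-MMD estimate between two sets of representations; the function names, kernel choice, and bandwidth are illustrative assumptions, not code from the paper.

import numpy as np

def rbf_kernel(X, Y, sigma=1.0):
    # Pairwise RBF kernel values between rows of X and rows of Y.
    sq_dists = (
        np.sum(X**2, axis=1)[:, None]
        + np.sum(Y**2, axis=1)[None, :]
        - 2.0 * X @ Y.T
    )
    return np.exp(-sq_dists / (2.0 * sigma**2))

def mmd2(phi_treated, phi_control, sigma=1.0):
    # Biased estimate of squared MMD between treated and control representations.
    k_tt = rbf_kernel(phi_treated, phi_treated, sigma)
    k_cc = rbf_kernel(phi_control, phi_control, sigma)
    k_tc = rbf_kernel(phi_treated, phi_control, sigma)
    return k_tt.mean() + k_cc.mean() - 2.0 * k_tc.mean()

# Toy usage with hypothetical learned representations Phi(x) for treated and control units.
rng = np.random.default_rng(0)
phi_t = rng.normal(loc=0.5, scale=1.0, size=(100, 8))
phi_c = rng.normal(loc=0.0, scale=1.0, size=(120, 8))
print(mmd2(phi_t, phi_c))  # larger values indicate less balanced representations

In the paper's framework a term like this would be weighted and added to a standard outcome-prediction loss; that weighting and the training loop are omitted here.
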
Simultaneous Learning of Trees and Representations for Extreme Classification and Density Estimation. Jernite, Y.; Choromanska, A.; and Sontag, D. In Proceedings of the 34th International Conference on Machine Learning (ICML), pages 1665–1674, 2017.

@inproceedings{JerniteEtAl_icml17,
  author    = {Yacine Jernite and Anna Choromanska and David Sontag},
  title     = {Simultaneous Learning of Trees and Representations for Extreme Classification and Density Estimation},
  booktitle = {Proceedings of the 34th International Conference on Machine Learning (ICML)},
  pages     = {1665--1674},
  year      = {2017},
  keywords  = {Machine learning, Natural language processing, Deep learning},
  url_Paper = {https://arxiv.org/pdf/1610.04658.pdf},
  abstract  = {We consider multi-class classification where the predictor has a hierarchical structure that allows for a very large number of labels both at train and test time. The predictive power of such models can heavily depend on the structure of the tree, and although past work showed how to learn the tree structure, it expected that the feature vectors remained static. We provide a novel algorithm to simultaneously perform representation learning for the input data and learning of the hierarchical predictor. Our approach optimizes an objective function which favors balanced and easily-separable multi-way node partitions. We theoretically analyze this objective, showing that it gives rise to a boosting style property and a bound on classification error. We next show how to extend the algorithm to conditional density estimation. We empirically validate both variants of the algorithm on text classification and language modeling, respectively, and show that they compare favorably to common baselines in terms of accuracy and running time.}
}

Structured Inference Networks for Nonlinear State Space Models. Krishnan, R. G.; Shalit, U.; and Sontag, D. In Proceedings of the Thirty-First AAAI Conference on Artificial Intelligence, pages 2101–2109, 2017.

@inproceedings{KrishnanEtAl_aaai17,
  author    = {Rahul G. Krishnan and Uri Shalit and David Sontag},
  title     = {Structured Inference Networks for Nonlinear State Space Models},
  booktitle = {Proceedings of the Thirty-First {AAAI} Conference on Artificial Intelligence},
  pages     = {2101--2109},
  year      = {2017},
  keywords  = {Machine learning, Unsupervised learning, Deep learning, Health care, Approximate inference in graphical models},
  url_Paper = {https://arxiv.org/pdf/1609.09869.pdf},
  abstract  = {Gaussian state space models have been used for decades as generative models of sequential data. They admit an intuitive probabilistic interpretation, have a simple functional form, and enjoy widespread adoption. We introduce a unified algorithm to efficiently learn a broad class of linear and non-linear state space models, including variants where the emission and transition distributions are modeled by deep neural networks. Our learning algorithm simultaneously learns a compiled inference network and the generative model, leveraging a structured variational approximation parameterized by recurrent neural networks to mimic the posterior distribution. We apply the learning algorithm to both synthetic and real-world datasets, demonstrating its scalability and versatility. We find that using the structured approximation to the posterior results in models with significantly higher held-out likelihood.}
}

Aspect-augmented Adversarial Networks for Domain Adaptation. Zhang, Y.; Barzilay, R.; and Jaakkola, T. Transactions of the Association for Computational Linguistics (TACL). 2017.

@article{Zhang_etal-TACL2017,
  title   = {Aspect-augmented Adversarial Networks for Domain Adaptation},
  author  = {Y. Zhang and R. Barzilay and T. Jaakkola},
  journal = {Transactions of the Association for Computational Linguistics (TACL)},
  year    = {2017},
  url_pdf = {https://people.csail.mit.edu/tommi/papers/Zhang_etal-TACL17.pdf}
}

A causal framework for explaining the predictions of black-box sequence-to-sequence models. Alvarez Melis, D.; and Jaakkola, T. In Empirical Methods in Natural Language Processing (EMNLP), 2017.

@InProceedings{AlvJaa_EMNLP2017,
  author    = {D. Alvarez Melis and T. Jaakkola},
  title     = {A causal framework for explaining the predictions of black-box sequence-to-sequence models},
  booktitle = {Empirical Methods in Natural Language Processing (EMNLP)},
  year      = {2017},
  url_pdf   = {https://people.csail.mit.edu/tommi/papers/AlvJaa_EMNLP2017.pdf}
}

Deriving Neural Architectures from Sequence and Graph Kernels. Lei, T.; Jin, W.; Barzilay, R.; and Jaakkola, T. In International Conference on Machine Learning (ICML), 2017.

@InProceedings{Lei_etal_ICML2017,
  author    = {T. Lei and W. Jin and R. Barzilay and T. Jaakkola},
  title     = {Deriving Neural Architectures from Sequence and Graph Kernels},
  booktitle = {International Conference on Machine Learning (ICML)},
  year      = {2017},
  url_pdf   = {https://people.csail.mit.edu/tommi/papers/Lei_etal_ICML2017.pdf}
}

Sequence to Better Sequence: Continuous Revision of Combinatorial Structures. Mueller, J.; Gifford, D.; and Jaakkola, T. In International Conference on Machine Learning (ICML), 2017.

@InProceedings{Mueller_etal_ICML2017,
  author    = {J. Mueller and D. Gifford and T. Jaakkola},
  title     = {Sequence to Better Sequence: Continuous Revision of Combinatorial Structures},
  booktitle = {International Conference on Machine Learning (ICML)},
  year      = {2017},
  url_pdf   = {https://people.csail.mit.edu/tommi/papers/seq2betterseqICML17.pdf}
}

Learning Sleep Stages from Radio Signals: A Conditional Adversarial Architecture. Zhao, M.; Yue, S.; Katabi, D.; Jaakkola, T.; and Bianchi, M. In International Conference on Machine Learning (ICML), 2017.

@InProceedings{Zhao_etal_ICML2017,
  author    = {M. Zhao and S. Yue and D. Katabi and T. Jaakkola and M. Bianchi},
  title     = {Learning Sleep Stages from Radio Signals: A Conditional Adversarial Architecture},
  booktitle = {International Conference on Machine Learning (ICML)},
  year      = {2017},
  url_pdf   = {https://people.csail.mit.edu/tommi/papers/Zhao_etal_ICML2017.pdf}
}

Modeling Persistent Trends in Distributions. Mueller, J.; Jaakkola, T.; and Gifford, D. Journal of the American Statistical Association. 2017.

@article{Mueller_etal_JASA_2017,
  author  = {J. Mueller and T. Jaakkola and D. Gifford},
  title   = {Modeling Persistent Trends in Distributions},
  journal = {Journal of the American Statistical Association},
  year    = {2017},
  url_pdf = {https://people.csail.mit.edu/tommi/papers/persistentTrends.pdf}
}

Prediction of Organic Reaction Outcomes Using Machine Learning. Coley, C. W.; Barzilay, R.; Jaakkola, T.; Green, W. H.; and Jensen, K. F. ACS Central Science. 2017.

@article{Connor_etal_ACS_2017,
  title   = {Prediction of Organic Reaction Outcomes Using Machine Learning},
  author  = {C. W. Coley and R. Barzilay and T. Jaakkola and W. H. Green and K. F. Jensen},
  journal = {ACS Central Science},
  doi     = {10.1021/acscentsci.7b00064},
  year    = {2017},
  url_pdf = {https://people.csail.mit.edu/tommi/papers/Connor_etal_ACS_2017.pdf}
}

Tree Structured Decoding with Doubly Recurrent Neural Networks. Alvarez-Melis, D.; and Jaakkola, T. In International Conference on Learning Representations (ICLR), 2017.

@InProceedings{Alvarez_etal_ICLR2017,
  author    = {D. Alvarez-Melis and T. Jaakkola},
  title     = {Tree Structured Decoding with Doubly Recurrent Neural Networks},
  booktitle = {International Conference on Learning Representations (ICLR)},
  year      = {2017},
  url_pdf   = {https://people.csail.mit.edu/tommi/papers/AlvJaa_ICLR2017.pdf}
}

Learning Optimal Interventions. Mueller, J.; Reshef, D.; Du, G.; and Jaakkola, T. In Artificial Intelligence and Statistics (AISTATS), 2017.

@InProceedings{Mueller_etal_aistats2017,
  author    = {J. Mueller and D. Reshef and G. Du and T. Jaakkola},
  title     = {Learning Optimal Interventions},
  booktitle = {Artificial Intelligence and Statistics (AISTATS)},
  year      = {2017},
  url_pdf   = {https://people.csail.mit.edu/tommi/papers/Mueller_etal_aistats2017.pdf}
}

Focused Model-Learning and Planning for Non-Gaussian Continuous State-Action Systems. Wang, Z.; Jegelka, S.; Kaelbling, L. P.; and Lozano-Perez, T. In IEEE International Conference on Robotics and Automation (ICRA), 2017.

@InProceedings{wang17icra,
  author    = {Zi Wang and Stefanie Jegelka and Leslie Pack Kaelbling and Tomas Lozano-Perez},
  title     = {Focused Model-Learning and Planning for Non-Gaussian Continuous State-Action Systems},
  booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
  url_pdf   = {http://arxiv.org/abs/1607.07762},
  year      = 2017
}

Multiple wavelength sensing array design. Shulkind, G.; Jegelka, S.; and Wornell, G. W. In ICASSP, 2017.

@InProceedings{shulkind17,
  author    = {Gal Shulkind and Stefanie Jegelka and G. W. Wornell},
  title     = {Multiple wavelength sensing array design},
  booktitle = {ICASSP},
  year      = 2017
}

Deep Metric Learning via Facility Location. Song, H. O.; Jegelka, S.; Rathod, V.; and Murphy, K. In International Conference on Computer Vision and Pattern Recognition (CVPR), 2017.

@InProceedings{song17,
  author    = {Hyun Oh Song and Stefanie Jegelka and Vivek Rathod and Kevin Murphy},
  title     = {Deep Metric Learning via Facility Location},
  booktitle = {International Conference on Computer Vision and Pattern Recognition (CVPR)},
  year      = 2017,
  url_pdf   = {http://openaccess.thecvf.com/content_cvpr_2017/papers/Song_Deep_Metric_Learning_CVPR_2017_paper.pdf}
}

Robust Budget Allocation via Continuous Submodular Functions. Staib, M.; and Jegelka, S. In International Conference on Machine Learning (ICML), 2017.

@InProceedings{staib17,
  author    = {Matthew Staib and Stefanie Jegelka},
  title     = {Robust Budget Allocation via Continuous Submodular Functions},
  booktitle = {International Conference on Machine Learning (ICML)},
  year      = 2017,
  url_pdf   = {http://proceedings.mlr.press/v70/staib17a/staib17a.pdf}
}

Max-value entropy search for efficient Bayesian Optimization. Wang, Z.; and Jegelka, S. In International Conference on Machine Learning (ICML), 2017.

@InProceedings{wang17mes,
  author    = {Zi Wang and Stefanie Jegelka},
  title     = {Max-value entropy search for efficient Bayesian Optimization},
  booktitle = {International Conference on Machine Learning (ICML)},
  year      = 2017,
  url_pdf   = {http://proceedings.mlr.press/v70/wang17e/wang17e.pdf}
}

Batched High-dimensional Bayesian Optimization via Structural Kernel Learning. Wang, Z.; Li, C.; Jegelka, S.; and Kohli, P. In International Conference on Machine Learning (ICML), 2017.

@InProceedings{wangLi17,
  author    = {Zi Wang and Chengtao Li and Stefanie Jegelka and Pushmeet Kohli},
  title     = {Batched High-dimensional Bayesian Optimization via Structural Kernel Learning},
  booktitle = {International Conference on Machine Learning (ICML)},
  year      = 2017,
  url_pdf   = {http://proceedings.mlr.press/v70/wang17h/wang17h.pdf}
}

Wasserstein k-means++ for Cloud Regime Histogram Clustering. Staib, M.; and Jegelka, S. In Climate Informatics, 2017.

@InProceedings{staib17ci,
  author    = {Matthew Staib and Stefanie Jegelka},
  title     = {Wasserstein k-means++ for Cloud Regime Histogram Clustering},
  booktitle = {Climate Informatics},
  year      = 2017
}

Parallel Streaming Wasserstein Barycenters. Staib, M.; Claici, S.; Solomon, J.; and Jegelka, S. In Advances in Neural Information Processing Systems (NIPS), 2017.

@InProceedings{staibCSJ17,
  author    = {Matthew Staib and Sebastian Claici and Justin Solomon and Stefanie Jegelka},
  title     = {Parallel Streaming {W}asserstein Barycenters},
  booktitle = {Advances in Neural Information Processing Systems (NIPS)},
  url_pdf   = {https://arxiv.org/abs/1705.07443},
  year      = 2017
}

Polynomial Time Algorithms for Dual Volume Sampling. Li, C.; Jegelka, S.; and Sra, S. In Advances in Neural Information Processing Systems (NIPS), 2017.

@InProceedings{li17dual,
  author    = {Chengtao Li and Stefanie Jegelka and Suvrit Sra},
  title     = {Polynomial Time Algorithms for Dual Volume Sampling},
  booktitle = {Advances in Neural Information Processing Systems (NIPS)},
  url_pdf   = {https://arxiv.org/abs/1703.02674},
  year      = 2017
}

Virtual screening of inorganic materials synthesis parameters with deep learning. Kim, E.; Huang, K.; Jegelka, S.; and Olivetti, E. npj Computational Materials, 3(53). 2017.

@Article{kim17,
  author  = {Edward Kim and Kevin Huang and Stefanie Jegelka and Elsa Olivetti},
  title   = {Virtual screening of inorganic materials synthesis parameters with deep learning},
  journal = {npj Computational Materials},
  year    = 2017,
  volume  = 3,
  number  = 53,
  url_pdf = {https://www.nature.com/articles/s41524-017-0055-6}
}

Logarithmic inequalities under a symmetric polynomial dominance order. Sra, S. Proceedings American Mathematical Society (PAMS). Oct 2017. Accepted.

@Article{sra.esym,
  author  = {Suvrit Sra},
  title   = {Logarithmic inequalities under a symmetric polynomial dominance order},
  journal = {Proceedings American Mathematical Society (PAMS)},
  year    = 2017,
  month   = {Oct},
  note    = {{\it Accepted.}}
}

Elementary symmetric polynomials for optimal experimental design. Mariet, Z.; and Sra, S. In Advances in Neural Information Processing Systems (NIPS), 2017.

@InProceedings{marietSra17b,
  author    = {Zelda Mariet and Suvrit Sra},
  title     = {Elementary symmetric polynomials for optimal experimental design},
  booktitle = {Advances in Neural Information Processing Systems (NIPS)},
  url_pdf   = {https://arxiv.org/abs/1705.09677},
  year      = 2017
}

Combinatorial topic modeling using small variance asymptotics. Jiang, K.; Sra, S.; and Kulis, B. In Artificial Intelligence and Statistics (AISTATS), 2017.

@InProceedings{sraKulis,
  author    = {Ke Jiang and Suvrit Sra and Brian Kulis},
  title     = {Combinatorial topic modeling using small variance asymptotics},
  booktitle = {Artificial Intelligence and Statistics (AISTATS)},
  year      = {2017},
  url_pdf   = {http://arxiv.org/abs/1604.02027}
}

Size-Independent Sample Complexity of Neural Networks. Golowich, N.; Rakhlin, A.; and Shamir, O. arXiv preprint arXiv:1712.06541. 2017.

@article{golowich2017size,
  title   = {Size-Independent Sample Complexity of Neural Networks},
  author  = {Golowich, Noah and Rakhlin, Alexander and Shamir, Ohad},
  journal = {arXiv preprint arXiv:1712.06541},
  year    = {2017}
}

Fisher-Rao metric, geometry, and complexity of neural networks. Liang, T.; Poggio, T.; Rakhlin, A.; and Stokes, J. arXiv preprint arXiv:1711.01530. 2017.

@article{liang2017fisher,
  title   = {Fisher-Rao metric, geometry, and complexity of neural networks},
  author  = {Liang, Tengyuan and Poggio, Tomaso and Rakhlin, Alexander and Stokes, James},
  journal = {arXiv preprint arXiv:1711.01530},
  year    = {2017}
}

Weighted Message Passing and Minimum Energy Flow for Heterogeneous Stochastic Block Models with Side Information. Cai, T. T.; Liang, T.; and Rakhlin, A. arXiv preprint arXiv:1709.03907. 2017.

@article{cai2017weighted,
  title   = {Weighted Message Passing and Minimum Energy Flow for Heterogeneous Stochastic Block Models with Side Information},
  author  = {Cai, T. Tony and Liang, Tengyuan and Rakhlin, Alexander},
  journal = {arXiv preprint arXiv:1709.03907},
  year    = {2017}
}

Non-convex learning via Stochastic Gradient Langevin Dynamics: a nonasymptotic analysis. Raginsky, M.; Rakhlin, A.; and Telgarsky, M. In COLT, 2017.

@InProceedings{RagRakTel17,
  title     = {Non-convex learning via Stochastic Gradient Langevin Dynamics: a nonasymptotic analysis},
  author    = {Maxim Raginsky and Alexander Rakhlin and Matus Telgarsky},
  booktitle = {COLT},
  year      = {2017}
}

ZigZag: A New Approach to Adaptive Online Learning. Foster, D. J.; Rakhlin, A.; and Sridharan, K. In COLT, 2017.

@InProceedings{FosRakSri17a,
  title     = {ZigZag: A New Approach to Adaptive Online Learning},
  author    = {Dylan J. Foster and Alexander Rakhlin and Karthik Sridharan},
  booktitle = {COLT},
  year      = {2017}
}

On Equivalence of Martingale Tail Bounds and Deterministic Regret Inequalities. Rakhlin, A.; and Sridharan, K. In COLT, 2017.

@InProceedings{RakSri17a,
  title     = {On Equivalence of Martingale Tail Bounds and Deterministic Regret Inequalities},
  author    = {Alexander Rakhlin and Karthik Sridharan},
  booktitle = {COLT},
  year      = {2017}
}

Efficient Online Multiclass Prediction on Graphs via Surrogate Losses. Rakhlin, A.; and Sridharan, K. In Artificial Intelligence and Statistics, 2017.

@inproceedings{rakhlin2017efficient,
  title     = {Efficient Online Multiclass Prediction on Graphs via Surrogate Losses},
  author    = {Rakhlin, Alexander and Sridharan, Karthik},
  booktitle = {Artificial Intelligence and Statistics},
  year      = {2017}
}

On detection and structural reconstruction of small-world random networks. Cai, T.; Liang, T.; and Rakhlin, A. IEEE Transactions on Network Science and Engineering, 4(3): 165–176. 2017.

@article{cai2017detection,
  title   = {On detection and structural reconstruction of small-world random networks},
  author  = {Cai, Tony and Liang, Tengyuan and Rakhlin, Alexander},
  journal = {IEEE Transactions on Network Science and Engineering},
  volume  = {4},
  number  = {3},
  pages   = {165--176},
  year    = {2017}
}

Computational and statistical boundaries for submatrix localization in a large noisy matrix. Cai, T. T.; Liang, T.; Rakhlin, A.; and others. The Annals of Statistics, 45(4): 1403–1430. 2017.

@article{cai2017computational,
  title   = {Computational and statistical boundaries for submatrix localization in a large noisy matrix},
  author  = {Cai, T. Tony and Liang, Tengyuan and Rakhlin, Alexander and others},
  journal = {The Annals of Statistics},
  volume  = {45},
  number  = {4},
  pages   = {1403--1430},
  year    = {2017}
}

2016 (46)

Learning Tree Structured Potential Games. Garg, V.; and Jaakkola, T. In Advances in Neural Information Processing Systems (NIPS), 2016.

@InProceedings{GarJaa-nips2016,
  author    = {V. Garg and T. Jaakkola},
  title     = {Learning Tree Structured Potential Games},
  booktitle = {Advances in Neural Information Processing Systems (NIPS)},
  year      = {2016},
  url_pdf   = {https://people.csail.mit.edu/tommi/papers/Garg_Jaakkola_NIPS2016.pdf}
}

Rationalizing Neural Predictions. Lei, T.; Barzilay, R.; and Jaakkola, T. In Empirical Methods in Natural Language Processing (EMNLP), 2016.

@InProceedings{Leietal-EMNLP2016,
  author    = {T. Lei and R. Barzilay and T. Jaakkola},
  title     = {Rationalizing Neural Predictions},
  booktitle = {Empirical Methods in Natural Language Processing (EMNLP)},
  year      = {2016},
  url_pdf   = {https://people.csail.mit.edu/tommi/papers/Lei_etal_EMNLP2016.pdf}
}

Structured Prediction: From Gaussian Perturbations to Linear-Time Principled Algorithms. Honorio, J.; and Jaakkola, T. In Uncertainty in Artificial Intelligence (UAI), 2016.

@InProceedings{Honetal-UAI2016,
  title     = {Structured Prediction: From Gaussian Perturbations to Linear-Time Principled Algorithms},
  author    = {J. Honorio and T. Jaakkola},
  booktitle = {Uncertainty in Artificial Intelligence (UAI)},
  year      = {2016},
  url_pdf   = {https://people.csail.mit.edu/tommi/papers/HonJaa-UAI2016.pdf}
}

Word embeddings as metric recovery in semantic spaces. Hashimoto, T.; Alvarez-Melis, D.; and Jaakkola, T. Transactions of the Association for Computational Linguistics (TACL), 4. 2016.

@article{Hasetal-TACL2016,
  title   = {Word embeddings as metric recovery in semantic spaces},
  author  = {T. Hashimoto and D. Alvarez-Melis and T. Jaakkola},
  journal = {Transactions of the Association for Computational Linguistics (TACL)},
  volume  = {4},
  year    = {2016},
  url_pdf = {https://people.csail.mit.edu/tommi/papers/HasAlvJaa-TACL16.pdf}
}

Learning population-level diffusions with generative RNNs. Hashimoto, T.; Jaakkola, T.; and Gifford, D. In International Conference on Machine Learning (ICML), 2016.

@InProceedings{Hasetal-icml2016,
  author    = {T. Hashimoto and T. Jaakkola and D. Gifford},
  title     = {Learning population-level diffusions with generative {RNN}s},
  year      = {2016},
  booktitle = {International Conference on Machine Learning (ICML)},
  url_pdf   = {https://people.csail.mit.edu/tommi/papers/hashimoto16.pdf}
}

Ten Pairs to Tag – Multilingual POS Tagging via Coarse Mapping between Embeddings. Zhang, Y.; Gaddy, D.; Barzilay, R.; and Jaakkola, T. In The 15th Annual Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL), 2016.

@InProceedings{Zhangetal-NAACL16,
  title     = {Ten Pairs to Tag -- Multilingual POS Tagging via Coarse Mapping between Embeddings},
  author    = {Y. Zhang and D. Gaddy and R. Barzilay and T. Jaakkola},
  year      = {2016},
  booktitle = {The 15th Annual Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL)},
  url_pdf   = {https://people.csail.mit.edu/tommi/papers/Zhangetal_naacl16.pdf}
}

Semi-supervised Question Retrieval with Gated Convolutions. Lei, T.; Joshi, H.; Barzilay, R.; Jaakkola, T.; Tymoshenko, K.; Moschitti, A.; and Marquez, L. In The 15th Annual Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL), 2016.

@InProceedings{Leietal-NAACL16,
  title     = {Semi-supervised Question Retrieval with Gated Convolutions},
  author    = {T. Lei and H. Joshi and R. Barzilay and T. Jaakkola and K. Tymoshenko and A. Moschitti and L. Marquez},
  year      = 2016,
  booktitle = {The 15th Annual Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL)},
  url_pdf   = {https://people.csail.mit.edu/tommi/papers/Leietal_naacl16.pdf}
}

CRAFT: ClusteR-specific Assorted Feature selecTion. Garg, V.; Rudin, C.; and Jaakkola, T. In Artificial Intelligence and Statistics (AISTATS), 2016.

@InProceedings{Gargetal-aistats2016,
  title     = {CRAFT: ClusteR-specific Assorted Feature selecTion},
  author    = {V. Garg and C. Rudin and T. Jaakkola},
  booktitle = {Artificial Intelligence and Statistics (AISTATS)},
  year      = {2016},
  url_pdf   = {https://people.csail.mit.edu/tommi/papers/Garg_etal_aistats16.pdf}
}

Deep Metric Learning via Lifted Structured Feature Embedding. Song, H.; Xiang, Y.; Jegelka, S.; and Savarese, S. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. Spotlight presentation.

@InProceedings{song16,
  author    = {H. Song and Y. Xiang and S. Jegelka and S. Savarese},
  title     = {Deep Metric Learning via Lifted Structured Feature Embedding},
  booktitle = cvpr,
  note      = {Spotlight presentation},
  year      = 2016
}

Optimization as Estimation with Gaussian Processes in Bandit Settings. Wang, Z.; Zhou, B.; and Jegelka, S. In Artificial Intelligence and Statistics (AISTATS), 2016. Oral presentation.

@InProceedings{wangZJ16,
  author    = {Z. Wang and B. Zhou and S. Jegelka},
  title     = {Optimization as Estimation with {G}aussian Processes in Bandit Settings},
  booktitle = aistats,
  note      = {Oral presentation},
  year      = 2016
}

Auxiliary Image Regularization for Deep CNNs with Noisy Labels. Azadi, S.; Feng, J.; Jegelka, S.; and Darrell, T. In International Conference on Learning Representations (ICLR), 2016.

@InProceedings{azadi16,
  author    = {Samaneh Azadi and Jiashi Feng and Stefanie Jegelka and Trevor Darrell},
  title     = {Auxiliary Image Regularization for Deep {CNN}s with Noisy Labels},
  booktitle = iclr,
  year      = 2016
}

Graph Cuts with Interacting Edge Costs - Examples, Approximations, and Algorithms. Jegelka, S.; and Bilmes, J. Mathematical Programming Series A. 2016.

@Article{jegelkaBilmes16,
  author  = {Stefanie Jegelka and Jeff Bilmes},
  title   = {Graph Cuts with Interacting Edge Costs - Examples, Approximations, and Algorithms},
  journal = {Mathematical Programming Series A},
  year    = 2016
}

Cooperative Graphical Models. Djolonga, J.; Jegelka, S.; Tschiatschek, S.; and Krause, A. In Advances in Neural Information Processing Systems (NIPS), 2016.

@InProceedings{djolonga16,
  author    = {Josip Djolonga and Stefanie Jegelka and Sebastian Tschiatschek and Andreas Krause},
  title     = {Cooperative Graphical Models},
  booktitle = nips,
  year      = 2016
}

Proximal Stochastic Methods for Nonsmooth Nonconvex Finite-Sum Optimization. Reddi, S.; Sra, S.; Poczos, B.; and Smola, A. J. In Advances in Neural Information Processing Systems (NIPS), 2016.

@InProceedings{ncprox,
  author    = {Sashank Reddi and Suvrit Sra and Barnabas Poczos and Alexander J. Smola},
  title     = {Proximal Stochastic Methods for Nonsmooth Nonconvex Finite-Sum Optimization},
  booktitle = nips,
  url_pdf   = {https://papers.nips.cc/paper/6116-proximal-stochastic-methods-for-nonsmooth-nonconvex-finite-sum-optimization.pdf},
  year      = 2016
}

Riemannian SVRG: Fast Stochastic Optimization on Riemannian Manifolds. Zhang, H.; Reddi, S.; and Sra, S. In Advances in Neural Information Processing Systems (NIPS), 2016.

@InProceedings{rsvrg,
  author    = {Hongyi Zhang and Sashank Reddi and Suvrit Sra},
  title     = {Riemannian SVRG: Fast Stochastic Optimization on Riemannian Manifolds},
  booktitle = nips,
  url_pdf   = {https://papers.nips.cc/paper/6515-riemannian-svrg-fast-stochastic-optimization-on-riemannian-manifolds.pdf},
  year      = 2016
}

Fast Mixing Markov Chains for Strongly Rayleigh Measures, DPPs, and Constrained Sampling. Li, C.; Jegelka, S.; and Sra, S. In Advances in Neural Information Processing Systems (NIPS), 2016.

@InProceedings{liJeSr16c,
  author    = {Chengtao Li and Stefanie Jegelka and Suvrit Sra},
  title     = {Fast Mixing Markov Chains for Strongly Rayleigh Measures, DPPs, and Constrained Sampling},
  booktitle = nips,
  url_pdf   = {https://papers.nips.cc/paper/6182-fast-mixing-markov-chains-for-strongly-rayleigh-measures-dpps-and-constrained-sampling.pdf},
  year      = 2016
}

Kronecker Determinantal Point Processes. Mariet, Z.; and Sra, S. In Advances in Neural Information Processing Systems (NIPS), 2016.

@InProceedings{krondpp,
  author    = {Zelda Mariet and Suvrit Sra},
  title     = {Kronecker Determinantal Point Processes},
  url_pdf   = {https://papers.nips.cc/paper/6296-kronecker-determinantal-point-processes.pdf},
  booktitle = nips,
  year      = 2016
}

Stochastic Frank-Wolfe Methods for Nonconvex Optimization. Reddi, S.; Sra, S.; Poczos, B.; and Smola, A. J. In 54th Annual Allerton Conference on Communication, Control, and Computing, 2016.

@InProceedings{ncfw,
  author    = {Sashank Reddi and Suvrit Sra and Barnabas Poczos and Alexander J. Smola},
  title     = {{Stochastic Frank-Wolfe Methods for Nonconvex Optimization}},
  booktitle = {54th Annual Allerton Conference on Communication, Control, and Computing},
  url_pdf   = {http://ieeexplore.ieee.org/document/7852377/},
  year      = 2016
}

Fast incremental methods for smooth nonconvex optimization. Reddi, S.; Sra, S.; Poczos, B.; and Smola, A. J. In IEEE Conference on Decision and Control (CDC), Dec 2016.

@InProceedings{ncsaga,
  author    = {Sashank Reddi and Suvrit Sra and Barnabas Poczos and Alexander J. Smola},
  title     = {Fast incremental methods for smooth nonconvex optimization},
  booktitle = {IEEE Conference on Decision and Control (CDC)},
  month     = {Dec},
  year      = 2016
}

Positive Definite Matrices: Data Representation and Applications to Computer Vision. Cherian, A.; and Sra, S. In Algorithmic Advances in Riemannian Geometry and Applications. Springer, 2016.

@InCollection{chSra16,
  author    = {Anoop Cherian and Suvrit Sra},
  title     = {{Positive Definite Matrices: Data Representation and Applications to Computer Vision}},
  booktitle = {Algorithmic Advances in Riemannian Geometry and Applications},
  publisher = {Springer},
  year      = {2016},
  url_pdf   = {http://suvrit.de/papers/cherian_sra_chapter.pdf}
}

\n \n\n \n \n \n \n \n \n Riemannian Dictionary Learning and Sparse Coding for Positive Definite Matrices.\n \n \n \n \n\n\n \n Cherian, A.; and Sra, S.\n\n\n \n\n\n\n IEEE Neural Networks and Learning Systems (TNNLS). 2016.\n \n\n\n\n
\n\n\n\n \n \n \"Riemannian pdf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Article{cheSra15,\nauthor = {Anoop Cherian and Suvrit Sra},\ntitle = {Riemannian Dictionary Learning and Sparse Coding for Positive Definite Matrices},\njournal = {IEEE Neural Networks and Learning Systems (TNNLS)},\nyear = {2016},\nurl_pdf = {http://ieeexplore.ieee.org/document/7565529/}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fast DPP Sampling for Nyström with Application to Kernel Methods.\n \n \n \n \n\n\n \n Li, C.; Jegelka, S.; and Sra, S.\n\n\n \n\n\n\n In International Conference on Machine Learning (ICML), Jun. 2016. \n \n\n\n\n
\n\n\n\n \n \n \"Fast pdf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{liSra16a,\n  author =       {Chengtao Li and Stefanie Jegelka and  Suvrit Sra},\n  title =        {Fast {DPP} Sampling for {N}yström with Application to Kernel Methods},\n  booktitle =    icml,\n  month     =    {Jun.},\n  year =         2016,\n  url_pdf =      {http://proceedings.mlr.press/v48/lih16.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Inference and mixture modeling with the Elliptical Gamma Distribution.\n \n \n \n \n\n\n \n Hosseini, R.; Sra, S.; Theis, L.; and Bethge, M.\n\n\n \n\n\n\n Computational Statistics & Data Analysis, 101(Supplement C): 29–43. 2016.\n \n\n\n\n
\n\n\n\n \n \n \"InferencePaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Article{hoSra16,\ntitle = "Inference and mixture modeling with the Elliptical Gamma Distribution",\njournal = "Computational Statistics & Data Analysis",\nvolume = "101",\nnumber = "Supplement C",\npages = "29--43",\nyear = "2016",\nurl = "http://www.sciencedirect.com/science/article/pii/S0167947316300251",\nauthor = "Reshad Hosseini and Suvrit Sra and Lucas Theis and Matthias Bethge",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Parallel and Distributed Block-Coordinate Frank-Wolfe Algorithms.\n \n \n \n \n\n\n \n Wang, Y.; Sadhanala, V.; Dai, W.; Neiswanger, W.; Sra, S.; and Xing, E.\n\n\n \n\n\n\n In Proceedings of The 33rd International Conference on Machine Learning, volume 48, pages 1548–1557, 2016. PMLR\n \n\n\n\n
\n\n\n\n \n \n \"ParallelPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{xiangSra16,\n  title = \t {Parallel and Distributed Block-Coordinate {F}rank-{W}olfe Algorithms},\n  author = \t {Yu-Xiang Wang and Veeranjaneyulu Sadhanala and Wei Dai and Willie Neiswanger and Suvrit Sra and Eric Xing},\n  booktitle = \t {Proceedings of The 33rd International Conference on Machine Learning},\n  pages = \t {1548--1557},\n  year = \t {2016},\n  volume = \t {48},\n  publisher = \t {PMLR},\n  url = \t {http://proceedings.mlr.press/v48/wangd16.html},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Gaussian quadrature for matrix inverse forms with applications.\n \n \n \n \n\n\n \n Li, C.; Sra, S.; and Jegelka, S.\n\n\n \n\n\n\n In International Conference on Machine Learning (ICML), Jun. 2016. \n \n\n\n\n
\n\n\n\n \n \n \"Gaussian pdf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{liSra16b,\n  author =       {Chengtao Li and  Suvrit Sra and Stefanie Jegelka},\n  title =        {Gaussian quadrature for matrix inverse forms with applications},\n  booktitle =    icml,\n  month     =    {Jun.},\n  year =         2016,\n  url_pdf =      {http://proceedings.mlr.press/v48/lig16.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Diversity Networks: Neural Network Compression Using Determinantal Point Processes.\n \n \n \n \n\n\n \n Mariet, Z.; and Sra, S.\n\n\n \n\n\n\n In International Conference on Learning Representations (ICLR), May 2016. \n \n\n\n\n
\n\n\n\n \n \n \"Diversity pdf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{marSra16,\n  author =       {Zelda Mariet and  Suvrit Sra},\n  title =        {{Diversity Networks: Neural Network Compression Using Determinantal Point Processes}},\n  booktitle=     {International Conference on Learning Representations (ICLR)},\n  OPTnote =      {arXiv:1511.0577},\n  year =         2016,\n  month =        {May},\n  url_pdf =      {https://arxiv.org/abs/1511.05077}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On the matrix square root and geometric optimization.\n \n \n \n \n\n\n \n Sra, S.\n\n\n \n\n\n\n Electronic Journal of Linear Algebra (ELA). 2016.\n \n\n\n\n
\n\n\n\n \n \n \"On pdf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Article{sraRoot,\n author = {Suvrit Sra},\n title = {On the matrix square root and geometric optimization},\n journal = {Electronic Journal of Linear Algebra (ELA)},\n year = {2016},\n url_pdf = {http://repository.uwyo.edu/ela/vol31/iss1/30/}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Stochastic variance reduction for nonconvex optimization.\n \n \n \n \n\n\n \n Reddi, S.; Hefny, A.; Sra, S.; Poczos, B.; and Smola, A. J.\n\n\n \n\n\n\n In International Conference on Machine Learning (ICML), Jun. 2016. \n \n\n\n\n
\n\n\n\n \n \n \"Stochastic pdf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{reddiSra,\n author = {Sashank Reddi and Ahmed Hefny and  Suvrit Sra and Barnabas Poczos and Alexander J. Smola},\n title = {Stochastic variance reduction for nonconvex optimization},\n booktitle = icml,\n month     =    {Jun.},\n year = {2016},\n url_pdf = {http://proceedings.mlr.press/v48/reddi16.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Entropic Metric Alignment for Correspondence Problems.\n \n \n \n \n\n\n \n Solomon, J.; Peyré, G.; Kim, V.; and Sra, S.\n\n\n \n\n\n\n In ACM SIGGRAPH, Jul. 2016. \n \n\n\n\n
\n\n\n\n \n \n \"Entropic pdf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{solomon,\n  author =       {Justin Solomon and Gabriel Peyré and Vladimir Kim and  Suvrit Sra},\n  title =        {{Entropic Metric Alignment for Correspondence Problems}},\n  booktitle =    {ACM SIGGRAPH},\n  month = {Jul.},\n  year =         {2016},\n  url_pdf = {http://dl.acm.org/citation.cfm?id=2925903}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Geometric Optimization in Machine Learning.\n \n \n \n \n\n\n \n Sra, S.; and Hosseini, R.\n\n\n \n\n\n\n In Algorithmic Advances in Riemannian Geometry and Applications. Springer, 2016.\n \n\n\n\n
\n\n\n\n \n \n \"Geometric pdf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InCollection{sraHos16,\n  author =       {Suvrit Sra and Reshad Hosseini},\n  title =        {Geometric Optimization in Machine Learning},\n  booktitle =    {Algorithmic Advances in Riemannian Geometry and Applications},\n  publisher =    {Springer},\n  year =         2016,\n  url_pdf =      {https://link.springer.com/chapter/10.1007%2F978-3-319-45026-1_3}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Positive definite matrices and the S-divergence.\n \n \n \n \n\n\n \n Sra, S.\n\n\n \n\n\n\n Proceedings of the American Mathematical Society (PAMS). 2016.\n \n\n\n\n
\n\n\n\n \n \n \"Positive pdf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Article{srapd,\n  author =       {Suvrit Sra},\n  title =        {Positive definite matrices and the {S}-divergence},\n  journal =      {Proceedings of the American Mathematical Society (PAMS)},\n  year =         2016,\n  url_pdf =      {http://www.ams.org/proc/2016-144-07/S0002-9939-2015-12953-X/S0002-9939-2015-12953-X.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Geometric Mean Metric Learning.\n \n \n \n \n\n\n \n Zadeh, P. H.; Hosseini, R.; and Sra, S.\n\n\n \n\n\n\n In International Conference on Machine Learning (ICML), Jun. 2016. \n \n\n\n\n
\n\n\n\n \n \n \"Geometric pdf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{zadeh,\n  author =       {Pourya H. Zadeh and Reshad Hosseini and  Suvrit Sra},\n  title =        {Geometric Mean Metric Learning},\n  booktitle =    icml,\n  month     =    {Jun.},\n  year =         {2016},\n  url_pdf =      {http://proceedings.mlr.press/v48/zadeh16.html}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The sum of squared logarithms inequality in arbitrary dimensions.\n \n \n \n \n\n\n \n Borisov, L.; Neff, P.; Sra, S.; and Thiel, C.\n\n\n \n\n\n\n Linear Algebra and its Applications (LAA). 2016.\n \n\n\n\n
\n\n\n\n \n \n \"The pdf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Article{boNes15,\nauthor = {Lev Borisov and Patrizio Neff and Suvrit Sra and Christian Thiel},\ntitle = {The sum of squared logarithms inequality in arbitrary dimensions},\njournal = {Linear Algebra and its Applications (LAA)},\nyear = {2016},\nurl_pdf = {http://www.sciencedirect.com/science/article/pii/S0024379516302439}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n First-order methods for geodesically convex optimization.\n \n \n \n \n\n\n \n Zhang, H.; and Sra, S.\n\n\n \n\n\n\n In Conference on Learning Theory (COLT), Jun. 2016. \n \n\n\n\n
\n\n\n\n \n \n \"First-order pdf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{zhangSra16a,\n author = {Hongyi Zhang and  Suvrit Sra},\n title = {First-order methods for geodesically convex optimization},\n month     =    {Jun.},\n year = {2016},\n booktitle={Conference on Learning Theory (COLT)},\n url_pdf = {http://proceedings.mlr.press/v49/zhang16b.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Efficient sampling for k-determinantal point processes.\n \n \n \n \n\n\n \n Li, C.; Jegelka, S.; and Sra, S.\n\n\n \n\n\n\n In Artificial Intelligence and Statistics (AISTATS), 2016. \n \n\n\n\n
\n\n\n\n \n \n \"Efficient pdf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{liJeSr16a,\n  author =       {Chengtao Li and Stefanie Jegelka and Suvrit Sra},\n  title =        {Efficient sampling for k-determinantal point processes},\n  booktitle =    {Artificial Intelligence and Statistics (AISTATS)},\n  year =         {2016},\n  url_pdf =      {http://www.jmlr.org/proceedings/papers/v51/li16f.pdf}  \n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n AdaDelay: Delay sensitive distributed stochastic convex optimization.\n \n \n \n \n\n\n \n Sra, S.; Yu, A. W.; Li, M.; and Smola, A. J.\n\n\n \n\n\n\n In Artificial Intelligence and Statistics (AISTATS), 2016. \n \n\n\n\n
\n\n\n\n \n \n \"AdaDelay: pdf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{adams16,\n  author =       {Suvrit Sra and Adams Wei Yu and Mu Li and Alexander J. Smola},\n  title =        {{AdaDelay: Delay sensitive distributed stochastic convex optimization}},\n  booktitle =    {Artificial Intelligence and Statistics (AISTATS)},\n  year =         {2016},\n  url_pdf =      {http://proceedings.mlr.press/v51/sra16.html}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On inequalities for normalized Schur functions.\n \n \n \n \n\n\n \n Sra, S.\n\n\n \n\n\n\n European J. Combinatorics, 51: 492–494. 2016.\n \n\n\n\n
\n\n\n\n \n \n \"On pdf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Article{sra15a,\nauthor = {Suvrit Sra},\ntitle = {{On inequalities for normalized Schur functions}},\njournal = {European J. Combinatorics},\nvolume = {51},\nyear = {2016},\npages = {492--494},\nmonth = jan,\nurl_pdf = {http://www.sciencedirect.com/science/article/pii/S0195669815001717}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Deep Learning without Poor Local Minima.\n \n \n \n\n\n \n Kawaguchi, K.\n\n\n \n\n\n\n In Advances in Neural Information Processing Systems (NIPS), 2016. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{kawaguchi2016deep,\ntitle={Deep Learning without Poor Local Minima},\nauthor={Kawaguchi, Kenji},\nbooktitle={Advances in Neural Information Processing Systems (NIPS)},\nyear={2016}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Bounded Optimal Exploration in MDP.\n \n \n \n\n\n \n Kawaguchi, K.\n\n\n \n\n\n\n In Proceedings of the 30th AAAI Conference on Artificial Intelligence (AAAI), pages 1758–1764, 2016. AAAI Press\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{kawaguchiAAAI2016,\ntitle={Bounded Optimal Exploration in {MDP}},\nauthor={Kenji Kawaguchi},\nbooktitle={Proceedings of the 30th AAAI Conference on Artificial Intelligence (AAAI)},\npages = {1758--1764},\nyear={2016},\norganization={AAAI Press}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Global Continuous Optimization with Error Bound and Fast Convergence.\n \n \n \n\n\n \n Kawaguchi, K.; Maruyama, Y.; and Zheng, X.\n\n\n \n\n\n\n Journal of Artificial Intelligence Research, 56: 153–195. 2016.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{kawaguchi2016global,\ntitle={Global Continuous Optimization with Error Bound and Fast Convergence},\nauthor={Kawaguchi, Kenji and Maruyama, Yu and Zheng, Xiaoyu},\njournal={Journal of Artificial Intelligence Research},\nvolume={56},\npages={153--195},\nyear={2016}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n A tutorial on online supervised learning with applications to node classification in social networks.\n \n \n \n\n\n \n Rakhlin, A.; and Sridharan, K.\n\n\n \n\n\n\n arXiv preprint arXiv:1608.09014. 2016.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{rakhlin2016tutorial,\n  title={A tutorial on online supervised learning with applications to node classification in social networks},\n  author={Rakhlin, Alexander and Sridharan, Karthik},\n  journal={arXiv preprint arXiv:1608.09014},\n  year={2016}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Information-theoretic analysis of stability and bias of learning algorithms.\n \n \n \n\n\n \n Raginsky, M.; Rakhlin, A.; Tsao, M.; Wu, Y.; and Xu, A.\n\n\n \n\n\n\n In Information Theory Workshop (ITW), 2016 IEEE, 2016. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{raginsky2016information,\n  title={Information-theoretic analysis of stability and bias of learning algorithms},\n  author={Raginsky, Maxim and Rakhlin, Alexander and Tsao, Matthew and Wu, Yihong and Xu, Aolin},\n  booktitle={Information Theory Workshop (ITW), 2016 IEEE},\n  year={2016},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n BISTRO: An Efficient Relaxation-Based Method for Contextual Bandits.\n \n \n \n\n\n \n Rakhlin, A.; and Sridharan, K.\n\n\n \n\n\n\n In ICML, pages 1977–1985, 2016. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{rakhlin2016bistro,\n  title={BISTRO: An Efficient Relaxation-Based Method for Contextual Bandits},\n  author={Rakhlin, Alexander and Sridharan, Karthik},\n  booktitle={ICML},\n  pages={1977--1985},\n  year={2016}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Geometric inference for general high-dimensional linear inverse problems.\n \n \n \n\n\n \n Cai, T T.; Liang, T.; Rakhlin, A.; and others\n\n\n \n\n\n\n The Annals of Statistics, 44(4): 1536–1563. 2016.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{cai2016geometric,\n  title={Geometric inference for general high-dimensional linear inverse problems},\n  author={Cai, T Tony and Liang, Tengyuan and Rakhlin, Alexander and others},\n  journal={The Annals of Statistics},\n  volume={44},\n  number={4},\n  pages={1536--1563},\n  year={2016},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Inference via message passing on partially labeled stochastic block models.\n \n \n \n\n\n \n Cai, T T.; Liang, T.; and Rakhlin, A.\n\n\n \n\n\n\n arXiv preprint arXiv:1603.06923. 2016.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{cai2016inference,\n  title={Inference via message passing on partially labeled stochastic block models},\n  author={Cai, T Tony and Liang, Tengyuan and Rakhlin, Alexander},\n  journal={arXiv preprint arXiv:1603.06923},\n  year={2016}\n}
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2015\n \n \n (14)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n From random walks to distances on unweighted graphs.\n \n \n \n \n\n\n \n Hashimoto, T.; Sun, Y.; and Jaakkola, T.\n\n\n \n\n\n\n In Advances in Neural Information Processing Systems (NIPS), 2015. \n \n\n\n\n
\n\n\n\n \n \n \"From pdf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Hasetal-nips2015,\nauthor = {T. Hashimoto and Y. Sun and T. Jaakkola},\ntitle = {From random walks to distances on unweighted graphs},\nbooktitle = {Advances in Neural Information Processing Systems (NIPS)},\nyear = {2015},\nurl_pdf = {https://people.csail.mit.edu/tommi/papers/Hashimoto_etal_nips2015.pdf},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Principal Differences Analysis: Interpretable Characterization of Differences between Distributions.\n \n \n \n \n\n\n \n Mueller, J.; and Jaakkola, T.\n\n\n \n\n\n\n In Advances in Neural Information Processing Systems (NIPS), 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Principal pdf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Mueetal-nips2015,\nauthor = {J. Mueller and T. Jaakkola},\ntitle = {Principal Differences Analysis: Interpretable Characterization of Differences between Distributions},\nbooktitle = {Advances in Neural Information Processing Systems (NIPS)},\nyear = {2015},\nurl_pdf = {https://people.csail.mit.edu/tommi/papers/Mueller_etal_nips2015.pdf},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Molding CNNs for Text: Non-linear, Non-consecutive Convolutions.\n \n \n \n \n\n\n \n Lei, T.; Barzilay, R.; and Jaakkola, T.\n\n\n \n\n\n\n In Empirical Methods in Natural Language Processing, 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Molding pdf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Leietal-EMNLP2015,\nauthor = {T. Lei and R. Barzilay and T. Jaakkola},\ntitle = {Molding {CNN}s for Text: Non-linear, Non-consecutive Convolutions},\nbooktitle = {Empirical Methods in Natural Language Processing},\nyear = {2015},\nurl_pdf = {https://people.csail.mit.edu/tommi/papers/Leietal_EMNLP15.pdf},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An Unsupervised Method for Uncovering Morphological Chains.\n \n \n \n \n\n\n \n Narasimhan, K.; Barzilay, R.; and Jaakkola, T.\n\n\n \n\n\n\n Transactions of the Association for Computational Linguistics, 3: 157–167. 2015.\n \n\n\n\n
\n\n\n\n \n \n \"An pdf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{TACL458,\nauthor = {K. Narasimhan and R. Barzilay and T. Jaakkola},\ntitle = {An Unsupervised Method for Uncovering Morphological Chains},\njournal = {Transactions of the Association for Computational Linguistics},\nvolume = {3},\nyear = {2015},\nurl_pdf = {https://people.csail.mit.edu/tommi/papers/Karthik-TACL-458-1565-1-PB.pdf},\npages = {157--167}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Metric recovery from directed unweighted graphs.\n \n \n \n \n\n\n \n Hashimoto, T.; Sun, Y.; and Jaakkola, T.\n\n\n \n\n\n\n In Artificial Intelligence and Statistics, 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Metric pdf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{tatsu_aistats15,\nauthor = {T. Hashimoto and Y. Sun and T. Jaakkola},\ntitle = {Metric recovery from directed unweighted graphs},\nyear = {2015},\nbooktitle = {Artificial Intelligence and Statistics},\nurl_pdf = {https://people.csail.mit.edu/tommi/papers/Tatsu_aistats15.pdf},\n}\n\n\n% STEFANIE -- last updated: 8th Oct 2017\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Matrix Manifold Optimization for Gaussian Mixtures.\n \n \n \n \n\n\n \n Hosseini, R.; and Sra, S.\n\n\n \n\n\n\n In Advances in Neural Information Processing Systems (NIPS), December 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Matrix pdf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{hoSra.nips,\n  author =       {Reshad Hosseini and Suvrit Sra},\n  title =        {{Matrix Manifold Optimization for Gaussian Mixtures}},\n  booktitle =    nips,\n  year =         2015,\n  month =        dec,\n  url_pdf =      {https://papers.nips.cc/paper/5812-matrix-manifold-optimization-for-gaussian-mixtures.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On Variance Reduction in Stochastic Gradient Descent and its Asynchronous Variants.\n \n \n \n \n\n\n \n Reddi, S.; Hefny, A.; Sra, S.; Poczos, B.; and Smola, A.\n\n\n \n\n\n\n In Advances in Neural Information Processing Systems (NIPS), December 2015. \n \n\n\n\n
\n\n\n\n \n \n \"On pdf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{reddy,\n  author =       {Sashank Reddi and Ahmed Hefny and Suvrit Sra and Barnabas Poczos and Alexander Smola},\n  title =        {{On Variance Reduction in Stochastic Gradient Descent and its Asynchronous Variants}},\n  booktitle =    nips,\n  year =         2015,\n  month =        dec,\n  url_pdf =      {https://papers.nips.cc/paper/5821-on-variance-reduction-in-stochastic-gradient-descent-and-its-asynchronous-variants.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Complete strong superadditivity of generalized matrix functions.\n \n \n \n \n\n\n \n Lin, M.; and Sra, S.\n\n\n \n\n\n\n Mathematical Notes. 2015.\n \n\n\n\n
\n\n\n\n \n \n \"Complete pdf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Article{linSra14,\nauthor = {Minghua Lin and Suvrit Sra},\ntitle = {{Complete strong superadditivity of generalized matrix functions}},\njournal = {Mathematical Notes},\nyear = {2015},\nurl_pdf = {https://arxiv.org/abs/1410.1958}\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Hlawka-Popoviciu inequalities on positive definite tensors.\n \n \n \n \n\n\n \n Berndt, W.; and Sra, S.\n\n\n \n\n\n\n Linear Algebra and its Applications, 486(1): 317–327. 2015.\n \n\n\n\n
\n\n\n\n \n \n \"Hlawka-Popoviciu pdf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Article{berSra15,\nauthor = {Wolfgang Berndt and Suvrit Sra},\ntitle = {{Hlawka-Popoviciu inequalities on positive definite tensors}},\njournal = {Linear Algebra and its Applications},\nvolume = {486},\nnumber = {1},\npages = {317--327},\nyear = {2015},\nurl_pdf = {http://www.sciencedirect.com/science/article/pii/S0024379515004966}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Large-scale randomized-coordinate descent methods with non-separable linear constraints.\n \n \n \n \n\n\n \n Reddi, S.; Hefny, A.; Downey, C.; Dubey, A.; and Sra, S.\n\n\n \n\n\n\n In Uncertainty in Artificial Intelligence (UAI), 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Large-scale pdf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{reHeSra14,\nauthor = {Sashank Reddi and Ahmed Hefny and Carlton Downey and Avinava Dubey and Suvrit Sra},\ntitle = {{Large-scale randomized-coordinate descent methods with non-separable linear constraints}},\nbooktitle = {Uncertainty in Artificial Intelligence (UAI)},\nmonth   = {May},\nyear = {2015},\nurl_pdf = {http://auai.org/uai2015/proceedings/papers/191.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fixed-point algorithms for learning determinantal point processes.\n \n \n \n \n\n\n \n Mariet, Z.; and Sra, S.\n\n\n \n\n\n\n In International Conference on Machine Learning (ICML), 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Fixed-point pdf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{marSra15,\nauthor = {Zelda Mariet and Suvrit Sra},\ntitle = {{Fixed-point algorithms for learning determinantal point processes}},\nbooktitle = {International Conference on Machine Learning (ICML)},\nmonth   = {Jun},\nyear = {2015},\nurl_pdf = {http://proceedings.mlr.press/v37/mariet15.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Conic Geometric Optimization on the Manifold of Positive Definite Matrices.\n \n \n \n \n\n\n \n Sra, S.; and Hosseini, R.\n\n\n \n\n\n\n SIAM J. Optimization (SIOPT), 25(1): 713–739. 2015.\n \n\n\n\n
\n\n\n\n \n \n \"Conic pdf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Article{sraHo15,\nauthor = {Suvrit Sra and Reshad Hosseini},\ntitle = {{Conic Geometric Optimization on the Manifold of Positive Definite Matrices}},\nvolume = {25},\nnumber = {1},\npages  = {713--739},\njournal = {SIAM J. Optimization (SIOPT)},\nyear = {2015},\nurl_pdf = {http://epubs.siam.org/doi/abs/10.1137/140978168?af=R}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Statistical inference with the Elliptical Gamma Distribution.\n \n \n \n \n\n\n \n Hosseini, R.; Sra, S.; Theis, L.; and Bethge, M.\n\n\n \n\n\n\n In Artificial Intelligence and Statistics (AISTATS), volume 18, 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Statistical pdf\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{hoSra15,\nauthor = {Reshad Hosseini and Suvrit Sra and L. Theis and M. Bethge},\ntitle = {{Statistical inference with the Elliptical Gamma Distribution}},\nbooktitle = {Artificial Intelligence and Statistics (AISTATS)},\nyear = {2015},\nvolume = 18,\nurl_pdf = {http://www.sciencedirect.com/science/article/pii/S0167947316300251}\n}\n\n\n% KENJI\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Bayesian optimization with exponential convergence.\n \n \n \n\n\n \n Kawaguchi, K.; Kaelbling, L. P.; and Lozano-Pérez, T.\n\n\n \n\n\n\n In Advances in Neural Information Processing Systems (NIPS), pages 2809–2817, 2015. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{kawaguchi2015bayesian,\ntitle={Bayesian optimization with exponential convergence},\nauthor={Kawaguchi, Kenji and Kaelbling, Leslie Pack and Lozano-P{\\'e}rez, Tom{\\'a}s},\nbooktitle={Advances in Neural Information Processing Systems (NIPS)},\npages={2809--2817},\nyear={2015}\n}\n\n\n%%%%%%%%%%%%%%%%%%%%\n%% SASHA\n%%%%%%%%%%%%%%%%%%%%\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n\n\n\n
\n\n\n \n\n \n \n \n \n\n
\n"}; document.write(bibbase_data.data);