var bibbase_data = {"data":"\"Loading..\"\n\n
\n\n \n\n \n\n \n \n\n \n\n \n \n\n \n\n \n
\n generated by\n \n \"bibbase.org\"\n\n \n
\n \n\n
\n\n \n\n\n
\n\n Excellent! Next you can\n create a new website with this list, or\n embed it in an existing web page by copying & pasting\n any of the following snippets.\n\n
\n JavaScript\n (easiest)\n
\n \n <script src=\"https://bibbase.org/show?bib=natisb.github.io%2FPub.bib&jsonp=1&jsonp=1\"></script>\n \n
\n\n PHP\n
\n \n <?php\n $contents = file_get_contents(\"https://bibbase.org/show?bib=natisb.github.io%2FPub.bib&jsonp=1\");\n print_r($contents);\n ?>\n \n
\n\n iFrame\n (not recommended)\n
\n \n <iframe src=\"https://bibbase.org/show?bib=natisb.github.io%2FPub.bib&jsonp=1\"></iframe>\n \n
\n\n

\n For more details see the documention.\n

\n
\n
\n\n
\n\n This is a preview! To use this list on your own web site\n or create a new web site from it,\n create a free account. The file will be added\n and you will be able to edit it in the File Manager.\n We will show you instructions once you've created your account.\n
\n\n
\n\n

To the site owner:

\n\n

Action required! Mendeley is changing its\n API. In order to keep using Mendeley with BibBase past April\n 14th, you need to:\n

    \n
  1. renew the authorization for BibBase on Mendeley, and
  2. \n
  3. update the BibBase URL\n in your page the same way you did when you initially set up\n this page.\n
  4. \n
\n

\n\n

\n \n \n Fix it now\n

\n
\n\n
\n\n\n
\n \n \n
\n
\n  \n 2023\n \n \n (11)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n An Agnostic View on the Cost of Overfitting in (Kernel) Ridge Regression.\n \n \n \n \n\n\n \n Zhou, L.; Simon B., J.; Vardi, G.; and Srebro, N.\n\n\n \n\n\n\n arXiv preprint. 2023.\n \n\n\n\n
\n\n\n\n \n \n \"An paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 15 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2023zhouagnostic,\n  title={An Agnostic View on the Cost of Overfitting in (Kernel) Ridge Regression},\n  author={Zhou, Lijia and Simon B., James and Vardi, Gal and Srebro, Nathan},\n  journal={arXiv preprint},\n  year={2023},\n  url_Paper={https://arxiv.org/pdf/2306.13185.pdf},\n  my_funding = {},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Uniform Convergence with Square-Root Lipschitz Loss.\n \n \n \n \n\n\n \n Zhou, L.; Dai, Z.; Koehler, F.; and Srebro, N.\n\n\n \n\n\n\n arXiv preprint. 2023.\n \n\n\n\n
\n\n\n\n \n \n \"Uniform paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 6 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2023zhouuniform,\n  title={Uniform Convergence with Square-Root Lipschitz Loss},\n  author={Zhou, Lijia and Dai, Zhen and Koehler, Frederic and Srebro, Nathan},\n  journal={arXiv preprint},\n  year={2023},\n  url_Paper={https://arxiv.org/pdf/2306.13188.pdf},\n  my_funding = {},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Federated Online and Bandit Convex Optimization.\n \n \n \n \n\n\n \n Patel Kshitij, K.; Wang, L.; Saha, A.; and Srebro, N.\n\n\n \n\n\n\n OpenReview.net. 2023.\n \n\n\n\n
\n\n\n\n \n \n \"Federated paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2023patelfederated,\n  title={Federated Online and Bandit Convex Optimization},\n  author={Patel Kshitij, Kumar and Wang, Lingxiao and Saha, Aadirupa and Srebro, Nathan},\n  journal={OpenReview.net},\n  year={2023},\n  url_Paper={https://openreview.net/pdf?id=mi7pnouqLa},\n  my_funding = {NSF-BSF, NSF TRIPOD and Simons},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Continual Learning in Linear Classification on Separable Data.\n \n \n \n \n\n\n \n Evron, I.; Moroshko, E.; Buzaglo, G.; Khriesh, M.; Marjieh, B.; Srebro, N.; and Soudry, D.\n\n\n \n\n\n\n arXiv preprint. 2023.\n \n\n\n\n
\n\n\n\n \n \n \"Continual paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2023evroncontinual,\n  title={Continual Learning in Linear Classification on Separable Data},\n  author={Evron, Itay and Moroshko, Edward and Buzaglo, Gon and Khriesh, Maroun and Marjieh, Badea and Srebro, Nathan and Soudry, Daniel},\n  journal={arXiv preprint},\n  year={2023},\n  url_Paper={https://arxiv.org/pdf/2306.03534.pdf},\n  my_funding = {},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Most Neural Networks Are Almost Learnable.\n \n \n \n \n\n\n \n Daniely, A.; Srebro, N.; and Vardi, G.\n\n\n \n\n\n\n arXiv preprint. 2023.\n \n\n\n\n
\n\n\n\n \n \n \"Most paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 6 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2023danielyneural,\n  title={Most Neural Networks Are Almost Learnable},\n  author={Daniely, Amit and Srebro, Nathan and Vardi, Gal},\n  journal={arXiv preprint},\n  year={2023},\n  url_Paper={https://arxiv.org/pdf/2305.16508.pdf},\n  my_funding = {NSF Simons},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Lower bounds for non-convex stochastic optimization.\n \n \n \n \n\n\n \n Arjevani, Y.; Carmon, Y.; Duchi C., J.; Foster J., D.; Srebro, N.; and Woodworth, B.\n\n\n \n\n\n\n Mathematical Programming. 2023.\n \n\n\n\n
\n\n\n\n \n \n \"Lower paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2023arjevanilower,\n  title={Lower bounds for non-convex stochastic optimization},\n  author={Arjevani, Yossi and Carmon, Yair and Duchi C., John and Foster J., Dylan and Srebro, Nathan and Woodworth, Blake},\n  journal={Mathematical Programming},\n  year={2023},\n  url_Paper={https://link.springer.com/article/10.1007/s10107-022-01822-7},\n  my_funding = {},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The Double-Edged Sword of Implicit Bias: Generalization vs. Robustness in ReLU Networks.\n \n \n \n \n\n\n \n Frei, S.; Vardi, G.; Bartlett L., P.; and Srebro, N.\n\n\n \n\n\n\n arXiv preprint. 2023.\n \n\n\n\n
\n\n\n\n \n \n \"The paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2023freidouble,\n  title={The Double-Edged Sword of Implicit Bias: Generalization vs. Robustness in ReLU Networks},\n  author={Frei, Spencer and Vardi, Gal and Bartlett L., Peter and Srebro, Nathan},\n  journal={arXiv preprint},\n  year={2023},\n  url_Paper={https://arxiv.org/pdf/2303.01456.pdf},\n  my_funding = {NSF and Simons},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Benign Overfitting in Linear Classifiers and Leaky ReLU Networks from KKT Conditions for Margin Maximization.\n \n \n \n \n\n\n \n Frei, S.; Vardi, G.; Bartlett L., P.; and Srebro, N.\n\n\n \n\n\n\n arXiv preprint. 2023.\n \n\n\n\n
\n\n\n\n \n \n \"Benign paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2023freibenign,\n  title={Benign Overfitting in Linear Classifiers and Leaky ReLU Networks from KKT Conditions for Margin Maximization},\n  author={Frei, Spencer and Vardi, Gal and Bartlett L., Peter and Srebro, Nathan},\n  journal={arXiv preprint},\n  year={2023},\n  url_Paper={https://arxiv.org/pdf/2303.01462.pdf},\n  my_funding = {},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Efficiently Learning Neural Networks: What Assumptions May Suffice?.\n \n \n \n \n\n\n \n Daniely, A.; Srebro, N.; and Vardi, G.\n\n\n \n\n\n\n arXiv preprint. 2023.\n \n\n\n\n
\n\n\n\n \n \n \"Efficiently paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 4 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2023danielyefficiently,\n  title={Efficiently Learning Neural Networks: What Assumptions May Suffice?},\n  author={Daniely, Amit and Srebro, Nathan and Vardi, Gal},\n  journal={arXiv preprint},\n  year={2023},\n  url_Paper={https://arxiv.org/pdf/2302.07426.pdf},\n  my_funding = {Simons},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Interpolation Learning With Minimum Description Length.\n \n \n \n \n\n\n \n Manoj Sarayu, N.; and Srebro, N.\n\n\n \n\n\n\n arXiv preprint. 2023.\n \n\n\n\n
\n\n\n\n \n \n \"Interpolation paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2023sarayuinterpolation,\n  title={Interpolation Learning With Minimum Description Length},\n  author={Manoj Sarayu, Naren and Srebro, Nathan},\n  journal={arXiv preprint},\n  year={2023},\n  url_Paper={https://arxiv.org/pdf/2302.07263.pdf},\n  my_funding = {NSF TRIPOD and Simons},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Sparse Data Reconstruction, Missing Value and Multiple Imputation through Matrix Factorization.\n \n \n \n \n\n\n \n Sengupta, N.; Udell, M.; Srebro, N.; and Evans, J.\n\n\n \n\n\n\n Sociological Methodology. 2023.\n \n\n\n\n
\n\n\n\n \n \n \"Sparse paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2023senguptasparse,\n  title={Sparse Data Reconstruction, Missing Value and Multiple Imputation through Matrix Factorization},\n  author={Sengupta, Nandana and Udell, Madeleine and Srebro, Nathan and Evans, James},\n  journal={Sociological Methodology},\n  year={2023},\n  url_Paper={https://journals.sagepub.com/doi/pdf/10.1177/00811750221125799},\n  my_funding = {},\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2022\n \n \n (16)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Understanding the Eluder Dimension.\n \n \n \n \n\n\n \n Li, G.; Kamath, P.; Foster J, D.; and Srebro, N.\n\n\n \n\n\n\n Advances in Neural Information Processing Systems. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"Understanding paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2022liunderstanding,\n  title={Understanding the Eluder Dimension},\n  author={Li, Gene and Kamath, Pritish and Foster J, Dylan and Srebro, Nathan},\n  journal={Advances in Neural Information Processing Systems},\n  year={2022},\n  url_Paper={https://proceedings.neurips.cc/paper_files/paper/2022/file/960cfbb846aff424ac20aadce6fa6530-Paper-Conference.pdf},\n  my_funding = {},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Towards optimal communication complexity in distributed non-convex optimization.\n \n \n \n \n\n\n \n Patel Kshitij, K.; Wang, L.; Woodworth E, B.; Bullins, B.; and Srebro, N.\n\n\n \n\n\n\n Advances in Neural Information Processing Systems. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"Towards paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2022pateloptimization,\n  title={Towards optimal communication complexity in distributed non-convex optimization},\n  author={Patel Kshitij, Kumar and Wang, Lingxiao and Woodworth E, Blake and Bullins, Brian and Srebro, Nathan},\n  journal={Advances in Neural Information Processing Systems},\n  year={2022},\n  url_Paper={https://proceedings.neurips.cc/paper_files/paper/2022/file/56bd21259e28ebdc4d7e1503733bf421-Paper-Conference.pdf},\n  my_funding = {NSF-BSF, NSF TRIPOD, and Simons},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Non-Asymptotic Moreau Envelope Theory for High-Dimensional Generalized Linear Models.\n \n \n \n \n\n\n \n Zhou, L.; Koehler, F.; Sur, P.; Sutherland J, D.; and Srebro, N.\n\n\n \n\n\n\n Advances in Neural Information Processing Systems. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"A paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2022zhoumodels,\n  title={A Non-Asymptotic Moreau Envelope Theory for High-Dimensional Generalized Linear Models},\n  author={Zhou, Lijia and Koehler, Frederic and Sur, Pragya and Sutherland J, Danica and Srebro, Nathan},\n  journal={Advances in Neural Information Processing Systems},\n  year={2022},\n  url_Paper={https://proceedings.neurips.cc/paper_files/paper/2022/file/861f7dad098aec1c3560fb7add468d41-Paper-Conference.pdf},\n  my_funding = {},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Adversarially Robust Learning: A Generic Minimax Optimal Learner and Characterization.\n \n \n \n \n\n\n \n Montasser, O.; Hanneke, S.; and Srebro, N.\n\n\n \n\n\n\n Advances in Neural Information Processing Systems. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"Adversarially paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2022montasseradversarially,\n  title={Adversarially Robust Learning: A Generic Minimax Optimal Learner and Characterization},\n  author={Montasser, Omar and Hanneke, Steve and Srebro, Nathan},\n  journal={Advances in Neural Information Processing Systems},\n  year={2022},\n  url_Paper={https://proceedings.neurips.cc/paper_files/paper/2022/file/f392c6bbb14548df50092f10c9db440f-Paper-Conference.pdf},\n  my_funding = {DARPA and Simons},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Pessimism for Offline Linear Contextual Bandits using ℓp Confidence Sets.\n \n \n \n \n\n\n \n Li, G.; Ma, C.; and Srebro, N.\n\n\n \n\n\n\n Advances in Neural Information Processing Systems. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"Pessimism paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2022lipessimism,\n  title={Pessimism for Offline Linear Contextual Bandits using ℓp Confidence Sets},\n  author={Li, Gene and Ma, Cong and Srebro, Nathan},\n  journal={Advances in Neural Information Processing Systems},\n  year={2022},\n  url_Paper={https://proceedings.neurips.cc/paper_files/paper/2022/file/8443219a991f068c34d9491ad68ffa94-Paper-Conference.pdf},\n  my_funding = {NSF IDEAL},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Thinking Outside the Ball: Optimal Learning with Gradient Descent for Generalized Linear Stochastic Convex Optimization.\n \n \n \n \n\n\n \n Amir, I.; Livni, R.; and Srebro, N.\n\n\n \n\n\n\n Advances in Neural Information Processing Systems. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"Thinking paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 38 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2022amirthinking,\n  title={Thinking Outside the Ball: Optimal Learning with Gradient Descent for Generalized Linear Stochastic Convex Optimization},\n  author={Amir, Idan and Livni, Roi and Srebro, Nathan},\n  journal={Advances in Neural Information Processing Systems},\n  year={2022},\n  url_Paper={https://proceedings.neurips.cc/paper_files/paper/2022/file/9521b6e7f33e039e7d92e23f5e37bbf4-Paper-Conference.pdf},\n  my_funding = {},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The sample complexity of one-hidden-layer neural networks.\n \n \n \n \n\n\n \n Vardi, G.; Shamir, O.; and Srebro, N.\n\n\n \n\n\n\n Advances in Neural Information Processing Systems. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"The paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 23 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2022vardicomplexity,\n  title={The sample complexity of one-hidden-layer neural networks},\n  author={Vardi, Gal and Shamir, Ohad and Srebro, Nathan},\n  journal={Advances in Neural Information Processing Systems},\n  year={2022},\n  url_Paper={https://proceedings.neurips.cc/paper_files/paper/2022/file/3baf4eeffad860ca9c54aeab632716b4-Paper-Conference.pdf},\n  my_funding = {NSF-BSF},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Exponential family model-based reinforcement learning via score matching.\n \n \n \n \n\n\n \n Li, G.; Li, J.; Kabra, A.; Srebro, N.; Wang, Z.; and Yang, Z.\n\n\n \n\n\n\n Advances in Neural Information Processing Systems. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"Exponential paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2022liexponential,\n  title={Exponential family model-based reinforcement learning via score matching},\n  author={Li, Gene and Li, Junbo and Kabra, Anmol and Srebro, Nathan and Wang, Zhaoran and Yang, Zhuoran},\n  journal={Advances in Neural Information Processing Systems},\n  year={2022},\n  url_Paper={https://proceedings.neurips.cc/paper_files/paper/2022/file/b693a240cf1009bff9fa4422141c9392-Paper-Conference.pdf},\n  my_funding = {NSF IDEAL},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On margin maximization in linear and relu networks.\n \n \n \n \n\n\n \n Vardi, G.; Shamir, O.; and Srebro, N.\n\n\n \n\n\n\n Advances in Neural Information Processing Systems. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"On paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2022vardimargin,\n  title={On margin maximization in linear and relu networks},\n  author={Vardi, Gal and Shamir, Ohad and Srebro, Nathan},\n  journal={Advances in Neural Information Processing Systems},\n  year={2022},\n  url_Paper={https://proceedings.neurips.cc/paper_files/paper/2022/file/f062da1973ac9ac61fc6d44dd7fa309f-Paper-Conference.pdf},\n  my_funding = {ERC grant},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Distributed online and bandit convex optimization.\n \n \n \n \n\n\n \n Patel Kshitij, K.; Saha, A.; Wang, L.; and Srebro, N.\n\n\n \n\n\n\n OpenReview.net. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"Distributed paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2022pateldistributed,\n  title={Distributed online and bandit convex optimization},\n  author={Patel Kshitij, Kumar and Saha, Aadirupa and Wang, Lingxiao and Srebro, Nathan},\n  journal={OpenReview.net},\n  year={2022},\n  url_Paper={https://openreview.net/pdf?id=KKfjOEvDwQ},\n  my_funding = {NSF-BSF, NSF IDEAL and Simons},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Implicit bias in leaky relu networks trained on high-dimensional data.\n \n \n \n \n\n\n \n Frei, S.; Vardi, G.; Barlett L, P.; Srebro, N.; and Hu, W.\n\n\n \n\n\n\n arXiv preprint. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"Implicit paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2022freiimplicit,\n  title={Implicit bias in leaky relu networks trained on high-dimensional data},\n  author={Frei, Spencer and Vardi, Gal and Barlett L, Peter and Srebro, Nathan and Hu, Wei},\n  journal={arXiv preprint},\n  year={2022},\n  url_Paper={https://arxiv.org/pdf/2210.07082.pdf},\n  my_funding = {},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Implicit bias of the step size in linear diagonal neural networks.\n \n \n \n \n\n\n \n Nacson Shpigel, M.; Ravichandran, K.; Srebro, N.; and Soudry, D.\n\n\n \n\n\n\n Proceedings of Machine Learning Research. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"Implicit paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2022nacsonimplicit,\n  title={Implicit bias of the step size in linear diagonal neural networks},\n  author={Nacson Shpigel, Mor and Ravichandran, Kavya and Srebro, Nathan and Soudry, Daniel},\n  journal={Proceedings of Machine Learning Research},\n  year={2022},\n  url_Paper={https://proceedings.mlr.press/v162/nacson22a/nacson22a.pdf},\n  my_funding = {NSF IIS award and Simons},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n How catastrophic can catastrophic forgetting be in linear regression?.\n \n \n \n \n\n\n \n Evron, I.; Moroshko, E.; Ward, R.; Srebro, N.; and Soudry, D.\n\n\n \n\n\n\n Proceedings of Machine Learning Research. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"How paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2022itayregression,\n  title={How catastrophic can catastrophic forgetting be in linear regression?},\n  author={Evron, Itay and Moroshko, Edward and Ward, Rachel and Srebro, Nathan and Soudry, Daniel},\n  journal={Proceedings of Machine Learning Research},\n  year={2022},\n  url_Paper={https://proceedings.mlr.press/v178/evron22a/evron22a.pdf},\n  my_funding = {NSF 103 and Simons},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Transductive Robust Learning Guarantees.\n \n \n \n \n\n\n \n Montasser, O.; Hanneke, S.; and Srebro, N.\n\n\n \n\n\n\n Proceedings of Machine Learning Research. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"Transductive paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2022montassertransductive,\n  title={Transductive Robust Learning Guarantees},\n  author={Montasser, Omar and Hanneke, Steve and Srebro, Nathan},\n  journal={Proceedings of Machine Learning Research},\n  year={2022},\n  url_Paper={https://proceedings.mlr.press/v151/montasser22a/montasser22a.pdf},\n  my_funding = {DARPA},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Thinking Outside the Ball: Optimal Learning with Gradient Descent for Generalized Linear Stochastic Convex Optimization.\n \n \n \n \n\n\n \n Amir, I.; Livni, R.; and Srebro, N.\n\n\n \n\n\n\n arXiv preprint. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"Thinking paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 38 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2021amirthinking,\n  title={Thinking Outside the Ball: Optimal Learning with Gradient Descent for Generalized Linear Stochastic Convex Optimization},\n  author={Amir, Idan and Livni, Roi and Srebro, Nathan},\n  journal={arXiv preprint},\n  year={2022},\n  url_Paper={https://arxiv.org/pdf/2202.13328.pdf},\n  my_funding = {NSF medium},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The Sample Complexity of One-Hidden-Layer Neural Networks.\n \n \n \n \n\n\n \n Vardi, G.; Shamir, O.; and Srebro, N.\n\n\n \n\n\n\n arXiv preprint. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"The paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 23 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2021vardicomplexity,\n  title={The Sample Complexity of One-Hidden-Layer Neural Networks},\n  author={Vardi, Gal and Shamir, Ohad and Srebro, Nathan},\n  journal={arXiv preprint},\n  year={2022},\n  url_Paper={https://arxiv.org/pdf/2202.06233.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2021\n \n \n (20)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Exponential Family Model-Based Reinforcement Learning via Score Matching.\n \n \n \n \n\n\n \n Li, G.; Li, J.; Srebro, N.; Wang, Z.; and Yang, Z.\n\n\n \n\n\n\n arXiv preprint. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"Exponential paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 8 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2021liexponential,\n  title={Exponential Family Model-Based Reinforcement Learning via Score Matching},\n  author={Li, Gene and Li, Junbo and Srebro, Nathan and Wang, Zhaoran and Yang, Zhuoran},\n  journal={arXiv preprint},\n  year={2021},\n  url_Paper={https://arxiv.org/pdf/2112.14195.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Optimistic Rates: A Unifying Theory for Interpolation Learning and Regularization in Linear Regression.\n \n \n \n \n\n\n \n Zhou, L.; Koehler, F.; Sutherland J, D.; and Srebro, N.\n\n\n \n\n\n\n arXiv preprint. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"Optimistic paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2021zhouoptimistic,\n  title={Optimistic Rates: A Unifying Theory for Interpolation Learning and Regularization in Linear Regression},\n  author={Zhou, Lijia and Koehler, Frederic and Sutherland J, Danica and Srebro, Nathan},\n  journal={arXiv preprint},\n  year={2021},\n  url_Paper={https://arxiv.org/pdf/2112.04470.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Representation Costs of Linear Neural Networks: Analysis and Design.\n \n \n \n \n\n\n \n Dai, Z.; Karzand, M.; and Srebro, N.\n\n\n \n\n\n\n Advances in Neural Information Processing Systems. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"Representation paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 8 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2021dairepresentation,\n  title={Representation Costs of Linear Neural Networks: Analysis and Design},\n  author={Dai, Zhen and Karzand, Mina and Srebro, Nathan},\n  journal={Advances in Neural Information Processing Systems},\n  year={2021},\n  url_Paper={https://proceedings.neurips.cc/paper/2021/file/e22cb9d6bbb4c290a94e4fff4d68a831-Paper.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Stochastic Newton Algorithm for Distributed Convex Optimization.\n \n \n \n \n\n\n \n Bullins, B.; Patel, K.; Shamir, O.; Srebro, N.; and Woodworth E, B.\n\n\n \n\n\n\n Advances in Neural Information Processing Systems. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"A paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 6 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2021bullinsstochastic,\n  title={A Stochastic Newton Algorithm for Distributed Convex Optimization},\n  author={Bullins, Brian and Patel, Kshitij and Shamir, Ohad and Srebro, Nathan and Woodworth E, Blake},\n  journal={Advances in Neural Information Processing Systems},\n  year={2021},\n  url_Paper={https://proceedings.neurips.cc/paper/2021/file/e17a5a399de92e1d01a56c50afb2a68e-Paper.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On the Power of Differentiable Learning versus PAC and SQ Learning.\n \n \n \n \n\n\n \n Abbe, E.; Kamath, P.; Malach, E.; Sandon, C.; and Srebro, N.\n\n\n \n\n\n\n Advances in Neural Information Processing Systems. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"On paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 9 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2021abbedifferentiable,\n  title={On the Power of Differentiable Learning versus PAC and SQ Learning},\n  author={Abbe, Emmanuel and Kamath, Pritish and Malach, Eran and Sandon, Colin and Srebro, Nathan},\n  journal={Advances in Neural Information Processing Systems},\n  year={2021},\n  url_Paper={https://proceedings.neurips.cc/paper/2021/file/cc225865b743ecc91c4743259813f604-Paper.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Uniform Convergence of Interpolators: Gaussian Width, Norm Bounds and Benign Overfitting.\n \n \n \n \n\n\n \n Koehler, F.; Zhou, L.; Sutherland J, D.; and Srebro, N.\n\n\n \n\n\n\n Advances in Neural Information Processing Systems. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"Uniform paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 5 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2021koehleruniform,\n  title={Uniform Convergence of Interpolators: Gaussian Width, Norm Bounds and Benign Overfitting},\n  author={Koehler, Frederic and Zhou, Lijia and Sutherland J, Danica and Srebro, Nathan},\n  journal={Advances in Neural Information Processing Systems},\n  year={2021},\n  url_Paper={https://proceedings.neurips.cc/paper/2021/file/ac9815bef801f58de83804bce86984ad-Paper.pdf}\n} \n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An Even More Optimal Stochastic Optimization Algorithm: Minibatching and Interpolation Learning.\n \n \n \n \n\n\n \n Woodworth E, B.; and Srebro, N.\n\n\n \n\n\n\n Advances in Neural Information Processing Systems. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"An paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2021woodworthoptimal,\n  title={An Even More Optimal Stochastic Optimization Algorithm: Minibatching and Interpolation Learning},\n  author={Woodworth E, Blake and Srebro, Nathan},\n  journal={Advances in Neural Information Processing Systems},\n  year={2021},\n  url_Paper={https://proceedings.neurips.cc/paper/2021/file/3c63ec7be1b6c49e6c308397023fd8cd-Paper.pdf}\n} \n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Transductive Robust Learning Guarantees.\n \n \n \n \n\n\n \n Montasser, O.; Hanneke, S.; and Srebro, N.\n\n\n \n\n\n\n arXiv preprint. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"Transductive paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2021montassertransductive,\n  title={Transductive Robust Learning Guarantees},\n  author={Montasser, Omar and Hanneke, Steve and Srebro, Nathan},\n  journal={arXiv preprint},\n  year={2021},\n  url_Paper={https://arxiv.org/pdf/2110.10602.pdf}\n} \n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On Margin Maximization in Linear and ReLU Networks.\n \n \n \n \n\n\n \n Vardi, G.; Shamir, O.; and Srebro, N.\n\n\n \n\n\n\n arXiv preprint. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"On paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 6 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2021vardimargin,\n  title={On Margin Maximization in Linear and ReLU Networks},\n  author={Vardi, Gal and Shamir, Ohad and Srebro, Nathan},\n  journal={arXiv preprint},\n  year={2021},\n  url_Paper={https://arxiv.org/pdf/2110.02732.pdf}\n} \n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Adversarially robust learning with unknown perturbation sets.\n \n \n \n \n\n\n \n Montasser, O.; Hanneke, S.; and Srebro, N.\n\n\n \n\n\n\n Conference on Learning Theory. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"Adversarially paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2021montasseradversarially,\n  title={Adversarially robust learning with unknown perturbation sets},\n  author={Montasser, Omar and Hanneke, Steve and Srebro, Nathan},\n  journal={Conference on Learning Theory},\n  year={2021},\n  url_Paper={http://proceedings.mlr.press/v134/montasser21a/montasser21a.pdf}\n} \n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The min-max complexity of distributed stochastic convex optimization with intermittent communication.\n \n \n \n \n\n\n \n Woodworth E, B.; Bullins, B.; Shamir, O.; and Srebro, N.\n\n\n \n\n\n\n Conference on Learning Theory. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"The paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2021woodworthcomplexity,\n  title={The min-max complexity of distributed stochastic convex optimization with intermittent communication},\n  author={Woodworth E, Blake and Bullins, Brian and Shamir, Ohad and Srebro, Nathan},\n  journal={Conference on Learning Theory},\n  year={2021},\n  url_Paper={http://proceedings.mlr.press/v134/woodworth21a/woodworth21a.pdf}\n} \n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fast margin maximization via dual acceleration.\n \n \n \n \n\n\n \n Ji, Z.; Srebro, N.; and Telgarsky, M.\n\n\n \n\n\n\n International Conference on Machine Learning. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"Fast paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2021jimaximization,\n  title={Fast margin maximization via dual acceleration},\n  author={Ji, Ziwei and Srebro, Nathan and Telgarsky, Matus},\n  journal={International Conference on Machine Learning},\n  year={2021},\n  url_Paper={http://proceedings.mlr.press/v139/ji21a/ji21a.pdf}\n} \n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Quantifying the benefit of using differentiable learning over tangent kernels.\n \n \n \n \n\n\n \n Malach, E.; Kamath, P.; Abbe, E.; and Srebro, N.\n\n\n \n\n\n\n International Conference on Machine Learning. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"Quantifying paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 11 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2021malachquantifying,\n  title={Quantifying the benefit of using differentiable learning over tangent kernels},\n  author={Malach, Eran and Kamath, Pritish and Abbe, Emmanuel and Srebro, Nathan},\n  journal={International Conference on Machine Learning},\n  year={2021},\n  url_Paper={http://proceedings.mlr.press/v139/malach21a/malach21a.pdf}\n} \n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On the implicit bias of initialization shape: Beyond infinitesimal mirror descent.\n \n \n \n \n\n\n \n Azulay, S.; Bartlett, P.; Mianjy, P.; and Srebro, N.\n\n\n \n\n\n\n International Conference on Machine Learning. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"On paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2021azulayimplicit,\n  title={On the implicit bias of initialization shape: Beyond infinitesimal mirror descent},\n  author={Azulay, Shahar and Bartlett, Peter and Mianjy, Poorya and Srebro, Nathan},\n  journal={International Conference on Machine Learning},\n  year={2021},\n  url_Paper={http://proceedings.mlr.press/v139/azulay21a/azulay21a.pdf}\n} \n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Dropout: Explicit forms and capacity control.\n \n \n \n \n\n\n \n Arora, R.; Bartlett, P.; Mianjy, P.; and Srebro, N.\n\n\n \n\n\n\n International Conference on Machine Learning. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"Dropout: paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2021aroradropout,\n  title={Dropout: Explicit forms and capacity control},\n  author={Arora, Raman and Bartlett, Peter and Mianjy, Poorya and Srebro, Nathan},\n  journal={International Conference on Machine Learning},\n  year={2021},\n  url_Paper={http://proceedings.mlr.press/v139/arora21a/arora21a.pdf}\n} \n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Eluder dimension and generalized rank.\n \n \n \n \n\n\n \n Li, G.; Kamath, P.; Foster J, D.; and Srebro, N.\n\n\n \n\n\n\n arXiv preprint. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"Eluder paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2021lieluder,\n  title={Eluder dimension and generalized rank},\n  author={Li, Gene and Kamath, Pritish and Foster J, Dylan and Srebro, Nathan},\n  journal={arXiv preprint},\n  year={2021},\n  url_Paper={https://arxiv.org/pdf/2104.06970.pdf}\n} \n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Mirrorless mirror descent: A natural derivation of mirror descent.\n \n \n \n \n\n\n \n Gunasekar, S.; Woodworth, B.; and Srebro, N.\n\n\n \n\n\n\n International Conference on Artificial Intelligence and Statistics. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"Mirrorless paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2021gunasekarmirrorless,\n  title={Mirrorless mirror descent: A natural derivation of mirror descent},\n  author={Gunasekar, Suriya and Woodworth, Blake and Srebro, Nathan},\n  journal={International Conference on Artificial Intelligence and Statistics},\n  year={2021},\n  url_Paper={http://proceedings.mlr.press/v130/gunasekar21a/gunasekar21a.pdf}\n} \n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Does invariant risk minimization capture invariance?.\n \n \n \n \n\n\n \n Kamath, P.; Tangella, A.; Sutherland, D.; and Srebro, N.\n\n\n \n\n\n\n International Conference on Artificial Intelligence and Statistics. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"Does paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2021kamathinvariant,\n  title={Does invariant risk minimization capture invariance?},\n  author={Kamath, Pritish and Tangella, Akilesh and Sutherland, Danica and Srebro, Nathan},\n  journal={International Conference on Artificial Intelligence and Statistics},\n  year={2021},\n  url_Paper={http://proceedings.mlr.press/v130/kamath21a/kamath21a.pdf}\n} \n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Simple surveys: Response retrieval inspired by recommendation systems.\n \n \n \n \n\n\n \n Sengupta, N.; Evans, J.; and Srebro, N.\n\n\n \n\n\n\n Social Science Computer Review. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"Simple paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2021senguptasimple,\n  title={Simple surveys: Response retrieval inspired by recommendation systems},\n  author={Sengupta, Nandana and Evans, James and Srebro, Nathan},\n  journal={Social Science Computer Review},\n  year={2021},\n  url_Paper={https://journals.sagepub.com/doi/pdf/10.1177/0894439319848374}\n} \n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An accelerated communication-efficient primal-dual optimization framework for structured machine learning.\n \n \n \n \n\n\n \n Ma, C.; Jaggi, M.; Curtis E, F.; Takác, M.; and Srebro, N.\n\n\n \n\n\n\n Optimization Methods and Software. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"An paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2021macommunicationefficient,\n  title={An accelerated communication-efficient primal-dual optimization framework for structured machine learning},\n  author={Ma, Chenxin and Jaggi, Martin and Curtis E, Frank and Takác, Martin and Srebro, Nathan},\n  journal={Optimization Methods and Software},\n  year={2021},\n  url_Paper={https://www.tandfonline.com/doi/pdf/10.1080/10556788.2019.1650361}\n} \n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2020\n \n \n (16)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Efficiently learning adversarially robust halfspaces with noise.\n \n \n \n \n\n\n \n Montasser, O.; Goel, S.; Diakonikolas, I.; and Srebro, N.\n\n\n \n\n\n\n PMLR. 2020.\n \n\nSet to appear in the International Conference in Machine Learning (ICML) 2020\n\n
\n\n\n\n \n \n \"Efficiently paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2020montasserefficiently,\n  title={Efficiently learning adversarially robust halfspaces with noise},\n  author={Montasser, Omar and Goel, Surbhi and Diakonikolas, Ilias and Srebro, Nathan},\n  journal={PMLR},\n  year={2020},\n  bibbase_note={Set to appear in the International Conference in Machine Learning (ICML) 2020},\n  url_Paper={http://proceedings.mlr.press/v119/montasser20a/montasser20a.pdf}\n} \n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fair learning with private demographic data.\n \n \n \n \n\n\n \n Mozannar, H.; Ohannessian, M.; and Srebro, N.\n\n\n \n\n\n\n PMLR. 2020.\n \n\nSet to appear in the International Conference in Machine Learning (ICML) 2020\n\n
\n\n\n\n \n \n \"Fair paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2020mozannarfair,\n  title={Fair learning with private demographic data},\n  author={Mozannar, Hussein and Ohannessian, Mesrob and Srebro, Nathan},\n  journal={PMLR},\n  year={2020},\n  bibbase_note={Set to appear in the International Conference in Machine Learning (ICML) 2020},\n  url_Paper={http://proceedings.mlr.press/v119/mozannar20a/mozannar20a.pdf}\n} \n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Is local SGD better than minibatch SGD?.\n \n \n \n \n\n\n \n Woodworth, B.; Patel Kshitij, K.; Stich, S.; Dai, Z.; Bullins, B.; Mcmahan, B.; Shamir, O.; and Srebro, N.\n\n\n \n\n\n\n PMLR. 2020.\n \n\nSet to appear in the International Conference in Machine Learning (ICML) 2020\n\n
\n\n\n\n \n \n \"Is paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2020woodworthsgd,\n  title={Is local SGD better than minibatch SGD?},\n  author={Woodworth, Blake and Patel Kshitij, Kumar and Stich, Sebastian and Dai, Zhen and Bullins, Brian and Mcmahan, Brendan and Shamir, Ohad and Srebro, Nathan},\n  journal={PMLR},\n  year={2020},\n  bibbase_note={Set to appear in the International Conference in Machine Learning (ICML) 2020},\n  url_Paper={http://proceedings.mlr.press/v119/woodworth20a/woodworth20a.pdf}\n} \n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Approximate is good enough: Probabilistic variants of dimensional and margin complexity.\n \n \n \n \n\n\n \n Kamath, P.; Montasser, O.; and Srebro, N.\n\n\n \n\n\n\n PMLR. 2020.\n \n\n\n\n
\n\n\n\n \n \n \"Approximate paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2020kamathapproximate,\n  title={Approximate is good enough: Probabilistic variants of dimensional and margin complexity},\n  author={Kamath, Pritish and Montasser, Omar and Srebro, Nathan},\n  journal={PMLR},\n  year={2020},\n  url_Paper={http://proceedings.mlr.press/v125/kamath20b/kamath20b.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Kernel and rich regimes in overparametrized models.\n \n \n \n \n\n\n \n Woodworth, B.; Gunasekar, S.; Lee D, J.; Moroshko, E.; Savarese, P.; Golan, I.; Soudry, D.; and Srebro, N.\n\n\n \n\n\n\n PMLR. 2020.\n \n\n\n\n
\n\n\n\n \n \n \"Kernel paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2020woodworthkernel,\n  title={Kernel and rich regimes in overparametrized models},\n  author={Woodworth, Blake and Gunasekar, Suriya and Lee D, Jason and Moroshko, Edward and Savarese, Pedro and Golan, Itay and Soudry, Daniel and Srebro, Nathan},\n  journal={PMLR},\n  year={2020},\n  url_Paper={http://proceedings.mlr.press/v125/woodworth20a/woodworth20a.pdf}\n  }\n  \n  
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Predictive Value Generalization Bounds.\n \n \n \n \n\n\n \n Vemuri, K.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2020.\n \n\n\n\n
\n\n\n\n \n \n \"Predictive paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2020vemuripredictive,\n  title={Predictive Value Generalization Bounds},\n  author={Vemuri, Keshav and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2020},\n  url_Paper={https://arxiv.org/pdf/2007.05073.pdf}\n} \n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Guaranteed validity for empirical approaches to adaptive data analysis.\n \n \n \n \n\n\n \n Rogers, R.; Roth, A.; Smith, A.; Srebro, N.; Thakkar, O.; and Woodworth, B.\n\n\n \n\n\n\n . 2020.\n \n\n\n\n
\n\n\n\n \n \n \"Guaranteed paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2020rogersguaranteed,\n  title={Guaranteed validity for empirical approaches to adaptive data analysis},\n  author={Rogers, Ryan and Roth, Aaron and Smith, Adam and Srebro, Nathan and Thakkar, Om and Woodworth, Blake},\n  year={2020},\n  journal={International Conference on Artificial Intelligence and Statistics (AISTATS)},\n  url_Paper={https://arxiv.org/pdf/1906.09231.pdf}\n\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A tight convergence analysis for stochastic gradient descent with delayed updates.\n \n \n \n \n\n\n \n Arjevani, Y.; Shamir, O.; and Srebro, N.\n\n\n \n\n\n\n Conference on Algorithmic Learning Theory (ALT). 2020.\n \n\n\n\n
\n\n\n\n \n \n \"A paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2020arjevaniconvergence,\n  title={A tight convergence analysis for stochastic gradient descent with delayed updates},\n  author={Arjevani, Yossi and Shamir, Ohad and Srebro, Nathan},\n  year={2020},\n  journal={Conference on Algorithmic Learning Theory (ALT)},\n  url_Paper={http://proceedings.mlr.press/v117/arjevani20a/arjevani20a.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Reducing adversarially robust learning to non-robust pac learning.\n \n \n \n \n\n\n \n Montasser, O.; Hanneke, S.; and Srebro, N.\n\n\n \n\n\n\n Advances in Neural Information Processing Systems. 2020.\n \n\n\n\n
\n\n\n\n \n \n \"Reducing paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2020montasserreducing,\n  title={Reducing adversarially robust learning to non-robust pac learning},\n  author={Montasser, Omar and Hanneke, Steve and Srebro, Nathan},\n  journal={Advances in Neural Information Processing Systems},\n  year={2020},\n  url_Paper={https://proceedings.neurips.cc/paper/2020/file/a822554e5403b1d370db84cfbc530503-Paper.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Implicit bias in deep linear classification: Initialization scale vs training accuracy.\n \n \n \n \n\n\n \n Moroshko, E.; Woodworth E, B.; Gunasekar, S.; Lee D, J.; and Srebro, N.\n\n\n \n\n\n\n Advances in neural information processing systems. 2020.\n \n\n\n\n
\n\n\n\n \n \n \"Implicit paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2020moroshkoimplicit,\n  title={Implicit bias in deep linear classification: Initialization scale vs training accuracy},\n  author={Moroshko, Edward and Woodworth E, Blake and Gunasekar, Suriya and Lee D, Jason and Srebro, Nathan},\n  journal={Advances in neural information processing systems},\n  year={2020},\n  url_Paper={https://proceedings.neurips.cc/paper/2020/file/fc2022c89b61c76bbef978f1370660bf-Paper.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On uniform convergence and low-norm interpolation learning.\n \n \n \n \n\n\n \n Zhou, L.; Sutherland J, D.; and Srebro, N.\n\n\n \n\n\n\n Advances in Neural Information Processing Systems. 2020.\n \n\n\n\n
\n\n\n\n \n \n \"On paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2020zhouconvergence,\n  title={On uniform convergence and low-norm interpolation learning},\n  author={Zhou, Lijia and Sutherland J, Danica and Srebro, Nathan},\n  journal={Advances in Neural Information Processing Systems},\n  year={2020},\n  url_Paper={https://proceedings.neurips.cc/paper/2020/file/4cc5400e63624c44fadeda99f57588a6-Paper.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Minibatch vs local sgd for heterogeneous distributed learning.\n \n \n \n \n\n\n \n Woodworth, B.; Patel Kshitij, K.; and Srebro, N.\n\n\n \n\n\n\n Advances in Neural Information Processing Systems. 2020.\n \n\n\n\n
\n\n\n\n \n \n \"Minibatch paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2020woodworthminibatch,\n  title={Minibatch vs local sgd for heterogeneous distributed learning},\n  author={Woodworth, Blake and Patel Kshitij, Kumar and Srebro, Nathan},\n  journal={Advances in Neural Information Processing Systems},\n  year={2020},\n  url_Paper={https://proceedings.neurips.cc/paper/2020/file/45713f6ff2041d3fdfae927b82488db8-Paper.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Dropout: Explicit Forms and Capacity Control.\n \n \n \n \n\n\n \n Arora, R.; Bartlett, P.; Mianjy, P.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2020.\n \n\n\n\n
\n\n\n\n \n \n \"Dropout: paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2020aroradropout,\n  title={Dropout: Explicit Forms and Capacity Control},\n  author={Arora, Raman and Bartlett, Peter and Mianjy, Poorya and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2020},\n  url_Paper={https://arxiv.org/pdf/2003.03397.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fair Learning with Private Demographic Data.\n \n \n \n \n\n\n \n Mozannar, H.; Ohannessian, M.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2020.\n \n\n\n\n
\n\n\n\n \n \n \"Fair paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2020mozannarfair,\n  title={Fair Learning with Private Demographic Data},\n  author={Mozannar, Hussein and Ohannessian, Mesrob and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2020},\n  url_Paper={https://arxiv.org/pdf/2002.11651.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Kernel and Rich Regimes in Overparametrized Models.\n \n \n \n \n\n\n \n Woodworth, B.; Gunasekar, S.; Lee, J.; Moroshko, E.; Savarese, P.; Golan, I.; Soudry, D.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2020.\n \n\n\n\n
\n\n\n\n \n \n \"Kernel paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2020woodworthkernel,\n  title={Kernel and Rich Regimes in Overparametrized Models},\n  author={Woodworth, Blake and Gunasekar, Suriya and Lee, Jason and Moroshko, Edward and Savarese, Pedro and Golan, Itay and Soudry, Daniel and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2020},\n  url_Paper={https://arxiv.org/pdf/2002.09277.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Is Local SGD Better than Minibatch SGD?.\n \n \n \n \n\n\n \n Woodworth, B.; Patel, K. K.; Stich, S.; Dai, Z.; Bullins, B.; McMahan, H. B.; Shamir, O.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2020.\n \n\n\n\n
\n\n\n\n \n \n \"Is paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2020woodworthlocal,\n  title={Is Local SGD Better than Minibatch SGD?},\n  author={Woodworth, Blake and Patel, Kumar Kshitij and Stich, Sebastian and Dai, Zhen and Bullins, Brian and McMahan, H. Brendan and Shamir, Ohad and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2020},\n  url_Paper={https://arxiv.org/pdf/2002.07839.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2019\n \n \n (35)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Lower Bounds for Non-convex Stochastic Optimization.\n \n \n \n \n\n\n \n Arjevani, Y.; Carmon, Y.; Duchi, J.; Foster, D.; Srebro, N.; and Woodworth, B.\n\n\n \n\n\n\n Mathematical Programming (accepted for publication). 2019.\n \n\n\n\n
\n\n\n\n \n \n \"Lower paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 12 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2019arjevanilower,\n  title={Lower Bounds for Non-convex Stochastic Optimization},\n  author={Arjevani, Yossi and Carmon, Yair and Duchi, John and Foster, Dylan and Srebro, Nathan and Woodworth, Blake},\n  journal={Mathematical Programming (accepted for publication)},\n  year={2019},\n  url_Paper={https://arxiv.org/pdf/1912.02365.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Function Space View of Bounded Norm Infinite Width ReLU Nets: The Multivariate Case.\n \n \n \n \n\n\n \n Ongie, G.; Willett, R.; Soudry, D.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2019.\n \n\n\n\n
\n\n\n\n \n \n \"A paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2019ongiefunction,\n  title={A Function Space View of Bounded Norm Infinite Width ReLU Nets: The Multivariate Case},\n  author={Ongie, Greg and Willett, Rebecca and Soudry, Daniel and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2019},\n  url_Paper={https://arxiv.org/pdf/1910.01635.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Simple Surveys: Response Retrieval Inspired by Recommendation Systems.\n \n \n \n \n\n\n \n Sengupta, N.; Srebro, N.; and Evans, J.\n\n\n \n\n\n\n Social Science Computer Review. 2019.\n \n\n\n\n
\n\n\n\n \n \n \"Simple paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2019senguptasimple,\n  title={Simple Surveys: Response Retrieval Inspired by Recommendation Systems},\n  author={Sengupta, Nandana and Srebro, Nathan and Evans, James},\n  journal={Social Science Computer Review},\n  year={2019},\n  %https://journals.sagepub.com/doi/full/10.1177/0894439319848374\n  url_Paper={https://arxiv.org/ftp/arxiv/papers/1901/1901.09659.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An Accelerated Communication-efficient Primal-dual Optimization Framework for Structured Machine Learning.\n \n \n \n \n\n\n \n Ma, C.; Jaggi, M.; Curtis, F.; Srebro, N.; and Takáč, M.\n\n\n \n\n\n\n Optimization Methods and Software. 2019.\n \n\n\n\n
\n\n\n\n \n \n \"An paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2019maaccelerated,\n  title={An Accelerated Communication-efficient Primal-dual Optimization Framework for Structured Machine Learning},\n  author={Ma, Chenxin and Jaggi, Martin and Curtis, Frank and Srebro, Nathan and Tak{\\'a}{\\v{c}}, Martin},\n  journal={Optimization Methods and Software},\n  year={2019},\n  %https://www.tandfonline.com/doi/full/10.1080/10556788.2019.1650361\n  url_Paper={https://arxiv.org/pdf/1711.05305.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Open Problem: The Oracle Complexity of Convex Optimization with Limited Memory.\n \n \n \n \n\n\n \n Woodworth, B.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 32nd Annual Conference on Learning Theory (COLT), volume PMLR 99, pages 3202-3210, 2019. \n \n\n\n\n
\n\n\n\n \n \n \"Open paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2019woodworthopen,\n  title={Open Problem: The Oracle Complexity of Convex Optimization with Limited Memory},\n  author={Woodworth, Blake and Srebro, Nathan},\n  booktitle={Proceedings of the 32nd Annual Conference on Learning Theory (COLT)},\n  volume={PMLR 99},\n  pages={3202-3210},\n  year={2019},\n  %http://proceedings.mlr.press/v99/woodworth19a.html\n  url_Paper={https://arxiv.org/pdf/1907.00762.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Lexicographic and Depth-Sensitive Margins in Homogeneous and Non-Homogeneous Deep Models.\n \n \n \n \n\n\n \n Shpigel Nacson, M.; Gunasekar, S.; Lee, J.; Srebro, N.; and Soudry, D.\n\n\n \n\n\n\n In Proceedings of the 36th International Conference of Machine Learning (ICML), volume PMLR 97, pages 4683–4692, 2019. \n \n\n\n\n
\n\n\n\n \n \n \"Lexicographic paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2019shipgellexicographic,\n  title={Lexicographic and Depth-Sensitive Margins in Homogeneous and Non-Homogeneous Deep Models},\n  author={Shpigel Nacson, Mor and Gunasekar, Suriya and Lee, Jason and Srebro, Nathan and Soudry, Daniel},\n  booktitle={Proceedings of the 36th International Conference of Machine Learning (ICML)},\n  volume={PMLR 97},\n  pages={4683--4692},\n  year={2019},\n  %http://proceedings.mlr.press/v97/nacson19a.html\n  url_Paper={https://arxiv.org/pdf/1905.07325.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Semi-cyclic Stochastic Gradient Descent.\n \n \n \n \n\n\n \n Eichner, H.; Koren, T.; McMahan, H. B.; Srebro, N.; and Talwar, K.\n\n\n \n\n\n\n In Proceedings of the 36th International Conference on Machine Learning (ICML), volume PMLR 97, pages 1764–1773, 2019. \n \n\n\n\n
\n\n\n\n \n \n \"Semi-cyclic paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2019eichnersemi,\n  title={Semi-cyclic Stochastic Gradient Descent},\n  author={Eichner, Hubert and Koren, Tomer and McMahan, H. Brendan and Srebro, Nathan and Talwar, Kunal},\n  booktitle={Proceedings of the 36th International Conference on Machine Learning (ICML)},\n  volume={PMLR 97},\n  pages={1764--1773},\n  year={2019},\n  %http://proceedings.mlr.press/v97/eichner19a.html\n  url_Paper={https://arxiv.org/pdf/1904.10120.pdf}\n} \n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The Complexity of Making the Gradient Small in Stochastic Convex Optimization.\n \n \n \n \n\n\n \n Foster, D.; Sekhari, A.; Shamir, O.; Srebro, N.; Sridharan, K.; and Woodworth, B.\n\n\n \n\n\n\n In Proceedings of the 32nd Annual Conference on Learning Theory (COLT), volume PMLR 99, pages 1319–1345, 2019. \n \n\n\n\n
\n\n\n\n \n \n \"The paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2019fostercomplexity,\n  title={The Complexity of Making the Gradient Small in Stochastic Convex Optimization},\n  author={Foster, Dylan and Sekhari, Ayush and Shamir, Ohad and Srebro, Nathan and Sridharan, Karthik and Woodworth, Blake},\n  booktitle={Proceedings of the 32nd Annual Conference on Learning Theory (COLT)},\n  volume={PMLR 99},\n  pages={1319--1345},\n  year={2019},\n  %http://proceedings.mlr.press/v99/foster19b.html\n  url_Paper={https://arxiv.org/pdf/1902.04686.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n How do Infinite Width Bounded Norm Networks Look in Function Space?.\n \n \n \n \n\n\n \n Savarese, P.; Evron, I.; Soudry, D.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 32nd Annual Conference on Learning Theory (COLT), volume PMLR 99, pages 2667–2690, 2019. \n \n\n\n\n
\n\n\n\n \n \n \"How paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2019savareseinfinite,\n  title={How do Infinite Width Bounded Norm Networks Look in Function Space?},\n  author={Savarese, Pedro and Evron, Itay and Soudry, Daniel and Srebro, Nathan},\n  booktitle={Proceedings of the 32nd Annual Conference on Learning Theory (COLT)},\n  volume={PMLR 99},\n  pages={2667--2690},\n  year={2019},\n  %http://proceedings.mlr.press/v99/savarese19a.html\n  url_Paper={https://arxiv.org/pdf/1902.05040.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n VC Classes are Adversarially Robustly Learnable, but Only Improperly.\n \n \n \n \n\n\n \n Montasser, O.; Hanneke, S.; and Srebro, N.\n\n\n \n\n\n\n In Beygelzimer, A.; and Hsu, D., editor(s), Proceedings of the 32nd Annual Conference on Learning Theory (COLT), volume PMLR 99, pages 2512–2530, 2019. \n \n\n\n\n
\n\n\n\n \n \n \"VC paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2019montasservc,\n  title={VC Classes are Adversarially Robustly Learnable, but Only Improperly},\n  author={Montasser, Omar and Hanneke, Steve and Srebro, Nathan},\n  booktitle={Proceedings of the 32nd Annual Conference on Learning Theory (COLT)},\n  volume={PMLR 99},\n  pages={2512--2530},\n  year={2019},\n  editor={Beygelzimer, Alina and Hsu, Daniel},\n  %http://proceedings.mlr.press/v99/montasser19a.html\n  url_Paper={https://arxiv.org/pdf/1902.04217.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n From Fair Decision Making to Social Equality.\n \n \n \n \n\n\n \n Mouzannar, H.; Ohannessian, M.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the Conference on Fairness, Accountability, and Transparency (FAT*), pages 359–368, 2019. \n \n\n\n\n
\n\n\n\n \n \n \"From paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2019mouzannarfair,\n  title={From Fair Decision Making to Social Equality},\n  author={Mouzannar, Hussein and Ohannessian, Mesrob and Srebro, Nathan},\n  booktitle={Proceedings of the Conference on Fairness, Accountability, and Transparency (FAT*)},\n  pages={359--368},\n  year={2019},\n  %https://dl.acm.org/doi/10.1145/3287560.3287599\n  url_Paper={https://arxiv.org/pdf/1812.02952.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Stochastic Canonical Correlation Analysis.\n \n \n \n \n\n\n \n Gao, C.; Garber, D.; Srebro, N.; Wang, J.; and Wang, W.\n\n\n \n\n\n\n Journal of Machine Learning Research, 20(167): 1–46. 2019.\n \n\n\n\n
\n\n\n\n \n \n \"Stochastic paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2019gaostochastic,\n  title={Stochastic Canonical Correlation Analysis},\n  author={Gao, Chao and Garber, Dan and Srebro, Nathan and Wang, Jialei and Wang, Weiran},\n  journal={Journal of Machine Learning Research},\n  volume={20},\n  number={167},\n  pages={1--46},\n  year={2019},\n  editor={Shawe-Taylor, John},\n  %http://www.jmlr.org/papers/volume20/18-095/18-095.pdf\n  url_Paper={https://arxiv.org/pdf/1702.06533.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Training Well-generalizing Classifiers for Fairness Metrics and Other Data-Dependent Constraints.\n \n \n \n \n\n\n \n Cotter, A.; Gupta, M.; Jiang, H.; Srebro, N.; Sridharan, K.; Wang, S.; Woodworth, B.; and You, S.\n\n\n \n\n\n\n In Proceedings of the 36th International Conference of Machine Learning (ICML), volume PMLR 97, pages 1397–1405, 2019. \n \n\n\n\n
\n\n\n\n \n \n \"Training paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2019cottertraining,\n  title={Training Well-generalizing Classifiers for Fairness Metrics and Other Data-Dependent Constraints},\n  author={Cotter, Andrew and Gupta, Maya and Jiang, Heinrich and Srebro, Nathan and Sridharan, Karthik and Wang, Serena and Woodworth, Blake and You, Seungil},\n  booktitle={Proceedings of the 36th International Conference of Machine Learning (ICML)},\n  volume={PMLR 97},\n  pages={1397--1405},\n  year={2019},\n  %http://proceedings.mlr.press/v97/cotter19b.html\n  url_Paper={https://arxiv.org/pdf/1807.00028.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Stochastic Gradient Descent on Separable Data: Exact Convergence with a Fixed Learning Rate.\n \n \n \n \n\n\n \n Nacson, M. S.; Srebro, N.; and Soudry, D.\n\n\n \n\n\n\n In Proceedings of the 22nd International Conference on Artificial Intelligence and Statistics (AISTATS), volume PMLR 89, pages 3051–3059, 2019. \n \n\n\n\n
\n\n\n\n \n \n \"Stochastic paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2019nacsonstochastic,\n  title={Stochastic Gradient Descent on Separable Data: Exact Convergence with a Fixed Learning Rate},\n  author={Nacson, Mor Shpigel and Srebro, Nathan and Soudry, Daniel},\n  booktitle={Proceedings of the 22nd International Conference on Artificial Intelligence and Statistics (AISTATS)},\n  volume={PMLR 89},\n  pages={3051--3059},\n  year={2019},\n  %http://proceedings.mlr.press/v89/nacson19a.html\n  url_Paper={https://arxiv.org/pdf/1806.01796.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The Role of Over-Parametrization in Generalization of Neural Networks.\n \n \n \n \n\n\n \n Neyshabur, B.; Li, Z.; Bhojanapalli, S.; LeCun, Y.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 7th International Conference on Learning Representations (ICLR), 2019. \n \n\nThis is a revision of a paper published on arXiv in 2018\n\n
\n\n\n\n \n \n \"The paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2019neyshaburrole,\n  title={The Role of Over-Parametrization in Generalization of Neural Networks},\n  author={Neyshabur, Behnam and Li, Zhiyuan and Bhojanapalli, Srinadh and LeCun, Yann and Srebro, Nathan},\n  booktitle={Proceedings of the 7th International Conference on Learning Representations (ICLR)},\n  year={2019},\n  bibbase_note={This is a revision of a paper published on arXiv in 2018},\n  url_Paper={https://openreview.net/pdf?id=BygfghAcYX}\n}%%%\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Convergence of Gradient Descent on Separable Data.\n \n \n \n \n\n\n \n Nacson, M. S.; Lee, J.; Gunasekar, S.; Savarese, P.; Srebro, N.; and Soudry, D.\n\n\n \n\n\n\n In Proceedings of the 22nd International Conference on Artificial Intelligence and Statistics (AISTATS), volume PMLR 89, pages 3420–3428, 2019. \n \n\n\n\n
\n\n\n\n \n \n \"Convergence paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2019nacsonconvergence,\n  title={Convergence of Gradient Descent on Separable Data},\n  author={Nacson, Mor Shpigel and Lee, Jason and Gunasekar, Suriya and Savarese, Pedro and Srebro, Nathan and Soudry, Daniel},\n  booktitle={Proceedings of the 22nd International Conference on Artificial Intelligence and Statistics (AISTATS)},\n  volume={PMLR 89},\n  pages={3420--3428},\n  year={2019},\n  %http://proceedings.mlr.press/v89/nacson19b.html\n  url_Paper={https://arxiv.org/pdf/1803.01905.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Stochastic Nonconvex Optimization with Large Minibatches.\n \n \n \n \n\n\n \n Wang, W.; and Srebro, N.\n\n\n \n\n\n\n In Garivier, A.; and Kale, S., editor(s), Proceedings of the 30th International Conference on Algorithmic Learning Theory (ALT), volume PMLR 98, pages 857–882, 2019. \n \n\n\n\n
\n\n\n\n \n \n \"Stochastic paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2019wangstochastic,\n  title={Stochastic Nonconvex Optimization with Large Minibatches},\n  author={Wang, Weiran and Srebro, Nathan},\n  booktitle={Proceedings of the 30th International Conference on Algorithmic Learning Theory (ALT)},\n  volume={PMLR 98},\n  pages={857--882},\n  year={2019},\n  editor={Garivier, Aur{\\'e}lien and Kale, Satyen},\n  %http://proceedings.mlr.press/v98/wang19a.html\n  url_Paper={https://arxiv.org/pdf/1709.08728.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Lower Bounds for Non-convex Stochastic Optimization.\n \n \n \n \n\n\n \n Arjevani, Y.; Carmon, Y.; Duchi, J.; Foster, D.; Srebro, N.; and Woodworth, B.\n\n\n \n\n\n\n arXiv Preprint. 2019.\n \n\n\n\n
\n\n\n\n \n \n \"Lower paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 12 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2019arjevanilower,\n  title={Lower Bounds for Non-convex Stochastic Optimization},\n  author={Arjevani, Yossi and Carmon, Yair and Duchi, John and Foster, Dylan and Srebro, Nathan and Woodworth, Blake},\n  journal={arXiv Preprint},\n  year={2019},\n  url_Paper={https://arxiv.org/pdf/1912.02365.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Function Space View of Bounded Norm Infinite Width ReLU Nets: The Multivariate Case.\n \n \n \n \n\n\n \n Ongie, G.; Willett, R.; Soudry, D.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2019.\n \n\n\n\n
\n\n\n\n \n \n \"A paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2019ongiefunction,\n  title={A Function Space View of Bounded Norm Infinite Width ReLU Nets: The Multivariate Case},\n  author={Ongie, Greg and Willett, Rebecca and Soudry, Daniel and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2019},\n  url_Paper={https://arxiv.org/pdf/1910.01635.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Simple Surveys: Response Retrieval Inspired by Recommendation Systems.\n \n \n \n \n\n\n \n Sengupta, N.; Srebro, N.; and Evans, J.\n\n\n \n\n\n\n Social Science Computer Review. 2019.\n \n\n\n\n
\n\n\n\n \n \n \"Simple paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2019senguptasimple,\n  title={Simple Surveys: Response Retrieval Inspired by Recommendation Systems},\n  author={Sengupta, Nandana and Srebro, Nathan and Evans, James},\n  journal={Social Science Computer Review},\n  year={2019},\n  %https://journals.sagepub.com/doi/full/10.1177/0894439319848374\n  url_Paper={https://arxiv.org/ftp/arxiv/papers/1901/1901.09659.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An Accelerated Communication-efficient Primal-dual Optimization Framework for Structured Machine Learning.\n \n \n \n \n\n\n \n Ma, C.; Jaggi, M.; Curtis, F.; Srebro, N.; and Takáč, M.\n\n\n \n\n\n\n Optimization Methods and Software. 2019.\n \n\n\n\n
\n\n\n\n \n \n \"An paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2019maaccelerated,\n  title={An Accelerated Communication-efficient Primal-dual Optimization Framework for Structured Machine Learning},\n  author={Ma, Chenxin and Jaggi, Martin and Curtis, Frank and Srebro, Nathan and Tak{\\'a}{\\v{c}}, Martin},\n  journal={Optimization Methods and Software},\n  year={2019},\n  %https://www.tandfonline.com/doi/full/10.1080/10556788.2019.1650361\n  url_Paper={https://arxiv.org/pdf/1711.05305.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Open Problem: The Oracle Complexity of Convex Optimization with Limited Memory.\n \n \n \n \n\n\n \n Woodworth, B.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 32nd Annual Conference on Learning Theory (COLT), volume PMLR 99, pages 3202-3210, 2019. \n \n\n\n\n
\n\n\n\n \n \n \"Open paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2019woodworthopen,\n  title={Open Problem: The Oracle Complexity of Convex Optimization with Limited Memory},\n  author={Woodworth, Blake and Srebro, Nathan},\n  booktitle={Proceedings of the 32nd Annual Conference on Learning Theory (COLT)},\n  volume={PMLR 99},\n  pages={3202-3210},\n  year={2019},\n  %http://proceedings.mlr.press/v99/woodworth19a.html\n  url_Paper={https://arxiv.org/pdf/1907.00762.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Guaranteed Validity for Empirical Approaches to Adaptive Data Analysis.\n \n \n \n \n\n\n \n Rogers, R.; Roth, A.; Smith, A.; Srebro, N.; Thakkar, O.; and Woodworth, B.\n\n\n \n\n\n\n arXiv Preprint. 2019.\n \n\n\n\n
\n\n\n\n \n \n \"Guaranteed paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2019rogersguaranteed,\n  title={Guaranteed Validity for Empirical Approaches to Adaptive Data Analysis},\n  author={Rogers, Ryan and Roth, Aaron and Smith, Adam and Srebro, Nathan and Thakkar, Om and Woodworth, Blake},\n  journal={arXiv Preprint},\n  year={2019},\n  url_Paper={https://arxiv.org/pdf/1906.09231.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Lexicographic and Depth-Sensitive Margins in Homogeneous and Non-Homogeneous Deep Models.\n \n \n \n \n\n\n \n Shpigel Nacson, M.; Gunasekar, S.; Lee, J.; Srebro, N.; and Soudry, D.\n\n\n \n\n\n\n In Proceedings of the 36th International Conference of Machine Learning (ICML), volume PMLR 97, pages 4683–4692, 2019. \n \n\n\n\n
\n\n\n\n \n \n \"Lexicographic paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2019shipgellexicographic,\n  title={Lexicographic and Depth-Sensitive Margins in Homogeneous and Non-Homogeneous Deep Models},\n  author={Shpigel Nacson, Mor and Gunasekar, Suriya and Lee, Jason and Srebro, Nathan and Soudry, Daniel},\n  booktitle={Proceedings of the 36th International Conference of Machine Learning (ICML)},\n  volume={PMLR 97},\n  pages={4683--4692},\n  year={2019},\n  %http://proceedings.mlr.press/v97/nacson19a.html\n  url_Paper={https://arxiv.org/pdf/1905.07325.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Semi-cyclic Stochastic Gradient Descent.\n \n \n \n \n\n\n \n Eichner, H.; Koren, T.; McMahan, H. B.; Srebro, N.; and Talwar, K.\n\n\n \n\n\n\n In Proceedings of the 36th International Conference on Machine Learning (ICML), volume PMLR 97, pages 1764–1773, 2019. \n \n\n\n\n
\n\n\n\n \n \n \"Semi-cyclic paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2019eichnersemi,\n  title={Semi-cyclic Stochastic Gradient Descent},\n  author={Eichner, Hubert and Koren, Tomer and McMahan, H. Brendan and Srebro, Nathan and Talwar, Kunal},\n  booktitle={Proceedings of the 36th International Conference on Machine Learning (ICML)},\n  volume={PMLR 97},\n  pages={1764--1773},\n  year={2019},\n  %http://proceedings.mlr.press/v97/eichner19a.html\n  url_Paper={https://arxiv.org/pdf/1904.10120.pdf}\n} \n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The Complexity of Making the Gradient Small in Stochastic Convex Optimization.\n \n \n \n \n\n\n \n Foster, D.; Sekhari, A.; Shamir, O.; Srebro, N.; Sridharan, K.; and Woodworth, B.\n\n\n \n\n\n\n In Proceedings of the 32nd Annual Conference on Learning Theory (COLT), volume PMLR 99, pages 1319–1345, 2019. \n \n\n\n\n
\n\n\n\n \n \n \"The paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2019fostercomplexity,\n  title={The Complexity of Making the Gradient Small in Stochastic Convex Optimization},\n  author={Foster, Dylan and Sekhari, Ayush and Shamir, Ohad and Srebro, Nathan and Sridharan, Karthik and Woodworth, Blake},\n  booktitle={Proceedings of the 32nd Annual Conference on Learning Theory (COLT)},\n  volume={PMLR 99},\n  pages={1319--1345},\n  year={2019},\n  %http://proceedings.mlr.press/v99/foster19b.html\n  url_Paper={https://arxiv.org/pdf/1902.04686.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n How do Infinite Width Bounded Norm Networks Look in Function Space?.\n \n \n \n \n\n\n \n Savarese, P.; Evron, I.; Soudry, D.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 32nd Annual Conference on Learning Theory (COLT), volume PMLR 99, pages 2667–2690, 2019. \n \n\n\n\n
\n\n\n\n \n \n \"How paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2019savareseinfinite,\n  title={How do Infinite Width Bounded Norm Networks Look in Function Space?},\n  author={Savarese, Pedro and Evron, Itay and Soudry, Daniel and Srebro, Nathan},\n  booktitle={Proceedings of the 32nd Annual Conference on Learning Theory (COLT)},\n  volume={PMLR 99},\n  pages={2667--2690},\n  year={2019},\n  %http://proceedings.mlr.press/v99/savarese19a.html\n  url_Paper={https://arxiv.org/pdf/1902.05040.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n VC Classes are Adversarially Robustly Learnable, but Only Improperly.\n \n \n \n \n\n\n \n Montasser, O.; Hanneke, S.; and Srebro, N.\n\n\n \n\n\n\n In Beygelzimer, A.; and Hsu, D., editor(s), Proceedings of the 32nd Annual Conference on Learning Theory (COLT), volume PMLR 99, pages 2512–2530, 2019. \n \n\n\n\n
\n\n\n\n \n \n \"VC paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2019montasservc,\n  title={VC Classes are Adversarially Robustly Learnable, but Only Improperly},\n  author={Montasser, Omar and Hanneke, Steve and Srebro, Nathan},\n  booktitle={Proceedings of the 32nd Annual Conference on Learning Theory (COLT)},\n  volume={PMLR 99},\n  pages={2512--2530},\n  year={2019},\n  editor={Beygelzimer, Alina and Hsu, Daniel},\n  %http://proceedings.mlr.press/v99/montasser19a.html\n  url_Paper={https://arxiv.org/pdf/1902.04217.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n From Fair Decision Making to Social Equality.\n \n \n \n \n\n\n \n Mouzannar, H.; Ohannessian, M.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the Conference on Fairness, Accountability, and Transparency (FAT*), pages 359–368, 2019. \n \n\n\n\n
\n\n\n\n \n \n \"From paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2019mouzannarfair,\n  title={From Fair Decision Making to Social Equality},\n  author={Mouzannar, Hussein and Ohannessian, Mesrob and Srebro, Nathan},\n  booktitle={Proceedings of the Conference on Fairness, Accountability, and Transparency (FAT*)},\n  pages={359--368},\n  year={2019},\n  %https://dl.acm.org/doi/10.1145/3287560.3287599\n  url_Paper={https://arxiv.org/pdf/1812.02952.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Stochastic Canonical Correlation Analysis.\n \n \n \n \n\n\n \n Gao, C.; Garber, D.; Srebro, N.; Wang, J.; and Wang, W.\n\n\n \n\n\n\n Journal of Machine Learning Research, 20(167): 1–46. 2019.\n \n\n\n\n
\n\n\n\n \n \n \"Stochastic paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2019gaostochastic,\n  title={Stochastic Canonical Correlation Analysis},\n  author={Gao, Chao and Garber, Dan and Srebro, Nathan and Wang, Jialei and Wang, Weiran},\n  journal={Journal of Machine Learning Research},\n  volume={20},\n  number={167},\n  pages={1--46},\n  year={2019},\n  editor={Shawe-Taylor, John},\n  %http://www.jmlr.org/papers/volume20/18-095/18-095.pdf\n  url_Paper={https://arxiv.org/pdf/1702.06533.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Training Well-generalizing Classifiers for Fairness Metrics and Other Data-Dependent Constraints.\n \n \n \n \n\n\n \n Cotter, A.; Gupta, M.; Jiang, H.; Srebro, N.; Sridharan, K.; Wang, S.; Woodworth, B.; and You, S.\n\n\n \n\n\n\n In Proceedings of the 36th International Conference of Machine Learning (ICML), volume PMLR 97, pages 1397–1405, 2019. \n \n\n\n\n
\n\n\n\n \n \n \"Training paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2019cottertraining,\n  title={Training Well-generalizing Classifiers for Fairness Metrics and Other Data-Dependent Constraints},\n  author={Cotter, Andrew and Gupta, Maya and Jiang, Heinrich and Srebro, Nathan and Sridharan, Karthik and Wang, Serena and Woodworth, Blake and You, Seungil},\n  booktitle={Proceedings of the 36th International Conference of Machine Learning (ICML)},\n  volume={PMLR 97},\n  pages={1397--1405},\n  year={2019},\n  %http://proceedings.mlr.press/v97/cotter19b.html\n  url_Paper={https://arxiv.org/pdf/1807.00028.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Stochastic Gradient Descent on Separable Data: Exact Convergence with a Fixed Learning Rate.\n \n \n \n \n\n\n \n Nacson, M. S.; Srebro, N.; and Soudry, D.\n\n\n \n\n\n\n In Proceedings of the 22nd International Conference on Artificial Intelligence and Statistics (AISTATS), volume PMLR 89, pages 3051–3059, 2019. \n \n\n\n\n
\n\n\n\n \n \n \"Stochastic paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2019nacsonstochastic,\n  title={Stochastic Gradient Descent on Separable Data: Exact Convergence with a Fixed Learning Rate},\n  author={Nacson, Mor Shpigel and Srebro, Nathan and Soudry, Daniel},\n  booktitle={Proceedings of the 22nd International Conference on Artificial Intelligence and Statistics (AISTATS)},\n  volume={PMLR 89},\n  pages={3051--3059},\n  year={2019},\n  %http://proceedings.mlr.press/v89/nacson19a.html\n  url_Paper={https://arxiv.org/pdf/1806.01796.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The Role of Over-Parametrization in Generalization of Neural Networks.\n \n \n \n \n\n\n \n Neyshabur, B.; Li, Z.; Bhojanapalli, S.; LeCun, Y.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 7th International Conference on Learning Representations (ICLR), 2019. \n \n\n\\newline This is a revision of a paper published on arXiv in 2018\n\n
\n\n\n\n \n \n \"The paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2019neyshaburrole,\n  title={The Role of Over-Parametrization in Generalization of Neural Networks},\n  author={Neyshabur, Behnam and Li, Zhiyuan and Bhojanapalli, Srinadh and LeCun, Yann and Srebro, Nathan},\n  booktitle={Proceedings of the 7th International Conference on Learning Representations (ICLR)},\n  year={2019},\n  bibbase_note={\\newline This is a revision of a paper published on arXiv in 2018},\n  url_Paper={https://openreview.net/pdf?id=BygfghAcYX}\n}%%%\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Convergence of Gradient Descent on Separable Data.\n \n \n \n \n\n\n \n Nacson, M. S.; Lee, J.; Gunasekar, S.; Savarese, P.; Srebro, N.; and Soudry, D.\n\n\n \n\n\n\n In Proceedings of the 22nd International Conference on Artificial Intelligence and Statistics (AISTATS), volume PMLR 89, pages 3420–3428, 2019. \n \n\n\n\n
\n\n\n\n \n \n \"Convergence paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2019nacsonconvergence,\n  title={Convergence of Gradient Descent on Separable Data},\n  author={Nacson, Mor Shpigel and Lee, Jason and Gunasekar, Suriya and Savarese, Pedro and Srebro, Nathan and Soudry, Daniel},\n  booktitle={Proceedings of the 22nd International Conference on Artificial Intelligence and Statistics (AISTATS)},\n  volume={PMLR 89},\n  pages={3420--3428},\n  year={2019},\n  %http://proceedings.mlr.press/v89/nacson19b.html\n  url_Paper={https://arxiv.org/pdf/1803.01905.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Stochastic Nonconvex Optimization with Large Minibatches.\n \n \n \n \n\n\n \n Wang, W.; and Srebro, N.\n\n\n \n\n\n\n In Garivier, A.; and Kale, S., editor(s), Proceedings of the 30th International Conference on Algorithmic Learning Theory (ALT), volume PMLR 98, pages 857–882, 2019. \n \n\n\n\n
\n\n\n\n \n \n \"Stochastic paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2019wangstochastic,\n  title={Stochastic Nonconvex Optimization with Large Minibatches},\n  author={Wang, Weiran and Srebro, Nathan},\n  booktitle={Proceedings of the 30th International Conference on Algorithmic Learning Theory (ALT)},\n  volume={PMLR 98},\n  pages={857--882},\n  year={2019},\n  editor={Garivier, Aur{\\'e}lien and Kale, Satyen},\n  %http://proceedings.mlr.press/v98/wang19a.html\n  url_Paper={https://arxiv.org/pdf/1709.08728.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2018\n \n \n (23)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Towards Understanding the Role of Over-Parametrization in Generalization of Neural Networks.\n \n \n \n \n\n\n \n Neyshabur, B.; Li, Z.; Bhojanapalli, S.; LeCun, Y.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2018.\n \n\n\n\n
\n\n\n\n \n \n \"Towards paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2018neyshaburtowards,\n  title={Towards Understanding the Role of Over-Parametrization in Generalization of Neural Networks},\n  author={Neyshabur, Behnam and Li, Zhiyuan and Bhojanapalli, Srinadh and LeCun, Yann and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2018},\n  url_Paper={https://arxiv.org/pdf/1805.12076.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Characterizing Implicit Bias in Terms of Optimization Geometry.\n \n \n \n \n\n\n \n Gunasekar, S.; Lee, J.; Soudry, D.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 35th International Conference on Machine Learning, volume PMLR 80, pages 1832–1841, 2018. \n \n\n\n\n
\n\n\n\n \n \n \"Characterizing paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2018gunasekarcharacterizing,\n  title={Characterizing Implicit Bias in Terms of Optimization Geometry},\n  author={Gunasekar, Suriya and Lee, Jason and Soudry, Daniel and Srebro, Nathan},\n  booktitle={Proceedings of the 35th International Conference on Machine Learning},\n  volume={PMLR 80},\n  pages={1832--1841},\n  year={2018},\n  %http://proceedings.mlr.press/v80/gunasekar18a.html\n  url_Paper={https://arxiv.org/pdf/1802.08246.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Distributed Stochastic Multi-task Learning with Graph Regularization.\n \n \n \n \n\n\n \n Wang, W.; Wang, J.; Kolar, M.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2018.\n \n\n\n\n
\n\n\n\n \n \n \"Distributed paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2018wangdistributed,\n  title={Distributed Stochastic Multi-task Learning with Graph Regularization},\n  author={Wang, Weiran and Wang, Jialei and Kolar, Mladen and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2018},\n  url_Paper={https://arxiv.org/pdf/1802.03830.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On Preserving Non-discrimination when Combining Expert Advice.\n \n \n \n \n\n\n \n Blum, A.; Gunasekar, S.; Lykouris, T.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 32nd International Conference on Neural Information Processing Systems (NIPS), pages 8376–8387, 2018. \n \n\n\n\n
\n\n\n\n \n \n \"On paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2018blumpreserving,\n  title={On Preserving Non-discrimination when Combining Expert Advice},\n  author={Blum, Avrim and Gunasekar, Suriya and Lykouris, Thodoris and Srebro, Nati},\n  booktitle={Proceedings of the 32nd International Conference on Neural Information Processing Systems (NIPS)},\n  pages={8376--8387},\n  year={2018},\n  %http://papers.nips.cc/paper/8058-on-preserving-non-discrimination-when-combining-expert-advice\n  url_Paper={https://arxiv.org/pdf/1810.11829.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Training Fairness-Constrained Classifiers to Generalize.\n \n \n \n \n\n\n \n Cotter, A.; Gupta, M.; Jiang, H.; Srebro, N.; Sridharan, K.; Wang, S.; Woodworth, B.; and You, S.\n\n\n \n\n\n\n Fairness, Accountability, and Transparency in Machine Learning (FATML). 2018.\n \n\n\n\n
\n\n\n\n \n \n \"Training paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2018cottertraining,\n  title={Training Fairness-Constrained Classifiers to Generalize},\n  author={Cotter, Andrew and Gupta, Maya and Jiang, Heinrich and Srebro, Nathan and Sridharan, Karthik and Wang, Serena and Woodworth, Blake and You, Seungil},\n  journal={Fairness, Accountability, and Transparency in Machine Learning (FATML)},\n  year={2018},\n  url_Paper={https://www.fatml.org/media/documents/training_fairness_constrained_classifiers_to_generalize.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Implicit Bias of Gradient Descent on Linear Convolutional Networks.\n \n \n \n \n\n\n \n Gunasekar, S.; Lee, J.; Soudry, D.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 32nd International Conference on Neural Information Processing Systems (NIPS), pages 9482–9491, 2018. \n \n\n\n\n
\n\n\n\n \n \n \"Implicit paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2018gunasekarimplicit,\n  title={Implicit Bias of Gradient Descent on Linear Convolutional Networks},\n  author={Gunasekar, Suriya and Lee, Jason and Soudry, Daniel and Srebro, Nathan},\n  booktitle={Proceedings of the 32nd International Conference on Neural Information Processing Systems (NIPS)},\n  pages={9482--9491},\n  year={2018},\n  %http://papers.nips.cc/paper/8156-implicit-bias-of-gradient-descent-on-linear-convolutional-networks\n  url_Paper={https://arxiv.org/pdf/1806.00468.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Graph Oracle Models, Lower Bounds, and Gaps for Parallel Stochastic Optimization.\n \n \n \n \n\n\n \n Woodworth, B.; Wang, J.; Smith, A.; McMahan, B.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 32nd International Conference on Neural Information Processing Systems (NIPS), pages 8505-8515, 2018. \n \n\n\n\n
\n\n\n\n \n \n \"Graph paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2018woodworthgraph,\n  title={Graph Oracle Models, Lower Bounds, and Gaps for Parallel Stochastic Optimization},\n  author={Woodworth, Blake and Wang, Jialei and Smith, Adam and McMahan, Brendan and Srebro, Nathan},\n  booktitle={Proceedings of the 32nd International Conference on Neural Information Processing Systems (NIPS)},\n  pages={8505-8515},\n  year={2018},\n  %http://papers.nips.cc/paper/8069-graph-oracle-models-lower-bounds-and-gaps-for-parallel-stochastic-optimization\n  url_Paper={https://arxiv.org/pdf/1805.10222.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The Everlasting Database: Statistical Validity at a Fair Price.\n \n \n \n \n\n\n \n Woodworth, B.; Feldman, V.; Rosset, S.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 32nd International Conference on Neural Information Processing Systems (NIPS), pages 6532–6541, 2018. \n \n\n\n\n
\n\n\n\n \n \n \"The paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2018woodwortheverlasting,\n  title={The Everlasting Database: Statistical Validity at a Fair Price},\n  author={Woodworth, Blake and Feldman, Vitaly and Rosset, Saharon and Srebro, Nati},\n  booktitle={Proceedings of the 32nd International Conference on Neural Information Processing Systems (NIPS)},\n  pages={6532--6541},\n  year={2018},\n  %http://papers.nips.cc/paper/7888-the-everlasting-database-statistical-validity-at-a-fair-price\n  url_Paper={https://arxiv.org/pdf/1803.04307.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The Implicit Bias of Gradient Descent on Separable Data.\n \n \n \n \n\n\n \n Soudry, D.; Hoffer, E.; Nacson, M. S.; Gunasekar, S.; and Srebro, N.\n\n\n \n\n\n\n Journal of Machine Learning Research, 19(70): 1–57. 2018.\n \n\n\n\n
\n\n\n\n \n \n \"The paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2018soudryimplicit,\n  title={The Implicit Bias of Gradient Descent on Separable Data},\n  author={Soudry, Daniel and Hoffer, Elad and Nacson, Mor Shpigel and Gunasekar, Suriya and Srebro, Nathan},\n  journal={Journal of Machine Learning Research},\n  volume={19},\n  number={70},\n  pages={1--57},\n  year={2018},\n  editor={Bottou, Leon},\n  %http://www.jmlr.org/papers/v19/18-188.html\n  url_Paper={https://arxiv.org/pdf/1710.10345.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A PAC-Bayesian Approach to Spectrally-Normalized Margin Bounds for Neural Networks.\n \n \n \n \n\n\n \n Neyshabur, B.; Bhojanapalli, S.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the International Conference on Learning Representations (ICLR), 2018. \n \n\n\n\n
\n\n\n\n \n \n \"A paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2018neyshaburpac,\n  title={A PAC-Bayesian Approach to Spectrally-Normalized Margin Bounds for Neural Networks},\n  author={Neyshabur, Behnam and Bhojanapalli, Srinadh and Srebro, Nathan},\n  booktitle={Proceedings of the International Conference on Learning Representations (ICLR)},\n  year={2018},\n  %https://openreview.net/forum?id=Skz_WfbCZ&noteId=Skz_WfbCZ\n  url_Paper={https://arxiv.org/pdf/1707.09564.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Efficient Coordinate-wise Leading Eigenvector Computation.\n \n \n \n \n\n\n \n Wang, J.; Wang, W.; Garber, D.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 29th International Conference on Algorithmic Learning Theory (ALT), volume PMLR 83, pages 806–820, 2018. \n \n\n\n\n
\n\n\n\n \n \n \"Efficient paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2018wangefficient,\n  title={Efficient Coordinate-wise Leading Eigenvector Computation},\n  author={Wang, Jialei and Wang, Weiran and Garber, Dan and Srebro, Nathan},\n  booktitle={Proceedings of the 29th International Conference on Algorithmic Learning Theory (ALT)},\n  volume={PMLR 83},\n  pages={806--820},\n  year={2018},\n  %http://proceedings.mlr.press/v83/wang18a.html\n  url_Paper={https://arxiv.org/pdf/1702.07834.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Towards Understanding the Role of Over-Parametrization in Generalization of Neural Networks.\n \n \n \n \n\n\n \n Neyshabur, B.; Li, Z.; Bhojanapalli, S.; LeCun, Y.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2018.\n \n\n\n\n
\n\n\n\n \n \n \"Towards paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2018neyshaburtowards,\n  title={Towards Understanding the Role of Over-Parametrization in Generalization of Neural Networks},\n  author={Neyshabur, Behnam and Li, Zhiyuan and Bhojanapalli, Srinadh and LeCun, Yann and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2018},\n  url_Paper={https://arxiv.org/pdf/1805.12076.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Tight Convergence Analysis for Stochastic Gradient Descent with Delayed Updates.\n \n \n \n \n\n\n \n Arjevani, Y.; Shamir, O.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2018.\n \n\n\n\n
\n\n\n\n \n \n \"A paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 6 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2018arjevanitight,\n  title={A Tight Convergence Analysis for Stochastic Gradient Descent with Delayed Updates},\n  author={Arjevani, Yossi and Shamir, Ohad and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2018},\n  url_Paper={https://arxiv.org/pdf/1806.10188.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Characterizing Implicit Bias in Terms of Optimization Geometry.\n \n \n \n \n\n\n \n Gunasekar, S.; Lee, J.; Soudry, D.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 35th International Conference on Machine Learning, volume PMLR 80, pages 1832–1841, 2018. \n \n\n\n\n
\n\n\n\n \n \n \"Characterizing paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2018gunasekarcharacterizing,\n  title={Characterizing Implicit Bias in Terms of Optimization Geometry},\n  author={Gunasekar, Suriya and Lee, Jason and Soudry, Daniel and Srebro, Nathan},\n  booktitle={Proceedings of the 35th International Conference on Machine Learning},\n  volume={PMLR 80},\n  pages={1832--1841},\n  year={2018},\n  %http://proceedings.mlr.press/v80/gunasekar18a.html\n  url_Paper={https://arxiv.org/pdf/1802.08246.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Distributed Stochastic Multi-task Learning with Graph Regularization.\n \n \n \n \n\n\n \n Wang, W.; Wang, J.; Kolar, M.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2018.\n \n\n\n\n
\n\n\n\n \n \n \"Distributed paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2018wangdistributed,\n  title={Distributed Stochastic Multi-task Learning with Graph Regularization},\n  author={Wang, Weiran and Wang, Jialei and Kolar, Mladen and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2018},\n  url_Paper={https://arxiv.org/pdf/1802.03830.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On Preserving Non-discrimination when Combining Expert Advice.\n \n \n \n \n\n\n \n Blum, A.; Gunasekar, S.; Lykouris, T.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 32nd International Conference on Neural Information Processing Systems (NIPS), pages 8376–8387, 2018. \n \n\n\n\n
\n\n\n\n \n \n \"On paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2018blumpreserving,\n  title={On Preserving Non-discrimination when Combining Expert Advice},\n  author={Blum, Avrim and Gunasekar, Suriya and Lykouris, Thodoris and Srebro, Nati},\n  booktitle={Proceedings of the 32nd International Conference on Neural Information Processing Systems (NIPS)},\n  pages={8376--8387},\n  year={2018},\n  %http://papers.nips.cc/paper/8058-on-preserving-non-discrimination-when-combining-expert-advice\n  url_Paper={https://arxiv.org/pdf/1810.11829.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Training Fairness-Constrained Classifiers to Generalize.\n \n \n \n \n\n\n \n Cotter, A.; Gupta, M.; Jiang, H.; Srebro, N.; Sridharan, K.; Wang, S.; Woodworth, B.; and You, S.\n\n\n \n\n\n\n Fairness, Accountability, and Transparency in Machine Learning (FATML). 2018.\n \n\n\n\n
\n\n\n\n \n \n \"Training paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2018cottertraining,\n  title={Training Fairness-Constrained Classifiers to Generalize},\n  author={Cotter, Andrew and Gupta, Maya and Jiang, Heinrich and Srebro, Nathan and Sridharan, Karthik and Wang, Serena and Woodworth, Blake and You, Seungil},\n  journal={Fairness, Accountability, and Transparency in Machine Learning (FATML)},\n  year={2018},\n  url_Paper={https://www.fatml.org/media/documents/training_fairness_constrained_classifiers_to_generalize.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Implicit Bias of Gradient Descent on Linear Convolutional Networks.\n \n \n \n \n\n\n \n Gunasekar, S.; Lee, J.; Soudry, D.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 32nd International Conference on Neural Information Processing Systems (NIPS), pages 9482–9491, 2018. \n \n\n\n\n
\n\n\n\n \n \n \"Implicit paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2018gunasekarimplicit,\n  title={Implicit Bias of Gradient Descent on Linear Convolutional Networks},\n  author={Gunasekar, Suriya and Lee, Jason and Soudry, Daniel and Srebro, Nathan},\n  booktitle={Proceedings of the 32nd International Conference on Neural Information Processing Systems (NIPS)},\n  pages={9482--9491},\n  year={2018},\n  %http://papers.nips.cc/paper/8156-implicit-bias-of-gradient-descent-on-linear-convolutional-networks\n  url_Paper={https://arxiv.org/pdf/1806.00468.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Graph Oracle Models, Lower Bounds, and Gaps for Parallel Stochastic Optimization.\n \n \n \n \n\n\n \n Woodworth, B.; Wang, J.; Smith, A.; McMahan, B.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 32nd International Conference on Neural Information Processing Systems (NIPS), pages 8505-8515, 2018. \n \n\n\n\n
\n\n\n\n \n \n \"Graph paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2018woodworthgraph,\n  title={Graph Oracle Models, Lower Bounds, and Gaps for Parallel Stochastic Optimization},\n  author={Woodworth, Blake and Wang, Jialei and Smith, Adam and McMahan, Brendan and Srebro, Nathan},\n  booktitle={Proceedings of the 32nd International Conference on Neural Information Processing Systems (NIPS)},\n  pages={8505-8515},\n  year={2018},\n  %http://papers.nips.cc/paper/8069-graph-oracle-models-lower-bounds-and-gaps-for-parallel-stochastic-optimization\n  url_Paper={https://arxiv.org/pdf/1805.10222.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The Everlasting Database: Statistical Validity at a Fair Price.\n \n \n \n \n\n\n \n Woodworth, B.; Feldman, V.; Rosset, S.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 32nd International Conference on Neural Information Processing Systems (NIPS), pages 6532–6541, 2018. \n \n\n\n\n
\n\n\n\n \n \n \"The paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2018woodwortheverlasting,\n  title={The Everlasting Database: Statistical Validity at a Fair Price},\n  author={Woodworth, Blake and Feldman, Vitaly and Rosset, Saharon and Srebro, Nati},\n  booktitle={Proceedings of the 32nd International Conference on Neural Information Processing Systems (NIPS)},\n  pages={6532--6541},\n  year={2018},\n  %http://papers.nips.cc/paper/7888-the-everlasting-database-statistical-validity-at-a-fair-price\n  url_Paper={https://arxiv.org/pdf/1803.04307.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The Implicit Bias of Gradient Descent on Separable Data.\n \n \n \n \n\n\n \n Soudry, D.; Hoffer, E.; Nacson, M. S.; Gunasekar, S.; and Srebro, N.\n\n\n \n\n\n\n Journal of Machine Learning Research, 19(70): 1–57. 2018.\n \n\n\n\n
\n\n\n\n \n \n \"The paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2018soudryimplicit,\n  title={The Implicit Bias of Gradient Descent on Separable Data},\n  author={Soudry, Daniel and Hoffer, Elad and Nacson, Mor Shpigel and Gunasekar, Suriya and Srebro, Nathan},\n  journal={Journal of Machine Learning Research},\n  volume={19},\n  number={70},\n  pages={1--57},\n  year={2018},\n  editor={Bottou, Leon},\n  %http://www.jmlr.org/papers/v19/18-188.html\n  url_Paper={https://arxiv.org/pdf/1710.10345.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A PAC-Bayesian Approach to Spectrally-Normalized Margin Bounds for Neural Networks.\n \n \n \n \n\n\n \n Neyshabur, B.; Bhojanapalli, S.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the International Conference on Learning Representations (ICLR), 2018. \n \n\n\n\n
\n\n\n\n \n \n \"A paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2018neyshaburpac,\n  title={A PAC-Bayesian Approach to Spectrally-Normalized Margin Bounds for Neural Networks},\n  author={Neyshabur, Behnam and Bhojanapalli, Srinadh and Srebro, Nathan},\n  booktitle={Proceedings of the International Conference on Learning Representations (ICLR)},\n  year={2018},\n  %https://openreview.net/forum?id=Skz_WfbCZ&noteId=Skz_WfbCZ\n  url_Paper={https://arxiv.org/pdf/1707.09564.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Efficient Coordinate-wise Leading Eigenvector Computation.\n \n \n \n \n\n\n \n Wang, J.; Wang, W.; Garber, D.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 29th International Conference on Algorithmic Learning Theory (ALT), volume PMLR 83, pages 806–820, 2018. \n \n\n\n\n
\n\n\n\n \n \n \"Efficient paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2018wangefficient,\n  title={Efficient Coordinate-wise Leading Eigenvector Computation},\n  author={Wang, Jialei and Wang, Weiran and Garber, Dan and Srebro, Nathan},\n  booktitle={Proceedings of the 29th International Conference on Algorithmic Learning Theory (ALT)},\n  volume={PMLR 83},\n  pages={806--820},\n  year={2018},\n  %http://proceedings.mlr.press/v83/wang18a.html\n  url_Paper={https://arxiv.org/pdf/1702.07834.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2017\n \n \n (26)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Lower Bound for Randomized First Order Convex Optimization.\n \n \n \n \n\n\n \n Woodworth, B.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2017.\n \n\n\n\n
\n\n\n\n \n \n \"Lower paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2017woodworthlower,\n  title={Lower Bound for Randomized First Order Convex Optimization},\n  author={Woodworth, Blake and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2017},\n  url_Paper={https://arxiv.org/pdf/1709.03594.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Communication-efficient Algorithms for Distributed Stochastic Principal Component Analysis.\n \n \n \n \n\n\n \n Garber, D.; Shamir, O.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 34th International Conference on Machine Learning (ICML), volume PMLR 70, pages 1203–1212, 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Communication-efficient paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2017garbercommunication,\n  title={Communication-efficient Algorithms for Distributed Stochastic Principal Component Analysis},\n  author={Garber, Dan and Shamir, Ohad and Srebro, Nathan},\n  booktitle={Proceedings of the 34th International Conference on Machine Learning (ICML)},\n  volume={PMLR 70},\n  pages={1203--1212},\n  year={2017},\n  %http://proceedings.mlr.press/v70/garber17a.html\n  url_Paper={https://arxiv.org/pdf/1702.08169.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Efficient Distributed Learning with Sparsity.\n \n \n \n \n\n\n \n Wang, J.; Kolar, M.; Srebro, N.; and Zhang, T.\n\n\n \n\n\n\n In Proceedings of the 34th International Conference on Machine Learning (ICML), volume PMLR 70, pages 3636–3645, 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Efficient paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2017wangefficient,\n  title={Efficient Distributed Learning with Sparsity},\n  author={Wang, Jialei and Kolar, Mladen and Srebro, Nathan and Zhang, Tong},\n  booktitle={Proceedings of the 34th International Conference on Machine Learning (ICML)},\n  volume={PMLR 70},\n  pages={3636--3645},\n  year={2017},\n  %http://proceedings.mlr.press/v70/wang17f.html\n  url_Paper={https://arxiv.org/pdf/1605.07991.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Geometry of Optimization and Implicit Regularization in Deep Learning.\n \n \n \n \n\n\n \n Neyshabur, B.; Tomioka, R.; Salakhutdinov, R.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2017.\n \n\n\n\n
\n\n\n\n \n \n \"Geometry paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{neyshabur2017geometry,\n  title={Geometry of Optimization and Implicit Regularization in Deep Learning},\n  author={Neyshabur, Behnam and Tomioka, Ryota and Salakhutdinov, Ruslan and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2017},\n  url_Paper={https://arxiv.org/pdf/1705.03071.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Memory and Communication Efficient Distributed Stochastic Optimization with Minibatch-Prox.\n \n \n \n \n\n\n \n Wang, J.; Wang, W.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 30th Conference on Learning Theory (COLT), volume PMLR 65, pages 1882–1919, 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Memory paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2017wangmemory,\n  title={Memory and Communication Efficient Distributed Stochastic Optimization with Minibatch-Prox},\n  author={Wang, Jialei and Wang, Weiran and Srebro, Nathan},\n  booktitle={Proceedings of the 30th Conference on Learning Theory (COLT)},\n  volume={PMLR 65},\n  pages={1882--1919},\n  year={2017},\n  %http://proceedings.mlr.press/v65/wang17a.html\n  url_Paper={https://arxiv.org/pdf/1702.06269.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Learning Non-discriminatory Predictors.\n \n \n \n \n\n\n \n Woodworth, B.; Gunasekar, S.; Ohannessian, M.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 30th Conference on Learning Theory (COLT), volume PMLR 65, pages 1920–1953, 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Learning paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2017woodworthlearning,\n  title={Learning Non-discriminatory Predictors},\n  author={Woodworth, Blake and Gunasekar, Suriya and Ohannessian, Mesrob and Srebro, Nathan},\n  booktitle={Proceedings of the 30th Conference on Learning Theory (COLT)},\n  volume={PMLR 65},\n  pages={1920--1953},\n  year={2017},\n  %http://proceedings.mlr.press/v65/woodworth17a.html\n  url_Paper={https://arxiv.org/pdf/1702.06081.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Data-dependent Convergence for Consensus Stochastic Optimization.\n \n \n \n \n\n\n \n Bijral, A.; Sarwate, A.; and Srebro, N.\n\n\n \n\n\n\n IEEE Transactions on Automatic Control, 62(9): 4483–4498. 2017.\n \n\n\n\n
\n\n\n\n \n \n \"Data-dependent paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2017bijraldata,\n  title={Data-dependent Convergence for Consensus Stochastic Optimization},\n  author={Bijral, Avleen and Sarwate, Anand and Srebro, Nathan},\n  journal={IEEE Transactions on Automatic Control},\n  volume={62},\n  number={9},\n  pages={4483--4498},\n  year={2017},\n  publisher={IEEE},\n  url_Paper={https://ieeexplore.ieee.org/abstract/document/7858743}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Stochastic Approximation for Canonical Correlation Analysis.\n \n \n \n \n\n\n \n Arora, R.; Marinov, T.; Mianjy, P.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 31st International Conference on Neural Information Processing Systems (NIPS), pages 4778–4787, 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Stochastic paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 8 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2017arorastochastic,\n  title={Stochastic Approximation for Canonical Correlation Analysis},\n  author={Arora, Raman and Marinov, Teodor and Mianjy, Poorya and Srebro, Nathan},\n  booktitle={Proceedings of the 31st International Conference on Neural Information Processing Systems (NIPS)},\n  pages={4778--4787},\n  year={2017},\n  url_Paper={http://papers.nips.cc/paper/7063-stochastic-approximation-for-canonical-correlation-analysis}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Exploring Generalization in Deep Learning.\n \n \n \n \n\n\n \n Neyshabur, B.; Bhojanapalli, S.; McAllester, D.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 31st International Conference on Neural Information Processing Systems (NIPS), pages 5949–5958, 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Exploring paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2017neyshaburexploring,\n  title={Exploring Generalization in Deep Learning},\n  author={Neyshabur, Behnam and Bhojanapalli, Srinadh and McAllester, David and Srebro, Nathan},\n  booktitle={Proceedings of the 31st International Conference on Neural Information Processing Systems (NIPS)},\n  pages={5949--5958},\n  year={2017},\n  %http://papers.nips.cc/paper/7176-exploring-generalization-in-deep-learning\n  url_Paper={https://arxiv.org/pdf/1706.08947.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Implicit Regularization in Matrix Factorization.\n \n \n \n \n\n\n \n Gunasekar, S.; Woodworth, B.; Bhojanapalli, S.; Neyshabur, B.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 31st International Conference on Neural Information Processing Systems, pages 6152–6160, 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Implicit paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2017gunasekarimplicit,\n  title={Implicit Regularization in Matrix Factorization},\n  author={Gunasekar, Suriya and Woodworth, Blake and Bhojanapalli, Srinadh and Neyshabur, Behnam and Srebro, Nathan},\n  booktitle={Proceedings of the 31st International Conference on Neural Information Processing Systems},\n  pages={6152--6160},\n  year={2017},\n  %http://papers.nips.cc/paper/7195-implicit-regularization-in-matrix-factorization\n  url_Paper={https://arxiv.org/pdf/1705.08292.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The Marginal Value of Adaptive Gradient Methods in Machine Learning.\n \n \n \n \n\n\n \n Wilson, A. C; Roelofs, R.; Stern, M.; Srebro, N.; and Recht, B.\n\n\n \n\n\n\n In Proceedings of the 31st International Conference on Neural Information Processing Systems, pages 4151–4161, 2017. \n \n\n\n\n
\n\n\n\n \n \n \"The paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2017wilsonmarginal,\n  title={The Marginal Value of Adaptive Gradient Methods in Machine Learning},\n  author={Wilson, Ashia C and Roelofs, Rebecca and Stern, Mitchell and Srebro, Nati and Recht, Benjamin},\n  booktitle={Proceedings of the 31st International Conference on Neural Information Processing Systems},\n  pages={4151--4161},\n  year={2017},\n  %http://papers.nips.cc/paper/7003-the-marginal-value-of-adaptive-gradient-methods-in-machine-learning\n  url_Paper={https://arxiv.org/pdf/1705.08292.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Sketching Meets Random Projection in the Dual: A Provable Recovery Algorithm for Big and High-dimensional Data.\n \n \n \n \n\n\n \n Wang, J.; Lee, J.; Mahdavi, M.; Kolar, M.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 20th International Conference on Artifical Intelligence and Statistics (AISTATS), volume PMLR 54, pages 1150–1158, 2017. \n \n\nThis was also published in the Electronic Journal of Statistics\n\n
\n\n\n\n \n \n \"Sketching paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2017wangsketching,\n  title={Sketching Meets Random Projection in the Dual: A Provable Recovery Algorithm for Big and High-dimensional Data},\n  author={Wang, Jialei and Lee, Jason and Mahdavi, Mehrdad and Kolar, Mladen and Srebro, Nathan},\n  booktitle={Proceedings of the 20th International Conference on Artifical Intelligence and Statistics (AISTATS)},\n  volume={PMLR 54},\n  pages={1150--1158},\n  year={2017},\n  bibbase_note={This was also published in the Electronic Journal of Statistics},\n  url_Paper={http://proceedings.mlr.press/v54/wang17d}\n}%%%\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Sketching Meets Random Projection in the Dual: A Provable Recovery Algorithm for Big and High-dimensional Data.\n \n \n \n \n\n\n \n Wang, J.; Lee, J.; Mahdavi, M.; Kolar, M.; and Srebro, N.\n\n\n \n\n\n\n Electronic Journal of Statistics, 11(2): 4896–4944. 2017.\n \n\n\n\n
\n\n\n\n \n \n \"Sketching paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2017wangsketching2,\n  title={Sketching Meets Random Projection in the Dual: A Provable Recovery Algorithm for Big and High-dimensional Data},\n  author={Wang, Jialei and Lee, Jason and Mahdavi, Mehrdad and Kolar, Mladen and Srebro, Nathan},\n  journal={Electronic Journal of Statistics},\n  volume={11},\n  number={2},\n  pages={4896--4944},\n  year={2017},\n  publisher={The Institute of Mathematical Statistics and the Bernoulli Society},\n  %https://projecteuclid.org/euclid.ejs/1513306863\n  url_Paper={https://arxiv.org/pdf/1610.03045.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Lower Bound for Randomized First Order Convex Optimization.\n \n \n \n \n\n\n \n Woodworth, B.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2017.\n \n\n\n\n
\n\n\n\n \n \n \"Lower paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2017woodworthlower,\n  title={Lower Bound for Randomized First Order Convex Optimization},\n  author={Woodworth, Blake and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2017},\n  url_Paper={https://arxiv.org/pdf/1709.03594.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Communication-efficient Algorithms for Distributed Stochastic Principal Component Analysis.\n \n \n \n \n\n\n \n Garber, D.; Shamir, O.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 34th International Conference on Machine Learning (ICML), volume PMLR 70, pages 1203–1212, 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Communication-efficient paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2017garbercommunication,\n  title={Communication-efficient Algorithms for Distributed Stochastic Principal Component Analysis},\n  author={Garber, Dan and Shamir, Ohad and Srebro, Nathan},\n  booktitle={Proceedings of the 34th International Conference on Machine Learning (ICML)},\n  volume={PMLR 70},\n  pages={1203--1212},\n  year={2017},\n  %http://proceedings.mlr.press/v70/garber17a.html\n  url_Paper={https://arxiv.org/pdf/1702.08169.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Efficient Distributed Learning with Sparsity.\n \n \n \n \n\n\n \n Wang, J.; Kolar, M.; Srebro, N.; and Zhang, T.\n\n\n \n\n\n\n In Proceedings of the 34th International Conference on Machine Learning (ICML), volume PMLR 70, pages 3636–3645, 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Efficient paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2017wangefficient,\n  title={Efficient Distributed Learning with Sparsity},\n  author={Wang, Jialei and Kolar, Mladen and Srebro, Nathan and Zhang, Tong},\n  booktitle={Proceedings of the 34th International Conference on Machine Learning (ICML)},\n  volume={PMLR 70},\n  pages={3636--3645},\n  year={2017},\n  %http://proceedings.mlr.press/v70/wang17f.html\n  url_Paper={https://arxiv.org/pdf/1605.07991.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Geometry of Optimization and Implicit Regularization in Deep Learning.\n \n \n \n \n\n\n \n Neyshabur, B.; Tomioka, R.; Salakhutdinov, R.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2017.\n \n\n\n\n
\n\n\n\n \n \n \"Geometry paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{neyshabur2017geometry,\n  title={Geometry of Optimization and Implicit Regularization in Deep Learning},\n  author={Neyshabur, Behnam and Tomioka, Ryota and Salakhutdinov, Ruslan and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2017},\n  url_Paper={https://arxiv.org/pdf/1705.03071.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Memory and Communication Efficient Distributed Stochastic Optimization with Minibatch-Prox.\n \n \n \n \n\n\n \n Wang, J.; Wang, W.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 30th Conference on Learning Theory (COLT), volume PMLR 65, pages 1882–1919, 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Memory paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2017wangmemory,\n  title={Memory and Communication Efficient Distributed Stochastic Optimization with Minibatch-Prox},\n  author={Wang, Jialei and Wang, Weiran and Srebro, Nathan},\n  booktitle={Proceedings of the 30th Conference on Learning Theory (COLT)},\n  volume={PMLR 65},\n  pages={1882--1919},\n  year={2017},\n  %http://proceedings.mlr.press/v65/wang17a.html\n  url_Paper={https://arxiv.org/pdf/1702.06269.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Learning Non-discriminatory Predictors.\n \n \n \n \n\n\n \n Woodworth, B.; Gunasekar, S.; Ohannessian, M.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 30th Conference on Learning Theory (COLT), volume PMLR 65, pages 1920–1953, 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Learning paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2017woodworthlearning,\n  title={Learning Non-discriminatory Predictors},\n  author={Woodworth, Blake and Gunasekar, Suriya and Ohannessian, Mesrob and Srebro, Nathan},\n  booktitle={Proceedings of the 30th Conference on Learning Theory (COLT)},\n  volume={PMLR 65},\n  pages={1920--1953},\n  year={2017},\n  %http://proceedings.mlr.press/v65/woodworth17a.html\n  url_Paper={https://arxiv.org/pdf/1702.06081.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Data-dependent Convergence for Consensus Stochastic Optimization.\n \n \n \n \n\n\n \n Bijral, A.; Sarwate, A.; and Srebro, N.\n\n\n \n\n\n\n IEEE Transactions on Automatic Control, 62(9): 4483–4498. 2017.\n \n\n\n\n
\n\n\n\n \n \n \"Data-dependent paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2017bijraldata,\n  title={Data-dependent Convergence for Consensus Stochastic Optimization},\n  author={Bijral, Avleen and Sarwate, Anand and Srebro, Nathan},\n  journal={IEEE Transactions on Automatic Control},\n  volume={62},\n  number={9},\n  pages={4483--4498},\n  year={2017},\n  publisher={IEEE},\n  url_Paper={https://ieeexplore.ieee.org/abstract/document/7858743}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Stochastic Approximation for Canonical Correlation Analysis.\n \n \n \n \n\n\n \n Arora, R.; Marinov, T.; Mianjy, P.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 31st International Conference on Neural Information Processing Systems (NIPS), pages 4778–4787, 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Stochastic paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 8 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2017arorastochastic,\n  title={Stochastic Approximation for Canonical Correlation Analysis},\n  author={Arora, Raman and Marinov, Teodor and Mianjy, Poorya and Srebro, Nathan},\n  booktitle={Proceedings of the 31st International Conference on Neural Information Processing Systems (NIPS)},\n  pages={4778--4787},\n  year={2017},\n  url_Paper={http://papers.nips.cc/paper/7063-stochastic-approximation-for-canonical-correlation-analysis}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Exploring Generalization in Deep Learning.\n \n \n \n \n\n\n \n Neyshabur, B.; Bhojanapalli, S.; McAllester, D.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 31st International Conference on Neural Information Processing Systems (NIPS), pages 5949–5958, 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Exploring paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2017neyshaburexploring,\n  title={Exploring Generalization in Deep Learning},\n  author={Neyshabur, Behnam and Bhojanapalli, Srinadh and McAllester, David and Srebro, Nathan},\n  booktitle={Proceedings of the 31st International Conference on Neural Information Processing Systems (NIPS)},\n  pages={5949--5958},\n  year={2017},\n  %http://papers.nips.cc/paper/7176-exploring-generalization-in-deep-learning\n  url_Paper={https://arxiv.org/pdf/1706.08947.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Implicit Regularization in Matrix Factorization.\n \n \n \n \n\n\n \n Gunasekar, S.; Woodworth, B.; Bhojanapalli, S.; Neyshabur, B.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 31st International Conference on Neural Information Processing Systems, pages 6152–6160, 2017. \n \n\n\n\n
\n\n\n\n \n \n \"Implicit paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2017gunasekarimplicit,\n  title={Implicit Regularization in Matrix Factorization},\n  author={Gunasekar, Suriya and Woodworth, Blake and Bhojanapalli, Srinadh and Neyshabur, Behnam and Srebro, Nathan},\n  booktitle={Proceedings of the 31st International Conference on Neural Information Processing Systems},\n  pages={6152--6160},\n  year={2017},\n  %http://papers.nips.cc/paper/7195-implicit-regularization-in-matrix-factorization\n  url_Paper={https://arxiv.org/pdf/1705.08292.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The Marginal Value of Adaptive Gradient Methods in Machine Learning.\n \n \n \n \n\n\n \n Wilson, A. C; Roelofs, R.; Stern, M.; Srebro, N.; and Recht, B.\n\n\n \n\n\n\n In Proceedings of the 31st International Conference on Neural Information Processing Systems, pages 4151–4161, 2017. \n \n\n\n\n
\n\n\n\n \n \n \"The paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2017wilsonmarginal,\n  title={The Marginal Value of Adaptive Gradient Methods in Machine Learning},\n  author={Wilson, Ashia C and Roelofs, Rebecca and Stern, Mitchell and Srebro, Nati and Recht, Benjamin},\n  booktitle={Proceedings of the 31st International Conference on Neural Information Processing Systems},\n  pages={4151--4161},\n  year={2017},\n  %http://papers.nips.cc/paper/7003-the-marginal-value-of-adaptive-gradient-methods-in-machine-learning\n  url_Paper={https://arxiv.org/pdf/1705.08292.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Sketching Meets Random Projection in the Dual: A Provable Recovery Algorithm for Big and High-dimensional Data.\n \n \n \n \n\n\n \n Wang, J.; Lee, J.; Mahdavi, M.; Kolar, M.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 20th International Conference on Artifical Intelligence and Statistics (AISTATS), volume PMLR 54, pages 1150–1158, 2017. \n \n\n\\newline This was also published in the Electronic Journal of Statistics\n\n
\n\n\n\n \n \n \"Sketching paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2017wangsketching,\n  title={Sketching Meets Random Projection in the Dual: A Provable Recovery Algorithm for Big and High-dimensional Data},\n  author={Wang, Jialei and Lee, Jason and Mahdavi, Mehrdad and Kolar, Mladen and Srebro, Nathan},\n  booktitle={Proceedings of the 20th International Conference on Artifical Intelligence and Statistics (AISTATS)},\n  volume={PMLR 54},\n  pages={1150--1158},\n  year={2017},\n  bibbase_note={\\newline This was also published in the Electronic Journal of Statistics},\n  url_Paper={http://proceedings.mlr.press/v54/wang17d}\n}%%%\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Sketching Meets Random Projection in the Dual: A Provable Recovery Algorithm for Big and High-dimensional Data.\n \n \n \n \n\n\n \n Wang, J.; Lee, J.; Mahdavi, M.; Kolar, M.; and Srebro, N.\n\n\n \n\n\n\n Electronic Journal of Statistics, 11(2): 4896–4944. 2017.\n \n\n\n\n
\n\n\n\n \n \n \"Sketching paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2017wangsketching2,\n  title={Sketching Meets Random Projection in the Dual: A Provable Recovery Algorithm for Big and High-dimensional Data},\n  author={Wang, Jialei and Lee, Jason and Mahdavi, Mehrdad and Kolar, Mladen and Srebro, Nathan},\n  journal={Electronic Journal of Statistics},\n  volume={11},\n  number={2},\n  pages={4896--4944},\n  year={2017},\n  publisher={The Institute of Mathematical Statistics and the Bernoulli Society},\n  %https://projecteuclid.org/euclid.ejs/1513306863\n  url_Paper={https://arxiv.org/pdf/1610.03045.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2016\n \n \n (28)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Data-dependent Bounds on Network Gradient Descent.\n \n \n \n \n\n\n \n Bijral, A.; Sarwate, A. D; and Srebro, N.\n\n\n \n\n\n\n In 2016 54th Annual Allerton Conference on Communication, Control, and Computing (Allerton), pages 869–874, 2016. Institute of Electrical and Electronics Engineers (IEEE)\n \n\n\n\n
\n\n\n\n \n \n \"Data-dependent paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2016bijraldata,\n  title={Data-dependent Bounds on Network Gradient Descent},\n  author={Bijral, Avleen and Sarwate, Anand D and Srebro, Nathan},\n  booktitle={2016 54th Annual Allerton Conference on Communication, Control, and Computing (Allerton)},\n  pages={869--874},\n  year={2016},\n  organization={Institute of Electrical and Electronics Engineers (IEEE)},\n  url_Paper={https://ieeexplore.ieee.org/abstract/document/7852325}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Data Dependent Convergence for Distributed Stochastic Optimization.\n \n \n \n \n\n\n \n Bijral, A.\n\n\n \n\n\n\n Ph.D. Thesis, Toyota Technological Institute at Chicago, 2016.\n Committee Members: Nathan Srebro, Anand Sarwate, Greg Shakhnarovich, Lek-Heng Lim\n\n\n\n
\n\n\n\n \n \n \"Data paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@phdthesis{2016bijraldatad,\n  title={Data Dependent Convergence for Distributed Stochastic Optimization},\n  author={Bijral, Avleen},\n  school={Toyota Technological Institute at Chicago},\n  year={2016},\n  note={Committee Members: Nathan Srebro, Anand Sarwate, Greg Shakhnarovich, Lek-Heng Lim},\n  url_Paper={https://arxiv.org/pdf/1608.08337.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fast and Scalable Structural SVM with Slack Rescaling.\n \n \n \n \n\n\n \n Choi, H.; Meshi, O.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 19th International Conference on Artifical Intelligence and Statistics (AISTATS), volume PMLR 51, pages 667–675, 2016. \n \n\n\n\n
\n\n\n\n \n \n \"Fast paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2016choifast,\n  title={Fast and Scalable Structural SVM with Slack Rescaling},\n  author={Choi, Heejin and Meshi, Ofer and Srebro, Nathan},\n  booktitle={Proceedings of the 19th International Conference on Artifical Intelligence and Statistics (AISTATS)},\n  volume={PMLR 51},\n  pages={667--675},\n  year={2016},\n  %http://proceedings.mlr.press/v51/choi16.html\n  url_Paper={https://arxiv.org/pdf/1510.06002.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Distributed Multi-task Learning.\n \n \n \n \n\n\n \n Wang, J.; Kolar, M.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 19th International Conference on Artificial Intelligence and Statistics (AISTATS), volume PMLR 51, pages 751–760, 2016. \n \n\n\n\n
\n\n\n\n \n \n \"Distributed paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2016wangdistributed,\n  title={Distributed Multi-task Learning},\n  author={Wang, Jialei and Kolar, Mladen and Srebro, Nathan},\n  booktitle={Proceedings of the 19th International Conference on Artificial Intelligence and Statistics (AISTATS)},\n  volume={PMLR 51},\n  pages={751--760},\n  year={2016},\n  %http://proceedings.mlr.press/v51/wang16d.html\n  url_Paper={https://arxiv.org/pdf/1510.00633.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On Data Dependence in Distributed Stochastic Optimization.\n \n \n \n \n\n\n \n Bijral, A.; Sarwate, A.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2016.\n The newest version on arXiv was published in 2018\n\n\n\n
\n\n\n\n \n \n \"On paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2016bijraldatao,\n  title={On Data Dependence in Distributed Stochastic Optimization},\n  author={Bijral, Avleen and Sarwate, Anand and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2016},\n  note={The newest version on arXiv was published in 2018},\n  url_Paper={https://arxiv.org/pdf/1603.04379.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Distributed Multi-task Learning with Shared Representation.\n \n \n \n \n\n\n \n Wang, J.; Kolar, M.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2016.\n \n\n\n\n
\n\n\n\n \n \n \"Distributed paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2016wangdistributedsr,\n  title={Distributed Multi-task Learning with Shared Representation},\n  author={Wang, Jialei and Kolar, Mladen and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2016},\n  url_Paper={https://arxiv.org/pdf/1603.02185.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Reducing Runtime by Recycling Samples.\n \n \n \n \n\n\n \n Wang, J.; Wang, H.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2016.\n \n\n\n\n
\n\n\n\n \n \n \"Reducing paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2016wangreducing,\n  title={Reducing Runtime by Recycling Samples},\n  author={Wang, Jialei and Wang, Hai and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2016},\n  url_Paper={https://arxiv.org/pdf/1602.02136.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Stochastic Gradient Descent, Weighted Sampling, and the Randomized Kaczmarz Algorithm.\n \n \n \n \n\n\n \n Needell, D.; Srebro, N.; and Ward, R.\n\n\n \n\n\n\n Mathematical Programming, 155(1-2): 549–573. 2016.\n \n\nAn earlier version appeared in the 27th International Conference of Neural Information Processing Systems\n\n
\n\n\n\n \n \n \"Stochastic paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2016needellstochastic,\n  title={Stochastic Gradient Descent, Weighted Sampling, and the Randomized Kaczmarz Algorithm},\n  author={Needell, Deanna and Srebro, Nathan and Ward, Rachel},\n  journal={Mathematical Programming},\n  volume={155},\n  number={1-2},\n  pages={549--573},\n  year={2016},\n  publisher={Springer},\n  bibbase_note={An earlier version appeared in the 27th International Conference of Neural Information Processing Systems},\n  url_Paper={https://link.springer.com/article/10.1007/s10107-015-0864-7}\n}%%%\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Normalized Spectral Map Synchronization.\n \n \n \n \n\n\n \n Shen, Y.; Huang, Q.; Srebro, N.; and Sanghavi, S.\n\n\n \n\n\n\n In Proceedings of the 30th International Conference on Neural Information Processing Systems (NIPS), pages 4932–4940, 2016. \n \n\n\n\n
\n\n\n\n \n \n \"Normalized paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2016shennormalized,\n  title={Normalized Spectral Map Synchronization},\n  author={Shen, Yanyao and Huang, Qixing and Srebro, Nathan and Sanghavi, Sujay},\n  booktitle={Proceedings of the 30th International Conference on Neural Information Processing Systems (NIPS)},\n  pages={4932--4940},\n  year={2016},\n  url_Paper={http://papers.nips.cc/paper/6128-normalized-spectral-map-synchronization}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Equality of Opportunity in Supervised Learning.\n \n \n \n \n\n\n \n Hardt, M.; Price, E.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 30th International Conference on Neural Information Processing Systems (NIPS), pages 3323–3331, 2016. \n \n\n\n\n
\n\n\n\n \n \n \"Equality paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2016hardtequality,\n  title={Equality of Opportunity in Supervised Learning},\n  author={Hardt, Moritz and Price, Eric and Srebro, Nathan},\n  booktitle={Proceedings of the 30th International Conference on Neural Information Processing Systems (NIPS)},\n  pages={3323--3331},\n  year={2016},\n  %http://papers.nips.cc/paper/6373-equality-of-opportunity-in-supervised-learning\n  url_Paper={https://arxiv.org/pdf/1610.02413.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Efficient Globally Convergent Stochastic Optimization for Canonical Correlation Analysis.\n \n \n \n \n\n\n \n Wang, W.; Wang, J.; Garber, D.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 30th International Conference on Neural Information Processing Systems (NIPS), pages 766–774, 2016. \n \n\n\n\n
\n\n\n\n \n \n \"Efficient paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2016wangefficient,\n  title={Efficient Globally Convergent Stochastic Optimization for Canonical Correlation Analysis},\n  author={Wang, Weiran and Wang, Jialei and Garber, Dan and Srebro, Nathan},\n  booktitle={Proceedings of the 30th International Conference on Neural Information Processing Systems (NIPS)},\n  pages={766--774},\n  year={2016},\n  %http://papers.nips.cc/paper/6459-efficient-globally-convergent-stochastic-optimization-for-canonical-correlation-analysis\n  url_Paper={https://arxiv.org/pdf/1604.01870.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Tight Complexity Bounds for Optimizing Composite Objectives.\n \n \n \n \n\n\n \n Woodworth, B.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 30th International Conference on Neural Information Processing Systems (NIPS), pages 3646–3654, 2016. \n \n\n\n\n
\n\n\n\n \n \n \"Tight paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2016woodworthtight,\n  title={Tight Complexity Bounds for Optimizing Composite Objectives},\n  author={Woodworth, Blake and Srebro, Nathan},\n  booktitle={Proceedings of the 30th International Conference on Neural Information Processing Systems (NIPS)},\n  pages={3646--3654},\n  year={2016},\n  %http://papers.nips.cc/paper/6058-tight-complexity-bounds-for-optimizing-composite-objectives\n  url_Paper={https://arxiv.org/pdf/1605.08003.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Path-normalized Optimization of Recurrent Neural Networks with ReLU Activations.\n \n \n \n \n\n\n \n Neyshabur, B.; Wu, Y.; Salakhutdinov, R.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 30th International Conference on Neural Information Processing Systems (NIPS), pages 3485–3493, 2016. \n \n\n\n\n
\n\n\n\n \n \n \"Path-normalized paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2016neyshaburpath,\n  title={Path-normalized Optimization of Recurrent Neural Networks with ReLU Activations},\n  author={Neyshabur, Behnam and Wu, Yuhuai and Salakhutdinov, Ruslan and Srebro, Nathan},\n  booktitle={Proceedings of the 30th International Conference on Neural Information Processing Systems (NIPS)},\n  pages={3485--3493},\n  year={2016},\n  %http://papers.nips.cc/paper/6213-path-normalized-optimization-of-recurrent-neural-networks-with-relu-activations\n  url_Paper={https://arxiv.org/pdf/1605.07154.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Global Optimality of Local Search for Low Rank Matrix Recovery.\n \n \n \n \n\n\n \n Bhojanapalli, S.; Neyshabur, B.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 30th International Conference on Neural Information Processing Systems (NIPS), pages 3880–3888, 2016. \n \n\n\n\n
\n\n\n\n \n \n \"Global paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2016bhojanapalliglobal,\n  title={Global Optimality of Local Search for Low Rank Matrix Recovery},\n  author={Bhojanapalli, Srinadh and Neyshabur, Behnam and Srebro, Nathan},\n  booktitle={Proceedings of the 30th International Conference on Neural Information Processing Systems (NIPS)},\n  pages={3880--3888},\n  year={2016},\n  %http://papers.nips.cc/paper/6270-global-optimality-of-local-search-for-low-rank-matrix-recovery\n  url_Paper={https://arxiv.org/pdf/1605.07221.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Data-dependent Bounds on Network Gradient Descent.\n \n \n \n \n\n\n \n Bijral, A.; Sarwate, A. D; and Srebro, N.\n\n\n \n\n\n\n In 2016 54th Annual Allerton Conference on Communication, Control, and Computing (Allerton), pages 869–874, 2016. Institute of Electrical and Electronics Engineers (IEEE)\n \n\n\n\n
\n\n\n\n \n \n \"Data-dependent paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2016bijraldata,\n  title={Data-dependent Bounds on Network Gradient Descent},\n  author={Bijral, Avleen and Sarwate, Anand D and Srebro, Nathan},\n  booktitle={2016 54th Annual Allerton Conference on Communication, Control, and Computing (Allerton)},\n  pages={869--874},\n  year={2016},\n  organization={Institute of Electrical and Electronics Engineers (IEEE)},\n  url_Paper={https://ieeexplore.ieee.org/abstract/document/7852325}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Data Dependent Convergence for Distributed Stochastic Optimization.\n \n \n \n \n\n\n \n Bijral, A.\n\n\n \n\n\n\n Ph.D. Thesis, Toyota Technological Institute at Chicago, 2016.\n Committee Members: Nathan Srebro, Anand Sarwate, Greg Shakhnarovich, Lek-Heng Lim\n\n\n\n
\n\n\n\n \n \n \"Data paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@phdthesis{2016bijraldatad,\n  title={Data Dependent Convergence for Distributed Stochastic Optimization},\n  author={Bijral, Avleen},\n  school={Toyota Technological Institute at Chicago},\n  year={2016},\n  note={Committee Members: Nathan Srebro, Anand Sarwate, Greg Shakhnarovich, Lek-Heng Lim},\n  url_Paper={https://arxiv.org/pdf/1608.08337.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fast and Scalable Structural SVM with Slack Rescaling.\n \n \n \n \n\n\n \n Choi, H.; Meshi, O.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 19th International Conference on Artifical Intelligence and Statistics (AISTATS), volume PMLR 51, pages 667–675, 2016. \n \n\n\n\n
\n\n\n\n \n \n \"Fast paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2016choifast,\n  title={Fast and Scalable Structural SVM with Slack Rescaling},\n  author={Choi, Heejin and Meshi, Ofer and Srebro, Nathan},\n  booktitle={Proceedings of the 19th International Conference on Artifical Intelligence and Statistics (AISTATS)},\n  volume={PMLR 51},\n  pages={667--675},\n  year={2016},\n  %http://proceedings.mlr.press/v51/choi16.html\n  url_Paper={https://arxiv.org/pdf/1510.06002.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Distributed Multi-task Learning.\n \n \n \n \n\n\n \n Wang, J.; Kolar, M.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 19th International Conference on Artificial Intelligence and Statistics (AISTATS), volume PMLR 51, pages 751–760, 2016. \n \n\n\n\n
\n\n\n\n \n \n \"Distributed paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2016wangdistributed,\n  title={Distributed Multi-task Learning},\n  author={Wang, Jialei and Kolar, Mladen and Srebro, Nathan},\n  booktitle={Proceedings of the 19th International Conference on Artificial Intelligence and Statistics (AISTATS)},\n  volume={PMLR 51},\n  pages={751--760},\n  year={2016},\n  %http://proceedings.mlr.press/v51/wang16d.html\n  url_Paper={https://arxiv.org/pdf/1510.00633.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On Data Dependence in Distributed Stochastic Optimization.\n \n \n \n \n\n\n \n Bijral, A.; Sarwate, A.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2016.\n The newest version on arXiv was published in 2018\n\n\n\n
\n\n\n\n \n \n \"On paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2016bijraldatao,\n  title={On Data Dependence in Distributed Stochastic Optimization},\n  author={Bijral, Avleen and Sarwate, Anand and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2016},\n  note={The newest version on arXiv was published in 2018},\n  url_Paper={https://arxiv.org/pdf/1603.04379.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Distributed Multi-task Learning with Shared Representation.\n \n \n \n \n\n\n \n Wang, J.; Kolar, M.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2016.\n \n\n\n\n
\n\n\n\n \n \n \"Distributed paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2016wangdistributedsr,\n  title={Distributed Multi-task Learning with Shared Representation},\n  author={Wang, Jialei and Kolar, Mladen and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2016},\n  url_Paper={https://arxiv.org/pdf/1603.02185.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Reducing Runtime by Recycling Samples.\n \n \n \n \n\n\n \n Wang, J.; Wang, H.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2016.\n \n\n\n\n
\n\n\n\n \n \n \"Reducing paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2016wangreducing,\n  title={Reducing Runtime by Recycling Samples},\n  author={Wang, Jialei and Wang, Hai and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2016},\n  url_Paper={https://arxiv.org/pdf/1602.02136.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Stochastic Gradient Descent, Weighted Sampling, and the Randomized Kaczmarz Algorithm.\n \n \n \n \n\n\n \n Needell, D.; Srebro, N.; and Ward, R.\n\n\n \n\n\n\n Mathematical Programming, 155(1-2): 549–573. 2016.\n \n\n\\newline An earlier version appeared in the 27th International Conference of Neural Information Processing Systems\n\n
\n\n\n\n \n \n \"Stochastic paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2016needellstochastic,\n  title={Stochastic Gradient Descent, Weighted Sampling, and the Randomized Kaczmarz Algorithm},\n  author={Needell, Deanna and Srebro, Nathan and Ward, Rachel},\n  journal={Mathematical Programming},\n  volume={155},\n  number={1-2},\n  pages={549--573},\n  year={2016},\n  publisher={Springer},\n  bibbase_note={\\newline An earlier version appeared in the 27th International Conference of Neural Information Processing Systems},\n  url_Paper={https://link.springer.com/article/10.1007/s10107-015-0864-7}\n}%%%\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Normalized Spectral Map Synchronization.\n \n \n \n \n\n\n \n Shen, Y.; Huang, Q.; Srebro, N.; and Sanghavi, S.\n\n\n \n\n\n\n In Proceedings of the 30th International Conference on Neural Information Processing Systems (NIPS), pages 4932–4940, 2016. \n \n\n\n\n
\n\n\n\n \n \n \"Normalized paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2016shennormalized,\n  title={Normalized Spectral Map Synchronization},\n  author={Shen, Yanyao and Huang, Qixing and Srebro, Nathan and Sanghavi, Sujay},\n  booktitle={Proceedings of the 30th International Conference on Neural Information Processing Systems (NIPS)},\n  pages={4932--4940},\n  year={2016},\n  url_Paper={http://papers.nips.cc/paper/6128-normalized-spectral-map-synchronization}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Equality of Opportunity in Supervised Learning.\n \n \n \n \n\n\n \n Hardt, M.; Price, E.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 30th International Conference on Neural Information Processing Systems (NIPS), pages 3323–3331, 2016. \n \n\n\n\n
\n\n\n\n \n \n \"Equality paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2016hardtequality,\n  title={Equality of Opportunity in Supervised Learning},\n  author={Hardt, Moritz and Price, Eric and Srebro, Nathan},\n  booktitle={Proceedings of the 30th International Conference on Neural Information Processing Systems (NIPS)},\n  pages={3323--3331},\n  year={2016},\n  %http://papers.nips.cc/paper/6373-equality-of-opportunity-in-supervised-learning\n  url_Paper={https://arxiv.org/pdf/1610.02413.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Efficient Globally Convergent Stochastic Optimization for Canonical Correlation Analysis.\n \n \n \n \n\n\n \n Wang, W.; Wang, J.; Garber, D.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 30th International Conference on Neural Information Processing Systems (NIPS), pages 766–774, 2016. \n \n\n\n\n
\n\n\n\n \n \n \"Efficient paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2016wangefficient,\n  title={Efficient Globally Convergent Stochastic Optimization for Canonical Correlation Analysis},\n  author={Wang, Weiran and Wang, Jialei and Garber, Dan and Srebro, Nathan},\n  booktitle={Proceedings of the 30th International Conference on Neural Information Processing Systems (NIPS)},\n  pages={766--774},\n  year={2016},\n  %http://papers.nips.cc/paper/6459-efficient-globally-convergent-stochastic-optimization-for-canonical-correlation-analysis\n  url_Paper={https://arxiv.org/pdf/1604.01870.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Tight Complexity Bounds for Optimizing Composite Objectives.\n \n \n \n \n\n\n \n Woodworth, B.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 30th International Conference on Neural Information Processing Systems (NIPS), pages 3646–3654, 2016. \n \n\n\n\n
\n\n\n\n \n \n \"Tight paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2016woodworthtight,\n  title={Tight Complexity Bounds for Optimizing Composite Objectives},\n  author={Woodworth, Blake and Srebro, Nathan},\n  booktitle={Proceedings of the 30th International Conference on Neural Information Processing Systems (NIPS)},\n  pages={3646--3654},\n  year={2016},\n  %http://papers.nips.cc/paper/6058-tight-complexity-bounds-for-optimizing-composite-objectives\n  url_Paper={https://arxiv.org/pdf/1605.08003.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Path-normalized Optimization of Recurrent Neural Networks with ReLU Activations.\n \n \n \n \n\n\n \n Neyshabur, B.; Wu, Y.; Salakhutdinov, R.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 30th International Conference on Neural Information Processing Systems (NIPS), pages 3485–3493, 2016. \n \n\n\n\n
\n\n\n\n \n \n \"Path-normalized paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2016neyshaburpath,\n  title={Path-normalized Optimization of Recurrent Neural Networks with ReLU Activations},\n  author={Neyshabur, Behnam and Wu, Yuhuai and Salakhutdinov, Ruslan and Srebro, Nathan},\n  booktitle={Proceedings of the 30th International Conference on Neural Information Processing Systems (NIPS)},\n  pages={3485--3493},\n  year={2016},\n  %http://papers.nips.cc/paper/6213-path-normalized-optimization-of-recurrent-neural-networks-with-relu-activations\n  url_Paper={https://arxiv.org/pdf/1605.07154.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Global Optimality of Local Search for Low Rank Matrix Recovery.\n \n \n \n \n\n\n \n Bhojanapalli, S.; Neyshabur, B.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 30th International Conference on Neural Information Processing Systems (NIPS), pages 3880–3888, 2016. \n \n\n\n\n
\n\n\n\n \n \n \"Global paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2016bhojanapalliglobal,\n  title={Global Optimality of Local Search for Low Rank Matrix Recovery},\n  author={Bhojanapalli, Srinadh and Neyshabur, Behnam and Srebro, Nathan},\n  booktitle={Proceedings of the 30th International Conference on Neural Information Processing Systems (NIPS)},\n  pages={3880--3888},\n  year={2016},\n  %http://papers.nips.cc/paper/6270-global-optimality-of-local-search-for-low-rank-matrix-recovery\n  url_Paper={https://arxiv.org/pdf/1605.07221.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2015\n \n \n (18)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Data-dependent Path Normalization in Neural Networks.\n \n \n \n \n\n\n \n Neyshabur, B.; Tomioka, R.; Salakhutdinov, R.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2015.\n \n\n\n\n
\n\n\n\n \n \n \"Data-dependent paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2015neyshaburdata,\n  title={Data-dependent Path Normalization in Neural Networks},\n  author={Neyshabur, Behnam and Tomioka, Ryota and Salakhutdinov, Ruslan and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2015},\n  url_Paper={https://arxiv.org/pdf/1511.06747.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Stochastic Optimization for Deep CCA via Nonlinear Orthogonal Iterations.\n \n \n \n \n\n\n \n Wang, W.; Arora, R.; Livescu, K.; and Srebro, N.\n\n\n \n\n\n\n In 2015 53rd Annual Allerton Conference on Communication, Control, and Computing (Allerton), pages 688–695, 2015. Institute of Electrical and Electronics Engineers (IEEE)\n \n\n\n\n
\n\n\n\n \n \n \"Stochastic paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 8 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2015wangstochastic,\n  title={Stochastic Optimization for Deep CCA via Nonlinear Orthogonal Iterations},\n  author={Wang, Weiran and Arora, Raman and Livescu, Karen and Srebro, Nathan},\n  booktitle={2015 53rd Annual Allerton Conference on Communication, Control, and Computing (Allerton)},\n  pages={688--695},\n  year={2015},\n  organization={Institute of Electrical and Electronics Engineers (IEEE)},\n  %https://ieeexplore.ieee.org/abstract/document/7447071\n  url_Paper={https://arxiv.org/pdf/1510.02054.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Normalized Hierarchical SVM.\n \n \n \n \n\n\n \n Choi, H.; Sasaki, Y.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2015.\n \n\n\n\n
\n\n\n\n \n \n \"Normalized paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2015choinormalized,\n  title={Normalized Hierarchical SVM},\n  author={Choi, Heejin and Sasaki, Yutaka and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2015},\n  url_Paper={https://arxiv.org/pdf/1508.02479.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Distributed Mini-batch SDCA.\n \n \n \n \n\n\n \n Takáč, M.; Richtárik, P.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2015.\n \n\n\n\n
\n\n\n\n \n \n \"Distributed paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2015takacdistributed,\n  title={Distributed Mini-batch SDCA},\n  author={Tak{\\'a}{\\v{c}}, Martin and Richt{\\'a}rik, Peter and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2015},\n  url_Paper={https://arxiv.org/pdf/1507.08322.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Norm-based Capacity Control in Neural Networks.\n \n \n \n \n\n\n \n Neyshabur, B.; Tomioka, R.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 28th Conference on Learning Theory (COLT), volume PMLR 40, pages 1376–1401, 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Norm-based paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2015neyshaburnorm,\n  title={Norm-based Capacity Control in Neural Networks},\n  author={Neyshabur, Behnam and Tomioka, Ryota and Srebro, Nathan},\n  booktitle={Proceedings of the 28th Conference on Learning Theory (COLT)},\n  volume={PMLR 40},\n  pages={1376--1401},\n  year={2015},\n  %http://proceedings.mlr.press/v40/Neyshabur15.html\n  url_Paper={https://arxiv.org/pdf/1503.00036.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Efficient Training of Structured SVMs via Soft Constraints.\n \n \n \n \n\n\n \n Meshi, O.; Srebro, N.; and Hazan, T.\n\n\n \n\n\n\n In Proceedings of the 18th International Conference on Artificial Intelligent and Statistics (AISTATS), volume PMLR 38, pages 699–707, 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Efficient paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2015meshiefficient,\n  title={Efficient Training of Structured SVMs via Soft Constraints},\n  author={Meshi, Ofer and Srebro, Nathan and Hazan, Tamir},\n  booktitle={Proceedings of the 18th International Conference on Artificial Intelligent and Statistics (AISTATS)},\n  volume={PMLR 38},\n  pages={699--707},\n  year={2015},\n  url_Paper={http://proceedings.mlr.press/v38/meshi15.html}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Path-SGD: Path-Normalized Optimization in Deep Neural Networks.\n \n \n \n \n\n\n \n Neyshabur, B.; Salakhutdinov, R.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 28th International Conference on Neural Information Processing Systems (NIPS), volume 2, pages 2422–2430, 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Path-SGD: paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2015neyshaburpath,\n  title={Path-SGD: Path-Normalized Optimization in Deep Neural Networks},\n  author={Neyshabur, Behnam and Salakhutdinov, Ruslan and Srebro, Nathan},\n  booktitle={Proceedings of the 28th International Conference on Neural Information Processing Systems (NIPS)},\n  volume={2},\n  pages={2422--2430},\n  year={2015},\n  %http://papers.nips.cc/paper/5797-path-sgd-path-normalized-optimization-in-deep-neural-networks\n  url_Paper={https://arxiv.org/pdf/1506.02617.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On Symmetric and Asymmetric LSHs for Inner Product Search.\n \n \n \n \n\n\n \n Neyshabur, B.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 32nd International Conference on Machine Learning (ICML), volume PMLR 37, pages 1926–1934, 2015. \n \n\n\n\n
\n\n\n\n \n \n \"On paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2015neyshabursymmetric,\n  title={On Symmetric and Asymmetric LSHs for Inner Product Search},\n  author={Neyshabur, Behnam and Srebro, Nathan},\n  booktitle={Proceedings of the 32nd International Conference on Machine Learning (ICML)},\n  volume={PMLR 37},\n  pages={1926--1934},\n  year={2015},\n  %http://proceedings.mlr.press/v37/neyshabur15.html\n  url_Paper={https://arxiv.org/pdf/1410.5518.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Learning Sparse Low-threshold Linear Classifiers.\n \n \n \n \n\n\n \n Sabato, S.; Shalev-Shwartz, S.; Srebro, N.; Hsu, D.; and Zhang, T.\n\n\n \n\n\n\n Journal of Machine Learning Research, 16(38): 1275–1304. 2015.\n \n\n\n\n
\n\n\n\n \n \n \"Learning paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2015sabatolearning,\n  title={Learning Sparse Low-threshold Linear Classifiers},\n  author={Sabato, Sivan and Shalev-Shwartz, Shai and Srebro, Nathan and Hsu, Daniel and Zhang, Tong},\n  journal={Journal of Machine Learning Research},\n  volume={16},\n  number={38},\n  pages={1275--1304},\n  year={2015},\n  editor={Crammer, Koby},\n  %http://www.jmlr.org/papers/v16/sabato15a.html\n  url_Paper={https://arxiv.org/pdf/1212.3276.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Data-dependent Path Normalization in Neural Networks.\n \n \n \n \n\n\n \n Neyshabur, B.; Tomioka, R.; Salakhutdinov, R.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2015.\n \n\n\n\n
\n\n\n\n \n \n \"Data-dependent paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2015neyshaburdata,\n  title={Data-dependent Path Normalization in Neural Networks},\n  author={Neyshabur, Behnam and Tomioka, Ryota and Salakhutdinov, Ruslan and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2015},\n  url_Paper={https://arxiv.org/pdf/1511.06747.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Stochastic Optimization for Deep CCA via Nonlinear Orthogonal Iterations.\n \n \n \n \n\n\n \n Wang, W.; Arora, R.; Livescu, K.; and Srebro, N.\n\n\n \n\n\n\n In 2015 53rd Annual Allerton Conference on Communication, Control, and Computing (Allerton), pages 688–695, 2015. Institute of Electrical and Electronics Engineers (IEEE)\n \n\n\n\n
\n\n\n\n \n \n \"Stochastic paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 8 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2015wangstochastic,\n  title={Stochastic Optimization for Deep CCA via Nonlinear Orthogonal Iterations},\n  author={Wang, Weiran and Arora, Raman and Livescu, Karen and Srebro, Nathan},\n  booktitle={2015 53rd Annual Allerton Conference on Communication, Control, and Computing (Allerton)},\n  pages={688--695},\n  year={2015},\n  organization={Institute of Electrical and Electronics Engineers (IEEE)},\n  %https://ieeexplore.ieee.org/abstract/document/7447071\n  url_Paper={https://arxiv.org/pdf/1510.02054.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Normalized Hierarchical SVM.\n \n \n \n \n\n\n \n Choi, H.; Sasaki, Y.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2015.\n \n\n\n\n
\n\n\n\n \n \n \"Normalized paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2015choinormalized,\n  title={Normalized Hierarchical SVM},\n  author={Choi, Heejin and Sasaki, Yutaka and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2015},\n  url_Paper={https://arxiv.org/pdf/1508.02479.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Distributed Mini-batch SDCA.\n \n \n \n \n\n\n \n Takáč, M.; Richtárik, P.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2015.\n \n\n\n\n
\n\n\n\n \n \n \"Distributed paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2015takacdistributed,\n  title={Distributed Mini-batch SDCA},\n  author={Tak{\\'a}{\\v{c}}, Martin and Richt{\\'a}rik, Peter and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2015},\n  url_Paper={https://arxiv.org/pdf/1507.08322.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Norm-based Capacity Control in Neural Networks.\n \n \n \n \n\n\n \n Neyshabur, B.; Tomioka, R.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 28th Conference on Learning Theory (COLT), volume PMLR 40, pages 1376–1401, 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Norm-based paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2015neyshaburnorm,\n  title={Norm-based Capacity Control in Neural Networks},\n  author={Neyshabur, Behnam and Tomioka, Ryota and Srebro, Nathan},\n  booktitle={Proceedings of the 28th Conference on Learning Theory (COLT)},\n  volume={PMLR 40},\n  pages={1376--1401},\n  year={2015},\n  %http://proceedings.mlr.press/v40/Neyshabur15.html\n  url_Paper={https://arxiv.org/pdf/1503.00036.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Efficient Training of Structured SVMs via Soft Constraints.\n \n \n \n \n\n\n \n Meshi, O.; Srebro, N.; and Hazan, T.\n\n\n \n\n\n\n In Proceedings of the 18th International Conference on Artificial Intelligent and Statistics (AISTATS), volume PMLR 38, pages 699–707, 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Efficient paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2015meshiefficient,\n  title={Efficient Training of Structured SVMs via Soft Constraints},\n  author={Meshi, Ofer and Srebro, Nathan and Hazan, Tamir},\n  booktitle={Proceedings of the 18th International Conference on Artificial Intelligent and Statistics (AISTATS)},\n  volume={PMLR 38},\n  pages={699--707},\n  year={2015},\n  url_Paper={http://proceedings.mlr.press/v38/meshi15.html}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Path-SGD: Path-Normalized Optimization in Deep Neural Networks.\n \n \n \n \n\n\n \n Neyshabur, B.; Salakhutdinov, R.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 28th International Conference on Neural Information Processing Systems (NIPS), volume 2, pages 2422–2430, 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Path-SGD: paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2015neyshaburpath,\n  title={Path-SGD: Path-Normalized Optimization in Deep Neural Networks},\n  author={Neyshabur, Behnam and Salakhutdinov, Ruslan and Srebro, Nathan},\n  booktitle={Proceedings of the 28th International Conference on Neural Information Processing Systems (NIPS)},\n  volume={2},\n  pages={2422--2430},\n  year={2015},\n  %http://papers.nips.cc/paper/5797-path-sgd-path-normalized-optimization-in-deep-neural-networks\n  url_Paper={https://arxiv.org/pdf/1506.02617.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On Symmetric and Asymmetric LSHs for Inner Product Search.\n \n \n \n \n\n\n \n Neyshabur, B.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 32nd International Conference on Machine Learning (ICML), volume PMLR 37, pages 1926–1934, 2015. \n \n\n\n\n
\n\n\n\n \n \n \"On paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2015neyshabursymmetric,\n  title={On Symmetric and Asymmetric LSHs for Inner Product Search},\n  author={Neyshabur, Behnam and Srebro, Nathan},\n  booktitle={Proceedings of the 32nd International Conference on Machine Learning (ICML)},\n  volume={PMLR 37},\n  pages={1926--1934},\n  year={2015},\n  %http://proceedings.mlr.press/v37/neyshabur15.html\n  url_Paper={https://arxiv.org/pdf/1410.5518.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Learning Sparse Low-threshold Linear Classifiers.\n \n \n \n \n\n\n \n Sabato, S.; Shalev-Shwartz, S.; Srebro, N.; Hsu, D.; and Zhang, T.\n\n\n \n\n\n\n Journal of Machine Learning Research, 16(38): 1275–1304. 2015.\n \n\n\n\n
\n\n\n\n \n \n \"Learning paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2015sabatolearning,\n  title={Learning Sparse Low-threshold Linear Classifiers},\n  author={Sabato, Sivan and Shalev-Shwartz, Shai and Srebro, Nathan and Hsu, Daniel and Zhang, Tong},\n  journal={Journal of Machine Learning Research},\n  volume={16},\n  number={38},\n  pages={1275--1304},\n  year={2015},\n  editor={Crammer, Koby},\n  %http://www.jmlr.org/papers/v16/sabato15a.html\n  url_Paper={https://arxiv.org/pdf/1212.3276.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2014\n \n \n (12)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Stochastic Gradient Descent, Weighted Sampling, and the Randomized Kaczmarz algorithm.\n \n \n \n \n\n\n \n Needell, D.; Srebro, N.; and Ward, R.\n\n\n \n\n\n\n In Proceedings of the 27th International Conference on Neural Information Processing Systems (NIPS), volume 1, pages 1017–1025, 2014. \n \n\n\n\n
\n\n\n\n \n \n \"Stochastic paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2014needellstochastic,\n  title={Stochastic Gradient Descent, Weighted Sampling, and the Randomized Kaczmarz algorithm},\n  author={Needell, Deanna and Srebro, Nathan and Ward, Rachel},\n  booktitle={Proceedings of the 27th International Conference on Neural Information Processing Systems (NIPS)},\n  volume={1},\n  pages={1017--1025},\n  year={2014},\n  %http://papers.nips.cc/paper/5355-stochastic-gradient-descent-weighted-sampling-and-the-randomized-kaczmarz-algorithm\n  url_Paper={https://arxiv.org/pdf/1310.5715.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n In Search of the Real Inductive Bias: On the Role of Implicit Regularization in Deep Learning.\n \n \n \n \n\n\n \n Neyshabur, B.; Tomioka, R.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2014.\n \n\nAccepted as a workshop contribution at the 3rd International Conference on Learning Representations (ICLR) 2015\n\n
\n\n\n\n \n \n \"In paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2014neyshabursearch,\n  title={In Search of the Real Inductive Bias: On the Role of Implicit Regularization in Deep Learning},\n  author={Neyshabur, Behnam and Tomioka, Ryota and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2014},\n  bibbase_note={Accepted as a workshop contribution at the 3rd International Conference on Learning Representations (ICLR) 2015},\n  url_Paper={https://arxiv.org/pdf/1412.6614.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Clustering, hamming embedding, generalized LSH and the max norm.\n \n \n \n \n\n\n \n Neyshabur, B.; Makarychev, Y.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 25th International Conference on Algorithmic Learning Theory (ALT), pages 306–320, 2014. Springer\n \n\n\n\n
\n\n\n\n \n \n \"Clustering, paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2014neyshaburclustering,\n  title={Clustering, hamming embedding, generalized LSH and the max norm},\n  author={Neyshabur, Behnam and Makarychev, Yury and Srebro, Nathan},\n  booktitle={Proceedings of the 25th International Conference on Algorithmic Learning Theory (ALT)},\n  pages={306--320},\n  year={2014},\n  publisher={Springer},\n  %https://link.springer.com/chapter/10.1007/978-3-319-11662-4_22\n  url_Paper={https://arxiv.org/pdf/1405.3167.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Distributed Stochastic Optimization and Learning.\n \n \n \n \n\n\n \n Shamir, O.; and Srebro, N.\n\n\n \n\n\n\n In 2014 52nd Annual Allerton Conference on Communication, Control, and Computing, pages 850–857, 2014. Institute of Electrical and Electronics Engineers (IEEE)\n \n\n\n\n
\n\n\n\n \n \n \"Distributed paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2014shamirdistributed,\n  title={Distributed Stochastic Optimization and Learning},\n  author={Shamir, Ohad and Srebro, Nathan},\n  booktitle={2014 52nd Annual Allerton Conference on Communication, Control, and Computing},\n  pages={850--857},\n  year={2014},\n  organization={Institute of Electrical and Electronics Engineers (IEEE)},\n  url_Paper={https://ieeexplore.ieee.org/abstract/document/7028543}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Active Collaborative Permutation Learning.\n \n \n \n \n\n\n \n Wang, J.; Srebro, N.; and Evans, J.\n\n\n \n\n\n\n In Proceedings of the 20th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pages 502–511, 2014. \n \n\n\n\n
\n\n\n\n \n \n \"Active paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2014wangactive,\n  title={Active Collaborative Permutation Learning},\n  author={Wang, Jialei and Srebro, Nathan and Evans, James},\n  booktitle={Proceedings of the 20th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining},\n  pages={502--511},\n  year={2014},\n  url_Paper={https://dl.acm.org/doi/abs/10.1145/2623330.2623730}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Communication-efficient Distributed Optimization using an Approximate Newton-type Method.\n \n \n \n \n\n\n \n Shamir, O.; Srebro, N.; and Zhang, T.\n\n\n \n\n\n\n In Proceedings of the 31st International Conference on Machine Learning (ICML), volume PMLR 32, pages 1000–1008, 2014. \n \n\n\n\n
\n\n\n\n \n \n \"Communication-efficient paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2014shamircommunication,\n  title={Communication-efficient Distributed Optimization using an Approximate Newton-type Method},\n  author={Shamir, Ohad and Srebro, Nathan and Zhang, Tong},\n  booktitle={Proceedings of the 31st International Conference on Machine Learning (ICML)},\n  volume={PMLR 32},\n  number={2},\n  pages={1000--1008},\n  year={2014},\n  %http://proceedings.mlr.press/v32/shamir14.html\n  url_Paper={https://arxiv.org/pdf/1312.7853.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Stochastic Gradient Descent, Weighted Sampling, and the Randomized Kaczmarz algorithm.\n \n \n \n \n\n\n \n Needell, D.; Srebro, N.; and Ward, R.\n\n\n \n\n\n\n In Proceedings of the 27th International Conference on Neural Information Processing Systems (NIPS), volume 1, pages 1017–1025, 2014. \n \n\n\n\n
\n\n\n\n \n \n \"Stochastic paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2014needellstochastic,\n  title={Stochastic Gradient Descent, Weighted Sampling, and the Randomized Kaczmarz algorithm},\n  author={Needell, Deanna and Srebro, Nathan and Ward, Rachel},\n  booktitle={Proceedings of the 27th International Conference on Neural Information Processing Systems (NIPS)},\n  volume={1},\n  pages={1017--1025},\n  year={2014},\n  %http://papers.nips.cc/paper/5355-stochastic-gradient-descent-weighted-sampling-and-the-randomized-kaczmarz-algorithm\n  url_Paper={https://arxiv.org/pdf/1310.5715.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n In Search of the Real Inductive Bias: On the Role of Implicit Regularization in Deep Learning.\n \n \n \n \n\n\n \n Neyshabur, B.; Tomioka, R.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2014.\n \n\n\\newline Accepted as a workshop contribution at the 3rd International Conference on Learning Representations (ICLR) 2015\n\n
\n\n\n\n \n \n \"In paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2014neyshabursearch,\n  title={In Search of the Real Inductive Bias: On the Role of Implicit Regularization in Deep Learning},\n  author={Neyshabur, Behnam and Tomioka, Ryota and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2014},\n  bibbase_note={\\newline Accepted as a workshop contribution at the 3rd International Conference on Learning Representations (ICLR) 2015},\n  url_Paper={https://arxiv.org/pdf/1412.6614.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Clustering, hamming embedding, generalized LSH and the max norm.\n \n \n \n \n\n\n \n Neyshabur, B.; Makarychev, Y.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 25th International Conference on Algorithmic Learning Theory (ALT), pages 306–320, 2014. Springer\n \n\n\n\n
\n\n\n\n \n \n \"Clustering, paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2014neyshaburclustering,\n  title={Clustering, hamming embedding, generalized LSH and the max norm},\n  author={Neyshabur, Behnam and Makarychev, Yury and Srebro, Nathan},\n  booktitle={Proceedings of the 25th International Conference on Algorithmic Learning Theory (ALT)},\n  pages={306--320},\n  year={2014},\n  publisher={Springer},\n  %https://link.springer.com/chapter/10.1007/978-3-319-11662-4_22\n  url_Paper={https://arxiv.org/pdf/1405.3167.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Distributed Stochastic Optimization and Learning.\n \n \n \n \n\n\n \n Shamir, O.; and Srebro, N.\n\n\n \n\n\n\n In 2014 52nd Annual Allerton Conference on Communication, Control, and Computing, pages 850–857, 2014. Institute of Electrical and Electronics Engineers (IEEE)\n \n\n\n\n
\n\n\n\n \n \n \"Distributed paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2014shamirdistributed,\n  title={Distributed Stochastic Optimization and Learning},\n  author={Shamir, Ohad and Srebro, Nathan},\n  booktitle={2014 52nd Annual Allerton Conference on Communication, Control, and Computing},\n  pages={850--857},\n  year={2014},\n  organization={Institute of Electrical and Electronics Engineers (IEEE)},\n  url_Paper={https://ieeexplore.ieee.org/abstract/document/7028543}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Active Collaborative Permutation Learning.\n \n \n \n \n\n\n \n Wang, J.; Srebro, N.; and Evans, J.\n\n\n \n\n\n\n In Proceedings of the 20th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pages 502–511, 2014. \n \n\n\n\n
\n\n\n\n \n \n \"Active paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2014wangactive,\n  title={Active Collaborative Permutation Learning},\n  author={Wang, Jialei and Srebro, Nathan and Evans, James},\n  booktitle={Proceedings of the 20th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining},\n  pages={502--511},\n  year={2014},\n  url_Paper={https://dl.acm.org/doi/abs/10.1145/2623330.2623730}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Communication-efficient Distributed Optimization using an Approximate Newton-type Method.\n \n \n \n \n\n\n \n Shamir, O.; Srebro, N.; and Zhang, T.\n\n\n \n\n\n\n In Proceedings of the 31st International Conference on Machine Learning (ICML), volume PMLR 32, pages 1000–1008, 2014. \n \n\n\n\n
\n\n\n\n \n \n \"Communication-efficient paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2014shamircommunication,\n  title={Communication-efficient Distributed Optimization using an Approximate Newton-type Method},\n  author={Shamir, Ohad and Srebro, Nathan and Zhang, Tong},\n  booktitle={Proceedings of the 31st International Conference on Machine Learning (ICML)},\n  volume={PMLR 32},\n  number={2},\n  pages={1000--1008},\n  year={2014},\n  %http://proceedings.mlr.press/v32/shamir14.html\n  url_Paper={https://arxiv.org/pdf/1312.7853.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2013\n \n \n (12)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Mini-Batch Primal and Dual Methods for SVMs.\n \n \n \n \n\n\n \n Takáč, M.; Bijral, A.; Richtárik, P.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 30th International Conference on Machine Learning (ICML), volume PMLR 28, pages 1022–1030, 2013. \n \n\n\n\n
\n\n\n\n \n \n \"Mini-Batch paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2013takacmini,\n  title={Mini-Batch Primal and Dual Methods for SVMs},\n  author={Tak{\\'a}{\\v{c}}, Martin and Bijral, Avleen and Richt{\\'a}rik, Peter and Srebro, Nathan},\n  booktitle={Proceedings of the 30th International Conference on Machine Learning (ICML)},\n  volume={PMLR 28},\n  number={3},\n  pages={1022--1030},\n  year={2013},\n  %http://proceedings.mlr.press/v28/takac13.html\n  url_Paper={https://arxiv.org/pdf/1303.2314.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Learning Optimally Sparse Support Vector Machines.\n \n \n \n \n\n\n \n Cotter, A.; Shalev-Shwartz, S.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 30th International Conference on Machine Learning (ICML), volume PMLR 28, pages 266–274, 2013. \n \n\n\n\n
\n\n\n\n \n \n \"Learning paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2013cotterlearning,\n  title={Learning Optimally Sparse Support Vector Machines},\n  author={Cotter, Andrew and Shalev-Shwartz, Shai and Srebro, Nathan},\n  booktitle={Proceedings of the 30th International Conference on Machine Learning (ICML)},\n  volume={PMLR 28},\n  number={1},\n  pages={266--274},\n  year={2013},\n  url_Paper={http://proceedings.mlr.press/v28/cotter13.html}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Distribution-dependent Sample Complexity of Large Margin Learning.\n \n \n \n \n\n\n \n Sabato, S.; Srebro, N.; and Tishby, N.\n\n\n \n\n\n\n Journal of Machine Learning Research, 14(28): 2119–2149. 2013.\n \n\n\n\n
\n\n\n\n \n \n \"Distribution-dependent paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2013sabatodistribution,\n  title={Distribution-dependent Sample Complexity of Large Margin Learning},\n  author={Sabato, Sivan and Srebro, Nathan and Tishby, Naftali},\n  journal={Journal of Machine Learning Research},\n  volume={14},\n  number={28},\n  pages={2119--2149},\n  year={2013},\n  editor={Shawe-Taylor, John},\n  %http://www.jmlr.org/papers/v14/sabato13a.html\n  url_Paper={https://arxiv.org/pdf/1204.1276.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The Power of Asymmetry in Binary Hashing.\n \n \n \n \n\n\n \n Neyshabur, B.; Yadollahpour, P.; Makarychev, Y.; Salakhutdinov, R.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 26th International Conference on Neural Information Processing Systems (NIPS), volume 2, pages 2823–2831, 2013. \n \n\n\n\n
\n\n\n\n \n \n \"The paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2013neyshaburpower,\n  title={The Power of Asymmetry in Binary Hashing},\n  author={Neyshabur, Behnam and Yadollahpour, Payman and Makarychev, Yury and Salakhutdinov, Ruslan and Srebro, Nathan},\n  booktitle={Proceedings of the 26th International Conference on Neural Information Processing Systems (NIPS)},\n  volume={2},\n  pages={2823--2831},\n  year={2013},\n  %http://papers.nips.cc/paper/5017-the-power-of-asymmetry-in-binary-hashing\n  url_Paper={https://arxiv.org/pdf/1311.7662.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Stochastic Optimization of PCA with Capped MSG.\n \n \n \n \n\n\n \n Arora, R.; Cotter, A.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 26th International Conference on Neural Information Processing Systems (NIPS), volume 2, pages 1815–1823, 2013. \n \n\n\n\n
\n\n\n\n \n \n \"Stochastic paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 8 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2013arorastochastic,\n  title={Stochastic Optimization of PCA with Capped MSG},\n  author={Arora, Raman and Cotter, Andy and Srebro, Nathan},\n  booktitle={Proceedings of the 26th International Conference on Neural Information Processing Systems (NIPS)},\n  volume={2},\n  pages={1815--1823},\n  year={2013},\n  %http://papers.nips.cc/paper/5033-stochastic-optimization-of-pca-with-capped-msg\n  url_Paper={https://arxiv.org/pdf/1307.1674.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Auditing: Active Learning with Outcome-dependent Query Costs.\n \n \n \n \n\n\n \n Sabato, S.; Sarwate, A.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 26th International Conference on Neural Information Processing Systems (NIPS), volume 1, pages 512–520, 2013. \n \n\n\n\n
\n\n\n\n \n \n \"Auditing: paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2013sabatoauditing,\n  title={Auditing: Active Learning with Outcome-dependent Query Costs},\n  author={Sabato, Sivan and Sarwate, Anand and Srebro, Nathan},\n  booktitle={Proceedings of the 26th International Conference on Neural Information Processing Systems (NIPS)},\n  volume={1},\n  pages={512--520},\n  year={2013},\n  %http://papers.nips.cc/paper/4956-auditing-active-learning-with-outcome-dependent-query-costs\n  url_Paper={https://arxiv.org/pdf/1306.2347.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Mini-Batch Primal and Dual Methods for SVMs.\n \n \n \n \n\n\n \n Takáč, M.; Bijral, A.; Richtárik, P.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 30th International Conference on Machine Learning (ICML), volume PMLR 28, pages 1022–1030, 2013. \n \n\n\n\n
\n\n\n\n \n \n \"Mini-Batch paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2013takacmini,\n  title={Mini-Batch Primal and Dual Methods for SVMs},\n  author={Tak{\\'a}{\\v{c}}, Martin and Bijral, Avleen and Richt{\\'a}rik, Peter and Srebro, Nathan},\n  booktitle={Proceedings of the 30th International Conference on Machine Learning (ICML)},\n  volume={PMLR 28},\n  number={3},\n  pages={1022--1030},\n  year={2013},\n  %http://proceedings.mlr.press/v28/takac13.html\n  url_Paper={https://arxiv.org/pdf/1303.2314.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Learning Optimally Sparse Support Vector Machines.\n \n \n \n \n\n\n \n Cotter, A.; Shalev-Shwartz, S.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 30th International Conference on Machine Learning (ICML), volume PMLR 28, pages 266–274, 2013. \n \n\n\n\n
\n\n\n\n \n \n \"Learning paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2013cotterlearning,\n  title={Learning Optimally Sparse Support Vector Machines},\n  author={Cotter, Andrew and Shalev-Shwartz, Shai and Srebro, Nathan},\n  booktitle={Proceedings of the 30th International Conference on Machine Learning (ICML)},\n  volume={PMLR 28},\n  number={1},\n  pages={266--274},\n  year={2013},\n  url_Paper={http://proceedings.mlr.press/v28/cotter13.html}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Distribution-dependent Sample Complexity of Large Margin Learning.\n \n \n \n \n\n\n \n Sabato, S.; Srebro, N.; and Tishby, N.\n\n\n \n\n\n\n Journal of Machine Learning Research, 14(28): 2119–2149. 2013.\n \n\n\n\n
\n\n\n\n \n \n \"Distribution-dependent paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2013sabatodistribution,\n  title={Distribution-dependent Sample Complexity of Large Margin Learning},\n  author={Sabato, Sivan and Srebro, Nathan and Tishby, Naftali},\n  journal={Journal of Machine Learning Research},\n  volume={14},\n  number={28},\n  pages={2119--2149},\n  year={2013},\n  editor={Shawe-Taylor, John},\n  %http://www.jmlr.org/papers/v14/sabato13a.html\n  url_Paper={https://arxiv.org/pdf/1204.1276.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The Power of Asymmetry in Binary Hashing.\n \n \n \n \n\n\n \n Neyshabur, B.; Yadollahpour, P.; Makarychev, Y.; Salakhutdinov, R.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 26th International Conference on Neural Information Processing Systems (NIPS), volume 2, pages 2823–2831, 2013. \n \n\n\n\n
\n\n\n\n \n \n \"The paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2013neyshaburpower,\n  title={The Power of Asymmetry in Binary Hashing},\n  author={Neyshabur, Behnam and Yadollahpour, Payman and Makarychev, Yury and Salakhutdinov, Ruslan and Srebro, Nathan},\n  booktitle={Proceedings of the 26th International Conference on Neural Information Processing Systems (NIPS)},\n  volume={2},\n  pages={2823--2831},\n  year={2013},\n  %http://papers.nips.cc/paper/5017-the-power-of-asymmetry-in-binary-hashing\n  url_Paper={https://arxiv.org/pdf/1311.7662.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Stochastic Optimization of PCA with Capped MSG.\n \n \n \n \n\n\n \n Arora, R.; Cotter, A.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 26th International Conference on Neural Information Processing Systems (NIPS), volume 2, pages 1815–1823, 2013. \n \n\n\n\n
\n\n\n\n \n \n \"Stochastic paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 8 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2013arorastochastic,\n  title={Stochastic Optimization of PCA with Capped MSG},\n  author={Arora, Raman and Cotter, Andy and Srebro, Nathan},\n  booktitle={Proceedings of the 26th International Conference on Neural Information Processing Systems (NIPS)},\n  volume={2},\n  pages={1815--1823},\n  year={2013},\n  %http://papers.nips.cc/paper/5033-stochastic-optimization-of-pca-with-capped-msg\n  url_Paper={https://arxiv.org/pdf/1307.1674.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Auditing: Active Learning with Outcome-dependent Query Costs.\n \n \n \n \n\n\n \n Sabato, S.; Sarwate, A.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 26th International Conference on Neural Information Processing Systems (NIPS), volume 1, pages 512–520, 2013. \n \n\n\n\n
\n\n\n\n \n \n \"Auditing: paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2013sabatoauditing,\n  title={Auditing: Active Learning with Outcome-dependent Query Costs},\n  author={Sabato, Sivan and Sarwate, Anand and Srebro, Nathan},\n  booktitle={Proceedings of the 26th International Conference on Neural Information Processing Systems (NIPS)},\n  volume={1},\n  pages={512--520},\n  year={2013},\n  %http://papers.nips.cc/paper/4956-auditing-active-learning-with-outcome-dependent-query-costs\n  url_Paper={https://arxiv.org/pdf/1306.2347.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2012\n \n \n (28)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Stochastic Optimization for PCA and PLS.\n \n \n \n \n\n\n \n Arora, R.; Cotter, A.; Livescu, K.; and Srebro, N.\n\n\n \n\n\n\n In 2012 50th Annual Allerton Conference on Communication, Control, and Computing, pages 861–868, 2012. Institute of Electrical and Electronics Engineers (IEEE)\n \n\n\n\n
\n\n\n\n \n \n \"Stochastic paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 7 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2012arorastochastic,\n  title={Stochastic Optimization for PCA and PLS},\n  author={Arora, Raman and Cotter, Andrew and Livescu, Karen and Srebro, Nathan},\n  booktitle={2012 50th Annual Allerton Conference on Communication, Control, and Computing},\n  pages={861--868},\n  year={2012},\n  organization={Institute of Electrical and Electronics Engineers (IEEE)},\n  url_Paper={https://ieeexplore.ieee.org/abstract/document/6483308}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Minimizing the Misclassification Error Rate using a Surrogate Convex Loss.\n \n \n \n \n\n\n \n Ben-David, S.; Loker, D.; Srebro, N.; and Sridharan, K.\n\n\n \n\n\n\n In Proceedings of the 29th International Conference on Machine Learning (ICML), pages 83–90, 2012. \n \n\n\n\n
\n\n\n\n \n \n \"Minimizing paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2012benminimizing,\n  title={Minimizing the Misclassification Error Rate using a Surrogate Convex Loss},\n  author={Ben-David, Shai and Loker, David and Srebro, Nathan and Sridharan, Karthik},\n  booktitle={Proceedings of the 29th International Conference on Machine Learning (ICML)},\n  pages={83--90},\n  year={2012},\n  %https://dl.acm.org/doi/abs/10.5555/3042573.3042588\n  url_Paper={https://arxiv.org/ftp/arxiv/papers/1206/1206.6442.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The Best of Both Worlds: Stochastic and Adversarial Bandits.\n \n \n \n \n\n\n \n Bubeck, S.; and Slivkins, A.\n\n\n \n\n\n\n In Mannor, S.; Srebro, N.; and Williamson, R., editor(s), Proceedings of the 25th Annual Conference on Learning Theory (COLT), volume PMLR 23, pages 42.1–42.23, 2012. \n \n\n\n\n
\n\n\n\n \n \n \"The paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2012bubeckbest,\n  title={The Best of Both Worlds: Stochastic and Adversarial Bandits},\n  author={Bubeck, S{\\'e}bastien and Slivkins, Aleksandrs},\n  booktitle={Proceedings of the 25th Annual Conference on Learning Theory (COLT)},\n  volume={PMLR 23},\n  pages={42.1--42.23},\n  year={2012},\n  editor={Mannor, Shie and Srebro, Nathan and Williamson, Robert},\n  %http://proceedings.mlr.press/v23/bubeck12b.html\n  url_Paper={https://arxiv.org/pdf/1202.4473.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Towards Minimax Policies for Online Linear Optimization with Bandit Feedback.\n \n \n \n \n\n\n \n Bubeck, S.; Cesa-Bianchi, N.; and Kakade, S.\n\n\n \n\n\n\n In Shie, M.; Srebro, N.; and Williamson, R., editor(s), Proceedings of the 25th Annual Conference on Learning Theory (COLT), volume PMLR 23, pages 41.1–41.14, 2012. \n \n\n\n\n
\n\n\n\n \n \n \"Towards paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2012bubecltowards,\n  title={Towards Minimax Policies for Online Linear Optimization with Bandit Feedback},\n  author={Bubeck, S{\\'e}bastien and Cesa-Bianchi, Nicolo and Kakade, Sham},\n  booktitle={Proceedings of the 25th Annual Conference on Learning Theory (COLT)},\n  volume={PMLR 23},\n  pages={41.1--41.14},\n  year={2012},\n  editor={Mannor Shie and Srebro, Nathan and Williamson, Robert},\n  %http://proceedings.mlr.press/v23/bubeck12a.html\n  url_Paper={https://arxiv.org/pdf/1202.3079.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Toward a Noncommutative Arithmetic-geometric Mean Inequality: Conjectures, Case-studies, and Consequences.\n \n \n \n \n\n\n \n Recht, B.; and Ré, C.\n\n\n \n\n\n\n In Mannor, S.; Srebro, N.; and Williamson, R., editor(s), Proceedings of the 25th Annual Conference on Learning Theory (COLT), volume PMLR 23, pages 11.1–11.24, 2012. \n \n\n\n\n
\n\n\n\n \n \n \"Toward paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2012rechttoward,\n  title={Toward a Noncommutative Arithmetic-geometric Mean Inequality: Conjectures, Case-studies, and Consequences},\n  author={Recht, Benjamin and R{\\'e}, Christopher},\n  booktitle={Proceedings of the 25th Annual Conference on Learning Theory (COLT)},\n  volume={PMLR 23},\n  pages={11.1--11.24},\n  year={2012},\n  editor={Mannor, Shie and Srebro, Nathan and Williamson, Robert},\n  url_Paper={http://proceedings.mlr.press/v23/recht12.html}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n PRISMA: PRoximal Iterative SMoothing Algorithm.\n \n \n \n \n\n\n \n Orabona, F.; Argyriou, A.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2012.\n \n\n\n\n
\n\n\n\n \n \n \"PRISMA: paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 12 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2012orabonaprisma,\n  title={PRISMA: PRoximal Iterative SMoothing Algorithm},\n  author={Orabona, Francesco and Argyriou, Andreas and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2012},\n  url_Paper={https://arxiv.org/pdf/1206.2372.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The Kernelized Stochastic Batch Perceptron.\n \n \n \n \n\n\n \n Cotter, A.; Shalev-Shwartz, S.; and Srebro, N.\n\n\n \n\n\n\n Proceedings of the 29th International Conference on Machine Learning (ICML),739–746. 2012.\n \n\n\n\n
\n\n\n\n \n \n \"The paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2012cotterkernelized,\n  title={The Kernelized Stochastic Batch Perceptron},\n  author={Cotter, Andrew and Shalev-Shwartz, Shai and Srebro, Nathan},\n  journal={Proceedings of the 29th International Conference on Machine Learning (ICML)},\n  pages={739--746},\n  year={2012},\n  %https://dl.acm.org/doi/abs/10.5555/3042573.3042670\n  url_Paper={https://arxiv.org/pdf/1204.0566.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Approximate Inference by Intersecting Semidefinite Bound and Local Polytope.\n \n \n \n \n\n\n \n Peng, J.; Hazan, T.; Srebro, N.; and Xu, J.\n\n\n \n\n\n\n In Proceedings of the 15th International Conference on Artificial Intelligence and Statistics (AISTATS), volume PMLR 22, pages 868–876, 2012. \n \n\n\n\n
\n\n\n\n \n \n \"Approximate paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2012pengapproximate,\n  title={Approximate Inference by Intersecting Semidefinite Bound and Local Polytope},\n  author={Peng, Jian and Hazan, Tamir and Srebro, Nathan and Xu, Jinbo},\n  booktitle={Proceedings of the 15th International Conference on Artificial Intelligence and Statistics (AISTATS)},\n  volume={PMLR 22},\n  pages={868--876},\n  year={2012},\n  url_Paper={http://proceedings.mlr.press/v22/peng12.html}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Clustering using Max-norm Constrained Optimization.\n \n \n \n \n\n\n \n Jalali, A.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 29th International Conference on Machine Learning (ICML), pages 1579–1586, 2012. \n \n\n\n\n
\n\n\n\n \n \n \"Clustering paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2012jalaliclustering,\n  title={Clustering using Max-norm Constrained Optimization},\n  author={Jalali, Ali and Srebro, Nathan},\n  booktitle={Proceedings of the 29th International Conference on Machine Learning (ICML)},\n  pages={1579--1586},\n  year={2012},\n  %https://dl.acm.org/doi/abs/10.5555/3042573.3042775\n  url_Paper={https://arxiv.org/pdf/1202.5598.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Semi-supervised Learning with Density Based Distances.\n \n \n \n \n\n\n \n Bijral, A.; Ratliff, N.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 27th Conference on Uncertainty in Artificial Intelligence (UAI), pages 43–50, 2012. \n \n\n\n\n
\n\n\n\n \n \n \"Semi-supervised paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2012bijralsemi,\n  title={Semi-supervised Learning with Density Based Distances},\n  author={Bijral, Avleen and Ratliff, Nathan and Srebro, Nathan},\n  booktitle={Proceedings of the 27th Conference on Uncertainty in Artificial Intelligence (UAI)},\n  pages={43--50},\n  year={2012},\n  %https://dl.acm.org/doi/abs/10.5555/3020548.3020555\n  url_Paper={https://arxiv.org/ftp/arxiv/papers/1202/1202.3702.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On Convex Optimization, Fat Shattering and Learning.\n \n \n \n \n\n\n \n Srebro, N.; and Sridharan, K.\n\n\n \n\n\n\n 2012.\n \n\n\n\n
\n\n\n\n \n \n \"On paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{2012srebroconvex,\n  title={On Convex Optimization, Fat Shattering and Learning},\n  author={Srebro, Nathan and Sridharan, Karthik},\n  year={2012},\n  url_Paper={https://ttic.uchicago.edu/~karthik/optfat.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Commentary on \"Near-Optimal Algorithms for Online Matrix Prediction\".\n \n \n \n \n\n\n \n Foygel, R.\n\n\n \n\n\n\n In Mannor, S.; Srebro, N.; and Williamson, R., editor(s), Proceedings of the 25th Annual Conference on Learning Theory (COLT), volume PMLR 23, pages 38.14–38.17, 2012. \n \n\n\n\n
\n\n\n\n \n \n \"Commentary paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2012foygelcommentary,\n  title={Commentary on "Near-Optimal Algorithms for Online Matrix Prediction"},\n  author={Foygel, Rina},\n  booktitle={Proceedings of the 25th Annual Conference on Learning Theory (COLT)},\n  volume={PMLR 23},\n  pages={38.14--38.17},\n  year={2012},\n  editor={Mannor, Shie and Srebro, Nathan and Williamson, Robert},\n  url_Paper={http://proceedings.mlr.press/v23/foygel12/foygel12.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Sparse Prediction with the $ k $-Support Norm.\n \n \n \n \n\n\n \n Argyriou, A.; Foygel, R.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 25th International Conference on Neural Information Processing Systems (NIPS), volume 1, pages 1457–1465, 2012. \n \n\n\n\n
\n\n\n\n \n \n \"Sparse paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 12 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2012argyriousparse,\n  title={Sparse Prediction with the $ k $-Support Norm},\n  author={Argyriou, Andreas and Foygel, Rina and Srebro, Nathan},\n  booktitle={Proceedings of the 25th International Conference on Neural Information Processing Systems (NIPS)},\n  volume={1},\n  pages={1457--1465},\n  year={2012},\n  %http://papers.nips.cc/paper/4537-sparse-prediction-with-the-k-support-norm\n  url_Paper={https://arxiv.org/pdf/1204.5043.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Matrix Reconstruction with the Local Max Norm.\n \n \n \n \n\n\n \n Foygel, R.; Srebro, N.; and Salakhutdinov, R.\n\n\n \n\n\n\n In Proceedings of the 25th International Conference on Neural Information Processing Systems (NIPS), volume 1, pages 935–943, 2012. \n \n\n\n\n
\n\n\n\n \n \n \"Matrix paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2012foygelmatrix,\n  title={Matrix Reconstruction with the Local Max Norm},\n  author={Foygel, Rina and Srebro, Nathan and Salakhutdinov, Ruslan},\n  booktitle={Proceedings of the 25th International Conference on Neural Information Processing Systems (NIPS)},\n  volume={1},\n  pages={935--943},\n  year={2012},\n  %http://papers.nips.cc/paper/4615-matrix-reconstruction-with-the-local-max-norm\n  url_Paper={https://arxiv.org/pdf/1210.5196.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Stochastic Optimization for PCA and PLS.\n \n \n \n \n\n\n \n Arora, R.; Cotter, A.; Livescu, K.; and Srebro, N.\n\n\n \n\n\n\n In 2012 50th Annual Allerton Conference on Communication, Control, and Computing, pages 861–868, 2012. Institute of Electrical and Electronics Engineers (IEEE)\n \n\n\n\n
\n\n\n\n \n \n \"Stochastic paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 7 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2012arorastochastic,\n  title={Stochastic Optimization for PCA and PLS},\n  author={Arora, Raman and Cotter, Andrew and Livescu, Karen and Srebro, Nathan},\n  booktitle={2012 50th Annual Allerton Conference on Communication, Control, and Computing},\n  pages={861--868},\n  year={2012},\n  organization={Institute of Electrical and Electronics Engineers (IEEE)},\n  url_Paper={https://ieeexplore.ieee.org/abstract/document/6483308}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Minimizing the Misclassification Error Rate using a Surrogate Convex Loss.\n \n \n \n \n\n\n \n Ben-David, S.; Loker, D.; Srebro, N.; and Sridharan, K.\n\n\n \n\n\n\n In Proceedings of the 29th International Conference on Machine Learning (ICML), pages 83–90, 2012. \n \n\n\n\n
\n\n\n\n \n \n \"Minimizing paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2012benminimizing,\n  title={Minimizing the Misclassification Error Rate using a Surrogate Convex Loss},\n  author={Ben-David, Shai and Loker, David and Srebro, Nathan and Sridharan, Karthik},\n  booktitle={Proceedings of the 29th International Conference on Machine Learning (ICML)},\n  pages={83--90},\n  year={2012},\n  %https://dl.acm.org/doi/abs/10.5555/3042573.3042588\n  url_Paper={https://arxiv.org/ftp/arxiv/papers/1206/1206.6442.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The Best of Both Worlds: Stochastic and Adversarial Bandits.\n \n \n \n \n\n\n \n Bubeck, S.; and Slivkins, A.\n\n\n \n\n\n\n In Mannor, S.; Srebro, N.; and Williamson, R., editor(s), Proceedings of the 25th Annual Conference on Learning Theory (COLT), volume PMLR 23, pages 42.1–42.23, 2012. \n \n\n\n\n
\n\n\n\n \n \n \"The paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2012bubeckbest,\n  title={The Best of Both Worlds: Stochastic and Adversarial Bandits},\n  author={Bubeck, S{\\'e}bastien and Slivkins, Aleksandrs},\n  booktitle={Proceedings of the 25th Annual Conference on Learning Theory (COLT)},\n  volume={PMLR 23},\n  pages={42.1--42.23},\n  year={2012},\n  editor={Mannor, Shie and Srebro, Nathan and Williamson, Robert},\n  %http://proceedings.mlr.press/v23/bubeck12b.html\n  url_Paper={https://arxiv.org/pdf/1202.4473.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Towards Minimax Policies for Online Linear Optimization with Bandit Feedback.\n \n \n \n \n\n\n \n Bubeck, S.; Cesa-Bianchi, N.; and Kakade, S.\n\n\n \n\n\n\n In Shie, M.; Srebro, N.; and Williamson, R., editor(s), Proceedings of the 25th Annual Conference on Learning Theory (COLT), volume PMLR 23, pages 41.1–41.14, 2012. \n \n\n\n\n
\n\n\n\n \n \n \"Towards paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2012bubecltowards,\n  title={Towards Minimax Policies for Online Linear Optimization with Bandit Feedback},\n  author={Bubeck, S{\\'e}bastien and Cesa-Bianchi, Nicolo and Kakade, Sham},\n  booktitle={Proceedings of the 25th Annual Conference on Learning Theory (COLT)},\n  volume={PMLR 23},\n  pages={41.1--41.14},\n  year={2012},\n  editor={Mannor Shie and Srebro, Nathan and Williamson, Robert},\n  %http://proceedings.mlr.press/v23/bubeck12a.html\n  url_Paper={https://arxiv.org/pdf/1202.3079.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Toward a Noncommutative Arithmetic-geometric Mean Inequality: Conjectures, Case-studies, and Consequences.\n \n \n \n \n\n\n \n Recht, B.; and Ré, C.\n\n\n \n\n\n\n In Mannor, S.; Srebro, N.; and Williamson, R., editor(s), Proceedings of the 25th Annual Conference on Learning Theory (COLT), volume PMLR 23, pages 11.1–11.24, 2012. \n \n\n\n\n
\n\n\n\n \n \n \"Toward paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2012rechttoward,\n  title={Toward a Noncommutative Arithmetic-geometric Mean Inequality: Conjectures, Case-studies, and Consequences},\n  author={Recht, Benjamin and R{\\'e}, Christopher},\n  booktitle={Proceedings of the 25th Annual Conference on Learning Theory (COLT)},\n  volume={PMLR 23},\n  pages={11.1--11.24},\n  year={2012},\n  editor={Mannor, Shie and Srebro, Nathan and Williamson, Robert},\n  url_Paper={http://proceedings.mlr.press/v23/recht12.html}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n PRISMA: PRoximal Iterative SMoothing Algorithm.\n \n \n \n \n\n\n \n Orabona, F.; Argyriou, A.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2012.\n \n\n\n\n
\n\n\n\n \n \n \"PRISMA: paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 12 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2012orabonaprisma,\n  title={PRISMA: PRoximal Iterative SMoothing Algorithm},\n  author={Orabona, Francesco and Argyriou, Andreas and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2012},\n  url_Paper={https://arxiv.org/pdf/1206.2372.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The Kernelized Stochastic Batch Perceptron.\n \n \n \n \n\n\n \n Cotter, A.; Shalev-Shwartz, S.; and Srebro, N.\n\n\n \n\n\n\n Proceedings of the 29th International Conference on Machine Learning (ICML),739–746. 2012.\n \n\n\n\n
\n\n\n\n \n \n \"The paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2012cotterkernelized,\n  title={The Kernelized Stochastic Batch Perceptron},\n  author={Cotter, Andrew and Shalev-Shwartz, Shai and Srebro, Nathan},\n  journal={Proceedings of the 29th International Conference on Machine Learning (ICML)},\n  pages={739--746},\n  year={2012},\n  %https://dl.acm.org/doi/abs/10.5555/3042573.3042670\n  url_Paper={https://arxiv.org/pdf/1204.0566.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Approximate Inference by Intersecting Semidefinite Bound and Local Polytope.\n \n \n \n \n\n\n \n Peng, J.; Hazan, T.; Srebro, N.; and Xu, J.\n\n\n \n\n\n\n In Proceedings of the 15th International Conference on Artificial Intelligence and Statistics (AISTATS), volume PMLR 22, pages 868–876, 2012. \n \n\n\n\n
\n\n\n\n \n \n \"Approximate paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2012pengapproximate,\n  title={Approximate Inference by Intersecting Semidefinite Bound and Local Polytope},\n  author={Peng, Jian and Hazan, Tamir and Srebro, Nathan and Xu, Jinbo},\n  booktitle={Proceedings of the 15th International Conference on Artificial Intelligence and Statistics (AISTATS)},\n  volume={PMLR 22},\n  pages={868--876},\n  year={2012},\n  url_Paper={http://proceedings.mlr.press/v22/peng12.html}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Clustering using Max-norm Constrained Optimization.\n \n \n \n \n\n\n \n Jalali, A.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 29th International Conference on Machine Learning (ICML), pages 1579–1586, 2012. \n \n\n\n\n
\n\n\n\n \n \n \"Clustering paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2012jalaliclustering,\n  title={Clustering using Max-norm Constrained Optimization},\n  author={Jalali, Ali and Srebro, Nathan},\n  booktitle={Proceedings of the 29th International Conference on Machine Learning (ICML)},\n  pages={1579--1586},\n  year={2012},\n  %https://dl.acm.org/doi/abs/10.5555/3042573.3042775\n  url_Paper={https://arxiv.org/pdf/1202.5598.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Semi-supervised Learning with Density Based Distances.\n \n \n \n \n\n\n \n Bijral, A.; Ratliff, N.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 27th Conference on Uncertainty in Artificial Intelligence (UAI), pages 43–50, 2012. \n \n\n\n\n
\n\n\n\n \n \n \"Semi-supervised paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2012bijralsemi,\n  title={Semi-supervised Learning with Density Based Distances},\n  author={Bijral, Avleen and Ratliff, Nathan and Srebro, Nathan},\n  booktitle={Proceedings of the 27th Conference on Uncertainty in Artificial Intelligence (UAI)},\n  pages={43--50},\n  year={2012},\n  %https://dl.acm.org/doi/abs/10.5555/3020548.3020555\n  url_Paper={https://arxiv.org/ftp/arxiv/papers/1202/1202.3702.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On Convex Optimization, Fat Shattering and Learning.\n \n \n \n \n\n\n \n Srebro, N.; and Sridharan, K.\n\n\n \n\n\n\n 2012.\n \n\n\n\n
\n\n\n\n \n \n \"On paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{2012srebroconvex,\n  title={On Convex Optimization, Fat Shattering and Learning},\n  author={Srebro, Nathan and Sridharan, Karthik},\n  year={2012},\n  url_Paper={https://ttic.uchicago.edu/~karthik/optfat.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Commentary on \"Near-Optimal Algorithms for Online Matrix Prediction\".\n \n \n \n \n\n\n \n Foygel, R.\n\n\n \n\n\n\n In Mannor, S.; Srebro, N.; and Williamson, R., editor(s), Proceedings of the 25th Annual Conference on Learning Theory (COLT), volume PMLR 23, pages 38.14–38.17, 2012. \n \n\n\n\n
\n\n\n\n \n \n \"Commentary paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2012foygelcommentary,\n  title={Commentary on "Near-Optimal Algorithms for Online Matrix Prediction"},\n  author={Foygel, Rina},\n  booktitle={Proceedings of the 25th Annual Conference on Learning Theory (COLT)},\n  volume={PMLR 23},\n  pages={38.14--38.17},\n  year={2012},\n  editor={Mannor, Shie and Srebro, Nathan and Williamson, Robert},\n  url_Paper={http://proceedings.mlr.press/v23/foygel12/foygel12.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Sparse Prediction with the $ k $-Support Norm.\n \n \n \n \n\n\n \n Argyriou, A.; Foygel, R.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 25th International Conference on Neural Information Processing Systems (NIPS), volume 1, pages 1457–1465, 2012. \n \n\n\n\n
\n\n\n\n \n \n \"Sparse paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 12 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2012argyriousparse,\n  title={Sparse Prediction with the $ k $-Support Norm},\n  author={Argyriou, Andreas and Foygel, Rina and Srebro, Nathan},\n  booktitle={Proceedings of the 25th International Conference on Neural Information Processing Systems (NIPS)},\n  volume={1},\n  pages={1457--1465},\n  year={2012},\n  %http://papers.nips.cc/paper/4537-sparse-prediction-with-the-k-support-norm\n  url_Paper={https://arxiv.org/pdf/1204.5043.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Matrix Reconstruction with the Local Max Norm.\n \n \n \n \n\n\n \n Foygel, R.; Srebro, N.; and Salakhutdinov, R.\n\n\n \n\n\n\n In Proceedings of the 25th International Conference on Neural Information Processing Systems (NIPS), volume 1, pages 935–943, 2012. \n \n\n\n\n
\n\n\n\n \n \n \"Matrix paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2012foygelmatrix,\n  title={Matrix Reconstruction with the Local Max Norm},\n  author={Foygel, Rina and Srebro, Nathan and Salakhutdinov, Ruslan},\n  booktitle={Proceedings of the 25th International Conference on Neural Information Processing Systems (NIPS)},\n  volume={1},\n  pages={935--943},\n  year={2012},\n  %http://papers.nips.cc/paper/4615-matrix-reconstruction-with-the-local-max-norm\n  url_Paper={https://arxiv.org/pdf/1210.5196.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2011\n \n \n (28)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Concentration-Based Guarantees for Low-Rank Matrix Reconstruction.\n \n \n \n \n\n\n \n Foygel, R.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 24th Annual Conference on Learning Theory (COLT), volume PMLR 19, pages 315–340, 2011. \n \n\n\n\n
\n\n\n\n \n \n \"Concentration-Based paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2011foygelconcentration,\n  title={Concentration-Based Guarantees for Low-Rank Matrix Reconstruction},\n  author={Foygel, Rina and Srebro, Nathan},\n  booktitle={Proceedings of the 24th Annual Conference on Learning Theory (COLT)},\n  volume={PMLR 19},\n  pages={315--340},\n  year={2011},\n  %http://proceedings.mlr.press/v19/foygel11a.html\n  url_Paper={https://arxiv.org/pdf/1102.3923.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Explicit Approximations of the Gaussian Kernel.\n \n \n \n \n\n\n \n Cotter, A.; Keshet, J.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2011.\n \n\n\n\n
\n\n\n\n \n \n \"Explicit paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2011cotterexplicit,\n  title={Explicit Approximations of the Gaussian Kernel},\n  author={Cotter, Andrew and Keshet, Joseph and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2011},\n  url_Paper={https://arxiv.org/pdf/1109.4603.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A GPU-Tailored Approach for Training Kernelized SVMs.\n \n \n \n \n\n\n \n Cotter, A.; Srebro, N.; and Keshet, J.\n\n\n \n\n\n\n In Proceedings of the 17th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pages 805–813, 2011. \n \n\n\n\n
\n\n\n\n \n \n \"A paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2011cottergpu,\n  title={A GPU-Tailored Approach for Training Kernelized SVMs},\n  author={Cotter, Andrew and Srebro, Nathan and Keshet, Joseph},\n  booktitle={Proceedings of the 17th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining},\n  pages={805--813},\n  year={2011},\n  url_Paper={https://dl.acm.org/doi/abs/10.1145/2020408.2020548}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An Iterated Graph Laplacian Approach for Ranking on Manifolds.\n \n \n \n \n\n\n \n Zhou, X.; Belkin, M.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 17th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pages 877–885, 2011. \n \n\n\n\n
\n\n\n\n \n \n \"An paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2011zhouiterated,\n  title={An Iterated Graph Laplacian Approach for Ranking on Manifolds},\n  author={Zhou, Xueyuan and Belkin, Mikhail and Srebro, Nathan},\n  booktitle={Proceedings of the 17th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining},\n  pages={877--885},\n  year={2011},\n  url_Paper={https://dl.acm.org/doi/abs/10.1145/2020408.2020556}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fast-rate and Optimistic-rate Error Bounds for L1-regularized Regression.\n \n \n \n \n\n\n \n Foygel, R.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2011.\n \n\nThe newest version on arXiv was uploaded in 2018\n\n
\n\n\n\n \n \n \"Fast-rate paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2011foygelfast,\n  title={Fast-rate and Optimistic-rate Error Bounds for L1-regularized Regression},\n  author={Foygel, Rina and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2011},\n  bibbase_note={The newest version on arXiv was uploaded in 2018},\n  url_Paper={https://arxiv.org/pdf/1108.0373.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Error Analysis of Laplacian Eigenmaps for Semi-supervised Learning.\n \n \n \n \n\n\n \n Zhou, X.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 14th International Conference on Artificial Intelligence and Statistics (AISTATS), volume PMLR 15, pages 901–908, 2011. \n \n\n\n\n
\n\n\n\n \n \n \"Error paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2011zhouerror,\n  title={Error Analysis of Laplacian Eigenmaps for Semi-supervised Learning},\n  author={Zhou, Xueyuan and Srebro, Nathan},\n  booktitle={Proceedings of the 14th International Conference on Artificial Intelligence and Statistics (AISTATS)},\n  volume={PMLR 15},\n  pages={901--908},\n  year={2011},\n  url_Paper={http://proceedings.mlr.press/v15/zhou11c.html}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Pegasos: Primal Estimated Sub-GrAdient SOlver for SVM.\n \n \n \n \n\n\n \n Shalev-Shwartz, S.; Singer, Y.; Srebro, N.; and Cotter, A.\n\n\n \n\n\n\n Mathematical Programming, 127(1): 3–30. 2011.\n \n\nICML 2017 Test of Time Award Honorable Mention An earlier version appeared in ICML 2007\n\n
\n\n\n\n \n \n \"Pegasos: paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2011shalevpegasos,\n  title={Pegasos: Primal Estimated Sub-GrAdient SOlver for SVM},\n  author={Shalev-Shwartz, Shai and Singer, Yoram and Srebro, Nathan and Cotter, Andrew},\n  journal={Mathematical Programming},\n  volume={127},\n  number={1},\n  pages={3--30},\n  year={2011},\n  publisher={Springer},\n  bibbase_note={ICML 2017 Test of Time Award Honorable Mention An earlier version appeared in ICML 2007},\n  url_Paper={https://link.springer.com/content/pdf/10.1007/s10107-010-0420-4.pdf}\n}%%%\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Theoretical Basis for “More Data Less Work”.\n \n \n \n \n\n\n \n Srebro, N.; and Sridharan, K.\n\n\n \n\n\n\n In NIPS Workshop on Computational Trade-offs in Statistical Learning, 2011. \n \n\n\n\n
\n\n\n\n \n \n \"Theoretical paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2011srebrotheoretical,\n  title={Theoretical Basis for “More Data Less Work”},\n  author={Srebro, Nathan and Sridharan, Karthik},\n  booktitle={NIPS Workshop on Computational Trade-offs in Statistical Learning},\n  year={2011},\n  url_Paper={https://4dc14f7c-a-62cb3a1a-s-sites.googlegroups.com/site/costnips/abstracts/cost2011_submission_7.pdf?attachauth=ANoY7cqaVDzHIksVmh1UfvrmS6Pr4wL65Xq_9_KcGGXyfUSFAKn3RYhY6o3fq9kSs6nbAPBL9KmQ5BIuE-CiJlCT1EJ2mHD9AM1ru2QOfESQ1WdwkoHyB3fhKfwohX8b9UEtI767pXGMQz5Ss7ZJzf-6QNi72tuj1NycN5nEU83y2sinf3OtdM8BZIguX-GJMGGtOUbkfpvHJudEE62_cxp6IRoXEHJs6410twiGCSaFwT7UYhw_EZk%3D&attredirects=0}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fixed-structure $H_∞$ Controller Design Based on Distributed Probabilistic Model-building Genetic Algorithm.\n \n \n \n \n\n\n \n Kawanishi, M.; Narikiyo, T.; Kaneko, T.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the IASTED International Conference on Intelligent Systems and Control, pages 127–132, 2011. \n \n\n\n\n
\n\n\n\n \n \n \"Fixed-structure paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2011kawanishifixed,\n  title={Fixed-structure $H_\\infty$ Controller Design Based on Distributed Probabilistic Model-building Genetic Algorithm},\n  author={Kawanishi, Michihiro and Narikiyo, Tatsuo and Kaneko, Tomohiro and Srebro, Nathan},\n  booktitle={Proceedings of the IASTED International Conference on Intelligent Systems and Control},\n  pages={127--132},\n  year={2011},\n  url_Paper={http://www.actapress.com/Abstract.aspx?paperId=452603}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Universal Learning vs. No Free Lunch Results.\n \n \n \n \n\n\n \n Ben-David, S.; Srebro, N.; and Urner, R.\n\n\n \n\n\n\n In Philosophy and Machine Learning Workshop NIPS, 2011. \n \n\n\n\n
\n\n\n\n \n \n \"Universal paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2011benuniversal,\n  title={Universal Learning vs. No Free Lunch Results},\n  author={Ben-David, Shai and Srebro, Nathan and Urner, Ruth},\n  booktitle={Philosophy and Machine Learning Workshop NIPS},\n  year={2011},\n  url_Paper={https://www.dsi.unive.it/PhiMaLe2011/Abstract/Ben-David_Srebro_Urner.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Beating SGD: Learning SVMs in Sublinear Time.\n \n \n \n \n\n\n \n Hazan, E.; Koren, T.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 24th International Conference on Neural Information Processing Systems (NIPS), pages 1233–1241, 2011. \n \n\n\n\n
\n\n\n\n \n \n \"Beating paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2011hazanbeating,\n  title={Beating SGD: Learning SVMs in Sublinear Time},\n  author={Hazan, Elad and Koren, Tomer and Srebro, NatHAN},\n  booktitle={Proceedings of the 24th International Conference on Neural Information Processing Systems (NIPS)},\n  pages={1233--1241},\n  year={2011},\n  url_Paper={http://papers.nips.cc/paper/4359-beating-sgd-learning-svms-in-sublinear-time}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Better Mini-batch Algorithms via Accelerated Gradient Methods.\n \n \n \n \n\n\n \n Cotter, A.; Shamir, O.; Srebro, N.; and Sridharan, K.\n\n\n \n\n\n\n In Proceedings of the 24th International Conference on Neural Information Processing Systems (NIPS), pages 1647–1655, 2011. \n \n\n\n\n
\n\n\n\n \n \n \"Better paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2011cotterbetter,\n  title={Better Mini-batch Algorithms via Accelerated Gradient Methods},\n  author={Cotter, Andrew and Shamir, Ohad and Srebro, Nathan and Sridharan, Karthik},\n  booktitle={Proceedings of the 24th International Conference on Neural Information Processing Systems (NIPS)},\n  pages={1647--1655},\n  year={2011},\n  %http://papers.nips.cc/paper/4432-better-mini-batch-algorithms-via-accelerated-gradient-meth\n  url_Paper={https://arxiv.org/pdf/1106.4574.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Learning with the Weighted Trace-norm under Arbitrary Sampling Distributions.\n \n \n \n \n\n\n \n Foygel, R.; Shamir, O.; Srebro, N.; and Salakhutdinov, R.\n\n\n \n\n\n\n In Proceedings of the 24th International Conference on Neural Information Processing Systems (NIPS), pages 2133–2141, 2011. \n \n\n\n\n
\n\n\n\n \n \n \"Learning paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2011foygellearning,\n  title={Learning with the Weighted Trace-norm under Arbitrary Sampling Distributions},\n  author={Foygel, Rina and Shamir, Ohad and Srebro, Nathan and Salakhutdinov, Ruslan},\n  booktitle={Proceedings of the 24th International Conference on Neural Information Processing Systems (NIPS)},\n  pages={2133--2141},\n  year={2011},\n  %http://papers.nips.cc/paper/4303-learning-with-the-weighted-trace-norm-under-arbitrary-sampling-distributions\n  url_Paper={https://arxiv.org/pdf/1106.4251.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On the Universality of Online Mirror Descent.\n \n \n \n \n\n\n \n Srebro, N.; Sridharan, K.; and Tewari, A.\n\n\n \n\n\n\n In Proceedings of the 24th International Conference on Neural Information Processing Systems (NIPS), pages 2645–2653, 2011. \n \n\n\n\n
\n\n\n\n \n \n \"On paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2011srebrouniversality,\n  title={On the Universality of Online Mirror Descent},\n  author={Srebro, Nathan and Sridharan, Karthik and Tewari, Ambuj},\n  booktitle={Proceedings of the 24th International Conference on Neural Information Processing Systems (NIPS)},\n  pages={2645--2653},\n  year={2011},\n  %http://papers.nips.cc/paper/4413-on-the-universality-of-online-mirror-descent\n  url_Paper={https://arxiv.org/pdf/1107.4080.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Concentration-Based Guarantees for Low-Rank Matrix Reconstruction.\n \n \n \n \n\n\n \n Foygel, R.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 24th Annual Conference on Learning Theory (COLT), volume PMLR 19, pages 315–340, 2011. \n \n\n\n\n
\n\n\n\n \n \n \"Concentration-Based paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2011foygelconcentration,\n  title={Concentration-Based Guarantees for Low-Rank Matrix Reconstruction},\n  author={Foygel, Rina and Srebro, Nathan},\n  booktitle={Proceedings of the 24th Annual Conference on Learning Theory (COLT)},\n  volume={PMLR 19},\n  pages={315--340},\n  year={2011},\n  %http://proceedings.mlr.press/v19/foygel11a.html\n  url_Paper={https://arxiv.org/pdf/1102.3923.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Explicit Approximations of the Gaussian Kernel.\n \n \n \n \n\n\n \n Cotter, A.; Keshet, J.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2011.\n \n\n\n\n
\n\n\n\n \n \n \"Explicit paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2011cotterexplicit,\n  title={Explicit Approximations of the Gaussian Kernel},\n  author={Cotter, Andrew and Keshet, Joseph and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2011},\n  url_Paper={https://arxiv.org/pdf/1109.4603.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A GPU-Tailored Approach for Training Kernelized SVMs.\n \n \n \n \n\n\n \n Cotter, A.; Srebro, N.; and Keshet, J.\n\n\n \n\n\n\n In Proceedings of the 17th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pages 805–813, 2011. \n \n\n\n\n
\n\n\n\n \n \n \"A paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2011cottergpu,\n  title={A GPU-Tailored Approach for Training Kernelized SVMs},\n  author={Cotter, Andrew and Srebro, Nathan and Keshet, Joseph},\n  booktitle={Proceedings of the 17th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining},\n  pages={805--813},\n  year={2011},\n  url_Paper={https://dl.acm.org/doi/abs/10.1145/2020408.2020548}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An Iterated Graph Laplacian Approach for Ranking on Manifolds.\n \n \n \n \n\n\n \n Zhou, X.; Belkin, M.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 17th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pages 877–885, 2011. \n \n\n\n\n
\n\n\n\n \n \n \"An paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2011zhouiterated,\n  title={An Iterated Graph Laplacian Approach for Ranking on Manifolds},\n  author={Zhou, Xueyuan and Belkin, Mikhail and Srebro, Nathan},\n  booktitle={Proceedings of the 17th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining},\n  pages={877--885},\n  year={2011},\n  url_Paper={https://dl.acm.org/doi/abs/10.1145/2020408.2020556}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fast-rate and Optimistic-rate Error Bounds for L1-regularized Regression.\n \n \n \n \n\n\n \n Foygel, R.; and Srebro, N.\n\n\n \n\n\n\n arXiv Preprint. 2011.\n \n\n\\newline The newest version on arXiv was uploaded in 2018\n\n
\n\n\n\n \n \n \"Fast-rate paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2011foygelfast,\n  title={Fast-rate and Optimistic-rate Error Bounds for L1-regularized Regression},\n  author={Foygel, Rina and Srebro, Nathan},\n  journal={arXiv Preprint},\n  year={2011},\n  bibbase_note={\\newline The newest version on arXiv was uploaded in 2018},\n  url_Paper={https://arxiv.org/pdf/1108.0373.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Error Analysis of Laplacian Eigenmaps for Semi-supervised Learning.\n \n \n \n \n\n\n \n Zhou, X.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 14th International Conference on Artificial Intelligence and Statistics (AISTATS), volume PMLR 15, pages 901–908, 2011. \n \n\n\n\n
\n\n\n\n \n \n \"Error paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2011zhouerror,\n  title={Error Analysis of Laplacian Eigenmaps for Semi-supervised Learning},\n  author={Zhou, Xueyuan and Srebro, Nathan},\n  booktitle={Proceedings of the 14th International Conference on Artificial Intelligence and Statistics (AISTATS)},\n  volume={PMLR 15},\n  pages={901--908},\n  year={2011},\n  url_Paper={http://proceedings.mlr.press/v15/zhou11c.html}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Pegasos: Primal Estimated Sub-GrAdient SOlver for SVM.\n \n \n \n \n\n\n \n Shalev-Shwartz, S.; Singer, Y.; Srebro, N.; and Cotter, A.\n\n\n \n\n\n\n Mathematical Programming, 127(1): 3–30. 2011.\n \n\n\\newline ICML 2017 Test of Time Award Honorable Mention An earlier version appeared in ICML 2007\n\n
\n\n\n\n \n \n \"Pegasos: paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2011shalevpegasos,\n  title={Pegasos: Primal Estimated Sub-GrAdient SOlver for SVM},\n  author={Shalev-Shwartz, Shai and Singer, Yoram and Srebro, Nathan and Cotter, Andrew},\n  journal={Mathematical Programming},\n  volume={127},\n  number={1},\n  pages={3--30},\n  year={2011},\n  publisher={Springer},\n  bibbase_note={\\newline ICML 2017 Test of Time Award Honorable Mention An earlier version appeared in ICML 2007},\n  url_Paper={https://link.springer.com/content/pdf/10.1007/s10107-010-0420-4.pdf}\n}%%%\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Theoretical Basis for “More Data Less Work”.\n \n \n \n \n\n\n \n Srebro, N.; and Sridharan, K.\n\n\n \n\n\n\n In NIPS Workshop on Computational Trade-offs in Statistical Learning, 2011. \n \n\n\n\n
\n\n\n\n \n \n \"Theoretical paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2011srebrotheoretical,\n  title={Theoretical Basis for “More Data Less Work”},\n  author={Srebro, Nathan and Sridharan, Karthik},\n  booktitle={NIPS Workshop on Computational Trade-offs in Statistical Learning},\n  year={2011},\n  url_Paper={https://4dc14f7c-a-62cb3a1a-s-sites.googlegroups.com/site/costnips/abstracts/cost2011_submission_7.pdf?attachauth=ANoY7cqaVDzHIksVmh1UfvrmS6Pr4wL65Xq_9_KcGGXyfUSFAKn3RYhY6o3fq9kSs6nbAPBL9KmQ5BIuE-CiJlCT1EJ2mHD9AM1ru2QOfESQ1WdwkoHyB3fhKfwohX8b9UEtI767pXGMQz5Ss7ZJzf-6QNi72tuj1NycN5nEU83y2sinf3OtdM8BZIguX-GJMGGtOUbkfpvHJudEE62_cxp6IRoXEHJs6410twiGCSaFwT7UYhw_EZk%3D&attredirects=0}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fixed-structure $H_∞$ Controller Design Based on Distributed Probabilistic Model-building Genetic Algorithm.\n \n \n \n \n\n\n \n Kawanishi, M.; Narikiyo, T.; Kaneko, T.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the IASTED International Conference on Intelligent Systems and Control, pages 127–132, 2011. \n \n\n\n\n
\n\n\n\n \n \n \"Fixed-structure paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2011kawanishifixed,\n  title={Fixed-structure $H_\\infty$ Controller Design Based on Distributed Probabilistic Model-building Genetic Algorithm},\n  author={Kawanishi, Michihiro and Narikiyo, Tatsuo and Kaneko, Tomohiro and Srebro, Nathan},\n  booktitle={Proceedings of the IASTED International Conference on Intelligent Systems and Control},\n  pages={127--132},\n  year={2011},\n  url_Paper={http://www.actapress.com/Abstract.aspx?paperId=452603}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Universal Learning vs. No Free Lunch Results.\n \n \n \n \n\n\n \n Ben-David, S.; Srebro, N.; and Urner, R.\n\n\n \n\n\n\n In Philosophy and Machine Learning Workshop NIPS, 2011. \n \n\n\n\n
\n\n\n\n \n \n \"Universal paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2011benuniversal,\n  title={Universal Learning vs. No Free Lunch Results},\n  author={Ben-David, Shai and Srebro, Nathan and Urner, Ruth},\n  booktitle={Philosophy and Machine Learning Workshop NIPS},\n  year={2011},\n  url_Paper={https://www.dsi.unive.it/PhiMaLe2011/Abstract/Ben-David_Srebro_Urner.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Beating SGD: Learning SVMs in Sublinear Time.\n \n \n \n \n\n\n \n Hazan, E.; Koren, T.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 24th International Conference on Neural Information Processing Systems (NIPS), pages 1233–1241, 2011. \n \n\n\n\n
\n\n\n\n \n \n \"Beating paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2011hazanbeating,\n  title={Beating SGD: Learning SVMs in Sublinear Time},\n  author={Hazan, Elad and Koren, Tomer and Srebro, Nathan},\n  booktitle={Proceedings of the 24th International Conference on Neural Information Processing Systems (NIPS)},\n  pages={1233--1241},\n  year={2011},\n  url_Paper={http://papers.nips.cc/paper/4359-beating-sgd-learning-svms-in-sublinear-time}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Better Mini-batch Algorithms via Accelerated Gradient Methods.\n \n \n \n \n\n\n \n Cotter, A.; Shamir, O.; Srebro, N.; and Sridharan, K.\n\n\n \n\n\n\n In Proceedings of the 24th International Conference on Neural Information Processing Systems (NIPS), pages 1647–1655, 2011. \n \n\n\n\n
\n\n\n\n \n \n \"Better paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2011cotterbetter,\n  title={Better Mini-batch Algorithms via Accelerated Gradient Methods},\n  author={Cotter, Andrew and Shamir, Ohad and Srebro, Nathan and Sridharan, Karthik},\n  booktitle={Proceedings of the 24th International Conference on Neural Information Processing Systems (NIPS)},\n  pages={1647--1655},\n  year={2011},\n  %http://papers.nips.cc/paper/4432-better-mini-batch-algorithms-via-accelerated-gradient-meth\n  url_Paper={https://arxiv.org/pdf/1106.4574.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Learning with the Weighted Trace-norm under Arbitrary Sampling Distributions.\n \n \n \n \n\n\n \n Foygel, R.; Shamir, O.; Srebro, N.; and Salakhutdinov, R.\n\n\n \n\n\n\n In Proceedings of the 24th International Conference on Neural Information Processing Systems (NIPS), pages 2133–2141, 2011. \n \n\n\n\n
\n\n\n\n \n \n \"Learning paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2011foygellearning,\n  title={Learning with the Weighted Trace-norm under Arbitrary Sampling Distributions},\n  author={Foygel, Rina and Shamir, Ohad and Srebro, Nathan and Salakhutdinov, Ruslan},\n  booktitle={Proceedings of the 24th International Conference on Neural Information Processing Systems (NIPS)},\n  pages={2133--2141},\n  year={2011},\n  %http://papers.nips.cc/paper/4303-learning-with-the-weighted-trace-norm-under-arbitrary-sampling-distributions\n  url_Paper={https://arxiv.org/pdf/1106.4251.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On the Universality of Online Mirror Descent.\n \n \n \n \n\n\n \n Srebro, N.; Sridharan, K.; and Tewari, A.\n\n\n \n\n\n\n In Proceedings of the 24th International Conference on Neural Information Processing Systems (NIPS), pages 2645–2653, 2011. \n \n\n\n\n
\n\n\n\n \n \n \"On paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2011srebrouniversality,\n  title={On the Universality of Online Mirror Descent},\n  author={Srebro, Nathan and Sridharan, Karthik and Tewari, Ambuj},\n  booktitle={Proceedings of the 24th International Conference on Neural Information Processing Systems (NIPS)},\n  pages={2645--2653},\n  year={2011},\n  %http://papers.nips.cc/paper/4413-on-the-universality-of-online-mirror-descent\n  url_Paper={https://arxiv.org/pdf/1107.4080.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2010\n \n \n (20)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Optimistic Rates for Learning with a Smooth Loss.\n \n \n \n \n\n\n \n Srebro, N.; Sridharan, K.; and Tewari, A.\n\n\n \n\n\n\n arXiv Preprint. 2010.\n \n\n\n\n
\n\n\n\n \n \n \"Optimistic paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2010srebrooptimistic,\n  title={Optimistic Rates for Learning with a Smooth Loss},\n  author={Srebro, Nathan and Sridharan, Karthik and Tewari, Ambuj},\n  journal={arXiv Preprint},\n  year={2010},\n  url_Paper={https://arxiv.org/pdf/1009.3896.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Smoothness, Low-Noise and Fast Rates: Supplementary Material.\n \n \n \n \n\n\n \n Srebro, N.; Sridharan, K.; and Tewari, A.\n\n\n \n\n\n\n In Proceedings of the 23rd International Conference on Neural Information Processing Systems (NIPS), volume 2, pages 2199–2207, 2010. \n \n\n\n\n
\n\n\n\n \n \n \"Smoothness, paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2010srebrosmoothness,\n  title={Smoothness, Low-Noise and Fast Rates: Supplementary Material},\n  author={Srebro, Nathan and Sridharan, Karthik and Tewari, Ambuj},\n  booktitle={Proceedings of the 23rd International Conference on Neural Information Processing Systems (NIPS)},\n  volume={2},\n  pages={2199--2207},\n  year={2010},\n  url_Paper={http://papers.nips.cc/paper/3894-smoothness-low-noise-and-fast-rates}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Reducing Label Complexity by Learning From Bags.\n \n \n \n \n\n\n \n Sabato, S.; Srebro, N.; and Tishby, N.\n\n\n \n\n\n\n In Proceedings of the 13th International Conference on Artificial Intelligence and Statistics (AISTATS), volume 9, pages 685–692, 2010. \n \n\n\n\n
\n\n\n\n \n \n \"Reducing paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2010sabatoreducing,\n  title={Reducing Label Complexity by Learning From Bags},\n  author={Sabato, Sivan and Srebro, Nathan and Tishby, Naftali},\n  booktitle={Proceedings of the 13th International Conference on Artificial Intelligence and Statistics (AISTATS)},\n  volume={9},\n  pages={685--692},\n  year={2010},\n  url_Paper={http://proceedings.mlr.press/v9/sabato10a.html}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Note on refined dudley integral covering number bound.\n \n \n \n \n\n\n \n Srebro, N.; and Sridharan, K.\n\n\n \n\n\n\n 2010.\n \n\n\n\n
\n\n\n\n \n \n \"Note paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{2010srebronote,\n  title={Note on refined dudley integral covering number bound},\n  author={Srebro, Nathan and Sridharan, Karthik},\n  year={2010},\n  url_Paper={https://www.cs.cornell.edu/~sridharan/dudley.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On the Interaction Between Norm and Dimensionality: Multiple Regimes in Learning.\n \n \n \n \n\n\n \n Liang, P.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 27th International Conference on Machine Learning (ICML), pages 647–654, 2010. \n \n\n\n\n
\n\n\n\n \n \n \"On paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2010lianginteraction,\n  title={On the Interaction Between Norm and Dimensionality: Multiple Regimes in Learning},\n  author={Liang, Percy and Srebro, Nathan},\n  booktitle={Proceedings of the 27th International Conference on Machine Learning (ICML)},\n  pages={647--654},\n  year={2010},\n  url_Paper={https://icml.cc/Conferences/2010/papers/601.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Trading Accuracy for Sparsity in Optimization Problems with Sparsity Constraints.\n \n \n \n \n\n\n \n Shalev-Shwartz, S.; Srebro, N.; and Zhang, T.\n\n\n \n\n\n\n SIAM Journal on Optimization, 20(6): 2807–2832. 2010.\n \n\n\n\n
\n\n\n\n \n \n \"Trading paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2010shalevtrading,\n  title={Trading Accuracy for Sparsity in Optimization Problems with Sparsity Constraints},\n  author={Shalev-Shwartz, Shai and Srebro, Nathan and Zhang, Tong},\n  journal={SIAM Journal on Optimization},\n  volume={20},\n  number={6},\n  pages={2807--2832},\n  year={2010},\n  publisher={Society for Industrial and Applied Mathematics},\n  url_Paper={https://epubs.siam.org/doi/pdf/10.1137/090759574}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Tight Sample Complexity of Large-margin Learning.\n \n \n \n \n\n\n \n Sabato, S.; Srebro, N.; and Tishby, N.\n\n\n \n\n\n\n In Proceedings of the 23rd International Conference on Neural Information Processing Systems (NIPS), volume 2, pages 2038–2046, 2010. \n \n\n\n\n
\n\n\n\n \n \n \"Tight paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2010sabatotight,\n  title={Tight Sample Complexity of Large-margin Learning},\n  author={Sabato, Sivan and Srebro, Nathan and Tishby, Naftali},\n  booktitle={Proceedings of the 23rd International Conference on Neural Information Processing Systems (NIPS)},\n  volume={2},\n  pages={2038--2046},\n  year={2010},\n  %http://papers.nips.cc/paper/4032-tight-sample-complexity-of-large-margin-learning\n  url_Paper={https://arxiv.org/pdf/1011.5053.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Learnability, Stability and Uniform Convergence.\n \n \n \n \n\n\n \n Shalev-Shwartz, S.; Shamir, O.; Srebro, N.; and Sridharan, K.\n\n\n \n\n\n\n Journal of Machine Learning Research, 11(90): 2635–2670. 2010.\n \n\n\n\n
\n\n\n\n \n \n \"Learnability, paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2010shalevlearnability,\n  title={Learnability, Stability and Uniform Convergence},\n  author={Shalev-Shwartz, Shai and Shamir, Ohad and Srebro, Nathan and Sridharan, Karthik},\n  journal={Journal of Machine Learning Research},\n  volume={11},\n  number={90},\n  pages={2635--2670},\n  year={2010},\n  url_Paper={http://www.jmlr.org/papers/v11/shalev-shwartz10a.html}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Practical Large-Scale Optimization for Max-norm Regularization.\n \n \n \n \n\n\n \n Lee, J.; Recht, B.; Salakhutdinov, R.; Srebro, N.; and Tropp, J. A.\n\n\n \n\n\n\n In Proceedings of the 23rd International Conference on Neural Information Processing Systems (NIPS), volume 1, pages 1297–1305, 2010. \n \n\n\n\n
\n\n\n\n \n \n \"Practical paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2010leepractical,\n  title={Practical Large-Scale Optimization for Max-norm Regularization},\n  author={Lee, Jason and Recht, Benjamin and Salakhutdinov, Ruslan and Srebro, Nathan and Tropp, Joel A.},\n  booktitle={Proceedings of the 23rd International Conference on Neural Information Processing Systems (NIPS)},\n  volume={1},\n  pages={1297--1305},\n  year={2010},\n  url_Paper={http://papers.nips.cc/paper/4124-practical-large-scale-optimization-for-max-norm-regularization}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Collaborative Filtering in a Non-Uniform World: Learning with the Weighted Trace Norm.\n \n \n \n \n\n\n \n Salakhutdinov, R.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 23rd International Conference on Neural Information Processing Systems, volume 2, pages 2056–2064, 2010. \n \n\n\n\n
\n\n\n\n \n \n \"Collaborative paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2010salakhutdinovcollaborative,\n  title={Collaborative Filtering in a Non-Uniform World: Learning with the Weighted Trace Norm},\n  author={Salakhutdinov, Ruslan and Srebro, Nathan},\n  booktitle={Proceedings of the 23rd International Conference on Neural Information Processing Systems},\n  volume={2},\n  pages={2056--2064},\n  year={2010},\n  url_Paper={http://papers.nips.cc/paper/4102-collaborative-filtering-in-a-non-uniform-world-learning-with-the-weighted-trace-norm}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Optimistic Rates for Learning with a Smooth Loss.\n \n \n \n \n\n\n \n Srebro, N.; Sridharan, K.; and Tewari, A.\n\n\n \n\n\n\n arXiv Preprint. 2010.\n \n\n\n\n
\n\n\n\n \n \n \"Optimistic paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2010srebrooptimistic,\n  title={Optimistic Rates for Learning with a Smooth Loss},\n  author={Srebro, Nathan and Sridharan, Karthik and Tewari, Ambuj},\n  journal={arXiv Preprint},\n  year={2010},\n  url_Paper={https://arxiv.org/pdf/1009.3896.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Smoothness, Low-Noise and Fast Rates: Supplementary Material.\n \n \n \n \n\n\n \n Srebro, N.; Sridharan, K.; and Tewari, A.\n\n\n \n\n\n\n In Proceedings of the 23rd International Conference on Neural Information Processing Systems (NIPS), volume 2, pages 2199–2207, 2010. \n \n\n\n\n
\n\n\n\n \n \n \"Smoothness, paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2010srebrosmoothness,\n  title={Smoothness, Low-Noise and Fast Rates: Supplementary Material},\n  author={Srebro, Nathan and Sridharan, Karthik and Tewari, Ambuj},\n  booktitle={Proceedings of the 23rd International Conference on Neural Information Processing Systems (NIPS)},\n  volume={2},\n  pages={2199--2207},\n  year={2010},\n  url_Paper={http://papers.nips.cc/paper/3894-smoothness-low-noise-and-fast-rates}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Reducing Label Complexity by Learning From Bags.\n \n \n \n \n\n\n \n Sabato, S.; Srebro, N.; and Tishby, N.\n\n\n \n\n\n\n In Proceedings of the 13th International Conference on Artificial Intelligence and Statistics (AISTATS), volume 9, pages 685–692, 2010. \n \n\n\n\n
\n\n\n\n \n \n \"Reducing paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2010sabatoreducing,\n  title={Reducing Label Complexity by Learning From Bags},\n  author={Sabato, Sivan and Srebro, Nathan and Tishby, Naftali},\n  booktitle={Proceedings of the 13th International Conference on Artificial Intelligence and Statistics (AISTATS)},\n  volume={9},\n  pages={685--692},\n  year={2010},\n  url_Paper={http://proceedings.mlr.press/v9/sabato10a.html}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Note on refined dudley integral covering number bound.\n \n \n \n \n\n\n \n Srebro, N.; and Sridharan, K.\n\n\n \n\n\n\n 2010.\n \n\n\n\n
\n\n\n\n \n \n \"Note paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{2010srebronote,\n  title={Note on refined dudley integral covering number bound},\n  author={Srebro, Nathan and Sridharan, Karthik},\n  year={2010},\n  url_Paper={https://www.cs.cornell.edu/~sridharan/dudley.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On the Interaction Between Norm and Dimensionality: Multiple Regimes in Learning.\n \n \n \n \n\n\n \n Liang, P.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 27th International Conference on Machine Learning (ICML), pages 647–654, 2010. \n \n\n\n\n
\n\n\n\n \n \n \"On paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2010lianginteraction,\n  title={On the Interaction Between Norm and Dimensionality: Multiple Regimes in Learning},\n  author={Liang, Percy and Srebro, Nathan},\n  booktitle={Proceedings of the 27th International Conference on Machine Learning (ICML)},\n  pages={647--654},\n  year={2010},\n  url_Paper={https://icml.cc/Conferences/2010/papers/601.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Trading Accuracy for Sparsity in Optimization Problems with Sparsity Constraints.\n \n \n \n \n\n\n \n Shalev-Shwartz, S.; Srebro, N.; and Zhang, T.\n\n\n \n\n\n\n SIAM Journal on Optimization, 20(6): 2807–2832. 2010.\n \n\n\n\n
\n\n\n\n \n \n \"Trading paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2010shalevtrading,\n  title={Trading Accuracy for Sparsity in Optimization Problems with Sparsity Constraints},\n  author={Shalev-Shwartz, Shai and Srebro, Nathan and Zhang, Tong},\n  journal={SIAM Journal on Optimization},\n  volume={20},\n  number={6},\n  pages={2807--2832},\n  year={2010},\n  publisher={Society for Industrial and Applied Mathematics},\n  url_Paper={https://epubs.siam.org/doi/pdf/10.1137/090759574}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Tight Sample Complexity of Large-margin Learning.\n \n \n \n \n\n\n \n Sabato, S.; Srebro, N.; and Tishby, N.\n\n\n \n\n\n\n In Proceedings of the 23rd International Conference on Neural Information Processing Systems (NIPS), volume 2, pages 2038–2046, 2010. \n \n\n\n\n
\n\n\n\n \n \n \"Tight paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2010sabatotight,\n  title={Tight Sample Complexity of Large-margin Learning},\n  author={Sabato, Sivan and Srebro, Nathan and Tishby, Naftali},\n  booktitle={Proceedings of the 23rd International Conference on Neural Information Processing Systems (NIPS)},\n  volume={2},\n  pages={2038--2046},\n  year={2010},\n  %http://papers.nips.cc/paper/4032-tight-sample-complexity-of-large-margin-learning\n  url_Paper={https://arxiv.org/pdf/1011.5053.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Learnability, Stability and Uniform Convergence.\n \n \n \n \n\n\n \n Shalev-Shwartz, S.; Shamir, O.; Srebro, N.; and Sridharan, K.\n\n\n \n\n\n\n Journal of Machine Learning Research, 11(90): 2635–2670. 2010.\n \n\n\n\n
\n\n\n\n \n \n \"Learnability, paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2010shalevlearnability,\n  title={Learnability, Stability and Uniform Convergence},\n  author={Shalev-Shwartz, Shai and Shamir, Ohad and Srebro, Nathan and Sridharan, Karthik},\n  journal={Journal of Machine Learning Research},\n  volume={11},\n  number={90},\n  pages={2635--2670},\n  year={2010},\n  url_Paper={http://www.jmlr.org/papers/v11/shalev-shwartz10a.html}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Practical Large-Scale Optimization for Max-norm Regularization.\n \n \n \n \n\n\n \n Lee, J.; Recht, B.; Salakhutdinov, R.; Srebro, N.; and Tropp, J. A.\n\n\n \n\n\n\n In Proceedings of the 23rd International Conference on Neural Information Processing Systems (NIPS), volume 1, pages 1297–1305, 2010. \n \n\n\n\n
\n\n\n\n \n \n \"Practical paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2010leepractical,\n  title={Practical Large-Scale Optimization for Max-norm Regularization},\n  author={Lee, Jason and Recht, Benjamin and Salakhutdinov, Ruslan and Srebro, Nathan and Tropp, Joel A.},\n  booktitle={Proceedings of the 23rd International Conference on Neural Information Processing Systems (NIPS)},\n  volume={1},\n  pages={1297--1305},\n  year={2010},\n  url_Paper={http://papers.nips.cc/paper/4124-practical-large-scale-optimization-for-max-norm-regularization}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Collaborative Filtering in a Non-Uniform World: Learning with the Weighted Trace Norm.\n \n \n \n \n\n\n \n Salakhutdinov, R.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 23rd International Conference on Neural Information Processing Systems, volume 2, pages 2056–2064, 2010. \n \n\n\n\n
\n\n\n\n \n \n \"Collaborative paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2010salakhutdinovcollaborative,\n  title={Collaborative Filtering in a Non-Uniform World: Learning with the Weighted Trace Norm},\n  author={Salakhutdinov, Ruslan and Srebro, Nathan},\n  booktitle={Proceedings of the 23rd International Conference on Neural Information Processing Systems},\n  volume={2},\n  pages={2056--2064},\n  year={2010},\n  url_Paper={http://papers.nips.cc/paper/4102-collaborative-filtering-in-a-non-uniform-world-learning-with-the-weighted-trace-norm}\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2009\n \n \n (12)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Semi-supervised Learning with the Graph Laplacian: The Limit of Infinite Unlabelled Data.\n \n \n \n \n\n\n \n Nadler, B.; Srebro, N.; and Zhou, X.\n\n\n \n\n\n\n In Proceedings of the 22nd International Conference on Neural Information Processing Systems, pages 1330–1338, 2009. \n \n\n\n\n
\n\n\n\n \n \n \"Semi-supervised paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2009nadlersemi,\n  title={Semi-supervised Learning with the Graph Laplacian: The Limit of Infinite Unlabelled Data},\n  author={Nadler, Boaz and Srebro, Nathan and Zhou, Xueyuan},\n  booktitle={Proceedings of the 22nd International Conference on Neural Information Processing Systems},\n  pages={1330--1338},\n  year={2009},\n  url_Paper={http://papers.nips.cc/paper/3731-predicting-the-optimal-spacing-of-study-a-multiscale-context-model-of-memory}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Stochastic Convex Optimization.\n \n \n \n \n\n\n \n Shalev-Shwartz, S.; Srebro, N.; and Sridharan, K.\n\n\n \n\n\n\n In Proceedings of the 22nd Annual Conference on Learning Theory (COLT), 2009. \n \n\n\n\n
\n\n\n\n \n \n \"Stochastic paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2009shalevstochastic,\n  title={Stochastic Convex Optimization},\n  author={Shalev-Shwartz, Shai and Srebro, Nathan and Sridharan, Karthik},\n  booktitle={Proceedings of the 22nd Annual Conference on Learning Theory (COLT)},\n  year={2009},\n  url_Paper={https://ttic.uchicago.edu/~karthik/nonlinearTR.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Trading Accuracy for Sparsity.\n \n \n \n \n\n\n \n Shalev-Shwartz, S.; Srebro, N.; and Zhang, T.\n\n\n \n\n\n\n Technical Report Toyota Technological Institute at Chicago, 2009.\n \n\n\n\n
\n\n\n\n \n \n \"Trading paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@techreport{2009shalevtrading,\n  title={Trading Accuracy for Sparsity},\n  author={Shalev-Shwartz, Shai and Srebro, Nathan and Zhang, Tong},\n  year={2009},\n  institution={Toyota Technological Institute at Chicago},\n  url_Paper={https://ttic.uchicago.edu/~shai/papers/ShalevSrebroZhang09report.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Theory and Practice of Support Vector Machines Optimization.\n \n \n \n \n\n\n \n Shalev-Shwartz, S.; and Srebo, N.\n\n\n \n\n\n\n Automatic Speech and Speaker Recognition,11–26. 2009.\n \n\n\n\n
\n\n\n\n \n \n \"Theory paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2009shalevtheory,\n  title={Theory and Practice of Support Vector Machines Optimization},\n  author={Shalev-Shwartz, Shai and Srebo, Nathan},\n  journal={Automatic Speech and Speaker Recognition},\n  pages={11--26},\n  year={2009},\n  editor={Keshet, Joseph and Bengio, Samy},\n  publisher={Wiley},\n  url_Paper={https://ccc.inaoep.mx/~villasen/bib/Automatic%20Speech%20and%20Speaker%20Recognition%20Large%20Margin%20and%20Kernel%20Methods.pdf#page=24}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Statistical Analysis of Semi-Supervised Learning: The Limit of Infinite Unlabelled Data.\n \n \n \n \n\n\n \n Nadler, B.; Srebro, N.; and Zhou, X.\n\n\n \n\n\n\n In pages 1330–1338, 2009. \n \n\n\n\n
\n\n\n\n \n \n \"Statistical paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2009nadlerstatistical,\n  title={Statistical Analysis of Semi-Supervised Learning: The Limit of Infinite Unlabelled Data},\n  author={Nadler, Boaz and Srebro, Nathan and Zhou, Xueyuan},\n  journal={Proceedings of the 22nd International Conference on Neural Information Processing Systems},\n  pages={1330--1338},\n  year={2009},\n  url_Paper={http://papers.nips.cc/paper/3652-statistical-analysis-of-semi-supervised-learning-the-limit-of-infinite-unlabelled-data}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Learnability and Stability in the General Learning Setting.\n \n \n \n \n\n\n \n Shalev-Shwartz, S.; Shamir, O.; Srebro, N.; and Sridharan, K.\n\n\n \n\n\n\n In Proceedings of the 22nd Annual Conference on Learning Theory (COLT), 2009. \n \n\n\n\n
\n\n\n\n \n \n \"Learnability paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2009shalevlearnability,\n  title={Learnability and Stability in the General Learning Setting},\n  author={Shalev-Shwartz, Shai and Shamir, Ohad and Srebro, Nathan and Sridharan, Karthik},\n  booktitle={Proceedings of the 22nd Annual Conference on Learning Theory (COLT)},\n  year={2009},\n  url_Paper={https://ttic.uchicago.edu/~shai/papers/ShalevShamirSridharanSrebro2.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Semi-supervised Learning with the Graph Laplacian: The Limit of Infinite Unlabelled Data.\n \n \n \n \n\n\n \n Nadler, B.; Srebro, N.; and Zhou, X.\n\n\n \n\n\n\n In Proceedings of the 22nd International Conference on Neural Information Processing Systems, pages 1330–1338, 2009. \n \n\n\n\n
\n\n\n\n \n \n \"Semi-supervised paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2009nadlersemi,\n  title={Semi-supervised Learning with the Graph Laplacian: The Limit of Infinite Unlabelled Data},\n  author={Nadler, Boaz and Srebro, Nathan and Zhou, Xueyuan},\n  booktitle={Proceedings of the 22nd International Conference on Neural Information Processing Systems},\n  pages={1330--1338},\n  year={2009},\n  url_Paper={http://papers.nips.cc/paper/3731-predicting-the-optimal-spacing-of-study-a-multiscale-context-model-of-memory}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Stochastic Convex Optimization.\n \n \n \n \n\n\n \n Shalev-Shwartz, S.; Srebro, N.; and Sridharan, K.\n\n\n \n\n\n\n In Proceedings of the 22nd Annual Conference on Learning Theory (COLT), 2009. \n \n\n\n\n
\n\n\n\n \n \n \"Stochastic paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2009shalevstochastic,\n  title={Stochastic Convex Optimization},\n  author={Shalev-Shwartz, Shai and Srebro, Nathan and Sridharan, Karthik},\n  booktitle={Proceedings of the 22nd Annual Conference on Learning Theory (COLT)},\n  year={2009},\n  url_Paper={https://ttic.uchicago.edu/~karthik/nonlinearTR.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Trading Accuracy for Sparsity.\n \n \n \n \n\n\n \n Shalev-Shwartz, S.; Srebro, N.; and Zhang, T.\n\n\n \n\n\n\n Technical Report Toyota Technological Institute at Chicago, 2009.\n \n\n\n\n
\n\n\n\n \n \n \"Trading paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@techreport{2009shalevtrading,\n  title={Trading Accuracy for Sparsity},\n  author={Shalev-Shwartz, Shai and Srebro, Nathan and Zhang, Tong},\n  year={2009},\n  institution={Toyota Technological Institute at Chicago},\n  url_Paper={https://ttic.uchicago.edu/~shai/papers/ShalevSrebroZhang09report.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Theory and Practice of Support Vector Machines Optimization.\n \n \n \n \n\n\n \n Shalev-Shwartz, S.; and Srebo, N.\n\n\n \n\n\n\n Automatic Speech and Speaker Recognition,11–26. 2009.\n \n\n\n\n
\n\n\n\n \n \n \"Theory paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2009shalevtheory,\n  title={Theory and Practice of Support Vector Machines Optimization},\n  author={Shalev-Shwartz, Shai and Srebo, Nathan},\n  journal={Automatic Speech and Speaker Recognition},\n  pages={11--26},\n  year={2009},\n  editor={Keshet, Joseph and Bengio, Samy},\n  publisher={Wiley},\n  url_Paper={https://ccc.inaoep.mx/~villasen/bib/Automatic%20Speech%20and%20Speaker%20Recognition%20Large%20Margin%20and%20Kernel%20Methods.pdf#page=24}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Statistical Analysis of Semi-Supervised Learning: The Limit of Infinite Unlabelled Data.\n \n \n \n \n\n\n \n Nadler, B.; Srebro, N.; and Zhou, X.\n\n\n \n\n\n\n In pages 1330–1338, 2009. \n \n\n\n\n
\n\n\n\n \n \n \"Statistical paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2009nadlerstatistical,\n  title={Statistical Analysis of Semi-Supervised Learning: The Limit of Infinite Unlabelled Data},\n  author={Nadler, Boaz and Srebro, Nathan and Zhou, Xueyuan},\n  journal={Proceedings of the 22nd International Conference on Neural Information Processing Systems},\n  pages={1330--1338},\n  year={2009},\n  url_Paper={http://papers.nips.cc/paper/3652-statistical-analysis-of-semi-supervised-learning-the-limit-of-infinite-unlabelled-data}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Learnability and Stability in the General Learning Setting.\n \n \n \n \n\n\n \n Shalev-Shwartz, S.; Shamir, O.; Srebro, N.; and Sridharan, K.\n\n\n \n\n\n\n In Proceedings of the 22nd Annual Conference on Learning Theory (COLT), 2009. \n \n\n\n\n
\n\n\n\n \n \n \"Learnability paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2009shalevlearnability,\n  title={Learnability and Stability in the General Learning Setting},\n  author={Shalev-Shwartz, Shai and Shamir, Ohad and Srebro, Nathan and Sridharan, Karthik},\n  booktitle={Proceedings of the 22nd Annual Conference on Learning Theory (COLT)},\n  year={2009},\n  url_Paper={https://ttic.uchicago.edu/~shai/papers/ShalevShamirSridharanSrebro2.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2008\n \n \n (16)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Complexity of Inference in Graphical Models.\n \n \n \n \n\n\n \n Chandrasekaran, V.; Srebro, N.; and Harsha, P.\n\n\n \n\n\n\n In Proceedings of the 24th Conference on Uncertainty in Artificial Intelligence (UAI), pages 70–78, 2008. \n \n\n\n\n
\n\n\n\n \n \n \"Complexity paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2008chandrasekarancomplexity,\n  title={Complexity of Inference in Graphical Models},\n  author={Chandrasekaran, Venkat and Srebro, Nathan and Harsha, Prahladh},\n  booktitle={Proceedings of the 24th Conference on Uncertainty in Artificial Intelligence (UAI)},\n  pages={70--78},\n  year={2008},\n  %https://dl.acm.org/doi/abs/10.5555/3023476.3023485\n  url_Paper={https://arxiv.org/ftp/arxiv/papers/1206/1206.3240.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fast Rates for Regularized Objectives.\n \n \n \n \n\n\n \n Sridharan, K.; Srebro, N.; and Shalev-Shwartz, S.\n\n\n \n\n\n\n In Proceedings of the 21st International Conference on Neural Information Processing Systems (NIPS), pages 1545–1552, 2008. \n \n\n\n\n
\n\n\n\n \n \n \"Fast paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2008sridharanfast,\n  title={Fast Rates for Regularized Objectives},\n  author={Sridharan, Karthik and Srebro, Nathan and Shalev-Shwartz, Shai},\n  booktitle={Proceedings of the 21st International Conference on Neural Information Processing Systems (NIPS)},\n  pages={1545--1552},\n  year={2008},\n  url_Paper={http://papers.nips.cc/paper/3400-fast-rates-for-regularized-objectives}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Theory of Learning with Similarity Functions.\n \n \n \n \n\n\n \n Balcan, M.; Blum, A.; and Srebro, N.\n\n\n \n\n\n\n Machine Learning, 72: 89–112. 2008.\n \n\nAn earlier version appeared in ICML 2006\n\n
\n\n\n\n \n \n \"A paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2008balcantheory,\n  title={A Theory of Learning with Similarity Functions},\n  author={Balcan, Maria-Florina and Blum, Avrim and Srebro, Nathan},\n  journal={Machine Learning},\n  volume={72},\n  pages={89--112},\n  year={2008},\n  publisher={Springer},\n  bibbase_note={An earlier version appeared in ICML 2006},\n  url_Paper={https://link.springer.com/article/10.1007/s10994-008-5059-5#citeas}\n}%%%\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n SVM Optimization: Inverse Dependence on Training Set Size.\n \n \n \n \n\n\n \n Shalev-Shwartz, S.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 25th International Conference on Machine Learning (ICML), pages 928–935, 2008. \n \n\n\n\n
\n\n\n\n \n \n \"SVM paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2008shalevsvm,\n  title={SVM Optimization: Inverse Dependence on Training Set Size},\n  author={Shalev-Shwartz, Shai and Srebro, Nathan},\n  booktitle={Proceedings of the 25th International Conference on Machine Learning (ICML)},\n  pages={928--935},\n  year={2008},\n  url_Paper={https://dl.acm.org/doi/abs/10.1145/1390156.1390273}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Iterative loss minimization with $\\ell_1$-norm constraint and guarantees on sparsity.\n \n \n \n \n\n\n \n Shalev-Shwartz, S.; and Srebro, N.\n\n\n \n\n\n\n 2008.\n \n\n\n\n
\n\n\n\n \n \n \"Iterative paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{2008shaleviterative,\n  title={Iterative loss minimization with $\\ell_1$-norm constraint and guarantees on sparsity},\n  author={Shalev-Shwartz, Shai and Srebro, Nathan},\n  year={2008},\n  url_Paper={https://ttic.uchicago.edu/~nati/Publications/ShalevSrebro08Iterative.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Low $\\ell_1$-norm and guarantees on sparsifiability.\n \n \n \n \n\n\n \n Shalev-Shwartz, S.; and Srebro, N.\n\n\n \n\n\n\n In Sparse Optimization and Variable Selection, Joint ICML/COLT/UAI Workshop, 2008. \n \n\n\n\n
\n\n\n\n \n \n \"Low paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2008shalevlow,\n  title={Low $\\ell_1$-norm and guarantees on sparsifiability},\n  author={Shalev-Shwartz, Shai and Srebro, Nathan},\n  booktitle={Sparse Optimization and Variable Selection, Joint ICML/COLT/UAI Workshop},\n  year={2008},\n  url_Paper={https://ttic.uchicago.edu/~shai/papers/ShalevSr08t.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Improved Guarantees for Learning via Similarity Functions.\n \n \n \n \n\n\n \n Balcan, M.; Blum, A.; and Srebro, N.\n\n\n \n\n\n\n 2008.\n \n\n\n\n
\n\n\n\n \n \n \"Improved paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{balcan2008improved,\n  title={Improved Guarantees for Learning via Similarity Functions},\n  author={Balcan, Maria-Florina and Blum, Avrim and Srebro, Nathan},\n  year={2008},\n  url_Paper={https://kilthub.cmu.edu/articles/Improved_Guarantees_for_Learning_via_Similarity_Functions/6606368}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Similarity-Based Theoretical Foundation for Sparse Parzen Window Prediction.\n \n \n \n \n\n\n \n Balcan, M.; Blum, A.; and Srebro, N.\n\n\n \n\n\n\n Sparse Optimization and Variable Selection Wokrshop, ICML/COLT/UAI. 2008.\n \n\n\n\n
\n\n\n\n \n \n \"Similarity-Based paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2008balcansimilarity,\n  title={Similarity-Based Theoretical Foundation for Sparse Parzen Window Prediction},\n  author={Balcan, Maria-Florina and Blum, Avrim and Srebro, Nati},\n  journal={Sparse Optimization and Variable Selection Wokrshop, ICML/COLT/UAI},\n  year={2008},\n  url_Paper={http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.367.8195&rep=rep1&type=pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Complexity of Inference in Graphical Models.\n \n \n \n \n\n\n \n Chandrasekaran, V.; Srebro, N.; and Harsha, P.\n\n\n \n\n\n\n In Proceedings of the 24th Conference on Uncertainty in Artificial Intelligence (UAI), pages 70–78, 2008. \n \n\n\n\n
\n\n\n\n \n \n \"Complexity paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2008chandrasekarancomplexity,\n  title={Complexity of Inference in Graphical Models},\n  author={Chandrasekaran, Venkat and Srebro, Nathan and Harsha, Prahladh},\n  booktitle={Proceedings of the 24th Conference on Uncertainty in Artificial Intelligence (UAI)},\n  pages={70--78},\n  year={2008},\n  %https://dl.acm.org/doi/abs/10.5555/3023476.3023485\n  url_Paper={https://arxiv.org/ftp/arxiv/papers/1206/1206.3240.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fast Rates for Regularized Objectives.\n \n \n \n \n\n\n \n Sridharan, K.; Srebro, N.; and Shalev-Shwartz, S.\n\n\n \n\n\n\n In Proceedings of the 21st International Conference on Neural Information Processing Systems (NIPS), pages 1545–1552, 2008. \n \n\n\n\n
\n\n\n\n \n \n \"Fast paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2008sridharanfast,\n  title={Fast Rates for Regularized Objectives},\n  author={Sridharan, Karthik and Srebro, Nathan and Shalev-Shwartz, Shai},\n  booktitle={Proceedings of the 21st International Conference on Neural Information Processing Systems (NIPS)},\n  pages={1545--1552},\n  year={2008},\n  url_Paper={http://papers.nips.cc/paper/3400-fast-rates-for-regularized-objectives}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Theory of Learning with Similarity Functions.\n \n \n \n \n\n\n \n Balcan, M.; Blum, A.; and Srebro, N.\n\n\n \n\n\n\n Machine Learning, 72: 89–112. 2008.\n \n\n\\newline An earlier version appeared in ICML 2006\n\n
\n\n\n\n \n \n \"A paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2008balcantheory,\n  title={A Theory of Learning with Similarity Functions},\n  author={Balcan, Maria-Florina and Blum, Avrim and Srebro, Nathan},\n  journal={Machine Learning},\n  volume={72},\n  pages={89--112},\n  year={2008},\n  publisher={Springer},\n  bibbase_note={\\newline An earlier version appeared in ICML 2006},\n  url_Paper={https://link.springer.com/article/10.1007/s10994-008-5059-5#citeas}\n}%%%\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n SVM Optimization: Inverse Dependence on Training Set Size.\n \n \n \n \n\n\n \n Shalev-Shwartz, S.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 25th International Conference on Machine Learning (ICML), pages 928–935, 2008. \n \n\n\n\n
\n\n\n\n \n \n \"SVM paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2008shalevsvm,\n  title={SVM Optimization: Inverse Dependence on Training Set Size},\n  author={Shalev-Shwartz, Shai and Srebro, Nathan},\n  booktitle={Proceedings of the 25th International Conference on Machine Learning (ICML)},\n  pages={928--935},\n  year={2008},\n  url_Paper={https://dl.acm.org/doi/abs/10.1145/1390156.1390273}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Iterative loss minimization with $\\ell_1$-norm constraint and guarantees on sparsity.\n \n \n \n \n\n\n \n Shalev-Shwartz, S.; and Srebro, N.\n\n\n \n\n\n\n 2008.\n \n\n\n\n
\n\n\n\n \n \n \"Iterative paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{2008shaleviterative,\n  title={Iterative loss minimization with $\\ell_1$-norm constraint and guarantees on sparsity},\n  author={Shalev-Shwartz, Shai and Srebro, Nathan},\n  year={2008},\n  url_Paper={https://ttic.uchicago.edu/~nati/Publications/ShalevSrebro08Iterative.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Low $\\ell_1$-norm and guarantees on sparsifiability.\n \n \n \n \n\n\n \n Shalev-Shwartz, S.; and Srebro, N.\n\n\n \n\n\n\n In Sparse Optimization and Variable Selection, Joint ICML/COLT/UAI Workshop, 2008. \n \n\n\n\n
\n\n\n\n \n \n \"Low paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2008shalevlow,\n  title={Low $\\ell_1$-norm and guarantees on sparsifiability},\n  author={Shalev-Shwartz, Shai and Srebro, Nathan},\n  booktitle={Sparse Optimization and Variable Selection, Joint ICML/COLT/UAI Workshop},\n  year={2008},\n  url_Paper={https://ttic.uchicago.edu/~shai/papers/ShalevSr08t.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Improved Guarantees for Learning via Similarity Functions.\n \n \n \n \n\n\n \n Balcan, M.; Blum, A.; and Srebro, N.\n\n\n \n\n\n\n 2008.\n \n\n\n\n
\n\n\n\n \n \n \"Improved paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{balcan2008improved,\n  title={Improved Guarantees for Learning via Similarity Functions},\n  author={Balcan, Maria-Florina and Blum, Avrim and Srebro, Nathan},\n  year={2008},\n  url_Paper={https://kilthub.cmu.edu/articles/Improved_Guarantees_for_Learning_via_Similarity_Functions/6606368}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Similarity-Based Theoretical Foundation for Sparse Parzen Window Prediction.\n \n \n \n \n\n\n \n Balcan, M.; Blum, A.; and Srebro, N.\n\n\n \n\n\n\n Sparse Optimization and Variable Selection Wokrshop, ICML/COLT/UAI. 2008.\n \n\n\n\n
\n\n\n\n \n \n \"Similarity-Based paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2008balcansimilarity,\n  title={Similarity-Based Theoretical Foundation for Sparse Parzen Window Prediction},\n  author={Balcan, Maria-Florina and Blum, Avrim and Srebro, Nati},\n  journal={Sparse Optimization and Variable Selection Wokrshop, ICML/COLT/UAI},\n  year={2008},\n  url_Paper={http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.367.8195&rep=rep1&type=pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2007\n \n \n (14)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Pegasos: Primal Estimated Sub-GrAdient SOlver for SVM.\n \n \n \n \n\n\n \n Shalev-Shwartz, S.; Singer, Y.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 24th International Conference on Machine Learning (ICML), 2007. \n \n\n\n\n
\n\n\n\n \n \n \"Pegasos: paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2007shalevpegasos,\n  title={Pegasos: Primal Estimated Sub-GrAdient SOlver for SVM},\n  author={Shalev-Shwartz, Shai and Singer, Yoram and Srebro, Nathan},\n  booktitle={Proceedings of the 24th International Conference on Machine Learning (ICML)},\n  year={2007},\n  url_Paper={https://dl.acm.org/doi/abs/10.1145/1273496.1273598}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Thesis submitted for the degree of “Doctor of Philosophy”.\n \n \n \n \n\n\n \n Shalev-Shwartz, S.\n\n\n \n\n\n\n Ph.D. Thesis, Hebrew University, 2007.\n \n\nAuthor collaborated and worked with Nathan Srebro\n\n
\n\n\n\n \n \n \"Thesis paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@phdthesis{2007shalevthesis,\n  title={Thesis submitted for the degree of “Doctor of Philosophy”},\n  author={Shalev-Shwartz, Shai},\n  school={Hebrew University},\n  year={2007},\n  bibbase_note={Author collaborated and worked with Nathan Srebro},\n  url_Paper={https://ttic.uchicago.edu/~shai/papers/ShalevThesis07.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Uncovering Shared Structures in Multiclass Classification.\n \n \n \n \n\n\n \n Amit, Y.; Fink, M.; Srebro, N.; and Ullman, S.\n\n\n \n\n\n\n In Proceedings of the 24th International Conference on Machine Learning (ICML), pages 17–24, 2007. \n \n\n\n\n
\n\n\n\n \n \n \"Uncovering paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 12 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2007amituncovering,\n  title={Uncovering Shared Structures in Multiclass Classification},\n  author={Amit, Yonatan and Fink, Michael and Srebro, Nathan and Ullman, Shimon},\n  booktitle={Proceedings of the 24th International Conference on Machine Learning (ICML)},\n  pages={17--24},\n  year={2007},\n  url_Paper={https://dl.acm.org/doi/abs/10.1145/1273496.1273499}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Are There Local Maxima in the Infinite-sample Likelihood of Gaussian Mixture Estimation?.\n \n \n \n \n\n\n \n Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 20th Annual Conference on Learning Theory (COLT), pages 628–629, 2007. \n \n\n\n\n
\n\n\n\n \n \n \"Are paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2007srebrothere,\n  title={Are There Local Maxima in the Infinite-sample Likelihood of Gaussian Mixture Estimation?},\n  author={Srebro, Nathan},\n  booktitle={Proceedings of the 20th Annual Conference on Learning Theory (COLT)},\n  pages={628--629},\n  year={2007},\n  url_Paper={https://link.springer.com/chapter/10.1007/978-3-540-72927-3_47}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n How Good is a Kernel When Used as a Similarity Measure?.\n \n \n \n \n\n\n \n Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 20th Annual Conference on Learning Theory (COLT), pages 323–335, 2007. \n \n\n\n\n
\n\n\n\n \n \n \"How paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2007srebrohow,\n  title={How Good is a Kernel When Used as a Similarity Measure?},\n  author={Srebro, Nathan},\n  booktitle={Proceedings of the 20th Annual Conference on Learning Theory (COLT)},\n  pages={323--335},\n  year={2007},\n  url_Paper={https://link.springer.com/chapter/10.1007/978-3-540-72927-3_24}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n $\\ell_1$ Regularization in Infinite Dimensional Feature Spaces.\n \n \n \n \n\n\n \n Rosset, S.; Swirszcz, G.; Srebro, N.; and Zhu, J.\n\n\n \n\n\n\n In Proceedings of the 20th Annual Conference on Learning Theory (COLT), pages 544–558, 2007. \n \n\n\n\n
\n\n\n\n \n \n \"$\\ell_1$ paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2007rossetl1,\n  title={$\\ell_1$ Regularization in Infinite Dimensional Feature Spaces},\n  author={Rosset, Saharon and Swirszcz, Grzegorz and Srebro, Nathan and Zhu, Ji},\n  booktitle={Proceedings of the 20th Annual Conference on Learning Theory (COLT)},\n  pages={544--558},\n  year={2007},\n  url_Paper={https://link.springer.com/chapter/10.1007/978-3-540-72927-3_39}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Adaptive Gaussian Kernel SVMs.\n \n \n \n \n\n\n \n Srebro, N.; and Roweis, S.\n\n\n \n\n\n\n Snowbird Learning Workshop. 2007.\n \n\n\n\n
\n\n\n\n \n \n \"Adaptive paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2005srebroadaptive,\n  title={Adaptive Gaussian Kernel SVMs},\n  author={Srebro, Nathan and Roweis, Sam},\n  journal={Snowbird Learning Workshop},\n  year={2007},\n  url_Paper={http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.87.8071&rep=rep1&type=pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Pegasos: Primal Estimated Sub-GrAdient SOlver for SVM.\n \n \n \n \n\n\n \n Shalev-Shwartz, S.; Singer, Y.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 24th International Conference on Machine Learning (ICML), 2007. \n \n\n\n\n
\n\n\n\n \n \n \"Pegasos: paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2007shalevpegasos,\n  title={Pegasos: Primal Estimated Sub-GrAdient SOlver for SVM},\n  author={Shalev-Shwartz, Shai and Singer, Yoram and Srebro, Nathan},\n  booktitle={Proceedings of the 24th International Conference on Machine Learning (ICML)},\n  year={2007},\n  url_Paper={https://dl.acm.org/doi/abs/10.1145/1273496.1273598}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Thesis submitted for the degree of “Doctor of Philosophy”.\n \n \n \n \n\n\n \n Shalev-Shwartz, S.\n\n\n \n\n\n\n Ph.D. Thesis, Hebrew University, 2007.\n \n\n\\newline Author collaborated and worked with Nathan Srebro\n\n
\n\n\n\n \n \n \"Thesis paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@phdthesis{2007shalevthesis,\n  title={Thesis submitted for the degree of “Doctor of Philosophy”},\n  author={Shalev-Shwartz, Shai},\n  school={Hebrew University},\n  year={2007},\n  bibbase_note={\\newline Author collaborated and worked with Nathan Srebro},\n  url_Paper={https://ttic.uchicago.edu/~shai/papers/ShalevThesis07.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Uncovering Shared Structures in Multiclass Classification.\n \n \n \n \n\n\n \n Amit, Y.; Fink, M.; Srebro, N.; and Ullman, S.\n\n\n \n\n\n\n In Proceedings of the 24th International Conference on Machine Learning (ICML), pages 17–24, 2007. \n \n\n\n\n
\n\n\n\n \n \n \"Uncovering paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 12 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2007amituncovering,\n  title={Uncovering Shared Structures in Multiclass Classification},\n  author={Amit, Yonatan and Fink, Michael and Srebro, Nathan and Ullman, Shimon},\n  booktitle={Proceedings of the 24th International Conference on Machine Learning (ICML)},\n  pages={17--24},\n  year={2007},\n  url_Paper={https://dl.acm.org/doi/abs/10.1145/1273496.1273499}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Are There Local Maxima in the Infinite-sample Likelihood of Gaussian Mixture Estimation?.\n \n \n \n \n\n\n \n Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 20th Annual Conference on Learning Theory (COLT), pages 628–629, 2007. \n \n\n\n\n
\n\n\n\n \n \n \"Are paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2007srebrothere,\n  title={Are There Local Maxima in the Infinite-sample Likelihood of Gaussian Mixture Estimation?},\n  author={Srebro, Nathan},\n  booktitle={Proceedings of the 20th Annual Conference on Learning Theory (COLT)},\n  pages={628--629},\n  year={2007},\n  url_Paper={https://link.springer.com/chapter/10.1007/978-3-540-72927-3_47}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n How Good is a Kernel When Used as a Similarity Measure?.\n \n \n \n \n\n\n \n Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 20th Annual Conference on Learning Theory (COLT), pages 323–335, 2007. \n \n\n\n\n
\n\n\n\n \n \n \"How paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2007srebrohow,\n  title={How Good is a Kernel When Used as a Similarity Measure?},\n  author={Srebro, Nathan},\n  booktitle={Proceedings of the 20th Annual Conference on Learning Theory (COLT)},\n  pages={323--335},\n  year={2007},\n  url_Paper={https://link.springer.com/chapter/10.1007/978-3-540-72927-3_24}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n $\\ell_1$ Regularization in Infinite Dimensional Feature Spaces.\n \n \n \n \n\n\n \n Rosset, S.; Swirszcz, G.; Srebro, N.; and Zhu, J.\n\n\n \n\n\n\n In Proceedings of the 20th Annual Conference on Learning Theory (COLT), pages 544–558, 2007. \n \n\n\n\n
\n\n\n\n \n \n \"$\\ell_1$ paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2007rossetl1,\n  title={$\\ell_1$ Regularization in Infinite Dimensional Feature Spaces},\n  author={Rosset, Saharon and Swirszcz, Grzegorz and Srebro, Nathan and Zhu, Ji},\n  booktitle={Proceedings of the 20th Annual Conference on Learning Theory (COLT)},\n  pages={544--558},\n  year={2007},\n  url_Paper={https://link.springer.com/chapter/10.1007/978-3-540-72927-3_39}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Adaptive Gaussian Kernel SVMs.\n \n \n \n \n\n\n \n Srebro, N.; and Roweis, S.\n\n\n \n\n\n\n Snowbird Learning Workshop. 2007.\n \n\n\n\n
\n\n\n\n \n \n \"Adaptive paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2005srebroadaptive,\n  title={Adaptive Gaussian Kernel SVMs},\n  author={Srebro, Nathan and Roweis, Sam},\n  journal={Snowbird Learning Workshop},\n  year={2007},\n  url_Paper={http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.87.8071&rep=rep1&type=pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2006\n \n \n (8)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n A Theory of Learning with Similarity Functions.\n \n \n \n \n\n\n \n Balcan, M.; Blum, A.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 23rd International Conference on Machine Learning (ICML), pages 73–80, 2006. \n \n\n\n\n
\n\n\n\n \n \n \"A paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2006balcantheory,\n  title={A Theory of Learning with Similarity Functions},\n  author={Balcan, Maria-Florina and Blum, Avrim and Srebro, Nathan},\n  booktitle={Proceedings of the 23rd International Conference on Machine Learning (ICML)},\n  pages={73--80},\n  year={2006},\n  url_Paper={https://dl.acm.org/doi/abs/10.1145/1143844.1143854}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An Investigation of Computational and Informational Limits in Gaussian Mixture Clustering.\n \n \n \n \n\n\n \n Srebro, N.; Shakhnarovich, G.; and Roweis, S.\n\n\n \n\n\n\n In Proceedings of the 23rd International Conference on Machine Learning (ICML), pages 865–872, 2006. \n \n\n\n\n
\n\n\n\n \n \n \"An paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2006srebroinvestigation,\n  title={An Investigation of Computational and Informational Limits in Gaussian Mixture Clustering},\n  author={Srebro, Nathan and Shakhnarovich, Gregory and Roweis, Sam},\n  booktitle={Proceedings of the 23rd International Conference on Machine Learning (ICML)},\n  pages={865--872},\n  year={2006},\n  url_Paper={https://dl.acm.org/doi/abs/10.1145/1143844.1143953}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Learning Bounds for Support Vector Machines with Learned Kernels.\n \n \n \n \n\n\n \n Srebro, N.; and Ben-David, S.\n\n\n \n\n\n\n In Proceedings of the 19th Annual Conference on Learning Theory (COLT), pages 169–183, 2006. \n \n\n\n\n
\n\n\n\n \n \n \"Learning paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2006srebrolearning,\n  title={Learning Bounds for Support Vector Machines with Learned Kernels},\n  author={Srebro, Nathan and Ben-David, Shai},\n  booktitle={Proceedings of the 19th Annual Conference on Learning Theory (COLT)},\n  pages={169--183},\n  year={2006},\n  url_Paper={https://link.springer.com/chapter/10.1007/11776420_15}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Improved Prediction of HIV Resistance In-Vitro by Biochemically-Driven Models.\n \n \n \n \n\n\n \n Neuvirth, H.; Rosen-Zvi, M.; Srebro, N.; Aharoni, E.; Zazzi, M.; and Tishby, N.\n\n\n \n\n\n\n Neural Information Processing Systems (NIPS) 2006 Workshop on New Problems and Methods in Computational Biology. 2006.\n \n\n\n\n
\n\n\n\n \n \n \"Improved paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 11 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2006neuvirthimproved,\n  title={Improved Prediction of HIV Resistance In-Vitro by Biochemically-Driven Models},\n  author={Neuvirth, Hani and Rosen-Zvi, Michal and Srebro, Nathan and Aharoni, Ehud and Zazzi, Maurizio and Tishby, Naftali},\n  journal={Neural Information Processing Systems (NIPS) 2006 Workshop on New Problems and Methods in Computational Biology},\n  year={2006},\n  url_Paper={http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.133.2640&rep=rep1&type=pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Theory of Learning with Similarity Functions.\n \n \n \n \n\n\n \n Balcan, M.; Blum, A.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 23rd International Conference on Machine Learning (ICML), pages 73–80, 2006. \n \n\n\n\n
\n\n\n\n \n \n \"A paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2006balcantheory,\n  title={A Theory of Learning with Similarity Functions},\n  author={Balcan, Maria-Florina and Blum, Avrim and Srebro, Nathan},\n  booktitle={Proceedings of the 23rd International Conference on Machine Learning (ICML)},\n  pages={73--80},\n  year={2006},\n  url_Paper={https://dl.acm.org/doi/abs/10.1145/1143844.1143854}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An Investigation of Computational and Informational Limits in Gaussian Mixture Clustering.\n \n \n \n \n\n\n \n Srebro, N.; Shakhnarovich, G.; and Roweis, S.\n\n\n \n\n\n\n In Proceedings of the 23rd International Conference on Machine Learning (ICML), pages 865–872, 2006. \n \n\n\n\n
\n\n\n\n \n \n \"An paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2006srebroinvestigation,\n  title={An Investigation of Computational and Informational Limits in Gaussian Mixture Clustering},\n  author={Srebro, Nathan and Shakhnarovich, Gregory and Roweis, Sam},\n  booktitle={Proceedings of the 23rd International Conference on Machine Learning (ICML)},\n  pages={865--872},\n  year={2006},\n  url_Paper={https://dl.acm.org/doi/abs/10.1145/1143844.1143953}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Learning Bounds for Support Vector Machines with Learned Kernels.\n \n \n \n \n\n\n \n Srebro, N.; and Ben-David, S.\n\n\n \n\n\n\n In Proceedings of the 19th Annual Conference on Learning Theory (COLT), pages 169–183, 2006. \n \n\n\n\n
\n\n\n\n \n \n \"Learning paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2006srebrolearning,\n  title={Learning Bounds for Support Vector Machines with Learned Kernels},\n  author={Srebro, Nathan and Ben-David, Shai},\n  booktitle={Proceedings of the 19th Annual Conference on Learning Theory (COLT)},\n  pages={169--183},\n  year={2006},\n  url_Paper={https://link.springer.com/chapter/10.1007/11776420_15}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Improved Prediction of HIV Resistance In-Vitro by Biochemically-Driven Models.\n \n \n \n \n\n\n \n Neuvirth, H.; Rosen-Zvi, M.; Srebro, N.; Aharoni, E.; Zazzi, M.; and Tishby, N.\n\n\n \n\n\n\n Neural Information Processing Systems (NIPS) 2006 Workshop on New Problems and Methods in Computational Biology. 2006.\n \n\n\n\n
\n\n\n\n \n \n \"Improved paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 11 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2006neuvirthimproved,\n  title={Improved Prediction of HIV Resistance In-Vitro by Biochemically-Driven Models},\n  author={Neuvirth, Hani and Rosen-Zvi, Michal and Srebro, Nathan and Aharoni, Ehud and Zazzi, Maurizio and Tishby, Naftali},\n  journal={Neural Information Processing Systems (NIPS) 2006 Workshop on New Problems and Methods in Computational Biology},\n  year={2006},\n  url_Paper={http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.133.2640&rep=rep1&type=pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2005\n \n \n (16)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Fast Maximum Margin Matrix Factorization for Collaborative Prediction.\n \n \n \n \n\n\n \n Rennie, J.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 22nd International Conference on Machine Learning (ICML), pages 713–719, 2005. \n \n\n\n\n
\n\n\n\n \n \n \"Fast paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2005renniefast,\n  title={Fast Maximum Margin Matrix Factorization for Collaborative Prediction},\n  author={Rennie, Jasson and Srebro, Nathan},\n  booktitle={Proceedings of the 22nd International Conference on Machine Learning (ICML)},\n  pages={713--719},\n  year={2005},\n  url_Paper={https://dl.acm.org/doi/abs/10.1145/1102351.1102441}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Loss Functions for Preference Levels: Regression with Discrete Ordered Labels.\n \n \n \n \n\n\n \n Rennie, J.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the IJCAI Multidisciplinary Workshop on Advances in Preference Handling, 2005. \n \n\n\n\n
\n\n\n\n \n \n \"Loss paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2005rennieloss,\n  title={Loss Functions for Preference Levels: Regression with Discrete Ordered Labels},\n  author={Rennie, Jason and Srebro, Nathan},\n  booktitle={Proceedings of the IJCAI Multidisciplinary Workshop on Advances in Preference Handling},\n  year={2005},\n  url_Paper={https://ttic.uchicago.edu/~nati/Publications/RennieSrebroIJCAI05.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Rank, Trace-Norm and Max-Norm.\n \n \n \n \n\n\n \n Srebro, N.; and Shraibman, A.\n\n\n \n\n\n\n In Proceedings of the 18th Annual Conference on Learning Theory (COLT), pages 545–560, 2005. \n \n\n\n\n
\n\n\n\n \n \n \"Rank, paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2005srebrorank,\n  title={Rank, Trace-Norm and Max-Norm},\n  author={Srebro, Nathan and Shraibman, Adi},\n  booktitle={Proceedings of the 18th Annual Conference on Learning Theory (COLT)},\n  pages={545--560},\n  year={2005},\n  url_Paper={https://link.springer.com/chapter/10.1007/11503415_37}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Time-Varying Topic Models using Dependent Dirichlet Processes.\n \n \n \n \n\n\n \n Srebro, N.; and Roweis, S.\n\n\n \n\n\n\n UTML TR 2005, 3. 2005.\n \n\n\n\n
\n\n\n\n \n \n \"Time-Varying paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2005srebrotime,\n  title={Time-Varying Topic Models using Dependent Dirichlet Processes},\n  author={Srebro, Nathan and Roweis, Sam},\n  journal={UTML TR 2005},\n  volume={3},\n  year={2005},\n  url_Paper={https://pdfs.semanticscholar.org/ff4e/6ca51649b2a48a780758b3f554b907edddc7.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Dynamic Data Structure for Checking Hyperacyclicity.\n \n \n \n \n\n\n \n Liang, P.; and Srebro, N.\n\n\n \n\n\n\n Technical Report MIT Computer Science and Artificial Intelligence Laboratory, 2005.\n \n\n\n\n
\n\n\n\n \n \n \"A paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@techreport{2005liangdynamic,\n  title={A Dynamic Data Structure for Checking Hyperacyclicity},\n  author={Liang, Percy and Srebro, Nathan},\n  institution={MIT Computer Science and Artificial Intelligence Laboratory},\n  year={2005},\n  url_Paper={https://dspace.mit.edu/handle/1721.1/30514}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n How Much of a Hypertree can be Captured by Windmills?.\n \n \n \n \n\n\n \n Liang, P.; and Srebro, N.\n\n\n \n\n\n\n Technical Report MIT Computer Science and Artificial Intelligence Laboratory, 2005.\n \n\n\n\n
\n\n\n\n \n \n \"How paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@techreport{2005liangmuch,\n  title={How Much of a Hypertree can be Captured by Windmills?},\n  author={Liang, Percy and Srebro, Nathan},\n  institution={MIT Computer Science and Artificial Intelligence Laboratory},\n  year={2005},\n  url_Paper={https://dspace.mit.edu/handle/1721.1/30515}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Generalization Error Bounds for Collaborative Prediction with Low-Rank Matrices.\n \n \n \n \n\n\n \n Srebro, N.; Alon, N.; and Jaakkola, T.\n\n\n \n\n\n\n In Proceedings of the 17th International Conference on Neural Information Processing Systems (NIPS), pages 1321–1328, 2005. \n \n\n\n\n
\n\n\n\n \n \n \"Generalization paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 12 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2005srebrogeneralization,\n  title={Generalization Error Bounds for Collaborative Prediction with Low-Rank Matrices},\n  author={Srebro, Nathan and Alon, Noga and Jaakkola, Tommi},\n  booktitle={Proceedings of the 17th International Conference on Neural Information Processing Systems (NIPS)},\n  pages={1321--1328},\n  year={2005},\n  url_Paper={http://papers.nips.cc/paper/2700-generalization-error-bounds-for-collaborative-prediction-with-low-rank-matrices}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Data Structure for Maintaining Acyclicity in Hypergraphs.\n \n \n \n \n\n\n \n Liang, P.; and Srebro, N.\n\n\n \n\n\n\n 2005.\n \n\n\n\n
\n\n\n\n \n \n \"A paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{2005liangdata,\n  title={A Data Structure for Maintaining Acyclicity in Hypergraphs},\n  author={Liang, Percy and Srebro, Nathan},\n  year={2005},\n  url_Paper={http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.87.3561&rep=rep1&type=pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fast Maximum Margin Matrix Factorization for Collaborative Prediction.\n \n \n \n \n\n\n \n Rennie, J.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 22nd International Conference on Machine Learning (ICML), pages 713–719, 2005. \n \n\n\n\n
\n\n\n\n \n \n \"Fast paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2005renniefast,\n  title={Fast Maximum Margin Matrix Factorization for Collaborative Prediction},\n  author={Rennie, Jasson and Srebro, Nathan},\n  booktitle={Proceedings of the 22nd International Conference on Machine Learning (ICML)},\n  pages={713--719},\n  year={2005},\n  url_Paper={https://dl.acm.org/doi/abs/10.1145/1102351.1102441}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Loss Functions for Preference Levels: Regression with Discrete Ordered Labels.\n \n \n \n \n\n\n \n Rennie, J.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the IJCAI Multidisciplinary Workshop on Advances in Preference Handling, 2005. \n \n\n\n\n
\n\n\n\n \n \n \"Loss paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2005rennieloss,\n  title={Loss Functions for Preference Levels: Regression with Discrete Ordered Labels},\n  author={Rennie, Jason and Srebro, Nathan},\n  booktitle={Proceedings of the IJCAI Multidisciplinary Workshop on Advances in Preference Handling},\n  year={2005},\n  url_Paper={https://ttic.uchicago.edu/~nati/Publications/RennieSrebroIJCAI05.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Rank, Trace-Norm and Max-Norm.\n \n \n \n \n\n\n \n Srebro, N.; and Shraibman, A.\n\n\n \n\n\n\n In Proceedings of the 18th Annual Conference on Learning Theory (COLT), pages 545–560, 2005. \n \n\n\n\n
\n\n\n\n \n \n \"Rank, paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2005srebrorank,\n  title={Rank, Trace-Norm and Max-Norm},\n  author={Srebro, Nathan and Shraibman, Adi},\n  booktitle={Proceedings of the 18th Annual Conference on Learning Theory (COLT)},\n  pages={545--560},\n  year={2005},\n  url_Paper={https://link.springer.com/chapter/10.1007/11503415_37}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Time-Varying Topic Models using Dependent Dirichlet Processes.\n \n \n \n \n\n\n \n Srebro, N.; and Roweis, S.\n\n\n \n\n\n\n UTML TR 2005, 3. 2005.\n \n\n\n\n
\n\n\n\n \n \n \"Time-Varying paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2005srebrotime,\n  title={Time-Varying Topic Models using Dependent Dirichlet Processes},\n  author={Srebro, Nathan and Roweis, Sam},\n  journal={UTML TR 2005},\n  volume={3},\n  year={2005},\n  url_Paper={https://pdfs.semanticscholar.org/ff4e/6ca51649b2a48a780758b3f554b907edddc7.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Dynamic Data Structure for Checking Hyperacyclicity.\n \n \n \n \n\n\n \n Liang, P.; and Srebro, N.\n\n\n \n\n\n\n Technical Report MIT Computer Science and Artificial Intelligence Laboratory, 2005.\n \n\n\n\n
\n\n\n\n \n \n \"A paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@techreport{2005liangdynamic,\n  title={A Dynamic Data Structure for Checking Hyperacyclicity},\n  author={Liang, Percy and Srebro, Nathan},\n  institution={MIT Computer Science and Artificial Intelligence Laboratory},\n  year={2005},\n  url_Paper={https://dspace.mit.edu/handle/1721.1/30514}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n How Much of a Hypertree can be Captured by Windmills?.\n \n \n \n \n\n\n \n Liang, P.; and Srebro, N.\n\n\n \n\n\n\n Technical Report MIT Computer Science and Artificial Intelligence Laboratory, 2005.\n \n\n\n\n
\n\n\n\n \n \n \"How paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@techreport{2005liangmuch,\n  title={How Much of a Hypertree can be Captured by Windmills?},\n  author={Liang, Percy and Srebro, Nathan},\n  institution={MIT Computer Science and Artificial Intelligence Laboratory},\n  year={2005},\n  url_Paper={https://dspace.mit.edu/handle/1721.1/30515}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Generalization Error Bounds for Collaborative Prediction with Low-Rank Matrices.\n \n \n \n \n\n\n \n Srebro, N.; Alon, N.; and Jaakkola, T.\n\n\n \n\n\n\n In Proceedings of the 17th International Conference on Neural Information Processing Systems (NIPS), pages 1321–1328, 2005. \n \n\n\n\n
\n\n\n\n \n \n \"Generalization paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 12 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2005srebrogeneralization,\n  title={Generalization Error Bounds for Collaborative Prediction with Low-Rank Matrices},\n  author={Srebro, Nathan and Alon, Noga and Jaakkola, Tommi},\n  booktitle={Proceedings of the 17th International Conference on Neural Information Processing Systems (NIPS)},\n  pages={1321--1328},\n  year={2005},\n  url_Paper={http://papers.nips.cc/paper/2700-generalization-error-bounds-for-collaborative-prediction-with-low-rank-matrices}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Data Structure for Maintaining Acyclicity in Hypergraphs.\n \n \n \n \n\n\n \n Liang, P.; and Srebro, N.\n\n\n \n\n\n\n 2005.\n \n\n\n\n
\n\n\n\n \n \n \"A paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{2005liangdata,\n  title={A Data Structure for Maintaining Acyclicity in Hypergraphs},\n  author={Liang, Percy and Srebro, Nathan},\n  year={2005},\n  url_Paper={http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.87.3561&rep=rep1&type=pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2004\n \n \n (10)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Maximum-margin Matrix Factorization.\n \n \n \n \n\n\n \n Srebro, N.; Rennie, J.; and Jaakkola, T. S\n\n\n \n\n\n\n In Proceedings of the 17th International Conference on Neural Information Processing Systems (NIPS), pages 1329–1336, 2004. \n \n\n\n\n
\n\n\n\n \n \n \"Maximum-margin paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2004srebromaximum,\n  title={Maximum-margin Matrix Factorization},\n  author={Srebro, Nathan and Rennie, Jason and Jaakkola, Tommi S},\n  booktitle={Proceedings of the 17th International Conference on Neural Information Processing Systems (NIPS)},\n  pages={1329--1336},\n  year={2004},\n  url_Paper={https://papers.nips.cc/paper/2655-maximum-margin-matrix-factorization}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Methods and Experiments with Bounded Tree-width Markov Networks.\n \n \n \n \n\n\n \n Liang, P.; and Srebro, N.\n\n\n \n\n\n\n Technical Report MIT Computer Science and Artificial Intelligence Laboratory, 2004.\n \n\n\n\n
\n\n\n\n \n \n \"Methods paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@techreport{2004liangmethods,\n  title={Methods and Experiments with Bounded Tree-width Markov Networks},\n  author={Liang, Percy and Srebro, Nathan},\n  institution={MIT Computer Science and Artificial Intelligence Laboratory},\n  year={2004},\n  url_Paper={https://dspace.mit.edu/bitstream/handle/1721.1/30511/MIT-CSAIL-TR-2004-081.pdf?sequence=2}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Learning With Matrix Factorizations.\n \n \n \n \n\n\n \n Srebro, N.\n\n\n \n\n\n\n Technical Report MIT Computer Science and Artificial Intelligence Laboratory, 2004.\n \n\n\n\n
\n\n\n\n \n \n \"Learning paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@techreport{srebro2004learning,\n  title={Learning With Matrix Factorizations},\n  author={Srebro, Nathan},\n  institution={MIT Computer Science and Artificial Intelligence Laboratory},\n  year={2004},\n  url_Paper={https://dspace.mit.edu/handle/1721.1/30507}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Distribution of Short Paired Duplications in Mammalian Genomes.\n \n \n \n \n\n\n \n Thomas, E.; Srebro, N.; Sebat, J.; Navin, N.; Healy, J.; Mishra, B.; and Wigler, M.\n\n\n \n\n\n\n Proceedings of the National Academy of Sciences, 101(28): 10349–10354. 2004.\n \n\n\n\n
\n\n\n\n \n \n \"Distribution paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2004thomasdistribution,\n  title={Distribution of Short Paired Duplications in Mammalian Genomes},\n  author={Thomas, Elizabeth and Srebro, Nathan and Sebat, Jonathan and Navin, Nicholas and Healy, John and Mishra, Bud and Wigler, Michael},\n  journal={Proceedings of the National Academy of Sciences},\n  volume={101},\n  number={28},\n  pages={10349--10354},\n  year={2004},\n  url_Paper={https://www.pnas.org/content/101/28/10349.short}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Linear Dependent Dimensionality Reduction.\n \n \n \n \n\n\n \n Srebro, N.; and Jaakkola, T.\n\n\n \n\n\n\n In Proceedings of the 16th International Conference on Neural Information Processing Systems (NIPS), pages 145–152, 2004. \n \n\n\n\n
\n\n\n\n \n \n \"Linear paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2004srebrolinear,\n  title={Linear Dependent Dimensionality Reduction},\n  author={Srebro, Nathan and Jaakkola, Tommi},\n  booktitle={Proceedings of the 16th International Conference on Neural Information Processing Systems (NIPS)},\n  pages={145--152},\n  year={2004},\n  url_Paper={http://papers.nips.cc/paper/2431-linear-dependent-dimensionality-reduction}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Maximum-margin Matrix Factorization.\n \n \n \n \n\n\n \n Srebro, N.; Rennie, J.; and Jaakkola, T. S\n\n\n \n\n\n\n In Proceedings of the 17th International Conference on Neural Information Processing Systems (NIPS), pages 1329–1336, 2004. \n \n\n\n\n
\n\n\n\n \n \n \"Maximum-margin paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2004srebromaximum,\n  title={Maximum-margin Matrix Factorization},\n  author={Srebro, Nathan and Rennie, Jason and Jaakkola, Tommi S},\n  booktitle={Proceedings of the 17th International Conference on Neural Information Processing Systems (NIPS)},\n  pages={1329--1336},\n  year={2004},\n  url_Paper={https://papers.nips.cc/paper/2655-maximum-margin-matrix-factorization}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Methods and Experiments with Bounded Tree-width Markov Networks.\n \n \n \n \n\n\n \n Liang, P.; and Srebro, N.\n\n\n \n\n\n\n Technical Report MIT Computer Science and Artificial Intelligence Laboratory, 2004.\n \n\n\n\n
\n\n\n\n \n \n \"Methods paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@techreport{2004liangmethods,\n  title={Methods and Experiments with Bounded Tree-width Markov Networks},\n  author={Liang, Percy and Srebro, Nathan},\n  institution={MIT Computer Science and Artificial Intelligence Laboratory},\n  year={2004},\n  url_Paper={https://dspace.mit.edu/bitstream/handle/1721.1/30511/MIT-CSAIL-TR-2004-081.pdf?sequence=2}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Learning With Matrix Factorizations.\n \n \n \n \n\n\n \n Srebro, N.\n\n\n \n\n\n\n Technical Report MIT Computer Science and Artificial Intelligence Laboratory, 2004.\n \n\n\n\n
\n\n\n\n \n \n \"Learning paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@techreport{srebro2004learning,\n  title={Learning With Matrix Factorizations},\n  author={Srebro, Nathan},\n  institution={MIT Computer Science and Artificial Intelligence Laboratory},\n  year={2004},\n  url_Paper={https://dspace.mit.edu/handle/1721.1/30507}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Distribution of Short Paired Duplications in Mammalian Genomes.\n \n \n \n \n\n\n \n Thomas, E.; Srebro, N.; Sebat, J.; Navin, N.; Healy, J.; Mishra, B.; and Wigler, M.\n\n\n \n\n\n\n Proceedings of the National Academy of Sciences, 101(28): 10349–10354. 2004.\n \n\n\n\n
\n\n\n\n \n \n \"Distribution paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2004thomasdistribution,\n  title={Distribution of Short Paired Duplications in Mammalian Genomes},\n  author={Thomas, Elizabeth and Srebro, Nathan and Sebat, Jonathan and Navin, Nicholas and Healy, John and Mishra, Bud and Wigler, Michael},\n  journal={Proceedings of the National Academy of Sciences},\n  volume={101},\n  number={28},\n  pages={10349--10354},\n  year={2004},\n  url_Paper={https://www.pnas.org/content/101/28/10349.short}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Linear Dependent Dimensionality Reduction.\n \n \n \n \n\n\n \n Srebro, N.; and Jaakkola, T.\n\n\n \n\n\n\n In Proceedings of the 16th International Conference on Neural Information Processing Systems (NIPS), pages 145–152, 2004. \n \n\n\n\n
\n\n\n\n \n \n \"Linear paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2004srebrolinear,\n  title={Linear Dependent Dimensionality Reduction},\n  author={Srebro, Nathan and Jaakkola, Tommi},\n  booktitle={Proceedings of the 16th International Conference on Neural Information Processing Systems (NIPS)},\n  pages={145--152},\n  year={2004},\n  url_Paper={http://papers.nips.cc/paper/2431-linear-dependent-dimensionality-reduction}\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2003\n \n \n (8)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n $ K $-ary Clustering with Optimal Leaf Ordering for Gene Expression Data.\n \n \n \n \n\n\n \n Bar-Joseph, Z.; Demaine, E.; Gifford, D.; Srebro, N.; Hamel, A.; and Jaakkola, T.\n\n\n \n\n\n\n Bioinformatics, 19(9): 1070–1078. 2003.\n \n\n\n\n
\n\n\n\n \n \n \"$ paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2003bark,\n  title={$ K $-ary Clustering with Optimal Leaf Ordering for Gene Expression Data},\n  author={Bar-Joseph, Ziv and Demaine, Erik and Gifford, David and Srebro, Nathan and Hamel, Ang{\\`e}le and Jaakkola, Tommi},\n  journal={Bioinformatics},\n  volume={19},\n  number={9},\n  pages={1070--1078},\n  year={2003},\n  publisher={Oxford University Press},\n  url_Paper={https://academic.oup.com/bioinformatics/article/19/9/1070/284974}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Generalized Low-rank approximations.\n \n \n \n \n\n\n \n Srebro, N.; and Jaakkola, T.\n\n\n \n\n\n\n Technical Report MIT Artificial Intelligence Laboratory, 2003.\n \n\n\n\n
\n\n\n\n \n \n \"Generalized paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@techreport{2003srebrogeneralized,\n  title={Generalized Low-rank approximations},\n  author={Srebro, Nathan and Jaakkola, Tommi},\n  institution={MIT Artificial Intelligence Laboratory},\n  year={2003},\n  url_Paper={https://dspace.mit.edu/handle/1721.1/6708}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Maximum Likelihood Bounded Tree-width Markov Networks.\n \n \n \n \n\n\n \n Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 17th Conference on Uncertainty in Artificial Intelligence (UAI), volume 143, pages 123–138, 2003. \n \n\n\n\n
\n\n\n\n \n \n \"Maximum paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2003srebromaximum,\n  title={Maximum Likelihood Bounded Tree-width Markov Networks},\n  author={Srebro, Nathan},\n  booktitle={Proceedings of the 17th Conference on Uncertainty in Artificial Intelligence (UAI)},\n  volume={143},\n  number={1},\n  pages={123--138},\n  year={2003},\n  %https://www.sciencedirect.com/science/article/pii/S0004370202003600\n  url_Paper={https://arxiv.org/ftp/arxiv/papers/1301/1301.2311.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Weighted Low-Rank Approximations.\n \n \n \n \n\n\n \n Srebro, N.; and Jaakkola, T.\n\n\n \n\n\n\n In Proceedings of the 20th International Conference on Machine Learning (ICML), pages 720–727, 2003. \n \n\n\n\n
\n\n\n\n \n \n \"Weighted paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2003srebroweighted,\n  title={Weighted Low-Rank Approximations},\n  author={Srebro, Nathan and Jaakkola, Tommi},\n  booktitle={Proceedings of the 20th International Conference on Machine Learning (ICML)},\n  pages={720--727},\n  year={2003},\n  url_Paper={https://www.aaai.org/Library/ICML/2003/icml03-094.php}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n $ K $-ary Clustering with Optimal Leaf Ordering for Gene Expression Data.\n \n \n \n \n\n\n \n Bar-Joseph, Z.; Demaine, E.; Gifford, D.; Srebro, N.; Hamel, A.; and Jaakkola, T.\n\n\n \n\n\n\n Bioinformatics, 19(9): 1070–1078. 2003.\n \n\n\n\n
\n\n\n\n \n \n \"$ paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{2003bark,\n  title={$ K $-ary Clustering with Optimal Leaf Ordering for Gene Expression Data},\n  author={Bar-Joseph, Ziv and Demaine, Erik and Gifford, David and Srebro, Nathan and Hamel, Ang{\\`e}le and Jaakkola, Tommi},\n  journal={Bioinformatics},\n  volume={19},\n  number={9},\n  pages={1070--1078},\n  year={2003},\n  publisher={Oxford University Press},\n  url_Paper={https://academic.oup.com/bioinformatics/article/19/9/1070/284974}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Generalized Low-rank approximations.\n \n \n \n \n\n\n \n Srebro, N.; and Jaakkola, T.\n\n\n \n\n\n\n Technical Report MIT Artificial Intelligence Laboratory, 2003.\n \n\n\n\n
\n\n\n\n \n \n \"Generalized paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@techreport{2003srebrogeneralized,\n  title={Generalized Low-rank approximations},\n  author={Srebro, Nathan and Jaakkola, Tommi},\n  institution={MIT Artificial Intelligence Laboratory},\n  year={2003},\n  url_Paper={https://dspace.mit.edu/handle/1721.1/6708}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Maximum Likelihood Bounded Tree-width Markov Networks.\n \n \n \n \n\n\n \n Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 17th Conference on Uncertainty in Artificial Intelligence (UAI), volume 143, pages 123–138, 2003. \n \n\n\n\n
\n\n\n\n \n \n \"Maximum paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2003srebromaximum,\n  title={Maximum Likelihood Bounded Tree-width Markov Networks},\n  author={Srebro, Nathan},\n  booktitle={Proceedings of the 17th Conference on Uncertainty in Artificial Intelligence (UAI)},\n  volume={143},\n  number={1},\n  pages={123--138},\n  year={2003},\n  %https://www.sciencedirect.com/science/article/pii/S0004370202003600\n  url_Paper={https://arxiv.org/ftp/arxiv/papers/1301/1301.2311.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Weighted Low-Rank Approximations.\n \n \n \n \n\n\n \n Srebro, N.; and Jaakkola, T.\n\n\n \n\n\n\n In Proceedings of the 20th International Conference on Machine Learning (ICML), pages 720–727, 2003. \n \n\n\n\n
\n\n\n\n \n \n \"Weighted paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{2003srebroweighted,\n  title={Weighted Low-Rank Approximations},\n  author={Srebro, Nathan and Jaakkola, Tommi},\n  booktitle={Proceedings of the 20th International Conference on Machine Learning (ICML)},\n  pages={720--727},\n  year={2003},\n  url_Paper={https://www.aaai.org/Library/ICML/2003/icml03-094.php}\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2001\n \n \n (8)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Optimal Arrangement of Leaves in the Tree Representing Hierarchical Clustering of Gene Expression Data.\n \n \n \n \n\n\n \n Biedl, T.; Brejová, B.; Demaine, E.; Hamel, A.; Jaakkola, T.; Srebro, N.; and Vinar, T.\n\n\n \n\n\n\n Technical Report University of Waterloo Department of Computer Science, 2001.\n \n\n\n\n
\n\n\n\n \n \n \"Optimal paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@techreport{2001baroptimal,\n  title={Optimal Arrangement of Leaves in the Tree Representing Hierarchical Clustering of Gene Expression Data},\n  author={Biedl, Therese and Brejov{\\'a}, Bro{\\v{n}}a and Demaine, Erik and Hamel, Ang{\\`e}le and Jaakkola, Tommi and Srebro, Nathan and Vinar, Tom{\\'a}{\\v{s}}},\n  institution={University of Waterloo Department of Computer Science},\n  year={2001},\n  url_Paper={http://compbio.compbio.fmph.uniba.sk/papers/01exprtr.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Maximum Likelihood Markov Hypertrees.\n \n \n \n \n\n\n \n Srebro, N.; Karger, D.; and Jaakkola, T.\n\n\n \n\n\n\n Technical Report MIT Artificial Intelligence Laboratory, 2001.\n \n\n\n\n
\n\n\n\n \n \n \"Maximum paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@techreport{2001srebromaximum,\n  title={Maximum Likelihood Markov Hypertrees},\n  author={Srebro, Nathan and Karger, David and Jaakkola, Tommi},\n  institution={MIT Artificial Intelligence Laboratory},\n  year={2001},\n  url_Paper={https://pdfs.semanticscholar.org/181f/2860d781b9588603d3e914d58e7737535970.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Sparse Matrix Factorization of Gene Expression Data.\n \n \n \n \n\n\n \n Srebro, N.; and Jaakkola, T.\n\n\n \n\n\n\n Technical Report MIT Artificial Intelligence Laboratory, 2001.\n \n\n\n\n
\n\n\n\n \n \n \"Sparse paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@techreport{2001srebrosparse,\n  title={Sparse Matrix Factorization of Gene Expression Data},\n  author={Srebro, Nathan and Jaakkola, Tommi},\n  institution={MIT Artificial Intelligence Laboratory},\n  year={2001},\n  url_Paper={http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.298.2057&rep=rep1&type=pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Learning Markov Networks: Maximum Bounded Tree-Width Graphs.\n \n \n \n \n\n\n \n Karger, D.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 12th Annual ACM-SIAM Symposium on Discrete Algorithms (SODA), pages 392–401, 2001. Society for Industrial and Applied Mathematics\n \n\n\n\n
\n\n\n\n \n \n \"Learning paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{karger2001learning,\n  title={Learning Markov Networks: Maximum Bounded Tree-Width Graphs},\n  author={Karger, David and Srebro, Nathan},\n  booktitle={Proceedings of the 12th Annual ACM-SIAM Symposium on Discrete Algorithms (SODA)},\n  pages={392--401},\n  year={2001},\n  organization={Society for Industrial and Applied Mathematics},\n  url_Paper={https://dl.acm.org/doi/10.5555/365411.365486}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Optimal Arrangement of Leaves in the Tree Representing Hierarchical Clustering of Gene Expression Data.\n \n \n \n \n\n\n \n Biedl, T.; Brejová, B.; Demaine, E.; Hamel, A.; Jaakkola, T.; Srebro, N.; and Vinar, T.\n\n\n \n\n\n\n Technical Report University of Waterloo Department of Computer Science, 2001.\n \n\n\n\n
\n\n\n\n \n \n \"Optimal paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@techreport{2001baroptimal,\n  title={Optimal Arrangement of Leaves in the Tree Representing Hierarchical Clustering of Gene Expression Data},\n  author={Biedl, Therese and Brejov{\\'a}, Bro{\\v{n}}a and Demaine, Erik and Hamel, Ang{\\`e}le and Jaakkola, Tommi and Srebro, Nathan and Vinar, Tom{\\'a}{\\v{s}}},\n  institution={University of Waterloo Department of Computer Science},\n  year={2001},\n  url_Paper={http://compbio.compbio.fmph.uniba.sk/papers/01exprtr.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Maximum Likelihood Markov Hypertrees.\n \n \n \n \n\n\n \n Srebro, N.; Karger, D.; and Jaakkola, T.\n\n\n \n\n\n\n Technical Report MIT Artificial Intelligence Laboratory, 2001.\n \n\n\n\n
\n\n\n\n \n \n \"Maximum paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@techreport{2001srebromaximum,\n  title={Maximum Likelihood Markov Hypertrees},\n  author={Srebro, Nathan and Karger, David and Jaakkola, Tommi},\n  institution={MIT Artificial Intelligence Laboratory},\n  year={2001},\n  url_Paper={https://pdfs.semanticscholar.org/181f/2860d781b9588603d3e914d58e7737535970.pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Sparse Matrix Factorization of Gene Expression Data.\n \n \n \n \n\n\n \n Srebro, N.; and Jaakkola, T.\n\n\n \n\n\n\n Technical Report MIT Artificial Intelligence Laboratory, 2001.\n \n\n\n\n
\n\n\n\n \n \n \"Sparse paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@techreport{2001srebrosparse,\n  title={Sparse Matrix Factorization of Gene Expression Data},\n  author={Srebro, Nathan and Jaakkola, Tommi},\n  institution={MIT Artificial Intelligence Laboratory},\n  year={2001},\n  url_Paper={http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.298.2057&rep=rep1&type=pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Learning Markov Networks: Maximum Bounded Tree-Width Graphs.\n \n \n \n \n\n\n \n Karger, D.; and Srebro, N.\n\n\n \n\n\n\n In Proceedings of the 12th Annual ACM-SIAM Symposium on Discrete Algorithms (SODA), pages 392–401, 2001. Society for Industrial and Applied Mathematics\n \n\n\n\n
\n\n\n\n \n \n \"Learning paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{karger2001learning,\n  title={Learning Markov Networks: Maximum Bounded Tree-Width Graphs},\n  author={Karger, David and Srebro, Nathan},\n  booktitle={Proceedings of the 12th Annual ACM-SIAM Symposium on Discrete Algorithms (SODA)},\n  pages={392--401},\n  year={2001},\n  organization={Society for Industrial and Applied Mathematics},\n  url_Paper={https://dl.acm.org/doi/10.5555/365411.365486}\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2000\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Maximum Likelihood Markov Networks: An Algorithmic Approach.\n \n \n \n \n\n\n \n Srebro, N.\n\n\n \n\n\n\n Master's thesis, Massachusetts Institute of Technology, 2000.\n \n\n\n\n
\n\n\n\n \n \n \"Maximum paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@mastersthesis{2000srebromaximum,\n  title={Maximum Likelihood Markov Networks: An Algorithmic Approach},\n  author={Srebro, Nathan},\n  year={2000},\n  school={Massachusetts Institute of Technology},\n  url_Paper={http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.21.8068&rep=rep1&type=pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Maximum Likelihood Markov Networks: An Algorithmic Approach.\n \n \n \n \n\n\n \n Srebro, N.\n\n\n \n\n\n\n Master's thesis, Massachusetts Institute of Technology, 2000.\n \n\n\n\n
\n\n\n\n \n \n \"Maximum paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@mastersthesis{2000srebromaximum,\n  title={Maximum Likelihood Markov Networks: An Algorithmic Approach},\n  author={Srebro, Nathan},\n  year={2000},\n  school={Massachusetts Institute of Technology},\n  url_Paper={http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.21.8068&rep=rep1&type=pdf}\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n undefined\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Learning with Multiple Similarity Functions.\n \n \n \n\n\n \n Balcan, M.; Blum, A.; and Srebro, N.\n\n\n \n\n\n\n \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{balcanlearning,\n  title={Learning with Multiple Similarity Functions},\n  author={Balcan, Maria-Florina and Blum, Avrim and Srebro, Nathan}\n}\n\n  
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Learning with Multiple Similarity Functions.\n \n \n \n\n\n \n Balcan, M.; Blum, A.; and Srebro, N.\n\n\n \n\n\n\n \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{balcanlearning,\n  title={Learning with Multiple Similarity Functions},\n  author={Balcan, Maria-Florina and Blum, Avrim and Srebro, Nathan}\n}\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n\n\n\n
\n\n\n \n\n \n \n \n \n\n
\n"}; document.write(bibbase_data.data);