Excellent! Next you can create a new website with this list, or embed it in an existing web page by copying & pasting any of the following snippets.
JavaScript (easiest)

<script src="https://bibbase.org/show?bib=https://dl.dropbox.com/s/26018h26wgh5c2o/bibbase.bib&jsonp=1&theme=side&fullnames=1&jsonp=1"></script>
PHP

<?php
  $contents = file_get_contents("https://bibbase.org/show?bib=https://dl.dropbox.com/s/26018h26wgh5c2o/bibbase.bib&jsonp=1&theme=side&fullnames=1");
  print_r($contents);
?>
iFrame (not recommended)

<iframe src="https://bibbase.org/show?bib=https://dl.dropbox.com/s/26018h26wgh5c2o/bibbase.bib&jsonp=1&theme=side&fullnames=1"></iframe>
For more details see the documentation.

2024 (2)

The more the merrier! On the complexity of finding multicollisions, with connections to codes and lattices.
Huck Bennett; Surendra Ghentiyala; and Noah Stephens-Davidowitz.
2024.

@unpublished{BGS24,
  title = {The more the merrier! On the complexity of finding multicollisions, with connections to codes and lattices},
  author = {Huck Bennett and Surendra Ghentiyala and Noah {Stephens-Davidowitz}},
  year = {2024},
  url = {https://eccc.weizmann.ac.il/report/2024/018},
}

A reverse Minkowski theorem.
Oded Regev; and Noah Stephens-Davidowitz.
Annals of Mathematics. 2024. A preliminary version appeared in STOC 2017.

@article{RSReverseMinkowski17,
  title = {A reverse {Minkowski} theorem},
  abstract = {$ \newcommand{\R}{\mathbb{R}} \newcommand{\lat}{\mathcal{L}} $We prove a conjecture due to Dadush, showing that if $\lat \subset \R^n$ is a lattice such that $\det(\lat') \ge 1$ for all sublattices $\lat' \subseteq \lat$, then \[\sum \exp(-\pi t^2 \| y\|^2) \le 3/2 \; , \] where $t := 10(\log n + 2)$ and the sum is over all $y \in L$. From this we also derive bounds on the number of short lattice vectors and on the covering radius.},
  url = {http://arxiv.org/abs/1611.05979},
  journal = {Annals of Mathematics},
  author = {Regev, Oded and {Stephens-Davidowitz}, Noah},
  year = {2024},
  url_TCS+_talk = {http://www.youtube.com/watch?v=mgDNeg3U5TQ},
  url_IAS_talk = {http://www.youtube.com/watch?v=9mvPxAKj5BU},
  url_Bourbaki_seminar = {http://www.youtube.com/watch?v=j7YvtVvv3qs},
  note = {A preliminary version appeared in STOC 2017.}
}

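The theorem in the abstract above can be sanity-checked on the integer lattice Z^n, all of whose sublattices have determinant at least 1. The short Python sketch below is not from the paper; it simply evaluates the Gaussian mass sum_{y in Z^n} exp(-pi t^2 ||y||^2) with t = 10(log n + 2) (natural log assumed here), using the fact that for Z^n the sum factors into n one-dimensional sums, and confirms it stays below 3/2.

import math

def gaussian_mass_Zn(n, t, kmax=20):
    # Sum over y in Z^n of exp(-pi * t^2 * ||y||^2). For Z^n the sum factors
    # into a product of n identical one-dimensional sums over the integers.
    one_dim = 1.0 + 2.0 * sum(math.exp(-math.pi * (t * k) ** 2) for k in range(1, kmax + 1))
    return one_dim ** n

for n in (2, 10, 100, 1000):
    t = 10 * (math.log(n) + 2)  # the parameter from the theorem statement
    print(n, gaussian_mass_Zn(n, t), "<= 1.5")
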
2023 (7)

On the randomized complexity of range avoidance, with applications to cryptography and metacomplexity.
Eldon Chung; Alexander Golovnev; Zeyong Li; Maciej Obremski; Sidhant Saraogi; and Noah Stephens-Davidowitz.
2023.

@unpublished{CGLOSS23,
  title = {On the randomized complexity of range avoidance, with applications to cryptography and metacomplexity},
  author = {Eldon Chung and Alexander Golovnev and Zeyong Li and Maciej Obremski and Sidhant Saraogi and Noah {Stephens-Davidowitz}},
  year = {2023},
  url = {https://eccc.weizmann.ac.il/report/2023/193},
}

Recursive lattice reduction—A framework for finding short lattice vectors.
Divesh Aggarwal; Thomas Espitau; Spencer Peters; and Noah Stephens-Davidowitz.
2023.

@unpublished{AEPD23Recursive,
  title = {Recursive lattice reduction---A framework for finding short lattice vectors},
  author = {Aggarwal, Divesh and Espitau, Thomas and Peters, Spencer and {Stephens-Davidowitz}, Noah},
  year = {2023},
  url = {https://arxiv.org/abs/2311.15064},
}

A simple proof of a reverse Minkowski theorem for integral lattices.
Oded Regev; and Noah Stephens-Davidowitz.
2023.

@unpublished{RSSimpleProofReverse2023,
  title = {A simple proof of a reverse {{Minkowski}} theorem for integral lattices},
  author = {Regev, Oded and {Stephens-Davidowitz}, Noah},
  year = {2023},
  url = {http://arxiv.org/abs/2306.03697},
}

Lattice problems beyond polynomial time.
Divesh Aggarwal; Huck Bennett; Zvika Brakerski; Alexander Golovnev; Rajendra Kumar; Zeyong Li; Spencer Peters; Noah Stephens-Davidowitz; and Vinod Vaikuntanathan.
In STOC, 2023.

@inproceedings{ABB+LatticeProblemsPolynomial2022,
  title = {Lattice problems beyond polynomial time},
  author = {Aggarwal, Divesh and Bennett, Huck and Brakerski, Zvika and Golovnev, Alexander and Kumar, Rajendra and Li, Zeyong and Peters, Spencer and {Stephens-Davidowitz}, Noah and Vaikuntanathan, Vinod},
  year = {2023},
  booktitle = {STOC},
  url = {http://arxiv.org/abs/2211.11693},
}

Revisiting time-space tradeoffs for function inversion.
Alexander Golovnev; Siyao Guo; Spencer Peters; and Noah Stephens-Davidowitz.
In CRYPTO, 2023.

@inproceedings{GGPSRevisitingTimespaceTradeoffs2022,
  title = {Revisiting time-space tradeoffs for function inversion},
  author = {Golovnev, Alexander and Guo, Siyao and Peters, Spencer and {Stephens-Davidowitz}, Noah},
  year = {2023},
  url = {https://eccc.weizmann.ac.il/report/2022/145/},
  booktitle = {CRYPTO},
}

Just how hard are rotations of $ℤ^n$? Algorithms and cryptography with the simplest lattice.
Huck Bennett; Atul Ganju; Pura Peetathawatchai; and Noah Stephens-Davidowitz.
In Eurocrypt, 2023.

@inproceedings{BGPSJustHowHard2021,
  title = {Just how hard are rotations of $\mathbb{Z}^n$? Algorithms and cryptography with the simplest lattice},
  author = {Bennett, Huck and Ganju, Atul and Peetathawatchai, Pura and {Stephens-Davidowitz}, Noah},
  year = {2023},
  url = {https://eprint.iacr.org/2021/1548},
  booktitle = {Eurocrypt},
}

On the (im)possibility of branch-and-bound search-to-decision reductions for approximate optimization.
Alexander Golovnev; Siyao Guo; Spencer Peters; and Noah Stephens-Davidowitz.
In APPROX, 2023.

@inproceedings{GGPSImPossibilityBranchandbound2021,
  title = {On the (im)possibility of branch-and-bound search-to-decision reductions for approximate optimization},
  author = {Golovnev, Alexander and Guo, Siyao and Peters, Spencer and {Stephens-Davidowitz}, Noah},
  year = {2023},
  booktitle = {APPROX},
  url = {https://eccc.weizmann.ac.il/report/2021/141/},
}

2022 (2)

A tight reverse Minkowski inequality for the Epstein zeta function.
Yael Eisenberg; Oded Regev; and Noah Stephens-Davidowitz.
Proceedings of the AMS. 2022.

@article{ERSTightReverseMinkowski2022,
  title = {A tight reverse {{Minkowski}} inequality for the {{Epstein}} zeta function},
  author = {Eisenberg, Yael and Regev, Oded and {Stephens-Davidowitz}, Noah},
  year = {2022},
  journal = {Proceedings of the AMS},
  url = {http://arxiv.org/abs/2201.05201},
}

On seedless PRNGs and premature next.
Sandro Coretti; Yevgeniy Dodis; Harish Karthikeyan; Noah Stephens-Davidowitz; and Stefano Tessaro.
In ITC, 2022.

@inproceedings{CDK+SeedlessPRNGsPremature2022,
  title = {On seedless {{PRNGs}} and premature next},
  booktitle = {{{ITC}}},
  author = {Coretti, Sandro and Dodis, Yevgeniy and Karthikeyan, Harish and {Stephens-Davidowitz}, Noah and Tessaro, Stefano},
  year = {2022},
  url = {https://eprint.iacr.org/2022/558},
}

2021 (6)

No time to hash: On super efficient entropy accumulation.
Yevgeniy Dodis; Siyao Guo; Noah Stephens-Davidowitz; and Zhiye Xie.
In CRYPTO, 2021.

@inproceedings{DGSX21a,
  title = {No time to hash: {On} super efficient entropy accumulation},
  url = {https://eprint.iacr.org/2021/523},
  year = {2021},
  booktitle = {CRYPTO},
  author = {Dodis, Yevgeniy and Guo, Siyao and {Stephens-Davidowitz}, Noah and Xie, Zhiye},
}

Online linear extractors for independent sources.
Yevgeniy Dodis; Siyao Guo; Noah Stephens-Davidowitz; and Zhiye Xie.
In ITC, 2021.

@inproceedings{DGSX21b,
  title = {Online linear extractors for independent sources},
  author = {Dodis, Yevgeniy and Guo, Siyao and {Stephens-Davidowitz}, Noah and Xie, Zhiye},
  booktitle = {ITC},
  year = {2021},
  url = {https://eprint.iacr.org/2021/1002},
}

On the hardness of average-case $k$-SUM.
Zvika Brakerski; Noah Stephens-Davidowitz; and Vinod Vaikuntanathan.
In RANDOM, 2021.

@inproceedings{BSV21,
  title = {On the hardness of average-case {$k$-SUM}},
  url = {https://arxiv.org/abs/2010.08821},
  year = {2021},
  author = {Brakerski, Zvika and {Stephens-Davidowitz}, Noah and Vaikuntanathan, Vinod},
  booktitle = {RANDOM},
}

Fine-grained hardness of CVP(P)—Everything that we can prove (and nothing else).
Divesh Aggarwal; Huck Bennett; Alexander Golovnev; and Noah Stephens-Davidowitz.
In SODA, 2021.

@inproceedings{ABGSFinegrainedHardness19,
  title = {Fine-grained hardness of {{CVP}}({{P}})--- {{Everything}} that we can prove (and nothing else)},
  url = {http://arxiv.org/abs/1911.02440},
  author = {Aggarwal, Divesh and Bennett, Huck and Golovnev, Alexander and {Stephens-Davidowitz}, Noah},
  year = {2021},
  booktitle = {SODA},
  url_Simons_blog_post = {https://blog.simons.berkeley.edu/2020/05/fine-grained-hardness-of-lattice-problems-open-questions/},
}

Dimension-preserving reductions between SVP and CVP in different $p$-norms.
Divesh Aggarwal; Yanlin Chen; Rajendra Kumar; Zeyong Li; and Noah Stephens-Davidowitz.
In SODA, 2021.

@inproceedings{ACKLS21,
  title = {Dimension-preserving reductions between {{{SVP}}} and {{{CVP}}} in different $p$-norms},
  author = {Aggarwal, Divesh and Chen, Yanlin and Kumar, Rajendra and Li, Zeyong and {Stephens-Davidowitz}, Noah},
  booktitle = {SODA},
  year = {2021},
  url = {http://arxiv.org/abs/2104.06576},
}

A $2^{n/2}$-time algorithm for $\sqrt{n}$-SVP and $\sqrt{n}$-Hermite SVP, and an improved time-approximation tradeoff for (H)SVP.
Divesh Aggarwal; Zeyong Li; and Noah Stephens-Davidowitz.
In Eurocrypt, 2021.

@inproceedings{ALSTimeAlgorithm20,
  title = {A $2^{n/2}$-time algorithm for $\sqrt{n}$-{{SVP}} and $\sqrt{n}$-{{Hermite SVP}}, and an improved time-approximation tradeoff for ({{H}}){{SVP}}},
  author = {Aggarwal, Divesh and Li, Zeyong and {Stephens-Davidowitz}, Noah},
  booktitle = {Eurocrypt},
  year = {2021},
  url = {http://arxiv.org/abs/2007.09556}
}

2020 (3)

Lattice reduction for modules, or how to reduce ModuleSVP to ModuleSVP.
Tamalika Mukherjee; and Noah Stephens-Davidowitz.
In CRYPTO, 2020.

@inproceedings{MSLatticeReduction19,
  title = {Lattice reduction for modules, or how to reduce {ModuleSVP} to {ModuleSVP}},
  url = {http://eprint.iacr.org/2019/1142},
  author = {Mukherjee, Tamalika and {Stephens-Davidowitz}, Noah},
  year = {2020},
  booktitle = {CRYPTO},
}

Extractor lower bounds, revisited.
Divesh Aggarwal; Siyao Guo; Maciej Obremski; João Ribeiro; and Noah Stephens-Davidowitz.
In RANDOM, 2020.

@inproceedings{AGO+ExtractorLower19,
  title = {Extractor lower bounds, revisited},
  author = {Aggarwal, Divesh and Guo, Siyao and Obremski, Maciej and Ribeiro, Jo\~ao and {Stephens-Davidowitz}, Noah},
  url = {https://eccc.weizmann.ac.il/report/2019/173/},
  year = {2020},
  booktitle = {RANDOM},
}

Slide reduction, revisited—Filling the gaps in SVP approximation.
Divesh Aggarwal; Jianwei Li; Phong Q. Nguyen; and Noah Stephens-Davidowitz.
In CRYPTO, 2020.

@inproceedings{ALNSSlideReduction19,
  title = {Slide reduction, revisited---{F}illing the gaps in {SVP} approximation},
  url = {http://arxiv.org/abs/1908.03724},
  author = {Aggarwal, Divesh and Li, Jianwei and Nguyen, Phong Q. and {Stephens-Davidowitz}, Noah},
  year = {2020},
  abstract = {We show how to generalize Gama and Nguyen's slide reduction algorithm [STOC '08] for solving the approximate Shortest Vector Problem over lattices (SVP). As a result, we show the fastest provably correct algorithm for $\delta$-approximate SVP for all approximation factors $n^{1/2+\epsilon} \leq \delta \leq n^{O(1)}$. This is the range of approximation factors most relevant for cryptography.},
  booktitle = {CRYPTO},
}

2019 (4)

An improved constant in Banaszczyk's transference theorem.
Divesh Aggarwal; and Noah Stephens-Davidowitz.
2019.

@unpublished{ASImprovedConstant19,
  title = {An improved constant in {{Banaszczyk}}'s transference theorem},
  url = {http://arxiv.org/abs/1907.09020},
  author = {Aggarwal, Divesh and {Stephens-Davidowitz}, Noah},
  year = {2019},
  abstract = {$ \newcommand{\R}{\ensuremath{\mathbb{R}}} \newcommand{\lat}{\mathcal{L}} \newcommand{\ensuremath}[1]{#1} $We show that \[ \mu(\lat) \lambda_1(\lat^*) < \big( 0.1275 + o(1) \big) \cdot n \; , \] where $\mu(\lat)$ is the covering radius of an $n$-dimensional lattice $\lat \subset \R^n$ and $\lambda_1(\lat^*)$ is the length of the shortest non-zero vector in the dual lattice $\lat^*$. This improves on Banaszczyk's celebrated transference theorem (Math. Annal., 1993) by about 20%. Our proof follows Banaszczyk exactly, except in one step, where we replace a Fourier-analytic bound on the discrete Gaussian mass with a slightly stronger bound based on packing. The packing-based bound that we use was already proven by Aggarwal, Dadush, Regev, and Stephens-Davidowitz (STOC, 2015) in a very different context. Our contribution is therefore simply the observation that this implies a better transference theorem.},
}

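As a concrete point of reference for the quantities in the abstract above (my own illustration, not taken from the paper), the integer lattice Z^n has covering radius mu(Z^n) = sqrt(n)/2 (the deep hole (1/2, ..., 1/2)), is its own dual, and has lambda_1 = 1, so mu * lambda_1 = sqrt(n)/2 sits well below 0.1275 * n once n is moderately large; since the bound carries an o(1) term, very small n is not meaningful here.

import math

# Compare mu(Z^n) * lambda_1((Z^n)^*) = sqrt(n)/2 * 1 against the bound 0.1275 * n.
for n in (25, 100, 1000, 10000):
    product = math.sqrt(n) / 2
    print(n, round(product, 3), round(0.1275 * n, 3), product < 0.1275 * n)
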
Kissing numbers and transference theorems from generalized tail bounds.
Stephen D. Miller; and Noah Stephens-Davidowitz.
SIDMA, 33(3). 2019.

@article{MSKissing18,
  title = {Kissing numbers and transference theorems from generalized tail bounds},
  abstract = {We generalize Banaszczyk's seminal tail bound for the Gaussian mass of a lattice to a wide class of test functions. We therefore obtain quite general transference bounds, as well as bounds on the number of lattice points contained in certain bodies. As example applications, we bound the lattice kissing number in $\ell_p$ norms by $e^{(n+ o(n))/p}$ for $0 < p \leq 2$, and also give a proof of a new transference bound in the $\ell_1$ norm.},
  url = {http://arxiv.org/abs/1802.05708},
  author = {Miller, Stephen D. and {Stephens-Davidowitz}, Noah},
  journal = {SIDMA},
  year = {2019},
  volume = {33},
  number = {3},
}

SETH-hardness of coding problems.
Noah Stephens-Davidowitz; and Vinod Vaikuntanathan.
In FOCS, 2019.

@inproceedings{SVCodeSETH19,
  title = {{SETH}-hardness of coding problems},
  booktitle = {FOCS},
  author = {{Stephens-Davidowitz}, Noah and Vaikuntanathan, Vinod},
  url = {http://eccc.weizmann.ac.il/report/2019/159/},
  year = {2019},
  abstract = {We show that assuming the strong exponential-time hypothesis (SETH), there are {\em no non-trivial algorithms} for the nearest codeword problem (NCP), the minimum distance problem (MDP), or the nearest codeword problem with preprocessing (NCPP) on linear codes over any finite field. More precisely, we show that there are no NCP, MDP, or NCPP algorithms running in time $q^{(1-\epsilon)n}$ for any constant $\epsilon>0$ for codes with $q^n$ codewords. (In the case of NCPP, we assume non-uniform SETH.)

Our results are inspired by recent similar lower bounds for computational lattice problems. However, our results are qualitatively quite different. In fact, our lower bounds show a strong \emph{separation} between lattices and codes: these coding problems have no non-trivial algorithms while highly non-trivial algorithms are known for the analogous lattice problems.

We also show that there are no sub-exponential-time algorithms for $\gamma$-approximate versions of these problems for some constant $\gamma > 1$, under different versions of the exponential-time hypothesis.},
}

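For reference, the "trivial" algorithm that the abstract above measures against is plain enumeration of all q^n codewords. The sketch below is my own illustration (not code from the paper): it solves the Nearest Codeword Problem for a code given by an n x m generator matrix over F_q by brute force, which costs q^n encodings; the result says this exponent cannot be improved to (1 - epsilon) n under SETH.

import itertools

def nearest_codeword_bruteforce(G, t, q=2):
    # Enumerate all q^n messages x, encode each as x*G (mod q), and return the
    # codeword closest to the target t in Hamming distance, with that distance.
    # G is a list of n rows of length m with entries in {0, ..., q-1}.
    n, m = len(G), len(G[0])
    best, best_dist = None, m + 1
    for x in itertools.product(range(q), repeat=n):
        c = [sum(x[i] * G[i][j] for i in range(n)) % q for j in range(m)]
        d = sum(1 for j in range(m) if c[j] != t[j])
        if d < best_dist:
            best, best_dist = c, d
    return best, best_dist

# Tiny example over F_2: a [4, 2] code and a target word.
G = [[1, 0, 1, 1],
     [0, 1, 1, 0]]
print(nearest_codeword_bruteforce(G, [1, 1, 0, 0]))
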
A time-distance trade-off for GDD with preprocessing—Instantiating the DLW heuristic.
Noah Stephens-Davidowitz.
In CCC, 2019.

@inproceedings{SteTimedistanceTradeoff19,
  abstract = {$ \newcommand{\Otil}{\ensuremath{\widetilde{O}}} \newcommand{\problem}[1]{\ensuremath{\mathsf{#1}}} \newcommand{\gapspp}{\problem{GapSPP}} \newcommand{\oNP}{\mathsf{coNP}} \newcommand{\eps}{\epsilon} \newcommand{\ensuremath}[1]{#1} \newcommand{\R}{\mathbb{R}} \newcommand{\lat}{\mathcal{L}} $For $0 \leq \alpha \leq 1/2$, we show an algorithm that does the following. Given appropriate preprocessing $P(\lat)$ consisting of $N_\alpha := 2^{O(n^{1-2\alpha} + \log n)}$ vectors in some lattice $\lat \subset \R^n$ and a target vector $t \in \R^n$, the algorithm finds $y \in \lat$ such that $\|y- t\| \leq n^{1/2 + \alpha} \eta(\lat)$ in time $\mathrm{poly}(n) \cdot N_\alpha$, where $\eta(\lat)$ is the smoothing parameter of the lattice.

The algorithm itself is very simple and was originally studied by Doulgerakis, Laarhoven, and de Weger (to appear in PQCrypto, 2019), who proved its correctness under certain reasonable heuristic assumptions on the preprocessing $P(\lat)$ and target $t$. Our primary contribution is a choice of preprocessing that allows us to prove correctness without any heuristic assumptions.

Our main motivation for studying this is the recent breakthrough algorithm for IdealSVP due to Hanrot, Pellet--Mary, and Stehl\'e (to appear in Eurocrypt, 2019), which uses the DLW algorithm as a key subprocedure. In particular, our result implies that the HPS IdealSVP algorithm can be made to work with fewer heuristic assumptions.

Our only technical tool is the discrete Gaussian distribution over $\lat$, and in particular, a lemma showing that the one-dimensional projections of this distribution behave very similarly to the continuous Gaussian. This lemma might be of independent interest.},
  title = {A time-distance trade-off for {GDD} with preprocessing---{Instantiating} the {DLW} heuristic},
  url = {http://arxiv.org/abs/1902.08340},
  author = {{Stephens-Davidowitz}, Noah},
  year = {2019},
  booktitle = {CCC},
}

2018 (3)

(Gap/S)ETH Hardness of SVP.
Divesh Aggarwal; and Noah Stephens-Davidowitz.
In STOC, 2018.

@inproceedings{ASGapETH18,
  title = {{(Gap/S)ETH} Hardness of {SVP}},
  abstract = {$ \newcommand{\problem}[1]{\ensuremath{\mathrm{#1}} } \newcommand{\SVP}{\problem{SVP}} \newcommand{\ensuremath}[1]{#1} $We prove the following quantitative hardness results for the Shortest Vector Problem in the $\ell_p$ norm ($\SVP_p$), where $n$ is the rank of the input lattice. $\bullet$ For "almost all" $p > p_0 \approx 2.1397$, there is no $2^{n/C_p}$-time algorithm for $\SVP_p$ for some explicit constant $C_p > 0$ unless the (randomized) Strong Exponential Time Hypothesis (SETH) is false. $\bullet$ For any $p > 2$, there is no $2^{o(n)}$-time algorithm for $\SVP_p$ unless the (randomized) Gap-Exponential Time Hypothesis (Gap-ETH) is false. Furthermore, for each $p > 2$, there exists a constant $\gamma_p > 1$ such that the same result holds even for $\gamma_p$-approximate $\SVP_p$. $\bullet$ There is no $2^{o(n)}$-time algorithm for $\SVP_p$ for any $1 \leq p \leq 2$ unless either (1) (non-uniform) Gap-ETH is false; or (2) there is no family of lattices with exponential kissing number in the $\ell_2$ norm. Furthermore, for each $1 \leq p \leq 2$, there exists a constant $\gamma_p > 1$ such that the same result holds even for $\gamma_p$-approximate $\SVP_p$.},
  url = {http://arxiv.org/abs/1712.00942},
  booktitle = {STOC},
  author = {Aggarwal, Divesh and {Stephens-Davidowitz}, Noah},
  year = {2018}
}

Just take the average! An embarrassingly simple $2^n$-time algorithm for SVP (and CVP).
Divesh Aggarwal; and Noah Stephens-Davidowitz.
In SOSA, 2018.

@inproceedings{ASJustTake18,
  title = {Just take the average! An embarrassingly simple $2^n$-time algorithm for {SVP} (and {CVP})},
  abstract = {We show a $2^{n+o(n)}$-time (and space) algorithm for the Shortest Vector Problem on lattices (SVP) that works by repeatedly running an embarrassingly simple "pair and average" sieving-like procedure on a list of lattice vectors. This matches the running time (and space) of the current fastest known algorithm, due to Aggarwal, Dadush, Regev, and Stephens-Davidowitz (ADRS, in STOC, 2015), with a far simpler algorithm. Our algorithm is in fact a modification of the ADRS algorithm, with a certain careful rejection sampling step removed. The correctness of our algorithm follows from a more general "meta-theorem," showing that such rejection sampling steps are unnecessary for a certain class of algorithms and use cases. In particular, this also applies to the related $2^{n + o(n)}$-time algorithm for the Closest Vector Problem (CVP), due to Aggarwal, Dadush, and Stephens-Davidowitz (ADS, in FOCS, 2015), yielding a similar embarrassingly simple algorithm for $\gamma$-approximate CVP for any $\gamma = 1+2^{-o(n/\log n)}$. (We can also remove the rejection sampling procedure from the $2^{n+o(n)}$-time ADS algorithm for exact CVP, but the resulting algorithm is still quite complicated.)},
  url = {http://arxiv.org/abs/1709.01535},
  booktitle = {SOSA},
  author = {Aggarwal, Divesh and {Stephens-Davidowitz}, Noah},
  year = {2018}
}

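The "pair and average" step referred to in the abstract above admits a very short description: two lattice vectors whose coordinates (in a fixed basis) agree mod 2 lie in the same coset of 2L, so their average is again a lattice vector. The Python sketch below is only my illustration of that single combination step on coordinate vectors; the actual algorithm starts from discrete Gaussian samples and comes with an analysis that is not reproduced here.

import random
from collections import defaultdict

def pair_and_average(coords):
    # One "pair and average" round: bucket coordinate vectors by their parities
    # (i.e., by coset mod 2L), then replace each pair within a bucket by its
    # coordinate-wise average, which is again an integer coordinate vector.
    buckets = defaultdict(list)
    for v in coords:
        buckets[tuple(c % 2 for c in v)].append(v)
    out = []
    for bucket in buckets.values():
        random.shuffle(bucket)
        for v1, v2 in zip(bucket[0::2], bucket[1::2]):
            out.append(tuple((a + b) // 2 for a, b in zip(v1, v2)))
    return out

# Toy run on random integer coordinate vectors; each round roughly halves the list.
vs = [tuple(random.randint(-50, 50) for _ in range(4)) for _ in range(200)]
for _ in range(3):
    vs = pair_and_average(vs)
print(len(vs), vs[:3])
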
New (and old) proof systems for lattice problems.
Navid Alamati; Chris Peikert; and Noah Stephens-Davidowitz.
In PKC, 2018.

@inproceedings{APSNewOld18,
  title = {New (and old) proof systems for lattice problems},
  abstract = {$ \newcommand{\Otil}{\ensuremath{\widetilde{O}}} \newcommand{\problem}[1]{\ensuremath{\mathsf{#1}}} \newcommand{\gapspp}{\problem{GapSPP}} \newcommand{\oNP}{\mathsf{coNP}} \newcommand{\eps}{\epsilon} \newcommand{\ensuremath}[1]{#1} $We continue the study of statistical zero-knowledge (SZK) proofs, both interactive and noninteractive, for computational problems on point lattices. We are particularly interested in the problem $\gapspp$ of approximating the $\epsilon$-smoothing parameter (for some $\epsilon < 1/2$) of an $n$-dimensional lattice. The smoothing parameter is a key quantity in the study of lattices, and $\gapspp$ has been emerging as a core problem in lattice-based cryptography, e.g., in worst-case to average-case reductions.

We show that $\gapspp$ admits SZK proofs for remarkably low approximation factors, improving on prior work by up to roughly $\sqrt{n}$. Specifically:

$\bullet$ There is a noninteractive SZK proof for $O(\log(n) \sqrt{\log (1/\eps)})$-approximate $\gapspp$. Moreover, for any negligible $\epsilon$ and a larger approximation factor $\Otil(\sqrt{n \log(1/\eps)})$, there is such a proof with an efficient prover.
$\bullet$ There is an (interactive) SZK proof with an efficient prover for $O(\log n + \sqrt{\log(1/\eps)/\log n})$-approximate $\problem{coGapSPP}$. We show this by proving that $O(\log n)$-approximate $\gapspp$ is in $\oNP$.

In addition, we give an (interactive) SZK proof with an efficient prover for approximating the lattice covering radius to within an $O(\sqrt{n})$ factor, improving upon the prior best factor of $\omega(\sqrt{n \log n})$.},
  urldate = {2018-05-30},
  url = {https://eprint.iacr.org/2017/1226},
  booktitle = {PKC},
  author = {Alamati, Navid and Peikert, Chris and {Stephens-Davidowitz}, Noah},
  year = {2018}
}

2017 (5)

An inequality for Gaussians on lattices.
Oded Regev; and Noah Stephens-Davidowitz.
SIDMA, 31(2). 2017.

@article{RSInequalityGaussians17,
  title = {An inequality for {Gaussians} on lattices},
  abstract = {$ \newcommand{\R}{\ensuremath{\mathbb{R}}} \newcommand{\lat}{\mathcal{L}} \newcommand{\ensuremath}[1]{#1} $We show that for any lattice $\lat \subseteq \R^n$ and vectors $x,y \in \R^n$, \[ f(\lat + x)^2 f(\lat + y)^2 \leq f(\lat)^2 f(\lat + x + y) f(\lat + x - y) \; , \] where $f$ is the Gaussian measure $f(A) := \sum_{w \in A} \exp(-\pi \| w \|^2)$. We show a number of applications, including bounds on the moments of the discrete Gaussian distribution, various monotonicity properties of the heat kernel on flat tori, and a positive correlation inequality for Gaussian measures on lattices.},
  urldate = {2018-05-28},
  url = {http://arxiv.org/abs/1502.04796},
  journal = {SIDMA},
  author = {Regev, Oded and {Stephens-Davidowitz}, Noah},
  year = {2017},
  volume = {31},
  number = {2},
}

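The inequality in the abstract above is easy to test numerically in one dimension. The snippet below is my own check, with L = Z and truncated sums: it evaluates both sides of f(L+x)^2 f(L+y)^2 <= f(L)^2 f(L+x+y) f(L+x-y) for a few shifts.

import math

def f_shifted_Z(x, kmax=30):
    # Gaussian mass f(Z + x) = sum over integers k of exp(-pi * (k + x)^2),
    # truncated to |k| <= kmax (the neglected tail is astronomically small).
    return sum(math.exp(-math.pi * (k + x) ** 2) for k in range(-kmax, kmax + 1))

for x, y in [(0.1, 0.3), (0.5, 0.25), (0.4, 0.4)]:
    lhs = f_shifted_Z(x) ** 2 * f_shifted_Z(y) ** 2
    rhs = f_shifted_Z(0) ** 2 * f_shifted_Z(x + y) * f_shifted_Z(x - y)
    print(x, y, lhs <= rhs, round(lhs, 6), round(rhs, 6))
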
On the quantitative hardness of CVP.
Huck Bennett; Alexander Golovnev; and Noah Stephens-Davidowitz.
In FOCS, 2017.

@inproceedings{BGSQuantitativeHardness17,
  title = {On the quantitative hardness of {CVP}},
  abstract = {$ \newcommand{\eps}{\epsilon} \newcommand{\problem}[1]{\ensuremath{\mathrm{#1}} } \newcommand{\CVP}{\problem{CVP}} \newcommand{\SVP}{\problem{SVP}} \newcommand{\CVPP}{\problem{CVPP}} \newcommand{\ensuremath}[1]{#1} $For odd integers $p \geq 1$ (and $p = \infty$), we show that the Closest Vector Problem in the $\ell_p$ norm ($\CVP_p$) over rank $n$ lattices cannot be solved in $2^{(1-\eps) n}$ time for any constant $\eps > 0$ unless the Strong Exponential Time Hypothesis (SETH) fails. We then extend this result to "almost all" values of $p \geq 1$, not including the even integers. This comes tantalizingly close to settling the quantitative time complexity of the important special case of $\CVP_2$ (i.e., $\CVP$ in the Euclidean norm), for which a $2^{n +o(n)}$-time algorithm is known. In particular, our result applies for any $p = p(n) \neq 2$ that approaches $2$ as $n \to \infty$. We also show a similar SETH-hardness result for $\SVP_\infty$; hardness of approximating $\CVP_p$ to within some constant factor under the so-called Gap-ETH assumption; and other quantitative hardness results for $\CVP_p$ and $\CVPP_p$ for any $1 \leq p < \infty$ under different assumptions.},
  url = {http://arxiv.org/abs/1704.03928},
  booktitle = {FOCS},
  author = {Bennett, Huck and Golovnev, Alexander and {Stephens-Davidowitz}, Noah},
  year = {2017},
  url_Princeton_Talk = {http://www.youtube.com/watch?v=sd-SMjAl0ks}
}

Pseudorandomness of Ring-LWE for any ring and modulus.
Chris Peikert; Oded Regev; and Noah Stephens-Davidowitz.
In STOC, 2017.

@inproceedings{PRSPseudorandomnessRingLWE17,
  title = {Pseudorandomness of {Ring-LWE} for any ring and modulus},
  abstract = {We give a polynomial-time quantum reduction from worst-case (ideal) lattice problems directly to the decision version of (Ring-)LWE. This extends to decision all the worst-case hardness results that were previously known for the search version, for the same or even better parameters and with no algebraic restrictions on the modulus or number field. Indeed, our reduction is the first that works for decision Ring-LWE with any number field and any modulus.},
  url = {https://eprint.iacr.org/2017/258},
  booktitle = {STOC},
  author = {Peikert, Chris and Regev, Oded and {Stephens-Davidowitz}, Noah},
  year = {2017}
}

On the Gaussian measure over lattices.
Noah Stephens-Davidowitz.
Ph.D. Thesis, New York University, 2017.

@phdthesis{NSDthesis,
  type = {PhD Thesis},
  title = {On the {Gaussian} measure over lattices},
  school = {New York University},
  author = {{Stephens-Davidowitz}, Noah},
  year = {2017}
}

Implementing BP-obfuscation using graph-induced encoding.
Shai Halevi; Tzipora Halevi; Victor Shoup; and Noah Stephens-Davidowitz.
In CCS, 2017.

@inproceedings{HHSSImplementingBPObfuscation17,
  title = {Implementing {BP}-obfuscation using graph-induced encoding},
  abstract = {We implemented (a simplified version of) the branching-program obfuscator due to Gentry et al. (GGH15), which is itself a variation of the first obfuscation candidate by Garg et al. (GGHRSW13). To keep within the realm of feasibility, we had to give up on some aspects of the construction, specifically the ‘‘multiplicative bundling'' factors that protect against mixed-input attacks. Hence our implementation can only support read-once branching programs.

To be able to handle anything more than just toy problems, we developed a host of algorithmic and code-level optimizations. These include new variants of discrete Gaussian sampler and lattice trapdoor sampler, efficient matrix-manipulation routines, and many tradeoffs. We expect that these optimizations will find other uses in lattice-based cryptography beyond just obfuscation.

Our implementation is the first obfuscation attempt using the GGH15 graded encoding scheme, offering performance advantages over other graded encoding methods when obfuscating finite-state machines with many states. In our most demanding setting, we were able to obfuscate programs with input length of 20 nibbles (80 bits) and over 100 states, which seems out of reach for prior implementations. Although further optimizations are surely possible, we do not expect any implementation of current schemes to be able to handle much larger parameters.},
  urldate = {2018-05-30},
  url = {https://eprint.iacr.org/2017/104},
  author = {Halevi, Shai and Halevi, Tzipora and Shoup, Victor and {Stephens-Davidowitz}, Noah},
  year = {2017},
  booktitle = {CCS},
}

2016 (4)

Search-to-decision reductions for lattice problems with approximation factors (slightly) greater than one.
Noah Stephens-Davidowitz.
In APPROX, 2016.

@inproceedings{SteSearchtodecisionReductions16,
  title = {Search-to-decision reductions for lattice problems with approximation factors (slightly) greater than one},
  abstract = {We show the first dimension-preserving search-to-decision reductions for approximate SVP and CVP. In particular, for any $\gamma \leq 1 + O(\log n/n)$, we obtain an efficient dimension-preserving reduction from $\gamma^{O(n/\log n)}$-SVP to $\gamma$-GapSVP and an efficient dimension-preserving reduction from $\gamma^{O(n)}$-CVP to $\gamma$-GapCVP. These results generalize the known equivalences of the search and decision versions of these problems in the exact case when $\gamma = 1$. For SVP, we actually obtain something slightly stronger than a search-to-decision reduction--we reduce $\gamma^{O(n/\log n)}$-SVP to $\gamma$-unique SVP, a potentially easier problem than $\gamma$-GapSVP.},
  url = {http://arxiv.org/abs/1512.04138},
  booktitle = {APPROX},
  author = {{Stephens-Davidowitz}, Noah},
  year = {2016}
}

On the Lattice Distortion Problem.
Huck Bennett; Daniel Dadush; and Noah Stephens-Davidowitz.
In ESA, 2016.

@inproceedings{BDSLatticeDistortion16,
  title = {On the {Lattice Distortion Problem}},
  abstract = {We introduce and study the \emph{Lattice Distortion Problem} (LDP). LDP asks how "similar" two lattices are. I.e., what is the minimal distortion of a linear bijection between the two lattices? LDP generalizes the Lattice Isomorphism Problem (the lattice analogue of Graph Isomorphism), which simply asks whether the minimal distortion is one. As our first contribution, we show that the distortion between any two lattices is approximated up to a $n^{O(\log n)}$ factor by a simple function of their successive minima. Our methods are constructive, allowing us to compute low-distortion mappings that are within a $2^{O(n \log \log n/\log n)}$ factor of optimal in polynomial time and within a $n^{O(\log n)}$ factor of optimal in singly exponential time. Our algorithms rely on a notion of basis reduction introduced by Seysen (Combinatorica 1993), which we show is intimately related to lattice distortion. Lastly, we show that LDP is NP-hard to approximate to within any constant factor (under randomized reductions), by a reduction from the Shortest Vector Problem.},
  url = {http://arxiv.org/abs/1605.03613},
  booktitle = {ESA},
  author = {Bennett, Huck and Dadush, Daniel and {Stephens-Davidowitz}, Noah},
  year = {2016}
}

\n\n\n
\n \n\n \n \n \n \n \n \n Discrete Gaussian sampling reduces to CVP and SVP.\n \n \n \n \n\n\n \n Noah Stephens-Davidowitz.\n\n\n \n\n\n\n In SODA, 2016. \n \n\n\n\n
\n\n\n\n \n \n \"DiscretePaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 106 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{SteDiscreteGaussian16,\n  title = {Discrete {Gaussian} sampling reduces to {CVP} and {SVP}},\n  abstract = {The discrete Gaussian $D_{L- t, s}$ is the distribution that assigns to each vector $x$ in a shifted lattice $L - t$ probability proportional to $e^{-\\pi \\|x\\|^2/s^2}$. It has long been an important tool in the study of lattices. More recently, algorithms for discrete Gaussian sampling (DGS) have found many applications in computer science. In particular, polynomial-time algorithms for DGS with very high parameters $s$ have found many uses in cryptography and in reductions between lattice problems. And, in the past year, Aggarwal, Dadush, Regev, and Stephens-Davidowitz showed $2^{n+o(n)}$-time algorithms for DGS with a much wider range of parameters and used them to obtain the current fastest known algorithms for the two most important lattice problems, the Shortest Vector Problem (SVP) and the Closest Vector Problem (CVP). Motivated by its increasing importance, we investigate the complexity of DGS itself and its relationship to CVP and SVP. Our first result is a polynomial-time dimension-preserving reduction from DGS to CVP. There is a simple reduction from CVP to DGS, so this shows that DGS is equivalent to CVP. Our second result, which we find to be more surprising, is a polynomial-time dimension-preserving reduction from centered DGS (the important special case when $ t = 0$) to SVP. In the other direction, there is a simple reduction from $\\gamma$-approximate SVP for any $\\gamma = \\Omega(\\sqrt{n/\\log n})$, and we present some (relatively weak) evidence to suggest that this might be the best achievable approximation factor. We also show that our CVP result extends to a much wider class of distributions and even to other norms.},\n  url = {http://arxiv.org/abs/1506.07490},\n  booktitle = {SODA},\n  author = {{Stephens-Davidowitz}, Noah},\n  year = {2016}\n}\n\n
The discrete Gaussian $D_{L - t, s}$ is the distribution that assigns to each vector $x$ in a shifted lattice $L - t$ probability proportional to $e^{-\pi \|x\|^2/s^2}$. It has long been an important tool in the study of lattices. More recently, algorithms for discrete Gaussian sampling (DGS) have found many applications in computer science. In particular, polynomial-time algorithms for DGS with very high parameters $s$ have found many uses in cryptography and in reductions between lattice problems. And, in the past year, Aggarwal, Dadush, Regev, and Stephens-Davidowitz showed $2^{n+o(n)}$-time algorithms for DGS with a much wider range of parameters and used them to obtain the current fastest known algorithms for the two most important lattice problems, the Shortest Vector Problem (SVP) and the Closest Vector Problem (CVP). Motivated by its increasing importance, we investigate the complexity of DGS itself and its relationship to CVP and SVP. Our first result is a polynomial-time dimension-preserving reduction from DGS to CVP. There is a simple reduction from CVP to DGS, so this shows that DGS is equivalent to CVP. Our second result, which we find to be more surprising, is a polynomial-time dimension-preserving reduction from centered DGS (the important special case when $t = 0$) to SVP. In the other direction, there is a simple reduction from $\gamma$-approximate SVP for any $\gamma = \Omega(\sqrt{n/\log n})$, and we present some (relatively weak) evidence to suggest that this might be the best achievable approximation factor. We also show that our CVP result extends to a much wider class of distributions and even to other norms.
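To make the definition concrete, here is a brute-force sampler for $D_{L - t, s}$ in a toy dimension: it enumerates coset vectors with small coefficients and samples them with weight $e^{-\pi \|x\|^2/s^2}$. The basis, shift, and parameter below are made up for illustration, and this exponential-time enumeration is of course nothing like the reductions or algorithms discussed in the paper.

import itertools
import math
import random

def sample_discrete_gaussian(B, t, s, radius=8):
    # Enumerate the coset L - t over a small coefficient box and sample each
    # point x with weight exp(-pi * ||x||^2 / s^2), exactly as in the definition.
    n = len(B)
    pts, wts = [], []
    for z in itertools.product(range(-radius, radius + 1), repeat=n):
        x = tuple(sum(z[i] * B[i][j] for i in range(n)) - t[j] for j in range(len(t)))
        wts.append(math.exp(-math.pi * sum(c * c for c in x) / s ** 2))
        pts.append(x)
    return random.choices(pts, weights=wts, k=1)[0]

B = [[1.0, 0.0], [0.3, 1.1]]   # rows are the basis vectors of a toy 2-dimensional lattice
t = [0.25, 0.4]                # toy shift vector
print(sample_discrete_gaussian(B, t, s=1.5))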
Message transmission with Reverse Firewalls–secure communication on corrupted machines. Yevgeniy Dodis; Ilya Mironov; and Noah Stephens-Davidowitz. In CRYPTO, 2016.
@inproceedings{DMSMessageTransmission16,\n  title = {Message transmission with Reverse Firewalls--secure communication on corrupted machines},\n  abstract = {Suppose Alice wishes to send a message to Bob privately over an untrusted channel. Cryptographers have developed a whole suite of tools to accomplish this task, with a wide variety of notions of security, setup assumptions, and running times. However, almost all prior work on this topic made a seemingly innocent assumption: that Alice has access to a trusted computer with a proper implementation of the protocol. The Snowden revelations show us that, in fact, powerful adversaries can and will corrupt users' machines in order to compromise their security. And, (presumably) accidental vulnerabilities are regularly found in popular cryptographic software, showing that users cannot even trust implementations that were created honestly. This leads to the following (seemingly absurd) question: ‘‘Can Alice securely send a message to Bob even if she cannot trust her own computer?!''\n\nBellare, Paterson, and Rogaway recently studied this question. They show a strong lower bound that in particular rules out even semantically secure public-key encryption in their model. However, Mironov and Stephens-Davidowitz recently introduced a new framework for solving such problems: reverse firewalls. A secure reverse firewall is a third party that ‘‘sits between Alice and the outside world'' and modifies her sent and received messages so that even if the her machine has been corrupted, Alice's security is still guaranteed. We show how to use reverse firewalls to sidestep the impossibility result of Bellare et al., and we achieve strong security guarantees in this extreme setting.\n\nIndeed, we find a rich structure of solutions that vary in efficiency, security, and setup assumptions, in close analogy with message transmission in the classical setting. Our strongest and most important result shows a protocol that achieves interactive, concurrent CCA-secure message transmission with a reverse firewall--i.e., CCA-secure message transmission on a possibly compromised machine! Surprisingly, this protocol is quite efficient and simple, requiring only four rounds and a small constant number of public-key operations for each party. It could easily be used in practice. Behind this result is a technical composition theorem that shows how key agreement with a sufficiently secure reverse firewall can be used to construct a message-transmission protocol with its own secure reverse firewall.},\n  urldate = {2018-05-30},\n  url = {https://eprint.iacr.org/2015/548},\n  booktitle = {CRYPTO},\n  author = {Dodis, Yevgeniy and Mironov, Ilya and {Stephens-Davidowitz}, Noah},\n  year = {2016},\n  url_CRYPTO_talk = {http://www.youtube.com/watch?v=2DOc-9u1EbQ}\n}\n\n
Suppose Alice wishes to send a message to Bob privately over an untrusted channel. Cryptographers have developed a whole suite of tools to accomplish this task, with a wide variety of notions of security, setup assumptions, and running times. However, almost all prior work on this topic made a seemingly innocent assumption: that Alice has access to a trusted computer with a proper implementation of the protocol. The Snowden revelations show us that, in fact, powerful adversaries can and will corrupt users' machines in order to compromise their security. And, (presumably) accidental vulnerabilities are regularly found in popular cryptographic software, showing that users cannot even trust implementations that were created honestly. This leads to the following (seemingly absurd) question: "Can Alice securely send a message to Bob even if she cannot trust her own computer?!"

Bellare, Paterson, and Rogaway recently studied this question. They show a strong lower bound that in particular rules out even semantically secure public-key encryption in their model. However, Mironov and Stephens-Davidowitz recently introduced a new framework for solving such problems: reverse firewalls. A secure reverse firewall is a third party that "sits between Alice and the outside world" and modifies her sent and received messages so that even if her machine has been corrupted, Alice's security is still guaranteed. We show how to use reverse firewalls to sidestep the impossibility result of Bellare et al., and we achieve strong security guarantees in this extreme setting.

Indeed, we find a rich structure of solutions that vary in efficiency, security, and setup assumptions, in close analogy with message transmission in the classical setting. Our strongest and most important result shows a protocol that achieves interactive, concurrent CCA-secure message transmission with a reverse firewall, i.e., CCA-secure message transmission on a possibly compromised machine! Surprisingly, this protocol is quite efficient and simple, requiring only four rounds and a small constant number of public-key operations for each party. It could easily be used in practice. Behind this result is a technical composition theorem that shows how key agreement with a sufficiently secure reverse firewall can be used to construct a message-transmission protocol with its own secure reverse firewall.
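As a toy illustration of the reverse-firewall mechanism in this setting (not the paper's CCA-secure interactive protocol), consider a firewall that rerandomizes textbook ElGamal ciphertexts on their way out: Bob's decryption is unchanged, but the forwarded ciphertext's randomness no longer depends on whatever Alice's possibly tampered encryptor chose. The group parameters and helper names below are toy-sized stand-ins invented for the sketch.

import random

# Toy parameters: a small safe prime p = 2q + 1 and a generator of the order-q
# subgroup.  These sizes are utterly insecure and exist only to make the sketch run.
P = 2027
Q = 1013
G = 4

def keygen():
    x = random.randrange(1, Q)
    return x, pow(G, x, P)                        # secret key x, public key h = g^x

def encrypt(h, m, r=None):
    r = random.randrange(1, Q) if r is None else r
    return pow(G, r, P), (m * pow(h, r, P)) % P   # (g^r, m * h^r)

def decrypt(x, ct):
    c1, c2 = ct
    return (c2 * pow(c1, Q - x, P)) % P           # c2 * c1^{-x}, since c1 has order dividing Q

def firewall_rerandomize(h, ct):
    # The reverse firewall multiplies in a fresh encryption of 1, leaving the
    # plaintext unchanged while refreshing the randomness of the ciphertext.
    c1, c2 = ct
    r2 = random.randrange(1, Q)
    return (c1 * pow(G, r2, P)) % P, (c2 * pow(h, r2, P)) % P

x, h = keygen()
m = pow(G, 7, P)                                  # encode the message as a group element
tampered = encrypt(h, m, r=1)                     # a corrupted encryptor picks biased randomness
forwarded = firewall_rerandomize(h, tampered)     # the firewall rerandomizes before sending
assert decrypt(x, forwarded) == m                 # functionality is maintained for Bob
print("decryption after rerandomization is unchanged")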
2015 (4)
Solving the Closest Vector Problem in $2^n$ time–The discrete Gaussian strikes again! Divesh Aggarwal; Daniel Dadush; and Noah Stephens-Davidowitz. In FOCS, 2015.
@inproceedings{ADSSolvingClosest15,\n  title = {Solving the {Closest Vector Problem} in $2^n$ time--The discrete {Gaussian} strikes again!},\n  abstract = {We give a $2^{n+o(n)}$-time and space randomized algorithm for solving the exact Closest Vector Problem (CVP) on $n$-dimensional Euclidean lattices. This improves on the previous fastest algorithm, the deterministic $\\widetilde{O}(4^{n})$-time and $\\widetilde{O}(2^{n})$-space algorithm of Micciancio and Voulgaris. We achieve our main result in three steps. First, we show how to modify the sampling algorithm from [ADRS15] to solve the problem of discrete Gaussian sampling over lattice shifts, $L- t$, with very low parameters. While the actual algorithm is a natural generalization of [ADRS15], the analysis uses substantial new ideas. This yields a $2^{n+o(n)}$-time algorithm for approximate CVP for any approximation factor $\\gamma = 1+2^{-o(n/\\log n)}$. Second, we show that the approximate closest vectors to a target vector $t$ can be grouped into "lower-dimensional clusters," and we use this to obtain a recursive reduction from exact CVP to a variant of approximate CVP that "behaves well with these clusters." Third, we show that our discrete Gaussian sampling algorithm can be used to solve this variant of approximate CVP. The analysis depends crucially on some new properties of the discrete Gaussian distribution and approximate closest vectors, which might be of independent interest.},\n  url = {http://arxiv.org/abs/1504.01995},\n  booktitle = {FOCS},\n  author = {Aggarwal, Divesh and Dadush, Daniel and {Stephens-Davidowitz}, Noah},\n  year = {2015}\n}\n\n
We give a $2^{n+o(n)}$-time and space randomized algorithm for solving the exact Closest Vector Problem (CVP) on $n$-dimensional Euclidean lattices. This improves on the previous fastest algorithm, the deterministic $\widetilde{O}(4^{n})$-time and $\widetilde{O}(2^{n})$-space algorithm of Micciancio and Voulgaris. We achieve our main result in three steps. First, we show how to modify the sampling algorithm from [ADRS15] to solve the problem of discrete Gaussian sampling over lattice shifts, $L - t$, with very low parameters. While the actual algorithm is a natural generalization of [ADRS15], the analysis uses substantial new ideas. This yields a $2^{n+o(n)}$-time algorithm for approximate CVP for any approximation factor $\gamma = 1+2^{-o(n/\log n)}$. Second, we show that the approximate closest vectors to a target vector $t$ can be grouped into "lower-dimensional clusters," and we use this to obtain a recursive reduction from exact CVP to a variant of approximate CVP that "behaves well with these clusters." Third, we show that our discrete Gaussian sampling algorithm can be used to solve this variant of approximate CVP. The analysis depends crucially on some new properties of the discrete Gaussian distribution and approximate closest vectors, which might be of independent interest.
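For readers new to the problem, the following brute-force baseline pins down what exact CVP asks: given a basis $B$ and target $t$, find an integer combination $Bz$ minimizing $\|Bz - t\|$. It enumerates coefficients in a small box, so it is exponential-time, only correct if the box is large enough, and in no way the $2^{n+o(n)}$ algorithm described above; the basis, target, and radius are illustrative choices.

import itertools
import numpy as np

def cvp_bruteforce(B, t, radius=5):
    # Enumerate integer coefficient vectors in a box; exponential time, and only
    # guaranteed correct if the box is large enough to contain the true answer.
    best_vec, best_dist = None, np.inf
    for z in itertools.product(range(-radius, radius + 1), repeat=B.shape[1]):
        v = B @ np.array(z)
        d = np.linalg.norm(v - t)
        if d < best_dist:
            best_vec, best_dist = v, d
    return best_vec, best_dist

B = np.array([[2.0, 1.0], [0.0, 3.0]])   # columns are the basis vectors
t = np.array([2.3, 1.7])
v, d = cvp_bruteforce(B, t)
print("closest lattice vector found:", v, "at distance", d)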
Solving the Shortest Vector Problem in $2^n$ time via discrete Gaussian sampling. Divesh Aggarwal; Daniel Dadush; Oded Regev; and Noah Stephens-Davidowitz. In STOC, 2015.
@inproceedings{ADRSSolvingShortest15,\n  title = {Solving the {Shortest Vector Problem} in $2^n$ time via discrete {Gaussian} sampling},\n  abstract = {We give a randomized $2^{n+o(n)}$-time and space algorithm for solving the Shortest Vector Problem (SVP) on n-dimensional Euclidean lattices. This improves on the previous fastest algorithm: the deterministic $\\widetilde{O}(4^n)$-time and $\\widetilde{O}(2^n)$-space algorithm of Micciancio and Voulgaris (STOC 2010, SIAM J. Comp. 2013). In fact, we give a conceptually simple algorithm that solves the (in our opinion, even more interesting) problem of discrete Gaussian sampling (DGS). More specifically, we show how to sample $2^{n/2}$ vectors from the discrete Gaussian distribution at any parameter in $2^{n+o(n)}$ time and space. (Prior work only solved DGS for very large parameters.) Our SVP result then follows from a natural reduction from SVP to DGS. We also show that our DGS algorithm implies a $2^{n + o(n)}$-time algorithm that approximates the Closest Vector Problem to within a factor of $1.97$. In addition, we give a more refined algorithm for DGS above the so-called smoothing parameter of the lattice, which can generate $2^{n/2}$ discrete Gaussian samples in just $2^{n/2+o(n)}$ time and space. Among other things, this implies a $2^{n/2+o(n)}$-time and space algorithm for $1.93$-approximate decision SVP.},\n  url = {http://arxiv.org/abs/1412.7994},\n  booktitle = {STOC},\n  author = {Aggarwal, Divesh and Dadush, Daniel and Regev, Oded and {Stephens-Davidowitz}, Noah},\n  year = {2015},\n  url_Simons_talk = {http://www.youtube.com/watch?v=PWy0ZBRAUxA}\n}\n\n
We give a randomized $2^{n+o(n)}$-time and space algorithm for solving the Shortest Vector Problem (SVP) on $n$-dimensional Euclidean lattices. This improves on the previous fastest algorithm: the deterministic $\widetilde{O}(4^n)$-time and $\widetilde{O}(2^n)$-space algorithm of Micciancio and Voulgaris (STOC 2010, SIAM J. Comp. 2013). In fact, we give a conceptually simple algorithm that solves the (in our opinion, even more interesting) problem of discrete Gaussian sampling (DGS). More specifically, we show how to sample $2^{n/2}$ vectors from the discrete Gaussian distribution at any parameter in $2^{n+o(n)}$ time and space. (Prior work only solved DGS for very large parameters.) Our SVP result then follows from a natural reduction from SVP to DGS. We also show that our DGS algorithm implies a $2^{n + o(n)}$-time algorithm that approximates the Closest Vector Problem to within a factor of $1.97$. In addition, we give a more refined algorithm for DGS above the so-called smoothing parameter of the lattice, which can generate $2^{n/2}$ discrete Gaussian samples in just $2^{n/2+o(n)}$ time and space. Among other things, this implies a $2^{n/2+o(n)}$-time and space algorithm for $1.93$-approximate decision SVP.
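The reduction from SVP to DGS mentioned above is easy to state: draw many samples from the centered discrete Gaussian $D_{L,s}$ at a suitable parameter and keep the shortest nonzero one. The toy sketch below does exactly that with a brute-force sampler and an arbitrary parameter; efficient sampling and the choice of parameter are where the paper's work lies, and the basis and numbers here are made up for illustration.

import itertools
import math
import random

def centered_dgs_samples(B, s, num, radius=6):
    # Brute-force sampler for the centered discrete Gaussian D_{L,s}: weight
    # exp(-pi * ||x||^2 / s^2) on each lattice vector x with small coefficients.
    n = len(B)
    pts, wts = [], []
    for z in itertools.product(range(-radius, radius + 1), repeat=n):
        x = tuple(sum(z[i] * B[i][j] for i in range(n)) for j in range(len(B[0])))
        wts.append(math.exp(-math.pi * sum(c * c for c in x) / s ** 2))
        pts.append(x)
    return random.choices(pts, weights=wts, k=num)

B = [[1.0, 0.0], [0.4, 1.3]]             # rows are the basis vectors of a toy lattice
samples = centered_dgs_samples(B, s=1.0, num=2000)   # s = 1.0 is an arbitrary toy parameter
nonzero = [x for x in samples if any(abs(c) > 1e-9 for c in x)]
assert nonzero, "no nonzero samples; increase num or the parameter s"
print("shortest nonzero sample:", min(nonzero, key=lambda x: sum(c * c for c in x)))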
Dimension-preserving reductions between lattice problems. Noah Stephens-Davidowitz. 2015.
@unpublished{SteDimensionpreservingReductions15,\n  title = {Dimension-preserving reductions between lattice problems},\n  abstract = {Computational problems on lattices have found a remarkable number of applications in computer science. In particular, over the past twenty years, many strong cryptographic primitives have been constructed with their security based on the (worst-case) hardness of various lattice problems. \n\nDue to their importance, there has been much work towards understanding the relationship between these problems. For the parameters that typically interest us, the fastest known algorithms for lattice problems run in time that is exponential in the dimension of the lattice. Therefore, we are typically interested in reductions that preserve this dimension. (We actually relax this notion slightly and consider a reduction to be ``dimension-preserving'' if it increases the dimension by at most an additive constant.) \n\nWe summarize many of the known results in this area.},\n  url = {http://noahsd.com/latticeproblems.pdf},\n  author = {{Stephens-Davidowitz}, Noah},\n  year = {2015}\n}\n\n\n\n
Computational problems on lattices have found a remarkable number of applications in computer science. In particular, over the past twenty years, many strong cryptographic primitives have been constructed with their security based on the (worst-case) hardness of various lattice problems.

Due to their importance, there has been much work towards understanding the relationship between these problems. For the parameters that typically interest us, the fastest known algorithms for lattice problems run in time that is exponential in the dimension of the lattice. Therefore, we are typically interested in reductions that preserve this dimension. (We actually relax this notion slightly and consider a reduction to be "dimension-preserving" if it increases the dimension by at most an additive constant.)

We summarize many of the known results in this area.
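One classical example of the kind of reduction surveyed here is the dimension-preserving reduction from exact SVP to exact CVP, usually attributed to Goldreich, Micciancio, Safra, and Seifert: for each $i$, double the $i$-th basis vector, ask a CVP oracle for the vector of that sublattice closest to $b_i$, and return the shortest of the offsets $b_i - u_i$. The sketch below uses a hypothetical brute-force stand-in for the CVP oracle so that it runs in toy dimensions.

import itertools
import numpy as np

def cvp_oracle(B, t, radius=6):
    # Placeholder oracle: exhaustive search over a coefficient box (toy only).
    best, best_d = None, np.inf
    for z in itertools.product(range(-radius, radius + 1), repeat=B.shape[1]):
        v = B @ np.array(z)
        d = np.linalg.norm(v - t)
        if d < best_d:
            best, best_d = v, d
    return best

def svp_via_cvp(B):
    # For each i: in the sublattice with b_i doubled, every vector differs from
    # b_i by a lattice vector with odd i-th coefficient, hence nonzero; for some
    # i the closest such vector realizes the shortest vector of the lattice.
    n = B.shape[1]
    candidates = []
    for i in range(n):
        Bi = B.copy()
        Bi[:, i] *= 2                       # sublattice with b_i replaced by 2*b_i
        u = cvp_oracle(Bi, B[:, i])         # closest vector of that sublattice to b_i
        candidates.append(B[:, i] - u)      # a nonzero vector of the original lattice
    return min(candidates, key=np.linalg.norm)

B = np.array([[3.0, 1.0], [0.0, 2.0]])      # columns are basis vectors
print("shortest vector found:", svp_via_cvp(B))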
Cryptographic Reverse Firewalls. Ilya Mironov; and Noah Stephens-Davidowitz. In Eurocrypt, 2015.
@inproceedings{MSCryptographicReverse15,\n  title = {Cryptographic Reverse Firewalls},\n  abstract = {Recent revelations by Edward Snowden show that a user's own hardware and software can be used against her in various ways (e.g., to leak her private information). And, a series of recent announcements has shown that widespread implementations of cryptographic software often contain serious bugs that cripple security. This motivates us to consider the following (seemingly absurd) question: How can we guarantee a user's security when she may be using a malfunctioning or arbitrarily compromised machine? To that end, we introduce the notion of a cryptographic reverse firewall (RF). Such a machine sits between the user's computer and the outside world, potentially modifying the messages that she sends and receives as she engages in a cryptographic protocol.\n\nA good reverse firewall accomplishes three things: (1) it maintains functionality, so that if the user's computer is working correctly, the RF will not break the functionality of the underlying protocol; (2) it preserves security, so that regardless of how the user's machine behaves, the presence of the RF will provide the same security guarantees as the properly implemented protocol; and (3) it resists exfiltration, so that regardless of how the user's machine behaves, the presence of the RF will prevent the machine from leaking any information to the outside world. Importantly, we do not model the firewall as a trusted party. It does not share any secrets with the user, and the protocol should be both secure and functional without the firewall (when it is implemented correctly).\n\nOur security definition for reverse firewalls depends on the security notion(s) of the underlying protocol. As such, our model generalizes much prior work and provides a general framework for building cryptographic schemes that remain secure when run on compromised machine. It is also a modern take on a line of work that received considerable attention in the 80s and 90s.\n\nWe show that our definition is achievable by constructing a private function evaluation protocol with a secure reverse firewall for each party. Along the way, we design an oblivious transfer protocol that also has a secure RF for each party, and a rerandomizable garbled circuit that is both more efficient and more secure than previous constructions. Finally, we show how to convert $\\backslash$emph\\{any\\} protocol into a protocol with an exfiltration-resistant reverse firewall for all parties. (In other words, we provide a generic way to prevent a tampered machine from leaking information to an eavesdropper via any protocol.)},\n  urldate = {2018-05-30},\n  url = {http://eprint.iacr.org/2014/758},\n  booktitle = {Eurocrypt},\n  author = {Mironov, Ilya and {Stephens-Davidowitz}, Noah},\n  year = {2015},\n  url_slides = {http://www.cs.nyu.edu/crg/newSlides/02_23_16.pdf}\n}\n\n
Recent revelations by Edward Snowden show that a user's own hardware and software can be used against her in various ways (e.g., to leak her private information). And, a series of recent announcements has shown that widespread implementations of cryptographic software often contain serious bugs that cripple security. This motivates us to consider the following (seemingly absurd) question: How can we guarantee a user's security when she may be using a malfunctioning or arbitrarily compromised machine? To that end, we introduce the notion of a cryptographic reverse firewall (RF). Such a machine sits between the user's computer and the outside world, potentially modifying the messages that she sends and receives as she engages in a cryptographic protocol.

A good reverse firewall accomplishes three things: (1) it maintains functionality, so that if the user's computer is working correctly, the RF will not break the functionality of the underlying protocol; (2) it preserves security, so that regardless of how the user's machine behaves, the presence of the RF will provide the same security guarantees as the properly implemented protocol; and (3) it resists exfiltration, so that regardless of how the user's machine behaves, the presence of the RF will prevent the machine from leaking any information to the outside world. Importantly, we do not model the firewall as a trusted party. It does not share any secrets with the user, and the protocol should be both secure and functional without the firewall (when it is implemented correctly).

Our security definition for reverse firewalls depends on the security notion(s) of the underlying protocol. As such, our model generalizes much prior work and provides a general framework for building cryptographic schemes that remain secure when run on a compromised machine. It is also a modern take on a line of work that received considerable attention in the 80s and 90s.

We show that our definition is achievable by constructing a private function evaluation protocol with a secure reverse firewall for each party. Along the way, we design an oblivious transfer protocol that also has a secure RF for each party, and a rerandomizable garbled circuit that is both more efficient and more secure than previous constructions. Finally, we show how to convert any protocol into a protocol with an exfiltration-resistant reverse firewall for all parties. (In other words, we provide a generic way to prevent a tampered machine from leaking information to an eavesdropper via any protocol.)
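To see why property (3) above is needed, here is a toy demonstration of the exfiltration threat itself (an attack sketch, not any construction from the paper): a tampered implementation of a randomized padding step grinds its "random" bytes so that their parity leaks one bit of a secret per message, and an eavesdropper who knows the convention reads the secret off the transcript. A rerandomizing reverse firewall is designed to destroy exactly this kind of covert channel. The secret, message, and pad length below are invented for the sketch.

import os

SECRET_BITS = [1, 0, 1, 1, 0, 0, 1, 0]   # hypothetical secret held inside the user's machine

def tampered_pad(message: bytes, leak_bit: int) -> bytes:
    # Grind the "random" padding until its parity equals the bit to be leaked.
    while True:
        pad = os.urandom(8)
        if sum(pad) % 2 == leak_bit:
            return message + pad

def eavesdropper_read(padded: bytes) -> int:
    # Anyone who knows the convention recovers the leaked bit from the transcript.
    return sum(padded[-8:]) % 2

transcript = [tampered_pad(b"hello", b) for b in SECRET_BITS]
recovered = [eavesdropper_read(p) for p in transcript]
assert recovered == SECRET_BITS
print("secret bits recovered by the eavesdropper:", recovered)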
2014 (2)
On the Closest Vector Problem with a distance guarantee. Daniel Dadush; Oded Regev; and Noah Stephens-Davidowitz. In CCC, 2014.
@inproceedings{DRSClosestVector14,\n  title = {On the {Closest Vector Problem} with a distance guarantee},\n  url = {http://arxiv.org/abs/1409.8063},\n  booktitle = {CCC},\n  author = {Dadush, Daniel and Regev, Oded and {Stephens-Davidowitz}, Noah},\n  year = {2014}\n}\n\n
How to eat your entropy and have it too – optimal recovery strategies for compromised RNGs. Yevgeniy Dodis; Adi Shamir; Noah Stephens-Davidowitz; and Daniel Wichs. In CRYPTO, 2014.
@inproceedings{DSSWHowEat14,\n  title = {How to eat your entropy and have it too -- optimal recovery strategies for compromised RNGs},\n  abstract = {Random number generators (RNGs) play a crucial role in many cryptographic schemes and protocols, but their security proof usually assumes that their internal state is initialized with truly random seeds and remains secret at all times. However, in many practical situations these are unrealistic assumptions: The seed is often gathered after a reset/reboot from low entropy external events such as the timing of manual key presses, and the state can be compromised at unknown points in time via side channels or penetration attacks. The usual remedy (used by all the major operating systems, including Windows, Linux, FreeBSD, MacOS, iOS, etc.) is to periodically replenish the internal state through an auxiliary input with additional randomness harvested from the environment. However, recovering from such attacks in a provably correct and computationally optimal way had remained an unsolved challenge so far.\n\nIn this paper we formalize the problem of designing an efficient recovery mechanism from state compromise, by considering it as an online optimization problem. If we knew the timing of the last compromise and the amount of entropy gathered since then, we could stop producing any outputs until the state becomes truly random again. However, our challenge is to recover within a time proportional to this optimal solution even in the hardest (and most realistic) case in which (a) we know nothing about the timing of the last state compromise, and the amount of new entropy injected since then into the state, and (b) any premature production of outputs leads to the total loss of all the added entropy \\{$\\backslash$em used by the RNG\\}, since the attacker can use brute force to enumerate all the possible low-entropy states. In other words, the challenge is to develop recovery mechanisms which are guaranteed to save the day as quickly as possible after a compromise we are not even aware of. The dilemma that we face is that any entropy used prematurely will be lost, and any entropy which is kept unused will delay the recovery.\n\nAfter developing our formal definitional framework for RNGs with inputs, we show how to construct a nearly optimal RNG which is secure in our model. Our technique is inspired by the design of the Fortuna RNG (which is a heuristic RNG construction that is currently used by Windows and comes without any formal analysis), but we non-trivially adapt it to our much stronger adversarial setting. Along the way, our formal treatment of Fortuna enables us to improve its entropy efficiency by almost a factor of two, and to show that our improved construction is essentially tight, by proving a rigorous lower bound on the possible efficiency of any recovery mechanism in our very general model of the problem.},\n  urldate = {2018-05-30},\n  url = {https://eprint.iacr.org/2014/167},\n  booktitle = {CRYPTO},\n  author = {Dodis, Yevgeniy and Shamir, Adi and {Stephens-Davidowitz}, Noah and Wichs, Daniel},\n  year = {2014},\n  url_crypto_talk = {http://www.youtube.com/watch?v=CTuA1wY-704}\n}\n\n
Random number generators (RNGs) play a crucial role in many cryptographic schemes and protocols, but their security proof usually assumes that their internal state is initialized with truly random seeds and remains secret at all times. However, in many practical situations these are unrealistic assumptions: The seed is often gathered after a reset/reboot from low-entropy external events such as the timing of manual key presses, and the state can be compromised at unknown points in time via side channels or penetration attacks. The usual remedy (used by all the major operating systems, including Windows, Linux, FreeBSD, MacOS, iOS, etc.) is to periodically replenish the internal state through an auxiliary input with additional randomness harvested from the environment. However, recovering from such attacks in a provably correct and computationally optimal way had remained an unsolved challenge so far.

In this paper we formalize the problem of designing an efficient recovery mechanism from state compromise, by considering it as an online optimization problem. If we knew the timing of the last compromise and the amount of entropy gathered since then, we could stop producing any outputs until the state becomes truly random again. However, our challenge is to recover within a time proportional to this optimal solution even in the hardest (and most realistic) case in which (a) we know nothing about the timing of the last state compromise and the amount of new entropy injected since then into the state, and (b) any premature production of outputs leads to the total loss of all the added entropy used by the RNG, since the attacker can use brute force to enumerate all the possible low-entropy states. In other words, the challenge is to develop recovery mechanisms which are guaranteed to save the day as quickly as possible after a compromise we are not even aware of. The dilemma that we face is that any entropy used prematurely will be lost, and any entropy which is kept unused will delay the recovery.

After developing our formal definitional framework for RNGs with inputs, we show how to construct a nearly optimal RNG which is secure in our model. Our technique is inspired by the design of the Fortuna RNG (which is a heuristic RNG construction that is currently used by Windows and comes without any formal analysis), but we non-trivially adapt it to our much stronger adversarial setting. Along the way, our formal treatment of Fortuna enables us to improve its entropy efficiency by almost a factor of two, and to show that our improved construction is essentially tight, by proving a rigorous lower bound on the possible efficiency of any recovery mechanism in our very general model of the problem.
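Since the construction is described as inspired by Fortuna, the following toy sketch shows Fortuna's pool-scheduling idea as described by Ferguson and Schneier: entropy events are spread round-robin over pools, and at the $k$-th reseed, pool $i$ is mixed into the state only if $2^i$ divides $k$, so rarely used pools accumulate enough entropy to defeat an attacker who can predict individual low-entropy events. This is the classic heuristic for intuition only; the paper's construction and its optimality analysis differ, and the class and parameter names below are invented for the sketch.

import hashlib

class ToyFortunaPools:
    def __init__(self, num_pools=32):
        self.pools = [hashlib.sha256() for _ in range(num_pools)]
        self.event_counter = 0
        self.reseed_counter = 0
        self.state = b"\x00" * 32

    def add_entropy_event(self, data: bytes):
        # Spread incoming entropy events round-robin over the pools.
        self.pools[self.event_counter % len(self.pools)].update(data)
        self.event_counter += 1

    def reseed(self):
        self.reseed_counter += 1
        material = self.state
        for i, pool in enumerate(self.pools):
            if self.reseed_counter % (2 ** i) == 0:      # use pool i every 2^i-th reseed
                material += pool.digest()
                self.pools[i] = hashlib.sha256()         # empty the pool after use
        self.state = hashlib.sha256(material).digest()
        return self.state

rng = ToyFortunaPools()
for j in range(100):
    rng.add_entropy_event(j.to_bytes(2, "big"))          # stand-ins for low-entropy events
print(rng.reseed().hex())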
2007 (2)
The Cyclic Sieving Phenomenon on the Alternating Sign Matrices. Noah Stephens-Davidowitz; and Alex Cloninger. 2007.
@unpublished{SCCyclicSieving07,\n  title = {The Cyclic Sieving Phenomenon on the Alternating Sign Matrices},\n  abstract = {We first present a previously unpublished result of Stanton that the group of order four generated by rotation by 90 degrees acting on alternating sign matrices exhibits the CSP with the obvious q-analogue of |ASM(n)|.},\n  url = {http://noahsd.com/papers/ASMCSP.pdf},\n  author = {{Stephens-Davidowitz}, Noah and Cloninger, Alex},\n  year = {2007}\n}\n\n
We first present a previously unpublished result of Stanton that the group of order four generated by rotation by 90 degrees acting on alternating sign matrices exhibits the CSP with the obvious q-analogue of |ASM(n)|.
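For context on the objects being counted: an $n \times n$ alternating sign matrix has entries in $\{-1, 0, 1\}$ with every partial row and column sum in $\{0, 1\}$ and every full row and column sum equal to $1$, and the total count is the famous product $\prod_{k=0}^{n-1} (3k+1)!/(n+k)!$ $(1, 2, 7, 42, \dots)$; the "obvious q-analogue" presumably replaces these factorials by q-factorials. The brute-force sketch below checks the count and the number of ASMs fixed by a quarter turn for tiny $n$; it does not attempt to verify the cyclic sieving statement itself, which requires evaluating the q-analogue at roots of unity.

import itertools
from math import factorial

def is_asm(M):
    # All partial row/column sums in {0, 1}, all full sums equal to 1.
    rows = [list(r) for r in M]
    cols = [list(c) for c in zip(*M)]
    for line in rows + cols:
        partial = 0
        for entry in line:
            partial += entry
            if partial not in (0, 1):
                return False
        if partial != 1:
            return False
    return True

def rot90(M):
    # Quarter-turn rotation of a tuple-of-tuples matrix.
    return tuple(zip(*M[::-1]))

def asm_formula(n):
    num = den = 1
    for k in range(n):
        num *= factorial(3 * k + 1)
        den *= factorial(n + k)
    return num // den

for n in (1, 2, 3):
    all_rows = list(itertools.product((-1, 0, 1), repeat=n))
    asms = [M for M in itertools.product(all_rows, repeat=n) if is_asm(M)]
    fixed = sum(1 for M in asms if rot90(M) == M)
    print(n, len(asms), asm_formula(n), fixed)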
On link patterns and Alternating Sign Matrices. Fraser Chiu Kim Hong; Alex Cloninger; and Noah Stephens-Davidowitz. 2007.
@unpublished{HCSLinkPatterns07,\n  title = {On link patterns and Alternating Sign Matrices},\n  abstract = {We devise an algorithm to generate an alternating sign matrix(ASM) with the same blue and green link pattern on the circle. We also find a characterization of link patterns that are achieved by a unique ASM.},\n  url = {http://noahsd.com/papers/ASMLinks.pdf},\n  author = {Hong, Fraser Chiu Kim and Cloninger, Alex and {Stephens-Davidowitz}, Noah},\n  year = {2007}\n}\n\n\n
We devise an algorithm to generate an alternating sign matrix (ASM) with the same blue and green link pattern on the circle. We also find a characterization of link patterns that are achieved by a unique ASM.
\n"}; document.write(bibbase_data.data);