var bibbase_data = {"data":"\n\n
\n <script src=\"https://bibbase.org/show?bib=http%3A%2F%2Fmahdi.ch%2Fwritings%2Fcheraghchi.bib&jsonp=1&fullnames=1&owner=Cheraghchi&jsonp=1\"></script>\n
\n \n <?php\n $contents = file_get_contents(\"https://bibbase.org/show?bib=http%3A%2F%2Fmahdi.ch%2Fwritings%2Fcheraghchi.bib&jsonp=1&fullnames=1&owner=Cheraghchi\");\n print_r($contents);\n ?>\n
\n \n <iframe src=\"https://bibbase.org/show?bib=http%3A%2F%2Fmahdi.ch%2Fwritings%2Fcheraghchi.bib&jsonp=1&fullnames=1&owner=Cheraghchi\"></iframe>\n
\n \n For more details see the documention.\n
\nTo the site owner:
\n\nAction required! Mendeley is changing its\n API. In order to keep using Mendeley with BibBase past April\n 14th, you need to:\n
\n \n \n Fix it now\n
\n@article{ref:CR23,\nauthor = {Cheraghchi, Mahdi and Ribeiro, Jo\\~{a}o},\ntitle = {Simple Codes and Sparse Recovery with Fast Decoding},\njournal = {SIAM Journal on Discrete Mathematics},\nvolume = {37},\nnumber = {2},\npages = {612-631},\nyear = {2023},\ndoi = {10.1137/21M1465354},\nURL = {https://doi.org/10.1137/21M1465354},\neprint = {https://doi.org/10.1137/21M1465354},\nabstract = {Construction of error-correcting codes achieving a designated minimum distance parameter is a central problem in coding theory. In this work, we study a very simple construction of binary linear codes that correct a given number of errors $K$. Moreover, we design a simple, nearly optimal syndrome decoder for the code as well. The running time of the decoder is only logarithmic in the block length of the code and nearly linear in the number of errors $K$. This decoder can be applied to exact for-all sparse recovery over any field, improving upon previous results with the same number of measurements. Furthermore, computation of the syndrome from a received word can be done in nearly linear time in the block length. We also demonstrate an application of these techniques in nonadaptive group testing and construct simple explicit measurement schemes with $O(K^2 \\log^2 N)$ tests and $O(K^3 \\log^2 N)$ recovery time for identifying up to $K$ defectives in a population of size $N$.}\n}\n\n\n
@INPROCEEDINGS{ref:BCGR23,\n author =\t {Huck Bennett and Mahdi Cheraghchi and Venkatesan Guruswami and Jo\\~{a}o Ribeiro},\n title =\t {Parameterized inapproximability of the minimum distance problem over all fields and the shortest vector problem in all $\\ell_p$ norms},\n booktitle =\t {Proceedings of the 55th Annual ACM Symposium on Theory of Computing (STOC)},\n year =\t 2023,\n url_Paper =\t {https://eccc.weizmann.ac.il/report/2022/156/},\n abstract = {We prove that the Minimum Distance Problem (MDP) on linear codes over any fixed finite field and parameterized by the input distance bound is W[1]-hard to approximate within any constant factor. We also prove analogous results for the parameterized Shortest Vector Problem (SVP) on integer lattices. Specifically, we prove that SVP in the $\\ell_p$ norm is W[1]-hard to approximate within any constant factor for any fixed $p > 1$ and W[1]-hard to approximate within a factor approaching 2 for $p = 1$. (These results all hold under randomized reductions.)\n\nThese results answer the main questions left open (and asked) by Bhattacharyya, Bonnet, Egri, Ghoshal, Karthik C. S., Lin, Manurangsi, and Marx (Journal of the ACM, 2021) on the complexity of parameterized MDP and SVP. \nFor MDP, they established similar hardness for binary linear codes and left the case of general fields open. For SVP in $\\ell_p$ norms with $p > 1$, they showed inapproximability within some constant factor (depending on $p$) and left open showing such hardness for arbitrary constant factors. They also left open showing W[1]-hardness even of exact SVP in the $\\ell_1$ norm.}\n}\n\n\n
@article{ref:CHMY22,\n title={One-tape turing machine and branching program lower bounds for {MCSP}},\n author={Cheraghchi, Mahdi and Hirahara, Shuichi and Myrisiotis, Dimitrios and Yoshida, Yuichi},\n journal={Theory of Computing Systems},\n pages={1--32},\n year={2022},\n publisher={Springer},\n doi = {10.1007/s00224-022-10113-9},\n url_Link = {https://link.springer.com/article/10.1007/s00224-022-10113-9},\n url_Paper =\t {https://eccc.weizmann.ac.il/report/2020/103/},\n abstract =\t {For a size parameter\n $s:\\mathbb{N}\\to\\mathbb{N}$, the Minimum\n Circuit Size Problem (denoted by $MCSP[s(n)]$)\n is the problem of deciding whether the minimum\n circuit size of a given function $f: \\{0,1\\}^n\n \\to \\{0,1\\}$ (represented by a string of length $N\n := 2^n$) is at most a threshold $s(n)$. A recent\n line of work exhibited ``hardness magnification''\n phenomena for MCSP: A very weak lower bound for MCSP\n implies a breakthrough result in complexity theory.\n For example, McKay, Murray, and Williams (STOC 2019)\n implicitly showed that, for some constant $\\mu_1 >\n 0$, if $MCSP[2^{\\mu_1\\cdot n}]$ cannot be\n computed by a one-tape Turing machine (with an\n additional one-way read-only input tape) running in\n time $N^{1.01}$, then $P\\neq NP$. In\n this paper, we present the following new lower\n bounds against one-tape Turing machines and\n branching programs: 1) A randomized two-sided error\n one-tape Turing machine (with an additional one-way\n read-only input tape) cannot compute \n $MCSP[2^{\\mu_2\\cdot n}]$ in time $N^{1.99}$, for\n some constant $\\mu_2 > \\mu_1$. 2) A\n non-deterministic (or parity) branching program of\n size $o(N^{1.5}/\\log N)$ cannot compute MKTP, which\n is a time-bounded Kolmogorov complexity analogue of\n MCSP. This is shown by directly applying the\n Ne\\v{c}iporuk method to MKTP, which previously\n appeared to be difficult. 
These results are the\n first non-trivial lower bounds for MCSP and MKTP\n against one-tape Turing machines and\n non-deterministic branching programs, and\n essentially match the best-known lower bounds for\n any explicit functions against these computational\n models. The first result is based on recent\n constructions of pseudorandom generators for\n read-once oblivious branching programs (ROBPs) and\n combinatorial rectangles (Forbes and Kelley, FOCS\n 2018; Viola 2019). En route, we obtain several\n related results: 1)There exists a (local) hitting\n set generator with seed length\n $\\widetilde{O}(\\sqrt{N})$ secure against read-once\n polynomial-size non-deterministic branching programs\n on $N$-bit inputs. 2) Any read-once\n co-non-deterministic branching program computing\n MCSP must have size at least\n $2^{\\widetilde{\\Omega}(N)}$. }\n}\n\n\n
@ARTICLE{ref:CDRV22,\n author =\t {Mahdi Cheraghchi and Joseph Downs and Jo\\~{a}o Ribeiro\n and Alexandra Veliche},\n title =\t {Mean-Based Trace Reconstruction over Oblivious Synchronization Channels},\n year =\t 2022,\n journal={IEEE Transactions on Information Theory}, \n url_Paper =\t {https://arxiv.org/abs/2102.09490},\n url_Link = {https://ieeexplore.ieee.org/document/9743284},\n doi = {10.1109/TIT.2022.3157383},\n abstract =\t {Mean-based reconstruction is a fundamental, natural approach to worst-case trace reconstruction over channels with synchronization errors.\nIt is known that $\\exp(\\Theta(n^{1/3}))$ traces are necessary and sufficient for mean-based worst-case trace reconstruction over the deletion channel, and this result was also extended to certain channels combining deletions and geometric insertions of uniformly random bits.\nIn this work, we use a simple extension of the original complex-analytic approach to show that these results are examples of a much more general phenomenon.\nWe introduce oblivious synchronization channels, which map each input bit to an arbitrarily distributed sequence of replications and insertions of random bits.\nThis general class captures all previously considered synchronization channels.\nWe show that for any oblivious synchronization channel whose output length follows a sub-exponential distribution either mean-based trace reconstruction is impossible or $\\exp(O(n^{1/3}))$ traces suffice for this task.}\n}\n\n\n
@ARTICLE{9513255,\n author={Bui, Thach V. and Cheraghchi, Mahdi and Echizen, Isao},\n journal={IEEE Transactions on Information Theory}, \n title={Improved non-adaptive algorithms for threshold group testing with a gap}, \n year={2021},\n volume={},\n number={},\n pages={},\n doi = {DOI: 10.1109/TIT.2021.3104670},\n doi={10.1109/TIT.2021.3104670}}\n \n\n
@ARTICLE{ref:jour:CR21,\n author =\t {Mahdi Cheraghchi and Jo\\~{a}o Ribeiro},\n title =\t {Non-Asymptotic Capacity Upper Bounds for the Discrete-Time {Poisson} Channel with Positive Dark Current},\n journal = {IEEE Communications Letters},\n year =\t 2021,\n doi = {10.1109/LCOMM.2021.3120706},\n url_Paper =\t {https://arxiv.org/abs/2010.14858}\n}\n\n\n
@INPROCEEDINGS{ref:conf:ACMTV21,\n author =\t {Eric Allender and Mahdi Cheraghchi and \n Dimitrios Myrisiotis and Harsha Tirumala, and Ilya Volkovich},\n title =\t {One-way functions and a conditional variant of MKTP},\n booktitle =\t {Proceedings of the 41st conference on Foundations \n of Software Technology and Theoretical Computer Science (FSTTCS)},\n year =\t 2021,\n url_Paper =\t {https://eccc.weizmann.ac.il/report/2021/009/}\n}\n\n\n
@INPROCEEDINGS{ref:CGM21,\n author =\t {Mahdi Cheraghchi and Ryan Gabrys and Olgica Milenkovic},\n title =\t {Semiquantitative Group Testing in at Most Two\n Rounds},\n year =\t 2021,\n booktitle =\t {Proceedings of the {IEEE International Symposium on\n Information Theory (ISIT)}},\n url_Paper =\t {https://arxiv.org/abs/2102.04519},\n url_Link = {https://ieeexplore.ieee.org/document/9518270},\n doi = {10.1109/ISIT45174.2021.9518270},\n abstract =\t {Semiquantitative group testing (SQGT) is a pooling\n method in which the test outcomes represent bounded\n intervals for the number of defectives.\n Alternatively, it may be viewed as an adder channel\n with quantized outputs. SQGT represents a natural\n choice for Covid-19 group testing as it allows for a\n straightforward interpretation of the cycle\n threshold values produced by polymerase chain\n reactions (PCR). Prior work on SQGT did not address\n the need for adaptive testing with a small number of\n rounds as required in practice. We propose\n conceptually simple methods for $2$-round and\n nonadaptive SQGT that significantly improve upon\n existing schemes by using ideas on nonbinary\n measurement matrices based on expander graphs and\n list-disjunct matrices.}\n}\n\n\n
@INPROCEEDINGS{ref:CDRV21,\n author =\t {Mahdi Cheraghchi and Joseph Downs and Jo\\~{a}o Ribeiro\n and Alexandra Veliche},\n title =\t {Mean-Based Trace Reconstruction over Practically any\n Replication-Insertion Channel},\n year =\t 2021,\n booktitle =\t {Proceedings of the {IEEE International Symposium on\n Information Theory (ISIT)}},\n url_Paper =\t {https://arxiv.org/abs/2102.09490},\n url_Link = {https://ieeexplore.ieee.org/document/9518161},\n doi = {10.1109/ISIT45174.2021.9518161},\n abstract =\t {Mean-based reconstruction is a fundamental, natural\n approach to worst-case trace reconstruction over\n channels with synchronization errors. It is known\n that $\\exp(O(n^{1/3}))$ traces are necessary and\n sufficient for mean-based worst-case trace\n reconstruction over the deletion channel, and this\n result was also extended to certain channels\n combining deletions and geometric insertions of\n uniformly random bits. In this work, we use a\n simple extension of the original complex-analytic\n approach to show that these results are examples of\n a much more general phenomenon: $\\exp(O(n^{1/3}))$\n traces suffice for mean-based worst-case trace\n reconstruction over any memoryless channel that maps\n each input bit to an arbitrarily distributed\n sequence of replications and insertions of random\n bits, provided the length of this sequence follows a\n sub-exponential distribution.}\n}\n\n\n
@INPROCEEDINGS{ref:BCN21,\n author =\t {Thach V. Bui and Mahdi Cheraghchi and Thuc\n D. Nguyen},\n title =\t {Improved Algorithms for Non-adaptive Group Testing\n with Consecutive Positives},\n year =\t 2021,\n booktitle =\t {Proceedings of the {IEEE International Symposium on\n Information Theory (ISIT)}},\n url_Paper =\t {https://arxiv.org/abs/2101.11294},\n url_Link = {https://ieeexplore.ieee.org/document/9518277},\n doi = {10.1109/ISIT45174.2021.9518277},\n abstract =\t {The goal of group testing is to efficiently identify\n a few specific items, called positives, in a large\n population of items via tests. A test is an action\n on a subset of items that returns positive if the\n subset contains at least one positive and negative\n otherwise. In non-adaptive group testing, all tests\n are independent, can be performed in parallel, and\n represented as a measurement matrix. In this work,\n we consider non-adaptive group testing with\n consecutive positives in which the items are\n linearly ordered and the positives are consecutive\n in that order. We present two algorithms for\n efficiently identifying consecutive positives. In\n particular, without storing measurement matrices, we\n can identify up to $d$ consecutive positives with $2\n \\log_2{\\frac{n}{d}} + 2d$ ($4 \\log_2{\\frac{n}{d}} +\n 2d$, resp.) tests in $O( \\log_2^2{\\frac{n}{d}} + d)$\n ($O( \\log_2{\\frac{n}{d}} + d)$, resp.) time. These\n results significantly improve the state-of-the-art\n scheme in which it takes $5 \\log_2{\\frac{n}{d}} + 2d\n + 21$ tests to identify the positives in $O(\n \\frac{n}{d} \\log_2{\\frac{n}{d}} + d^2)$ time with\n the measurement matrices associated with the scheme\n stored somewhere. }\n}\n\n\n
@INPROCEEDINGS{ref:CGJWX21,\n author =\t {Mahdi Cheraghchi and Elena Grigorescu and Brendan\n Juba and Karl Wimmer and Ning Xie},\n title =\t {List Learning with Attribute Noise},\n year =\t 2021,\n booktitle = \t {Proceedings of The 24th International Conference on Artificial Intelligence and Statistics (AISTATS)},\n pages = \t {2215--2223},\n editor = \t {Banerjee, Arindam and Fukumizu, Kenji},\n volume = \t {130},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {13--15 Apr},\n publisher = {PMLR},\n url_Link = {http://proceedings.mlr.press/v130/cheraghchi21a.html},\n url_Paper =\t {https://arxiv.org/abs/2006.06850},\n abstract =\t {We introduce and study the model of list learning\n with attribute noise. Learning with attribute noise\n was introduced by Shackelford and Volper (COLT 1988)\n as a variant of PAC learning, in which the algorithm\n has access to noisy examples and uncorrupted labels,\n and the goal is to recover an accurate\n hypothesis. Sloan (COLT 1988) and Goldman and Sloan\n (Algorithmica 1995) discovered information-theoretic\n limits to learning in this model, which have impeded\n further progress. In this article we extend the\n model to that of list learning, drawing inspiration\n from the list-decoding model in coding theory, and\n its recent variant studied in the context of\n learning. On the positive side, we show that sparse\n conjunctions can be efficiently list learned under\n some assumptions on the underlying ground-truth\n distribution. On the negative side, our results\n show that even in the list-learning model, efficient\n learning of parities and majorities is not possible\n regardless of the representation used.}\n}\n\n\n
@INPROCEEDINGS{ref:conf:CHMY21,\n author =\t {Mahdi Cheraghchi and Shuichi Hirahara and Dimitrios\n Myrisiotis and Yuichi Yoshida},\n title =\t {One-tape Turing machine and branching program lower\n bounds for {MCSP}},\n booktitle =\t {Proceedings of the 38th {International Symposium on\n Theoretical Aspects of Computer Science (STACS)}},\n year =\t 2021,\n keywords =\t {Minimum Circuit Size Problem, One-Tape Turing\n Machines, Branching Programs, Lower Bounds,\n Pseudorandom Generators, Hitting Set Generators},\n doi =\t\t {10.4230/LIPIcs.STACS.2021.23},\n url_Link = {https://drops.dagstuhl.de/opus/volltexte/2021/13668/},\n url_Paper =\t {https://eccc.weizmann.ac.il/report/2020/103/},\n note = {Invited to the special issue of Theory of Computing Systems.},\n abstract =\t {For a size parameter\n $s:\\mathbb{N}\\to\\mathbb{N}$, the Minimum\n Circuit Size Problem (denoted by $MCSP[s(n)]$)\n is the problem of deciding whether the minimum\n circuit size of a given function $f: \\{0,1\\}^n\n \\to \\{0,1\\}$ (represented by a string of length $N\n := 2^n$) is at most a threshold $s(n)$. A recent\n line of work exhibited ``hardness magnification''\n phenomena for MCSP: A very weak lower bound for MCSP\n implies a breakthrough result in complexity theory.\n For example, McKay, Murray, and Williams (STOC 2019)\n implicitly showed that, for some constant $\\mu_1 >\n 0$, if $MCSP[2^{\\mu_1\\cdot n}]$ cannot be\n computed by a one-tape Turing machine (with an\n additional one-way read-only input tape) running in\n time $N^{1.01}$, then $P\\neq NP$. In\n this paper, we present the following new lower\n bounds against one-tape Turing machines and\n branching programs: 1) A randomized two-sided error\n one-tape Turing machine (with an additional one-way\n read-only input tape) cannot compute \n $MCSP[2^{\\mu_2\\cdot n}]$ in time $N^{1.99}$, for\n some constant $\\mu_2 > \\mu_1$. 
2) A\n non-deterministic (or parity) branching program of\n size $o(N^{1.5}/\\log N)$ cannot compute MKTP, which\n is a time-bounded Kolmogorov complexity analogue of\n MCSP. This is shown by directly applying the\n Ne\\v{c}iporuk method to MKTP, which previously\n appeared to be difficult. These results are the\n first non-trivial lower bounds for MCSP and MKTP\n against one-tape Turing machines and\n non-deterministic branching programs, and\n essentially match the best-known lower bounds for\n any explicit functions against these computational\n models. The first result is based on recent\n constructions of pseudorandom generators for\n read-once oblivious branching programs (ROBPs) and\n combinatorial rectangles (Forbes and Kelley, FOCS\n 2018; Viola 2019). En route, we obtain several\n related results: 1)There exists a (local) hitting\n set generator with seed length\n $\\widetilde{O}(\\sqrt{N})$ secure against read-once\n polynomial-size non-deterministic branching programs\n on $N$-bit inputs. 2) Any read-once\n co-non-deterministic branching program computing\n MCSP must have size at least\n $2^{\\widetilde{\\Omega}(N)}$. }\n}\n\n\n
@ARTICLE{ref:CR21,\n author =\t {Mahdi Cheraghchi and Jo\\~{a}o Ribeiro},\n title =\t {An Overview of Capacity Results for Synchronization\n Channels},\n journal =\t {IEEE Transactions on Information Theory},\n doi =\t\t {10.1109/TIT.2020.2997329},\n year={2021},\n volume={67},\n number={6},\n pages={3207--3232},\n url_Link =\t {https://ieeexplore.ieee.org/document/9099526},\n url_Paper =\t {https://arxiv.org/abs/1910.07199},\n abstract =\t {Synchronization channels, such as the well-known\n deletion channel, are surprisingly harder to analyze\n than memoryless channels, and they are a source of\n many fundamental problems in information theory and\n theoretical computer science. One of the most basic\n open problems regarding synchronization channels is\n the derivation of an exact expression for their\n capacity. Unfortunately, most of the classic\n information-theoretic techniques at our disposal\n fail spectacularly when applied to synchronization\n channels. Therefore, new approaches must be\n considered to tackle this problem. This survey gives\n an account of the great effort made over the past\n few decades to better understand the (broadly\n defined) capacity of synchronization channels,\n including both the main results and the novel\n techniques underlying them. Besides the usual notion\n of channel capacity, we also discuss the zero-error\n capacity of adversarial synchronization channels. }\n}\n\n\n
@UNPUBLISHED{ref:GPRRCGM20,\n author =\t {Ryan Gabrys and Srilakshmi Pattabiraman and Vishal\n Rana and Jo\\~{a}o Ribeiro and Mahdi Cheraghchi and\n Venkatesan Guruswami and Olgica Milenkovic},\n title =\t {{AC-DC}: Amplification Curve Diagnostics for\n {Covid-19} Group Testing},\n year =\t {2020},\n eprint =\t {2011.05223},\n archivePrefix ={arXiv},\n primaryClass = {q-bio.QM},\n note =\t {arXiv:2011.05223},\n url_Paper =\t {https://arxiv.org/abs/2011.05223},\n abstract =\t {The first part of the paper presents a review of the\n gold-standard testing protocol for Covid-19,\n real-time, reverse transcriptase PCR, and its\n properties and associated measurement data such as\n amplification curves that can guide the development\n of appropriate and accurate adaptive group testing\n protocols. The second part of the paper is concerned\n with examining various off-the-shelf group testing\n methods for Covid-19 and identifying their strengths\n and weaknesses for the application at hand. The\n third part of the paper contains a collection of new\n analytical results for adaptive semiquantitative\n group testing with probabilistic and combinatorial\n priors, including performance bounds, algorithmic\n solutions, and noisy testing protocols. The\n probabilistic setting is of special importance as it\n is designed to be simple to implement by nonexperts\n and handle heavy hitters. The worst-case paradigm\n extends and improves upon prior work on\n semiquantitative group testing with and without\n specialized PCR noise models.}\n}\n\n\n
@INPROCEEDINGS{ref:CN20,\n author =\t {Mahdi Cheraghchi and Vasileios Nakos},\n title =\t {Combinatorial Group Testing Schemes with\n Near-Optimal Decoding Time},\n booktitle =\t {Proceedings of the 61st Annual {IEEE} Symposium on\n Foundations of Computer Science {(FOCS)}},\n year =\t 2020,\n doi =\t\t {10.1109/FOCS46700.2020.00115},\n url_Link = {https://ieeexplore.ieee.org/document/9317982},\n url_Paper =\t {https://arxiv.org/abs/2006.08420},\n abstract =\t {In the long-studied problem of combinatorial group\n testing, one is asked to detect a set of $k$\n defective items out of a population of size $n$,\n using $m \\ll n$ disjunctive measurements. In the\n non-adaptive setting, the most widely used\n combinatorial objects are disjunct and list-disjunct\n matrices, which define incidence matrices of test\n schemes. Disjunct matrices allow the identification\n of the exact set of defectives, whereas list\n disjunct matrices identify a small superset of the\n defectives. Apart from the combinatorial\n guarantees, it is often of key interest to equip\n measurement designs with efficient decoding\n algorithms. The most efficient decoders should run\n in sublinear time in $n$, and ideally near-linear in\n the number of measurements $m$. In this work, we\n give several constructions with an optimal number of\n measurements and near-optimal decoding time for the\n most fundamental group testing tasks, as well as for\n central tasks in the compressed sensing and heavy\n hitters literature. For many of those tasks, the\n previous measurement-optimal constructions needed\n time either quadratic in the number of measurements\n or linear in the universe size. Among our results\n are the following: a construction of disjunct\n matrices matching the best known construction in\n terms of the number of rows $m$, but achieving\n $O(m)$ decoding time. 
a construction of list\n disjunct matrices with the optimal $m=O(k\n \\log(n/k))$ number of rows and nearly linear\n decoding time in $m$; error-tolerant variations of\n the above constructions; a non-adaptive group\n testing scheme for the "for-each" model with $m=O(k\n \\log n)$ measurements and $O(m)$ decoding time; a\n streaming algorithm for the "for-all" version of the\n heavy hitters problem in the strict turnstile model\n with near-optimal query time, as well as a "list\n decoding" variant obtaining also near-optimal update\n time and $O(k\\log(n/k))$ space usage; an\n $\\ell_2/\\ell_2$ weak identification system for\n compressed sensing with nearly optimal sample\n complexity and nearly linear decoding time in the\n sketch length. Most of our results are obtained via\n a clean and novel approach which avoids\n list-recoverable codes or related complex techniques\n which were present in almost every state-of-the-art\n work on efficiently decodable constructions of such\n objects. }\n}\n\n\n
@INPROCEEDINGS{ref:LCGSW20,\n author =\t {Fuchun Lin and Mahdi Cheraghchi and Venkatesan\n Guruswami and Reihaneh Safavi-Naini and Huaxiong\n Wang},\n title =\t {Leakage-Resilient Secret Sharing in\n Non-compartmentalized Models},\n year =\t 2020,\n booktitle =\t {Proceedings of the {Conference on\n Information-Theoretic Cryptography (ITC)}},\n pages =\t {7:1--7:24},\n volume =\t 163,\n series =\t {Leibniz International Proceedings in Informatics\n (LIPIcs)},\n url_Link =\t {https://drops.dagstuhl.de/opus/volltexte/2020/12112},\n url_Paper =\t {https://arxiv.org/abs/1902.06195},\n doi =\t\t {10.4230/LIPIcs.ITC.2020.7},\n keywords =\t {Leakage-resilient cryptography, Secret sharing\n scheme, Randomness extractor},\n abstract =\t {Non-malleable secret sharing was recently proposed\n by Goyal and Kumar in independent tampering and\n joint tampering models for threshold secret sharing\n (STOC18) and secret sharing with general access\n structure (CRYPTO18). The idea of making secret\n sharing non-malleable received great attention and\n by now has generated many papers exploring new\n frontiers in this topic, such as multiple-time\n tampering and adding leakage resiliency to the\n one-shot tampering model. Non-compartmentalized\n tampering model was first studied by Agrawal et.al\n (CRYPTO15) for non-malleability against permutation\n composed with bit-wise independent tampering, and\n shown useful in constructing non-malleable string\n commitments. In spite of strong demands in\n application, there are only a few tampering families\n studied in non-compartmentalized model, due to the\n fact that compartmentalization (assuming that the\n adversary can not access all pieces of sensitive\n data at the same time) is crucial for most of the\n known techniques. We initiate the study of\n leakage-resilient secret sharing in the\n non-compartmentalized model. 
Leakage in\n leakage-resilient secret sharing is usually modelled\n as arbitrary functions with bounded total output\n length applied to each share or up to a certain\n number of shares (but never the full share vector)\n at one time. Arbitrary leakage functions, even with\n one bit output, applied to the full share vector is\n impossible to resist since the reconstruction\n algorithm itself can be used to construct a\n contradiction. We allow the leakage functions to be\n applied to the full share vector\n (non-compartmentalized) but restrict to the class of\n affine leakage functions. The leakage adversary can\n corrupt several players and obtain their shares, as\n in normal secret sharing. The leakage adversary can\n apply arbitrary affine functions with bounded total\n output length to the full share vector and obtain\n the outputs as leakage. These two processes can be\n both non-adaptive and do not depend on each other,\n or both adaptive and depend on each other with\n arbitrary ordering. We use a generic approach that\n combines randomness extractors with error correcting\n codes to construct such leakage-resilient secret\n sharing schemes, and achieve constant information\n ratio (the scheme for non-adaptive adversary is near\n optimal). We then explore making the\n non-compartmentalized leakage-resilient secret\n sharing also non-malleable against tampering. We\n consider a tampering model, where the adversary can\n use the shares obtained from the corrupted players\n and the outputs of the global leakage functions to\n choose a tampering function from a tampering family\n $\\mathcal{F}$. 
We give two constructions of such\n leakage-resilient non-malleable secret sharing for\n the case $\\mathcal{F}$ is the bit-wise independent\n tampering and, respectively, for the case\n $\\mathcal{F}$ is the affine tampering functions, the\n latter is non-compartmentalized tampering that\n subsumes the permutation composed with bit-wise\n independent tampering mentioned above. }\n}\n\n\n
@INPROCEEDINGS{ref:BCE20,\n author =\t {Thach V. Bui and Mahdi Cheraghchi and Isao Echizen},\n title =\t {Improved Non-adaptive algorithms for Threshold Group\n Testing with a Gap},\n year =\t 2020,\n booktitle =\t {Proceedings of the {IEEE International Symposium on\n Information Theory (ISIT)}},\n doi =\t\t {10.1109/ISIT44484.2020.9174212},\n url_Link = {https://ieeexplore.ieee.org/document/9174212},\n url_Paper =\t {https://arxiv.org/abs/2001.01008},\n abstract =\t {The basic goal of threshold group testing is to\n identify up to $d$ defective items among a\n population of $n$ items, where $d$ is usually much\n smaller than $n$. The outcome of a test on a subset\n of items is positive if the subset has at least $u$\n defective items, negative if it has up to $\\ell$\n defective items, where $0 \\leq \\ell < u$, and\n arbitrary otherwise. This is called threshold group\n testing. The parameter $g = u - \\ell - 1$ is called\n \\textit{the gap}. In this paper, we focus on the\n case $g > 0$, i.e., threshold group testing with a\n gap. Note that the results presented here are also\n applicable to the case $g = 0$; however, the results\n are not as efficient as those in related\n work. Currently, a few reported studies have\n investigated test designs and decoding algorithms\n for identifying defective items. Most of the\n previous studies have not been feasible because\n there are numerous constraints on their problem\n settings or the decoding complexities of their\n proposed schemes are relatively large. Therefore, it\n is compulsory to reduce the number of tests as well\n as the decoding complexity, i.e., the time for\n identifying the defective items, for achieving\n practical schemes. The work presented here makes\n five contributions. The first is a more accurate\n theorem for a non-adaptive algorithm for threshold\n group testing proposed by Chen and Fu. 
The second is\n an improvement in the construction of disjunct\n matrices, which are the main tools for tackling\n (threshold) group testing and other tasks such as\n constructing cover-free families or learning hidden\n graphs. Specifically, we present a better upper\n bound on the number of tests for disjunct matrices\n compared with that in related work. The third and\n fourth contributions are a reduction in the number\n of tests and a reduction in the decoding time for\n identifying defective items in a noisy setting on\n test outcomes. The fifth contribution is a\n simulation on the number of tests of the resulting\n improvements for previous work and the proposed\n theorems. }\n}\n\n\n
@ARTICLE{ref:CGMR20,\n author =\t {Mahdi Cheraghchi and Ryan Gabrys and Olgica\n Milenkovic and Jo\\~{a}o Ribeiro},\n title =\t {Coded Trace Reconstruction},\n year =\t 2020,\n journal =\t {IEEE Transactions on Information Theory},\n note =\t {Preliminary version in Proceedings of {ITW 2019}.},\n doi =\t\t {10.1109/TIT.2020.2996377},\n volume={66},\n number={10},\n pages={6084-6103},\n url_Link =\t {https://ieeexplore.ieee.org/document/9097932},\n url_Paper =\t {https://arxiv.org/abs/1903.09992},\n abstract =\t {Motivated by average-case trace reconstruction and\n coding for portable DNA-based storage systems, we\n initiate the study of \\textit{coded trace\n reconstruction}, the design and analysis of\n high-rate efficiently encodable codes that can be\n efficiently decoded with high probability from few\n reads (also called \\textit{traces}) corrupted by\n edit errors. Codes used in current portable\n DNA-based storage systems with nanopore sequencers\n are largely based on heuristics, and have no\n provable robustness or performance guarantees even\n for an error model with i.i.d. deletions and\n constant deletion probability. Our work is a first\n step towards the design of efficient codes with\n provable guarantees for such systems. We consider a\n constant rate of i.i.d. deletions, and perform an\n analysis of marker-based code-constructions. This\n gives rise to codes with redundancy $O(n/\\log n)$\n (resp. $O(n/\\log\\log n)$) that can be efficiently\n reconstructed from $\\exp(O(\\log^{2/3}n))$\n (resp. $\\exp(O(\\log\\log n)^{2/3})$) traces, where\n $n$ is the message length. Then, we give a\n construction of a code with $O(\\log n)$ bits of\n redundancy that can be efficiently reconstructed\n from $\\mathrm{poly}(n)$ traces if the deletion\n probability is small enough. 
Finally, we show how to\n combine both approaches, giving rise to an efficient\n code with $O(n/\\log n)$ bits of redundancy which can\n be reconstructed from $\\mathrm{poly}(\\log n)$ traces\n for a small constant deletion probability. }\n}\n\n\n
@ARTICLE{ref:CKLM20,\n title =\t {Circuit Lower Bounds for {MCSP} from Local\n Pseudorandom Generators},\n author =\t {Mahdi Cheraghchi and Valentine Kabanets and Zhenjian\n Lu and Dimitrios Myrisiotis},\n year =\t 2020,\n journal =\t {{ACM Transactions on Computation Theory (ToCT)}},\n volume =\t 12,\n number =\t 3,\n articleno =\t 21,\n note =\t {Preliminary version in {Proceedings of ICALP 2019}.},\n url_Paper =\t {https://eccc.weizmann.ac.il/report/2019/022},\n url_Link =\t {https://dl.acm.org/doi/abs/10.1145/3404860},\n doi =\t\t {10.1145/3404860},\n abstract =\t {The Minimum Circuit Size Problem (MCSP) asks if a\n given truth table of a Boolean function $f$ can be\n computed by a Boolean circuit of size at most\n $\\theta$, for a given parameter $\\theta$. We improve\n several circuit lower bounds for MCSP, using\n pseudorandom generators (PRGs) that are local; a PRG\n is called \\textit{local} if its output bit strings,\n when viewed as the truth table of a Boolean\n function, can be computed by a Boolean circuit of\n small size. We get new and improved lower bounds for\n MCSP that almost match the best-known lower bounds\n against several circuit models. Specifically, we\n show that computing MCSP, on functions with a truth\n table of length $N$, requires: 1) $N^{3-o(1)}$-size\n de Morgan formulas, improving the recent\n $N^{2-o(1)}$ lower bound by Hirahara and Santhanam\n (CCC 2017), 2) $N^{2-o(1)}$-size formulas over an\n arbitrary basis or general branching programs (no\n non-trivial lower bound was known for MCSP against\n these models), and 3)\n $2^{\\Omega(N^{1/(d+1.01)})}$-size depth-$d$\n $\\mathrm{AC}^0$ circuits, improving the (implicit,\n in their work) exponential size lower bound by\n Allender et al. (SICOMP 2006). The $\\mathrm{AC}^0$\n lower bound stated above matches the best-known\n $\\mathrm{AC}^0$ lower bound (for {\\tt PARITY}) up to\n a small \\textit{additive} constant in the\n depth. 
Also, for the special case of depth-$2$\n circuits (i.e., CNFs or DNFs), we get an optimal\n lower bound of \\(2^{{\\Omega}(N)}\\) for MCSP. },\n keywords =\t {Minimum circuit size problem (MCSP), circuit lower\n bounds, pseudorandom generators (PRGs),\n constant-depth circuits, branching programs, local\n PRGs, de Morgan formulas}\n}\n\n\n
@INPROCEEDINGS{ref:conf:CGMR19,\n author =\t {Mahdi Cheraghchi and Ryan Gabrys and Olgica\n Milenkovic and Jo\\~{a}o Ribeiro},\n title =\t {Coded Trace Reconstruction},\n year =\t 2019,\n booktitle =\t {Proceedings of the {IEEE Information Theory Workshop\n (ITW)}},\n pages =\t {1--5},\n note =\t {Extended version in {IEEE Transactions on\n Information Theory}.},\n doi =\t\t {10.1109/ITW44776.2019.8989261},\n url_Link =\t {https://ieeexplore.ieee.org/document/8989261},\n url_Paper =\t {https://arxiv.org/abs/1903.09992},\n abstract =\t {Motivated by average-case trace reconstruction and\n coding for portable DNA-based storage systems, we\n initiate the study of \\textit{coded trace\n reconstruction}, the design and analysis of\n high-rate efficiently encodable codes that can be\n efficiently decoded with high probability from few\n reads (also called \\textit{traces}) corrupted by\n edit errors. Codes used in current portable\n DNA-based storage systems with nanopore sequencers\n are largely based on heuristics, and have no\n provable robustness or performance guarantees even\n for an error model with i.i.d. deletions and\n constant deletion probability. Our work is a first\n step towards the design of efficient codes with\n provable guarantees for such systems. We consider a\n constant rate of i.i.d. deletions, and perform an\n analysis of marker-based code-constructions. This\n gives rise to codes with redundancy $O(n/\\log n)$\n (resp. $O(n/\\log\\log n)$) that can be efficiently\n reconstructed from $\\exp(O(\\log^{2/3}n))$\n (resp. $\\exp(O(\\log\\log n)^{2/3})$) traces, where\n $n$ is the message length. Then, we give a\n construction of a code with $O(\\log n)$ bits of\n redundancy that can be efficiently reconstructed\n from $\\mathrm{poly}(n)$ traces if the deletion\n probability is small enough. 
Finally, we show how to\n combine both approaches, giving rise to an efficient\n code with $O(n/\\log n)$ bits of redundancy which can\n be reconstructed from $\\mathrm{poly}(\\log n)$ traces\n for a small constant deletion probability. }\n}\n\n\n
@INPROCEEDINGS{ref:conf:CKLM19,\n author =\t {Mahdi Cheraghchi and Valentine Kabanets and Zhenjian\n Lu and Dimitrios Myrisiotis},\n title =\t {Circuit Lower Bounds for {MCSP} from Local\n Pseudorandom Generators},\n year =\t 2019,\n booktitle =\t {Proceedings of the 46th {International Colloquium on\n Automata, Languages and Programming (ICALP)}},\n pages =\t {39:1--39:14},\n series =\t {Leibniz International Proceedings in Informatics\n (LIPIcs)},\n volume =\t 132,\n note =\t {Extended version to appear in {ACM Transactions on\n Computation Theory (ToCT)}.},\n doi =\t\t {10.4230/LIPIcs.ICALP.2019.39},\n url_Link =\n {https://drops.dagstuhl.de/opus/frontdoor.php?source_opus=10615},\n url_Paper =\t {https://eccc.weizmann.ac.il/report/2019/022},\n abstract =\t {The Minimum Circuit Size Problem (MCSP) asks if a\n given truth table of a Boolean function $f$ can be\n computed by a Boolean circuit of size at most\n $\\theta$, for a given parameter $\\theta$. We improve\n several circuit lower bounds for MCSP, using\n pseudorandom generators (PRGs) that are local; a PRG\n is called \\textit{local} if its output bit strings,\n when viewed as the truth table of a Boolean\n function, can be computed by a Boolean circuit of\n small size. We get new and improved lower bounds for\n MCSP that almost match the best-known lower bounds\n against several circuit models. Specifically, we\n show that computing MCSP, on functions with a truth\n table of length $N$, requires: 1) $N^{3-o(1)}$-size\n de Morgan formulas, improving the recent\n $N^{2-o(1)}$ lower bound by Hirahara and Santhanam\n (CCC 2017), 2) $N^{2-o(1)}$-size formulas over an\n arbitrary basis or general branching programs (no\n non-trivial lower bound was known for MCSP against\n these models), and 3)\n $2^{\\Omega(N^{1/(d+1.01)})}$-size depth-$d$\n $\\mathrm{AC}^0$ circuits, improving the (implicit,\n in their work) exponential size lower bound by\n Allender et al. (SICOMP 2006). 
The $\\mathrm{AC}^0$\n lower bound stated above matches the best-known\n $\\mathrm{AC}^0$ lower bound (for {\\tt PARITY}) up to\n a small \\textit{additive} constant in the\n depth. Also, for the special case of depth-$2$\n circuits (i.e., CNFs or DNFs), we get an optimal\n lower bound of \\(2^{{\\Omega}(N)}\\) for MCSP. },\n keywords =\t {minimum circuit size problem (MCSP), circuit lower\n bounds, pseudorandom generators (PRGs), local PRGs,\n de Morgan formulas, branching programs, constant\n depth circuits}\n}\n\n\n
@INPROCEEDINGS{ref:LSCW19,\n author =\t {Fuchun Lin and Reihaneh Safavi-Naini and Mahdi\n Cheraghchi and Huaxiong Wang},\n title =\t {Non-Malleable Codes against Active Physical Layer\n Adversary},\n year =\t 2019,\n doi =\t\t {10.1109/ISIT.2019.8849438},\n booktitle =\t {Proceedings of the {IEEE International Symposium on\n Information Theory (ISIT)}},\n pages =\t {2753--2757},\n url_Link =\t {https://ieeexplore.ieee.org/document/8849438},\n abstract =\t {Non-malleable codes are randomized codes that\n protect coded messages against modification by\n functions in a tampering function class. These codes\n are motivated by providing tamper resilience in\n applications where a cryptographic secret is stored\n in a tamperable storage device and the protection\n goal is to ensure that the adversary cannot benefit\n from their physical tampering with the device. In\n this paper we consider nonmalleable codes for\n protection of secure communication against active\n physical layer adversaries. We define a class of\n functions that closely model tampering of\n communication by adversaries who can eavesdrop on a\n constant fraction of the transmitted codeword, and\n use this information to select a vector of tampering\n functions that will be applied to a second constant\n fraction of codeword components (possibly\n overlapping with the first set). We derive rate\n bounds for non-malleable codes for this function\n class and give a modular construction that adapts\n and provides new analysis for an existing\n construction in the new setting. We discuss our\n results and directions for future work. }\n}\n\n\n
@INPROCEEDINGS{ref:CR19b,\n author =\t {Mahdi Cheraghchi and Jo\\~{a}o Ribeiro},\n title =\t {Simple Codes and Sparse Recovery with Fast Decoding},\n year =\t 2019,\n doi =\t\t {10.1109/ISIT.2019.8849702},\n booktitle =\t {Proceedings of the {IEEE International Symposium on\n Information Theory (ISIT)}},\n pages =\t {156--160},\n url_Link =\t {https://ieeexplore.ieee.org/document/8849702},\n url_Paper =\t {https://arxiv.org/abs/1901.02852},\n abstract =\t {Construction of error-correcting codes achieving a\n designated minimum distance parameter is a central\n problem in coding theory. A classical and algebraic\n family of error-correcting codes studied for this\n purpose are the BCH codes. In this work, we study a\n very simple construction of linear codes that\n achieve a given distance parameter $K$. Moreover, we\n design a simple, nearly optimal syndrome decoder for\n the code as well. The running time of the decoder is\n only logarithmic in the block length of the code,\n and nearly linear in the distance parameter $K$.\n This decoder can be applied to exact for-all sparse\n recovery over any field, improving upon previous\n results with the same number of measurements.\n Furthermore, computation of the syndrome from a\n received word can be done in nearly linear time in\n the block length. We also demonstrate an\n application of these techniques in non-adaptive\n group testing, and construct simple explicit\n measurement schemes with $O(K^2 \\log^2 N)$ tests and\n $O(K^3 \\log^2 N)$ recovery time for identifying up\n to $K$ defectives in a population of size $N$. }\n}\n\n\n
@INPROCEEDINGS{ref:LCGSW19,\n author =\t {Fuchun Lin and Mahdi Cheraghchi and Venkatesan\n Guruswami and Reihaneh Safavi-Naini and Huaxiong\n Wang},\n title =\t {Secret Sharing with Binary Shares},\n year =\t 2019,\n doi =\t\t {10.4230/LIPIcs.ITCS.2019.53},\n booktitle =\t {Proceedings of the 10th {Innovations in Theoretical\n Computer Science Conference (ITCS)}},\n pages =\t {53:1--53:20},\n series =\t {Leibniz International Proceedings in Informatics\n (LIPIcs)},\n volume =\t 124,\n url_Link =\n {https://drops.dagstuhl.de/opus/frontdoor.php?source_opus=10146},\n url_Paper =\t {https://arxiv.org/abs/1808.02974},\n abstract =\t {Shamir's celebrated secret sharing scheme provides\n an efficient method for encoding a secret of\n arbitrary length $\\ell$ among any $N \\leq 2^\\ell$\n players such that for a threshold parameter $t$, (i)\n the knowledge of any $t$ shares does not reveal any\n information about the secret and, (ii) any choice of\n $t+1$ shares fully reveals the secret. It is known\n that any such threshold secret sharing scheme\n necessarily requires shares of length $\\ell$, and in\n this sense Shamir's scheme is optimal. The more\n general notion of ramp schemes requires the\n reconstruction of secret from any $t+g$ shares, for\n a positive integer gap parameter $g$. Ramp secret\n sharing scheme necessarily requires shares of length\n $\\ell/g$. Other than the bound related to secret\n length $\\ell$, the share lengths of ramp schemes can\n not go below a quantity that depends only on the gap\n ratio $g/N$. In this work, we study secret sharing\n in the extremal case of bit-long shares and\n arbitrarily small gap ratio $g/N$, where standard\n ramp secret sharing becomes impossible. We show,\n however, that a slightly relaxed but equally\n effective notion of semantic security for the\n secret, and negligible reconstruction error\n probability, eliminate the impossibility. Moreover,\n we provide explicit constructions of such\n schemes. 
One of the consequences of our relaxation\n is that, unlike standard ramp schemes with perfect\n secrecy, adaptive and non-adaptive adversaries need\n different analysis and construction. For\n non-adaptive adversaries, we explicitly construct\n secret sharing schemes that provide secrecy against\n any $\\tau$ fraction of observed shares, and\n reconstruction from any $\\rho$ fraction of shares,\n for any choices of $0 \\leq \\tau < \\rho \\leq 1$. Our\n construction achieves secret length\n $N(\\rho-\\tau-o(1))$, which we show to be\n optimal. For adaptive adversaries, we construct\n explicit schemes attaining a secret length\n $\\Omega(N(\\rho-\\tau))$. We discuss our results and\n open questions. },\n keywords =\t {Secret sharing scheme, Wiretap channel}\n}\n\n\n
@ARTICLE{ref:CR19:sticky,\n author =\t {Mahdi Cheraghchi and Jo\\~{a}o Ribeiro},\n title =\t {Sharp Analytical Capacity Upper Bounds for Sticky\n and Related Channels},\n year =\t 2019,\n journal =\t {IEEE Transactions on Information Theory},\n volume =\t 65,\n number =\t 11,\n pages =\t {6950--6974},\n note =\t {Preliminary version in {Proceedings of Allerton\n 2018}.},\n doi =\t\t {10.1109/TIT.2019.2920375},\n url_Link =\t {https://ieeexplore.ieee.org/document/8727914},\n url_Paper =\t {https://arxiv.org/abs/1806.06218},\n abstract =\t {We study natural examples of binary channels with\n synchronization errors. These include the\n duplication channel, which independently outputs a\n given bit once or twice, and geometric channels that\n repeat a given bit according to a geometric rule,\n with or without the possibility of bit deletion. We\n apply the general framework of Cheraghchi (STOC\n 2018) to obtain sharp analytical upper bounds on the\n capacity of these channels. Previously, upper\n bounds were known via numerical computations\n involving the computation of finite approximations\n of the channels by a computer and then using the\n obtained numerical results to upper bound the actual\n capacity. While leading to sharp numerical results,\n further progress on the full understanding of the\n channel capacity inherently remains elusive using\n such methods. Our results can be regarded as a major\n step towards a complete understanding of the\n capacity curves. Quantitatively, our upper bounds\n sharply approach, and in some cases surpass, the\n bounds that were previously only known by purely\n numerical methods. Among our results, we notably\n give a completely analytical proof that, when the\n number of repetitions per bit is geometric\n (supported on $\\{0,1,2,\\dots\\}$) with mean growing\n to infinity, the channel capacity remains\n substantially bounded away from $1$. }\n}\n\n\n
@ARTICLE{ref:Che19:del,\n author =\t {Mahdi Cheraghchi},\n title =\t {Capacity Upper Bounds for Deletion-type Channels},\n year =\t 2019,\n journal =\t {Journal of the {ACM}},\n volume =\t 66,\n number =\t 2,\n month =\t {Mar},\n articleno =\t 9,\n numpages =\t 79,\n note =\t {Preliminary version in Proceedings of {STOC 2018}.},\n doi =\t\t {10.1145/3281275},\n url_Link =\t {https://dl.acm.org/doi/10.1145/3281275},\n url_Paper =\t {https://arxiv.org/abs/1711.01630},\n abstract =\t {We develop a systematic approach, based on convex\n programming and real analysis, for obtaining upper\n bounds on the capacity of the binary deletion\n channel and, more generally, channels with\n i.i.d. insertions and deletions. Other than the\n classical deletion channel, we give a special\n attention to the Poisson-repeat channel introduced\n by Mitzenmacher and Drinea (IEEE Transactions on\n Information Theory, 2006). Our framework can be\n applied to obtain capacity upper bounds for any\n repetition distribution (the deletion and\n Poisson-repeat channels corresponding to the special\n cases of Bernoulli and Poisson distributions). Our\n techniques essentially reduce the task of proving\n capacity upper bounds to maximizing a univariate,\n real-valued, and often concave function over a\n bounded interval. The corresponding univariate\n function is carefully designed according to the\n underlying distribution of repetitions and the\n choices vary depending on the desired strength of\n the upper bounds as well as the desired simplicity\n of the function (e.g., being only efficiently\n computable versus having an explicit closed-form\n expression in terms of elementary, or common\n special, functions). 
Among our results, we show the\n following: 1) The capacity of the binary deletion\n channel with deletion probability $d$ is at most\n $(1-d) \\log \\phi$ for $d \\geq 1/2$, and, assuming\n that the capacity function is convex, is at most\n $1-d \\log(4/\\phi)$ for $d<1/2$, where\n $\\phi=(1+\\sqrt{5})/2$ is the golden ratio. This is\n the first nontrivial capacity upper bound for any\n value of $d$ outside the limiting case $d \\to 0$\n that is fully explicit and proved without computer\n assistance. 2) We derive the first set of capacity\n upper bounds for the Poisson-repeat channel. Our\n results uncover further striking connections between\n this channel and the deletion channel, and suggest,\n somewhat counter-intuitively, that the\n Poisson-repeat channel is actually analytically\n simpler than the deletion channel and may be of key\n importance to a complete understanding of the\n deletion channel. 3) We derive several novel upper\n bounds on the capacity of the deletion channel. All\n upper bounds are maximums of efficiently computable,\n and concave, univariate real functions over a\n bounded domain. In turn, we upper bound these\n functions in terms of explicit elementary and\n standard special functions, whose maximums can be\n found even more efficiently (and sometimes,\n analytically, for example for $d=1/2$). },\n keywords =\t {Coding theory, Synchronization error-correcting\n codes, Channel coding}\n}\n\n\n
@ARTICLE{ref:CR19:poi,\n author =\t {Mahdi Cheraghchi and Jo\\~{a}o Ribeiro},\n title =\t {Improved Upper Bounds and Structural Results on the\n Capacity of the Discrete-Time {Poisson} Channel},\n year =\t 2019,\n journal =\t {IEEE Transactions on Information Theory},\n volume =\t 65,\n number =\t 7,\n pages =\t {4052--4068},\n note =\t {Preliminary version in Proceedings of {ISIT 2018}.},\n doi =\t\t {10.1109/TIT.2019.2896931},\n url_Link =\t {https://ieeexplore.ieee.org/document/8632953},\n url_Paper =\t {https://arxiv.org/abs/1801.02745},\n abstract =\t {New capacity upper bounds are presented for the\n discrete-time Poisson channel with no dark current\n and an average-power constraint. These bounds are a\n consequence of techniques developed for the\n seemingly unrelated problem of upper bounding the\n capacity of binary deletion and repetition\n channels. Previously, the best known capacity upper\n bound in the regime where the average-power\n constraint does not approach zero was due to\n Martinez (JOSA B, 2007), which is re-derived as a\n special case of the framework developed in this\n paper. Furthermore, this framework is carefully\n instantiated in order to obtain a closed-form bound\n that improves the result of Martinez\n everywhere. Finally, capacity-achieving\n distributions for the discrete-time Poisson channel\n are studied under an average-power constraint and/or\n a peak-power constraint and arbitrary dark\n current. In particular, it is shown that the support\n of the capacity-achieving distribution under an\n average-power constraint must only be countably\n infinite. This settles a conjecture of Shamai (IEE\n Proceedings I, 1990) in the affirmative. Previously,\n it was only known that the support must be an\n unbounded set. }\n}\n\n\n
@ARTICLE{ref:Che19:entropy,\n author =\t {Mahdi Cheraghchi},\n title =\t {Expressions for the Entropy of Basic Discrete\n Distributions},\n year =\t 2019,\n journal =\t {{IEEE Transactions on Information Theory}},\n volume =\t 65,\n number =\t 7,\n pages =\t {3999--4009},\n note =\t {Preliminary version in Proceedings of {ISIT 2018}.},\n doi =\t\t {10.1109/TIT.2019.2900716},\n url_Link =\t {https://ieeexplore.ieee.org/document/8648470},\n url_Paper =\t {https://arxiv.org/abs/1708.06394},\n abstract =\t {We develop a general method for computing\n logarithmic and log-gamma expectations of\n distributions. As a result, we derive series\n expansions and integral representations of the\n entropy for several fundamental distributions,\n including the Poisson, binomial, beta-binomial,\n negative binomial, and hypergeometric\n distributions. Our results also establish\n connections between the entropy functions and to the\n Riemann zeta function and its generalizations. }\n}\n\n\n
@ARTICLE{ref:BKCE19,\n author =\t {Thach V. Bui and Minoru Kuribayashi and Mahdi\n Cheraghchi and Isao Echizen},\n title =\t {Efficiently Decodable Non-Adaptive Threshold Group\n Testing},\n year =\t 2019,\n journal =\t {{IEEE Transactions on Information Theory}},\n volume =\t 65,\n number =\t 9,\n pages =\t {5519--5528},\n note =\t {Preliminary version in Proceedings of {ISIT 2018}.},\n doi =\t\t {10.1109/TIT.2019.2907990},\n url_Link =\t {https://ieeexplore.ieee.org/document/8676252},\n url_Paper =\t {https://arxiv.org/abs/1712.07509},\n abstract =\t {We consider non-adaptive threshold group testing for\n identification of up to $d$ defective items in a set\n of $n$ items, where a test is positive if it\n contains at least $2 \\leq u \\leq d$ defective items,\n and negative otherwise. The defective items can be\n identified using $t = O ( ( \\frac{d}{u} )^u (\n \\frac{d}{d - u} )^{d-u} (u \\log{\\frac{d}{u}} +\n \\log{\\frac{1}{\\epsilon}} ) \\cdot d^2 \\log{n} )$\n tests with probability at least $1 - \\epsilon$ for\n any $\\epsilon > 0$ or $t = O ( ( \\frac{d}{u} )^u (\n \\frac{d}{d -u} )^{d - u} d^3 \\log{n} \\cdot\n \\log{\\frac{n}{d}} )$ tests with probability 1. The\n decoding time is $t \\times \\mathrm{poly}(d^2\n \\log{n})$. This result significantly improves the\n best known results for decoding non-adaptive\n threshold group testing: $O(n\\log{n} + n\n \\log{\\frac{1}{\\epsilon}})$ for probabilistic\n decoding, where $\\epsilon > 0$, and $O(n^u \\log{n})$\n for deterministic decoding. }\n}\n\n\n
@ARTICLE{ref:Che19:SS,\n author =\t {Mahdi Cheraghchi},\n title =\t {Nearly optimal robust secret sharing},\n journal =\t {Designs, Codes and Cryptography},\n volume =\t 87,\n number =\t 8,\n pages =\t {1777--1796},\n year =\t 2019,\n doi =\t\t {10.1007/s10623-018-0578-y},\n url_Link =\n {https://link.springer.com/article/10.1007/s10623-018-0578-y},\n keywords =\t {Coding and information theory, Cryptography,\n Algebraic coding theory},\n abstract =\t {We prove that a known approach to improve Shamir's\n celebrated secret sharing scheme; i.e., adding an\n information-theoretic authentication tag to the\n secret, can make it robust for $n$ parties against\n any collusion of size $\\delta n$, for any constant\n $\\delta \\in (0, 1/2)$. This result holds in the\n so-called ``non-rushing'' model in which the $n$\n shares are submitted simultaneously for\n reconstruction. We thus obtain a fully explicit and\n robust secret sharing scheme in this model that is\n essentially optimal in all parameters including the\n share size which is $k(1+o(1)) + O(\\kappa)$, where\n $k$ is the secret length and $\\kappa$ is the\n security parameter. Like Shamir's scheme, in this\n modified scheme any set of more than $\\delta n$\n honest parties can efficiently recover the secret.\n Using algebraic geometry codes instead of\n Reed-Solomon codes, the share length can be\n decreased to a constant (only depending on $\\delta$)\n while the number of shares $n$ can grow\n independently. In this case, when $n$ is large\n enough, the scheme satisfies the ``threshold''\n requirement in an approximate sense; i.e., any set\n of $\\delta n(1+\\rho)$ honest parties, for\n arbitrarily small $\\rho > 0$, can efficiently\n reconstruct the secret.},\n note =\t {Preliminary version in Proceedings of {ISIT 2016.}},\n url_Paper =\t {https://eprint.iacr.org/2015/951}\n}\n\n\n
@INPROCEEDINGS{ref:conf:CR18,\n author =\t {Mahdi Cheraghchi and Jo\\~{a}o Ribeiro},\n title =\t {Sharp Analytical Capacity Upper Bounds for Sticky\n and Related Channels},\n year =\t 2018,\n booktitle =\t {Proceedings of the 56th {Annual Allerton Conference\n on Communication, Control, and Computing}},\n pages =\t {1104--1111},\n note =\t {Extended version in {IEEE Transactions on\n Information Theory}.},\n doi =\t\t {10.1109/ALLERTON.2018.8636009},\n url_Link =\t {https://ieeexplore.ieee.org/document/8636009},\n url_Paper =\t {https://arxiv.org/abs/1806.06218},\n abstract =\t {We study natural examples of binary channels with\n synchronization errors. These include the\n duplication channel, which independently outputs a\n given bit once or twice, and geometric channels that\n repeat a given bit according to a geometric rule,\n with or without the possibility of bit deletion. We\n apply the general framework of Cheraghchi (STOC\n 2018) to obtain sharp analytical upper bounds on the\n capacity of these channels. Previously, upper\n bounds were known via numerical computations\n involving the computation of finite approximations\n of the channels by a computer and then using the\n obtained numerical results to upper bound the actual\n capacity. While leading to sharp numerical results,\n further progress on the full understanding of the\n channel capacity inherently remains elusive using\n such methods. Our results can be regarded as a major\n step towards a complete understanding of the\n capacity curves. Quantitatively, our upper bounds\n sharply approach, and in some cases surpass, the\n bounds that were previously only known by purely\n numerical methods. Among our results, we notably\n give a completely analytical proof that, when the\n number of repetitions per bit is geometric\n (supported on $\\{0,1,2,\\dots\\}$) with mean growing\n to infinity, the channel capacity remains\n substantially bounded away from $1$. }\n}\n\n\n
@INPROCEEDINGS{ref:conf:Che18:del,\n author =\t {Mahdi Cheraghchi},\n title =\t {Capacity Upper Bounds for Deletion-type Channels},\n year =\t 2018,\n booktitle =\t {Proceedings of the 50th {ACM Symposium on Theory of\n Computing (STOC)}},\n pages =\t {493--506},\n note =\t {Extended version in {Journal of the ACM}.},\n doi =\t\t {10.1145/3188745.3188768},\n url_Link =\t {https://dl.acm.org/doi/10.1145/3188745.3188768},\n url_Paper =\t {https://arxiv.org/abs/1711.01630},\n abstract =\t {We develop a systematic approach, based on convex\n programming and real analysis, for obtaining upper\n bounds on the capacity of the binary deletion\n channel and, more generally, channels with\n i.i.d. insertions and deletions. Other than the\n classical deletion channel, we give a special\n attention to the Poisson-repeat channel introduced\n by Mitzenmacher and Drinea (IEEE Transactions on\n Information Theory, 2006). Our framework can be\n applied to obtain capacity upper bounds for any\n repetition distribution (the deletion and\n Poisson-repeat channels corresponding to the special\n cases of Bernoulli and Poisson distributions). Our\n techniques essentially reduce the task of proving\n capacity upper bounds to maximizing a univariate,\n real-valued, and often concave function over a\n bounded interval. The corresponding univariate\n function is carefully designed according to the\n underlying distribution of repetitions and the\n choices vary depending on the desired strength of\n the upper bounds as well as the desired simplicity\n of the function (e.g., being only efficiently\n computable versus having an explicit closed-form\n expression in terms of elementary, or common\n special, functions). 
Among our results, we show the\n following: 1) The capacity of the binary deletion\n channel with deletion probability $d$ is at most\n $(1-d) \\log \\phi$ for $d \\geq 1/2$, and, assuming\n that the capacity function is convex, is at most\n $1-d \\log(4/\\phi)$ for $d<1/2$, where\n $\\phi=(1+\\sqrt{5})/2$ is the golden ratio. This is\n the first nontrivial capacity upper bound for any\n value of $d$ outside the limiting case $d \\to 0$\n that is fully explicit and proved without computer\n assistance. 2) We derive the first set of capacity\n upper bounds for the Poisson-repeat channel. Our\n results uncover further striking connections between\n this channel and the deletion channel, and suggest,\n somewhat counter-intuitively, that the\n Poisson-repeat channel is actually analytically\n simpler than the deletion channel and may be of key\n importance to a complete understanding of the\n deletion channel. 3) We derive several novel upper\n bounds on the capacity of the deletion channel. All\n upper bounds are maximums of efficiently computable,\n and concave, univariate real functions over a\n bounded domain. In turn, we upper bound these\n functions in terms of explicit elementary and\n standard special functions, whose maximums can be\n found even more efficiently (and sometimes,\n analytically, for example for $d=1/2$). },\n keywords =\t {Coding theory, Synchronization error-correcting\n codes, Channel coding}\n}\n\n\n
@INPROCEEDINGS{ref:conf:CR18:poi,\n author =\t {Mahdi Cheraghchi and Jo\\~{a}o Ribeiro},\n title =\t {Improved Upper Bounds on the Capacity of the\n Discrete-Time {Poisson} Channel},\n year =\t 2018,\n booktitle =\t {Proceedings of the {IEEE International Symposium on\n Information Theory (ISIT)}},\n pages =\t {1769--1773},\n note =\t {Extended version in {IEEE Transactions on\n Information Theory}.},\n doi =\t\t {10.1109/ISIT.2018.8437514},\n url_Link =\t {https://ieeexplore.ieee.org/document/8437514},\n url_Paper =\t {https://arxiv.org/abs/1801.02745},\n abstract =\t {New capacity upper bounds are presented for the\n discrete-time Poisson channel with no dark current\n and an average-power constraint. These bounds are a\n consequence of techniques developed for the\n seemingly unrelated problem of upper bounding the\n capacity of binary deletion and repetition\n channels. Previously, the best known capacity upper\n bound in the regime where the average-power\n constraint does not approach zero was due to\n Martinez (JOSA B, 2007), which is re-derived as a\n special case of the framework developed in this\n paper. Furthermore, this framework is carefully\n instantiated in order to obtain a closed-form bound\n that improves the result of Martinez\n everywhere. Finally, capacity-achieving\n distributions for the discrete-time Poisson channel\n are studied under an average-power constraint and/or\n a peak-power constraint and arbitrary dark\n current. In particular, it is shown that the support\n of the capacity-achieving distribution under an\n average-power constraint must only be countably\n infinite. This settles a conjecture of Shamai (IEE\n Proceedings I, 1990) in the affirmative. Previously,\n it was only known that the support must be an\n unbounded set. }\n}\n\n\n
@INPROCEEDINGS{ref:conf:Che18:entropy,\n author =\t {Mahdi Cheraghchi},\n title =\t {Expressions for the Entropy of Binomial-Type\n Distributions},\n year =\t 2018,\n booktitle =\t {Proceedings of the {IEEE International Symposium on\n Information Theory (ISIT)}},\n pages =\t {2520--2524},\n note =\t {Extended version in {IEEE Transactions on\n Information Theory}.},\n doi =\t\t {10.1109/ISIT.2018.8437888},\n url_Link =\t {https://ieeexplore.ieee.org/document/8437888},\n url_Paper =\t {https://arxiv.org/abs/1708.06394},\n abstract =\t {We develop a general method for computing\n logarithmic and log-gamma expectations of\n distributions. As a result, we derive series\n expansions and integral representations of the\n entropy for several fundamental distributions,\n including the Poisson, binomial, beta-binomial,\n negative binomial, and hypergeometric\n distributions. Our results also establish\n connections between the entropy functions and to the\n Riemann zeta function and its generalizations. }\n}\n\n\n
@INPROCEEDINGS{ref:conf:BKCE18,\n author =\t {Thach V. Bui and Minoru Kuribayashi and Mahdi\n Cheraghchi and Isao Echizen},\n title =\t {Efficiently Decodable Non-Adaptive Threshold Group\n Testing},\n year =\t 2018,\n booktitle =\t {Proceedings of the {IEEE International Symposium on\n Information Theory (ISIT)}},\n note =\t {Extended version in {IEEE Transactions on\n Information Theory}.},\n pages =\t {2584--2588},\n doi =\t\t {10.1109/ISIT.2018.8437847},\n url_Link =\t {https://ieeexplore.ieee.org/document/8437847},\n url_Paper =\t {https://arxiv.org/abs/1712.07509},\n abstract =\t {We consider non-adaptive threshold group testing for\n identification of up to $d$ defective items in a set\n of $n$ items, where a test is positive if it\n contains at least $2 \\leq u \\leq d$ defective items,\n and negative otherwise. The defective items can be\n identified using $t = O ( ( \\frac{d}{u} )^u (\n \\frac{d}{d - u} )^{d-u} (u \\log{\\frac{d}{u}} +\n \\log{\\frac{1}{\\epsilon}} ) \\cdot d^2 \\log{n} )$\n tests with probability at least $1 - \\epsilon$ for\n any $\\epsilon > 0$ or $t = O ( ( \\frac{d}{u} )^u (\n \\frac{d}{d -u} )^{d - u} d^3 \\log{n} \\cdot\n \\log{\\frac{n}{d}} )$ tests with probability 1. The\n decoding time is $t \\times \\mathrm{poly}(d^2\n \\log{n})$. This result significantly improves the\n best known results for decoding non-adaptive\n threshold group testing: $O(n\\log{n} + n\n \\log{\\frac{1}{\\epsilon}})$ for probabilistic\n decoding, where $\\epsilon > 0$, and $O(n^u \\log{n})$\n for deterministic decoding. }\n}\n\n\n
@ARTICLE{ref:CCGG18,\n author =\t {Chandrasekaran, Karthekeyan and Cheraghchi, Mahdi\n and Gandikota, Venkata and Grigorescu, Elena},\n title =\t {Local Testing of Lattices},\n journal =\t {SIAM Journal on Discrete Mathematics},\n volume =\t 32,\n number =\t 2,\n pages =\t {1265--1295},\n year =\t 2018,\n doi =\t\t {10.1137/17M1110353},\n note =\t {Preliminary version in Proceedings of {FSTTCS\n 2016}.},\n url_Link =\t {https://epubs.siam.org/doi/10.1137/17M1110353},\n url_Paper =\t {https://arxiv.org/abs/1608.00180},\n abstract =\t {Testing membership in lattices is of practical\n relevance, with applications to integer programming,\n error detection in lattice-based communication, and\n cryptography. In this work, we initiate a systematic\n study of local testing for membership in lattices,\n complementing and building upon the extensive body\n of work on locally testable codes. In particular, we\n formally define the notion of local tests for\n lattices and present the following: 1. We show that\n in order to achieve low query complexity, it is\n sufficient to design $1$-sided nonadaptive canonical\n tests. This result is akin to, and based on, an\n analogous result for error-correcting codes due to\n [E. Ben-Sasson, P. Harsha, and S. Raskhodnikova,\n SIAM J. Comput., 35 (2005), pp. 1--21]. 2. We\n demonstrate upper and lower bounds on the query\n complexity of local testing for membership in code\n formula lattices. We instantiate our results for\n code formula lattices constructed from Reed--Muller\n codes to obtain nearly matching upper and lower\n bounds on the query complexity of testing such\n lattices. 3. We contrast lattice testing to code\n testing by showing lower bounds on the query\n complexity of testing low-dimensional lattices. This\n illustrates large lower bounds on the query\n complexity of testing membership in the well-known\n knapsack lattices. 
On the other hand, we show that\n knapsack lattices with bounded coefficients have\n low-query testers if the inputs are promised to lie\n in the span of the lattice. }\n}\n\n\n
@ARTICLE{ref:CGJWX18,\n author =\t {Mahdi Cheraghchi and Elena Grigorescu and Brendan\n Juba and Karl Wimmer and Ning Xie},\n title =\t {{AC0-MOD2} lower bounds for the {Boolean} Inner\n Product},\n journal =\t {Journal of Computer and System Sciences},\n volume =\t 97,\n pages =\t "45--59",\n year =\t 2018,\n doi =\t\t "10.1016/j.jcss.2018.04.006",\n url_Link =\n "http://www.sciencedirect.com/science/article/pii/S002200001830518X",\n keywords =\t "Boolean analysis, Circuit complexity, Lower bounds",\n abstract =\t "AC0-MOD2 circuits are AC0 circuits augmented with a\n layer of parity gates just above the input layer. We\n study AC0-MOD2 circuit lower bounds for computing\n the Boolean Inner Product functions. Recent works by\n Servedio and Viola (ECCC TR12-144) and Akavia et\n al. (ITCS 2014) have highlighted this problem as a\n frontier problem in circuit complexity that arose\n both as a first step towards solving natural special\n cases of the matrix rigidity problem and as a\n candidate for constructing pseudorandom generators\n of minimal complexity. We give the first superlinear\n lower bound for the Boolean Inner Product function\n against AC0-MOD2 of depth four or\n greater. Specifically, we prove a superlinear lower\n bound for circuits of arbitrary constant depth, and\n an $\\tilde{\\Omega(n^2)}$ lower bound for the special\n case of depth-4 AC0-MOD2.",\n note =\t {Preliminary version in Proceedings of {ICALP 2016.}},\n url_Paper =\t {https://eccc.weizmann.ac.il//report/2015/030/}\n}\n\n\n
@ARTICLE{ref:CI17,\n author =\t {Mahdi Cheraghchi and Piotr Indyk},\n title =\t {Nearly Optimal Deterministic Algorithm for Sparse\n Walsh-Hadamard Transform},\n year =\t 2017,\n volume =\t 13,\n number =\t 3,\n doi =\t\t {10.1145/3029050},\n journal =\t {ACM Transactions on Algorithms},\n month =\t mar,\n articleno =\t 34,\n numpages =\t 36,\n keywords =\t {Sparse recovery, sublinear time algorithms,\n pseudorandomness, explicit constructions, sketching,\n sparse Fourier transform},\n url_Link =\t {https://dl.acm.org/doi/10.1145/3029050},\n abstract =\t {For every fixed constant $\\alpha > 0$, we design an\n algorithm for computing the $k$-sparse\n Walsh-Hadamard transform (i.e., Discrete Fourier\n Transform over the Boolean cube) of an\n $N$-dimensional vector $x \\in \\mathbb{R}^N$ in time\n $k^{1+\\alpha} (\\log N)^{O(1)}$. Specifically, the\n algorithm is given query access to $x$ and computes\n a $k$-sparse $\\tilde{x} \\in \\mathbb{R}^N$ satisfying\n $\\|\\tilde{x} - \\hat{x}\\|_1 \\leq c \\|\\hat{x} -\n H_k(\\hat{x})\\|_1$, for an absolute constant $c > 0$,\n where $\\hat{x}$ is the transform of $x$ and\n $H_k(\\hat{x})$ is its best $k$-sparse\n approximation. Our algorithm is fully deterministic\n and only uses non-adaptive queries to $x$ (i.e., all\n queries are determined and performed in parallel\n when the algorithm starts). An important technical\n tool that we use is a construction of nearly optimal\n and linear lossless condensers which is a careful\n instantiation of the GUV condenser (Guruswami,\n Umans, Vadhan, JACM 2009). Moreover, we design a\n deterministic and non-adaptive $\\ell_1/\\ell_1$\n compressed sensing scheme based on general lossless\n condensers that is equipped with a fast\n reconstruction algorithm running in time\n $k^{1+\\alpha} (\\log N)^{O(1)}$ (for the GUV-based\n condenser) and is of independent interest. 
Our\n scheme significantly simplifies and improves an\n earlier expander-based construction due to Berinde,\n Gilbert, Indyk, Karloff, Strauss (Allerton 2008).\n Our methods use linear lossless condensers in a\n black box fashion; therefore, any future improvement\n on explicit constructions of such condensers would\n immediately translate to improved parameters in our\n framework (potentially leading to $k (\\log\n N)^{O(1)}$ reconstruction time with a reduced\n exponent in the poly-logarithmic factor, and\n eliminating the extra parameter $\\alpha$). By\n allowing the algorithm to use randomness, while\n still using non-adaptive queries, the running time\n of the algorithm can be improved to $\\tilde{O}(k\n \\log^3 N)$.},\n note =\t {Preliminary version in Proceedings of {SODA 2016.}},\n url_Paper =\t {https://eccc.weizmann.ac.il//report/2015/076/}\n}\n\n\n
@ARTICLE{ref:CG17,\n author =\t {Mahdi Cheraghchi and Venkatesan Guruswami},\n title =\t {Non-malleable Coding Against Bit-Wise and\n Split-State Tampering},\n year =\t 2017,\n volume =\t 30,\n pages =\t {191--241},\n doi =\t\t {10.1007/s00145-015-9219-z},\n journal =\t {Journal of Cryptology},\n keywords =\t {Information theory, Tamper-resilient cryptography,\n Coding theory, Error detection, Randomness\n extractors},\n url_Link =\n {https://link.springer.com/article/10.1007/s00145-015-9219-z},\n abstract =\t {Non-malleable coding, introduced by Dziembowski,\n Pietrzak and Wichs (ICS 2010), aims for protecting\n the integrity of information against tampering\n attacks in situations where error-detection is\n impossible. Intuitively, information encoded by a\n non-malleable code either decodes to the original\n message or, in presence of any tampering, to an\n unrelated message. Non-malleable coding is possible\n against any class of adversaries of bounded size. In\n particular, Dziembowski et al. show that such codes\n exist and may achieve positive rates for any class\n of tampering functions of size at most $2^{2^{\\alpha\n n}}$, for any constant $\\alpha \\in [0, 1)$. However,\n this result is existential and has thus attracted a\n great deal of subsequent research on explicit\n constructions of non-malleable codes against natural\n classes of adversaries. In this work, we consider\n constructions of coding schemes against two\n well-studied classes of tampering functions; namely,\n bit-wise tampering functions (where the adversary\n tampers each bit of the encoding independently) and\n the much more general class of split-state\n adversaries (where two independent adversaries\n arbitrarily tamper each half of the encoded\n sequence). We obtain the following results for these\n models. 
1) For bit-tampering adversaries, we obtain\n explicit and efficiently encodable and decodable\n non-malleable codes of length $n$ achieving rate\n $1-o(1)$ and error (also known as "exact security")\n $\\exp(-\\tilde{\\Omega}(n^{1/7}))$. Alternatively, it\n is possible to improve the error to\n $\\exp(-\\tilde{\\Omega}(n))$ at the cost of making the\n construction Monte Carlo with success probability\n $1-\\exp(-\\Omega(n))$ (while still allowing a compact\n description of the code). Previously, the best known\n construction of bit-tampering coding schemes was due\n to Dziembowski et al. (ICS 2010), which is a Monte\n Carlo construction achieving rate close to $.1887$.\n 2) We initiate the study of \\textit{seedless\n non-malleable extractors} as a natural variation of\n the notion of non-malleable extractors introduced by\n Dodis and Wichs (STOC 2009). We show that\n construction of non-malleable codes for the\n split-state model reduces to construction of\n non-malleable two-source extractors. We prove a\n general result on existence of seedless\n non-malleable extractors, which implies that codes\n obtained from our reduction can achieve rates\n arbitrarily close to $1/5$ and exponentially small\n error. In a separate recent work, the authors show\n that the optimal rate in this model is $1/2$.\n Currently, the best known explicit construction of\n split-state coding schemes is due to Aggarwal, Dodis\n and Lovett (ECCC TR13-081) which only achieves\n vanishing (polynomially small) rate. },\n note =\t {Preliminary version in Proceedings of {TCC 2014.}},\n url_Paper =\t {https://eccc.weizmann.ac.il//report/2013/121}\n}\n\n\n
@INPROCEEDINGS{ref:conf:CCGG16,\n author =\t {Chandrasekaran, Karthekeyan and Cheraghchi, Mahdi\n and Gandikota, Venkata and Grigorescu, Elena},\n title =\t {Local Testing for Membership in Lattices},\n booktitle =\t {36th {IARCS Annual Conference on Foundations of\n Software Technology and Theoretical Computer Science\n (FSTTCS)}},\n pages =\t {46:1--46:14},\n series =\t {Leibniz International Proceedings in Informatics\n (LIPIcs)},\n year =\t 2016,\n volume =\t 65,\n doi =\t\t {10.4230/LIPIcs.FSTTCS.2016.46},\n note =\t {Extended version in {SIAM Journal on Discrete\n Mathematics}.},\n url_Link =\t {http://drops.dagstuhl.de/opus/volltexte/2016/6881},\n url_Paper =\t {https://arxiv.org/abs/1608.00180},\n abstract =\t {Testing membership in lattices is of practical\n relevance, with applications to integer programming,\n error detection in lattice-based communication, and\n cryptography. In this work, we initiate a systematic\n study of local testing for membership in lattices,\n complementing and building upon the extensive body\n of work on locally testable codes. In particular, we\n formally define the notion of local tests for\n lattices and present the following: 1. We show that\n in order to achieve low query complexity, it is\n sufficient to design $1$-sided nonadaptive canonical\n tests. This result is akin to, and based on, an\n analogous result for error-correcting codes due to\n [E. Ben-Sasson, P. Harsha, and S. Raskhodnikova,\n SIAM J. Comput., 35 (2005), pp. 1--21]. 2. We\n demonstrate upper and lower bounds on the query\n complexity of local testing for membership in code\n formula lattices. We instantiate our results for\n code formula lattices constructed from Reed--Muller\n codes to obtain nearly matching upper and lower\n bounds on the query complexity of testing such\n lattices. 3. We contrast lattice testing to code\n testing by showing lower bounds on the query\n complexity of testing low-dimensional lattices. 
This\n illustrates large lower bounds on the query\n complexity of testing membership in the well-known\n knapsack lattices. On the other hand, we show that\n knapsack lattices with bounded coefficients have\n low-query testers if the inputs are promised to lie\n in the span of the lattice. },\n keywords =\t {Lattices, Property Testing, Locally Testable Codes,\n Complexity Theory}\n}\n\n\n
@INPROCEEDINGS{ref:conf:CGJWX16,\n author =\t {Mahdi Cheraghchi and Elena Grigorescu and Brendan\n Juba and Karl Wimmer and Ning Xie},\n title =\t {{AC0-MOD2} lower bounds for the {Boolean} Inner\n Product},\n booktitle =\t {Proceedings of the 43rd {International Colloquium on\n Automata, Languages, and Programming (ICALP 2016)}},\n pages =\t {35:1--35:14},\n series =\t {Leibniz International Proceedings in Informatics\n (LIPIcs)},\n year =\t 2016,\n volume =\t 55,\n url_Link =\t {http://drops.dagstuhl.de/opus/volltexte/2016/6315},\n doi =\t\t {10.4230/LIPIcs.ICALP.2016.35},\n keywords =\t {Boolean analysis, circuit complexity, lower bounds},\n abstract =\t "AC0-MOD2 circuits are AC0 circuits augmented with a\n layer of parity gates just above the input layer. We\n study AC0-MOD2 circuit lower bounds for computing\n the Boolean Inner Product functions. Recent works by\n Servedio and Viola (ECCC TR12-144) and Akavia et\n al. (ITCS 2014) have highlighted this problem as a\n frontier problem in circuit complexity that arose\n both as a first step towards solving natural special\n cases of the matrix rigidity problem and as a\n candidate for constructing pseudorandom generators\n of minimal complexity. We give the first superlinear\n lower bound for the Boolean Inner Product function\n against AC0-MOD2 of depth four or\n greater. Specifically, we prove a superlinear lower\n bound for circuits of arbitrary constant depth, and\n an $\\tilde{\\Omega(n^2)}$ lower bound for the special\n case of depth-4 AC0-MOD2.",\n note =\t {Extended version in {Journal of Computer and System\n Sciences}},\n url_Paper =\t {https://eccc.weizmann.ac.il//report/2015/030/}\n}\n\n\n
@INPROCEEDINGS{ref:conf:Che16:SS,\n author =\t {Mahdi Cheraghchi},\n title =\t {Nearly optimal robust secret sharing},\n booktitle =\t {Proceedings of the {IEEE International Symposium on\n Information Theory (ISIT)}},\n year =\t 2016,\n pages =\t {2509--2513},\n doi =\t\t {10.1109/ISIT.2016.7541751},\n url_Link =\t {https://ieeexplore.ieee.org/document/7541751},\n keywords =\t {Coding and information theory, Cryptography,\n Algebraic coding theory},\n abstract =\t {We prove that a known approach to improve Shamir's\n celebrated secret sharing scheme; i.e., adding an\n information-theoretic authentication tag to the\n secret, can make it robust for $n$ parties against\n any collusion of size $\\delta n$, for any constant\n $\\delta \\in (0, 1/2)$. This result holds in the\n so-called ``non-rushing'' model in which the $n$\n shares are submitted simultaneously for\n reconstruction. We thus obtain a fully explicit and\n robust secret sharing scheme in this model that is\n essentially optimal in all parameters including the\n share size which is $k(1+o(1)) + O(\\kappa)$, where\n $k$ is the secret length and $\\kappa$ is the\n security parameter. Like Shamir's scheme, in this\n modified scheme any set of more than $\\delta n$\n honest parties can efficiently recover the secret.\n Using algebraic geometry codes instead of\n Reed-Solomon codes, the share length can be\n decreased to a constant (only depending on $\\delta$)\n while the number of shares $n$ can grow\n independently. In this case, when $n$ is large\n enough, the scheme satisfies the ``threshold''\n requirement in an approximate sense; i.e., any set\n of $\\delta n(1+\\rho)$ honest parties, for\n arbitrarily small $\\rho > 0$, can efficiently\n reconstruct the secret.},\n note =\t {Extended version in {Designs, Codes and\n Cryptography.}},\n url_Paper =\t {https://eprint.iacr.org/2015/951}\n}\n\n\n
@INPROCEEDINGS{ref:conf:CI16,\n author =\t {Mahdi Cheraghchi and Piotr Indyk},\n title =\t {Nearly Optimal Deterministic Algorithm for Sparse\n Walsh-Hadamard Transform},\n year =\t 2016,\n booktitle =\t {Proceedings of the 27th {Annual ACM-SIAM Symposium\n on Discrete Algorithms (SODA)}},\n pages =\t {298--317},\n numpages =\t 20,\n keywords =\t {Sparse recovery, sublinear time algorithms,\n pseudorandomness, explicit constructions, sketching,\n sparse Fourier transform},\n url_Link =\t {https://dl.acm.org/doi/10.5555/2884435.2884458},\n abstract =\t {For every fixed constant $\\alpha > 0$, we design an\n algorithm for computing the $k$-sparse\n Walsh-Hadamard transform (i.e., Discrete Fourier\n Transform over the Boolean cube) of an\n $N$-dimensional vector $x \\in \\mathbb{R}^N$ in time\n $k^{1+\\alpha} (\\log N)^{O(1)}$. Specifically, the\n algorithm is given query access to $x$ and computes\n a $k$-sparse $\\tilde{x} \\in \\mathbb{R}^N$ satisfying\n $\\|\\tilde{x} - \\hat{x}\\|_1 \\leq c \\|\\hat{x} -\n H_k(\\hat{x})\\|_1$, for an absolute constant $c > 0$,\n where $\\hat{x}$ is the transform of $x$ and\n $H_k(\\hat{x})$ is its best $k$-sparse\n approximation. Our algorithm is fully deterministic\n and only uses non-adaptive queries to $x$ (i.e., all\n queries are determined and performed in parallel\n when the algorithm starts). An important technical\n tool that we use is a construction of nearly optimal\n and linear lossless condensers which is a careful\n instantiation of the GUV condenser (Guruswami,\n Umans, Vadhan, JACM 2009). Moreover, we design a\n deterministic and non-adaptive $\\ell_1/\\ell_1$\n compressed sensing scheme based on general lossless\n condensers that is equipped with a fast\n reconstruction algorithm running in time\n $k^{1+\\alpha} (\\log N)^{O(1)}$ (for the GUV-based\n condenser) and is of independent interest. 
Our\n scheme significantly simplifies and improves an\n earlier expander-based construction due to Berinde,\n Gilbert, Indyk, Karloff, Strauss (Allerton 2008).\n Our methods use linear lossless condensers in a\n black box fashion; therefore, any future improvement\n on explicit constructions of such condensers would\n immediately translate to improved parameters in our\n framework (potentially leading to $k (\\log\n N)^{O(1)}$ reconstruction time with a reduced\n exponent in the poly-logarithmic factor, and\n eliminating the extra parameter $\\alpha$). By\n allowing the algorithm to use randomness, while\n still using non-adaptive queries, the running time\n of the algorithm can be improved to $\\tilde{O}(k\n \\log^3 N)$.},\n note =\t {Preliminary version in Proceedings of {SODA 2016.}},\n url_Paper =\t {https://eccc.weizmann.ac.il//report/2015/076/}\n}\n\n\n
@ARTICLE{ref:CG16,\n author =\t {Mahdi Cheraghchi and Venkatesan Guruswami},\n title =\t {Capacity of Non-Malleable Codes},\n journal =\t {IEEE Transactions on Information Theory},\n year =\t 2016,\n volume =\t 62,\n number =\t 3,\n pages =\t {1097--1118},\n doi =\t\t {10.1109/TIT.2015.2511784},\n url_Link =\t {https://ieeexplore.ieee.org/document/7365445},\n keywords =\t {cryptography, coding theory, tamper-resilient\n storage, probabilistic method, information theory,\n error detection},\n abstract =\t {Non-malleable codes, introduced by Dziembowski,\n Pietrzak and Wichs (ICS 2010), encode messages $s$\n in a manner so that tampering the codeword causes\n the decoder to either output $s$ or a message that\n is independent of $s$. While this is an impossible\n goal to achieve against unrestricted tampering\n functions, rather surprisingly non-malleable coding\n becomes possible against every fixed family\n $\\mathcal{F}$ of tampering functions that is not too\n large (for instance, when $|\\mathcal{F}| \\le\n 2^{2^{\\alpha n}}$ for some $\\alpha <1$ where $n$ is\n the number of bits in a codeword). In this work, we\n study the "capacity of non-malleable codes," and\n establish optimal bounds on the achievable rate as a\n function of the family size, answering an open\n problem from Dziembowski et al. (ICS 2010).\n Specifically, 1) We prove that for every family\n $\\mathcal{F}$ with $|\\mathcal{F}| \\le 2^{2^{\\alpha\n n}}$, there exist non-malleable codes against\n $\\mathcal{F}$ with rate arbitrarily close to\n $1-\\alpha$ (this is achieved w.h.p. by a randomized\n construction). 2) We show the existence of families\n of size $\\exp(n^{O(1)} 2^{\\alpha n})$ against which\n there is no non-malleable code of rate $1-\\alpha$\n (in fact this is the case w.h.p for a random family\n of this size). 
3) We also show that $1-\\alpha$ is\n the best achievable rate for the family of functions\n which are only allowed to tamper the first $\\alpha\n n$ bits of the codeword, which is of special\n interest. As a corollary, this implies that the\n capacity of non-malleable coding in the split-state\n model (where the tampering function acts\n independently but arbitrarily on the two halves of\n the codeword, a model which has received some\n attention recently) equals $1/2$. We also give an\n efficient Monte Carlo construction of codes of rate\n close to $1$ with polynomial time encoding and\n decoding that is non-malleable against any fixed $c\n > 0$ and family $\\mathcal{F}$ of size $2^{n^c}$, in\n particular tampering functions with say cubic size\n circuits. },\n note =\t {Preliminary version in Proceedings of {ITCS 2014.}},\n url_Paper =\t {https://eccc.weizmann.ac.il//report/2013/118}\n}\n\n\n
@INPROCEEDINGS{ref:conf:CG14b,\n author =\t {Mahdi Cheraghchi and Venkatesan Guruswami},\n title =\t {Non-malleable Coding Against Bit-Wise and\n Split-State Tampering},\n year =\t 2014,\n booktitle =\t "Proceedings of Theory of Cryptography Conference\n {(TCC)}",\n pages =\t "440--464",\n doi =\t\t {10.1007/978-3-642-54242-8_19},\n keywords =\t {Information theory, Tamper-resilient cryptography,\n Coding theory, Error detection, Randomness\n extractors},\n url_Link =\n {https://link.springer.com/chapter/10.1007/978-3-642-54242-8_19},\n abstract =\t {Non-malleable coding, introduced by Dziembowski,\n Pietrzak and Wichs (ICS 2010), aims for protecting\n the integrity of information against tampering\n attacks in situations where error-detection is\n impossible. Intuitively, information encoded by a\n non-malleable code either decodes to the original\n message or, in presence of any tampering, to an\n unrelated message. Non-malleable coding is possible\n against any class of adversaries of bounded size. In\n particular, Dziembowski et al. show that such codes\n exist and may achieve positive rates for any class\n of tampering functions of size at most $2^{2^{\\alpha\n n}}$, for any constant $\\alpha \\in [0, 1)$. However,\n this result is existential and has thus attracted a\n great deal of subsequent research on explicit\n constructions of non-malleable codes against natural\n classes of adversaries. In this work, we consider\n constructions of coding schemes against two\n well-studied classes of tampering functions; namely,\n bit-wise tampering functions (where the adversary\n tampers each bit of the encoding independently) and\n the much more general class of split-state\n adversaries (where two independent adversaries\n arbitrarily tamper each half of the encoded\n sequence). We obtain the following results for these\n models. 
1) For bit-tampering adversaries, we obtain\n explicit and efficiently encodable and decodable\n non-malleable codes of length $n$ achieving rate\n $1-o(1)$ and error (also known as "exact security")\n $\\exp(-\\tilde{\\Omega}(n^{1/7}))$. Alternatively, it\n is possible to improve the error to\n $\\exp(-\\tilde{\\Omega}(n))$ at the cost of making the\n construction Monte Carlo with success probability\n $1-\\exp(-\\Omega(n))$ (while still allowing a compact\n description of the code). Previously, the best known\n construction of bit-tampering coding schemes was due\n to Dziembowski et al. (ICS 2010), which is a Monte\n Carlo construction achieving rate close to $.1887$.\n 2) We initiate the study of \\textit{seedless\n non-malleable extractors} as a natural variation of\n the notion of non-malleable extractors introduced by\n Dodis and Wichs (STOC 2009). We show that\n construction of non-malleable codes for the\n split-state model reduces to construction of\n non-malleable two-source extractors. We prove a\n general result on existence of seedless\n non-malleable extractors, which implies that codes\n obtained from our reduction can achieve rates\n arbitrarily close to $1/5$ and exponentially small\n error. In a separate recent work, the authors show\n that the optimal rate in this model is $1/2$.\n Currently, the best known explicit construction of\n split-state coding schemes is due to Aggarwal, Dodis\n and Lovett (ECCC TR13-081) which only achieves\n vanishing (polynomially small) rate. },\n note =\t {Extended version in {Journal of Cryptology.}},\n url_Paper =\t {https://eccc.weizmann.ac.il//report/2013/121}\n}\n\n\n
@INPROCEEDINGS{ref:conf:CG14a,\n author =\t {Mahdi Cheraghchi and Venkatesan Guruswami},\n title =\t {Capacity of Non-Malleable Codes},\n year =\t 2014,\n url_Link =\t {https://doi.org/10.1145/2554797.2554814},\n doi =\t\t {10.1145/2554797.2554814},\n booktitle =\t {Proceedings of the 5th {Conference on Innovations in\n Theoretical Computer Science (ITCS)}},\n pages =\t {155--168},\n numpages =\t 14,\n keywords =\t {cryptography, coding theory, tamper-resilient\n storage, probabilistic method, information theory,\n error detection},\n abstract =\t {Non-malleable codes, introduced by Dziembowski,\n Pietrzak and Wichs (ICS 2010), encode messages $s$\n in a manner so that tampering the codeword causes\n the decoder to either output $s$ or a message that\n is independent of $s$. While this is an impossible\n goal to achieve against unrestricted tampering\n functions, rather surprisingly non-malleable coding\n becomes possible against every fixed family\n $\\mathcal{F}$ of tampering functions that is not too\n large (for instance, when $|\\mathcal{F}| \\le\n 2^{2^{\\alpha n}}$ for some $\\alpha <1$ where $n$ is\n the number of bits in a codeword). In this work, we\n study the "capacity of non-malleable codes," and\n establish optimal bounds on the achievable rate as a\n function of the family size, answering an open\n problem from Dziembowski et al. (ICS 2010).\n Specifically, 1) We prove that for every family\n $\\mathcal{F}$ with $|\\mathcal{F}| \\le 2^{2^{\\alpha\n n}}$, there exist non-malleable codes against\n $\\mathcal{F}$ with rate arbitrarily close to\n $1-\\alpha$ (this is achieved w.h.p. by a randomized\n construction). 2) We show the existence of families\n of size $\\exp(n^{O(1)} 2^{\\alpha n})$ against which\n there is no non-malleable code of rate $1-\\alpha$\n (in fact this is the case w.h.p for a random family\n of this size). 
3) We also show that $1-\\alpha$ is\n the best achievable rate for the family of functions\n which are only allowed to tamper the first $\\alpha\n n$ bits of the codeword, which is of special\n interest. As a corollary, this implies that the\n capacity of non-malleable coding in the split-state\n model (where the tampering function acts\n independently but arbitrarily on the two halves of\n the codeword, a model which has received some\n attention recently) equals $1/2$. We also give an\n efficient Monte Carlo construction of codes of rate\n close to $1$ with polynomial time encoding and\n decoding that is non-malleable against any fixed $c\n > 0$ and family $\\mathcal{F}$ of size $2^{n^c}$, in\n particular tampering functions with say cubic size\n circuits. },\n note =\t {Extended version in IEEE Transactions on Information\n Theory.},\n url_Paper =\t {https://eccc.weizmann.ac.il//report/2013/118}\n}\n\n\n
@ARTICLE{ref:CGV13,\n author =\t {Cheraghchi, Mahdi and Guruswami, Venkatesan and\n Velingker, Ameya},\n title =\t {Restricted Isometry of {Fourier} Matrices and List\n Decodability of Random Linear Codes},\n journal =\t {SIAM Journal on Computing},\n volume =\t 42,\n number =\t 5,\n pages =\t {1888--1914},\n year =\t 2013,\n doi =\t\t {10.1137/120896773},\n keywords =\t {combinatorial list decoding, compressed sensing,\n {Gaussian} processes},\n url_Link =\t {https://epubs.siam.org/doi/abs/10.1137/120896773},\n abstract =\t {We prove that a random linear code over\n $\\mathbb{F}_q$, with probability arbitrarily close\n to $1$, is list decodable at radius $1-1/q-\\epsilon$\n with list size $L=O(1/\\epsilon^2)$ and rate\n $R=\\Omega_q(\\epsilon^2/(\\log^3(1/\\epsilon)))$. Up to\n the polylogarithmic factor in $1/\\epsilon$ and\n constant factors depending on $q$, this matches the\n lower bound $L=\\Omega_q(1/\\epsilon^2)$ for the list\n size and upper bound $R=O_q(\\epsilon^2)$ for the\n rate. Previously only existence (and not abundance)\n of such codes was known for the special case $q=2$\n (Guruswami, H{\\aa}stad, Sudan and Zuckerman, 2002).\n In order to obtain our result, we employ a relaxed\n version of the well known Johnson bound on list\n decoding that translates the \\textit{average}\n Hamming distance between codewords to list decoding\n guarantees. We furthermore prove that the desired\n average-distance guarantees hold for a code provided\n that a natural complex matrix encoding the codewords\n satisfies the Restricted Isometry Property with\n respect to the Euclidean norm (RIP-2). For the case\n of random binary linear codes, this matrix coincides\n with a random submatrix of the Hadamard-Walsh\n transform matrix that is well studied in the\n compressed sensing literature. 
Finally, we improve\n the analysis of Rudelson and Vershynin (2008) on the\n number of random frequency samples required for\n exact reconstruction of $k$-sparse signals of length\n $N$. Specifically, we improve the number of samples\n from $O(k \\log(N) \\log^2(k) (\\log k + \\log\\log N))$\n to $O(k \\log(N) \\cdot \\log^3(k))$. The proof\n involves bounding the expected supremum of a related\n Gaussian process by using an improved analysis of\n the metric defined by the process. This improvement\n is crucial for our application in list decoding.},\n note =\t {Preliminary version in Proceedings of {SODA 2013.}},\n url_Paper =\t {https://eccc.weizmann.ac.il//report/2012/082}\n}\n\n\n
@INPROCEEDINGS{ref:conf:CGV13,\n author =\t {Cheraghchi, Mahdi and Guruswami, Venkatesan and\n Velingker, Ameya},\n title =\t {Restricted Isometry of {Fourier} Matrices and List\n Decodability of Random Linear Codes},\n year =\t 2013,\n booktitle =\t {Proceedings of the 24th {Annual ACM-SIAM Symposium\n on Discrete Algorithms (SODA)}},\n pages =\t {432--442},\n numpages =\t 11,\n keywords =\t {combinatorial list decoding, compressed sensing,\n {Gaussian} processes},\n url_Link =\t {https://dl.acm.org/doi/10.5555/2627817.2627848},\n abstract =\t {We prove that a random linear code over\n $\\mathbb{F}_q$, with probability arbitrarily close\n to $1$, is list decodable at radius $1-1/q-\\epsilon$\n with list size $L=O(1/\\epsilon^2)$ and rate\n $R=\\Omega_q(\\epsilon^2/(\\log^3(1/\\epsilon)))$. Up to\n the polylogarithmic factor in $1/\\epsilon$ and\n constant factors depending on $q$, this matches the\n lower bound $L=\\Omega_q(1/\\epsilon^2)$ for the list\n size and upper bound $R=O_q(\\epsilon^2)$ for the\n rate. Previously only existence (and not abundance)\n of such codes was known for the special case $q=2$\n (Guruswami, H{\\aa}stad, Sudan and Zuckerman, 2002).\n In order to obtain our result, we employ a relaxed\n version of the well known Johnson bound on list\n decoding that translates the \\textit{average}\n Hamming distance between codewords to list decoding\n guarantees. We furthermore prove that the desired\n average-distance guarantees hold for a code provided\n that a natural complex matrix encoding the codewords\n satisfies the Restricted Isometry Property with\n respect to the Euclidean norm (RIP-2). For the case\n of random binary linear codes, this matrix coincides\n with a random submatrix of the Hadamard-Walsh\n transform matrix that is well studied in the\n compressed sensing literature. 
Finally, we improve\n the analysis of Rudelson and Vershynin (2008) on the\n number of random frequency samples required for\n exact reconstruction of $k$-sparse signals of length\n $N$. Specifically, we improve the number of samples\n from $O(k \\log(N) \\log^2(k) (\\log k + \\log\\log N))$\n to $O(k \\log(N) \\cdot \\log^3(k))$. The proof\n involves bounding the expected supremum of a related\n Gaussian process by using an improved analysis of\n the metric defined by the process. This improvement\n is crucial for our application in list decoding.},\n note =\t {Extended version in {SIAM Journal on Computing\n (SICOMP).}},\n url_Paper =\t {https://eccc.weizmann.ac.il//report/2012/082}\n}\n\n\n
@ARTICLE{ref:Che13,\n author =\t {Cheraghchi, Mahdi},\n title =\t {Improved Constructions for Non-adaptive Threshold\n Group Testing},\n year =\t 2013,\n volume =\t 67,\n url_Link =\n {https://link.springer.com/article/10.1007/s00453-013-9754-7},\n url_Slides =\t {http://www.ima.umn.edu/videos/?id=1803},\n doi =\t\t {10.1007/s00453-013-9754-7},\n journal =\t {Algorithmica},\n pages =\t {384--417},\n keywords =\t {Threshold group testing, Lossless expanders,\n Combinatorial designs, Explicit constructions},\n note =\t {Preliminary version in {Proceedings of ICALP 2010.}},\n url_Paper =\t {https://arxiv.org/abs/1002.2244},\n abstract =\t {The basic goal in combinatorial group testing is to\n identify a set of up to $d$ defective items within a\n large population of size $n \\gg d$ using a pooling\n strategy. Namely, the items can be grouped together\n in pools, and a single measurement would reveal\n whether there are one or more defectives in the\n pool. The threshold model is a generalization of\n this idea where a measurement returns positive if\n the number of defectives in the pool reaches a fixed\n threshold $u > 0$, negative if this number is no\n more than a fixed lower threshold $\\ell < u$, and\n may behave arbitrarily otherwise. We study\n non-adaptive threshold group testing (in a possibly\n noisy setting) and show that, for this problem,\n $O(d^{g+2} (\\log d) \\log(n/d))$ measurements (where\n $g := u-\\ell-1$ and $u$ is any fixed constant)\n suffice to identify the defectives, and also present\n almost matching lower bounds. This significantly\n improves the previously known (non-constructive)\n upper bound $O(d^{u+1} \\log(n/d))$. Moreover, we\n obtain a framework for explicit construction of\n measurement schemes using lossless condensers. 
The\n number of measurements resulting from this scheme is\n ideally bounded by $O(d^{g+3} (\\log d) \\log n)$.\n Using state-of-the-art constructions of lossless\n condensers, however, we obtain explicit testing\n schemes with $O(d^{g+3} (\\log d)\n \\mathrm{quasipoly}(\\log n))$ and\n $O(d^{g+3+\\beta}\\mathrm{poly}(\\log n))$\n measurements, for arbitrary constant $\\beta > 0$.}\n}\n\n\n
@ARTICLE{ref:Che13:testing,\n author =\t {Mahdi Cheraghchi},\n title =\t "Noise-resilient group testing: Limitations and\n constructions",\n journal =\t "Discrete Applied Mathematics",\n volume =\t 161,\n number =\t 1,\n pages =\t "81--95",\n year =\t 2013,\n doi =\t\t "10.1016/j.dam.2012.07.022",\n url_Link =\n "http://www.sciencedirect.com/science/article/pii/S0166218X12003009",\n keywords =\t "Group testing, Randomness condensers, Extractors,\n List decoding",\n note =\t {Preliminary version in Proceedings of the {FCT\n 2009}. arXiv manuscript published in 2008.},\n url_Paper =\t {https://arxiv.org/abs/0811.2609},\n abstract =\t {We study combinatorial group testing schemes for\n learning $d$-sparse boolean vectors using highly\n unreliable disjunctive measurements. We consider an\n adversarial noise model that only limits the number\n of false observations, and show that any\n noise-resilient scheme in this model can only\n approximately reconstruct the sparse vector. On the\n positive side, we take this barrier to our advantage\n and show that approximate reconstruction (within a\n satisfactory degree of approximation) allows us to\n break the information theoretic lower bound of\n $\\tilde{\\Omega}(d^2 \\log n)$ that is known for exact\n reconstruction of $d$-sparse vectors of length $n$\n via non-adaptive measurements, by a multiplicative\n factor $\\tilde{\\Omega}(d)$. 
Specifically, we give\n simple randomized constructions of non-adaptive\n measurement schemes, with $m=O(d \\log n)$\n measurements, that allow efficient reconstruction of\n $d$-sparse vectors up to $O(d)$ false positives even\n in the presence of $\\delta m$ false positives and\n $O(m/d)$ false negatives within the measurement\n outcomes, for \\textit{any} constant $\\delta < 1$.\n We show that, information theoretically, none of\n these parameters can be substantially improved\n without dramatically affecting the others.\n Furthermore, we obtain several explicit\n constructions, in particular one matching the\n randomized trade-off but using $m = O(d^{1+o(1)}\n \\log n)$ measurements. We also obtain explicit\n constructions that allow fast reconstruction in time\n $\\mathrm{poly}(m)$, which would be sublinear in $n$\n for sufficiently sparse vectors. The main tool used\n in our construction is the list-decoding view of\n randomness condensers and extractors. An immediate\n consequence of our result is an adaptive scheme that\n runs in only two non-adaptive \\textit{rounds} and\n exactly reconstructs any $d$-sparse vector using a\n total $O(d \\log n)$ measurements, a task that would\n be impossible in one round and fairly easy in\n $O(\\log(n/d))$ rounds. }\n}\n\n\n
@UNPUBLISHED{ref:CGM12,\n author =\t {Mahdi Cheraghchi and Anna G\\'al and Andrew Mills},\n title =\t {Correctness and Corruption of Locally Decodable\n Codes},\n year =\t 2012,\n url_Paper =\t {https://eccc.weizmann.ac.il//report/2012/172/},\n abstract =\t {Locally decodable codes (LDCs) are error correcting\n codes with the extra property that it is sufficient\n to read just a small number of positions of a\n possibly corrupted codeword in order to recover any\n one position of the input. To achieve this, it is\n necessary to use randomness in the decoding\n procedures. We refer to the probability of\n returning the correct answer as the {\\em\n correctness} of the decoding algorithm. Thus far,\n the study of LDCs has focused on the question of the\n tradeoff between their length and the query\n complexity of the decoders. Another natural\n question is what is the largest possible\n correctness, as a function of the amount of codeword\n corruption and the number of queries, regardless of\n the length of the codewords. Goldreich et\n al. (Computational Complexity 15(3), 2006) observed\n that for a given number of queries and fraction of\n errors, the correctness probability cannot be\n arbitrarily close to $1$. However, the quantitative\n dependence between the largest possible correctness\n and the amount of corruption $\\delta$ has not been\n established before. We present several bounds on\n the largest possible correctness for LDCs, as a\n function of the amount of corruption tolerated and\n the number of queries used, regardless of the length\n of the code. Our bounds are close to tight. We\n also investigate the relationship between the amount\n of corruption tolerated by an LDC and its minimum\n distance as an error correcting code. Even though\n intuitively the two notions are expected to be\n related, we demonstrate that in general this is not\n the case. 
However, we show a close relationship\n between minimum distance and amount of corruption\n tolerated for linear codes over arbitrary finite\n fields, and for binary nonlinear codes. We use\n these results to strengthen the known bounds on the\n largest possible amount of corruption that can be\n tolerated by LDCs (with any nontrivial correctness\n better than random guessing) regardless of the query\n complexity or the length of the code. },\n note =\t {Manuscript.}\n}\n\n\n
@INPROCEEDINGS{ref:CKKL12,\n author =\t {Cheraghchi, Mahdi and Klivans, Adam and Kothari,\n Pravesh and Lee, Homin K.},\n title =\t {Submodular Functions Are Noise Stable},\n year =\t 2012,\n booktitle =\t {Proceedings of the 23rd {Annual ACM-SIAM Symposium\n on Discrete Algorithms (SODA)}},\n pages =\t {1586--1592},\n numpages =\t 7,\n url_Link =\t {https://dl.acm.org/doi/10.5555/2095116.2095242},\n url_Paper =\t {https://eccc.weizmann.ac.il//report/2011/090/},\n abstract =\t {We show that all non-negative submodular functions\n have high \\textit{noise-stability}. As a\n consequence, we obtain a polynomial-time learning\n algorithm for this class with respect to any product\n distribution on $\\{-1,1\\}^n$ (for any constant\n accuracy parameter $\\epsilon$). Our algorithm also\n succeeds in the agnostic setting. Previous work on\n learning submodular functions required either query\n access or strong assumptions about the types of\n submodular functions to be learned (and did not hold\n in the agnostic setting). Additionally we give\n simple algorithms that efficiently release\n differentially private answers to all Boolean\n conjunctions and to all halfspaces with constant\n average error, subsuming and improving recent work\n due to Gupta, Hardt, Roth and Ullman (STOC 2011).}\n}\n\n\n
@ARTICLE{ref:CHIS12,\n author =\t {Cheraghchi, Mahdi and H\r{a}stad, Johan and\n Isaksson, Marcus and Svensson, Ola},\n title =\t {Approximating Linear Threshold Predicates},\n year =\t 2012,\n volume =\t 4,\n number =\t 1,\n url_Link =\t {https://dl.acm.org/doi/10.1145/2141938.2141940},\n doi =\t\t {10.1145/2141938.2141940},\n journal =\t {ACM Transactions on Computation Theory (ToCT)},\n month =\t mar,\n articleno =\t 2,\n numpages =\t 31,\n keywords =\t {linear threshold predicates, constraint satisfaction\n problems, approximation algorithms},\n note =\t {Preliminary version in {Proceedings of APPROX\n 2010.}},\n url_Paper =\t {https://eccc.weizmann.ac.il//report/2010/132},\n abstract =\t {We study constraint satisfaction problems on the\n domain $\{-1,1\}$, where the given constraints are\n homogeneous linear threshold predicates. That is,\n predicates of the form $\mathrm{sgn}(w_1 x_1 +\n \cdots +w_n x_n)$ for some positive integer weights\n $w_1, \dots, w_n$. Despite their simplicity, current\n techniques fall short of providing a classification\n of these predicates in terms of approximability. In\n fact, it is not easy to guess whether there exists a\n homogeneous linear threshold predicate that is\n approximation resistant or not. The focus of this\n paper is to identify and study the approximation\n curve of a class of threshold predicates that allow\n for non-trivial approximation. Arguably the\n simplest such predicate is the majority predicate\n $\mathrm{sgn}(x_1+ \cdots + x_n)$, for which we\n obtain an almost complete understanding of the\n asymptotic approximation curve, assuming the Unique\n Games Conjecture. Our techniques extend to a more\n general class of "majority-like" predicates and we\n obtain parallel results for them. In order to\n classify these predicates, we introduce the notion\n of \textit{Chow-robustness} that might be of\n independent interest.}\n}\n\n\n
@ARTICLE{ref:CKMS12,\n author =\t {Mahdi Cheraghchi and Amin Karbasi and Soheil Mohajer\n and Venkatesh Saligrama},\n journal =\t {IEEE Transactions on Information Theory},\n title =\t {Graph-Constrained Group Testing},\n year =\t 2012,\n volume =\t 58,\n number =\t 1,\n pages =\t {248--262},\n note =\t {Preliminary version in Proceedings of {ISIT 2010}.},\n doi =\t\t {10.1109/TIT.2011.2169535},\n url_Link =\t {https://ieeexplore.ieee.org/document/6121984},\n url_Paper =\t {https://arxiv.org/abs/1001.1445},\n abstract =\t {Non-adaptive group testing involves grouping\n arbitrary subsets of $n$ items into different\n pools. Each pool is then tested and defective items\n are identified. A fundamental question involves\n minimizing the number of pools required to identify\n at most $d$ defective items. Motivated by\n applications in network tomography, sensor networks\n and infection propagation, a variation of group\n testing problems on graphs is formulated. Unlike\n conventional group testing problems, each group here\n must conform to the constraints imposed by a\n graph. For instance, items can be associated with\n vertices and each pool is any set of nodes that must\n be path connected. In this paper, a test is\n associated with a random walk. In this context,\n conventional group testing corresponds to the\n special case of a complete graph on $n$ vertices.\n For interesting classes of graphs a rather\n surprising result is obtained, namely, that the\n number of tests required to identify $d$ defective\n items is substantially similar to what is required\n in conventional group testing problems, where no\n such constraints on pooling is\n imposed. Specifically, if $T(n)$ corresponds to the\n mixing time of the graph $G$, it is shown that with\n $m=O(d^2T^2(n)\log(n/d))$ non-adaptive tests, one\n can identify the defective items. 
Consequently, for\n the Erd\\H{o}s-R\\'enyi random graph $G(n,p)$, as well\n as expander graphs with constant spectral gap, it\n follows that $m=O(d^2\\log^3n)$ non-adaptive tests\n are sufficient to identify $d$ defective\n items. Next, a specific scenario is considered that\n arises in network tomography, for which it is shown\n that $m=O(d^3\\log^3n)$ non-adaptive tests are\n sufficient to identify $d$ defective items. Noisy\n counterparts of the graph constrained group testing\n problem are considered, for which parallel results\n are developed. We also briefly discuss extensions to\n compressive sensing on graphs. }\n}\n\n\n
@ARTICLE{ref:CDS12,\n author =\t {Mahdi Cheraghchi and Fr\\'ed\\'eric Didier and Amin\n Shokrollahi},\n title =\t {Invertible Extractors and Wiretap Protocols},\n journal =\t {IEEE Transactions on Information Theory},\n year =\t 2012,\n volume =\t 58,\n number =\t 2,\n pages =\t {1254--1274},\n note =\t {Preliminary version in Proceedings of {ISIT 2009}.},\n doi =\t\t {10.1109/TIT.2011.2170660},\n url_Link =\t {https://ieeexplore.ieee.org/document/6035782},\n url_Paper =\t {https://arxiv.org/abs/0901.2120},\n abstract =\t {A wiretap protocol is a pair of randomized encoding\n and decoding functions such that knowledge of a\n bounded fraction of the encoding of a message\n reveals essentially no information about the\n message, while knowledge of the entire encoding\n reveals the message using the decoder. In this\n paper, the notion of efficiently invertible\n extractors is studied and it is shown that a wiretap\n protocol can be constructed from such an extractor.\n Then, invertible extractors for symbol-fixing,\n affine, and general sources are constructed and used\n to create wiretap protocols with asymptotically\n optimal trade-offs between their rate (ratio of the\n length of the message versus its encoding) and\n resilience (ratio of the observed positions of the\n encoding and the length of the encoding). The\n results are further applied to create wiretap\n protocols for challenging communication problems,\n such as active intruders who change portions of the\n encoding, network coding, and intruders observing\n arbitrary Boolean functions of the encoding. As a\n by-product of the constructions, new explicit\n extractors for a restricted family of affine sources\n over large fields is obtained (that in particular\n generalizes the notion of symbol-fixing sources)\n which is of independent interest. These extractors\n are able to extract the entire source entropy with\n zero error. 
},\n keywords =\t {Wiretap Channel, Extractors, Network Coding, Active\n Intrusion, Exposure Resilient Cryptography}\n}\n\n\n
@INPROCEEDINGS{ref:conf:Che11,\n author =\t {Mahdi Cheraghchi},\n title =\t {Coding-Theoretic Methods for Sparse Recovery},\n year =\t 2011,\n booktitle =\t {Proceedings of the 49th {Annual Allerton Conference\n on Communication, Control, and Computing}},\n pages =\t {909--916},\n doi =\t\t {10.1109/Allerton.2011.6120263},\n url_Link =\t {https://ieeexplore.ieee.org/document/6120263},\n url_Paper =\t {https://arxiv.org/abs/1110.0279},\n abstract =\t { We review connections between coding-theoretic\n objects and sparse learning problems. In particular,\n we show how seemingly different combinatorial\n objects such as error-correcting codes,\n combinatorial designs, spherical codes, compressed\n sensing matrices and group testing designs can be\n obtained from one another. The reductions enable one\n to translate upper and lower bounds on the\n parameters attainable by one object to another. We\n survey some of the well-known reductions in a\n unified presentation, and bring some existing gaps\n to attention. New reductions are also introduced; in\n particular, we bring up the notion of minimum\n "L-wise distance" of codes and show that this notion\n closely captures the combinatorial structure of\n RIP-2 matrices. Moreover, we show how this weaker\n variation of the minimum distance is related to\n combinatorial list-decoding properties of codes. }\n}\n\n\n
@ARTICLE{ref:CHKV11,\n author =\t {Mahdi Cheraghchi and Ali Hormati and Amin Karbasi\n and Martin Vetterli},\n title =\t {Group Testing With Probabilistic Tests: Theory,\n Design and Application},\n year =\t 2011,\n journal =\t {IEEE Transactions on Information Theory},\n volume =\t 57,\n number =\t 10,\n pages =\t {7057--7067},\n note =\t {Preliminary version in Proceedings of the {Allerton\n 2009}.},\n doi =\t\t {10.1109/TIT.2011.2148691},\n url_Link =\t {https://ieeexplore.ieee.org/document/5759087},\n url_Paper =\t {https://arxiv.org/abs/0909.3508},\n abstract =\t {Detection of defective members of large populations\n has been widely studied in the statistics community\n under the name "group testing", a problem which\n dates back to World War II when it was suggested for\n syphilis screening. There, the main interest is to\n identify a small number of infected people among a\n large population using \\textit{collective\n samples}. In viral epidemics, one way to acquire\n collective samples is by sending agents inside the\n population. While in classical group testing, it is\n assumed that the sampling procedure is fully known\n to the reconstruction algorithm, in this work we\n assume that the decoder possesses only\n \\textit{partial} knowledge about the sampling\n process. This assumption is justified by observing\n the fact that in a viral sickness, there is a chance\n that an agent remains healthy despite having contact\n with an infected person. Therefore, the\n reconstruction method has to cope with two different\n types of uncertainty; namely, identification of the\n infected population and the partially unknown\n sampling procedure. In this work, by using a\n natural probabilistic model for "viral infections",\n we design non-adaptive sampling procedures that\n allow successful identification of the infected\n population with overwhelming probability\n $1-o(1)$. 
We propose both probabilistic and explicit\n design procedures that require a "small" number of\n agents to single out the infected individuals. More\n precisely, for a contamination probability $p$, the\n number of agents required by the probabilistic and\n explicit designs for identification of up to $k$\n infected members is bounded by $m = O(k^2 (\\log n) /\n p^2)$ and $m = O(k^2 (\\log^2 n) / p^2)$,\n respectively. In both cases, a simple decoder is\n able to successfully identify the infected\n population in time $O(mn)$. }\n}\n\n\n
@INPROCEEDINGS{ref:conf:Che10,\n author =\t {Mahdi Cheraghchi},\n title =\t {Derandomization and Group Testing},\n year =\t 2010,\n booktitle =\t {Proceedings of the 48th {Annual Allerton Conference\n on Communication, Control, and Computing}},\n pages =\t {991--997},\n doi =\t\t {10.1109/ALLERTON.2010.5707017},\n url_Link =\t {https://ieeexplore.ieee.org/document/5707017},\n url_Paper =\t {https://arxiv.org/abs/1010.0433},\n abstract =\t { The rapid development of derandomization theory,\n which is a fundamental area in theoretical computer\n science, has recently led to many surprising\n applications outside its initial intention. We will\n review some recent such developments related to\n combinatorial group testing. In its most basic\n setting, the aim of group testing is to identify a\n set of "positive" individuals in a population of\n items by taking groups of items and asking whether\n there is a positive in each group. In particular,\n we will discuss explicit constructions of optimal or\n nearly-optimal group testing schemes using\n "randomness-conducting" functions. Among such\n developments are constructions of error-correcting\n group testing schemes using randomness extractors\n and condensers, as well as threshold group testing\n schemes from lossless condensers. }\n}\n\n\n
@INPROCEEDINGS{ref:conf:CHIS10,\n author =\t {Cheraghchi, Mahdi and H\r{a}stad, Johan and\n Isaksson, Marcus and Svensson, Ola},\n title =\t {Approximating Linear Threshold Predicates},\n booktitle =\t "Proceedings of {APPROX}",\n year =\t 2010,\n pages =\t "110--123",\n url_Link =\n {https://link.springer.com/chapter/10.1007/978-3-642-15369-3_9},\n doi =\t\t {10.1007/978-3-642-15369-3_9},\n keywords =\t {linear threshold predicates, constraint satisfaction\n problems, approximation algorithms},\n note =\t {Extended version in {ACM Transactions on Computation\n Theory (ToCT).}},\n url_Paper =\t {https://eccc.weizmann.ac.il//report/2010/132},\n abstract =\t {We study constraint satisfaction problems on the\n domain $\{-1,1\}$, where the given constraints are\n homogeneous linear threshold predicates. That is,\n predicates of the form $\mathrm{sgn}(w_1 x_1 +\n \cdots +w_n x_n)$ for some positive integer weights\n $w_1, \dots, w_n$. Despite their simplicity, current\n techniques fall short of providing a classification\n of these predicates in terms of approximability. In\n fact, it is not easy to guess whether there exists a\n homogeneous linear threshold predicate that is\n approximation resistant or not. The focus of this\n paper is to identify and study the approximation\n curve of a class of threshold predicates that allow\n for non-trivial approximation. Arguably the\n simplest such predicate is the majority predicate\n $\mathrm{sgn}(x_1+ \cdots + x_n)$, for which we\n obtain an almost complete understanding of the\n asymptotic approximation curve, assuming the Unique\n Games Conjecture. Our techniques extend to a more\n general class of "majority-like" predicates and we\n obtain parallel results for them. In order to\n classify these predicates, we introduce the notion\n of \textit{Chow-robustness} that might be of\n independent interest.}\n}\n\n\n
@INPROCEEDINGS{ref:conf:Che10:threshold,\n author =\t {Cheraghchi, Mahdi},\n title =\t {Improved Constructions for Non-adaptive Threshold\n Group Testing},\n booktitle =\t "Proceedings of the 37th {International Colloquium on\n Automata, Languages and Programming (ICALP)}",\n year =\t 2010,\n pages =\t "552--564",\n url_Link =\n {https://link.springer.com/chapter/10.1007/978-3-642-14165-2_47},\n url_Slides =\t {http://www.ima.umn.edu/videos/?id=1803},\n doi =\t\t {10.1007/978-3-642-14165-2_47},\n keywords =\t {Threshold group testing, Lossless expanders,\n Combinatorial designs, Explicit constructions},\n note =\t {Extended version in {Algorithmica.}},\n url_Paper =\t {https://arxiv.org/abs/1002.2244},\n abstract =\t {The basic goal in combinatorial group testing is to\n identify a set of up to $d$ defective items within a\n large population of size $n \\gg d$ using a pooling\n strategy. Namely, the items can be grouped together\n in pools, and a single measurement would reveal\n whether there are one or more defectives in the\n pool. The threshold model is a generalization of\n this idea where a measurement returns positive if\n the number of defectives in the pool reaches a fixed\n threshold $u > 0$, negative if this number is no\n more than a fixed lower threshold $\\ell < u$, and\n may behave arbitrarily otherwise. We study\n non-adaptive threshold group testing (in a possibly\n noisy setting) and show that, for this problem,\n $O(d^{g+2} (\\log d) \\log(n/d))$ measurements (where\n $g := u-\\ell-1$ and $u$ is any fixed constant)\n suffice to identify the defectives, and also present\n almost matching lower bounds. This significantly\n improves the previously known (non-constructive)\n upper bound $O(d^{u+1} \\log(n/d))$. Moreover, we\n obtain a framework for explicit construction of\n measurement schemes using lossless condensers. 
The\n number of measurements resulting from this scheme is\n ideally bounded by $O(d^{g+3} (\\log d) \\log n)$.\n Using state-of-the-art constructions of lossless\n condensers, however, we obtain explicit testing\n schemes with $O(d^{g+3} (\\log d)\n \\mathrm{quasipoly}(\\log n))$ and $O(d^{g+3+\\beta}\n \\mathrm{poly}(\\log n))$ measurements, for arbitrary\n constant $\\beta > 0$.}\n}\n\n\n
@INPROCEEDINGS{ref:conf:CKMS10,\n author =\t {Mahdi Cheraghchi and Amin Karbasi and Soheil Mohajer\n and Venkatesh Saligrama},\n title =\t {Graph-constrained group testing},\n booktitle =\t {Proceedings of the {IEEE International Symposium on\n Information Theory (ISIT)}},\n year =\t 2010,\n pages =\t {1913--1917},\n note =\t {Extended version in IEEE Transactions on Information\n Theory.},\n doi =\t\t {10.1109/ISIT.2010.5513397},\n url_Link =\t {https://ieeexplore.ieee.org/document/5513397},\n url_Paper =\t {https://arxiv.org/abs/1001.1445},\n abstract =\t {Non-adaptive group testing involves grouping\n arbitrary subsets of $n$ items into different pools\n and identifying defective items based on tests\n obtained for each pool. Motivated by applications\n in network tomography, sensor networks and infection\n propagation we formulate non-adaptive group testing\n problems on graphs. Unlike conventional group\n testing problems each group here must conform to the\n constraints imposed by a graph. For instance, items\n can be associated with vertices and each pool is any\n set of nodes that must be path connected. In this\n paper we associate a test with a random walk. In\n this context conventional group testing corresponds\n to the special case of a complete graph on $n$\n vertices. For interesting classes of graphs we\n arrive at a rather surprising result, namely, that\n the number of tests required to identify $d$\n defective items is substantially similar to that\n required in conventional group testing problems,\n where no such constraints on pooling is\n imposed. Specifically, if $T(n)$ corresponds to the\n mixing time of the graph $G$, we show that with\n $m=O(d^2T^2(n)\log(n/d))$ non-adaptive tests, one\n can identify the defective items. 
Consequently, for\n the Erd\\H{o}s-R\\'enyi random graph $G(n,p)$, as well\n as expander graphs with constant spectral gap, it\n follows that $m=O(d^2\\log^3n)$ non-adaptive tests\n are sufficient to identify $d$ defective items. We\n next consider a specific scenario that arises in\n network tomography and show that $m=O(d^3\\log^3n)$\n non-adaptive tests are sufficient to identify $d$\n defective items. We also consider noisy counterparts\n of the graph constrained group testing problem and\n develop parallel results for these cases. }\n}\n\n\n
@phdthesis{ref:Che10:thesis,\n author =\t {Mahdi Cheraghchi},\n title =\t {Applications of Derandomization Theory in Coding},\n school =\t {EPFL},\n url_Link =\t {http://infoscience.epfl.ch/record/149074},\n url_Paper =\t {https://arxiv.org/abs/1107.4709},\n institution =\t {IIF},\n publisher =\t {EPFL},\n address =\t {Lausanne, Switzerland},\n pages =\t 210,\n year =\t 2010,\n abstract =\t {Randomized techniques play a fundamental role in\n theoretical computer science and discrete\n mathematics, in particular for the design of\n efficient algorithms and construction of\n combinatorial objects. The basic goal in\n derandomization theory is to eliminate or reduce the\n need for randomness in such randomized\n constructions. Towards this goal, numerous\n fundamental notions have been developed to provide a\n unified framework for approaching various\n derandomization problems and to improve our general\n understanding of the power of randomness in\n computation. Two important classes of such tools\n are pseudorandom generators and randomness\n extractors. Pseudorandom generators transform a\n short, purely random, sequence into a much longer\n sequence that looks random, while extractors\n transform a weak source of randomness into a\n perfectly random one (or one with much better\n qualities, in which case the transformation is\n called a randomness condenser). In this thesis, we\n explore some applications of the fundamental notions\n in derandomization theory to problems outside the\n core of theoretical computer science, and in\n particular, certain problems related to coding\n theory. First, we consider the wiretap channel\n problem which involves a communication system in\n which an intruder can eavesdrop a limited portion of\n the transmissions. We utilize randomness extractors\n to construct efficient and information-theoretically\n optimal communication protocols for this model. Then\n we consider the combinatorial group testing\n problem. 
In this classical problem, one aims to\n determine a set of defective items within a large\n population by asking a number of queries, where each\n query reveals whether a defective item is present\n within a specified group of items. We use randomness\n condensers to explicitly construct optimal, or\n nearly optimal, group testing schemes for a setting\n where the query outcomes can be highly unreliable,\n as well as the threshold model where a query returns\n positive if the number of defectives pass a certain\n threshold. Next, we use randomness condensers and\n extractors to design ensembles of error-correcting\n codes that achieve the information-theoretic\n capacity of a large class of communication channels,\n and then use the obtained ensembles for construction\n of explicit capacity achieving codes. Finally, we\n consider the problem of explicit construction of\n error-correcting codes on the Gilbert-Varshamov\n bound and extend the original idea of Nisan and\n Wigderson to obtain a small ensemble of codes,\n mostly achieving the bound, under suitable\n computational hardness assumptions.},\n url =\t\t {http://infoscience.epfl.ch/record/149074},\n doi =\t\t {10.5075/epfl-thesis-4767}\n}\n\n\n
@INPROCEEDINGS{ref:conf:CHKV09,\n author =\t {Mahdi Cheraghchi and Ali Hormati and Amin Karbasi\n and Martin Vetterli},\n title =\t {Compressed sensing with probabilistic measurements:\n A group testing solution},\n booktitle =\t {Proceedings of the 47th {Annual Allerton Conference\n on Communication, Control, and Computing}},\n year =\t 2009,\n pages =\t {30--35},\n note =\t {Extended version in {IEEE Transactions on\n Information Theory}.},\n doi =\t\t {10.1109/ALLERTON.2009.5394829},\n url_Link =\t {https://ieeexplore.ieee.org/document/5394829},\n url_Paper =\t {https://arxiv.org/abs/0909.3508},\n abstract =\t {Detection of defective members of large populations\n has been widely studied in the statistics community\n under the name "group testing", a problem which\n dates back to World War II when it was suggested for\n syphilis screening. There, the main interest is to\n identify a small number of infected people among a\n large population using \\textit{collective\n samples}. In viral epidemics, one way to acquire\n collective samples is by sending agents inside the\n population. While in classical group testing, it is\n assumed that the sampling procedure is fully known\n to the reconstruction algorithm, in this work we\n assume that the decoder possesses only\n \\textit{partial} knowledge about the sampling\n process. This assumption is justified by observing\n the fact that in a viral sickness, there is a chance\n that an agent remains healthy despite having contact\n with an infected person. Therefore, the\n reconstruction method has to cope with two different\n types of uncertainty; namely, identification of the\n infected population and the partially unknown\n sampling procedure. In this work, by using a\n natural probabilistic model for "viral infections",\n we design non-adaptive sampling procedures that\n allow successful identification of the infected\n population with overwhelming probability\n $1-o(1)$. 
We propose both probabilistic and explicit\n design procedures that require a "small" number of\n agents to single out the infected individuals. More\n precisely, for a contamination probability $p$, the\n number of agents required by the probabilistic and\n explicit designs for identification of up to $k$\n infected members is bounded by $m = O(k^2 (\\log n) /\n p^2)$ and $m = O(k^2 (\\log^2 n) / p^2)$,\n respectively. In both cases, a simple decoder is\n able to successfully identify the infected\n population in time $O(mn)$. }\n}\n\n\n
@INPROCEEDINGS{ref:conf:Che09:testing,\n author =\t {Mahdi Cheraghchi},\n title =\t "Noise-resilient group testing: Limitations and\n constructions",\n booktitle =\t "Proceedings of the 17th {International Symposium on\n Fundamentals of Computation Theory (FCT)}",\n year =\t 2009,\n pages =\t "62--73",\n doi =\t\t "10.1007/978-3-642-03409-1_7",\n url_Link =\n "https://link.springer.com/chapter/10.1007/978-3-642-03409-1_7",\n keywords =\t "Group testing, Randomness condensers, Extractors,\n List decoding",\n note =\t {Extended version in {Discrete Applied\n Mathematics}. arXiv manuscript published in 2008.},\n url_Paper =\t {https://arxiv.org/abs/0811.2609},\n abstract =\t {We study combinatorial group testing schemes for\n learning $d$-sparse boolean vectors using highly\n unreliable disjunctive measurements. We consider an\n adversarial noise model that only limits the number\n of false observations, and show that any\n noise-resilient scheme in this model can only\n approximately reconstruct the sparse vector. On the\n positive side, we take this barrier to our advantage\n and show that approximate reconstruction (within a\n satisfactory degree of approximation) allows us to\n break the information theoretic lower bound of\n $\\tilde{\\Omega}(d^2 \\log n)$ that is known for exact\n reconstruction of $d$-sparse vectors of length $n$\n via non-adaptive measurements, by a multiplicative\n factor $\\tilde{\\Omega}(d)$. 
Specifically, we give\n simple randomized constructions of non-adaptive\n measurement schemes, with $m=O(d \\log n)$\n measurements, that allow efficient reconstruction of\n $d$-sparse vectors up to $O(d)$ false positives even\n in the presence of $\\delta m$ false positives and\n $O(m/d)$ false negatives within the measurement\n outcomes, for \\textit{any} constant $\\delta < 1$.\n We show that, information theoretically, none of\n these parameters can be substantially improved\n without dramatically affecting the others.\n Furthermore, we obtain several explicit\n constructions, in particular one matching the\n randomized trade-off but using $m = O(d^{1+o(1)}\n \\log n)$ measurements. We also obtain explicit\n constructions that allow fast reconstruction in time\n $\\mathrm{poly}(m)$, which would be sublinear in $n$\n for sufficiently sparse vectors. The main tool used\n in our construction is the list-decoding view of\n randomness condensers and extractors. An immediate\n consequence of our result is an adaptive scheme that\n runs in only two non-adaptive \\textit{rounds} and\n exactly reconstructs any $d$-sparse vector using a\n total $O(d \\log n)$ measurements, a task that would\n be impossible in one round and fairly easy in\n $O(\\log(n/d))$ rounds. }\n}\n\n\n
@INPROCEEDINGS{ref:conf:Che09:capacity,\n author =\t {Mahdi Cheraghchi},\n booktitle =\t {Proceedings of the {IEEE International Symposium on\n Information Theory (ISIT)}},\n title =\t {Capacity achieving codes from randomness conductors},\n year =\t 2009,\n pages =\t {2639--2643},\n doi =\t\t {10.1109/ISIT.2009.5205931},\n url_Link =\n {https://ieeexplore.ieee.org/abstract/document/5205931},\n url_Paper =\t {https://arxiv.org/abs/0901.1866},\n abstract =\t {We establish a general framework for construction of\n small ensembles of capacity achieving linear codes\n for a wide range of (not necessarily memoryless)\n discrete symmetric channels, and in particular, the\n binary erasure and symmetric channels. The main\n tool used in our constructions is the notion of\n randomness extractors and lossless condensers that\n are regarded as central tools in theoretical\n computer science. Same as random codes, the\n resulting ensembles preserve their capacity\n achieving properties under any change of basis.\n Using known explicit constructions of condensers, we\n obtain specific ensembles whose size is as small as\n polynomial in the block length. By applying our\n construction to Justesen's concatenation scheme\n (Justesen, 1972) we obtain explicit capacity\n achieving codes for BEC (resp., BSC) with almost\n linear time encoding and almost linear time (resp.,\n quadratic time) decoding and exponentially small\n error probability. },\n keywords =\t {Capacity achieving codes, Randomness extractors,\n Lossless condensers, Code ensembles, Concatenated\n codes}\n}\n\n\n
@INPROCEEDINGS{ref:conf:CDS09,\n author =\t {Mahdi Cheraghchi and Fr\\'ed\\'eric Didier and Amin\n Shokrollahi},\n title =\t {Invertible Extractors and Wiretap Protocols},\n booktitle =\t {Proceedings of the {IEEE International Symposium on\n Information Theory (ISIT)}},\n year =\t 2009,\n pages =\t {1934--1938},\n note =\t {Extended version in IEEE Transactions on Information\n Theory.},\n doi =\t\t {10.1109/ISIT.2009.5205583},\n url_Link =\t {https://ieeexplore.ieee.org/document/5205583},\n url_Paper =\t {https://arxiv.org/abs/0901.2120},\n abstract =\t {A wiretap protocol is a pair of randomized encoding\n and decoding functions such that knowledge of a\n bounded fraction of the encoding of a message\n reveals essentially no information about the\n message, while knowledge of the entire encoding\n reveals the message using the decoder. In this\n paper, the notion of efficiently invertible\n extractors is studied and it is shown that a wiretap\n protocol can be constructed from such an extractor.\n Then, invertible extractors for symbol-fixing,\n affine, and general sources are constructed and used\n to create wiretap protocols with asymptotically\n optimal trade-offs between their rate (ratio of the\n length of the message versus its encoding) and\n resilience (ratio of the observed positions of the\n encoding and the length of the encoding). The\n results are further applied to create wiretap\n protocols for challenging communication problems,\n such as active intruders who change portions of the\n encoding, network coding, and intruders observing\n arbitrary Boolean functions of the encoding. As a\n by-product of the constructions, new explicit\n extractors for a restricted family of affine sources\n over large fields is obtained (that in particular\n generalizes the notion of symbol-fixing sources)\n which is of independent interest. These extractors\n are able to extract the entire source entropy with\n zero error. 
},\n keywords =\t {Wiretap Channel, Extractors, Network Coding, Active\n Intrusion, Exposure Resilient Cryptography}\n}\n\n\n
@INPROCEEDINGS{ref:conf:ACS09,\n author =\t {Ehsan Ardestanizadeh and Mahdi Cheraghchi and Amin\n Shokrollahi},\n title =\t {Bit precision analysis for compressed sensing},\n booktitle =\t {Proceedings of the {IEEE International Symposium on\n Information Theory (ISIT)}},\n year =\t 2009,\n pages =\t {1-5},\n doi =\t\t {10.1109/ISIT.2009.5206076},\n url_Link =\t {https://ieeexplore.ieee.org/document/5206076},\n url_Paper =\t {https://arxiv.org/abs/0901.2147},\n abstract =\t {This paper studies the stability of some\n reconstruction algorithms for compressed sensing in\n terms of the \\textit{bit precision}. Considering the\n fact that practical digital systems deal with\n discretized signals, we motivate the importance of\n the total number of accurate bits needed from the\n measurement outcomes in addition to the number of\n measurements. It is shown that if one uses a $2k\n \\times n $ Vandermonde matrix with roots on the unit\n circle as the measurement matrix, $O(\\ell + k \\log\n \\frac{n}{k})$ bits of precision per measurement are\n sufficient to reconstruct a $k$-sparse signal $x \\in\n \\mathbb{R}^n$ with \\textit{dynamic range} (i.e., the\n absolute ratio between the largest and the smallest\n nonzero coefficients) at most $2^\\ell$ within $\\ell$\n bits of precision, hence identifying its correct\n support. Finally, we obtain an upper bound on the\n total number of required bits when the measurement\n matrix satisfies a restricted isometry property,\n which is in particular the case for random Fourier\n and Gaussian matrices. For very sparse signals, the\n upper bound on the number of required bits for\n Vandermonde matrices is shown to be better than this\n general upper bound. }\n}\n\n\n
@INPROCEEDINGS{ref:conf:CS09,\n author =\t {Mahdi Cheraghchi and Amin Shokrollahi},\n title =\t {Almost-Uniform Sampling of Points on\n High-Dimensional Algebraic Varieties},\n booktitle =\t {Proceedings of the 26th {International Symposium on\n Theoretical Aspects of Computer Science (STACS)}},\n pages =\t {277--288},\n year =\t 2009,\n series =\t {Leibniz International Proceedings in Informatics\n (LIPIcs)},\n volume =\t 3,\n url_Link =\t {http://drops.dagstuhl.de/opus/volltexte/2009/1817},\n doi =\t\t {10.4230/LIPIcs.STACS.2009.1817},\n keywords =\t {Uniform sampling, Algebraic varieties, Randomized\n algorithms, Computational complexity},\n url_Paper =\t {https://arxiv.org/abs/0902.1254},\n abstract =\t {We consider the problem of uniform sampling of\n points on an algebraic variety. Specifically, we\n develop a randomized algorithm that, given a small\n set of multivariate polynomials over a sufficiently\n large finite field, produces a common zero of the\n polynomials almost uniformly at random. The\n statistical distance between the output distribution\n of the algorithm and the uniform distribution on the\n set of common zeros is polynomially small in the\n field size, and the running time of the algorithm is\n polynomial in the description of the polynomials and\n their degrees provided that the number of the\n polynomials is a constant. }\n}\n\n\n
@INPROCEEDINGS{ref:conf:CSW06,\n author =\t {Mahdi Cheraghchi and Amin Shokrollahi and Avi\n Wigderson},\n title =\t {Computational Hardness and Explicit Constructions of\n Error Correcting Codes},\n year =\t 2006,\n booktitle =\t {Proceedings of the 44th {Annual Allerton Conference\n on Communication, Control, and Computing}},\n pages =\t {1173--1179},\n url_Paper =\t {https://infoscience.epfl.ch/record/101078},\n abstract =\t { We outline a procedure for using pseudorandom\n generators to construct binary codes with good\n properties, assuming the existence of sufficiently\n hard functions. Specifically, we give a polynomial\n time algorithm, which for every integers $n$ and\n $k$, constructs polynomially many linear codes of\n block length $n$ and dimension $k$, most of which\n achieving the Gilbert-Varshamov bound. The success\n of the procedure relies on the assumption that the\n exponential time class of $\\mathrm{E} :=\n \\mathrm{DTIME}[2^{O(n)}]$ is not contained in the\n sub-exponential space class\n $\\mathrm{DSPACE}[2^{o(n)}]$. The methods used in\n this paper are by now standard within computational\n complexity theory, and the main contribution of this\n note is observing that they are relevant to the\n construction of optimal codes. We attempt to make\n this note self contained, and describe the relevant\n results and proofs from the theory of\n pseudorandomness in some detail. }\n}\n\n\n
@UNPUBLISHED{ref:Che05,\n author =\t {Mahdi Cheraghchi},\n title =\t {On Matrix Rigidity and the Complexity of Linear\n Forms},\n year =\t 2005,\n note =\t {ECCC Technical Report TR05-070.},\n abstract =\t { The rigidity function of a matrix is defined as the\n minimum number of its entries that need to be\n changed in order to reduce the rank of the matrix to\n below a given parameter. Proving a strong enough\n lower bound on the rigidity of a matrix implies a\n nontrivial lower bound on the complexity of any\n linear circuit computing the set of linear forms\n associated with it. However, although it is shown\n that most matrices are rigid enough, no explicit\n construction of a rigid family of matrices is known.\n We review the concept of rigidity and some of its\n interesting variations as well as several notable\n works related to that. We also show the existence of\n highly rigid matrices constructed by evaluation of\n bivariate polynomials over a finite field. },\n keywords =\t {Matrix Rigidity, Low Level Complexity, Circuit\n Complexity, Linear Forms}\n}\n\n\n
@mastersthesis{ref:Che05:thesis,\n author =\t {Mahdi Cheraghchi},\n title =\t {Locally Testable Codes},\n school =\t {EPFL},\n url_Paper =\n {https://eccc.weizmann.ac.il//static/books/Locally_Testable_Codes},\n institution =\t {IIF},\n publisher =\t {EPFL},\n address =\t {Lausanne, Switzerland},\n year =\t 2005,\n abstract =\t { Error correcting codes are combinatorial objects\n that allow reliable recovery of information in\n presence of errors by cleverly augmenting the\n original information with a certain amount of\n redundancy. The availability of efficient means of\n error detection is considered as a fundamental\n criterion for error correcting codes. Locally\n testable codes are families of error correcting\n codes that are highly robust against transmission\n errors and in addition provide super-efficient\n (sublinear time) probabilistic algorithms for error\n detection. In particular, the error detection\n algorithm probes the received sequence only at a\n small (or even constant) number of locations. There\n seems to be a trade-off between the amount of\n redundancy and the number of probes for the error\n detection procedure in locally testable codes. Even\n though currently best constructions allow reduction\n of redundancy to a nearly linear amount, it is not\n clear whether this can be further reduced to linear\n while preserving a constant number of probes. We\n study the formal notion of locally testable codes\n and survey several major results in this area. We\n also investigate closely related concepts, and in\n particular, polynomial low-degree tests and\n probabilistically checkable proofs. },\n keywords =\t {Error Correcting Codes, Locally Testable Codes,\n Probabilistically Checkable Proofs (PCP), Low-Degree\n Tests, Hardness of Approximation}\n}\n\n