@inproceedings{14921, abstract = {Neural collapse (NC) refers to the surprising structure of the last layer of deep neural networks in the terminal phase of gradient descent training. Recently, an increasing amount of experimental evidence has pointed to the propagation of NC to earlier layers of neural networks. However, while the NC in the last layer is well studied theoretically, much less is known about its multi-layered counterpart - deep neural collapse (DNC). In particular, existing work focuses either on linear layers or only on the last two layers at the price of an extra assumption. Our paper fills this gap by generalizing the established analytical framework for NC - the unconstrained features model - to multiple non-linear layers. Our key technical contribution is to show that, in a deep unconstrained features model, the unique global optimum for binary classification exhibits all the properties typical of DNC. This explains the existing experimental evidence of DNC. We also empirically show that (i) by optimizing deep unconstrained features models via gradient descent, the resulting solution agrees well with our theory, and (ii) trained networks recover the unconstrained features suitable for the occurrence of DNC, thus supporting the validity of this modeling principle.}, author = {Súkeník, Peter and Mondelli, Marco and Lampert, Christoph}, booktitle = {37th Annual Conference on Neural Information Processing Systems}, location = {New Orleans, LA, United States}, title = {{Deep neural collapse is provably optimal for the deep unconstrained features model}}, year = {2023}, } @article{14924, abstract = {The stochastic heavy ball method (SHB), also known as stochastic gradient descent (SGD) with Polyak's momentum, is widely used in training neural networks. However, despite the remarkable success of such algorithm in practice, its theoretical characterization remains limited. 
In this paper, we focus on neural networks with two and three layers and provide a rigorous understanding of the properties of the solutions found by SHB: \emph{(i)} stability after dropping out part of the neurons, \emph{(ii)} connectivity along a low-loss path, and \emph{(iii)} convergence to the global optimum. To achieve this goal, we take a mean-field view and relate the SHB dynamics to a certain partial differential equation in the limit of large network widths. This mean-field perspective has inspired a recent line of work focusing on SGD while, in contrast, our paper considers an algorithm with momentum. More specifically, after proving existence and uniqueness of the limit differential equations, we show convergence to the global optimum and give a quantitative bound between the mean-field limit and the SHB dynamics of a finite-width network. Armed with this last bound, we are able to establish the dropout-stability and connectivity of SHB solutions.}, author = {Wu, Diyuan and Kungurtsev, Vyacheslav and Mondelli, Marco}, journal = {Transactions on Machine Learning Research}, publisher = {ML Research Press}, title = {{Mean-field analysis for heavy ball methods: Dropout-stability, connectivity, and global convergence}}, year = {2023}, } @inproceedings{14923, abstract = {We study the performance of a Bayesian statistician who estimates a rank-one signal corrupted by non-symmetric rotationally invariant noise with a generic distribution of singular values. As the signal-to-noise ratio and the noise structure are unknown, a Gaussian setup is incorrectly assumed. We derive the exact analytic expression for the error of the mismatched Bayes estimator and also provide the analysis of an approximate message passing (AMP) algorithm. The first result exploits the asymptotic behavior of spherical integrals for rectangular matrices and of low-rank matrix perturbations; the second one relies on the design and analysis of an auxiliary AMP. 
The numerical experiments show that there is a performance gap between the AMP and Bayes estimators, which is due to the incorrect estimation of the signal norm.}, author = {Fu, Teng and Liu, YuHao and Barbier, Jean and Mondelli, Marco and Liang, ShanSuo and Hou, TianQi}, booktitle = {Proceedings of 2023 IEEE International Symposium on Information Theory}, location = {Taipei, Taiwan}, publisher = {IEEE}, title = {{Mismatched estimation of non-symmetric rank-one matrices corrupted by structured noise}}, doi = {10.1109/isit54713.2023.10206671}, year = {2023}, } @inproceedings{14922, abstract = {We propose a novel approach to concentration for non-independent random variables. The main idea is to ``pretend'' that the random variables are independent and pay a multiplicative price measuring how far they are from actually being independent. This price is encapsulated in the Hellinger integral between the joint and the product of the marginals, which is then upper bounded leveraging tensorisation properties. Our bounds represent a natural generalisation of concentration inequalities in the presence of dependence: we recover exactly the classical bounds (McDiarmid's inequality) when the random variables are independent. Furthermore, in a ``large deviations'' regime, we obtain the same decay in the probability as for the independent case, even when the random variables display non-trivial dependencies. To show this, we consider a number of applications of interest. First, we provide a bound for Markov chains with finite state space. Then, we consider the Simple Symmetric Random Walk, which is a non-contracting Markov chain, and a non-Markovian setting in which the stochastic process depends on its entire past. To conclude, we propose an application to Markov Chain Monte Carlo methods, where our approach leads to an improved lower bound on the minimum burn-in period required to reach a certain accuracy. 
In all of these settings, we provide a regime of parameters in which our bound fares better than what the state of the art can provide.}, author = {Esposito, Amedeo Roberto and Mondelli, Marco}, booktitle = {Proceedings of 2023 IEEE International Symposium on Information Theory}, issn = {2157-8117}, location = {Taipei, Taiwan}, pages = {400--405}, publisher = {IEEE}, title = {{Concentration without independence via information measures}}, doi = {10.1109/isit54713.2023.10206899}, year = {2023}, } @article{11420, abstract = {Understanding the properties of neural networks trained via stochastic gradient descent (SGD) is at the heart of the theory of deep learning. In this work, we take a mean-field view, and consider a two-layer ReLU network trained via noisy-SGD for a univariate regularized regression problem. Our main result is that SGD with vanishingly small noise injected in the gradients is biased towards a simple solution: at convergence, the ReLU network implements a piecewise linear map of the inputs, and the number of “knot” points -- i.e., points where the tangent of the ReLU network estimator changes -- between two consecutive training inputs is at most three. In particular, as the number of neurons of the network grows, the SGD dynamics is captured by the solution of a gradient flow and, at convergence, the distribution of the weights approaches the unique minimizer of a related free energy, which has a Gibbs form. Our key technical contribution consists in the analysis of the estimator resulting from this minimizer: we show that its second derivative vanishes everywhere, except at some specific locations which represent the “knot” points. 
We also provide empirical evidence that knots at locations distinct from the data points might occur, as predicted by our theory.}, author = {Shevchenko, Aleksandr and Kungurtsev, Vyacheslav and Mondelli, Marco}, issn = {1533-7928}, journal = {Journal of Machine Learning Research}, number = {130}, pages = {1--55}, publisher = {Journal of Machine Learning Research}, title = {{Mean-field analysis of piecewise linear solutions for wide ReLU networks}}, volume = {23}, year = {2022}, } @inproceedings{12011, abstract = {We characterize the capacity for the discrete-time arbitrarily varying channel with discrete inputs, outputs, and states when (a) the encoder and decoder do not share common randomness, (b) the input and state are subject to cost constraints, (c) the transition matrix of the channel is deterministic given the state, and (d) at each time step the adversary can only observe the current and past channel inputs when choosing the state at that time. The achievable strategy involves stochastic encoding together with list decoding and a disambiguation step. The converse uses a two-phase "babble-and-push" strategy where the adversary chooses the state randomly in the first phase, list decodes the output, and then chooses state inputs to symmetrize the channel in the second phase. 
These results generalize prior work on specific channel models (additive, erasure) to general discrete alphabets and models.}, author = {Zhang, Yihan and Jaggi, Sidharth and Langberg, Michael and Sarwate, Anand D.}, booktitle = {2022 IEEE International Symposium on Information Theory}, isbn = {9781665421591}, issn = {2157-8095}, location = {Espoo, Finland}, pages = {2523--2528}, publisher = {IEEE}, title = {{The capacity of causal adversarial channels}}, doi = {10.1109/ISIT50566.2022.9834709}, volume = {2022}, year = {2022}, } @inproceedings{12017, abstract = {In the classic adversarial communication problem, two parties communicate over a noisy channel in the presence of a malicious jamming adversary. The arbitrarily varying channels (AVCs) offer an elegant framework to study a wide range of interesting adversary models. The optimal throughput or capacity over such AVCs is intimately tied to the underlying adversary model; in some cases, capacity is unknown and the problem is known to be notoriously hard. The omniscient adversary, one which knows the sender’s entire channel transmission a priori, is one of such classic models of interest; the capacity under such an adversary remains an exciting open problem. The myopic adversary is a generalization of that model where the adversary’s observation may be corrupted over a noisy discrete memoryless channel. Through the adversary’s myopicity, one can unify the slew of different adversary models, ranging from the omniscient adversary to one that is completely blind to the transmission (the latter is the well known oblivious model where the capacity is fully characterized). In this work, we present new results on the capacity under both the omniscient and myopic adversary models. We completely characterize the positive capacity threshold over general AVCs with omniscient adversaries. 
The characterization is in terms of two key combinatorial objects: the set of completely positive distributions and the CP-confusability set. For omniscient AVCs with positive capacity, we present non-trivial lower and upper bounds on the capacity; unlike some of the previous bounds, our bounds hold under fairly general input and jamming constraints. Our lower bound improves upon the generalized Gilbert-Varshamov bound for general AVCs while the upper bound generalizes the well known Elias-Bassalygo bound (known for binary and q-ary alphabets). For the myopic AVCs, we build on prior results known for the so-called sufficiently myopic model, and present new results on the positive rate communication threshold over the so-called insufficiently myopic regime (a completely insufficient myopic adversary specializes to an omniscient adversary). We present interesting examples for the widely studied models of adversarial bit-flip and bit-erasure channels. In fact, for the bit-flip AVC with additive adversarial noise as well as random noise, we completely characterize the omniscient model capacity when the random noise is sufficiently large vis-a-vis the adversary’s budget.}, author = {Yadav, Anuj Kumar and Alimohammadi, Mohammadreza and Zhang, Yihan and Budkuley, Amitalok J. and Jaggi, Sidharth}, booktitle = {2022 IEEE International Symposium on Information Theory}, isbn = {9781665421591}, issn = {2157-8095}, location = {Espoo, Finland}, pages = {2535--2540}, publisher = {Institute of Electrical and Electronics Engineers}, title = {{New results on AVCs with omniscient and myopic adversaries}}, doi = {10.1109/ISIT50566.2022.9834632}, volume = {2022}, year = {2022}, } @inproceedings{12013, abstract = {We consider the problem of communication over adversarial channels with feedback. Two parties comprising sender Alice and receiver Bob seek to communicate reliably. 
An adversary James observes Alice's channel transmission entirely and chooses, maliciously, its additive channel input or jamming state thereby corrupting Bob's observation. Bob can communicate over a one-way reverse link with Alice; we assume that transmissions over this feedback link cannot be corrupted by James. Our goal in this work is to study the optimum throughput or capacity over such channels with feedback. We first present results for the quadratically-constrained additive channel where communication is known to be impossible when the noise-to-signal (power) ratio (NSR) is at least 1. We present a novel achievability scheme to establish that positive rate communication is possible even when the NSR is as high as 8/9. We also present new converse upper bounds on the capacity of this channel under potentially stochastic encoders and decoders. We also study feedback communication over the more widely studied q-ary alphabet channel under additive noise. For the q -ary channel, where q > 2, it is well known that capacity is positive under full feedback if and only if the adversary can corrupt strictly less than half the transmitted symbols. We generalize this result and show that the same threshold holds for positive rate communication when the noiseless feedback may only be partial; our scheme employs a stochastic decoder. We extend this characterization, albeit partially, to fully deterministic schemes under partial noiseless feedback. We also present new converse upper bounds for q-ary channels under full feedback, where the encoder and/or decoder may privately randomize. Our converse results bring to the fore an interesting alternate expression for the well known converse bound for the q—ary channel under full feedback which, when specialized to the binary channel, also equals its known capacity.}, author = {Joshi, Pranav and Purkayastha, Amritakshya and Zhang, Yihan and Budkuley, Amitalok J. 
and Jaggi, Sidharth}, booktitle = {2022 IEEE International Symposium on Information Theory}, isbn = {9781665421591}, issn = {2157-8095}, location = {Espoo, Finland}, pages = {504--509}, publisher = {IEEE}, title = {{On the capacity of additive AVCs with feedback}}, doi = {10.1109/ISIT50566.2022.9834850}, volume = {2022}, year = {2022}, } @inproceedings{12016, abstract = {We consider the problem of coded distributed computing using polar codes. The average execution time of a coded computing system is related to the error probability for transmission over the binary erasure channel in recent work by Soleymani, Jamali and Mahdavifar, where the performance of binary linear codes is investigated. In this paper, we focus on polar codes and unveil a connection between the average execution time and the scaling exponent μ of the family of codes. In the finite-length characterization of polar codes, the scaling exponent is a key object capturing the speed of convergence to capacity. In particular, we show that (i) the gap between the normalized average execution time of polar codes and that of optimal MDS codes is O(n –1/μ ), and (ii) this upper bound can be improved to roughly O(n –1/2 ) by considering polar codes with large kernels. We conjecture that these bounds could be improved to O(n –2/μ ) and O(n –1 ), respectively, and provide a heuristic argument as well as numerical evidence supporting this view.}, author = {Fathollahi, Dorsa and Mondelli, Marco}, booktitle = {2022 IEEE International Symposium on Information Theory}, isbn = {9781665421591}, issn = {2157-8095}, location = {Espoo, Finland}, pages = {2154--2159}, publisher = {IEEE}, title = {{Polar coded computing: The role of the scaling exponent}}, doi = {10.1109/ISIT50566.2022.9834712}, volume = {2022}, year = {2022}, } @inproceedings{12012, abstract = {This paper is eligible for the Jack Keil Wolf ISIT Student Paper Award. 
We generalize a previous framework for designing utility-optimal differentially private (DP) mechanisms via graphs, where datasets are vertices in the graph and edges represent dataset neighborhood. The boundary set contains datasets where an individual’s response changes the binary-valued query compared to its neighbors. Previous work was limited to the homogeneous case where the privacy parameter $\varepsilon$ across all datasets was the same and the mechanism at boundary datasets was identical. In our work, the mechanism can take different distributions at the boundary and the privacy parameter $\varepsilon$ is a function of neighboring datasets, which recovers an earlier definition of personalized DP as a special case. The problem is how to extend the mechanism, which is only defined at the boundary set, to other datasets in the graph in a computationally efficient and utility optimal manner. Using the concept of strongest induced DP condition we solve this problem efficiently in polynomial time (in the size of the graph).}, author = {Torkamani, Sahel and Ebrahimi, Javad B. and Sadeghi, Parastoo and D'Oliveira, Rafael G. L. and Médard, Muriel}, booktitle = {2022 IEEE International Symposium on Information Theory}, isbn = {9781665421591}, issn = {2157-8095}, location = {Espoo, Finland}, pages = {1623--1628}, publisher = {IEEE}, title = {{Heterogeneous differential privacy via graphs}}, doi = {10.1109/ISIT50566.2022.9834711}, volume = {2022}, year = {2022}, } @inproceedings{12018, abstract = {We study the problem of characterizing the maximal rates of list decoding in Euclidean spaces for finite list sizes. For any positive integer $L \ge 2$ and real $N > 0$, we say that a subset $C \subset \mathbb{R}^n$ is an $(N, L-1)$-multiple packing or an $(N, L-1)$-list decodable code if every Euclidean ball of radius $\sqrt{nN}$ in $\mathbb{R}^n$ contains no more than $L-1$ points of $C$. We study this problem with and without $\ell_2$ norm constraints on $C$, and derive the best-known lower bounds on the maximal rate for $(N, L-1)$ multiple packing. 
Our bounds are obtained via error exponents for list decoding over Additive White Gaussian Noise (AWGN) channels. We establish a curious inequality which relates the error exponent, a quantity of average-case nature, to the list-decoding radius, a quantity of worst-case nature. We derive various bounds on the error exponent for list decoding in both bounded and unbounded settings which could be of independent interest beyond multiple packing.}, author = {Zhang, Yihan and Vatedka, Shashank}, booktitle = {2022 IEEE International Symposium on Information Theory}, isbn = {9781665421591}, issn = {2157-8095}, location = {Espoo, Finland}, pages = {1324--1329}, publisher = {Institute of Electrical and Electronics Engineers}, title = {{Lower bounds on list decoding capacity using error exponents}}, doi = {10.1109/ISIT50566.2022.9834815}, volume = {2022}, year = {2022}, } @inproceedings{12015, abstract = {We study the problem of high-dimensional multiple packing in Euclidean space. Multiple packing is a natural generalization of sphere packing and is defined as follows. Let $P, N > 0$ and $L \in \mathbb{Z}_{\ge 2}$. A multiple packing is a set $C$ of points in $\mathcal{B}^n(\underline{0}, \sqrt{nP})$ such that any point in $\mathbb{R}^n$ lies in the intersection of at most $L-1$ balls of radius $\sqrt{nN}$ around points in $C$. In this paper, we derive two lower bounds on the largest possible density of a multiple packing. These bounds are obtained through a stronger notion called average-radius multiple packing. Specifically, we exactly pin down the asymptotics of (expurgated) Gaussian codes and (expurgated) spherical codes under average-radius multiple packing. To this end, we apply tools from high-dimensional geometry and large deviation theory. The bound for spherical codes matches the previous best known bound which was obtained for the standard (weaker) notion of multiple packing through a curious connection with error exponents [Bli99], [ZV21]. 
The bound for Gaussian codes suggests that they are strictly inferior to spherical codes.}, author = {Zhang, Yihan and Vatedka, Shashank}, booktitle = {2022 IEEE International Symposium on Information Theory}, isbn = {9781665421591}, issn = {2157-8095}, location = {Espoo, Finland}, pages = {3085--3090}, publisher = {IEEE}, title = {{Lower bounds for multiple packing}}, doi = {10.1109/ISIT50566.2022.9834443}, volume = {2022}, year = {2022}, } @inproceedings{12014, abstract = {We study the problem of high-dimensional multiple packing in Euclidean space. Multiple packing is a natural generalization of sphere packing and is defined as follows. Let N > 0 and L∈Z≥2. A multiple packing is a set C of points in Rn such that any point in Rn lies in the intersection of at most L – 1 balls of radius nN−−−√ around points in C. Given a well-known connection with coding theory, multiple packings can be viewed as the Euclidean analog of list-decodable codes, which are well-studied for finite fields. In this paper, we exactly pin down the asymptotic density of (expurgated) Poisson Point Processes under a stronger notion called average-radius multiple packing. To this end, we apply tools from high-dimensional geometry and large deviation theory. This gives rise to the best known lower bound on the largest multiple packing density. Our result corrects a mistake in a previous paper by Blinovsky [Bli05].}, author = {Zhang, Yihan and Vatedka, Shashank}, booktitle = {2022 IEEE International Symposium on Information Theory}, isbn = {9781665421591}, issn = {2157-8095}, location = {Espoo, Finland}, pages = {2559--2564}, publisher = {IEEE}, title = {{List-decodability of Poisson Point Processes}}, doi = {10.1109/ISIT50566.2022.9834512}, volume = {2022}, year = {2022}, } @inproceedings{12019, abstract = {This paper studies combinatorial properties of codes for the Z-channel. 
A Z-channel with error fraction $\tau$ takes as input a length-$n$ binary codeword and injects in an adversarial manner up to $n\tau$ asymmetric errors, i.e., errors that only zero out bits but do not flip 0’s to 1’s. It is known that the largest $(L-1)$-list-decodable code for the Z-channel with error fraction $\tau$ has exponential (in $n$) size if $\tau$ is less than a critical value that we call the Plotkin point and has constant size if $\tau$ is larger than the threshold. The $(L-1)$-list-decoding Plotkin point is known to be $L^{-\frac{1}{L-1}} - L^{-\frac{L}{L-1}}$. In this paper, we show that the largest $(L-1)$-list-decodable code $\varepsilon$-above the Plotkin point has size $\Theta_L(\varepsilon^{-3/2})$ for any $L-1 \ge 1$.}, author = {Polyanskii, Nikita and Zhang, Yihan}, booktitle = {2022 IEEE International Symposium on Information Theory}, isbn = {9781665421591}, issn = {2157-8095}, location = {Espoo, Finland}, pages = {2553--2558}, publisher = {Institute of Electrical and Electronics Engineers}, title = {{List-decodable zero-rate codes for the Z-channel}}, doi = {10.1109/ISIT50566.2022.9834829}, volume = {2022}, year = {2022}, } @inproceedings{12540, abstract = {We consider the problem of signal estimation in generalized linear models defined via rotationally invariant design matrices. Since these matrices can have an arbitrary spectral distribution, this model is well suited for capturing complex correlation structures which often arise in applications. We propose a novel family of approximate message passing (AMP) algorithms for signal estimation, and rigorously characterize their performance in the high-dimensional limit via a state evolution recursion. Our rotationally invariant AMP has complexity of the same order as the existing AMP derived under the restrictive assumption of a Gaussian design; our algorithm also recovers this existing AMP as a special case. 
Numerical results showcase a performance close to Vector AMP (which is conjectured to be Bayes-optimal in some settings), but obtained with a much lower complexity, as the proposed algorithm does not require a computationally expensive singular value decomposition.}, author = {Venkataramanan, Ramji and Kögler, Kevin and Mondelli, Marco}, booktitle = {Proceedings of the 39th International Conference on Machine Learning}, location = {Baltimore, MD, United States}, publisher = {ML Research Press}, title = {{Estimation in rotationally invariant generalized linear models via approximate message passing}}, volume = {162}, year = {2022}, } @unpublished{12536, abstract = {We consider the problem of estimating a rank-1 signal corrupted by structured rotationally invariant noise, and address the following question: how well do inference algorithms perform when the noise statistics is unknown and hence Gaussian noise is assumed? While the matched Bayes-optimal setting with unstructured noise is well understood, the analysis of this mismatched problem is only at its premises. In this paper, we make a step towards understanding the effect of the strong source of mismatch which is the noise statistics. Our main technical contribution is the rigorous analysis of a Bayes estimator and of an approximate message passing (AMP) algorithm, both of which incorrectly assume a Gaussian setup. The first result exploits the theory of spherical integrals and of low-rank matrix perturbations; the idea behind the second one is to design and analyze an artificial AMP which, by taking advantage of the flexibility in the denoisers, is able to "correct" the mismatch. Armed with these sharp asymptotic characterizations, we unveil a rich and often unexpected phenomenology. For example, despite AMP is in principle designed to efficiently compute the Bayes estimator, the former is outperformed by the latter in terms of mean-square error. 
We show that this performance gap is due to an incorrect estimation of the signal norm. In fact, when the SNR is large enough, the overlaps of the AMP and the Bayes estimator coincide, and they even match those of optimal estimators taking into account the structure of the noise.}, author = {Barbier, Jean and Hou, TianQi and Mondelli, Marco and Saenz, Manuel}, booktitle = {arXiv}, title = {{The price of ignorance: How much does it cost to forget noise structure in low-rank matrix estimation?}}, doi = {10.48550/arXiv.2205.10009}, year = {2022}, } @unpublished{12860, abstract = {Memorization of the relation between entities in a dataset can lead to privacy issues when using a trained model for question answering. We introduce Relational Memorization (RM) to understand, quantify and control this phenomenon. While bounding general memorization can have detrimental effects on the performance of a trained model, bounding RM does not prevent effective learning. The difference is most pronounced when the data distribution is long-tailed, with many queries having only few training examples: Impeding general memorization prevents effective learning, while impeding only relational memorization still allows learning general properties of the underlying concepts. We formalize the notion of Relational Privacy (RP) and, inspired by Differential Privacy (DP), we provide a possible definition of Differential Relational Privacy (DrP). These notions can be used to describe and compute bounds on the amount of RM in a trained model. 
We illustrate Relational Privacy concepts in experiments with large-scale models for Question Answering.}, author = {Bombari, Simone and Achille, Alessandro and Wang, Zijian and Wang, Yu-Xiang and Xie, Yusheng and Singh, Kunwar Yashraj and Appalaraju, Srikar and Mahadevan, Vijay and Soatto, Stefano}, booktitle = {arXiv}, title = {{Towards differential relational privacy and its use in question answering}}, doi = {10.48550/arXiv.2203.16701}, year = {2022}, } @article{11639, abstract = {We study the list decodability of different ensembles of codes over the real alphabet under the assumption of an omniscient adversary. It is a well-known result that when the source and the adversary have power constraints P and N respectively, the list decoding capacity is equal to 1/2logP/N. Random spherical codes achieve constant list sizes, and the goal of the present paper is to obtain a better understanding of the smallest achievable list size as a function of the gap to capacity. We show a reduction from arbitrary codes to spherical codes, and derive a lower bound on the list size of typical random spherical codes. We also give an upper bound on the list size achievable using nested Construction-A lattices and infinite Construction-A lattices. We then define and study a class of infinite constellations that generalize Construction-A lattices and prove upper and lower bounds for the same. Other goodness properties such as packing goodness and AWGN goodness of infinite constellations are proved along the way. 
Finally, we consider random lattices sampled from the Haar distribution and show that if a certain conjecture that originates in analytic number theory is true, then the list size grows as a polynomial function of the gap-to-capacity.}, author = {Zhang, Yihan and Vatedka, Shashank}, issn = {1557-9654}, journal = {IEEE Transactions on Information Theory}, number = {12}, pages = {7753--7786}, publisher = {IEEE}, title = {{List decoding random Euclidean codes and Infinite constellations}}, doi = {10.1109/TIT.2022.3189542}, volume = {68}, year = {2022}, } @article{12233, abstract = {A novel recursive list decoding (RLD) algorithm for Reed-Muller (RM) codes based on successive permutations (SP) of the codeword is presented. A low-complexity SP scheme applied to a subset of the symmetry group of RM codes is first proposed to carefully select a good codeword permutation on the fly. Then, the proposed SP technique is integrated into an improved RLD algorithm that initializes different decoding paths with random codeword permutations, which are sampled from the full symmetry group of RM codes. Finally, efficient latency and complexity reduction schemes are introduced that virtually preserve the error-correction performance of the proposed decoder. 
Simulation results demonstrate that at the target frame error rate of 10−3 for the RM code of length 256 with 163 information bits, the proposed decoder reduces 6% of the computational complexity and 22% of the decoding latency of the state-of-the-art semi-parallel simplified successive-cancellation decoder with fast Hadamard transform (SSC-FHT) that uses 96 permutations from the full symmetry group of RM codes, while relatively maintaining the error-correction performance and memory consumption of the semi-parallel permuted SSC-FHT decoder.}, author = {Doan, Nghia and Hashemi, Seyyed Ali and Mondelli, Marco and Gross, Warren J.}, issn = {1558-0857}, journal = {IEEE Transactions on Communications}, number = {11}, pages = {7134--7145}, publisher = {Institute of Electrical and Electronics Engineers}, title = {{Decoding Reed-Muller codes with successive codeword permutations}}, doi = {10.1109/tcomm.2022.3211101}, volume = {70}, year = {2022}, } @article{12273, abstract = {We study communication in the presence of a jamming adversary where quadratic power constraints are imposed on the transmitter and the jammer. The jamming signal is allowed to be a function of the codebook, and a noncausal but noisy observation of the transmitted codeword. For a certain range of the noise-to-signal ratios (NSRs) of the transmitter and the jammer, we are able to characterize the capacity of this channel under deterministic encoding or stochastic encoding, i.e., with no common randomness between the encoder/decoder pair. For the remaining NSR regimes, we determine the capacity under the assumption of a small amount of common randomness (at most 2log(n) bits in one sub-regime, and at most Ω(n) bits in the other sub-regime) available to the encoder-decoder pair. Our proof techniques involve a novel myopic list-decoding result for achievability, and a Plotkin-type push attack for the converse in a subregion of the NSRs, both of which may be of independent interest. 
We also give bounds on the strong secrecy capacity of this channel assuming that the jammer is simultaneously eavesdropping.}, author = {Zhang, Yihan and Vatedka, Shashank and Jaggi, Sidharth and Sarwate, Anand D.}, issn = {1557-9654}, journal = {IEEE Transactions on Information Theory}, number = {8}, pages = {4901--4948}, publisher = {Institute of Electrical and Electronics Engineers}, title = {{Quadratically constrained myopic adversarial channels}}, doi = {10.1109/tit.2022.3167554}, volume = {68}, year = {2022}, } @article{10364, abstract = {This paper characterizes the latency of the simplified successive-cancellation (SSC) decoding scheme for polar codes under hardware resource constraints. In particular, when the number of processing elements P that can perform SSC decoding operations in parallel is limited, as is the case in practice, the latency of SSC decoding is O(N1-1/μ + N/P log2 log2 N/P), where N is the block length of the code and μ is the scaling exponent of the channel. Three direct consequences of this bound are presented. First, in a fully-parallel implementation where P = N/2, the latency of SSC decoding is O(N1-1/μ), which is sublinear in the block length. This recovers a result from our earlier work. Second, in a fully-serial implementation where P = 1, the latency of SSC decoding scales as O(N log2 log2 N). The multiplicative constant is also calculated: we show that the latency of SSC decoding when P = 1 is given by (2 + o(1))N log2 log2 N. Third, in a semi-parallel implementation, the smallest P that gives the same latency as that of the fully-parallel implementation is P = N1/μ. 
The tightness of our bound on SSC decoding latency and the applicability of the foregoing results is validated through extensive simulations.}, author = {Hashemi, Seyyed Ali and Mondelli, Marco and Fazeli, Arman and Vardy, Alexander and Cioffi, John and Goldsmith, Andrea}, issn = {1558-2248}, journal = {IEEE Transactions on Wireless Communications}, number = {6}, pages = {3909--3920}, publisher = {Institute of Electrical and Electronics Engineers}, title = {{Parallelism versus latency in simplified successive-cancellation decoding of polar codes}}, doi = {10.1109/TWC.2021.3125626}, volume = {21}, year = {2022}, } @article{12538, abstract = {In this paper, we study the compression of a target two-layer neural network with N nodes into a compressed network with M