@article{9828,
  abstract     = {Amplitude demodulation is a classical operation in signal processing. For a long time, its effective applications in practice have been limited to narrowband signals. In this work, we generalize amplitude demodulation to wideband signals. We pose demodulation as a recovery problem of an oversampled corrupted signal and introduce special iterative schemes, belonging to the family of alternating projection algorithms, to solve it. Sensibly chosen structural assumptions on the demodulation outputs allow us to reveal the high inferential accuracy of the method over a rich set of relevant signals. The new approach surpasses current state-of-the-art demodulation techniques applicable to wideband signals in computational efficiency by up to many orders of magnitude with no sacrifice in quality. Such performance opens the door to applications of amplitude demodulation in new contexts. In particular, the new method makes online and large-scale offline data processing feasible, including the calculation of modulator-carrier pairs in higher dimensions and under poor sampling conditions, independent of the signal bandwidth. We illustrate the utility and specifics of practical applications of the new method using natural speech and synthetic signals.},
  author       = {Gabrielaitis, Mantas},
  issn         = {1941-0476},
  journal      = {IEEE Transactions on Signal Processing},
  pages        = {4039--4054},
  publisher    = {IEEE},
  title        = {{Fast and accurate amplitude demodulation of wideband signals}},
  doi          = {10.1109/TSP.2021.3087899},
  volume       = {69},
  year         = {2021},
}
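
A minimal alternating-projection sketch in the spirit of this abstract, not the paper's actual scheme: the modulator estimate is alternately projected onto a low-pass (band-limited) set and onto the set of envelopes dominating |s|. The cutoff frequency, iteration count, and test signal below are illustrative assumptions.

```python
import numpy as np

def ap_demodulate(s, fs, f_cut, n_iter=100):
    """Alternate between two projections: (1) band-limit the modulator
    estimate to [0, f_cut] Hz, (2) enforce that it dominates |s|."""
    n = len(s)
    keep = np.fft.rfftfreq(n, d=1.0 / fs) <= f_cut  # low-pass mask
    m = np.abs(s).astype(float)                     # initial envelope guess
    for _ in range(n_iter):
        M = np.fft.rfft(m)
        M[~keep] = 0.0                              # project onto band-limited set
        m = np.fft.irfft(M, n)
        m = np.maximum(m, np.abs(s))                # project onto envelopes >= |s|
    return m

# usage: 10 Hz modulator on a 200 Hz carrier, sampled at 2 kHz
fs = 2000.0
t = np.arange(0, 1, 1 / fs)
mod = 1.0 + 0.5 * np.sin(2 * np.pi * 10 * t)
s = mod * np.cos(2 * np.pi * 200 * t)
m_hat = ap_demodulate(s, fs, f_cut=20.0)
print(np.max(np.abs(m_hat - mod)))                  # inspect the envelope error
```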

@article{8268,
  abstract     = {Modern scientific instruments produce vast amounts of data, which can overwhelm the processing ability of computer systems. Lossy compression of data is an intriguing solution but comes with its own drawbacks, such as potential signal loss and the need for careful optimization of the compression ratio. In this work, we focus on a setting where this problem is especially acute: compressive sensing frameworks for interferometry and medical imaging. We ask the following question: can the precision of the data representation be lowered for all inputs, with recovery guarantees and practical performance? Our first contribution is a theoretical analysis of the normalized Iterative Hard Thresholding (IHT) algorithm when all input data, meaning both the measurement matrix and the observation vector, are quantized aggressively. We present a variant of low-precision normalized IHT that, under mild conditions, can still provide recovery guarantees. The second contribution is the application of our quantization framework to radio astronomy and magnetic resonance imaging. We show that lowering the precision of the data can significantly accelerate image recovery. We evaluate our approach on telescope data and samples of brain images using CPU and FPGA implementations, achieving up to a 9x speedup with negligible loss of recovery quality.},
  author       = {Gürel, Nezihe Merve and Kara, Kaan and Stojanov, Alen and Smith, Tyler and Lemmin, Thomas and Alistarh, Dan-Adrian and Püschel, Markus and Zhang, Ce},
  issn         = {1941-0476},
  journal      = {IEEE Transactions on Signal Processing},
  pages        = {4268--4282},
  publisher    = {IEEE},
  title        = {{Compressive sensing using iterative hard thresholding with low precision data representation: Theory and applications}},
  doi          = {10.1109/TSP.2020.3010355},
  volume       = {68},
  year         = {2020},
}
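
A rough NumPy sketch of normalized IHT (in the spirit of Blumensath and Davies) run on uniformly quantized inputs; the quantizer, the 4-bit width, and the problem sizes are illustrative assumptions, not the paper's exact variant or its FPGA implementation.

```python
import numpy as np

def quantize(x, bits):
    """Uniform symmetric quantizer to the given bit width (illustrative)."""
    scale = np.max(np.abs(x)) / (2 ** (bits - 1) - 1)
    return np.round(x / scale) * scale

def hard_threshold(x, s):
    """Keep the s largest-magnitude entries of x, zero the rest."""
    out = np.zeros_like(x)
    idx = np.argsort(np.abs(x))[-s:]
    out[idx] = x[idx]
    return out

def normalized_iht(A, y, s, n_iter=200):
    """Normalized IHT: gradient step with an adaptive step size computed
    on the current support, followed by hard thresholding."""
    x = np.zeros(A.shape[1])
    for _ in range(n_iter):
        g = A.T.dot(y - A.dot(x))
        support = np.abs(x) > 0
        gs = np.where(support, g, 0.0) if support.any() else g
        denom = np.linalg.norm(A.dot(gs)) ** 2
        mu = np.linalg.norm(gs) ** 2 / denom if denom > 0 else 1.0
        x = hard_threshold(x + mu * g, s)
    return x

# usage: recover a 5-sparse vector from a 4-bit quantized matrix and measurements
rng = np.random.default_rng(0)
n, m, s = 256, 100, 5
A = rng.standard_normal((m, n)) / np.sqrt(m)
x_true = np.zeros(n)
x_true[rng.choice(n, s, replace=False)] = rng.standard_normal(s)
y = A.dot(x_true)
x_hat = normalized_iht(quantize(A, 4), quantize(y, 4), s)
print(np.linalg.norm(x_hat - x_true) / np.linalg.norm(x_true))  # relative error
```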

@article{6750,
  abstract     = {Polar codes have gained extensive attention during the past few years and have recently been selected for the next generation of wireless communications standards (5G). Successive-cancellation-based (SC-based) decoders, such as SC list (SCL) and SC flip (SCF), provide a reasonable error performance for polar codes at the cost of low decoding speed. Fast SC-based decoders, such as Fast-SSC, Fast-SSCL, and Fast-SSCF, identify the special constituent codes in a polar code graph off-line, produce a list of operations, store the list in memory, and feed the list to the decoder to decode the constituent codes efficiently in order, thus increasing the decoding speed. However, the list of operations depends on the code rate, and as the rate changes, a new list must be produced, making fast SC-based decoders not rate-flexible. In this paper, we propose a completely rate-flexible fast SC-based decoder that creates the list of operations directly in hardware, with low implementation complexity. We further propose a hardware architecture implementing the proposed method and show that the area occupation of the proposed rate-flexible fast SC-based decoder is only 38% of the total area of the memory-based baseline decoder when 5G code rates are supported.},
  author       = {Hashemi, Seyyed Ali and Condo, Carlo and Mondelli, Marco and Gross, Warren J.},
  issn         = {1053-587X},
  journal      = {IEEE Transactions on Signal Processing},
  number       = {22},
  publisher    = {IEEE},
  title        = {{Rate-flexible fast polar decoders}},
  doi          = {10.1109/TSP.2019.2944738},
  volume       = {67},
  year         = {2019},
}
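
The off-line list of operations mentioned above can be pictured as a scan of the frozen-bit pattern that tags each constituent code with a specialized decoder. The toy classification below rests on assumed node sizes and patterns; the paper's contribution is producing such a list directly in hardware.

```python
def classify_nodes(frozen, node_size):
    """Classify length-`node_size` constituent codes of a polar code by their
    frozen-bit pattern (1 = frozen), as fast SC-based decoders do when
    building the list of specialized operations."""
    ops = []
    for i in range(0, len(frozen), node_size):
        block = frozen[i:i + node_size]
        n_frozen = sum(block)
        if n_frozen == len(block):
            ops.append(("rate-0", i))          # all bits frozen
        elif n_frozen == 0:
            ops.append(("rate-1", i))          # no bits frozen
        elif n_frozen == len(block) - 1 and block[-1] == 0:
            ops.append(("repetition", i))      # single info bit, last position
        elif n_frozen == 1 and block[0] == 1:
            ops.append(("SPC", i))             # single parity check
        else:
            ops.append(("generic", i))         # fall back to standard SC
    return ops

# usage: a toy frozen pattern for a length-16 code, scanned in blocks of 4
frozen = [1, 1, 1, 1,  1, 1, 1, 0,  1, 0, 0, 0,  0, 0, 0, 0]
print(classify_nodes(frozen, node_size=4))
# [('rate-0', 0), ('repetition', 4), ('SPC', 8), ('rate-1', 12)]
```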

@article{18278,
  abstract     = {Solving inverse problems with iterative algorithms is popular, especially for large data. Due to time constraints, the number of possible iterations is usually limited, potentially affecting the achievable accuracy. Given an error one is willing to tolerate, an important question is whether it is possible to modify the original iterations to obtain faster convergence to a minimizer achieving the allowed error without considerably increasing the computational cost of each iteration. Relying on recent recovery techniques developed for settings in which the desired signal belongs to some low-dimensional set, we show that using a coarse estimate of this set may lead to faster convergence at the cost of an additional reconstruction error related to the accuracy of the set approximation. Our theory ties to recent advances in sparse recovery, compressed sensing, and deep learning. In particular, it may provide a possible explanation for the successful approximation of the $\ell_1$-minimization solution by neural networks with layers representing iterations, as practiced in the learned iterative shrinkage-thresholding algorithm (LISTA).},
  author       = {Giryes, Raja and Eldar, Yonina C. and Bronstein, Alexander and Sapiro, Guillermo},
  issn         = {1941-0476},
  journal      = {IEEE Transactions on Signal Processing},
  number       = {7},
  pages        = {1676--1690},
  publisher    = {IEEE},
  title        = {{Tradeoffs between convergence speed and reconstruction accuracy in inverse problems}},
  doi          = {10.1109/TSP.2018.2791945},
  volume       = {66},
  year         = {2018},
}
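
The learned iterative shrinkage-thresholding algorithm (LISTA) referenced at the end of the abstract unrolls ISTA iterations into network layers with learned parameters. A minimal ISTA sketch, with illustrative problem sizes, shows the convergence-speed versus accuracy tradeoff the paper formalizes.

```python
import numpy as np

def soft_threshold(x, tau):
    """Proximal operator of tau * ||x||_1."""
    return np.sign(x) * np.maximum(np.abs(x) - tau, 0.0)

def ista(A, y, lam, n_iter):
    """ISTA for min_x 0.5 * ||Ax - y||^2 + lam * ||x||_1: a gradient step
    followed by soft thresholding. LISTA replaces the fixed matrices and
    thresholds with learned, per-layer parameters."""
    L = np.linalg.norm(A, 2) ** 2      # Lipschitz constant of the gradient
    x = np.zeros(A.shape[1])
    for _ in range(n_iter):
        x = soft_threshold(x + A.T.dot(y - A.dot(x)) / L, lam / L)
    return x

# usage: the error typically shrinks as the iteration budget grows,
# which is the speed/accuracy tradeoff studied in the paper
rng = np.random.default_rng(1)
A = rng.standard_normal((64, 128)) / 8.0
x_true = np.zeros(128)
x_true[rng.choice(128, 6, replace=False)] = 1.0
y = A.dot(x_true)
for k in (5, 50, 500):
    print(k, np.linalg.norm(ista(A, y, lam=0.05, n_iter=k) - x_true))
```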

@article{18419,
  abstract     = {Three important properties of a classification machinery are i) the system preserves the core information of the input data; ii) the training examples convey information about unseen data; and iii) the system is able to treat differently points from different classes. In this paper, we show that these fundamental properties are satisfied by the architecture of deep neural networks. We formally prove that these networks with random Gaussian weights perform a distance-preserving embedding of the data, with a special treatment for in-class and out-of-class data. Similar points at the input of the network are likely to have a similar output. The theoretical analysis of deep networks here presented exploits tools used in the compressed sensing and dictionary learning literature, thereby making a formal connection between these important topics. The derived results allow drawing conclusions on the metric learning properties of the network and their relation to its structure, as well as providing bounds on the required size of the training set such that the training examples would represent faithfully the unseen data. The results are validated with state-of-the-art trained networks.},
  author       = {Giryes, Raja and Sapiro, Guillermo and Bronstein, Alexander},
  issn         = {1941-0476},
  journal      = {IEEE Transactions on Signal Processing},
  number       = {13},
  pages        = {3444--3457},
  publisher    = {IEEE},
  title        = {{Deep neural networks with random Gaussian weights: A universal classification strategy?}},
  doi          = {10.1109/TSP.2016.2546221},
  volume       = {64},
  year         = {2016},
}
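
A quick empirical companion to the abstract's claim: a network with random Gaussian weights and ReLU activations tends to map nearby inputs to nearby outputs and distant inputs to distant outputs. The depth, widths, and 1/sqrt(width) scaling below are assumptions for illustration.

```python
import numpy as np

def random_relu_net(widths, rng):
    """A feed-forward net with random Gaussian weights and ReLU activations,
    as in the paper's theoretical model (no training involved)."""
    Ws = [rng.standard_normal((widths[i + 1], widths[i])) / np.sqrt(widths[i])
          for i in range(len(widths) - 1)]
    def f(x):
        for W in Ws:
            x = np.maximum(W.dot(x), 0.0)   # ReLU
        return x
    return f

# usage: nearby inputs tend to stay nearby, distant inputs to stay distant
rng = np.random.default_rng(2)
f = random_relu_net([32, 64, 64, 64, 64], rng)
x = rng.standard_normal(32)
near = x + 0.01 * rng.standard_normal(32)
far = rng.standard_normal(32)
print(np.linalg.norm(f(x) - f(near)))   # small distortion of a small distance
print(np.linalg.norm(f(x) - f(far)))    # noticeably larger
```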

@article{18417,
  abstract     = {We propose a relative optimization framework for quasi-maximum likelihood (QML) blind deconvolution and the relative Newton method as its particular instance. Special Hessian structure allows fast Newton system construction and solution, resulting in a fast-convergent algorithm with iteration complexity comparable to that of gradient methods. We also propose the use of rational infinite impulse response (IIR) restoration kernels, which constitute a richer family of filters than the traditionally used finite impulse response (FIR) kernels. We discuss different choices of nonlinear functions that are suitable for deconvolution of super- and sub-Gaussian sources and formulate the conditions under which the QML estimation is stable. Simulation results demonstrate the efficiency of the proposed methods.},
  author       = {Bronstein, Alexander and Bronstein, Michael M. and Zibulevsky, Michael},
  issn         = {1053-587X},
  journal      = {IEEE Transactions on Signal Processing},
  number       = {6},
  pages        = {2018--2026},
  publisher    = {IEEE},
  title        = {{Relative optimization for blind deconvolution}},
  doi          = {10.1109/TSP.2005.847822},
  volume       = {53},
  year         = {2005},
}
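
A gradient-based sketch of the relative-optimization idea only: each step takes a small update V near the identity kernel on the current output and composes it with the running restoration kernel. The paper's relative Newton method additionally exploits the special Hessian structure; the tanh nonlinearity, kernel length, step size, and source model here are assumptions.

```python
import numpy as np

def qml_relative_deconv(y, kernel_len=16, n_iter=200, mu=0.05):
    """Relative-gradient sketch of QML blind deconvolution: treat the current
    output x as the input, take a multiplicative update V close to the
    identity kernel along the relative gradient of the QML cost, and
    compose V with the running restoration kernel w."""
    w = np.zeros(kernel_len)
    w[0] = 1.0                        # start from the identity kernel
    phi = np.tanh                     # QML nonlinearity for super-Gaussian sources
    for _ in range(n_iter):
        x = np.convolve(y, w)[:len(y)]
        # relative gradient: E[phi(x_t) x_{t-k}] - delta_k
        g = np.array([np.dot(phi(x[k:]), x[:len(x) - k])
                      for k in range(kernel_len)]) / len(x)
        v = -mu * g
        v[0] += 1.0 + mu              # V = delta - mu * (g - delta)
        w = np.convolve(v, w)[:kernel_len]
    return w

# usage: sparse (super-Gaussian) source through an unknown minimum-phase filter
rng = np.random.default_rng(3)
s = rng.laplace(size=4000)
h = np.array([1.0, 0.6, -0.3])
y = np.convolve(s, h)[:len(s)]
w = qml_relative_deconv(y)
print(np.round(np.convolve(h, w)[:6], 2))  # should approach a scaled delta
```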

@article{18418,
  abstract     = {In this correspondence, we consider the problem of multiple-input multiple-output (MIMO) quasi-maximum likelihood (QML) blind deconvolution. We examine two classes of estimators, which are commonly believed to be suitable for super- and sub-Gaussian sources. We state the consistency conditions and demonstrate a source distribution for which the studied estimators are unsuitable, in the sense that they are inconsistent.},
  author       = {Bronstein, Alexander and Bronstein, Michael M. and Zibulevsky, Michael},
  issn         = {1941-0476},
  journal      = {IEEE Transactions on Signal Processing},
  number       = {7},
  pages        = {2576--2579},
  publisher    = {IEEE},
  title        = {{Quasi maximum likelihood MIMO blind deconvolution: Super- and sub-Gaussianity versus consistency}},
  doi          = {10.1109/TSP.2005.849221},
  volume       = {53},
  year         = {2005},
}