% NOTE(review): "von" is a name particle; BibTeX parses names as "von Last, First",
% so "Kügelgen, Julius von" was corrected to "von Kügelgen, Julius" (otherwise the
% particle is treated as part of the first name and sorting/formatting break).
@inproceedings{14182,
  abstract     = {When machine learning systems meet real world applications, accuracy is only
one of several requirements. In this paper, we assay a complementary
perspective originating from the increasing availability of pre-trained and
regularly improving state-of-the-art models. While new improved models develop
at a fast pace, downstream tasks vary more slowly or stay constant. Assume that
we have a large unlabelled data set for which we want to maintain accurate
predictions. Whenever a new and presumably better ML models becomes available,
we encounter two problems: (i) given a limited budget, which data points should
be re-evaluated using the new model?; and (ii) if the new predictions differ
from the current ones, should we update? Problem (i) is about compute cost,
which matters for very large data sets and models. Problem (ii) is about
maintaining consistency of the predictions, which can be highly relevant for
downstream applications; our demand is to avoid negative flips, i.e., changing
correct to incorrect predictions. In this paper, we formalize the Prediction
Update Problem and present an efficient probabilistic approach as answer to the
above questions. In extensive experiments on standard classification benchmark
data sets, we show that our method outperforms alternative strategies along key
metrics for backward-compatible prediction updates.},
  author       = {Träuble, Frederik and von Kügelgen, Julius and Kleindessner, Matthäus and Locatello, Francesco and Schölkopf, Bernhard and Gehler, Peter},
  booktitle    = {35th Conference on Neural Information Processing Systems},
  isbn         = {9781713845393},
  location     = {Virtual},
  pages        = {116--128},
  title        = {{Backward-compatible prediction updates: A probabilistic approach}},
  volume       = {34},
  year         = {2021},
}

% NOTE(review): the abstract reads like a patent claim ("A method involves..."),
% but this @misc entry carries no howpublished, number, or url identifying the
% source — confirm whether this is a patent (add the patent number via
% number/howpublished) or a preprint (add eprint/eprinttype), and update accordingly.
@misc{14185,
  abstract     = {A method involves receiving a perceptual representation including a plurality of feature vectors, and initializing a plurality of slot vectors represented by a neural network memory unit. Each respective slot vector is configured to represent a corresponding entity in the perceptual representation. The method also involves determining an attention matrix based on a product of the plurality of feature vectors transformed by a key function and the plurality of slot vectors transformed by a query function. Each respective value of a plurality of values along each respective dimension of the attention matrix is normalized with respect to the plurality of values. The method additionally involves determining an update matrix based on the plurality of feature vectors transformed by a value function and the attention matrix, and updating the plurality of slot vectors based on the update matrix by way of the neural network memory unit.},
  author       = {Weissenborn, Dirk and Uszkoreit, Jakob and Unterthiner, Thomas and Mahendran, Aravindh and Locatello, Francesco and Kipf, Thomas and Heigold, Georg and Dosovitskiy, Alexey},
  title        = {{Object-centric learning with slot attention}},
  year         = {2021},
}

% NOTE(review): @unpublished requires a "note" field and does not take "booktitle";
% the arXiv origin is now recorded via eprint/eprinttype (identifier taken from the
% DOI already present in the entry) instead of booktitle = {arXiv}.
@unpublished{14221,
  abstract     = {The world is structured in countless ways. It may be prudent to enforce corresponding structural properties to a learning algorithm's solution, such as incorporating prior beliefs, natural constraints, or causal structures. Doing so may translate to faster, more accurate, and more flexible models, which may directly relate to real-world impact. In this dissertation, we consider two different research areas that concern structuring a learning algorithm's solution: when the structure is known and when it has to be discovered.},
  author       = {Locatello, Francesco},
  eprint       = {2111.13693},
  eprinttype   = {arXiv},
  note         = {arXiv preprint},
  title        = {{Enforcing and discovering structure in machine learning}},
  doi          = {10.48550/arXiv.2111.13693},
  year         = {2021},
}

% NOTE(review): workshop paper with no pages, DOI, or URL — add an identifier
% (e.g. the workshop/OpenReview page) if one exists so the entry stays resolvable.
@inproceedings{14332,
  abstract     = {Learning data representations that are useful for various downstream tasks is a cornerstone of artificial intelligence. While existing methods are typically evaluated on downstream tasks such as classification or generative image quality, we propose to assess representations through their usefulness in downstream control tasks, such as reaching or pushing objects. By training over 10,000 reinforcement learning policies, we extensively evaluate to what extent different representation properties affect out-of-distribution (OOD) generalization. Finally, we demonstrate zero-shot transfer of these policies from simulation to the real world, without any domain randomization or fine-tuning. This paper aims to establish the first systematic characterization of the usefulness of learned representations for real-world OOD downstream tasks.},
  author       = {Träuble, Frederik and Dittadi, Andrea and Wuthrich, Manuel and Widmaier, Felix and Gehler, Peter Vincent and Winther, Ole and Locatello, Francesco and Bachem, Olivier and Schölkopf, Bernhard and Bauer, Stefan},
  booktitle    = {ICML 2021 Workshop on Unsupervised Reinforcement Learning},
  location     = {Virtual},
  title        = {{Representation learning for out-of-distribution generalization in reinforcement learning}},
  year         = {2021},
}

% Journal article, J. Phys. Chem. Lett. 12(44):10802--10807 (2021). Entry follows
% this file's conventions: bare DOI, double-hyphen page range, UTF-8 author names.
@article{17876,
  abstract     = {The scanning tunneling microscope-based break-junction (STM-BJ) technique is the most common method used to study the electronic properties of single-molecule junctions. It relies on repeatedly forming and rupturing a Au contact in an environment of the target molecules. The probability of junction formation is typically very high (∼70–95%), prompting questions relating to how the nanoscale structure of the Au electrode before the metal point contact ruptures alters junction formation. Here we analyze conductance traces measured with the STM-BJ setup by combining correlation analysis and multiple machine learning tools, including gradient-boosted trees and neural networks. We show that two key features describing the Au–Au contact prior to rupture determine the extent of contact relaxation (snapback) and the probability of junction formation. Importantly, our data strongly indicate that molecular junctions are formed prior to the rupture of the Au–Au contact, explaining the high probability of junction formation observed in room-temperature solution measurements.},
  author       = {Fu, Tianren and Frommer, Kathleen and Nuckolls, Colin and Venkataraman, Latha},
  issn         = {1948-7185},
  journal      = {The Journal of Physical Chemistry Letters},
  number       = {44},
  pages        = {10802--10807},
  publisher    = {American Chemical Society},
  title        = {{Single-molecule junction formation in break-junction measurements}},
  doi          = {10.1021/acs.jpclett.1c03160},
  volume       = {12},
  year         = {2021},
}

% Review article, Nature Reviews Chemistry 5(10):695--710 (2021). Entry is complete
% and consistent with the file's conventions (bare DOI, -- page range).
@article{17877,
  abstract     = {Chemical reactions that occur at nanostructured electrodes have garnered widespread interest because of their potential applications in fields including nanotechnology, green chemistry and fundamental physical organic chemistry. Much of our present understanding of these reactions comes from probes that interrogate ensembles of molecules undergoing various stages of the transformation concurrently. Exquisite control over single-molecule reactivity lets us construct new molecules and further our understanding of nanoscale chemical phenomena. We can study single molecules using instruments such as the scanning tunnelling microscope, which can additionally be part of a mechanically controlled break junction. These are unique tools that can offer a high level of detail. They probe the electronic conductance of individual molecules and catalyse chemical reactions by establishing environments with reactive metal sites on nanoscale electrodes. This Review describes how chemical reactions involving bond cleavage and formation can be triggered at nanoscale electrodes and studied one molecule at a time.},
  author       = {Stone, Ilana and Starr, Rachel L. and Zang, Yaping and Nuckolls, Colin and Steigerwald, Michael L. and Lambert, Tristan H. and Roy, Xavier and Venkataraman, Latha},
  issn         = {2397-3358},
  journal      = {Nature Reviews Chemistry},
  number       = {10},
  pages        = {695--710},
  publisher    = {Springer Nature},
  title        = {{A single-molecule blueprint for synthesis}},
  doi          = {10.1038/s41570-021-00316-y},
  volume       = {5},
  year         = {2021},
}

% Journal article, Chemical Science 12(30):10299--10305 (2021). Entry is complete
% and consistent with the file's conventions.
@article{17899,
  abstract     = {Designing highly insulating sub-nanometer molecules is difficult because tunneling conductance increases exponentially with decreasing molecular length. This challenge is further enhanced by the fact that most molecules cannot achieve full conductance suppression with destructive quantum interference. Here, we present results for a series of small saturated heterocyclic alkanes where we show that conductance is suppressed due to destructive interference. Using the STM-BJ technique and density functional theory calculations, we confirm that their single-molecule junction conductance is lower than analogous alkanes of similar length. We rationalize the suppression of conductance in the junctions through analysis of the computed ballistic current density. We find there are highly symmetric ring currents, which reverse direction at the antiresonance in the Landauer transmission near the Fermi energy. This pattern has not been seen in earlier studies of larger bicyclic systems exhibiting interference effects and constitutes clear-cut evidence of destructive σ-interference. The finding of heterocyclic alkanes with destructive quantum interference charts a pathway for chemical design of short molecular insulators using organic molecules.},
  author       = {Zhang, Boyuan and Garner, Marc H. and Li, Liang and Campos, Luis M. and Solomon, Gemma C. and Venkataraman, Latha},
  issn         = {2041-6539},
  journal      = {Chemical Science},
  number       = {30},
  pages        = {10299--10305},
  publisher    = {Royal Society of Chemistry},
  title        = {{Destructive quantum interference in heterocyclic alkanes: The search for ultra-short molecular insulators}},
  doi          = {10.1039/d1sc02287c},
  volume       = {12},
  year         = {2021},
}

% NOTE(review): cleaned publisher-export artifacts in the abstract — superscript
% inline-citation markers that had been flattened into the running text
% ("characteristics1,2,3,4", "(EF)5,6,7,8", "control9", "benzothiadiazole10")
% were removed, the subscript in the Fermi-energy symbol was restored as $E_F$,
% and the bare "10^4" (a LaTeX error outside math mode) is now $10^4$.
@article{17900,
  abstract     = {To rival the performance of modern integrated circuits, single-molecule devices must be designed to exhibit extremely nonlinear current–voltage (I–V) characteristics. A common approach is to design molecular backbones where destructive quantum interference (QI) between the highest occupied molecular orbital (HOMO) and the lowest unoccupied molecular orbital (LUMO) produces a nonlinear energy-dependent tunnelling probability near the electrode Fermi energy ($E_F$). However, tuning such systems is not straightforward, as aligning the frontier orbitals to $E_F$ is hard to control. Here, we instead create a molecular system where constructive QI between the HOMO and LUMO is suppressed and destructive QI between the HOMO and strongly coupled occupied orbitals of opposite phase is enhanced. We use a series of fluorene oligomers containing a central benzothiadiazole unit to demonstrate that this strategy can be used to create highly nonlinear single-molecule circuits. Notably, we are able to reproducibly modulate the conductance of a 6-nm molecule by a factor of more than $10^4$.},
  author       = {Greenwald, Julia E. and Cameron, Joseph and Findlay, Neil J. and Fu, Tianren and Gunasekaran, Suman and Skabara, Peter J. and Venkataraman, Latha},
  issn         = {1748-3395},
  journal      = {Nature Nanotechnology},
  number       = {3},
  pages        = {313--317},
  publisher    = {Springer Nature},
  title        = {{Highly nonlinear transport across single-molecule junctions via destructive quantum interference}},
  doi          = {10.1038/s41565-020-00807-x},
  volume       = {16},
  year         = {2021},
}

% Journal article, Chemical Communications 57(5):591--594 (2021). Entry is complete
% and consistent with the file's conventions (UTF-8 accented author names).
@article{17901,
  abstract     = {A 1,1,2,2-tetrakis(4-aminophenyl)ethene with three paths of π-conjugation, linear-cis, linear-trans and a cross-conjugation, has been prepared. The molecule is able to bind to gold electrodes forming molecular junctions for single-molecule conductance measurements. Only two regimes of conduction are found experimentally. The modelling of the conductance allows to assign them to through-bond transmission in the linear case, while the cross-conjugated channel is further assisted by through-space transmission, partially alleviating the destructive quantum interference.},
  author       = {Medina Rivero, Samara and García Arroyo, Paloma and Li, Liang and Gunasekaran, Suman and Stuyver, Thijs and Mancheño, María José and Alonso, Mercedes and Venkataraman, Latha and Segura, José L. and Casado, Juan},
  issn         = {1364-548X},
  journal      = {Chemical Communications},
  number       = {5},
  pages        = {591--594},
  publisher    = {Royal Society of Chemistry},
  title        = {{Single-molecule conductance in a unique cross-conjugated tetra(aminoaryl)ethene}},
  doi          = {10.1039/d0cc07124b},
  volume       = {57},
  year         = {2021},
}

% NOTE(review): added the article number — Physical Review Letters uses eLocators
% rather than page ranges, and this entry had none. The value 150504 is taken from
% the DOI suffix (10.1103/physrevlett.127.150504); confirm against the published record.
@article{18192,
  abstract     = {Current quantum simulation experiments are starting to explore nonequilibrium many-body dynamics in previously inaccessible regimes in terms of system sizes and timescales. Therefore, the question emerges as to which observables are best suited to study the dynamics in such quantum many-body systems. Using machine learning techniques, we investigate the dynamics and, in particular, the thermalization behavior of an interacting quantum system that undergoes a nonequilibrium phase transition from an ergodic to a many-body localized phase. We employ supervised and unsupervised training methods to distinguish nonequilibrium from equilibrium data, using the network performance as a probe for the thermalization behavior of the system. We test our methods with experimental snapshots of ultracold atoms taken with a quantum gas microscope. Our results provide a path to analyze highly entangled large-scale quantum states for system sizes where numerical calculations of conventional observables become challenging.},
  author       = {Bohrdt, A. and Kim, S. and Lukin, A. and Rispoli, M. and Schittko, R. and Knap, M. and Greiner, M. and Leonard, Julian},
  issn         = {0031-9007},
  journal      = {Physical Review Letters},
  number       = {15},
  pages        = {150504},
  publisher    = {American Physical Society},
  title        = {{Analyzing nonequilibrium quantum states through snapshots with artificial neural networks}},
  doi          = {10.1103/physrevlett.127.150504},
  volume       = {127},
  year         = {2021},
}

% NOTE(review): added the article number — Physical Review B uses eLocators rather
% than page ranges, and this entry had none. The value L161101 is taken from the
% DOI suffix (10.1103/physrevb.103.l161101, Letters get the "L" prefix); confirm
% against the published record.
@article{18193,
  abstract     = {Topological states of matter, such as fractional quantum Hall states, are an active field of research due to their exotic excitations. In particular, ultracold atoms in optical lattices provide a highly controllable and adaptable platform to study such new types of quantum matter. However, finding a clear route to realize non-Abelian quantum Hall states in these systems remains challenging. Here we use the density-matrix renormalization-group (DMRG) method to study the Hofstadter-Bose-Hubbard model at filling factor 𝜈=1 and find strong indications that at 𝛼=1/6 magnetic flux quanta per plaquette the ground state is a lattice analog of the continuum non-Abelian Pfaffian. We study the on-site correlations of the ground state, which indicate its paired nature at 𝜈=1, and find an incompressible state characterized by a charge gap in the bulk. We argue that the emergence of a charge density wave on thin cylinders and the behavior of the two- and three-particle correlation functions at short distances provide evidence for the state being closely related to the continuum Pfaffian. The signatures discussed in this letter are accessible in current cold atom experiments and we show that the Pfaffian-like state is readily realizable in few-body systems using adiabatic preparation schemes.},
  author       = {Palm, F. A. and Buser, M. and Leonard, Julian and Aidelsburger, M. and Schollwöck, U. and Grusdt, F.},
  issn         = {2469-9969},
  journal      = {Physical Review B},
  number       = {16},
  pages        = {L161101},
  publisher    = {American Physical Society},
  title        = {{Bosonic Pfaffian state in the Hofstadter-Bose-Hubbard model}},
  doi          = {10.1103/physrevb.103.l161101},
  volume       = {103},
  year         = {2021},
}

% NOTE(review): issue range "11-12" corrected to "11--12" — ranges use a double
% hyphen (en-dash), consistent with the pages field of this and every other entry.
@article{18233,
  abstract     = {Neural network quantization enables the deployment of large models on resource-constrained devices. Current post-training quantization methods fall short in terms of accuracy for INT4 (or lower) but provide reasonable accuracy for INT8 (or above). In this work, we study the effect of quantization on the structure of the loss landscape. We show that the structure is flat and separable for mild quantization, enabling straightforward post-training quantization methods to achieve good results. We show that with more aggressive quantization, the loss landscape becomes highly non-separable with steep curvature, making the selection of quantization parameters more challenging. Armed with this understanding, we design a method that quantizes the layer parameters jointly, enabling significant accuracy improvement over current post-training quantization methods. Reference implementation is available at https://github.com/ynahshan/nn-quantization-pytorch/tree/master/lapq.},
  author       = {Nahshan, Yury and Chmiel, Brian and Baskin, Chaim and Zheltonozhskii, Evgenii and Banner, Ron and Bronstein, Alexander and Mendelson, Avi},
  issn         = {1573-0565},
  journal      = {Machine Learning},
  number       = {11--12},
  pages        = {3245--3262},
  publisher    = {Springer Nature},
  title        = {{Loss aware post-training quantization}},
  doi          = {10.1007/s10994-021-06053-z},
  volume       = {110},
  year         = {2021},
}

% NOTE(review): author "Zheltonozhkii, Evgenii" corrected to "Zheltonozhskii,
% Evgenii" for consistency with the same person's other entries in this file
% (18233, 18237, 18238) — two spellings of one author break grouping and sorting.
% NOTE(review): no pages/article number; the DOI suffix (math9172144) suggests
% MDPI article 2144 — confirm and add.
@article{18234,
  abstract     = {Convolutional Neural Networks (CNNs) are very popular in many fields including computer vision, speech recognition, natural language processing, etc. Though deep learning leads to groundbreaking performance in those domains, the networks used are very computationally demanding and are far from being able to perform in real-time applications even on a GPU, which is not power efficient and therefore does not suit low power systems such as mobile devices. To overcome this challenge, some solutions have been proposed for quantizing the weights and activations of these networks, which accelerate the runtime significantly. Yet, this acceleration comes at the cost of a larger error unless spatial adjustments are carried out. The method proposed in this work trains quantized neural networks by noise injection and a learned clamping, which improve accuracy. This leads to state-of-the-art results on various regression and classification tasks, e.g., ImageNet classification with architectures such as ResNet-18/34/50 with as low as 3 bit weights and activations. We implement the proposed solution on an FPGA to demonstrate its applicability for low-power real-time applications. The quantization code will become publicly available upon acceptance.},
  author       = {Baskin, Chaim and Zheltonozhskii, Evgenii and Rozen, Tal and Liss, Natan and Chai, Yoav and Schwartz, Eli and Giryes, Raja and Bronstein, Alexander and Mendelson, Avi},
  issn         = {2227-7390},
  journal      = {Mathematics},
  number       = {17},
  publisher    = {MDPI},
  title        = {{NICE: Noise Injection and Clamping Estimation for neural network quantization}},
  doi          = {10.3390/math9172144},
  volume       = {9},
  year         = {2021},
}

% Journal article, Pattern Recognition Letters 149:130--136 (2021).
% NOTE(review): no issue number is given (volume-only) — confirm whether this
% volume carries issue numbers and add one if so.
@article{18235,
  abstract     = {Recently, great progress has been made in the field of Few-Shot Learning (FSL). While many different methods have been proposed, one of the key factors leading to higher FSL performance is surprisingly simple. It is the backbone network architecture used to embed the images of the few-shot tasks. While first works on FSL resorted to small architectures with just a few convolution layers, recent works show that large architectures pre-trained on the training portion of FSL datasets produce strong features that are more easily transferable to novel few-shot tasks, thus attaining significant gains to methods using them. Despite these observations, little to no work has been done towards finding the right backbone for FSL. In this paper we propose MetAdapt that not only meta-searches for an optimized architecture for FSL using Network Architecture Search (NAS), but also results in a model that can adaptively ‘re-wire’ itself predicting the better architecture for a given novel few-shot task. Using the proposed approach we observe strong results on two popular few-shot benchmarks: miniImageNet and FC100.},
  author       = {Doveh, Sivan and Schwartz, Eli and Xue, Chao and Feris, Rogerio and Bronstein, Alexander and Giryes, Raja and Karlinsky, Leonid},
  issn         = {0167-8655},
  journal      = {Pattern Recognition Letters},
  pages        = {130--136},
  publisher    = {Elsevier},
  title        = {{MetAdapt: Meta-learned task-adaptive architecture for few-shot classification}},
  doi          = {10.1016/j.patrec.2021.05.010},
  volume       = {149},
  year         = {2021},
}

% Journal article, PNAS 118(24), 2021.
% NOTE(review): no pages/article number; PNAS uses eLocators — the DOI suffix
% (pnas.2020620118) suggests eLocator e2020620118; confirm and add.
@article{18236,
  abstract     = {Despite their great promise, artificial intelligence (AI) systems have yet to become ubiquitous in the daily practice of medicine largely due to several crucial unmet needs of healthcare practitioners. These include lack of explanations in clinically meaningful terms, handling the presence of unknown medical conditions, and transparency regarding the system’s limitations, both in terms of statistical performance as well as recognizing situations for which the system’s predictions are irrelevant. We articulate these unmet clinical needs as machine-learning (ML) problems and systematically address them with cutting-edge ML techniques. We focus on electrocardiogram (ECG) analysis as an example domain in which AI has great potential and tackle two challenging tasks: the detection of a heterogeneous mix of known and unknown arrhythmias from ECG and the identification of underlying cardio-pathology from segments annotated as normal sinus rhythm recorded in patients with an intermittent arrhythmia. We validate our methods by simulating a screening for arrhythmias in a large-scale population while adhering to statistical significance requirements. Specifically, our system 1) visualizes the relative importance of each part of an ECG segment for the final model decision; 2) upholds specified statistical constraints on its out-of-sample performance and provides uncertainty estimation for its predictions; 3) handles inputs containing unknown rhythm types; and 4) handles data from unseen patients while also flagging cases in which the model’s outputs are not usable for a specific patient. This work represents a significant step toward overcoming the limitations currently impeding the integration of AI into clinical practice in cardiology and medicine in general.},
  author       = {Elul, Yonatan and Rosenberg, Aviv A. and Schuster, Assaf and Bronstein, Alexander and Yaniv, Yael},
  issn         = {1091-6490},
  journal      = {Proceedings of the National Academy of Sciences},
  number       = {24},
  publisher    = {National Academy of Sciences},
  title        = {{Meeting the unmet needs of clinicians from AI systems showcased for cardiology with deep-learning–based ECG analysis}},
  doi          = {10.1073/pnas.2020620118},
  volume       = {118},
  year         = {2021},
}

% NOTE(review): issue range "1-4" corrected to "1--4" — ranges use a double hyphen
% (en-dash), consistent with the pages field of this and every other entry.
@article{18237,
  abstract     = {We present a novel method for neural network quantization. Our method, named UNIQ, emulates a non-uniform k-quantile quantizer and adapts the model to perform well with quantized weights by injecting noise to the weights at training time. As a by-product of injecting noise to weights, we find that activations can also be quantized to as low as 8-bit with only a minor accuracy degradation. Our non-uniform quantization approach provides a novel alternative to the existing uniform quantization techniques for neural networks. We further propose a novel complexity metric of number of bit operations performed (BOPs), and we show that this metric has a linear relation with logic utilization and power. We suggest evaluating the trade-off of accuracy vs. complexity (BOPs). The proposed method, when evaluated on ResNet18/34/50 and MobileNet on ImageNet, outperforms the prior state of the art both in the low-complexity regime and the high accuracy regime. We demonstrate the practical applicability of this approach, by implementing our non-uniformly quantized CNN on FPGA.},
  author       = {Baskin, Chaim and Liss, Natan and Schwartz, Eli and Zheltonozhskii, Evgenii and Giryes, Raja and Bronstein, Alexander and Mendelson, Avi},
  issn         = {1557-7333},
  journal      = {ACM Transactions on Computer Systems},
  number       = {1--4},
  pages        = {1--15},
  publisher    = {Association for Computing Machinery},
  title        = {{UNIQ: Uniform Noise Injection for Non-Uniform Quantization of neural networks}},
  doi          = {10.1145/3444943},
  volume       = {37},
  year         = {2021},
}

% Journal article, Sustainability 13(2), 2021.
% NOTE(review): no pages/article number; the DOI suffix (su13020717) suggests
% MDPI article 717 — confirm and add.
@article{18238,
  abstract     = {The demand for running NNs in embedded environments has increased significantly in recent years due to the significant success of convolutional neural network (CNN) approaches in various tasks, including image recognition and generation. The task of achieving high accuracy on resource-restricted devices, however, is still considered to be challenging, which is mainly due to the vast number of design parameters that need to be balanced. While the quantization of CNN parameters leads to a reduction of power and area, it can also generate unexpected changes in the balance between communication and computation. This change is hard to evaluate, and the lack of balance may lead to lower utilization of either memory bandwidth or computational resources, thereby reducing performance. This paper introduces a hardware performance analysis framework for identifying bottlenecks in the early stages of CNN hardware design. We demonstrate how the proposed method can help in evaluating different architecture alternatives of resource-restricted CNN accelerators (e.g., part of real-time embedded systems) early in design stages and, thus, prevent making design mistakes.},
  author       = {Karbachevsky, Alex and Baskin, Chaim and Zheltonozhskii, Evgenii and Yermolin, Yevgeny and Gabbay, Freddy and Bronstein, Alexander and Mendelson, Avi},
  issn         = {2071-1050},
  journal      = {Sustainability},
  number       = {2},
  publisher    = {MDPI},
  title        = {{Early-stage neural network hardware performance analysis}},
  doi          = {10.3390/su13020717},
  volume       = {13},
  year         = {2021},
}

% Conference paper, ICCV 2021 (conference venue kept in "location" per this
% file's convention).
% NOTE(review): volume = {15} is unusual for an ICCV proceedings entry and looks
% like an export artifact — confirm against the published record and remove if spurious.
@inproceedings{18239,
  abstract     = {Nowadays, there is an abundance of data involving images and surrounding free-form text weakly corresponding to those images. Weakly Supervised phrase-Grounding (WSG) deals with the task of using this data to learn to localize (or to ground) arbitrary text phrases in images without any additional annotations. However, most recent SotA methods for WSG assume an existence of a pre-trained object detector, relying on it to produce the ROIs for localization. In this work, we focus on the task of Detector-Free WSG (DF-WSG) to solve WSG without relying on a pre-trained detector. The key idea behind our proposed Grounding by Separation (GbS) method is synthesizing ‘text to image-regions’ associations by random alpha-blending of arbitrary image pairs and using the corresponding texts of the pair as conditions to recover the alpha map from the blended image via a segmentation network. At test time, this allows using the query phrase as a condition for a non-blended query image, thus interpreting the test image as a composition of a region corresponding to the phrase and the complement region. Our GbS shows an 8.5% accuracy improvement over previous DF-WSG SotA, for a range of benchmarks including Flickr30K, Visual Genome, and ReferIt, as well as a complementary improvement (above 7%) over the detector-based approaches for WSG.},
  author       = {Arbelle, Assaf and Doveh, Sivan and Alfassy, Amit and Shtok, Joseph and Lev, Guy and Schwartz, Eli and Kuehne, Hilde and Levi, Hila Barak and Sattigeri, Prasanna and Panda, Rameswar and Chen, Chun-Fu and Bronstein, Alexander and Saenko, Kate and Ullman, Shimon and Giryes, Raja and Feris, Rogerio and Karlinsky, Leonid},
  booktitle    = {IEEE/CVF International Conference on Computer Vision},
  location     = {Montreal, Canada},
  publisher    = {Institute of Electrical and Electronics Engineers},
  title        = {{Detector-free weakly supervised grounding by separation}},
  doi          = {10.1109/iccv48922.2021.00182},
  volume       = {15},
  year         = {2021},
}

% Conference paper, CVPR 2021.
% NOTE(review): volume = {38} is unusual for a CVPR proceedings entry and looks
% like an export artifact — confirm against the published record and remove if spurious.
@inproceedings{18240,
  abstract     = {Mechanical image stabilization using actuated gimbals enables capturing long-exposure shots without suffering from blur due to camera motion. These devices, however, are often physically cumbersome and expensive, limiting their widespread use. In this work, we propose to digitally emulate a mechanically stabilized system from the input of a fast unstabilized camera. To exploit the trade-off between motion blur at long exposures and low SNR at short exposures, we train a CNN that estimates a sharp high-SNR image by aggregating a burst of noisy short-exposure frames, related by unknown motion. We further suggest learning the burst’s exposure times in an end-to-end manner, thus balancing the noise and blur across the frames. We demonstrate this method’s advantage over the traditional approach of deblurring a single image or denoising a fixed-exposure burst on both synthetic and real data.},
  author       = {Dahary, Omer and Jacoby, Matan and Bronstein, Alexander},
  booktitle    = {IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  location     = {Nashville, TN, United States},
  publisher    = {Institute of Electrical and Electronics Engineers},
  title        = {{Digital gimbal: End-to-end deep image stabilization with learnable exposure times}},
  doi          = {10.1109/cvpr46437.2021.01176},
  volume       = {38},
  year         = {2021},
}

% Conference paper, IEEE MLSP 2021; no pages given — add if available.
% NOTE(review): volume = {4} is unusual for a workshop proceedings entry and looks
% like an export artifact — confirm against the published record and remove if spurious.
@inproceedings{18241,
  abstract     = {Multiple-input multiple-output (MIMO) radar is one of the leading depth sensing modalities. However, the usage of multiple receive channels lead to relative high costs and prevent the penetration of MIMOs in many areas such as the automotive industry. Over the last years, few studies concentrated on designing reduced measurement schemes and image reconstruction schemes for MIMO radars, however these problems have been so far addressed separately. On the other hand, recent works in optical computational imaging have demonstrated growing success of simultaneous learning-based design of the acquisition and reconstruction schemes, manifesting significant improvement in the reconstruction quality. Inspired by these successes, in this work, we propose to learn MIMO acquisition parameters in the form of receive (Rx) antenna elements locations jointly with an image neural-network based reconstruction. To this end, we propose an algorithm for training the combined acquisition-reconstruction pipeline end-to-end in a differentiable way. We demonstrate the significance of using our learned acquisition parameters with and without the neural-network reconstruction. Code and datasets will be released upon publication.},
  author       = {Weiss, Tomer and Peretz, Nissim and Vedula, Sanketh and Feuer, Arie and Bronstein, Alexander},
  booktitle    = {31st International Workshop on Machine Learning for Signal Processing},
  location     = {Gold Coast, Australia},
  publisher    = {Institute of Electrical and Electronics Engineers},
  title        = {{Joint optimization of system design and reconstruction in MIMO radar imaging}},
  doi          = {10.1109/mlsp52302.2021.9596168},
  volume       = {4},
  year         = {2021},
}

