@inproceedings{20034,
  abstract     = {We introduce LDAdam, a memory-efficient optimizer for training large models, that performs adaptive optimization steps within lower dimensional subspaces, while consistently exploring the full parameter space during training. This strategy keeps the optimizer's memory footprint to a fraction of the model size. LDAdam relies on a new projection-aware update rule for the optimizer states that allows for transitioning between subspaces, i.e., estimation of the statistics of the projected gradients. To mitigate the errors due to low-rank projection, LDAdam integrates a new generalized error feedback mechanism, which explicitly accounts for both gradient and optimizer state compression. We prove the convergence of LDAdam under standard assumptions, and provide empirical evidence that LDAdam allows for efficient fine-tuning and pre-training of language models.},
  author       = {Robert, Thomas and Safaryan, Mher and Modoranu, Ionut-Vlad and Alistarh, Dan-Adrian},
  booktitle    = {13th International Conference on Learning Representations},
  isbn         = {9798331320850},
  location     = {Singapore, Singapore},
  pages        = {101877--101913},
  publisher    = {ICLR},
  title        = {{LDAdam: Adaptive optimization from low-dimensional gradient statistics}},
  year         = {2025},
}

@inproceedings{18976,
  abstract     = {We analyze asynchronous-type algorithms for distributed SGD in the heterogeneous setting, where each worker has its own computation and communication speeds, as well as data distribution. In these algorithms, workers compute possibly stale and stochastic gradients associated with their local data at some iteration back in history and then return those gradients to the server without synchronizing with other workers. We present a unified convergence theory for non-convex smooth functions in the heterogeneous regime. The proposed analysis provides convergence for pure asynchronous SGD and its various modifications. Moreover, our theory explains what affects the convergence rate and what can be done to improve the performance of asynchronous algorithms. In particular, we introduce a novel asynchronous method based on worker shuffling. As a by-product of our analysis, we also demonstrate convergence guarantees for gradient-type algorithms such as SGD with random reshuffling and shuffle-once mini-batch SGD. The derived rates match the best-known results for those algorithms, highlighting the tightness of our approach. Finally, our numerical evaluations support theoretical findings and show the good practical performance of our method.},
  author       = {Islamov, Rustem and Safaryan, Mher and Alistarh, Dan-Adrian},
  booktitle    = {Proceedings of The 27th International Conference on Artificial Intelligence and Statistics},
  issn         = {2640-3498},
  location     = {Valencia, Spain},
  pages        = {649--657},
  publisher    = {ML Research Press},
  title        = {{AsGrad: A sharp unified analysis of asynchronous-SGD algorithms}},
  volume       = {238},
  year         = {2024},
}

@inproceedings{19510,
  abstract     = {We propose a new variant of the Adam optimizer [Kingma and Ba, 2014] called
MICROADAM that specifically minimizes memory overheads, while maintaining
theoretical convergence guarantees. We achieve this by compressing the gradient
information before it is fed into the optimizer state, thereby reducing its memory
footprint significantly. We control the resulting compression error via a novel
instance of the classical error feedback mechanism from distributed optimization [Seide et al., 2014, Alistarh et al., 2018, Karimireddy et al., 2019] in which
the error correction information is itself compressed to allow for practical memory
gains. We prove that the resulting approach maintains theoretical convergence
guarantees competitive to those of AMSGrad, while providing good practical performance. Specifically, we show that MICROADAM can be implemented efficiently
on GPUs: on both million-scale (BERT) and billion-scale (LLaMA) models, MICROADAM provides practical convergence competitive to that of the uncompressed
Adam baseline, with lower memory usage and similar running time. Our code is
available at https://github.com/IST-DASLab/MicroAdam.},
  author       = {Modoranu, Ionut-Vlad and Safaryan, Mher and Malinovsky, Grigory and Kurtic, Eldar and Robert, Thomas and Richtárik, Peter and Alistarh, Dan-Adrian},
  booktitle    = {38th Conference on Neural Information Processing Systems},
  issn         = {1049-5258},
  location     = {Vancouver, Canada},
  publisher    = {Neural Information Processing Systems Foundation},
  title        = {{MicroAdam: Accurate adaptive optimization with low space overhead and provable convergence}},
  volume       = {37},
  year         = {2024},
}

@inproceedings{19518,
  abstract     = {The rising footprint of machine learning has led to a focus on imposing model
sparsity as a means of reducing computational and memory costs. For deep neural
networks (DNNs), the state-of-the-art accuracy-vs-sparsity is achieved by heuristics
inspired by the classical Optimal Brain Surgeon (OBS) framework [LeCun et al.,
1989, Hassibi and Stork, 1992, Hassibi et al., 1993], which leverages loss curvature
information to make better pruning decisions. Yet, these results still lack a solid
theoretical understanding, and it is unclear whether they can be improved by
leveraging connections to the wealth of work on sparse recovery algorithms. In this
paper, we draw new connections between these two areas and present new sparse
recovery algorithms inspired by the OBS framework that comes with theoretical
guarantees under reasonable assumptions and have strong practical performance.
Specifically, our work starts from the observation that we can leverage curvature
information in OBS-like fashion upon the projection step of classic iterative sparse
recovery algorithms such as IHT. We show for the first time that this leads both
to improved convergence bounds under standard assumptions. Furthermore, we
present extensions of this approach to the practical task of obtaining accurate sparse
DNNs, and validate it experimentally at scale for Transformer-based models on
vision and language tasks.},
  author       = {Wu, Diyuan and Modoranu, Ionut-Vlad and Safaryan, Mher and Kuznedelev, Denis and Alistarh, Dan-Adrian},
  booktitle    = {38th Conference on Neural Information Processing Systems},
  issn         = {1049-5258},
  location     = {Vancouver, Canada},
  publisher    = {Neural Information Processing Systems Foundation},
  title        = {{The iterative optimal brain surgeon: Faster sparse recovery by leveraging second-order information}},
  volume       = {37},
  year         = {2024},
}

@article{14815,
  abstract     = {In the last few years, various communication compression techniques have emerged as an indispensable tool helping to alleviate the communication bottleneck in distributed learning. However, despite the fact biased compressors often show superior performance in practice when compared to the much more studied and understood unbiased compressors, very little is known about them. In this work we study three classes of biased compression operators, two of which are new, and their performance when applied to (stochastic) gradient descent and distributed (stochastic) gradient descent. We show for the first time that biased compressors can lead to linear convergence rates both in the single node and distributed settings. We prove that distributed compressed SGD method, employed with error feedback mechanism, enjoys the ergodic rate $O\left(\delta L \exp\left[-\frac{\mu K}{\delta L}\right] + \frac{C + \delta D}{K\mu}\right)$, where $\delta \geq 1$ is a compression parameter which grows when more compression is applied, $L$ and $\mu$ are the smoothness and strong convexity constants, $C$ captures stochastic gradient noise ($C=0$ if full gradients are computed on each node) and $D$ captures the variance of the gradients at the optimum ($D=0$ for over-parameterized models). Further, via a theoretical study of several synthetic and empirical distributions of communicated gradients, we shed light on why and by how much biased compressors outperform their unbiased variants. Finally, we propose several new biased compressors with promising theoretical guarantees and practical performance.},
  author       = {Beznosikov, Aleksandr and Horváth, Samuel and Richtárik, Peter and Safaryan, Mher},
  issn         = {1533-7928},
  journal      = {Journal of Machine Learning Research},
  pages        = {1--50},
  publisher    = {Journal of Machine Learning Research},
  title        = {{On biased compression for distributed learning}},
  volume       = {24},
  year         = {2023},
}

@inproceedings{15363,
  abstract     = {Knowledge distillation is a popular approach for enhancing the performance of "student" models, with lower representational capacity, by taking advantage of more powerful "teacher" models. Despite its apparent simplicity, the underlying mechanics behind knowledge distillation (KD) are not yet fully understood. In this work, we shed new light on the inner workings of this method, by examining it from an optimization perspective. Specifically, we show that, in the context of linear and deep linear models, KD can be interpreted as a novel type of stochastic variance reduction mechanism. We provide a detailed convergence analysis of the resulting dynamics, which hold under standard assumptions for both strongly-convex and non-convex losses, showing that KD acts as a form of \emph{partial variance reduction}, which can reduce the stochastic gradient noise, but may not eliminate it completely, depending on the properties of the "teacher" model. Our analysis puts further emphasis on the need for careful parametrization of KD, in particular w.r.t. the weighting of the distillation loss, and is validated empirically on both linear models and deep neural networks.},
  author       = {Safaryan, Mher and Peste, Elena-Alexandra and Alistarh, Dan-Adrian},
  booktitle    = {37th Conference on Neural Information Processing Systems},
  issn         = {1049-5258},
  location     = {New Orleans, LA, United States},
  publisher    = {Neural Information Processing Systems Foundation},
  title        = {{Knowledge distillation performs partial variance reduction}},
  volume       = {36},
  year         = {2023},
}

