@article{12662,
  abstract     = {Modern machine learning tasks often require considering not just one but multiple objectives. For example, besides the prediction quality, this could be the efficiency, robustness or fairness of the learned models, or any of their combinations. Multi-objective learning offers a natural framework for handling such problems without having to commit to early trade-offs. Surprisingly, statistical learning theory so far offers almost no insight into the generalization properties of multi-objective learning. In this work, we make first steps to fill this gap: We establish foundational generalization bounds for the multi-objective setting as well as generalization and excess bounds for learning with scalarizations. We also provide the first theoretical analysis of the relation between the Pareto-optimal sets of the true objectives and the Pareto-optimal sets of their empirical approximations from training data. In particular, we show a surprising asymmetry: All Pareto-optimal solutions can be approximated by empirically Pareto-optimal ones, but not vice versa.},
  author       = {Súkeník, Peter and Lampert, Christoph},
  issn         = {1433-3058},
  journal      = {Neural Computing and Applications},
  pages        = {24669--24683},
  publisher    = {Springer Nature},
  title        = {{Generalization in multi-objective machine learning}},
  doi          = {10.1007/s00521-024-10616-1},
  volume       = {37},
  year         = {2025},
}

@inproceedings{20296,
  abstract     = {Learning-based systems are increasingly deployed across various domains, yet the complexity of traditional neural networks poses significant challenges for formal verification. Unlike conventional neural networks, learned Logic Gate Networks (LGNs) replace multiplications with Boolean logic gates, yielding a sparse, netlist-like architecture that is inherently more amenable to symbolic verification, while still delivering promising performance. In this paper, we introduce a SAT encoding for verifying global robustness and fairness in LGNs. We evaluate our method on five benchmark datasets, including a newly constructed 5-class variant, and find that LGNs are both verification-friendly and maintain strong predictive performance.},
  author       = {Kresse, Fabian and Yu, Zhengqi and Lampert, Christoph and Henzinger, Thomas A},
  booktitle    = {2nd International Conference on Neuro-Symbolic Systems},
  issn         = {2640-3498},
  location     = {Philadelphia, PA, United States},
  publisher    = {ML Research Press},
  title        = {{Logic gate neural networks are good for verification}},
  volume       = {288},
  year         = {2025},
}

@inproceedings{20455,
  abstract     = {Despite extensive research since the community learned about adversarial examples 10 years ago, we still do not know how to train high-accuracy classifiers that are guaranteed to be robust to small perturbations of their inputs. Previous works often argued that this might be because no classifier exists that is robust and accurate at the same time. However, in computer vision this assumption does not match reality where humans are usually accurate and robust on most tasks of interest. We offer an alternative explanation and show that in certain settings robust generalization is only possible with unrealistically large amounts of data. Specifically, we find a setting where a robust classifier exists, it is easy to learn an accurate classifier, yet it requires an exponential amount of data to learn a robust classifier. Based on this theoretical result, we evaluate the influence of the amount of training data on datasets such as CIFAR10. Our findings indicate that the amount of training data is the main factor determining the robust performance. Furthermore we show that there are low magnitude directions in the data which are useful for non-robust generalization but are not available for robust classifiers. This implies that robust classification is a strictly harder task than normal classification, thereby providing an explanation why robust classification requires more data.},
  author       = {Prach, Bernd and Lampert, Christoph},
  booktitle    = {2025 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops},
  isbn         = {9798331599942},
  issn         = {2160-7516},
  location     = {Nashville, TN, United States},
  pages        = {660--669},
  publisher    = {IEEE},
  title        = {{Intriguing properties of robust classification}},
  doi          = {10.1109/CVPRW67362.2025.00071},
  year         = {2025},
}

@inproceedings{20819,
  abstract     = {Clustering is a cornerstone of data analysis that is particularly suited to identifying coherent subgroups or substructures in unlabeled data, as are generated continuously in large amounts these days. However, in many cases traditional clustering methods are not applicable, because data are increasingly being produced and stored in a distributed way, e.g. on edge devices, and privacy concerns prevent it from being transferred to a central server. To address this challenge, we present FedDP-KMeans, a new algorithm for k-means clustering that is fully-federated as well as differentially private. Our approach leverages (potentially small and out-of-distribution) server-side data to overcome the primary challenge of differentially private clustering methods: the need for a good initialization. Combining our initialization with a simple federated DP-Lloyds algorithm we obtain an algorithm that achieves excellent results on synthetic and real-world benchmark tasks. We also provide a theoretical analysis of our method that provides bounds on the convergence speed and cluster identification success.},
  author       = {Scott, Jonathan A and Lampert, Christoph and Saulpic, David},
  booktitle    = {42nd International Conference on Machine Learning},
  issn         = {2640-3498},
  location     = {Vancouver, Canada},
  pages        = {53757--53790},
  publisher    = {ML Research Press},
  title        = {{Differentially private federated k-means clustering with server-side data}},
  volume       = {267},
  year         = {2025},
}

@unpublished{21207,
  abstract     = {Personalized federated learning has emerged as a popular approach to training on devices holding statistically heterogeneous data, known as clients. However, most existing approaches require a client to have labeled data for training or finetuning in order to obtain their own personalized model. In this paper we address this by proposing FLowDUP, a novel method that is able to generate a personalized model using only a forward pass with unlabeled data. The generated model parameters reside in a low-dimensional subspace, enabling efficient communication and computation. FLowDUP's learning objective is theoretically motivated by our new transductive multi-task PAC-Bayesian generalization bound, that provides performance guarantees for unlabeled clients. The objective is structured in such a way that it allows both clients with labeled data and clients with only unlabeled data to contribute to the training process. To supplement our theoretical results we carry out a thorough experimental evaluation of FLowDUP, demonstrating strong empirical performance on a range of datasets with differing sorts of statistically heterogeneous clients. Through numerous ablation studies, we test the efficacy of the individual components of the method.},
  author       = {Zakerinia, Hossein and Scott, Jonathan A and Lampert, Christoph},
  booktitle    = {arXiv},
  title        = {{Federated learning with unlabeled clients: Personalization can happen in low dimensions}},
  doi          = {10.48550/ARXIV.2505.15579},
  year         = {2025},
}

@inproceedings{18118,
  abstract     = {We introduce a new framework for studying meta-learning methods using PAC-Bayesian theory. Its main advantage over previous work is that it allows for more flexibility in how the transfer of knowledge between tasks is realized. For previous approaches, this could only happen indirectly, by means of learning prior distributions over models. In contrast, the new generalization bounds that we prove express the process of meta-learning much more directly as learning the learning algorithm that should be used for future tasks. The flexibility of our framework makes it suitable to analyze a wide range of meta-learning mechanisms and even design new mechanisms. Other than our theoretical contributions we also show empirically that our framework improves the prediction quality in practical meta-learning mechanisms.},
  author       = {Zakerinia, Hossein and Behjati, Amin and Lampert, Christoph},
  booktitle    = {Proceedings of the 41st International Conference on Machine Learning},
  issn         = {2640-3498},
  location     = {Vienna, Austria},
  pages        = {58122--58139},
  publisher    = {ML Research Press},
  title        = {{More flexible PAC-Bayesian meta-learning by learning learning algorithms}},
  volume       = {235},
  year         = {2024},
}

@article{18856,
  abstract     = {This research is aimed to solve the tweet/user geolocation prediction task and provide a flexible methodology for the geo-tagging of textual big data. The suggested approach implements neural networks for natural language processing (NLP) to estimate the location as coordinate pairs (longitude, latitude) and two-dimensional Gaussian Mixture Models (GMMs). The scope of proposed models has been finetuned on a Twitter dataset using pretrained Bidirectional Encoder Representations from Transformers (BERT) as base models. Performance metrics show a median error of fewer than 30 km on a worldwide-level, and fewer than 15 km on the US-level datasets for the models trained and evaluated on text features of tweets' content and metadata context. Our source code and data are available at https://github.com/K4TEL/geo-twitter.git.},
  author       = {Lutsai, Kateryna and Lampert, Christoph},
  issn         = {1948-660X},
  journal      = {Journal of Spatial Information Science},
  number       = {29},
  pages        = {69--99},
  publisher    = {University of Maine},
  title        = {{Predicting the geolocation of tweets using transformer models on customized data}},
  doi          = {10.5311/JOSIS.2024.29.295},
  year         = {2024},
}

@inproceedings{18875,
  abstract     = {Current state-of-the-art methods for differentially private model training are based on matrix factorization techniques. However, these methods suffer from high computational overhead because they require numerically solving a demanding optimization problem to determine an approximately optimal factorization prior to the actual model training. In this work, we present a new matrix factorization approach, BSR, which overcomes this computational bottleneck. By exploiting properties of the standard matrix square root, BSR allows to efficiently handle also large-scale problems. For the key scenario of stochastic gradient descent with momentum and weight decay, we even derive analytical expressions for BSR that render the computational overhead negligible. We prove bounds on the approximation quality that hold both in the centralized and in the federated learning setting. Our numerical experiments demonstrate that models trained using BSR perform on par with the best existing methods, while completely avoiding their computational overhead.},
  author       = {Kalinin, Nikita and Lampert, Christoph},
  booktitle    = {38th Annual Conference on Neural Information Processing Systems},
  issn         = {1049-5258},
  location     = {Vancouver, Canada},
  publisher    = {Neural Information Processing Systems Foundation},
  title        = {{Banded square root matrix factorization for differentially private model training}},
  volume       = {37},
  year         = {2024},
}

@inproceedings{18891,
  abstract     = {Deep neural networks (DNNs) exhibit a surprising structure in their final layer
known as neural collapse (NC), and a growing body of works has currently investigated the propagation of neural collapse to earlier layers of DNNs – a phenomenon
called deep neural collapse (DNC). However, existing theoretical results are restricted to special cases: linear models, only two layers or binary classification.
In contrast, we focus on non-linear models of arbitrary depth in multi-class classification and reveal a surprising qualitative shift. As soon as we go beyond two
layers or two classes, DNC stops being optimal for the deep unconstrained features
model (DUFM) – the standard theoretical framework for the analysis of collapse.
The main culprit is a low-rank bias of multi-layer regularization schemes: this bias
leads to optimal solutions of even lower rank than the neural collapse. We support
our theoretical findings with experiments on both DUFM and real data, which show
the emergence of the low-rank structure in the solution found by gradient descent.},
  author       = {Súkeník, Peter and Lampert, Christoph and Mondelli, Marco},
  booktitle    = {38th Annual Conference on Neural Information Processing Systems},
  location     = {Vancouver, Canada},
  publisher    = {Neural Information Processing Systems Foundation},
  title        = {{Neural collapse versus low-rank bias: Is deep neural collapse really optimal?}},
  volume       = {37},
  year         = {2024},
}

@unpublished{19063,
  abstract     = {Instruction-tuned Large Language Models (LLMs) show impressive results in numerous practical applications, but they lack essential safety features that are common in other areas of computer science, particularly an explicit separation of instructions and data. This makes them vulnerable to manipulations such as indirect prompt injections and generally unsuitable for safety-critical tasks. Surprisingly, there is currently no established definition or benchmark to quantify this phenomenon. In this work, we close this gap by introducing a formal measure for instruction-data separation and an empirical variant that is calculable from a model's outputs. We also present a new dataset, SEP, that allows estimating the measure for real-world models. Our results on various LLMs show that the problem of instruction-data separation is real: all models fail to achieve high separation, and canonical mitigation techniques, such as prompt engineering and fine-tuning, either fail to substantially improve separation or reduce model utility. The source code and SEP dataset are openly accessible at https://github.com/egozverev/Shold-It-Be-Executed-Or-Processed.
},
  author       = {Zverev, Egor and Abdelnabi, Sahar and Tabesh, Soroush and Fritz, Mario and Lampert, Christoph},
  booktitle    = {arXiv},
  title        = {{Can LLMs separate instructions from data? And what do we even mean by that?}},
  doi          = {10.48550/arXiv.2403.06833},
  year         = {2024},
}

@article{19408,
  abstract     = {Continual learning is a subfield of machine learning, which aims to allow machine learning models to continuously learn on new data, by accumulating knowledge without forgetting what was learned in the past. In this work, we take a step back, and ask: "Why should one care about continual learning in the first place?". We set the stage by examining recent continual learning papers published at four major machine learning conferences, and show that memory-constrained settings dominate the field. Then, we discuss five open problems in machine learning, and even though they might seem unrelated to continual learning at first sight, we show that continual learning will inevitably be part of their solution. These problems are model editing, personalization and specialization, on-device learning, faster (re-)training and reinforcement learning. Finally, by comparing the desiderata from these unsolved problems and the current assumptions in continual learning, we highlight and discuss four future directions for continual learning research. We hope that this work offers an interesting perspective on the future of continual learning, while displaying its potential value and the paths we have to pursue in order to make it successful. This work is the result of the many discussions the authors had at the Dagstuhl seminar on Deep Continual Learning, in March 2023.},
  author       = {Verwimp, Eli and Aljundi, Rahaf and Ben-David, Shai and Bethge, Matthias and Cossu, Andrea and Gepperth, Alexander and Hayes, Tyler L. and Hüllermeier, Eyke and Kanan, Christopher and Kudithipudi, Dhireesha and Lampert, Christoph and Mundt, Martin and Pascanu, Razvan and Popescu, Adrian and Tolias, Andreas S. and Van De Weijer, Joost and Liu, Bing and Lomonaco, Vincenzo and Tuytelaars, Tinne and Van De Ven, Gido M.},
  issn         = {2835-8856},
  journal      = {Transactions on Machine Learning Research},
  publisher    = {Transactions on Machine Learning Research},
  title        = {{Continual learning: Applications and the road forward}},
  volume       = {2024},
  year         = {2024},
}

@inproceedings{17411,
  abstract     = {We present PeFLL, a new personalized federated learning algorithm that improves
over the state-of-the-art in three aspects: 1) it produces more accurate models,
especially in the low-data regime, and not only for clients present during its
training phase, but also for any that may emerge in the future; 2) it reduces the
amount of on-client computation and client-server communication by providing
future clients with ready-to-use personalized models that require no additional
finetuning or optimization; 3) it comes with theoretical guarantees that establish
generalization from the observed clients to future ones.
At the core of PeFLL lies a learning-to-learn approach that jointly trains an
embedding network and a hypernetwork. The embedding network is used to
represent clients in a latent descriptor space in a way that reflects their similarity
to each other. The hypernetwork takes as input such descriptors and outputs the
parameters of fully personalized client models. In combination, both networks
constitute a learning algorithm that achieves state-of-the-art performance in several
personalized federated learning benchmarks},
  author       = {Scott, Jonathan A and Zakerinia, Hossein and Lampert, Christoph},
  booktitle    = {12th International Conference on Learning Representations},
  location     = {Vienna, Austria},
  publisher    = {OpenReview},
  title        = {{PeFLL: Personalized federated learning by learning to learn}},
  year         = {2024},
}

@inproceedings{17426,
  abstract     = {The robustness of neural networks against input perturbations with bounded
magnitude represents a serious concern in the deployment of deep learning
models in safety-critical systems. Recently, the scientific community has
focused on enhancing certifiable robustness guarantees by crafting 1-Lipschitz
neural networks that leverage Lipschitz bounded dense and convolutional layers.
Although different methods have been proposed in the literature to achieve this
goal, understanding the performance of such methods is not straightforward,
since different metrics can be relevant (e.g., training time, memory usage,
accuracy, certifiable robustness) for different applications. For this reason,
this work provides a thorough theoretical and empirical comparison between
methods by evaluating them in terms of memory usage, speed, and certifiable
robust accuracy. The paper also provides some guidelines and recommendations to
support the user in selecting the methods that work best depending on the
available resources. We provide code at
https://github.com/berndprach/1LipschitzLayersCompared.},
  author       = {Prach, Bernd and Brau, Fabio and Buttazzo, Giorgio and Lampert, Christoph},
  booktitle    = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  location     = {Seattle, WA, United States},
  pages        = {24574--24583},
  publisher    = {Computer Vision Foundation},
  title        = {{1-Lipschitz layers compared: Memory, speed, and certifiable robustness}},
  doi          = {10.1109/CVPR52733.2024.02320},
  year         = {2024},
}

@unpublished{18874,
  abstract     = {Despite extensive research since the community learned about adversarial
examples 10 years ago, we still do not know how to train high-accuracy
classifiers that are guaranteed to be robust to small perturbations of their
inputs. Previous works often argued that this might be because no classifier
exists that is robust and accurate at the same time. However, in computer
vision this assumption does not match reality where humans are usually accurate
and robust on most tasks of interest. We offer an alternative explanation and
show that in certain settings robust generalization is only possible with
unrealistically large amounts of data. More precisely we find a setting where a
robust classifier exists, it is easy to learn an accurate classifier, yet it
requires an exponential amount of data to learn a robust classifier. Based on
this theoretical result, we explore how well robust classifiers generalize on
datasets such as CIFAR-10. We come to the conclusion that on these datasets, the
limitation of current robust models also lies in the generalization, and that
they require a lot of data to do well on the test set. We also show that the
problem is not in the expressiveness or generalization capabilities of current
architectures, and that there are low magnitude features in the data which are
useful for non-robust generalization but are not available for robust
classifiers.},
  author       = {Prach, Bernd and Lampert, Christoph},
  booktitle    = {arXiv},
  title        = {{Intriguing properties of robust classification}},
  doi          = {10.48550/arXiv.2412.04245},
  year         = {2024},
}

@inproceedings{14410,
  abstract     = {This paper focuses on the implementation details of the baseline methods and a recent lightweight conditional model extrapolation algorithm LIMES [5] for streaming data under class-prior shift. LIMES achieves superior performance over the baseline methods, especially concerning the minimum-across-day accuracy, which is important for the users of the system. In this work, the key measures to facilitate reproducibility and enhance the credibility of the results are described.},
  author       = {Tomaszewska, Paulina and Lampert, Christoph},
  booktitle    = {International Workshop on Reproducible Research in Pattern Recognition},
  isbn         = {9783031407727},
  issn         = {1611-3349},
  location     = {Montreal, Canada},
  pages        = {67--73},
  publisher    = {Springer Nature},
  title        = {{On the implementation of baselines and lightweight conditional model extrapolation (LIMES) under class-prior shift}},
  doi          = {10.1007/978-3-031-40773-4_6},
  volume       = {14068},
  year         = {2023},
}

@inproceedings{12660,
  abstract     = {We present Cross-Client Label Propagation(XCLP), a new method for transductive federated learning. XCLP estimates a data graph jointly from the data of multiple clients and computes labels for the unlabeled data by propagating label information across the graph. To avoid clients having to share their data with anyone, XCLP employs two cryptographically secure protocols: secure Hamming distance computation and secure summation. We demonstrate two distinct applications of XCLP within federated learning. In the first, we use it in a one-shot way to predict labels for unseen test points. In the second, we use it to repeatedly pseudo-label unlabeled training data in a federated semi-supervised setting. Experiments on both real federated and standard benchmark datasets show that in both applications XCLP achieves higher classification accuracy than alternative approaches.},
  author       = {Scott, Jonathan A and Yeo, Michelle X and Lampert, Christoph},
  booktitle    = {Transactions on Machine Learning Research},
  issn         = {2835-8856},
  publisher    = {Curran Associates},
  title        = {{Cross-client label propagation for transductive and semi-supervised federated learning}},
  year         = {2023},
}

@inproceedings{14921,
  abstract     = {Neural collapse (NC) refers to the surprising structure of the last layer of deep neural networks in the terminal phase of gradient descent training. Recently, an increasing amount of experimental evidence has pointed to the propagation of NC to earlier layers of neural networks. However, while the NC in the last layer is well studied theoretically, much less is known about its multi-layered counterpart - deep neural collapse (DNC). In particular, existing work focuses either on linear layers or only on the last two layers at the price of an extra assumption. Our paper fills this gap by generalizing the established analytical framework for NC - the unconstrained features model - to multiple non-linear layers. Our key technical contribution is to show that, in a deep unconstrained features model, the unique global optimum for binary classification exhibits all the properties typical of DNC. This explains the existing experimental evidence of DNC. We also empirically show that (i) by optimizing deep unconstrained features models via gradient descent, the resulting solution agrees well with our theory, and (ii) trained networks recover the unconstrained features suitable for the occurrence of DNC, thus supporting the validity of this modeling principle.},
  author       = {Súkeník, Peter and Mondelli, Marco and Lampert, Christoph},
  booktitle    = {37th Annual Conference on Neural Information Processing Systems},
  location     = {New Orleans, LA, United States},
  title        = {{Deep neural collapse is provably optimal for the deep unconstrained features model}},
  year         = {2023},
}

@unpublished{15039,
  abstract     = {A crucial property for achieving secure, trustworthy and interpretable deep learning systems is their robustness: small changes to a system's inputs should not result in large changes to its outputs. Mathematically, this means one strives for networks with a small Lipschitz constant. Several recent works have focused on how to construct such Lipschitz networks, typically by imposing constraints on the weight matrices. In this work, we study an orthogonal aspect, namely the role of the activation function. We show that commonly used activation functions, such as MaxMin, as well as all piece-wise linear ones with two segments unnecessarily restrict the class of representable functions, even in the simplest one-dimensional setting. We furthermore introduce the new N-activation function that is provably more expressive than currently popular activation functions. We provide code at this https URL.},
  author       = {Prach, Bernd and Lampert, Christoph},
  booktitle    = {arXiv},
  title        = {{1-Lipschitz neural networks are more expressive with N-activations}},
  doi          = {10.48550/ARXIV.2311.06103},
  year         = {2023},
}

@inproceedings{13053,
  abstract     = {Deep neural networks (DNNs) often have to be compressed, via pruning and/or quantization, before they can be deployed in practical settings. In this work we propose a new compression-aware minimizer dubbed CrAM that modifies the optimization step in a principled way, in order to produce models whose local loss behavior is stable under compression operations such as pruning. Thus, dense models trained via CrAM should be compressible post-training, in a single step, without significant accuracy loss. Experimental results on standard benchmarks, such as residual networks for ImageNet classification and BERT models for language modelling, show that CrAM produces dense models that can be more accurate than the standard SGD/Adam-based baselines, but which are stable under weight pruning: specifically, we can prune models in one-shot to 70-80% sparsity with almost no accuracy loss, and to 90% with reasonable (∼1%) accuracy loss, which is competitive with gradual compression methods. Additionally, CrAM can produce sparse models which perform well for transfer learning, and it also works for semi-structured 2:4 pruning patterns supported by GPU hardware. The code for reproducing the results is available at this https URL .},
  author       = {Peste, Elena-Alexandra and Vladu, Adrian and Kurtic, Eldar and Lampert, Christoph and Alistarh, Dan-Adrian},
  booktitle    = {11th International Conference on Learning Representations},
  location     = {Kigali, Rwanda},
  publisher    = {OpenReview},
  title        = {{CrAM: A Compression-Aware Minimizer}},
  year         = {2023},
}

@inproceedings{12161,
  abstract     = {We introduce LIMES, a new method for learning with non-stationary streaming data, inspired by the recent success of meta-learning. The main idea is not to attempt to learn a single classifier that would have to work well across all occurring data distributions, nor many separate classifiers, but to exploit a hybrid strategy: we learn a single set of model parameters from which a specific classifier for any specific data distribution is derived via classifier adaptation. Assuming a multiclass classification setting with class-prior shift, the adaptation step can be performed analytically with only the classifier’s bias terms being affected. Another contribution of our work is an extrapolation step that predicts suitable adaptation parameters for future time steps based on the previous data. In combination, we obtain a lightweight procedure for learning from streaming data with varying class distribution that adds no trainable parameters and almost no memory or computational overhead compared to training a single model. Experiments on a set of exemplary tasks using Twitter data show that LIMES achieves higher accuracy than alternative approaches, especially with respect to the relevant real-world metric of lowest within-day accuracy.},
  author       = {Tomaszewska, Paulina and Lampert, Christoph},
  booktitle    = {26th International Conference on Pattern Recognition},
  issn         = {2831-7475},
  location     = {Montreal, Canada},
  pages        = {2128--2134},
  publisher    = {Institute of Electrical and Electronics Engineers},
  title        = {{Lightweight conditional model extrapolation for streaming data under class-prior shift}},
  doi          = {10.1109/icpr56361.2022.9956195},
  volume       = {2022},
  year         = {2022},
}

