[{"type":"journal_article","quality_controlled":"1","_id":"12662","language":[{"iso":"eng"}],"abstract":[{"lang":"eng","text":"Modern machine learning tasks often require considering not just one but multiple objectives. For example, besides the prediction quality, this could be the efficiency, robustness or fairness of the learned models, or any of their combinations. Multi-objective learning offers a natural framework for handling such problems without having to commit to early trade-offs. Surprisingly, statistical learning theory so far offers almost no insight into the generalization properties of multi-objective learning. In this work, we make first steps to fill this gap: We establish foundational generalization bounds for the multi-objective setting as well as generalization and excess bounds for learning with scalarizations. We also provide the first theoretical analysis of the relation between the Pareto-optimal sets of the true objectives and the Pareto-optimal sets of their empirical approximations from training data. In particular, we show a surprising asymmetry: All Pareto-optimal solutions can be approximated by empirically Pareto-optimal ones, but not vice versa."}],"external_id":{"arxiv":["2208.13499"]},"has_accepted_license":"1","day":"01","citation":{"apa":"Súkeník, P., &#38; Lampert, C. (2025). Generalization in multi-objective machine learning. <i>Neural Computing and Applications</i>. Springer Nature. <a href=\"https://doi.org/10.1007/s00521-024-10616-1\">https://doi.org/10.1007/s00521-024-10616-1</a>","ieee":"P. Súkeník and C. Lampert, “Generalization in multi-objective machine learning,” <i>Neural Computing and Applications</i>, vol. 37. Springer Nature, pp. 24669–24683, 2025.","short":"P. Súkeník, C. Lampert, Neural Computing and Applications 37 (2025) 24669–24683.","mla":"Súkeník, Peter, and Christoph Lampert. “Generalization in Multi-Objective Machine Learning.” <i>Neural Computing and Applications</i>, vol. 37, Springer Nature, 2025, pp. 24669–24683, doi:<a href=\"https://doi.org/10.1007/s00521-024-10616-1\">10.1007/s00521-024-10616-1</a>.","chicago":"Súkeník, Peter, and Christoph Lampert. “Generalization in Multi-Objective Machine Learning.” <i>Neural Computing and Applications</i>. Springer Nature, 2025. <a href=\"https://doi.org/10.1007/s00521-024-10616-1\">https://doi.org/10.1007/s00521-024-10616-1</a>.","ista":"Súkeník P, Lampert C. 2025. Generalization in multi-objective machine learning. Neural Computing and Applications. 37, 24669–24683.","ama":"Súkeník P, Lampert C. Generalization in multi-objective machine learning. <i>Neural Computing and Applications</i>. 2025;37:24669–24683. 
doi:<a href=\"https://doi.org/10.1007/s00521-024-10616-1\">10.1007/s00521-024-10616-1</a>"},"year":"2025","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","ddc":["004"],"intvolume":"        37","date_published":"2025-10-01T00:00:00Z","OA_place":"publisher","file_date_updated":"2025-12-30T06:39:11Z","publication":"Neural Computing and Applications","date_updated":"2025-12-30T06:39:56Z","status":"public","page":"24669–24683","PlanS_conform":"1","scopus_import":"1","publication_status":"published","file":[{"content_type":"application/pdf","success":1,"file_size":500213,"date_created":"2025-12-30T06:39:11Z","access_level":"open_access","date_updated":"2025-12-30T06:39:11Z","relation":"main_file","creator":"dernst","file_id":"20877","checksum":"61ad4591aee16b1e02daf6c164321a42","file_name":"2025_NeuralCompApplic_Sukenik.pdf"}],"arxiv":1,"publication_identifier":{"eissn":["1433-3058"],"issn":["0941-0643"]},"oa_version":"Published Version","OA_type":"hybrid","date_created":"2023-02-20T08:23:06Z","title":"Generalization in multi-objective machine learning","acknowledgement":"Open access funding provided by Institute of Science and Technology (IST Austria).","doi":"10.1007/s00521-024-10616-1","oa":1,"article_type":"original","department":[{"_id":"ChLa"}],"month":"10","author":[{"first_name":"Peter","id":"d64d6a8d-eb8e-11eb-b029-96fd216dec3c","full_name":"Súkeník, Peter","last_name":"Súkeník"},{"id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","first_name":"Christoph","orcid":"0000-0001-8622-7887","full_name":"Lampert, Christoph","last_name":"Lampert"}],"volume":37,"publisher":"Springer Nature","corr_author":"1","tmp":{"short":"CC BY (4.0)","image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)"},"article_processing_charge":"Yes (via OA deal)"},{"has_accepted_license":"1","citation":{"chicago":"Kresse, Fabian, Emily Yu, Christoph Lampert, and Thomas A Henzinger. “Logic Gate Neural Networks Are Good for Verification.” In <i>2nd International Conference on Neuro-Symbolic Systems</i>, Vol. 288. ML Research Press, 2025.","ista":"Kresse F, Yu E, Lampert C, Henzinger TA. 2025. Logic gate neural networks are good for verification. 2nd International Conference on Neuro-Symbolic Systems. NeuS: International Conference on Neuro-Symbolic Systems, PMLR, vol. 288, 26.","ama":"Kresse F, Yu E, Lampert C, Henzinger TA. Logic gate neural networks are good for verification. In: <i>2nd International Conference on Neuro-Symbolic Systems</i>. Vol 288. ML Research Press; 2025.","apa":"Kresse, F., Yu, E., Lampert, C., &#38; Henzinger, T. A. (2025). Logic gate neural networks are good for verification. In <i>2nd International Conference on Neuro-Symbolic Systems</i> (Vol. 288). Philadelphia, PA, United States: ML Research Press.","ieee":"F. Kresse, E. Yu, C. Lampert, and T. A. Henzinger, “Logic gate neural networks are good for verification,” in <i>2nd International Conference on Neuro-Symbolic Systems</i>, Philadelphia, PA, United States, 2025, vol. 288.","short":"F. Kresse, E. Yu, C. Lampert, T.A. Henzinger, in:, 2nd International Conference on Neuro-Symbolic Systems, ML Research Press, 2025.","mla":"Kresse, Fabian, et al. “Logic Gate Neural Networks Are Good for Verification.” <i>2nd International Conference on Neuro-Symbolic Systems</i>, vol. 
288, 26, ML Research Press, 2025."},"day":"01","abstract":[{"lang":"eng","text":"Learning-based systems are increasingly deployed across various domains, yet the complexity of traditional neural networks poses significant challenges for formal verification. Unlike conventional neural networks, learned Logic Gate Networks (LGNs) replace multiplications with Boolean logic gates, yielding a sparse, netlist-like architecture that is inherently more amenable to symbolic verification, while still delivering promising performance. In this paper, we introduce a SAT encoding for verifying global robustness and fairness in LGNs. We evaluate our method on five benchmark datasets, including a newly constructed 5-class variant, and find that LGNs are both verification-friendly and maintain strong predictive performance."}],"_id":"20296","language":[{"iso":"eng"}],"external_id":{"arxiv":["2505.19932"]},"type":"conference","quality_controlled":"1","date_published":"2025-06-01T00:00:00Z","OA_place":"publisher","article_number":"26","intvolume":"       288","year":"2025","ddc":["000"],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","date_updated":"2025-09-09T08:12:44Z","publication":"2nd International Conference on Neuro-Symbolic Systems","acknowledged_ssus":[{"_id":"ScienComp"}],"file_date_updated":"2025-09-09T08:10:13Z","ec_funded":1,"scopus_import":"1","status":"public","file":[{"access_level":"open_access","file_size":295466,"date_created":"2025-09-09T08:10:13Z","content_type":"application/pdf","success":1,"file_name":"2025_NeuS_Kresse.pdf","checksum":"90a32defed34787e771a5c1623b6b0d2","file_id":"20314","creator":"dernst","relation":"main_file","date_updated":"2025-09-09T08:10:13Z"}],"publication_status":"published","conference":{"end_date":"2025-05-30","location":"Philadelphia, PA, United States","name":"NeuS: International Conference on Neuro-Symbolic Systems","start_date":"2025-05-28"},"alternative_title":["PMLR"],"date_created":"2025-09-07T22:01:34Z","title":"Logic gate neural networks are good for verification","oa_version":"Published Version","OA_type":"diamond","arxiv":1,"publication_identifier":{"eissn":["2640-3498"]},"volume":288,"author":[{"id":"faff3c84-23f6-11ef-9085-e5187b51c604","first_name":"Fabian","last_name":"Kresse","full_name":"Kresse, Fabian"},{"first_name":"Emily","id":"20aa2ae8-f2f1-11ed-bbfa-8205053f1342","full_name":"Yu, Emily","last_name":"Yu"},{"last_name":"Lampert","full_name":"Lampert, Christoph","orcid":"0000-0001-8622-7887","first_name":"Christoph","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87"},{"first_name":"Thomas A","id":"40876CD8-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0002-2985-7724","full_name":"Henzinger, Thomas A","last_name":"Henzinger"}],"project":[{"name":"Vigilant Algorithmic Monitoring of Software","_id":"62781420-2b32-11ec-9570-8d9b63373d4d","call_identifier":"H2020","grant_number":"101020093"}],"department":[{"_id":"ChLa"},{"_id":"ToHe"}],"month":"06","acknowledgement":"This work is supported in part by the ERC grant under Grant No. ERC-2020-AdG 101020093 and\r\nthe Austrian Science Fund (FWF) [10.55776/COE12]. 
This research was supported by the Scientific\r\nService Units (SSU) of ISTA through resources provided by Scientific Computing (SciComp).","oa":1,"corr_author":"1","article_processing_charge":"No","publisher":"ML Research Press"},{"doi":"10.1109/CVPRW67362.2025.00071","oa":1,"department":[{"_id":"ChLa"}],"month":"06","author":[{"first_name":"Bernd","id":"2D561D42-C427-11E9-89B4-9C1AE6697425","last_name":"Prach","full_name":"Prach, Bernd"},{"first_name":"Christoph","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0001-8622-7887","last_name":"Lampert","full_name":"Lampert, Christoph"}],"publisher":"IEEE","article_processing_charge":"No","corr_author":"1","related_material":{"record":[{"id":"18874","status":"public","relation":"earlier_version"}]},"publication_status":"published","conference":{"name":"CVPR: Conference on Computer Vision and Pattern Recognition","start_date":"2025-06-11","end_date":"2025-06-12","location":"Nashville, TN, United States"},"publication_identifier":{"eissn":["2160-7516"],"issn":["2160-7508"],"isbn":["9798331599942"]},"arxiv":1,"OA_type":"green","oa_version":"Preprint","date_created":"2025-10-12T22:01:26Z","title":"Intriguing properties of robust classification","publication":"2025 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops","date_updated":"2025-10-13T07:18:26Z","status":"public","page":"660-669","scopus_import":"1","quality_controlled":"1","type":"conference","external_id":{"arxiv":["2412.04245"]},"abstract":[{"lang":"eng","text":"Despite extensive research since the community learned about adversarial examples 10 years ago, we still do not know how to train high-accuracy classifiers that are guaranteed to be robust to small perturbations of their inputs. Previous works often argued that this might be because no classifier exists that is robust and accurate at the same time. However, in computer vision this assumption does not match reality, where humans are usually accurate and robust on most tasks of interest. We offer an alternative explanation and show that in certain settings robust generalization is only possible with unrealistically large amounts of data. Specifically, we find a setting where a robust classifier exists, it is easy to learn an accurate classifier, yet it requires an exponential amount of data to learn a robust classifier. Based on this theoretical result, we evaluate the influence of the amount of training data on datasets such as CIFAR10. Our findings indicate that the amount of training data is the main factor determining the robust performance. Furthermore, we show that there are low magnitude directions in the data which are useful for non-robust generalization but are not available for robust classifiers. This implies that robust classification is a strictly harder task than normal classification, thereby providing an explanation why robust classification requires more data."}],"_id":"20455","language":[{"iso":"eng"}],"day":"15","citation":{"ieee":"B. Prach and C. Lampert, “Intriguing properties of robust classification,” in <i>2025 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops</i>, Nashville, TN, United States, 2025, pp. 660–669.","short":"B. Prach, C. Lampert, in:, 2025 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, IEEE, 2025, pp. 660–669.","mla":"Prach, Bernd, and Christoph Lampert. “Intriguing Properties of Robust Classification.” <i>2025 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops</i>, IEEE, 2025, pp. 
660–69, doi:<a href=\"https://doi.org/10.1109/CVPRW67362.2025.00071\">10.1109/CVPRW67362.2025.00071</a>.","apa":"Prach, B., &#38; Lampert, C. (2025). Intriguing properties of robust classification. In <i>2025 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops</i> (pp. 660–669). Nashville, TN, United States: IEEE. <a href=\"https://doi.org/10.1109/CVPRW67362.2025.00071\">https://doi.org/10.1109/CVPRW67362.2025.00071</a>","ista":"Prach B, Lampert C. 2025. Intriguing properties of robust classification. 2025 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops. CVPR: Conference on Computer Vision and Pattern Recognition, 660–669.","ama":"Prach B, Lampert C. Intriguing properties of robust classification. In: <i>2025 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops</i>. IEEE; 2025:660-669. doi:<a href=\"https://doi.org/10.1109/CVPRW67362.2025.00071\">10.1109/CVPRW67362.2025.00071</a>","chicago":"Prach, Bernd, and Christoph Lampert. “Intriguing Properties of Robust Classification.” In <i>2025 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops</i>, 660–69. IEEE, 2025. <a href=\"https://doi.org/10.1109/CVPRW67362.2025.00071\">https://doi.org/10.1109/CVPRW67362.2025.00071</a>."},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","year":"2025","main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2412.04245","open_access":"1"}],"OA_place":"repository","date_published":"2025-06-15T00:00:00Z"},{"conference":{"end_date":"2025-07-19","location":"Vancouver, Canada","name":"ICML: International Conference on Machine Learning","start_date":"2025-07-13"},"publication_status":"published","file":[{"file_name":"2025_ICML_Scott.pdf","checksum":"815b32b463023ca21e569c2158745c15","file_id":"20829","creator":"dernst","date_updated":"2025-12-16T12:38:29Z","relation":"main_file","access_level":"open_access","date_created":"2025-12-16T12:38:29Z","file_size":746612,"content_type":"application/pdf","success":1}],"arxiv":1,"publication_identifier":{"eissn":["2640-3498"]},"alternative_title":["PMLR"],"date_created":"2025-12-14T23:02:05Z","title":"Differentially private federated k-means clustering with server-side data","OA_type":"gold","oa_version":"Published Version","oa":1,"acknowledgement":"This research was funded in part by the Austrian Science Fund (FWF) [10.55776/COE12] and supported by the Scientific Service Units (SSU) of ISTA through resources provided by Scientific Computing (SciComp).\r\n","volume":267,"author":[{"id":"e499926b-f6e0-11ea-865d-9c63db0031e8","first_name":"Jonathan A","last_name":"Scott","full_name":"Scott, Jonathan A"},{"last_name":"Lampert","full_name":"Lampert, Christoph","orcid":"0000-0001-8622-7887","first_name":"Christoph","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87"},{"first_name":"David","id":"f8e48cf0-b0ff-11ed-b0e9-b4c35598f964","full_name":"Saulpic, David","last_name":"Saulpic"}],"month":"05","department":[{"_id":"ChLa"},{"_id":"MoHe"}],"article_processing_charge":"No","tmp":{"short":"CC BY (4.0)","image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)"},"corr_author":"1","publisher":"ML Research Press","related_material":{"record":[{"status":"public","id":"21198","relation":"dissertation_contains"}]},"external_id":{"arxiv":["2506.05408"]},"_id":"20819","language":[{"iso":"eng"}],"abstract":[{"lang":"eng","text":"Clustering is a cornerstone of data analysis that is 
particularly suited to identifying coherent subgroups or substructures in unlabeled data, which are generated continuously in large amounts these days. However, in many cases traditional clustering methods are not applicable, because data are increasingly being produced and stored in a distributed way, e.g. on edge devices, and privacy concerns prevent them from being transferred to a central server. To address this challenge, we present FedDP-KMeans, a new algorithm for k-means clustering that is fully federated as well as differentially private. Our approach leverages (potentially small and out-of-distribution) server-side data to overcome the primary challenge of differentially private clustering methods: the need for a good initialization. Combining our initialization with a simple federated DP-Lloyds algorithm, we obtain an algorithm that achieves excellent results on synthetic and real-world benchmark tasks. We also provide a theoretical analysis of our method that provides bounds on the convergence speed and cluster identification success."}],"quality_controlled":"1","type":"conference","citation":{"chicago":"Scott, Jonathan A, Christoph Lampert, and David Saulpic. “Differentially Private Federated K-Means Clustering with Server-Side Data.” In <i>42nd International Conference on Machine Learning</i>, 267:53757–90. ML Research Press, 2025.","ista":"Scott JA, Lampert C, Saulpic D. 2025. Differentially private federated k-means clustering with server-side data. 42nd International Conference on Machine Learning. ICML: International Conference on Machine Learning, PMLR, vol. 267, 53757–53790.","ama":"Scott JA, Lampert C, Saulpic D. Differentially private federated k-means clustering with server-side data. In: <i>42nd International Conference on Machine Learning</i>. Vol 267. ML Research Press; 2025:53757-53790.","apa":"Scott, J. A., Lampert, C., &#38; Saulpic, D. (2025). Differentially private federated k-means clustering with server-side data. In <i>42nd International Conference on Machine Learning</i> (Vol. 267, pp. 53757–53790). Vancouver, Canada: ML Research Press.","ieee":"J. A. Scott, C. Lampert, and D. Saulpic, “Differentially private federated k-means clustering with server-side data,” in <i>42nd International Conference on Machine Learning</i>, Vancouver, Canada, 2025, vol. 267, pp. 53757–53790.","short":"J.A. Scott, C. Lampert, D. Saulpic, in:, 42nd International Conference on Machine Learning, ML Research Press, 2025, pp. 53757–53790.","mla":"Scott, Jonathan A., et al. “Differentially Private Federated K-Means Clustering with Server-Side Data.” <i>42nd International Conference on Machine Learning</i>, vol. 267, ML Research Press, 2025, pp. 
53757–90."},"day":"01","has_accepted_license":"1","intvolume":"       267","ddc":["000"],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","year":"2025","OA_place":"publisher","date_published":"2025-05-01T00:00:00Z","file_date_updated":"2025-12-16T12:38:29Z","date_updated":"2026-04-07T11:46:11Z","publication":"42nd International Conference on Machine Learning","acknowledged_ssus":[{"_id":"ScienComp"}],"page":"53757-53790","status":"public","scopus_import":"1"},{"related_material":{"record":[{"relation":"dissertation_contains","id":"21198","status":"public"}]},"corr_author":"1","tmp":{"short":"CC BY (4.0)","image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)"},"article_processing_charge":"No","status":"public","publication":"arXiv","date_updated":"2026-04-07T11:46:11Z","author":[{"first_name":"Hossein","id":"653bd8b6-f394-11eb-9cf6-c0bbf6cd78d4","orcid":"0009-0007-3977-6462","full_name":"Zakerinia, Hossein","last_name":"Zakerinia"},{"last_name":"Scott","full_name":"Scott, Jonathan A","id":"e499926b-f6e0-11ea-865d-9c63db0031e8","first_name":"Jonathan A"},{"last_name":"Lampert","full_name":"Lampert, Christoph","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","first_name":"Christoph","orcid":"0000-0001-8622-7887"}],"month":"05","department":[{"_id":"ChLa"}],"oa":1,"doi":"10.48550/ARXIV.2505.15579","date_published":"2025-05-21T00:00:00Z","title":"Federated learning with unlabeled clients: Personalization can happen in low dimensions","date_created":"2026-02-10T08:20:59Z","OA_place":"repository","oa_version":"Preprint","main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2505.15579","open_access":"1"}],"year":"2025","user_id":"8b945eb4-e2f2-11eb-945a-df72226e66a9","day":"21","citation":{"chicago":"Zakerinia, Hossein, Jonathan A Scott, and Christoph Lampert. “Federated Learning with Unlabeled Clients: Personalization Can Happen in Low Dimensions.” <i>ArXiv</i>, n.d. <a href=\"https://doi.org/10.48550/ARXIV.2505.15579\">https://doi.org/10.48550/ARXIV.2505.15579</a>.","ama":"Zakerinia H, Scott JA, Lampert C. Federated learning with unlabeled clients: Personalization can happen in low dimensions. <i>arXiv</i>. doi:<a href=\"https://doi.org/10.48550/ARXIV.2505.15579\">10.48550/ARXIV.2505.15579</a>","ista":"Zakerinia H, Scott JA, Lampert C. Federated learning with unlabeled clients: Personalization can happen in low dimensions. arXiv, <a href=\"https://doi.org/10.48550/ARXIV.2505.15579\">10.48550/ARXIV.2505.15579</a>.","apa":"Zakerinia, H., Scott, J. A., &#38; Lampert, C. (n.d.). Federated learning with unlabeled clients: Personalization can happen in low dimensions. <i>arXiv</i>. <a href=\"https://doi.org/10.48550/ARXIV.2505.15579\">https://doi.org/10.48550/ARXIV.2505.15579</a>","mla":"Zakerinia, Hossein, et al. “Federated Learning with Unlabeled Clients: Personalization Can Happen in Low Dimensions.” <i>ArXiv</i>, doi:<a href=\"https://doi.org/10.48550/ARXIV.2505.15579\">10.48550/ARXIV.2505.15579</a>.","short":"H. Zakerinia, J.A. Scott, C. Lampert, ArXiv (n.d.).","ieee":"H. Zakerinia, J. A. Scott, and C. Lampert, “Federated learning with unlabeled clients: Personalization can happen in low dimensions,” <i>arXiv</i>."},"_id":"21207","language":[{"iso":"eng"}],"abstract":[{"lang":"eng","text":"Personalized federated learning has emerged as a popular approach to training on devices holding statistically heterogeneous data, known as clients. 
However, most existing approaches require a client to have labeled data for training or finetuning in order to obtain its own personalized model. In this paper, we address this by proposing FLowDUP, a novel method that is able to generate a personalized model using only a forward pass with unlabeled data. The generated model parameters reside in a low-dimensional subspace, enabling efficient communication and computation. FLowDUP's learning objective is theoretically motivated by our new transductive multi-task PAC-Bayesian generalization bound, which provides performance guarantees for unlabeled clients. The objective is structured in such a way that it allows both clients with labeled data and clients with only unlabeled data to contribute to the training process. To supplement our theoretical results, we carry out a thorough experimental evaluation of FLowDUP, demonstrating strong empirical performance on a range of datasets with differing sorts of statistically heterogeneous clients. Through numerous ablation studies, we test the efficacy of the individual components of the method."}],"type":"preprint","publication_status":"draft"},{"status":"public","page":"58122-58139","scopus_import":"1","date_updated":"2024-10-01T09:30:03Z","publication":"Proceedings of the 41st International Conference on Machine Learning","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","year":"2024","intvolume":"       235","main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2402.04054"}],"date_published":"2024-09-01T00:00:00Z","quality_controlled":"1","type":"conference","external_id":{"arxiv":["2402.04054"]},"language":[{"iso":"eng"}],"_id":"18118","abstract":[{"text":"We introduce a new framework for studying meta-learning methods using PAC-Bayesian theory. Its main advantage over previous work is that it allows for more flexibility in how the transfer of knowledge between tasks is realized. For previous approaches, this could only happen indirectly, by means of learning prior distributions over models. In contrast, the new generalization bounds that we prove express the process of meta-learning much more directly as learning the learning algorithm that should be used for future tasks. The flexibility of our framework makes it suitable to analyze a wide range of meta-learning mechanisms and even design new mechanisms. Other than our theoretical contributions, we also show empirically that our framework improves the prediction quality in practical meta-learning mechanisms.","lang":"eng"}],"citation":{"mla":"Zakerinia, Hossein, et al. “More Flexible PAC-Bayesian Meta-Learning by Learning Learning Algorithms.” <i>Proceedings of the 41st International Conference on Machine Learning</i>, vol. 235, ML Research Press, 2024, pp. 58122–39.","ieee":"H. Zakerinia, A. Behjati, and C. Lampert, “More flexible PAC-Bayesian meta-learning by learning learning algorithms,” in <i>Proceedings of the 41st International Conference on Machine Learning</i>, Vienna, Austria, 2024, vol. 235, pp. 58122–58139.","short":"H. Zakerinia, A. Behjati, C. Lampert, in:, Proceedings of the 41st International Conference on Machine Learning, ML Research Press, 2024, pp. 58122–58139.","apa":"Zakerinia, H., Behjati, A., &#38; Lampert, C. (2024). More flexible PAC-Bayesian meta-learning by learning learning algorithms. In <i>Proceedings of the 41st International Conference on Machine Learning</i> (Vol. 235, pp. 58122–58139). Vienna, Austria: ML Research Press.","ama":"Zakerinia H, Behjati A, Lampert C. 
More flexible PAC-Bayesian meta-learning by learning learning algorithms. In: <i>Proceedings of the 41st International Conference on Machine Learning</i>. Vol 235. ML Research Press; 2024:58122-58139.","ista":"Zakerinia H, Behjati A, Lampert C. 2024. More flexible PAC-Bayesian meta-learning by learning learning algorithms. Proceedings of the 41st International Conference on Machine Learning. ICML: International Conference on Machine Learning, PMLR, vol. 235, 58122–58139.","chicago":"Zakerinia, Hossein, Amin Behjati, and Christoph Lampert. “More Flexible PAC-Bayesian Meta-Learning by Learning Learning Algorithms.” In <i>Proceedings of the 41st International Conference on Machine Learning</i>, 235:58122–39. ML Research Press, 2024."},"day":"01","publisher":"ML Research Press","article_processing_charge":"No","corr_author":"1","oa":1,"month":"09","department":[{"_id":"ChLa"}],"volume":235,"author":[{"id":"653bd8b6-f394-11eb-9cf6-c0bbf6cd78d4","first_name":"Hossein","last_name":"Zakerinia","full_name":"Zakerinia, Hossein"},{"full_name":"Behjati, Amin","last_name":"Behjati","first_name":"Amin"},{"orcid":"0000-0001-8622-7887","first_name":"Christoph","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","full_name":"Lampert, Christoph","last_name":"Lampert"}],"publication_identifier":{"eissn":["2640-3498"]},"arxiv":1,"oa_version":"Published Version","date_created":"2024-09-22T22:01:45Z","title":"More flexible PAC-Bayesian meta-learning by learning learning algorithms","alternative_title":["PMLR"],"publication_status":"published","conference":{"name":"ICML: International Conference on Machine Learning","start_date":"2024-07-21","end_date":"2024-07-27","location":"Vienna, Austria"}},{"publication_status":"published","file":[{"date_updated":"2025-01-20T08:41:10Z","file_id":"18857","creator":"dernst","relation":"main_file","checksum":"b82413f00398ffb5168e8e747571a98d","file_name":"2024_JourSpatialInfoScience_Lutsai.pdf","content_type":"application/pdf","success":1,"date_created":"2025-01-20T08:41:10Z","file_size":7250655,"access_level":"open_access"}],"publication_identifier":{"eissn":["1948-660X"]},"OA_type":"gold","oa_version":"Published Version","date_created":"2025-01-19T23:01:53Z","title":"Predicting the geolocation of tweets using transformer models on customized data","oa":1,"doi":"10.5311/JOSIS.2024.29.295","acknowledgement":"The authors acknowledge the Institute of Science and Technology (ISTA) for their material support and for granting access to the Twitter database archive, which was essential for the research.","month":"12","department":[{"_id":"ChLa"}],"article_type":"original","author":[{"full_name":"Lutsai, Kateryna","last_name":"Lutsai","first_name":"Kateryna"},{"last_name":"Lampert","full_name":"Lampert, Christoph","orcid":"0000-0001-8622-7887","first_name":"Christoph","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87"}],"publisher":"University of Maine","article_processing_charge":"Yes","corr_author":"1","tmp":{"name":"Creative Commons Attribution 3.0 Unported (CC BY 3.0)","legal_code_url":"https://creativecommons.org/licenses/by/3.0/legalcode","image":"/images/cc_by.png","short":"CC BY (3.0)"},"related_material":{"link":[{"relation":"software","url":"https://github.com/K4TEL/geo-twitter.git"}]},"quality_controlled":"1","type":"journal_article","language":[{"iso":"eng"}],"_id":"18856","abstract":[{"text":"This research aims to solve the tweet/user geolocation prediction task and provide a flexible methodology for the geo-tagging of textual big data. 
The suggested approach uses neural networks for natural language processing (NLP) to estimate the location as coordinate pairs (longitude, latitude) and as two-dimensional Gaussian Mixture Models (GMMs). The proposed models have been finetuned on a Twitter dataset using pretrained Bidirectional Encoder Representations from Transformers (BERT) as base models. Performance metrics show a median error of less than 30 km on the worldwide-level dataset and less than 15 km on the US-level datasets for the models trained and evaluated on text features of tweets' content and metadata context. Our source code and data are available at https://github.com/K4TEL/geo-twitter.git.","lang":"eng"}],"day":"26","citation":{"apa":"Lutsai, K., &#38; Lampert, C. (2024). Predicting the geolocation of tweets using transformer models on customized data. <i>Journal of Spatial Information Science</i>. University of Maine. <a href=\"https://doi.org/10.5311/JOSIS.2024.29.295\">https://doi.org/10.5311/JOSIS.2024.29.295</a>","mla":"Lutsai, Kateryna, and Christoph Lampert. “Predicting the Geolocation of Tweets Using Transformer Models on Customized Data.” <i>Journal of Spatial Information Science</i>, no. 29, University of Maine, 2024, pp. 69–99, doi:<a href=\"https://doi.org/10.5311/JOSIS.2024.29.295\">10.5311/JOSIS.2024.29.295</a>.","ieee":"K. Lutsai and C. Lampert, “Predicting the geolocation of tweets using transformer models on customized data,” <i>Journal of Spatial Information Science</i>, no. 29. University of Maine, pp. 69–99, 2024.","short":"K. Lutsai, C. Lampert, Journal of Spatial Information Science (2024) 69–99.","chicago":"Lutsai, Kateryna, and Christoph Lampert. “Predicting the Geolocation of Tweets Using Transformer Models on Customized Data.” <i>Journal of Spatial Information Science</i>. University of Maine, 2024. <a href=\"https://doi.org/10.5311/JOSIS.2024.29.295\">https://doi.org/10.5311/JOSIS.2024.29.295</a>.","ama":"Lutsai K, Lampert C. Predicting the geolocation of tweets using transformer models on customized data. <i>Journal of Spatial Information Science</i>. 2024;(29):69-99. doi:<a href=\"https://doi.org/10.5311/JOSIS.2024.29.295\">10.5311/JOSIS.2024.29.295</a>","ista":"Lutsai K, Lampert C. 2024. Predicting the geolocation of tweets using transformer models on customized data. Journal of Spatial Information Science. (29), 69–99."},"has_accepted_license":"1","user_id":"68b8ca59-c5b3-11ee-8790-cd641c68093d","ddc":["500"],"year":"2024","OA_place":"publisher","date_published":"2024-12-26T00:00:00Z","file_date_updated":"2025-01-20T08:41:10Z","date_updated":"2025-06-05T13:47:12Z","publication":"Journal of Spatial Information Science","page":"69-99","status":"public","scopus_import":"1","license":"https://creativecommons.org/licenses/by/3.0/","issue":"29","DOAJ_listed":"1"},{"year":"2024","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","ddc":["000"],"intvolume":"        37","date_published":"2024-12-01T00:00:00Z","OA_place":"publisher","type":"conference","quality_controlled":"1","_id":"18875","language":[{"iso":"eng"}],"abstract":[{"text":"Current state-of-the-art methods for differentially private model training are based on matrix factorization techniques. However, these methods suffer from high computational overhead because they require numerically solving a demanding optimization problem to determine an approximately optimal factorization prior to the actual model training. In this work, we present a new matrix factorization approach, BSR, which overcomes this computational bottleneck. 
By exploiting properties of the standard matrix square root, BSR makes it possible to handle even large-scale problems efficiently. For the key scenario of stochastic gradient descent with momentum and weight decay, we even derive analytical expressions for BSR that render the computational overhead negligible. We prove bounds on the approximation quality that hold both in the centralized and in the federated learning setting. Our numerical experiments demonstrate that models trained using BSR perform on par with the best existing methods, while completely avoiding their computational overhead.","lang":"eng"}],"external_id":{"arxiv":["2405.13763"]},"has_accepted_license":"1","citation":{"apa":"Kalinin, N., &#38; Lampert, C. (2024). Banded square root matrix factorization for differentially private model training. In <i>38th Annual Conference on Neural Information Processing Systems</i> (Vol. 37). Vancouver, Canada: Neural Information Processing Systems Foundation.","ieee":"N. Kalinin and C. Lampert, “Banded square root matrix factorization for differentially private model training,” in <i>38th Annual Conference on Neural Information Processing Systems</i>, Vancouver, Canada, 2024, vol. 37.","short":"N. Kalinin, C. Lampert, in:, 38th Annual Conference on Neural Information Processing Systems, Neural Information Processing Systems Foundation, 2024.","mla":"Kalinin, Nikita, and Christoph Lampert. “Banded Square Root Matrix Factorization for Differentially Private Model Training.” <i>38th Annual Conference on Neural Information Processing Systems</i>, vol. 37, Neural Information Processing Systems Foundation, 2024.","chicago":"Kalinin, Nikita, and Christoph Lampert. “Banded Square Root Matrix Factorization for Differentially Private Model Training.” In <i>38th Annual Conference on Neural Information Processing Systems</i>, Vol. 37. Neural Information Processing Systems Foundation, 2024.","ista":"Kalinin N, Lampert C. 2024. Banded square root matrix factorization for differentially private model training. 38th Annual Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems, Advances in Neural Information Processing Systems, vol. 37.","ama":"Kalinin N, Lampert C. Banded square root matrix factorization for differentially private model training. In: <i>38th Annual Conference on Neural Information Processing Systems</i>. Vol 37. 
Neural Information Processing Systems Foundation; 2024."},"day":"01","status":"public","scopus_import":"1","file_date_updated":"2025-01-27T09:52:15Z","publication":"38th Annual Conference on Neural Information Processing Systems","date_updated":"2025-05-14T11:34:20Z","publication_identifier":{"eissn":["1049-5258"]},"arxiv":1,"oa_version":"Published Version","OA_type":"gold","date_created":"2025-01-24T17:58:16Z","title":"Banded square root matrix factorization for differentially private model training","alternative_title":["Advances in Neural Information Processing Systems"],"conference":{"start_date":"2024-12-16","name":"NeurIPS: Neural Information Processing Systems","location":"Vancouver, Canada","end_date":"2024-12-16"},"publication_status":"published","file":[{"file_name":"2024_NeurIPS_Nikita.pdf","checksum":"a216cab8eddc1fe7840aede0e2c0d41e","creator":"dernst","relation":"main_file","date_updated":"2025-01-27T09:52:15Z","file_id":"18888","access_level":"open_access","date_created":"2025-01-27T09:52:15Z","file_size":1144656,"content_type":"application/pdf","success":1}],"publisher":"Neural Information Processing Systems Foundation","tmp":{"short":"CC BY (4.0)","image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)"},"corr_author":"1","article_processing_charge":"No","oa":1,"department":[{"_id":"GradSch"},{"_id":"ChLa"}],"month":"12","volume":37,"author":[{"first_name":"Nikita","id":"4b14526e-14d2-11ed-ba64-c14c9553d137","full_name":"Kalinin, Nikita","last_name":"Kalinin"},{"first_name":"Christoph","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0001-8622-7887","last_name":"Lampert","full_name":"Lampert, Christoph"}]},{"publication_status":"published","conference":{"name":"NeurIPS: Neural Information Processing Systems","start_date":"2024-12-16","end_date":"2024-12-16","location":"Vancouver, Canada"},"file":[{"success":1,"content_type":"application/pdf","date_created":"2025-02-04T08:11:25Z","file_size":1784118,"access_level":"open_access","relation":"main_file","creator":"dernst","file_id":"18989","date_updated":"2025-02-04T08:11:25Z","checksum":"b7b79f1ea3ac1e9e11b3d91faaeb0780","file_name":"2024_NeurIPS_Sukenik.pdf"}],"arxiv":1,"date_created":"2025-01-27T11:15:18Z","title":"Neural collapse versus low-rank bias: Is deep neural collapse really optimal?","alternative_title":["Advances in Neural Information Processing Systems"],"OA_type":"gold","oa_version":"Published Version","oa":1,"acknowledgement":"Marco Mondelli is partially supported by the 2019 Lopez-Loreta prize. 
This research was supported by the Scientific Service Units (SSU) of ISTA through resources provided by Scientific Computing (SciComp).","volume":37,"author":[{"id":"d64d6a8d-eb8e-11eb-b029-96fd216dec3c","first_name":"Peter","full_name":"Súkeník, Peter","last_name":"Súkeník"},{"full_name":"Lampert, Christoph","last_name":"Lampert","orcid":"0000-0001-8622-7887","first_name":"Christoph","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87"},{"orcid":"0000-0002-3242-7020","first_name":"Marco","id":"27EB676C-8706-11E9-9510-7717E6697425","full_name":"Mondelli, Marco","last_name":"Mondelli"}],"department":[{"_id":"GradSch"},{"_id":"MaMo"},{"_id":"ChLa"}],"month":"12","project":[{"name":"Prix Lopez-Loretta 2019 - Marco Mondelli","_id":"059876FA-7A3F-11EA-A408-12923DDC885E"}],"article_processing_charge":"No","tmp":{"short":"CC BY (4.0)","image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)"},"corr_author":"1","publisher":"Neural Information Processing Systems Foundation","external_id":{"arxiv":["2405.14468"]},"abstract":[{"text":"Deep neural networks (DNNs) exhibit a surprising structure in their final layer\r\nknown as neural collapse (NC), and a growing body of work has recently investigated the propagation of neural collapse to earlier layers of DNNs – a phenomenon\r\ncalled deep neural collapse (DNC). However, existing theoretical results are restricted to special cases: linear models, only two layers or binary classification.\r\nIn contrast, we focus on non-linear models of arbitrary depth in multi-class classification and reveal a surprising qualitative shift. As soon as we go beyond two\r\nlayers or two classes, DNC stops being optimal for the deep unconstrained features\r\nmodel (DUFM) – the standard theoretical framework for the analysis of collapse.\r\nThe main culprit is a low-rank bias of multi-layer regularization schemes: this bias\r\nleads to optimal solutions of even lower rank than the neural collapse. We support\r\nour theoretical findings with experiments on both DUFM and real data, which show\r\nthe emergence of the low-rank structure in the solution found by gradient descent.","lang":"eng"}],"_id":"18891","language":[{"iso":"eng"}],"quality_controlled":"1","type":"conference","citation":{"chicago":"Súkeník, Peter, Christoph Lampert, and Marco Mondelli. “Neural Collapse versus Low-Rank Bias: Is Deep Neural Collapse Really Optimal?” In <i>38th Annual Conference on Neural Information Processing Systems</i>, Vol. 37. Neural Information Processing Systems Foundation, 2024.","ista":"Súkeník P, Lampert C, Mondelli M. 2024. Neural collapse versus low-rank bias: Is deep neural collapse really optimal? 38th Annual Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems, Advances in Neural Information Processing Systems, vol. 37.","ama":"Súkeník P, Lampert C, Mondelli M. Neural collapse versus low-rank bias: Is deep neural collapse really optimal? In: <i>38th Annual Conference on Neural Information Processing Systems</i>. Vol 37. Neural Information Processing Systems Foundation; 2024.","apa":"Súkeník, P., Lampert, C., &#38; Mondelli, M. (2024). Neural collapse versus low-rank bias: Is deep neural collapse really optimal? In <i>38th Annual Conference on Neural Information Processing Systems</i> (Vol. 37). Vancouver, Canada: Neural Information Processing Systems Foundation.","ieee":"P. Súkeník, C. Lampert, and M. 
Mondelli, “Neural collapse versus low-rank bias: Is deep neural collapse really optimal?,” in <i>38th Annual Conference on Neural Information Processing Systems</i>, Vancouver, Canada, 2024, vol. 37.","short":"P. Súkeník, C. Lampert, M. Mondelli, in:, 38th Annual Conference on Neural Information Processing Systems, Neural Information Processing Systems Foundation, 2024.","mla":"Súkeník, Peter, et al. “Neural Collapse versus Low-Rank Bias: Is Deep Neural Collapse Really Optimal?” <i>38th Annual Conference on Neural Information Processing Systems</i>, vol. 37, Neural Information Processing Systems Foundation, 2024."},"day":"01","has_accepted_license":"1","intvolume":"        37","ddc":["000"],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","year":"2024","OA_place":"publisher","date_published":"2024-12-01T00:00:00Z","file_date_updated":"2025-02-04T08:11:25Z","publication":"38th Annual Conference on Neural Information Processing Systems","date_updated":"2025-06-04T07:19:21Z","acknowledged_ssus":[{"_id":"ScienComp"}],"status":"public"},{"file_date_updated":"2025-02-20T10:11:45Z","acknowledged_ssus":[{"_id":"ScienComp"}],"publication":"arXiv","date_updated":"2025-02-24T12:52:23Z","status":"public","license":"https://creativecommons.org/licenses/by-sa/4.0/","type":"preprint","abstract":[{"text":"Instruction-tuned Large Language Models (LLMs) show impressive results in numerous practical applications, but they lack essential safety features that are common in other areas of computer science, particularly an explicit separation of instructions and data. This makes them vulnerable to manipulations such as indirect prompt injections and generally unsuitable for safety-critical tasks. Surprisingly, there is currently no established definition or benchmark to quantify this phenomenon. In this work, we close this gap by introducing a formal measure for instruction-data separation and an empirical variant that is calculable from a model's outputs. We also present a new dataset, SEP, that allows estimating the measure for real-world models. Our results on various LLMs show that the problem of instruction-data separation is real: all models fail to achieve high separation, and canonical mitigation techniques, such as prompt engineering and fine-tuning, either fail to substantially improve separation or reduce model utility. The source code and SEP dataset are openly accessible at https://github.com/egozverev/Shold-It-Be-Executed-Or-Processed.\r\n","lang":"eng"}],"_id":"19063","language":[{"iso":"eng"}],"external_id":{"arxiv":["2403.06833"]},"has_accepted_license":"1","citation":{"apa":"Zverev, E., Abdelnabi, S., Tabesh, S., Fritz, M., &#38; Lampert, C. (2024). Can LLMs separate instructions from data? And what do we even mean by that? <i>arXiv</i>. <a href=\"https://doi.org/10.48550/arXiv.2403.06833\">https://doi.org/10.48550/arXiv.2403.06833</a>","ieee":"E. Zverev, S. Abdelnabi, S. Tabesh, M. Fritz, and C. Lampert, “Can LLMs separate instructions from data? And what do we even mean by that?,” <i>arXiv</i>. 2024.","short":"E. Zverev, S. Abdelnabi, S. Tabesh, M. Fritz, C. Lampert, ArXiv (2024).","mla":"Zverev, Egor, et al. “Can LLMs Separate Instructions from Data? And What Do We Even Mean by That?” <i>ArXiv</i>, 2403.06833, 2024, doi:<a href=\"https://doi.org/10.48550/arXiv.2403.06833\">10.48550/arXiv.2403.06833</a>.","chicago":"Zverev, Egor, Sahar Abdelnabi, Soroush Tabesh, Mario Fritz, and Christoph Lampert. “Can LLMs Separate Instructions from Data? 
And What Do We Even Mean by That?” <i>ArXiv</i>, 2024. <a href=\"https://doi.org/10.48550/arXiv.2403.06833\">https://doi.org/10.48550/arXiv.2403.06833</a>.","ista":"Zverev E, Abdelnabi S, Tabesh S, Fritz M, Lampert C. 2024. Can LLMs separate instructions from data? And what do we even mean by that? arXiv, 2403.06833.","ama":"Zverev E, Abdelnabi S, Tabesh S, Fritz M, Lampert C. Can LLMs separate instructions from data? And what do we even mean by that? <i>arXiv</i>. 2024. doi:<a href=\"https://doi.org/10.48550/arXiv.2403.06833\">10.48550/arXiv.2403.06833</a>"},"day":"01","year":"2024","ddc":["000"],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","article_number":"2403.06833","main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2403.06833","open_access":"1"}],"date_published":"2024-03-01T00:00:00Z","OA_place":"repository","acknowledgement":"The authors would like to sincerely thank Juan Rocamonde for valuable feedback to our manuscript. We acknowledge the support from the Scientific Service Units (SSU) of ISTA through resources provided by Scientific Computing (SciComp). We thank Dan Alistarh for providing us with computational resources. This work was partially funded by the German Federal Ministry of Education and Research (BMBF) under the grant AIgenCY (16KIS2012) and ELSA – European Lighthouse on Secure and Safe AI funded by the European Union under grant agreement No. 101070617. Views and opinions expressed are however those of the authors only and do not necessarily reflect those of the European Union or European Commission. Neither the European Union nor the European Commission can be held responsible for them.","doi":"10.48550/arXiv.2403.06833","oa":1,"month":"03","department":[{"_id":"GradSch"},{"_id":"ChLa"}],"author":[{"last_name":"Zverev","full_name":"Zverev, Egor","first_name":"Egor","id":"05162b19-1340-11ed-8f02-fa94e0e8c3bc"},{"first_name":"Sahar","full_name":"Abdelnabi, Sahar","last_name":"Abdelnabi"},{"last_name":"Tabesh","full_name":"Tabesh, Soroush","first_name":"Soroush","id":"06000900-6068-11ef-8d61-c2472ef2e752","orcid":"0009-0003-4119-6281"},{"full_name":"Fritz, Mario","last_name":"Fritz","first_name":"Mario"},{"full_name":"Lampert, Christoph","last_name":"Lampert","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","first_name":"Christoph","orcid":"0000-0001-8622-7887"}],"corr_author":"1","tmp":{"image":"/images/cc_by_sa.png","short":"CC BY-SA (4.0)","name":"Creative Commons Attribution-ShareAlike 4.0 International Public License (CC BY-SA 4.0)","legal_code_url":"https://creativecommons.org/licenses/by-sa/4.0/legalcode"},"article_processing_charge":"No","related_material":{"link":[{"url":" https://github.com/egozverev/Shold-It-Be-Executed-Or-Processed","relation":"software"}]},"publication_status":"published","file":[{"access_level":"open_access","date_created":"2025-02-20T10:11:45Z","file_size":530972,"content_type":"application/pdf","success":1,"file_name":"2403.06833v3.pdf","checksum":"35eb43968684b87be59144603ef10af0","creator":"ezverev","file_id":"19064","relation":"main_file","date_updated":"2025-02-20T10:11:45Z"}],"arxiv":1,"oa_version":"Preprint","OA_type":"green","date_created":"2025-02-20T10:13:42Z","title":"Can LLMs separate instructions from data? 
And what do we even mean by that?"},{"publication_status":"published","file":[{"date_updated":"2025-03-20T09:02:18Z","creator":"dernst","file_id":"19426","relation":"main_file","checksum":"0714e12f7423cd098976ed9974561155","file_name":"2024_TMLR_Verwimp.pdf","content_type":"application/pdf","success":1,"date_created":"2025-03-20T09:02:18Z","file_size":1367966,"access_level":"open_access"}],"arxiv":1,"publication_identifier":{"eissn":["2835-8856"]},"date_created":"2025-03-16T23:01:25Z","title":"Continual learning: Applications and the road forward","alternative_title":["TMLR"],"OA_type":"diamond","oa_version":"Published Version","oa":1,"author":[{"first_name":"Eli","full_name":"Verwimp, Eli","last_name":"Verwimp"},{"first_name":"Rahaf","last_name":"Aljundi","full_name":"Aljundi, Rahaf"},{"first_name":"Shai","last_name":"Ben-David","full_name":"Ben-David, Shai"},{"full_name":"Bethge, Matthias","last_name":"Bethge","first_name":"Matthias"},{"full_name":"Cossu, Andrea","last_name":"Cossu","first_name":"Andrea"},{"first_name":"Alexander","last_name":"Gepperth","full_name":"Gepperth, Alexander"},{"first_name":"Tyler L.","last_name":"Hayes","full_name":"Hayes, Tyler L."},{"first_name":"Eyke","full_name":"Hüllermeier, Eyke","last_name":"Hüllermeier"},{"full_name":"Kanan, Christopher","last_name":"Kanan","first_name":"Christopher"},{"last_name":"Kudithipudi","full_name":"Kudithipudi, Dhireesha","first_name":"Dhireesha"},{"last_name":"Lampert","full_name":"Lampert, Christoph","orcid":"0000-0001-8622-7887","first_name":"Christoph","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87"},{"first_name":"Martin","full_name":"Mundt, Martin","last_name":"Mundt"},{"last_name":"Pascanu","full_name":"Pascanu, Razvan","first_name":"Razvan"},{"first_name":"Adrian","last_name":"Popescu","full_name":"Popescu, Adrian"},{"first_name":"Andreas S.","full_name":"Tolias, Andreas S.","last_name":"Tolias"},{"first_name":"Joost","full_name":"Van De Weijer, Joost","last_name":"Van De Weijer"},{"first_name":"Bing","last_name":"Liu","full_name":"Liu, Bing"},{"first_name":"Vincenzo","full_name":"Lomonaco, Vincenzo","last_name":"Lomonaco"},{"full_name":"Tuytelaars, Tinne","last_name":"Tuytelaars","first_name":"Tinne"},{"first_name":"Gido M.","last_name":"Van De Ven","full_name":"Van De Ven, Gido M."}],"volume":2024,"month":"04","department":[{"_id":"ChLa"}],"article_type":"original","article_processing_charge":"No","tmp":{"short":"CC BY (4.0)","image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)"},"publisher":"Transactions on Machine Learning Research","external_id":{"arxiv":["2311.11908"]},"_id":"19408","language":[{"iso":"eng"}],"abstract":[{"text":"Continual learning is a subfield of machine learning, which aims to allow machine learning models to continuously learn on new data, by accumulating knowledge without forgetting what was learned in the past. In this work, we take a step back, and ask: \"Why should one care about continual learning in the first place?\". We set the stage by examining recent continual learning papers published at four major machine learning conferences, and show that memory-constrained settings dominate the field. Then, we discuss five open problems in machine learning, and even though they might seem unrelated to continual learning at first sight, we show that continual learning will inevitably be part of their solution. 
These problems are model editing, personalization and specialization, on-device learning, faster (re-)training and reinforcement learning. Finally, by comparing the desiderata from these unsolved problems and the current assumptions in continual learning, we highlight and discuss four future directions for continual learning research. We hope that this work offers an interesting perspective on the future of continual learning, while displaying its potential value and the paths we have to pursue in order to make it successful. This work is the result of the many discussions the authors had at the Dagstuhl seminar on Deep Continual Learning, in March 2023.","lang":"eng"}],"quality_controlled":"1","type":"journal_article","citation":{"ieee":"E. Verwimp <i>et al.</i>, “Continual learning: Applications and the road forward,” <i>Transactions on Machine Learning Research</i>, vol. 2024. Transactions on Machine Learning Research, 2024.","short":"E. Verwimp, R. Aljundi, S. Ben-David, M. Bethge, A. Cossu, A. Gepperth, T.L. Hayes, E. Hüllermeier, C. Kanan, D. Kudithipudi, C. Lampert, M. Mundt, R. Pascanu, A. Popescu, A.S. Tolias, J. Van De Weijer, B. Liu, V. Lomonaco, T. Tuytelaars, G.M. Van De Ven, Transactions on Machine Learning Research 2024 (2024).","mla":"Verwimp, Eli, et al. “Continual Learning: Applications and the Road Forward.” <i>Transactions on Machine Learning Research</i>, vol. 2024, Transactions on Machine Learning Research, 2024.","apa":"Verwimp, E., Aljundi, R., Ben-David, S., Bethge, M., Cossu, A., Gepperth, A., … Van De Ven, G. M. (2024). Continual learning: Applications and the road forward. <i>Transactions on Machine Learning Research</i>. Transactions on Machine Learning Research.","ista":"Verwimp E, Aljundi R, Ben-David S, Bethge M, Cossu A, Gepperth A, Hayes TL, Hüllermeier E, Kanan C, Kudithipudi D, Lampert C, Mundt M, Pascanu R, Popescu A, Tolias AS, Van De Weijer J, Liu B, Lomonaco V, Tuytelaars T, Van De Ven GM. 2024. Continual learning: Applications and the road forward. Transactions on Machine Learning Research. 2024.","ama":"Verwimp E, Aljundi R, Ben-David S, et al. Continual learning: Applications and the road forward. <i>Transactions on Machine Learning Research</i>. 2024;2024.","chicago":"Verwimp, Eli, Rahaf Aljundi, Shai Ben-David, Matthias Bethge, Andrea Cossu, Alexander Gepperth, Tyler L. Hayes, et al. “Continual Learning: Applications and the Road Forward.” <i>Transactions on Machine Learning Research</i>. 
Transactions on Machine Learning Research, 2024."},"day":"12","has_accepted_license":"1","intvolume":"      2024","ddc":["000"],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","year":"2024","OA_place":"publisher","date_published":"2024-04-12T00:00:00Z","file_date_updated":"2025-03-20T09:02:18Z","publication":"Transactions on Machine Learning Research","date_updated":"2025-03-20T09:21:02Z","status":"public","scopus_import":"1"},{"status":"public","scopus_import":"1","file_date_updated":"2024-08-12T07:38:06Z","acknowledged_ssus":[{"_id":"ScienComp"}],"publication":"12th International Conference on Learning Representations","date_updated":"2026-04-07T11:46:11Z","ddc":["000"],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","year":"2024","date_published":"2024-03-07T00:00:00Z","quality_controlled":"1","type":"conference","external_id":{"arxiv":["2306.05515"]},"abstract":[{"lang":"eng","text":"We present PeFLL, a new personalized federated learning algorithm that improves\r\nover the state-of-the-art in three aspects: 1) it produces more accurate models,\r\nespecially in the low-data regime, and not only for clients present during its\r\ntraining phase, but also for any that may emerge in the future; 2) it reduces the\r\namount of on-client computation and client-server communication by providing\r\nfuture clients with ready-to-use personalized models that require no additional\r\nfinetuning or optimization; 3) it comes with theoretical guarantees that establish\r\ngeneralization from the observed clients to future ones.\r\nAt the core of PeFLL lies a learning-to-learn approach that jointly trains an\r\nembedding network and a hypernetwork. The embedding network is used to\r\nrepresent clients in a latent descriptor space in a way that reflects their similarity\r\nto each other. The hypernetwork takes as input such descriptors and outputs the\r\nparameters of fully personalized client models. In combination, both networks\r\nconstitute a learning algorithm that achieves state-of-the-art performance in several\r\npersonalized federated learning benchmarks"}],"_id":"17411","language":[{"iso":"eng"}],"day":"07","citation":{"mla":"Scott, Jonathan A., et al. “PEFLL: Personalized Federated Learning by Learning to Learn.” <i>12th International Conference on Learning Representations</i>, OpenReview, 2024.","short":"J.A. Scott, H. Zakerinia, C. Lampert, in:, 12th International Conference on Learning Representations, OpenReview, 2024.","ieee":"J. A. Scott, H. Zakerinia, and C. Lampert, “PEFLL: Personalized federated learning by learning to learn,” in <i>12th International Conference on Learning Representations</i>, Vienna, Austria, 2024.","apa":"Scott, J. A., Zakerinia, H., &#38; Lampert, C. (2024). PEFLL: Personalized federated learning by learning to learn. In <i>12th International Conference on Learning Representations</i>. Vienna, Austria: OpenReview.","ama":"Scott JA, Zakerinia H, Lampert C. PEFLL: Personalized federated learning by learning to learn. In: <i>12th International Conference on Learning Representations</i>. OpenReview; 2024.","ista":"Scott JA, Zakerinia H, Lampert C. 2024. PEFLL: Personalized federated learning by learning to learn. 12th International Conference on Learning Representations. ICLR: International Conference on Learning Representations.","chicago":"Scott, Jonathan A, Hossein Zakerinia, and Christoph Lampert. “PEFLL: Personalized Federated Learning by Learning to Learn.” In <i>12th International Conference on Learning Representations</i>. 
OpenReview, 2024."},"has_accepted_license":"1","publisher":"OpenReview","article_processing_charge":"No","corr_author":"1","related_material":{"record":[{"id":"21198","status":"public","relation":"dissertation_contains"}]},"oa":1,"acknowledgement":"This research was supported by the Scientific Service Units (SSU) of ISTA through resources provided by Scientific Computing (SciComp).\r\n","month":"03","department":[{"_id":"ChLa"}],"author":[{"first_name":"Jonathan A","id":"e499926b-f6e0-11ea-865d-9c63db0031e8","full_name":"Scott, Jonathan A","last_name":"Scott"},{"id":"653bd8b6-f394-11eb-9cf6-c0bbf6cd78d4","first_name":"Hossein","orcid":"0009-0007-3977-6462","last_name":"Zakerinia","full_name":"Zakerinia, Hossein"},{"full_name":"Lampert, Christoph","last_name":"Lampert","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","first_name":"Christoph","orcid":"0000-0001-8622-7887"}],"arxiv":1,"oa_version":"Published Version","date_created":"2024-08-11T22:01:12Z","title":"PEFLL: Personalized federated learning by learning to learn","conference":{"name":"ICLR: International Conference on Learning Representations","start_date":"2024-03-07","end_date":"2024-03-07","location":"Vienna, Austria"},"publication_status":"published","file":[{"checksum":"81b7ea2e667adaf9c7a7b6b376b1f251","file_name":"2024_ICLR_Scott.pdf","creator":"dernst","relation":"main_file","file_id":"17415","date_updated":"2024-08-12T07:38:06Z","access_level":"open_access","content_type":"application/pdf","success":1,"date_created":"2024-08-12T07:38:06Z","file_size":1029219}]},{"related_material":{"record":[{"relation":"dissertation_contains","status":"public","id":"19759"}],"link":[{"url":"https://github.com/berndprach/1LipschitzLayersCompared","relation":"software"}]},"article_processing_charge":"No","corr_author":"1","publisher":"Computer Vision Foundation","author":[{"last_name":"Prach","full_name":"Prach, Bernd","first_name":"Bernd","id":"2D561D42-C427-11E9-89B4-9C1AE6697425"},{"full_name":"Brau, Fabio","last_name":"Brau","first_name":"Fabio"},{"full_name":"Buttazzo, Giorgio","last_name":"Buttazzo","first_name":"Giorgio"},{"full_name":"Lampert, Christoph","last_name":"Lampert","orcid":"0000-0001-8622-7887","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","first_name":"Christoph"}],"month":"06","department":[{"_id":"GradSch"},{"_id":"ChLa"}],"doi":"10.1109/CVPR52733.2024.02320","oa":1,"acknowledgement":"This work was partially supported by project SERICS (PE00000014) under the MUR National Recovery and Resilience Plan funded by the European Union - NextGenerationEU.\r\n","date_created":"2024-08-14T08:42:32Z","title":"1-Lipschitz layers compared: Memory, speed, and certifiable robustness","OA_type":"green","oa_version":"Preprint","arxiv":1,"conference":{"name":"CVPR: Conference on Computer Vision and Pattern Recognition","start_date":"2024-06-16","end_date":"2024-06-22","location":"Seattle, WA, United States"},"publication_status":"published","status":"public","page":"24574-24583","publication":"Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition","date_updated":"2026-04-07T11:49:51Z","isi":1,"OA_place":"repository","date_published":"2024-06-01T00:00:00Z","main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2311.16833","open_access":"1"}],"user_id":"317138e5-6ab7-11ef-aa6d-ffef3953e345","year":"2024","day":"01","citation":{"mla":"Prach, Bernd, et al. 
“1-Lipschitz Layers Compared: Memory, Speed, and Certifiable Robustness.” <i>Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, Computer Vision Foundation, 2024, pp. 24574–83, doi:<a href=\"https://doi.org/10.1109/CVPR52733.2024.02320\">10.1109/CVPR52733.2024.02320</a>.","ieee":"B. Prach, F. Brau, G. Buttazzo, and C. Lampert, “1-Lipschitz layers compared: Memory, speed, and certifiable robustness,” in <i>Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, Seattle, WA, United States, 2024, pp. 24574–24583.","short":"B. Prach, F. Brau, G. Buttazzo, C. Lampert, in:, Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, Computer Vision Foundation, 2024, pp. 24574–24583.","apa":"Prach, B., Brau, F., Buttazzo, G., &#38; Lampert, C. (2024). 1-Lipschitz layers compared: Memory, speed, and certifiable robustness. In <i>Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition</i> (pp. 24574–24583). Seattle, WA, United States: Computer Vision Foundation. <a href=\"https://doi.org/10.1109/CVPR52733.2024.02320\">https://doi.org/10.1109/CVPR52733.2024.02320</a>","ama":"Prach B, Brau F, Buttazzo G, Lampert C. 1-Lipschitz layers compared: Memory, speed, and certifiable robustness. In: <i>Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>. Computer Vision Foundation; 2024:24574-24583. doi:<a href=\"https://doi.org/10.1109/CVPR52733.2024.02320\">10.1109/CVPR52733.2024.02320</a>","ista":"Prach B, Brau F, Buttazzo G, Lampert C. 2024. 1-Lipschitz layers compared: Memory, speed, and certifiable robustness. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. CVPR: Conference on Computer Vision and Pattern Recognition, 24574–24583.","chicago":"Prach, Bernd, Fabio Brau, Giorgio Buttazzo, and Christoph Lampert. “1-Lipschitz Layers Compared: Memory, Speed, and Certifiable Robustness.” In <i>Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, 24574–83. Computer Vision Foundation, 2024. <a href=\"https://doi.org/10.1109/CVPR52733.2024.02320\">https://doi.org/10.1109/CVPR52733.2024.02320</a>."},"has_accepted_license":"1","external_id":{"isi":["001344387500055"],"arxiv":["2311.16833"]},"language":[{"iso":"eng"}],"_id":"17426","abstract":[{"lang":"eng","text":"The robustness of neural networks against input perturbations with bounded\r\nmagnitude represents a serious concern in the deployment of deep learning\r\nmodels in safety-critical systems. Recently, the scientific community has\r\nfocused on enhancing certifiable robustness guarantees by crafting 1-Lipschitz\r\nneural networks that leverage Lipschitz bounded dense and convolutional layers.\r\nAlthough different methods have been proposed in the literature to achieve this\r\ngoal, understanding the performance of such methods is not straightforward,\r\nsince different metrics can be relevant (e.g., training time, memory usage,\r\naccuracy, certifiable robustness) for different applications. For this reason,\r\nthis work provides a thorough theoretical and empirical comparison between\r\nmethods by evaluating them in terms of memory usage, speed, and certifiable\r\nrobust accuracy. The paper also provides some guidelines and recommendations to\r\nsupport the user in selecting the methods that work best depending on the\r\navailable resources. 
We provide code at\r\nhttps://github.com/berndprach/1LipschitzLayersCompared."}],"quality_controlled":"1","type":"conference"},{"oa":1,"doi":"10.48550/arXiv.2412.04245","date_updated":"2026-04-07T11:49:51Z","publication":"arXiv","author":[{"full_name":"Prach, Bernd","last_name":"Prach","first_name":"Bernd","id":"2D561D42-C427-11E9-89B4-9C1AE6697425"},{"id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","first_name":"Christoph","orcid":"0000-0001-8622-7887","last_name":"Lampert","full_name":"Lampert, Christoph"}],"month":"12","department":[{"_id":"GradSch"},{"_id":"ChLa"}],"corr_author":"1","article_processing_charge":"No","status":"public","related_material":{"record":[{"relation":"later_version","status":"public","id":"20455"},{"status":"public","id":"19759","relation":"dissertation_contains"}]},"_id":"18874","abstract":[{"text":"Despite extensive research since the community learned about adversarial\r\nexamples 10 years ago, we still do not know how to train high-accuracy\r\nclassifiers that are guaranteed to be robust to small perturbations of their\r\ninputs. Previous works often argued that this might be because no classifier\r\nexists that is robust and accurate at the same time. However, in computer\r\nvision this assumption does not match reality, where humans are usually accurate\r\nand robust on most tasks of interest. We offer an alternative explanation and\r\nshow that in certain settings robust generalization is only possible with\r\nunrealistically large amounts of data. More precisely, we find a setting where a\r\nrobust classifier exists, it is easy to learn an accurate classifier, yet it\r\nrequires an exponential amount of data to learn a robust classifier. Based on\r\nthis theoretical result, we explore how well robust classifiers generalize on\r\ndatasets such as CIFAR-10. We come to the conclusion that on these datasets, the\r\nlimitation of current robust models also lies in generalization, and that\r\nthey require a lot of data to do well on the test set. We also show that the\r\nproblem is not in the expressiveness or generalization capabilities of current\r\narchitectures, and that there are low-magnitude features in the data which are\r\nuseful for non-robust generalization but are not available for robust\r\nclassifiers.","lang":"eng"}],"language":[{"iso":"eng"}],"external_id":{"arxiv":["2412.04245"]},"type":"preprint","publication_status":"draft","citation":{"ieee":"B. Prach and C. Lampert, “Intriguing properties of robust classification,” <i>arXiv</i>.","short":"B. Prach, C. Lampert, ArXiv (n.d.).","mla":"Prach, Bernd, and Christoph Lampert. “Intriguing Properties of Robust Classification.” <i>ArXiv</i>, 2412.04245, doi:<a href=\"https://doi.org/10.48550/arXiv.2412.04245\">10.48550/arXiv.2412.04245</a>.","apa":"Prach, B., &#38; Lampert, C. (n.d.). Intriguing properties of robust classification. <i>arXiv</i>. <a href=\"https://doi.org/10.48550/arXiv.2412.04245\">https://doi.org/10.48550/arXiv.2412.04245</a>","ista":"Prach B, Lampert C. Intriguing properties of robust classification. arXiv, 2412.04245.","ama":"Prach B, Lampert C. Intriguing properties of robust classification. <i>arXiv</i>. doi:<a href=\"https://doi.org/10.48550/arXiv.2412.04245\">10.48550/arXiv.2412.04245</a>","chicago":"Prach, Bernd, and Christoph Lampert. “Intriguing Properties of Robust Classification.” <i>ArXiv</i>, n.d.
<a href=\"https://doi.org/10.48550/arXiv.2412.04245\">https://doi.org/10.48550/arXiv.2412.04245</a>."},"day":"05","article_number":"2412.04245","arxiv":1,"year":"2024","user_id":"8b945eb4-e2f2-11eb-945a-df72226e66a9","date_published":"2024-12-05T00:00:00Z","title":"Intriguing properties of robust classification","date_created":"2025-01-24T16:57:29Z","OA_place":"repository","oa_version":"Preprint","main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2412.04245","open_access":"1"}]},{"type":"conference","conference":{"start_date":"2022-08-21","name":"RRPR: Reproducible Research in Pattern Recognition","location":"Montreal, Canada","end_date":"2022-08-21"},"publication_status":"published","quality_controlled":"1","_id":"14410","language":[{"iso":"eng"}],"abstract":[{"lang":"eng","text":"This paper focuses on the implementation details of the baseline methods and a recent lightweight conditional model extrapolation algorithm LIMES [5] for streaming data under class-prior shift. LIMES achieves superior performance over the baseline methods, especially concerning the minimum-across-day accuracy, which is important for the users of the system. In this work, the key measures to facilitate reproducibility and enhance the credibility of the results are described."}],"day":"20","citation":{"chicago":"Tomaszewska, Paulina, and Christoph Lampert. “On the Implementation of Baselines and Lightweight Conditional Model Extrapolation (LIMES) under Class-Prior Shift.” In <i>International Workshop on Reproducible Research in Pattern Recognition</i>, 14068:67–73. Springer Nature, 2023. <a href=\"https://doi.org/10.1007/978-3-031-40773-4_6\">https://doi.org/10.1007/978-3-031-40773-4_6</a>.","ama":"Tomaszewska P, Lampert C. On the implementation of baselines and lightweight conditional model extrapolation (LIMES) under class-prior shift. In: <i>International Workshop on Reproducible Research in Pattern Recognition</i>. Vol 14068. Springer Nature; 2023:67-73. doi:<a href=\"https://doi.org/10.1007/978-3-031-40773-4_6\">10.1007/978-3-031-40773-4_6</a>","ista":"Tomaszewska P, Lampert C. 2023. On the implementation of baselines and lightweight conditional model extrapolation (LIMES) under class-prior shift. International Workshop on Reproducible Research in Pattern Recognition. RRPR: Reproducible Research in Pattern Recognition, LNCS, vol. 14068, 67–73.","apa":"Tomaszewska, P., &#38; Lampert, C. (2023). On the implementation of baselines and lightweight conditional model extrapolation (LIMES) under class-prior shift. In <i>International Workshop on Reproducible Research in Pattern Recognition</i> (Vol. 14068, pp. 67–73). Montreal, Canada: Springer Nature. <a href=\"https://doi.org/10.1007/978-3-031-40773-4_6\">https://doi.org/10.1007/978-3-031-40773-4_6</a>","mla":"Tomaszewska, Paulina, and Christoph Lampert. “On the Implementation of Baselines and Lightweight Conditional Model Extrapolation (LIMES) under Class-Prior Shift.” <i>International Workshop on Reproducible Research in Pattern Recognition</i>, vol. 14068, Springer Nature, 2023, pp. 67–73, doi:<a href=\"https://doi.org/10.1007/978-3-031-40773-4_6\">10.1007/978-3-031-40773-4_6</a>.","short":"P. Tomaszewska, C. Lampert, in:, International Workshop on Reproducible Research in Pattern Recognition, Springer Nature, 2023, pp. 67–73.","ieee":"P. Tomaszewska and C. 
Lampert, “On the implementation of baselines and lightweight conditional model extrapolation (LIMES) under class-prior shift,” in <i>International Workshop on Reproducible Research in Pattern Recognition</i>, Montreal, Canada, 2023, vol. 14068, pp. 67–73."},"publication_identifier":{"isbn":["9783031407727"],"issn":["0302-9743"],"eissn":["1611-3349"]},"year":"2023","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","intvolume":"     14068","oa_version":"None","date_published":"2023-08-20T00:00:00Z","alternative_title":["LNCS"],"title":"On the implementation of baselines and lightweight conditional model extrapolation (LIMES) under class-prior shift","date_created":"2023-10-08T22:01:18Z","doi":"10.1007/978-3-031-40773-4_6","month":"08","department":[{"_id":"ChLa"}],"publication":"International Workshop on Reproducible Research in Pattern Recognition","date_updated":"2023-10-09T06:48:02Z","author":[{"first_name":"Paulina","last_name":"Tomaszewska","full_name":"Tomaszewska, Paulina"},{"last_name":"Lampert","full_name":"Lampert, Christoph","first_name":"Christoph","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0001-8622-7887"}],"volume":14068,"publisher":"Springer Nature","page":"67-73","status":"public","article_processing_charge":"No","scopus_import":"1"},{"publication_identifier":{"issn":["2835-8856"]},"arxiv":1,"OA_type":"green","oa_version":"Preprint","date_created":"2023-02-20T08:21:50Z","alternative_title":["TMLR"],"title":"Cross-client label propagation for transductive and semi-supervised federated learning","publication_status":"published","file":[{"success":1,"content_type":"application/pdf","date_created":"2025-02-04T08:30:05Z","file_size":553717,"access_level":"open_access","relation":"main_file","creator":"dernst","date_updated":"2025-02-04T08:30:05Z","file_id":"18990","checksum":"aa322ad91cbd229f5cafe6733a119bd1","file_name":"2023_TMLR_Scott.pdf"}],"publisher":"Curran Associates","article_processing_charge":"No","corr_author":"1","tmp":{"short":"CC BY (4.0)","image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)"},"related_material":{"link":[{"url":"https://github.com/jonnyascott/xclp","relation":"software"}]},"oa":1,"month":"11","department":[{"_id":"ChLa"}],"author":[{"last_name":"Scott","full_name":"Scott, Jonathan A","id":"e499926b-f6e0-11ea-865d-9c63db0031e8","first_name":"Jonathan A"},{"full_name":"Yeo, Michelle X","last_name":"Yeo","id":"2D82B818-F248-11E8-B48F-1D18A9856A87","first_name":"Michelle X","orcid":"0009-0001-3676-4809"},{"id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","first_name":"Christoph","orcid":"0000-0001-8622-7887","full_name":"Lampert, Christoph","last_name":"Lampert"}],"ddc":["004"],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","year":"2023","OA_place":"repository","date_published":"2023-11-27T00:00:00Z","quality_controlled":"1","type":"conference","external_id":{"arxiv":["2210.06434"]},"_id":"12660","abstract":[{"lang":"eng","text":"We present Cross-Client Label Propagation (XCLP), a new method for transductive federated learning. XCLP estimates a data graph jointly from the data of multiple clients and computes labels for the unlabeled data by propagating label information across the graph. To avoid clients having to share their data with anyone, XCLP employs two cryptographically secure protocols: secure Hamming distance computation and secure summation.
We demonstrate two distinct applications of XCLP within federated learning. In the first, we use it in a one-shot way to predict labels for unseen test points. In the second, we use it to repeatedly pseudo-label unlabeled training data in a federated semi-supervised setting. Experiments on both real federated and standard benchmark datasets show that in both applications XCLP achieves higher classification accuracy than alternative approaches."}],"language":[{"iso":"eng"}],"citation":{"apa":"Scott, J. A., Yeo, M. X., &#38; Lampert, C. (2023). Cross-client label propagation for transductive and semi-supervised federated learning. In <i>Transactions on Machine Learning Research</i>. Curran Associates.","short":"J.A. Scott, M.X. Yeo, C. Lampert, in:, Transactions on Machine Learning Research, Curran Associates, 2023.","ieee":"J. A. Scott, M. X. Yeo, and C. Lampert, “Cross-client label propagation for transductive and semi-supervised federated learning,” in <i>Transactions on Machine Learning Research</i>, 2023.","mla":"Scott, Jonathan A., et al. “Cross-Client Label Propagation for Transductive and Semi-Supervised Federated Learning.” <i>Transactions on Machine Learning Research</i>, Curran Associates, 2023.","chicago":"Scott, Jonathan A, Michelle X Yeo, and Christoph Lampert. “Cross-Client Label Propagation for Transductive and Semi-Supervised Federated Learning.” In <i>Transactions on Machine Learning Research</i>. Curran Associates, 2023.","ista":"Scott JA, Yeo MX, Lampert C. 2023. Cross-client label propagation for transductive and semi-supervised federated learning. Transactions on Machine Learning Research, TMLR.","ama":"Scott JA, Yeo MX, Lampert C. Cross-client label propagation for transductive and semi-supervised federated learning. In: <i>Transactions on Machine Learning Research</i>. Curran Associates; 2023."},"day":"27","has_accepted_license":"1","status":"public","file_date_updated":"2025-02-04T08:30:05Z","date_updated":"2025-02-04T08:32:19Z","publication":"Transactions on Machine Learning Research"},{"author":[{"first_name":"Peter","id":"d64d6a8d-eb8e-11eb-b029-96fd216dec3c","full_name":"Súkeník, Peter","last_name":"Súkeník"},{"orcid":"0000-0002-3242-7020","id":"27EB676C-8706-11E9-9510-7717E6697425","first_name":"Marco","full_name":"Mondelli, Marco","last_name":"Mondelli"},{"orcid":"0000-0001-8622-7887","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","first_name":"Christoph","last_name":"Lampert","full_name":"Lampert, Christoph"}],"publication":"37th Annual Conference on Neural Information Processing Systems","date_updated":"2025-04-15T07:50:16Z","month":"12","department":[{"_id":"MaMo"},{"_id":"ChLa"}],"project":[{"name":"Prix Lopez-Loretta 2019 - Marco Mondelli","_id":"059876FA-7A3F-11EA-A408-12923DDC885E"}],"oa":1,"acknowledgement":"M. M. is partially supported by the 2019 Lopez-Loreta Prize. The authors would like to thank Eugenia Iofinova, Bernd Prach and Simone Bombari for valuable feedback on the manuscript.","article_processing_charge":"No","corr_author":"1","status":"public","citation":{"short":"P. Súkeník, M. Mondelli, C. Lampert, in:, 37th Annual Conference on Neural Information Processing Systems, 2023.","ieee":"P. Súkeník, M. Mondelli, and C. Lampert, “Deep neural collapse is provably optimal for the deep unconstrained features model,” in <i>37th Annual Conference on Neural Information Processing Systems</i>, New Orleans, LA, United States, 2023.","mla":"Súkeník, Peter, et al.
“Deep Neural Collapse Is Provably Optimal for the Deep Unconstrained Features Model.” <i>37th Annual Conference on Neural Information Processing Systems</i>, 2023.","apa":"Súkeník, P., Mondelli, M., &#38; Lampert, C. (2023). Deep neural collapse is provably optimal for the deep unconstrained features model. In <i>37th Annual Conference on Neural Information Processing Systems</i>. New Orleans, LA, United States.","ista":"Súkeník P, Mondelli M, Lampert C. 2023. Deep neural collapse is provably optimal for the deep unconstrained features model. 37th Annual Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems, NeurIPS.","ama":"Súkeník P, Mondelli M, Lampert C. Deep neural collapse is provably optimal for the deep unconstrained features model. In: <i>37th Annual Conference on Neural Information Processing Systems</i>; 2023.","chicago":"Súkeník, Peter, Marco Mondelli, and Christoph Lampert. “Deep Neural Collapse Is Provably Optimal for the Deep Unconstrained Features Model.” In <i>37th Annual Conference on Neural Information Processing Systems</i>, 2023."},"day":"15","external_id":{"arxiv":["2305.13165"]},"_id":"14921","abstract":[{"lang":"eng","text":"Neural collapse (NC) refers to the surprising structure of the last layer of deep neural networks in the terminal phase of gradient descent training. Recently, an increasing amount of experimental evidence has pointed to the propagation of NC to earlier layers of neural networks. However, while the NC in the last layer is well studied theoretically, much less is known about its multi-layered counterpart - deep neural collapse (DNC). In particular, existing work focuses either on linear layers or only on the last two layers at the price of an extra assumption. Our paper fills this gap by generalizing the established analytical framework for NC - the unconstrained features model - to multiple non-linear layers. Our key technical contribution is to show that, in a deep unconstrained features model, the unique global optimum for binary classification exhibits all the properties typical of DNC. This explains the existing experimental evidence of DNC. We also empirically show that (i) by optimizing deep unconstrained features models via gradient descent, the resulting solution agrees well with our theory, and (ii) trained networks recover the unconstrained features suitable for the occurrence of DNC, thus supporting the validity of this modeling principle."}],"language":[{"iso":"eng"}],"quality_controlled":"1","publication_status":"published","conference":{"location":"New Orleans, LA, United States","end_date":"2023-12-16","start_date":"2023-12-10","name":"NeurIPS: Neural Information Processing Systems"},"type":"conference","title":"Deep neural collapse is provably optimal for the deep unconstrained features model","date_created":"2024-02-02T11:17:41Z","alternative_title":["NeurIPS"],"date_published":"2023-12-15T00:00:00Z","main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2305.13165","open_access":"1"}],"oa_version":"Preprint","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","arxiv":1,"year":"2023"},{"external_id":{"arxiv":["2311.06103"]},"language":[{"iso":"eng"}],"_id":"15039","abstract":[{"lang":"eng","text":"A crucial property for achieving secure, trustworthy and interpretable deep learning systems is their robustness: small changes to a system's inputs should not result in large changes to its outputs. Mathematically, this means one strives for networks with a small Lipschitz constant.
Several recent works have focused on how to construct such Lipschitz networks, typically by imposing constraints on the weight matrices. In this work, we study an orthogonal aspect, namely the role of the activation function. We show that commonly used activation functions, such as MaxMin, as well as all piece-wise linear ones with two segments, unnecessarily restrict the class of representable functions, even in the simplest one-dimensional setting. We furthermore introduce the new N-activation function that is provably more expressive than currently popular activation functions. We provide code at this https URL."}],"publication_status":"draft","type":"preprint","day":"10","citation":{"ista":"Prach B, Lampert C. 1-Lipschitz neural networks are more expressive with N-activations. arXiv, 2311.06103.","ama":"Prach B, Lampert C. 1-Lipschitz neural networks are more expressive with N-activations. <i>arXiv</i>. doi:<a href=\"https://doi.org/10.48550/ARXIV.2311.06103\">10.48550/ARXIV.2311.06103</a>","chicago":"Prach, Bernd, and Christoph Lampert. “1-Lipschitz Neural Networks Are More Expressive with N-Activations.” <i>ArXiv</i>, n.d. <a href=\"https://doi.org/10.48550/ARXIV.2311.06103\">https://doi.org/10.48550/ARXIV.2311.06103</a>.","ieee":"B. Prach and C. Lampert, “1-Lipschitz neural networks are more expressive with N-activations,” <i>arXiv</i>.","short":"B. Prach, C. Lampert, ArXiv (n.d.).","mla":"Prach, Bernd, and Christoph Lampert. “1-Lipschitz Neural Networks Are More Expressive with N-Activations.” <i>ArXiv</i>, 2311.06103, doi:<a href=\"https://doi.org/10.48550/ARXIV.2311.06103\">10.48550/ARXIV.2311.06103</a>.","apa":"Prach, B., &#38; Lampert, C. (n.d.). 1-Lipschitz neural networks are more expressive with N-activations. <i>arXiv</i>. <a href=\"https://doi.org/10.48550/ARXIV.2311.06103\">https://doi.org/10.48550/ARXIV.2311.06103</a>"},"article_number":"2311.06103","user_id":"8b945eb4-e2f2-11eb-945a-df72226e66a9","year":"2023","arxiv":1,"title":"1-Lipschitz neural networks are more expressive with N-activations","OA_place":"repository","date_created":"2024-02-28T17:59:32Z","date_published":"2023-11-10T00:00:00Z","main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2311.06103","open_access":"1"}],"oa_version":"Preprint","oa":1,"doi":"10.48550/ARXIV.2311.06103","author":[{"id":"2D561D42-C427-11E9-89B4-9C1AE6697425","first_name":"Bernd","full_name":"Prach, Bernd","last_name":"Prach"},{"full_name":"Lampert, Christoph","last_name":"Lampert","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","first_name":"Christoph","orcid":"0000-0001-8622-7887"}],"date_updated":"2026-04-07T11:49:51Z","publication":"arXiv","month":"11","department":[{"_id":"GradSch"},{"_id":"ChLa"}],"article_processing_charge":"No","tmp":{"short":"CC BY (4.0)","image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)"},"corr_author":"1","status":"public","related_material":{"record":[{"relation":"dissertation_contains","status":"public","id":"19759"}]}},{"oa_version":"Published Version","title":"CrAM: A Compression-Aware
Minimizer","date_created":"2023-05-23T11:36:18Z","arxiv":1,"file":[{"file_name":"2023_ICLR_Peste.pdf","checksum":"a6eec897e13a91cdc3eeaf309801752c","relation":"main_file","date_updated":"2024-07-22T09:09:45Z","creator":"dernst","file_id":"17294","access_level":"open_access","file_size":458201,"date_created":"2024-07-22T09:09:45Z","content_type":"application/pdf","success":1}],"conference":{"name":"ICLR: International Conference on Learning Representations","start_date":"2023-05-01","end_date":"2023-05-05","location":"Kigali, Rwanda "},"publication_status":"published","related_material":{"record":[{"relation":"dissertation_contains","status":"public","id":"13074"}],"link":[{"url":"https://github.com/IST-DASLab/CrAM","relation":"software"}]},"publisher":"OpenReview","corr_author":"1","article_processing_charge":"No","project":[{"_id":"268A44D6-B435-11E9-9278-68D0E5697425","call_identifier":"H2020","grant_number":"805223","name":"Elastic Coordination for Scalable Machine Learning"}],"department":[{"_id":"GradSch"},{"_id":"DaAl"},{"_id":"ChLa"}],"month":"05","author":[{"last_name":"Peste","full_name":"Peste, Elena-Alexandra","first_name":"Elena-Alexandra","id":"32D78294-F248-11E8-B48F-1D18A9856A87"},{"first_name":"Adrian","full_name":"Vladu, Adrian","last_name":"Vladu"},{"id":"47beb3a5-07b5-11eb-9b87-b108ec578218","first_name":"Eldar","last_name":"Kurtic","full_name":"Kurtic, Eldar"},{"full_name":"Lampert, Christoph","last_name":"Lampert","orcid":"0000-0001-8622-7887","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","first_name":"Christoph"},{"last_name":"Alistarh","full_name":"Alistarh, Dan-Adrian","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","first_name":"Dan-Adrian","orcid":"0000-0003-3650-940X"}],"acknowledgement":"AP, EK, DA received funding from the European Research Council (ERC) under the European\r\nUnion’s Horizon 2020 research and innovation programme (grant agreement No 805223 ScaleML). AV acknowledges the support of the French Agence Nationale de la Recherche (ANR), under grant ANR-21-CE48-0016 (project COMCOPT). We further acknowledge the support from the Scientific Service Units (SSU) of ISTA through resources provided by Scientific Computing (SciComp).","oa":1,"main_file_link":[{"open_access":"1","url":"https://openreview.net/pdf?id=_eTZBs-yedr"}],"date_published":"2023-05-01T00:00:00Z","year":"2023","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","ddc":["000"],"has_accepted_license":"1","day":"01","citation":{"mla":"Krumes, Alexandra, et al. “CrAM: A Compression-Aware Minimizer.” <i>11th International Conference on Learning Representations </i>, OpenReview, 2023.","short":"A. Krumes, A. Vladu, E. Kurtic, C. Lampert, D.-A. Alistarh, in:, 11th International Conference on Learning Representations , OpenReview, 2023.","ieee":"A. Krumes, A. Vladu, E. Kurtic, C. Lampert, and D.-A. Alistarh, “CrAM: A Compression-Aware Minimizer,” in <i>11th International Conference on Learning Representations </i>, Kigali, Rwanda , 2023.","apa":"Krumes, A., Vladu, A., Kurtic, E., Lampert, C., &#38; Alistarh, D.-A. (2023). CrAM: A Compression-Aware Minimizer. In <i>11th International Conference on Learning Representations </i>. Kigali, Rwanda : OpenReview.","ama":"Krumes A, Vladu A, Kurtic E, Lampert C, Alistarh D-A. CrAM: A Compression-Aware Minimizer. In: <i>11th International Conference on Learning Representations </i>. OpenReview; 2023.","ista":"Krumes A, Vladu A, Kurtic E, Lampert C, Alistarh D-A. 2023. CrAM: A Compression-Aware Minimizer. 11th International Conference on Learning Representations . 
ICLR: International Conference on Learning Representations.","chicago":"Krumes, Alexandra, Adrian Vladu, Eldar Kurtic, Christoph Lampert, and Dan-Adrian Alistarh. “CrAM: A Compression-Aware Minimizer.” In <i>11th International Conference on Learning Representations </i>. OpenReview, 2023."},"type":"conference","quality_controlled":"1","_id":"13053","language":[{"iso":"eng"}],"abstract":[{"text":"Deep neural networks (DNNs) often have to be compressed, via pruning and/or quantization, before they can be deployed in practical settings. In this work we propose a new compression-aware minimizer dubbed CrAM that modifies the optimization step in a principled way, in order to produce models whose local loss behavior is stable under compression operations such as pruning. Thus, dense models trained via CrAM should be compressible post-training, in a single step, without significant accuracy loss. Experimental results on standard benchmarks, such as residual networks for ImageNet classification and BERT models for language modelling, show that CrAM produces dense models that can be more accurate than the standard SGD/Adam-based baselines, but which are stable under weight pruning: specifically, we can prune models in one-shot to 70-80% sparsity with almost no accuracy loss, and to 90% with reasonable (∼1%) accuracy loss, which is competitive with gradual compression methods. Additionally, CrAM can produce sparse models which perform well for transfer learning, and it also works for semi-structured 2:4 pruning patterns supported by GPU hardware. The code for reproducing the results is available at https://github.com/IST-DASLab/CrAM.","lang":"eng"}],"external_id":{"arxiv":["2207.14200"]},"ec_funded":1,"status":"public","acknowledged_ssus":[{"_id":"ScienComp"}],"publication":"11th International Conference on Learning Representations ","date_updated":"2026-04-07T13:30:19Z","file_date_updated":"2024-07-22T09:09:45Z"},{"date_updated":"2024-10-09T21:03:41Z","publication":"26th International Conference on Pattern Recognition","isi":1,"scopus_import":"1","page":"2128-2134","status":"public","citation":{"chicago":"Tomaszewska, Paulina, and Christoph Lampert. “Lightweight Conditional Model Extrapolation for Streaming Data under Class-Prior Shift.” In <i>26th International Conference on Pattern Recognition</i>, 2022:2128–34. Institute of Electrical and Electronics Engineers, 2022. <a href=\"https://doi.org/10.1109/icpr56361.2022.9956195\">https://doi.org/10.1109/icpr56361.2022.9956195</a>.","ista":"Tomaszewska P, Lampert C. 2022. Lightweight conditional model extrapolation for streaming data under class-prior shift. 26th International Conference on Pattern Recognition. ICPR: International Conference on Pattern Recognition, vol. 2022, 2128–2134.","ama":"Tomaszewska P, Lampert C. Lightweight conditional model extrapolation for streaming data under class-prior shift. In: <i>26th International Conference on Pattern Recognition</i>. Vol 2022. Institute of Electrical and Electronics Engineers; 2022:2128-2134. doi:<a href=\"https://doi.org/10.1109/icpr56361.2022.9956195\">10.1109/icpr56361.2022.9956195</a>","apa":"Tomaszewska, P., &#38; Lampert, C. (2022). Lightweight conditional model extrapolation for streaming data under class-prior shift. In <i>26th International Conference on Pattern Recognition</i> (Vol. 2022, pp. 2128–2134). Montreal, Canada: Institute of Electrical and Electronics Engineers. <a href=\"https://doi.org/10.1109/icpr56361.2022.9956195\">https://doi.org/10.1109/icpr56361.2022.9956195</a>","short":"P. Tomaszewska, C.
Lampert, in:, 26th International Conference on Pattern Recognition, Institute of Electrical and Electronics Engineers, 2022, pp. 2128–2134.","ieee":"P. Tomaszewska and C. Lampert, “Lightweight conditional model extrapolation for streaming data under class-prior shift,” in <i>26th International Conference on Pattern Recognition</i>, Montreal, Canada, 2022, vol. 2022, pp. 2128–2134.","mla":"Tomaszewska, Paulina, and Christoph Lampert. “Lightweight Conditional Model Extrapolation for Streaming Data under Class-Prior Shift.” <i>26th International Conference on Pattern Recognition</i>, vol. 2022, Institute of Electrical and Electronics Engineers, 2022, pp. 2128–34, doi:<a href=\"https://doi.org/10.1109/icpr56361.2022.9956195\">10.1109/icpr56361.2022.9956195</a>."},"day":"29","external_id":{"isi":["000897707602018"],"arxiv":["2206.05181"]},"language":[{"iso":"eng"}],"_id":"12161","abstract":[{"lang":"eng","text":"We introduce LIMES, a new method for learning with non-stationary streaming data, inspired by the recent success of meta-learning. The main idea is not to attempt to learn a single classifier that would have to work well across all occurring data distributions, nor many separate classifiers, but to exploit a hybrid strategy: we learn a single set of model parameters from which a specific classifier for any specific data distribution is derived via classifier adaptation. Assuming a multiclass classification setting with class-prior shift, the adaptation step can be performed analytically with only the classifier’s bias terms being affected. Another contribution of our work is an extrapolation step that predicts suitable adaptation parameters for future time steps based on the previous data. In combination, we obtain a lightweight procedure for learning from streaming data with varying class distribution that adds no trainable parameters and almost no memory or computational overhead compared to training a single model. Experiments on a set of exemplary tasks using Twitter data show that LIMES achieves higher accuracy than alternative approaches, especially with respect to the relevant real-world metric of lowest within-day accuracy."}],"quality_controlled":"1","type":"conference","date_published":"2022-11-29T00:00:00Z","main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2206.05181"}],"intvolume":"      2022","user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","year":"2022","author":[{"first_name":"Paulina","full_name":"Tomaszewska, Paulina","last_name":"Tomaszewska"},{"last_name":"Lampert","full_name":"Lampert, Christoph","orcid":"0000-0001-8622-7887","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","first_name":"Christoph"}],"volume":2022,"month":"11","department":[{"_id":"ChLa"}],"doi":"10.1109/icpr56361.2022.9956195","oa":1,"article_processing_charge":"No","corr_author":"1","publisher":"Institute of Electrical and Electronics Engineers","conference":{"end_date":"2022-08-25","location":"Montreal, Canada","name":"ICPR: International Conference on Pattern Recognition","start_date":"2022-08-21"},"publication_status":"published","date_created":"2023-01-12T12:09:38Z","title":"Lightweight conditional model extrapolation for streaming data under class-prior shift","oa_version":"Preprint","publication_identifier":{"eissn":["2831-7475"],"eisbn":["9781665490627"]},"arxiv":1}]
