@article{20029,
  abstract     = {Vacuolar acidification is crucial for the homeostasis of intracellular pH and the recycling of proteins and nutrients in cells, thereby playing important roles in various physiological processes related to vacuolar function. The key factors regulating vacuolar acidification and underlying mechanisms remain unclear. Here, we report that Arabidopsis phospholipase Dζ2 (PLDζ2) promotes the acidification of the vacuolar lumen to stimulate autophagic degradation under phosphorus deficiency. The pldζ2 mutant massively accumulates autophagic structures while exhibiting premature leaf senescence under nutrient starvation. Impaired autophagic flux, lytic vacuole morphology, and lytic degradation in pldζ2 indicate that PLDζ2 regulates autophagy by affecting the vacuolar function. PLDζ2 locates in both tonoplast and cytoplasm. Genetic, structural, and biochemical studies demonstrate that PLDζ2 directly interacts with vacuolar-type ATPase (V-ATPase) subunit D (VATD) to promote vacuolar acidification and autophagy under phosphorus starvation. These findings reveal the importance of V-ATPase and vacuolar pH in autophagic activity and provide clues in elucidating the regulatory mechanism of vacuolar acidification.},
  author       = {Guan, Bin and Xie, Ke Xuan and Du, Xin Qiao and Bai, Yu Xuan and Hao, Peng Chao and Lin, Wen Hui and Friml, Jiří and Xue, Hong Wei},
  issn         = {2211-1247},
  journal      = {Cell Reports},
  number       = {7},
  publisher    = {Elsevier},
  title        = {{Arabidopsis phospholipase Dζ2 facilitates vacuolar acidification and autophagy under phosphorus starvation by interacting with VATD}},
  doi          = {10.1016/j.celrep.2025.116024},
  volume       = {44},
  year         = {2025},
}

@article{20030,
  abstract     = {We report the discovery of a Lyα emitter (LAE) candidate in the immediate foreground of the quasar PSO J158-14 at zQSO = 6.0685 at a projected distance ∼29 pkpc that is associated with an extremely metal-poor absorption system. This system was found in archival observations of the quasar field with the Very Large Telescope (VLT)/Multi-Unit Spectroscopic Explorer (MUSE) and was previously missed in searches of absorption systems using quasar absorption line spectroscopy, as it imparts no detectable metal absorption lines on the background quasar spectrum. The detected Lyα emission line at a redshift of zLAE = 6.0323 is well aligned with the outer edge of the quasar’s proximity zone and can plausibly cause its observed damping wing if it is associated with a proximate subdamped Lyα absorption system with a column density of log N_HI/cm^-2 ≈ 19.7. A >10 hr medium-resolution spectrum of the quasar observed with the Magellan/Folded-port InfraRed Echellette (FIRE) and VLT/X-Shooter spectrographs reveals a metallicity constraint of [Z/H] < −3. Such low metallicity makes this system an extremely metal-poor galaxy candidate and provides an exciting site to study possible signatures of Population III stars.},
  author       = {Ďurovčíková, Dominika and Eilers, Anna Christina and Simcoe, Robert A. and Welsh, Louise and Meyer, Romain A. and Matthee, Jorryt J and Ryan-Weber, Emma V. and Yue, Minghao and Katz, Harley and Satyavolu, Sindhu and Becker, George and Davies, Frederick B. and Farina, Emanuele Paolo},
  issn         = {2041-8213},
  journal      = {The Astrophysical Journal Letters},
  number       = {2},
  publisher    = {IOP Publishing},
  title        = {{An extremely metal-poor Lyα emitter candidate at z = 6 revealed through absorption spectroscopy}},
  doi          = {10.3847/2041-8213/ade71c},
  volume       = {987},
  year         = {2025},
}

@article{20031,
  abstract     = {The central vacuole is a multifunctional organelle with the most significant occupancy in a differentiated plant cell. Plants depend on the function of the vacuole for critical development, growth, and environmental responses. As the cell expands, the vacuole changes shape and size, increasing its membrane and luminal content. The set of these events is called the vacuolar configuration process, which has not been well described. Our research highlights the impact of plasma membrane internalization on vacuole morphology during the vacuolar configuration process. We observed a direct correlation between differential endocytosis rates and the enrichment of vacuolar membranous structures. Chemical and genetic interference with clathrin-mediated endocytosis (CME) revealed that it is required for the vacuolar configuration of growing root cells. The contribution of CME to the vacuole configuration process co-occurs with the induction of post-trans-Golgi network (TGN)/early endosome (EE) trafficking with the participation of the Rab GTPases ARA6 and ARA7. Our results show that the CME plays an active role during vacuole configuration, most probably carrying the material that allows the establishment of the vacuole in elongating cells. Since membrane trafficking through the EE/TGN is required to reach the vacuole, additional players must be defined.},
  author       = {Osorio-Navarro, Claudio and Neira-Valenzuela, Gabriel and Sierra, Paula and Adamowski, Maciek and Toledo, Jorge and Norambuena, Lorena},
  issn         = {1460-2431},
  journal      = {Journal of Experimental Botany},
  number       = {10},
  pages        = {2700--2714},
  publisher    = {Oxford University Press},
  title        = {{The configuration of the vacuole is driven by clathrin-mediated trafficking in root cells of Arabidopsis thaliana}},
  doi          = {10.1093/jxb/eraf084},
  volume       = {76},
  year         = {2025},
}

@inproceedings{20032,
  abstract     = {We propose Scalable Mechanistic Neural Network (S-MNN), an enhanced neural network framework designed for scientific machine learning applications involving long temporal sequences. By reformulating the original Mechanistic Neural Network (MNN) (Pervez et al., 2024), we reduce the computational time and space complexities from cubic and quadratic with respect to the sequence length, respectively, to linear. This significant improvement enables efficient modeling of long-term dynamics without sacrificing accuracy or interpretability. Extensive experiments demonstrate that S-MNN matches the original MNN in precision while substantially reducing computational resources. Consequently, S-MNN can drop-in replace the original MNN in applications, providing a practical and efficient tool for integrating mechanistic bottlenecks into neural network models of complex dynamical systems. Source code is available at https://github.com/IST-DASLab/ScalableMNN.},
  author       = {Chen, Jiale and Yao, Dingling and Pervez, Adeel A and Alistarh, Dan-Adrian and Locatello, Francesco},
  booktitle    = {13th International Conference on Learning Representations},
  isbn         = {9798331320850},
  location     = {Singapore, Singapore},
  pages        = {63716--63737},
  publisher    = {ICLR},
  title        = {{Scalable mechanistic neural networks}},
  year         = {2025},
}

@inproceedings{20033,
  abstract     = {A growing number of machine learning scenarios rely on knowledge distillation where one uses the output of a surrogate model as labels to supervise the training of a target model. In this work, we provide a sharp characterization of this process for ridgeless, high-dimensional regression, under two settings: (i) model shift, where the surrogate model is arbitrary, and (ii) distribution shift, where the surrogate model is the solution of empirical risk minimization with out-of-distribution data. In both cases, we characterize the precise risk of the target model through non-asymptotic bounds in terms of sample size and data distribution under mild conditions. As a consequence, we identify the form of the optimal surrogate model, which reveals the benefits and limitations of discarding weak features in a data-dependent fashion. In the context of weak-to-strong (W2S) generalization, this has the interpretation that (i) W2S training, with the surrogate as the weak model, can provably outperform training with strong labels under the same data budget, but (ii) it is unable to improve the data scaling law. We validate our results on numerical experiments both on ridgeless regression and on neural network architectures.},
  author       = {Ildiz, M. Emrullah and Gozeten, Halil Alperen and Taga, Ege Onur and Mondelli, Marco and Oymak, Samet},
  booktitle    = {13th International Conference on Learning Representations},
  isbn         = {9798331320850},
  location     = {Singapore, Singapore},
  pages        = {2967--3006},
  publisher    = {ICLR},
  title        = {{High-dimensional analysis of knowledge distillation: Weak-to-Strong generalization and scaling laws}},
  year         = {2025},
}

@inproceedings{20034,
  abstract     = {We introduce LDAdam, a memory-efficient optimizer for training large models, that performs adaptive optimization steps within lower dimensional subspaces, while consistently exploring the full parameter space during training. This strategy keeps the optimizer's memory footprint to a fraction of the model size. LDAdam relies on a new projection-aware update rule for the optimizer states that allows for transitioning between subspaces, i.e., estimation of the statistics of the projected gradients. To mitigate the errors due to low-rank projection, LDAdam integrates a new generalized error feedback mechanism, which explicitly accounts for both gradient and optimizer state compression. We prove the convergence of LDAdam under standard assumptions, and provide empirical evidence that LDAdam allows for efficient fine-tuning and pre-training of language models.},
  author       = {Robert, Thomas and Safaryan, Mher and Modoranu, Ionut-Vlad and Alistarh, Dan-Adrian},
  booktitle    = {13th International Conference on Learning Representations},
  isbn         = {9798331320850},
  location     = {Singapore, Singapore},
  pages        = {101877--101913},
  publisher    = {ICLR},
  title        = {{LDAdam: Adaptive optimization from low-dimensional gradient statistics}},
  year         = {2025},
}

@inproceedings{20035,
  abstract     = {Deep neural networks (DNNs) at convergence consistently represent the training data in the last layer via a geometric structure referred to as neural collapse. This empirical evidence has spurred a line of theoretical research aimed at proving the emergence of neural collapse, mostly focusing on the unconstrained features model. Here, the features of the penultimate layer are free variables, which makes the model data-agnostic and puts into question its ability to capture DNN training. Our work addresses the issue, moving away from unconstrained features and
studying DNNs that end with at least two linear layers. We first prove generic guarantees on neural collapse that assume (i) low training error and balancedness of linear layers (for within-class variability collapse), and (ii) bounded conditioning of the features before the linear part (for orthogonality of class-means, and their alignment with weight matrices). The balancedness refers to the fact that $W_{\ell+1}^\top W_{\ell+1} \approx W_\ell W_\ell^\top$ for any pair of consecutive weight matrices of the linear part, and the bounded conditioning requires a well-behaved ratio between largest and smallest non-zero singular values of the features. We then show that such assumptions hold for gradient descent training with weight decay: (i) for networks with a wide first layer, we prove low training error and balancedness, and (ii) for solutions that are either nearly optimal or stable under large learning rates, we additionally prove the bounded conditioning. Taken together, our results are the first to show neural collapse in the end-to-end training of DNNs.},
  author       = {Jacot, Arthur and Súkeník, Peter and Wang, Zihan and Mondelli, Marco},
  booktitle    = {13th International Conference on Learning Representations},
  isbn         = {9798331320850},
  location     = {Singapore, Singapore},
  pages        = {1905--1931},
  publisher    = {ICLR},
  title        = {{Wide neural networks trained with weight decay provably exhibit neural collapse}},
  year         = {2025},
}

@inproceedings{20036,
  abstract     = {We introduce NeCo: Patch Neighbor Consistency, a novel self-supervised training loss that enforces patch-level nearest neighbor consistency across a student and teacher model. Compared to contrastive approaches that only yield binary learning signals, i.e. "attract" and "repel", this approach benefits from the more fine-grained learning signal of sorting spatially dense features relative to reference patches. Our method leverages differentiable sorting applied on top of pretrained representations, such as DINOv2-registers to bootstrap the learning signal and further improve upon them. This dense post-pretraining leads to superior performance across various models and datasets, despite requiring only 19 hours on a single GPU. This method generates high-quality dense feature encoders and establishes several new state-of-the-art results such as +2.3 % and +4.2% for non-parametric in-context semantic segmentation on ADE20k and Pascal VOC, +1.6% and +4.8% for linear segmentation evaluations on COCO-Things and -Stuff and improvements in the 3D understanding of multi-view consistency on SPair-71k, by more than 1.5%.},
  author       = {Pariza, Valentinos and Salehi, Mohammadreza and Burghouts, Gertjan and Locatello, Francesco and Asano, Yuki M.},
  booktitle    = {13th International Conference on Learning Representations},
  isbn         = {9798331320850},
  location     = {Singapore, Singapore},
  pages        = {72303--72330},
  publisher    = {ICLR},
  title        = {{Near, far: Patch-ordering enhances vision foundation models' scene understanding}},
  year         = {2025},
}

@inproceedings{20037,
  abstract     = {Disentangling polysemantic neurons is at the core of many current approaches to interpretability of large language models. Here we attempt to study how disentanglement can be used to understand performance, particularly under weight sparsity, a leading post-training optimization technique. We suggest a novel measure for estimating neuronal entanglement: the Wasserstein distance of a neuron's output distribution to a Gaussian. Moreover, we show the existence of a small number of highly entangled "Wasserstein Neurons" in each linear layer of an LLM, characterized by their highly non-Gaussian output distributions, their role in mapping similar inputs to dissimilar outputs, and their significant impact on model accuracy. To study these phenomena, we propose a new experimental framework for disentangling polysemantic neurons. Our framework separates each layer's inputs to create a mixture of experts where each neuron's output is computed by a mixture of neurons of lower Wasserstein distance, each better at maintaining accuracy when sparsified without retraining. We provide strong evidence that this is because the mixture of sparse experts is effectively disentangling the input-output relationship of individual neurons, in particular the difficult Wasserstein neurons.},
  author       = {Sawmya, Shashata and Kong, Linghao and Markov, Ilia and Alistarh, Dan-Adrian and Shavit, Nir},
  booktitle    = {13th International Conference on Learning Representations},
  isbn         = {9798331320850},
  location     = {Singapore, Singapore},
  pages        = {26244--26274},
  publisher    = {ICLR},
  title        = {{Wasserstein distances, neuronal entanglement, and sparsity}},
  year         = {2025},
}

@inproceedings{20038,
  abstract     = {Pruning eliminates unnecessary parameters in neural networks; it offers a promising solution to the growing computational demands of large language models (LLMs). While many focus on post-training pruning, sparse pre-training--which combines pruning and pre-training into a single phase--provides a simpler alternative. In this work, we present the first systematic exploration of optimal sparse pre-training configurations for LLMs through an examination of 80 unique pruning schedules across different sparsity levels and training durations. We find that initiating pruning at 25% of total training compute and concluding at 75% achieves near-optimal final evaluation loss. These findings provide valuable insights for efficient and effective sparse pre-training of LLMs. Furthermore, we propose a new scaling law that modifies the Chinchilla scaling law to use the average parameter count over pre-training. Through empirical and theoretical validation, we demonstrate that this modified scaling law accurately models evaluation loss for both sparsely and densely pre-trained LLMs, unifying scaling laws across pre-training paradigms. Our findings indicate that while sparse pre-training achieves the same final model quality as dense pre-training for equivalent compute budgets, it provides substantial benefits through reduced model size, enabling significant potential computational savings during inference.},
  author       = {Jin, Tian and Humayun, Ahmed Imtiaz and Evci, Utku and Subramanian, Suvinay and Yazdanbakhsh, Amir and Alistarh, Dan-Adrian and Dziugaite, Gintare Karolina},
  booktitle    = {13th International Conference on Learning Representations},
  isbn         = {9798331320850},
  location     = {Singapore, Singapore},
  pages        = {85165--85181},
  publisher    = {ICLR},
  title        = {{The journey matters: Average parameter count over pre-training unifies sparse and dense scaling laws}},
  year         = {2025},
}

@article{20040,
  abstract     = {Contractive coupling rates have been recently introduced by Conforti as a tool to establish convex Sobolev inequalities (including modified log-Sobolev and Poincaré inequality) for some classes of Markov chains. In this work, for most of the examples discussed by Conforti, we use contractive coupling rates to prove stronger inequalities, in the form of curvature lower bounds (in entropic and discrete Bakry–Émery sense) and geodesic convexity of some entropic functionals. In addition, we recall and give straightforward generalizations of some notions of coarse Ricci curvature, and we discuss some of their properties and relations with the concepts of couplings and coupling rates: as an application, we show exponential contraction of the p-Wasserstein distance for the heat flow in the aforementioned examples.},
  author       = {Pedrotti, Francesco},
  issn         = {1050-5164},
  journal      = {The Annals of Applied Probability},
  number       = {1},
  pages        = {196--250},
  publisher    = {Institute of Mathematical Statistics},
  title        = {{Contractive coupling rates and curvature lower bounds for Markov chains}},
  doi          = {10.1214/24-aap2113},
  volume       = {35},
  year         = {2025},
}

@article{20043,
  abstract     = {We establish an isomorphism of complex K-theory of the moduli space $M$ of ``$\mathrm{SL}_n$''-Higgs bundles of degree $d$ and rank $n$ (in the sense of Hausel–Thaddeus) and twisted complex K-theory of the orbifold $M$ of $\mathrm{PGL}_n$-Higgs bundles of degree $e$, where $(n,d)=(n,e)=1$. Along the way, we prove the vanishing of torsion for $H^*(M)$ and certain twisted complex K-theory groups of $M$. We also extend Arinkin’s autoduality of compactified Jacobian to a derived equivalence between $\mathrm{SL}_n$- and $\mathrm{PGL}_n$-Hitchin systems over the elliptic locus. In the appendix, we develop a formalism of $G$-sheaves of spectra, generalising equivariant homotopy theory to a relative setting.},
  author       = {Groechenig, Michael and Shen, Shiyu},
  issn         = {1435-9863},
  journal      = {Journal of the European Mathematical Society},
  publisher    = {EMS Press},
  title        = {{Complex K-theory of moduli spaces of Higgs bundles}},
  doi          = {10.4171/jems/1601},
  year         = {2025},
}

@article{20044,
  abstract     = {Genetic trade-offs—which occur when variants that are beneficial in some contexts of natural selection are harmful in others—can influence a wide range of evolutionary phenomena, from the maintenance of genetic variation to the evolution of aging and sex differences. An extensive body of evolutionary theory has focused on the consequences of such trade-offs, and recent analyses of Fisher’s geometric model have further quantified the expected proportion of new mutations that exhibit trade-offs. However, the theory remains silent regarding the prevalence of trade-offs among the variants that contribute to adaptation. Here, we extend Fisher’s geometric model to predict the prevalence of trade-offs among the adaptive mutations that become established or fixed in a population. We consider trade-offs between sexes, habitats, fitness components, and temporally fluctuating environments. In all 4 scenarios, trade-off alleles are consistently under-represented among established relative to new beneficial mutations—an effect that arises from the greater susceptibility of trade-off alleles to genetic drift. Adaptation during a population size decline exacerbates this deficit of trade-offs among established mutations, whereas population expansions dampen it. Consequently, threatened populations should primarily adapt using unconditionally beneficial alleles, while invasive populations are more prone to adaptation using variants that exhibit trade-offs.},
  author       = {Connallon, Tim and Czuppon, Peter and Olito, Colin and Goedert, Debora and Kokko, Hanna and Nava-Bolaños, Angela and Nilén, Sofie and Svensson, Erik I and Zwoinska, Martyna and Dutoit, Ludovic and Ruzicka, Filip},
  issn         = {1558-5646},
  journal      = {Evolution},
  number       = {7},
  pages        = {1243--1255},
  publisher    = {Oxford University Press},
  title        = {{Predicting the prevalence of genetic trade-offs among adaptive substitutions}},
  doi          = {10.1093/evolut/qpaf061},
  volume       = {79},
  year         = {2025},
}

@article{20045,
  abstract     = {We consider the time evolution of the renormalized Nelson model, which describes N bosons linearly coupled to a quantized scalar field, in the mean-field limit of many particles N≫1 with coupling constant proportional to N^−1/2. First, we show that initial states exhibiting Bose–Einstein condensation for the particles and approximating a coherent state for the quantum field retain their structure under the many-body time evolution. Concretely, the dynamics of the reduced densities are approximated by solutions of two coupled PDEs, the Schrödinger–Klein–Gordon equations. Second, we construct a renormalized Bogoliubov evolution that describes the quantum fluctuations around the Schrödinger–Klein–Gordon equations. This evolution is used to extend the approximation of the evolved many-body state to the full norm topology. In summary, we provide a comprehensive analysis of the Nelson model that reveals the role of renormalization in the mean-field Bogoliubov theory.},
  author       = {Falconi, Marco and Lampart, Jonas and Leopold, Nikolai and Mitrouskas, David Johannes},
  issn         = {1873-1430},
  journal      = {Annales de l'Institut Henri Poincaré C},
  publisher    = {EMS Press},
  title        = {{Renormalized Bogoliubov theory for the Nelson model}},
  doi          = {10.4171/aihpc/154},
  year         = {2025},
}

@article{20046,
  abstract     = {A Laplacian matrix is a real symmetric matrix whose row and column sums are zero. We investigate the limiting distribution of the largest eigenvalues of a Laplacian random matrix with Gaussian entries. Unlike many classical matrix ensembles, this random matrix model contains dependent entries. Our main results show that the extreme eigenvalues of this model exhibit Poisson statistics. In particular, after properly shifting and scaling, we show that the largest eigenvalue converges to the Gumbel distribution as the dimension of the matrix tends to infinity. While the largest diagonal entry is also shown to have Gumbel fluctuations, there is a rather surprising difference between its deterministic centering term and the centering term required for the largest eigenvalues.},
  author       = {Campbell, Andrew J and Luh, Kyle and O’Rourke, Sean and Arenas-Velilla, Santiago and Perez-Abreu, Victor},
  issn         = {1083-6489},
  journal      = {Electronic Journal of Probability},
  pages        = {1--52},
  publisher    = {Institute of Mathematical Statistics},
  title        = {{Extreme eigenvalues of Laplacian random matrices with Gaussian entries}},
  doi          = {10.1214/25-ejp1366},
  volume       = {30},
  year         = {2025},
}

@article{20048,
  abstract     = {During embryonic development, cell behaviors need to be tightly regulated in time and space. Yet how the temporal and spatial regulations of cell behaviors are interconnected during embryonic development remains elusive. To address this, we turned to zebrafish gastrulation, the process whereby dynamic cell behaviors generate the three principal germ layers of the early embryo. Here, we show that Hoxb cluster genes are expressed in a temporally collinear manner at the blastoderm margin, where mesodermal and endodermal (mesendoderm) progenitor cells are specified and ingress to form mesendoderm/hypoblast. Functional analysis shows that these Hoxb genes regulate the timing of cell ingression: under- or overexpression of Hoxb genes perturb the timing of mesendoderm cell ingression and, consequently, the positioning of these cells along the forming anterior-posterior body axis after gastrulation. Finally, we found that Hoxb genes control the timing of mesendoderm ingression by regulating cellular bleb formation and cell surface fluctuations in the ingressing cells. Collectively, our findings suggest that Hoxb genes interconnect the temporal and spatial pattern of cell behaviors during zebrafish gastrulation by controlling cell surface fluctuations.},
  author       = {Moriyama, Yuuta and Mitsui, Toshiyuki and Heisenberg, Carl-Philipp J},
  issn         = {1477-9129},
  journal      = {Development},
  number       = {12},
  publisher    = {The Company of Biologists},
  title        = {{Hoxb genes determine the timing of cell ingression by regulating cell surface fluctuations during zebrafish gastrulation}},
  doi          = {10.1242/dev.204261},
  volume       = {152},
  year         = {2025},
}

@article{20050,
  abstract     = {We prove upper bounds on the L∞-Wasserstein distance from optimal transport between strongly log-concave probability densities and log-Lipschitz perturbations. In the simplest setting, such a bound amounts to a transport-information inequality involving the L∞-Wasserstein metric and the relative L∞-Fisher information. We show that this inequality can be sharpened significantly in situations where the involved densities are anisotropic. Our proof is based on probabilistic techniques using Langevin dynamics. As an application of these results, we obtain sharp exponential rates of convergence in Fisher’s infinitesimal model from quantitative genetics, generalising recent results by Calvez, Poyato, and Santambrogio in dimension 1 to arbitrary dimensions.},
  author       = {Khudiakova, Kseniia and Maas, Jan and Pedrotti, Francesco},
  issn         = {1050-5164},
  journal      = {The Annals of Applied Probability},
  number       = {3},
  pages        = {1913--1940},
  publisher    = {Institute of Mathematical Statistics},
  title        = {{L∞-optimal transport of anisotropic log-concave measures and exponential convergence in Fisher’s infinitesimal model}},
  doi          = {10.1214/25-aap2162},
  volume       = {35},
  year         = {2025},
}

@inproceedings{20051,
  abstract     = {We revisit the majority problem in the population protocol communication model, as first studied by Angluin et al. (Distributed Computing 2008). We consider a more general version of this problem known as plurality consensus, which has already been studied intensively in the literature. In this problem, each node in a system of n nodes, has initially one of k different opinions, and they need to agree on the (relative) majority opinion. In particular, we consider the important and intensively studied model of Undecided State Dynamics.
Our main contribution is an almost tight lower bound on the stabilization time: we prove that there exists an initial configuration, even with bias $\Delta = \omega(\sqrt{n\log n})$, where stabilization requires $\Omega(kn\log \frac{\sqrt{n}}{k \log n})$ interactions, or equivalently, $\Omega(k\log \frac{\sqrt{n}}{k \log n})$ parallel time for any $k = o\left(\frac{\sqrt{n}}{\log n}\right)$. This bound is tight for any $k \le n^{\frac{1}{2} - \epsilon}$, where $\epsilon > 0$ can be any small constant, as Amir et al.~(PODC'23) gave a $O(k\log n)$ parallel time upper bound for $k = O\left(\frac{\sqrt{n}}{\log^2 n}\right)$.},
  author       = {El-Hayek, Antoine and Elsässer, Robert and Schmid, Stefan},
  booktitle    = {Proceedings of the ACM Symposium on Principles of Distributed Computing},
  isbn         = {9798400718854},
  location     = {Huatulco, Mexico},
  publisher    = {Association for Computing Machinery},
  title        = {{An almost tight lower bound for plurality consensus with undecided state dynamics in the population protocol model}},
  doi          = {10.1145/3732772.3733505},
  year         = {2025},
}

@inproceedings{20052,
  abstract     = {This paper revisits a fundamental distributed computing problem in the population protocol model. Provided n agents each starting with an input color in [k], the relative majority problem asks to find the predominant color. In the population protocol model, at each time step, a scheduler selects two agents that first learn each other's states and then update their states based on what they learned.
We present the Circles protocol that solves the relative majority problem with $k^3$ states. It is always-correct under weakly fair scheduling. Not only does it improve upon the best known upper bound of $O(k^7)$, but it also shows a strikingly simpler design inspired by energy minimization in chemical settings.},
  author       = {Breitkopf, Tom-Lukas and Dallot, Julien and El-Hayek, Antoine and Schmid, Stefan},
  booktitle    = {Proceedings of the ACM Symposium on Principles of Distributed Computing},
  isbn         = {9798400718854},
  location     = {Huatulco, Mexico},
  pages        = {549--552},
  publisher    = {Association for Computing Machinery},
  title        = {{Brief announcement: Minimizing energy solves relative majority with a cubic number of states in population protocols}},
  doi          = {10.1145/3732772.3733512},
  year         = {2025},
}

@inproceedings{20053,
  abstract     = {Liquid democracy is a transitive vote delegation mechanism over voting graphs. It enables each voter to delegate their vote(s) to another better-informed voter, with the goal of collectively making a better decision. The question of whether liquid democracy outperforms direct voting has been previously studied in the context of local delegation mechanisms (where voters can only delegate to someone in their neighbourhood) and binary decision problems. It has previously been shown that it is impossible for local delegation mechanisms to outperform direct voting in general graphs. This raises the question: for which classes of graphs do local delegation mechanisms yield good results?
In this work, we analyse (1) properties of specific graphs and (2) properties of local delegation mechanisms on these graphs, determining where local delegation actually outperforms direct voting. We show that a critical graph property enabling liquid democracy is that the voting outcome of local delegation mechanisms preserves a sufficient amount of variance, thereby avoiding situations where delegation falls behind direct voting1. These insights allow us to prove our main results, namely that there exist local delegation mechanisms that perform no worse and in fact quantitatively better than direct voting in natural graph topologies like complete, random d-regular, and bounded degree graphs, lending a more nuanced perspective to previous impossibility results.},
  author       = {Chatterjee, Krishnendu and Gilbert, Seth and Schmid, Stefan and Svoboda, Jakub and Yeo, Michelle X},
  booktitle    = {Proceedings of the ACM Symposium on Principles of Distributed Computing},
  isbn         = {9798400718854},
  location     = {Huatulco, Mexico},
  pages        = {241--251},
  publisher    = {Association for Computing Machinery},
  title        = {{When is liquid democracy possible?: On the manipulation of variance}},
  doi          = {10.1145/3732772.3733544},
  year         = {2025},
}

