@inproceedings{20034,
  abstract     = {We introduce LDAdam, a memory-efficient optimizer for training large models, that performs adaptive optimization steps within lower dimensional subspaces, while consistently exploring the full parameter space during training. This strategy keeps the optimizer's memory footprint to a fraction of the model size. LDAdam relies on a new projection-aware update rule for the optimizer states that allows for transitioning between subspaces, i.e., estimation of the statistics of the projected gradients. To mitigate the errors due to low-rank projection, LDAdam integrates a new generalized error feedback mechanism, which explicitly accounts for both gradient and optimizer state compression. We prove the convergence of LDAdam under standard assumptions, and provide empirical evidence that LDAdam allows for efficient fine-tuning and pre-training of language models.},
  author       = {Robert, Thomas and Safaryan, Mher and Modoranu, Ionut-Vlad and Alistarh, Dan-Adrian},
  booktitle    = {13th International Conference on Learning Representations},
  isbn         = {9798331320850},
  location     = {Singapore, Singapore},
  pages        = {101877--101913},
  publisher    = {ICLR},
  title        = {{LDAdam: Adaptive optimization from low-dimensional gradient statistics}},
  year         = {2025},
}

@inproceedings{20035,
  abstract     = {Deep neural networks (DNNs) at convergence consistently represent the training data in the last layer via a geometric structure referred to as neural collapse. This empirical evidence has spurred a line of theoretical research aimed at proving the emergence of neural collapse, mostly focusing on the unconstrained features model. Here, the features of the penultimate layer are free variables, which makes the model data-agnostic and puts into question its ability to capture DNN training. Our work addresses the issue, moving away from unconstrained features and
studying DNNs that end with at least two linear layers. We first prove generic guarantees on neural collapse that assume (i) low training error and balancedness of linear layers (for within-class variability collapse), and (ii) bounded conditioning of the features before the linear part (for orthogonality of class-means, and their alignment with weight matrices). The balancedness refers to the fact that $W_{\ell+1}^\top W_{\ell+1} \approx W_\ell W_\ell^\top$ for any pair of consecutive weight matrices of the linear part, and the bounded conditioning requires a well-behaved ratio between largest and smallest non-zero singular values of the features. We then show that such assumptions hold for gradient descent training with weight decay: (i) for networks with a wide first layer, we prove low training error and balancedness, and (ii) for solutions that are either nearly optimal or stable under large learning rates, we additionally prove the bounded conditioning. Taken together, our results are the first to show neural collapse in the end-to-end training of DNNs.},
  author       = {Jacot, Arthur and Súkeník, Peter and Wang, Zihan and Mondelli, Marco},
  booktitle    = {13th International Conference on Learning Representations},
  isbn         = {9798331320850},
  location     = {Singapore, Singapore},
  pages        = {1905--1931},
  publisher    = {ICLR},
  title        = {{Wide neural networks trained with weight decay provably exhibit neural collapse}},
  year         = {2025},
}

@inproceedings{20036,
  abstract     = {We introduce NeCo: Patch Neighbor Consistency, a novel self-supervised training loss that enforces patch-level nearest neighbor consistency across a student and teacher model. Compared to contrastive approaches that only yield binary learning signals, i.e. "attract" and "repel", this approach benefits from the more fine-grained learning signal of sorting spatially dense features relative to reference patches. Our method leverages differentiable sorting applied on top of pretrained representations, such as DINOv2-registers to bootstrap the learning signal and further improve upon them. This dense post-pretraining leads to superior performance across various models and datasets, despite requiring only 19 hours on a single GPU. This method generates high-quality dense feature encoders and establishes several new state-of-the-art results such as +2.3% and +4.2% for non-parametric in-context semantic segmentation on ADE20k and Pascal VOC, +1.6% and +4.8% for linear segmentation evaluations on COCO-Things and -Stuff and improvements in the 3D understanding of multi-view consistency on SPair-71k, by more than 1.5%.},
  author       = {Pariza, Valentinos and Salehi, Mohammadreza and Burghouts, Gertjan and Locatello, Francesco and Asano, Yuki M.},
  booktitle    = {13th International Conference on Learning Representations},
  isbn         = {9798331320850},
  location     = {Singapore, Singapore},
  pages        = {72303--72330},
  publisher    = {ICLR},
  title        = {{Near, far: Patch-ordering enhances vision foundation models' scene understanding}},
  year         = {2025},
}

@inproceedings{20037,
  abstract     = {Disentangling polysemantic neurons is at the core of many current approaches to interpretability of large language models. Here we attempt to study how disentanglement can be used to understand performance, particularly under weight sparsity, a leading post-training optimization technique. We suggest a novel measure for estimating neuronal entanglement: the Wasserstein distance of a neuron's output distribution to a Gaussian. Moreover, we show the existence of a small number of highly entangled "Wasserstein Neurons" in each linear layer of an LLM, characterized by their highly non-Gaussian output distributions, their role in mapping similar inputs to dissimilar outputs, and their significant impact on model accuracy. To study these phenomena, we propose a new experimental framework for disentangling polysemantic neurons. Our framework separates each layer's inputs to create a mixture of experts where each neuron's output is computed by a mixture of neurons of lower Wasserstein distance, each better at maintaining accuracy when sparsified without retraining. We provide strong evidence that this is because the mixture of sparse experts is effectively disentangling the input-output relationship of individual neurons, in particular the difficult Wasserstein neurons.},
  author       = {Sawmya, Shashata and Kong, Linghao and Markov, Ilia and Alistarh, Dan-Adrian and Shavit, Nir},
  booktitle    = {13th International Conference on Learning Representations},
  isbn         = {9798331320850},
  location     = {Singapore, Singapore},
  pages        = {26244--26274},
  publisher    = {ICLR},
  title        = {{Wasserstein distances, neuronal entanglement, and sparsity}},
  year         = {2025},
}

@inproceedings{20038,
  abstract     = {Pruning eliminates unnecessary parameters in neural networks; it offers a promising solution to the growing computational demands of large language models (LLMs). While many focus on post-training pruning, sparse pre-training--which combines pruning and pre-training into a single phase--provides a simpler alternative. In this work, we present the first systematic exploration of optimal sparse pre-training configurations for LLMs through an examination of 80 unique pruning schedules across different sparsity levels and training durations. We find that initiating pruning at 25% of total training compute and concluding at 75% achieves near-optimal final evaluation loss. These findings provide valuable insights for efficient and effective sparse pre-training of LLMs. Furthermore, we propose a new scaling law that modifies the Chinchilla scaling law to use the average parameter count over pre-training. Through empirical and theoretical validation, we demonstrate that this modified scaling law accurately models evaluation loss for both sparsely and densely pre-trained LLMs, unifying scaling laws across pre-training paradigms. Our findings indicate that while sparse pre-training achieves the same final model quality as dense pre-training for equivalent compute budgets, it provides substantial benefits through reduced model size, enabling significant potential computational savings during inference.},
  author       = {Jin, Tian and Humayun, Ahmed Imtiaz and Evci, Utku and Subramanian, Suvinay and Yazdanbakhsh, Amir and Alistarh, Dan-Adrian and Dziugaite, Gintare Karolina},
  booktitle    = {13th International Conference on Learning Representations},
  isbn         = {9798331320850},
  location     = {Singapore, Singapore},
  pages        = {85165--85181},
  publisher    = {ICLR},
  title        = {{The journey matters: Average parameter count over pre-training unifies sparse and dense scaling laws}},
  year         = {2025},
}

@article{20040,
  abstract     = {Contractive coupling rates have been recently introduced by Conforti as a tool to establish convex Sobolev inequalities (including modified log-Sobolev and Poincaré inequality) for some classes of Markov chains. In this work, for most of the examples discussed by Conforti, we use contractive coupling rates to prove stronger inequalities, in the form of curvature lower bounds (in entropic and discrete Bakry–Émery sense) and geodesic convexity of some entropic functionals. In addition, we recall and give straightforward generalizations of some notions of coarse Ricci curvature, and we discuss some of their properties and relations with the concepts of couplings and coupling rates: as an application, we show exponential contraction of the p-Wasserstein distance for the heat flow in the aforementioned examples.},
  author       = {Pedrotti, Francesco},
  issn         = {1050-5164},
  journal      = {The Annals of Applied Probability},
  number       = {1},
  pages        = {196--250},
  publisher    = {Institute of Mathematical Statistics},
  title        = {{Contractive coupling rates and curvature lower bounds for Markov chains}},
  doi          = {10.1214/24-aap2113},
  volume       = {35},
  year         = {2025},
}

@article{20043,
  abstract     = {We establish an isomorphism of complex K-theory of the moduli space M of “SL_n”-Higgs bundles of degree d and rank n (in the sense of Hausel–Thaddeus) and twisted complex K-theory of the orbifold M of PGL_n-Higgs bundles of degree e, where (n,d)=(n,e)=1. Along the way, we prove the vanishing of torsion for H^*(M) and certain twisted complex K-theory groups of M. We also extend Arinkin’s autoduality of compactified Jacobian to a derived equivalence between SL_n- and PGL_n-Hitchin systems over the elliptic locus. In the appendix, we develop a formalism of G-sheaves of spectra, generalising equivariant homotopy theory to a relative setting.},
  author       = {Groechenig, Michael and Shen, Shiyu},
  issn         = {1435-9863},
  journal      = {Journal of the European Mathematical Society},
  publisher    = {EMS Press},
  title        = {{Complex K-theory of moduli spaces of Higgs bundles}},
  doi          = {10.4171/jems/1601},
  year         = {2025},
}

@article{20044,
  abstract     = {Genetic trade-offs—which occur when variants that are beneficial in some contexts of natural selection are harmful in others—can influence a wide range of evolutionary phenomena, from the maintenance of genetic variation to the evolution of aging and sex differences. An extensive body of evolutionary theory has focused on the consequences of such trade-offs, and recent analyses of Fisher’s geometric model have further quantified the expected proportion of new mutations that exhibit trade-offs. However, the theory remains silent regarding the prevalence of trade-offs among the variants that contribute to adaptation. Here, we extend Fisher’s geometric model to predict the prevalence of trade-offs among the adaptive mutations that become established or fixed in a population. We consider trade-offs between sexes, habitats, fitness components, and temporally fluctuating environments. In all 4 scenarios, trade-off alleles are consistently under-represented among established relative to new beneficial mutations—an effect that arises from the greater susceptibility of trade-off alleles to genetic drift. Adaptation during a population size decline exacerbates this deficit of trade-offs among established mutations, whereas population expansions dampen it. Consequently, threatened populations should primarily adapt using unconditionally beneficial alleles, while invasive populations are more prone to adaptation using variants that exhibit trade-offs.},
  author       = {Connallon, Tim and Czuppon, Peter and Olito, Colin and Goedert, Debora and Kokko, Hanna and Nava-Bolaños, Angela and Nilén, Sofie and Svensson, Erik I and Zwoinska, Martyna and Dutoit, Ludovic and Ruzicka, Filip},
  issn         = {1558-5646},
  journal      = {Evolution},
  number       = {7},
  pages        = {1243--1255},
  publisher    = {Oxford University Press},
  title        = {{Predicting the prevalence of genetic trade-offs among adaptive substitutions}},
  doi          = {10.1093/evolut/qpaf061},
  volume       = {79},
  year         = {2025},
}

@article{20045,
  abstract     = {We consider the time evolution of the renormalized Nelson model, which describes N bosons linearly coupled to a quantized scalar field, in the mean-field limit of many particles N≫1 with coupling constant proportional to N^−1/2. First, we show that initial states exhibiting Bose–Einstein condensation for the particles and approximating a coherent state for the quantum field retain their structure under the many-body time evolution. Concretely, the dynamics of the reduced densities are approximated by solutions of two coupled PDEs, the Schrödinger–Klein–Gordon equations. Second, we construct a renormalized Bogoliubov evolution that describes the quantum fluctuations around the Schrödinger–Klein–Gordon equations. This evolution is used to extend the approximation of the evolved many-body state to the full norm topology. In summary, we provide a comprehensive analysis of the Nelson model that reveals the role of renormalization in the mean-field Bogoliubov theory.},
  author       = {Falconi, Marco and Lampart, Jonas and Leopold, Nikolai and Mitrouskas, David Johannes},
  issn         = {1873-1430},
  journal      = {Annales de l'Institut Henri Poincaré C},
  publisher    = {EMS Press},
  title        = {{Renormalized Bogoliubov theory for the Nelson model}},
  doi          = {10.4171/aihpc/154},
  year         = {2025},
}

@article{20046,
  abstract     = {A Laplacian matrix is a real symmetric matrix whose row and column sums are zero. We investigate the limiting distribution of the largest eigenvalues of a Laplacian random matrix with Gaussian entries. Unlike many classical matrix ensembles, this random matrix model contains dependent entries. Our main results show that the extreme eigenvalues of this model exhibit Poisson statistics. In particular, after properly shifting and scaling, we show that the largest eigenvalue converges to the Gumbel distribution as the dimension of the matrix tends to infinity. While the largest diagonal entry is also shown to have Gumbel fluctuations, there is a rather surprising difference between its deterministic centering term and the centering term required for the largest eigenvalues.},
  author       = {Campbell, Andrew J and Luh, Kyle and O’Rourke, Sean and Arenas-Velilla, Santiago and Perez-Abreu, Victor},
  issn         = {1083-6489},
  journal      = {Electronic Journal of Probability},
  pages        = {1--52},
  publisher    = {Institute of Mathematical Statistics},
  title        = {{Extreme eigenvalues of Laplacian random matrices with Gaussian entries}},
  doi          = {10.1214/25-ejp1366},
  volume       = {30},
  year         = {2025},
}

@article{20048,
  abstract     = {During embryonic development, cell behaviors need to be tightly regulated in time and space. Yet how the temporal and spatial regulations of cell behaviors are interconnected during embryonic development remains elusive. To address this, we turned to zebrafish gastrulation, the process whereby dynamic cell behaviors generate the three principal germ layers of the early embryo. Here, we show that Hoxb cluster genes are expressed in a temporally collinear manner at the blastoderm margin, where mesodermal and endodermal (mesendoderm) progenitor cells are specified and ingress to form mesendoderm/hypoblast. Functional analysis shows that these Hoxb genes regulate the timing of cell ingression: under- or overexpression of Hoxb genes perturb the timing of mesendoderm cell ingression and, consequently, the positioning of these cells along the forming anterior-posterior body axis after gastrulation. Finally, we found that Hoxb genes control the timing of mesendoderm ingression by regulating cellular bleb formation and cell surface fluctuations in the ingressing cells. Collectively, our findings suggest that Hoxb genes interconnect the temporal and spatial pattern of cell behaviors during zebrafish gastrulation by controlling cell surface fluctuations.},
  author       = {Moriyama, Yuuta and Mitsui, Toshiyuki and Heisenberg, Carl-Philipp J},
  issn         = {1477-9129},
  journal      = {Development},
  number       = {12},
  publisher    = {The Company of Biologists},
  title        = {{Hoxb genes determine the timing of cell ingression by regulating cell surface fluctuations during zebrafish gastrulation}},
  doi          = {10.1242/dev.204261},
  volume       = {152},
  year         = {2025},
}

@article{20050,
  abstract     = {We prove upper bounds on the L∞-Wasserstein distance from optimal transport between strongly log-concave probability densities and log-Lipschitz perturbations. In the simplest setting, such a bound amounts to a transport-information inequality involving the L∞-Wasserstein metric and the relative L∞-Fisher information. We show that this inequality can be sharpened significantly in situations where the involved densities are anisotropic. Our proof is based on probabilistic techniques using Langevin dynamics. As an application of these results, we obtain sharp exponential rates of convergence in Fisher’s infinitesimal model from quantitative genetics, generalising recent results by Calvez, Poyato, and Santambrogio in dimension 1 to arbitrary dimensions.},
  author       = {Khudiakova, Kseniia and Maas, Jan and Pedrotti, Francesco},
  issn         = {1050-5164},
  journal      = {The Annals of Applied Probability},
  number       = {3},
  pages        = {1913--1940},
  publisher    = {Institute of Mathematical Statistics},
  title        = {{L∞-optimal transport of anisotropic log-concave measures and exponential convergence in Fisher’s infinitesimal model}},
  doi          = {10.1214/25-aap2162},
  volume       = {35},
  year         = {2025},
}

@inproceedings{20051,
  abstract     = {We revisit the majority problem in the population protocol communication model, as first studied by Angluin et al. (Distributed Computing 2008). We consider a more general version of this problem known as plurality consensus, which has already been studied intensively in the literature. In this problem, each node in a system of n nodes, has initially one of k different opinions, and they need to agree on the (relative) majority opinion. In particular, we consider the important and intensively studied model of Undecided State Dynamics.
Our main contribution is an almost tight lower bound on the stabilization time: we prove that there exists an initial configuration, even with bias $\Delta = \omega(\sqrt{n\log n})$, where stabilization requires $\Omega(kn\log \frac {\sqrt n} {k \log n})$ interactions, or equivalently, $\Omega(k\log \frac {\sqrt n} {k \log n})$ parallel time for any $k = o\left(\frac {\sqrt n}{\log n}\right)$. This bound is tight for any $k \le n^{\frac 1 2 - \epsilon}$, where $\epsilon >0$ can be any small constant, as Amir et al.~(PODC'23) gave a $O(k\log n)$ parallel time upper bound for $k = O\left(\frac {\sqrt n} {\log ^2 n}\right)$.},
  author       = {El-Hayek, Antoine and Elsässer, Robert and Schmid, Stefan},
  booktitle    = {Proceedings of the ACM Symposium on Principles of Distributed Computing},
  isbn         = {9798400718854},
  location     = {Huatulco, Mexico},
  publisher    = {Association for Computing Machinery},
  title        = {{An almost tight lower bound for plurality consensus with undecided state dynamics in the population protocol model}},
  doi          = {10.1145/3732772.3733505},
  year         = {2025},
}

@inproceedings{20052,
  abstract     = {This paper revisits a fundamental distributed computing problem in the population protocol model. Provided n agents each starting with an input color in [k], the relative majority problem asks to find the predominant color. In the population protocol model, at each time step, a scheduler selects two agents that first learn each other's states and then update their states based on what they learned.
We present the Circles protocol that solves the relative majority problem with $k^3$ states. It is always-correct under weakly fair scheduling. Not only does it improve upon the best known upper bound of $O(k^7)$, but it also shows a strikingly simpler design inspired by energy minimization in chemical settings.},
  author       = {Breitkopf, Tom-Lukas and Dallot, Julien and El-Hayek, Antoine and Schmid, Stefan},
  booktitle    = {Proceedings of the ACM Symposium on Principles of Distributed Computing},
  isbn         = {9798400718854},
  location     = {Huatulco, Mexico},
  pages        = {549--552},
  publisher    = {Association for Computing Machinery},
  title        = {{Brief announcement: Minimizing energy solves relative majority with a cubic number of states in population protocols}},
  doi          = {10.1145/3732772.3733512},
  year         = {2025},
}

@inproceedings{20053,
  abstract     = {Liquid democracy is a transitive vote delegation mechanism over voting graphs. It enables each voter to delegate their vote(s) to another better-informed voter, with the goal of collectively making a better decision. The question of whether liquid democracy outperforms direct voting has been previously studied in the context of local delegation mechanisms (where voters can only delegate to someone in their neighbourhood) and binary decision problems. It has previously been shown that it is impossible for local delegation mechanisms to outperform direct voting in general graphs. This raises the question: for which classes of graphs do local delegation mechanisms yield good results?
In this work, we analyse (1) properties of specific graphs and (2) properties of local delegation mechanisms on these graphs, determining where local delegation actually outperforms direct voting. We show that a critical graph property enabling liquid democracy is that the voting outcome of local delegation mechanisms preserves a sufficient amount of variance, thereby avoiding situations where delegation falls behind direct voting. These insights allow us to prove our main results, namely that there exist local delegation mechanisms that perform no worse and in fact quantitatively better than direct voting in natural graph topologies like complete, random d-regular, and bounded degree graphs, lending a more nuanced perspective to previous impossibility results.},
  author       = {Chatterjee, Krishnendu and Gilbert, Seth and Schmid, Stefan and Svoboda, Jakub and Yeo, Michelle X},
  booktitle    = {Proceedings of the ACM Symposium on Principles of Distributed Computing},
  isbn         = {9798400718854},
  location     = {Huatulco, Mexico},
  pages        = {241--251},
  publisher    = {Association for Computing Machinery},
  title        = {{When is liquid democracy possible?: On the manipulation of variance}},
  doi          = {10.1145/3732772.3733544},
  year         = {2025},
}

@inproceedings{20054,
  author       = {Horta, Sharona},
  booktitle    = {Proceedings of the MATSUS Spring 2025 Conference},
  location     = {Sevilla, Spain},
  publisher    = {Fundació de la comunitat valenciana SCITO},
  title        = {{Solid state diffusion in metal-semiconductors core-shell nanoparticle}},
  doi          = {10.29363/nanoge.matsusspring.2025.220},
  year         = {2025},
}

@inproceedings{20055,
  abstract     = {Supercrystals represent three-dimensional orderings of colloidal nanocrystals (NCs), showcasing collective properties in photonics, phononics, and electronics applications. Recent studies have shown that such assemblies are directly produced during nanocrystal reactions. However, a fundamental understanding of in situ formed supercrystals that withstand typical NC purification processes remains underexplored, which is important for further use. Herein, we report the reaction precursor-mediated formation of stable PbTe supercrystals. Rationalizing the formation of these assemblies through small-angle x-ray scattering (SAXS) measurements, we unveil their formation mechanism. Our findings reveal that the supercrystal formation occurs in the presence of an excess of lead oleates in the crude solution. It should be noted that the formed supercrystals can be stabilized under specific conditions determined by the lead oleate cluster concentration, content of trioctylphosphine telluride (TOP-Te), NC size and the need of an annealing step at mild conditions. Furthermore, this approach allows for the continuous growth of a secondary phase within the supercrystal; for example in the case of PbTe supercrystals, a PbS shell can be grown on each PbTe NC constituent, resulting in core-shell PbTe-PbS supercrystals. Our work elucidates that reaction precursors play an important role in in situ SC formation and stabilization, implying the possibility of applying this knowledge to other NC reactions.},
  author       = {Lee, Seungho and Balazs, Daniel and Horta, Sharona and Rayaroth Puthiyaveettil, Aiswarya and Ibáñez, Maria},
  booktitle    = {Proceedings of the MATSUS Spring 2025 Conference},
  location     = {Sevilla, Spain},
  publisher    = {Fundació de la comunitat valenciana SCITO},
  title        = {{Reaction precursor-mediated formation of stable supercrystals in colloidal nanocrystal synthesis: PbTe case}},
  doi          = {10.29363/nanoge.matsusspring.2025.173},
  year         = {2025},
}

@article{20056,
  abstract     = {Theoretical studies have shown that stochasticity can affect the dynamics of ecosystems in counterintuitive ways. However, without knowing the equations governing the dynamics of populations or ecosystems, it is difficult to ascertain the role of stochasticity in real datasets. Therefore, the inverse problem of inferring the governing stochastic equations from datasets is important. Here, we present an equation discovery methodology that takes time series data of state variables as input and outputs a stochastic differential equation. We achieve this by combining traditional approaches from stochastic calculus with the equation discovery techniques. We demonstrate the generality of the method via several applications. First, we deliberately choose various stochastic models with fundamentally different governing equations, yet they produce nearly identical steady-state distributions. We show that we can recover the correct underlying equations, and thus infer the structure of their stability, accurately from the analysis of time series data alone. We demonstrate our method on two real-world datasets—fish schooling and single-cell migration—that have vastly different spatiotemporal scales and dynamics. We illustrate various limitations and potential pitfalls of the method and how to overcome them via diagnostic measures. Finally, we provide our open-source code via a package named PyDaDDy (Python Library for Data-Driven Dynamics).},
  author       = {Nabeel, Arshed and Karichannavar, Ashwin and Palathingal, Shuaib and Jhawar, Jitesh and Brückner, David and Raj M, Danny and Guttal, Vishwesha},
  issn         = {1537-5323},
  journal      = {The American Naturalist},
  number       = {4},
  pages        = {E100--E117},
  publisher    = {University of Chicago Press},
  title        = {{Discovering stochastic dynamical equations from ecological time series data}},
  doi          = {10.1086/734083},
  volume       = {205},
  year         = {2025},
}

@article{20077,
  abstract     = {Hyaluronic acid (HA) is a key extracellular matrix component of vertebrates, where it mediates cell adhesion, immune regulation, and tissue remodeling through its interaction with specific receptors. Although HA has been detected in a few invertebrate species, the lack of fundamental components of the molecular HA pathway poses relevant objections about its functional role in these species. Mining genomic and transcriptomic data, we considered the conservation of the gene locus encoding for the extracellular link protein (XLINK) in marine mussels as well as its expression patterns. Structural and phylogenetic analyses were undertaken to evaluate possible similarities with vertebrate orthologs and to infer the origin of this gene in invertebrates. Biochemical analysis was used to quantify HA in tissues of Mytilus galloprovincialis. As a result, we confirm that the mussel can produce HA (up to 1.02 ng/mg in mantle) and that its genome encodes two XLINK gene loci. These loci are conserved in Mytilidae species and show a complex evolutionary path. Mussel XLINK genes appeared to be expressed during developmental stages in three mussel species, ranking in the top 100 expressed genes in M. trossulus at 17 h post-fertilization. In conclusion, the presence of HA and an active gene with the potential to bind HA suggests that mussels have the potential to synthesize and use HA and are among the few invertebrates encoding this gene.},
  author       = {Rosani, Umberto and Altan, Nehir and Venier, Paola and Bortoletto, Enrico and Volpi, Nicola and Bernecky, Carrie A},
  issn         = {2079-7737},
  journal      = {Biology},
  number       = {8},
  publisher    = {MDPI},
  title        = {{Ancestral origin and functional expression of a hyaluronic acid pathway complement in mussels}},
  doi          = {10.3390/biology14080930},
  volume       = {14},
  year         = {2025},
}

@article{20078,
  abstract     = {Let $A$ be an abelian variety defined over a number field $K$, $E/K$ be an elliptic curve, and $\phi : A \to E^m$ be an isogeny defined over $K$. Let $P \in A(K)$ be such that $\phi(P)=(Q_1,\dots,Q_m)$ with $\mathrm{Rank}_{\mathbb{Z}}(\langle Q_1,\dots,Q_m\rangle)=1$. We will study a divisibility sequence related to the point $P$ and show its relation with elliptic divisibility sequences.},
  author       = {Barańczuk, Stefan and Naskręcki, Bartosz and Verzobio, Matteo},
  issn         = {0022-314X},
  journal      = {Journal of Number Theory},
  pages        = {170--183},
  publisher    = {Elsevier},
  title        = {{Divisibility sequences related to abelian varieties isogenous to a power of an elliptic curve}},
  doi          = {10.1016/j.jnt.2025.06.001},
  volume       = {279},
  year         = {2025},
}

