@article{20986,
  abstract     = {During complex vocal interactions, different features of acoustic stimuli are integrated to produce appropriate vocal responses, such as copying sounds during vocal matching behavior in some animals. However, little is known about the interplay and possible trade-offs between the different temporal and spectral acoustic features during these vocal exchanges. Nightingales can flexibly match the pitch of their tonal “whistle songs” in real time during counter-singing duels. Here, we show that the syllable duration of whistle playbacks could alter the song responses of wild nightingales, causing their whistle duration distribution to shift toward the presented stimulus duration. When exposed to whistle playbacks featuring unnatural combinations of pitch and duration, nightingales demonstrate a flexible trade-off between pitch matching and temporal imitation, yet they are constrained by their vocal repertoire. They selectively adapted their vocal responses to approximate these novel stimuli, aligning them with their natural whistle repertoire. We developed a computational model of nightingale whistle-matching behavior that revealed a hierarchical organization of acoustic feature production. During whistle matching, the feature integration process is constrained by the duration of syllables, and pitch matching follows within this temporal framework, forcing a trade-off between the two features. Our findings reveal a complex interplay between the spectral and temporal domains that shapes song-matching behavior.},
  author       = {Calderon Garcia, Juan Sebastian and Costalunga, Giacomo and Vogels, Tim P and Vallentin, Daniela},
  issn         = {1879-0445},
  journal      = {Current Biology},
  publisher    = {Elsevier},
  title        = {{Interplay between syllable duration and pitch during whistle matching in wild nightingales}},
  doi          = {10.1016/j.cub.2025.12.025},
  year         = {2026},
}

@article{19068,
  abstract     = {Whether or not the neuron emits a spike in response to stimulation by an excitatory current pulse is determined by a strength-duration curve (SDC) for the pulse parameters. The SDC is a dependence of the minimal pulse amplitude required to elicit the spiking response on either the pulse duration or its decay time. Excitatory neurons affect the others through pulses of excitatory postsynaptic current. A simple yet plausible approximation for the time course of such a pulse is the alpha function, with linear rise at the start and exponential decay at the end. However, an exact analytical SDC for this case is hitherto not known, even for the leaky integrate-and-fire (LIF) neuron, the simplest spiking neuron model used in practice. We have obtained general SDC equations for the LIF neuron. Using the Lambert W function — a widely-implemented special function — we have found the exact analytical SDC for the spiking response of the LIF neuron stimulated by an excitatory current pulse in the form of the alpha function. To compare results in a unified way, we have also derived the analytical SDCs for (i) rectangular pulse, (ii) ascending ramp pulse, and (iii) instantly rising and exponentially decaying pulse. In the limit of no leakage, we show that the SDC is reduced to the classical hyperbola for all considered cases.},
  author       = {Paraskevov, Alexander},
  issn         = {2590-0374},
  journal      = {Results in Applied Mathematics},
  publisher    = {Elsevier},
  title        = {{Analytical strength-duration curve for the spiking response of the LIF neuron to an alpha-function-shaped excitatory current pulse}},
  doi          = {10.1016/j.rinam.2025.100548},
  volume       = {25},
  year         = {2025},
}

@article{17886,
  abstract     = {Thin pancake-like neuronal networks cultured on top of a planar microelectrode array have been extensively tried out in neuroengineering, as a substrate for the mobile robot’s control unit, i.e., as a cyborg’s brain. Most of these attempts failed due to intricate self-organizing dynamics in the neuronal systems. In particular, the networks may exhibit an emergent spatial map of steady nucleation sites (“n-sites”) of spontaneous population spikes. Being unpredictable and independent of the surface electrode locations, the n-sites drastically change local ability of the network to generate spikes. Here, using a spiking neuronal network model with generative spatially-embedded connectome, we systematically show in simulations that the number, location, and relative activity of spontaneously formed n-sites (“the vitals”) crucially depend on the samplings of three distributions: (1) the network distribution of neuronal excitability, (2) the distribution of connections between neurons of the network, and (3) the distribution of maximal amplitudes of a single synaptic current pulse. Moreover, blocking the dynamics of a small fraction (about 4%) of non-pacemaker neurons having the highest excitability was enough to completely suppress the occurrence of population spikes and their n-sites. This key result is explained theoretically. Remarkably, the n-sites occur taking into account only short-term synaptic plasticity, i.e., without a Hebbian-type plasticity. As the spiking network model used in this study is strictly deterministic, all simulation results can be accurately reproduced. The model, which has already demonstrated a very high richness-to-complexity ratio, can also be directly extended into the three-dimensional case, e.g., for targeting peculiarities of spiking dynamics in cerebral (or brain) organoids. 
We recommend the model as an excellent illustrative tool for teaching network-level computational neuroscience, complementing a few benchmark models.},
  author       = {Zendrikov, Dmitrii and Paraskevov, Alexander},
  issn         = {1879-2782},
  journal      = {Neural Networks},
  publisher    = {Elsevier},
  title        = {{The vitals for steady nucleation maps of spontaneous spiking coherence in autonomous two-dimensional neuronal networks}},
  doi          = {10.1016/j.neunet.2024.106589},
  volume       = {180},
  year         = {2024},
}

@article{14841,
  abstract     = {De novo heterozygous variants in KCNC2 encoding the voltage-gated potassium (K+) channel subunit Kv3.2 are a recently described cause of developmental and epileptic encephalopathy (DEE). A de novo variant in KCNC2 c.374G>A (p.Cys125Tyr) was identified via exome sequencing in a patient with DEE. Relative to wild-type Kv3.2, Kv3.2-p.Cys125Tyr induces K+ currents exhibiting a large hyperpolarizing shift in the voltage dependence of activation, accelerated activation, and delayed deactivation consistent with a relative stabilization of the open conformation, along with increased current density. Leveraging the cryogenic electron microscopy (cryo-EM) structure of Kv3.1, molecular dynamic simulations suggest that a strong π-π stacking interaction between the variant Tyr125 and Tyr156 in the α-6 helix of the T1 domain promotes a relative stabilization of the open conformation of the channel, which underlies the observed gain of function. A multicompartment computational model of a Kv3-expressing parvalbumin-positive cerebral cortex fast-spiking γ-aminobutyric acidergic (GABAergic) interneuron (PV-IN) demonstrates how the Kv3.2-Cys125Tyr variant impairs neuronal excitability and dysregulates inhibition in cerebral cortex circuits to explain the resulting epilepsy.},
  author       = {Clatot, Jerome and Currin, Christopher and Liang, Qiansheng and Pipatpolkai, Tanadet and Massey, Shavonne L. and Helbig, Ingo and Delemotte, Lucie and Vogels, Tim P and Covarrubias, Manuel and Goldberg, Ethan M.},
  issn         = {1091-6490},
  journal      = {Proceedings of the National Academy of Sciences of the United States of America},
  number       = {3},
  publisher    = {National Academy of Sciences},
  title        = {{A structurally precise mechanism links an epilepsy-associated KCNC2 potassium channel mutation to interneuron dysfunction}},
  doi          = {10.1073/pnas.2307776121},
  volume       = {121},
  year         = {2024},
}

@article{15171,
  abstract     = {The brain’s functionality is developed and maintained through synaptic plasticity. As synapses undergo plasticity, they also affect each other. The nature of such ‘co-dependency’ is difficult to disentangle experimentally, because multiple synapses must be monitored simultaneously. To help understand the experimentally observed phenomena, we introduce a framework that formalizes synaptic co-dependency between different connection types. The resulting model explains how inhibition can gate excitatory plasticity while neighboring excitatory–excitatory interactions determine the strength of long-term potentiation. Furthermore, we show how the interplay between excitatory and inhibitory synapses can account for the quick rise and long-term stability of a variety of synaptic weight profiles, such as orientation tuning and dendritic clustering of co-active synapses. In recurrent neuronal networks, co-dependent plasticity produces rich and stable motor cortex-like dynamics with high input sensitivity. Our results suggest an essential role for the neighborly synaptic interaction during learning, connecting micro-level physiology with network-wide phenomena.},
  author       = {Agnes, Everton J. and Vogels, Tim P},
  issn         = {1546-1726},
  journal      = {Nature Neuroscience},
  pages        = {964--974},
  publisher    = {Springer Nature},
  title        = {{Co-dependent excitatory and inhibitory plasticity accounts for quick, stable and long-lasting memories in biological networks}},
  doi          = {10.1038/s41593-024-01597-4},
  volume       = {27},
  year         = {2024},
}

@phdthesis{14422,
  abstract     = {Animals exhibit a remarkable ability to learn and remember new behaviors, skills, and associations throughout their lifetime. These capabilities are made possible thanks to a variety of
changes in the brain throughout adulthood, regrouped under the term "plasticity". Some cells
in the brain —neurons— and specifically changes in the connections between neurons, the
synapses, were shown to be crucial for the formation, selection, and consolidation of memories
from past experiences. These ongoing changes of synapses across time are called synaptic
plasticity. Understanding how a myriad of biochemical processes operating at individual
synapses can somehow work in concert to give rise to meaningful changes in behavior is a
fascinating problem and an active area of research.
However, the experimental search for the precise plasticity mechanisms at play in the brain
is daunting, as it is difficult to control and observe synapses during learning. Theoretical
approaches have thus been the default method to probe the plasticity-behavior connection. Such
studies attempt to extract unifying principles across synapses and model all observed synaptic
changes using plasticity rules: equations that govern the evolution of synaptic strengths across
time in neuronal network models. These rules can use many relevant quantities to determine
the magnitude of synaptic changes, such as the precise timings of pre- and postsynaptic
action potentials, the recent neuronal activity levels, the state of neighboring synapses, etc.
However, analytical studies rely heavily on human intuition and are forced to make simplifying
assumptions about plasticity rules.
In this thesis, we aim to assist and augment human intuition in this search for plasticity rules.
We explore whether a numerical approach could automatically discover the plasticity rules
that elicit desired behaviors in large networks of interconnected neurons. This approach is
dubbed meta-learning synaptic plasticity: learning plasticity rules which themselves will make
neuronal networks learn how to solve a desired task. We first write all the potential plasticity
mechanisms to consider using a single expression with adjustable parameters. We then optimize
these plasticity parameters using evolutionary strategies or Bayesian inference on tasks known
to involve synaptic plasticity, such as familiarity detection and network stabilization.
We show that these automated approaches are powerful tools, able to complement established
analytical methods. By comprehensively screening plasticity rules at all synapse types in
realistic, spiking neuronal network models, we discover entire sets of degenerate plausible
plasticity rules that reliably elicit memory-related behaviors. Our approaches allow for more
robust experimental predictions, by abstracting out the idiosyncrasies of individual plasticity
rules, and provide fresh insights on synaptic plasticity in spiking network models.
},
  author       = {Confavreux, Basile J},
  issn         = {2663-337X},
  pages        = {148},
  publisher    = {Institute of Science and Technology Austria},
  school       = {Institute of Science and Technology Austria},
  title        = {{Synapseek: Meta-learning synaptic plasticity rules}},
  doi          = {10.15479/at:ista:14422},
  year         = {2023},
}

@article{12009,
  abstract     = {Changes in the short-term dynamics of excitatory synapses over development have been observed throughout cortex, but their purpose and consequences remain unclear. Here, we propose that developmental changes in synaptic dynamics buffer the effect of slow inhibitory long-term plasticity, allowing for continuously stable neural activity. Using computational modeling we demonstrate that early in development excitatory short-term depression quickly stabilises neural activity, even in the face of strong, unbalanced excitation. We introduce a model of the commonly observed developmental shift from depression to facilitation and show that neural activity remains stable throughout development, while inhibitory synaptic plasticity slowly balances excitation, consistent with experimental observations. Our model predicts changes in the input responses from phasic to phasic-and-tonic and more precise spike timings. We also observe a gradual emergence of short-lasting memory traces governed by short-term plasticity development. We conclude that the developmental depression-to-facilitation shift may control excitation-inhibition balance throughout development with important functional consequences.},
  author       = {Jia, David W. and Vogels, Tim P and Costa, Rui Ponte},
  issn         = {2399-3642},
  journal      = {Communications Biology},
  publisher    = {Springer Nature},
  title        = {{Developmental depression-to-facilitation shift controls excitation-inhibition balance}},
  doi          = {10.1038/s42003-022-03801-2},
  volume       = {5},
  year         = {2022},
}

@inproceedings{13239,
  abstract     = {Brains are thought to engage in predictive learning - learning to predict upcoming stimuli - to construct an internal model of their environment. This is especially notable for spatial navigation, as first described by Tolman’s latent learning tasks. However, predictive learning has also been observed in sensory cortex, in settings unrelated to spatial navigation. Apart from normative frameworks such as active inference or efficient coding, what could be the utility of learning to predict the patterns of occurrence of correlated stimuli? Here we show that prediction, and thereby the construction of an internal model of sequential stimuli, can bootstrap the learning process of a working memory task in a recurrent neural network. We implemented predictive learning alongside working memory match-tasks, and networks emerged to solve the prediction task first by encoding information across time to predict upcoming stimuli, and then eavesdropped on this solution to solve the matching task. Eavesdropping was most beneficial when neural resources were limited. Hence, predictive learning acts as a general neural mechanism to learn to store sensory information that can later be essential for working memory tasks.},
  author       = {Van Der Plas, Thijs L. and Vogels, Tim P and Manohar, Sanjay G.},
  booktitle    = {Proceedings of Machine Learning Research},
  issn         = {2640-3498},
  pages        = {518--531},
  publisher    = {ML Research Press},
  title        = {{Predictive learning enables neural networks to learn complex working memory tasks}},
  volume       = {199},
  year         = {2022},
}

@article{11143,
  abstract     = {Dravet syndrome is a neurodevelopmental disorder characterized by epilepsy, intellectual disability, and sudden death due to pathogenic variants in SCN1A with loss of function of the sodium channel subunit Nav1.1. Nav1.1-expressing parvalbumin GABAergic interneurons (PV-INs) from young Scn1a+/− mice show impaired action potential generation. An approach assessing PV-IN function in the same mice at two time points shows impaired spike generation in all Scn1a+/− mice at postnatal days (P) 16–21, whether deceased prior or surviving to P35, with normalization by P35 in surviving mice. However, PV-IN synaptic transmission is dysfunctional in young Scn1a+/− mice that did not survive and in Scn1a+/− mice ≥ P35. Modeling confirms that PV-IN axonal propagation is more sensitive to decreased sodium conductance than spike generation. These results demonstrate dynamic dysfunction in Dravet syndrome: combined abnormalities of PV-IN spike generation and propagation drives early disease severity, while ongoing dysfunction of synaptic transmission contributes to chronic pathology.},
  author       = {Kaneko, Keisuke and Currin, Christopher and Goff, Kevin M. and Wengert, Eric R. and Somarowthu, Ala and Vogels, Tim P and Goldberg, Ethan M.},
  issn         = {2211-1247},
  journal      = {Cell Reports},
  number       = {13},
  publisher    = {Elsevier},
  title        = {{Developmentally regulated impairment of parvalbumin interneuron synaptic transmission in an experimental model of Dravet syndrome}},
  doi          = {10.1016/j.celrep.2022.110580},
  volume       = {38},
  year         = {2022},
}

@article{8253,
  abstract     = {Brains process information in spiking neural networks. Their intricate connections shape the diverse functions these networks perform. In comparison, the functional capabilities of models of spiking networks are still rudimentary. This shortcoming is mainly due to the lack of insight and practical algorithms to construct the necessary connectivity. Any such algorithm typically attempts to build networks by iteratively reducing the error compared to a desired output. But assigning credit to hidden units in multi-layered spiking networks has remained challenging due to the non-differentiable nonlinearity of spikes. To avoid this issue, one can employ surrogate gradients to discover the required connectivity in spiking network models. However, the choice of a surrogate is not unique, raising the question of how its implementation influences the effectiveness of the method. Here, we use numerical simulations to systematically study how essential design parameters of surrogate gradients impact learning performance on a range of classification problems. We show that surrogate gradient learning is robust to different shapes of underlying surrogate derivatives, but the choice of the derivative’s scale can substantially affect learning performance. When we combine surrogate gradients with a suitable activity regularization technique, robust information processing can be achieved in spiking networks even at the sparse activity limit. Our study provides a systematic account of the remarkable robustness of surrogate gradient learning and serves as a practical guide to model functional spiking neural networks.},
  author       = {Zenke, Friedemann and Vogels, Tim P},
  issn         = {1530-888X},
  journal      = {Neural Computation},
  number       = {4},
  pages        = {899--925},
  publisher    = {MIT Press},
  title        = {{The remarkable robustness of surrogate gradient learning for instilling complex function in spiking neural networks}},
  doi          = {10.1162/neco_a_01367},
  volume       = {33},
  year         = {2021},
}

@article{8127,
  abstract     = {Mechanistic modeling in neuroscience aims to explain observed phenomena in terms of underlying causes. However, determining which model parameters agree with complex and stochastic neural data presents a significant challenge. We address this challenge with a machine learning tool which uses deep neural density estimators—trained using model simulations—to carry out Bayesian inference and retrieve the full space of parameters compatible with raw data or selected data features. Our method is scalable in parameters and data features and can rapidly analyze new data after initial training. We demonstrate the power and flexibility of our approach on receptive fields, ion channels, and Hodgkin–Huxley models. We also characterize the space of circuit configurations giving rise to rhythmic activity in the crustacean stomatogastric ganglion, and use these results to derive hypotheses for underlying compensation mechanisms. Our approach will help close the gap between data-driven and theory-driven models of neural dynamics.},
  author       = {Gonçalves, Pedro J. and Lueckmann, Jan-Matthis and Deistler, Michael and Nonnenmacher, Marcel and Öcal, Kaan and Bassetto, Giacomo and Chintaluri, Chaitanya and Podlaski, William F. and Haddad, Sara A. and Vogels, Tim P and Greenberg, David S. and Macke, Jakob H.},
  issn         = {2050-084X},
  journal      = {eLife},
  publisher    = {eLife Sciences Publications},
  title        = {{Training deep neural density estimators to identify mechanistic models of neural dynamics}},
  doi          = {10.7554/eLife.56261},
  volume       = {9},
  year         = {2020},
}

@inproceedings{9633,
  abstract     = {The search for biologically faithful synaptic plasticity rules has resulted in a large body of models. They are usually inspired by – and fitted to – experimental data, but they rarely produce neural dynamics that serve complex functions. These failures suggest that current plasticity models are still under-constrained by existing data. Here, we present an alternative approach that uses meta-learning to discover plausible synaptic plasticity rules. Instead of experimental data, the rules are constrained by the functions they implement and the structure they are meant to produce. Briefly, we parameterize synaptic plasticity rules by a Volterra expansion and then use supervised learning methods (gradient descent or evolutionary strategies) to minimize a problem-dependent loss function that quantifies how effectively a candidate plasticity rule transforms an initially random network into one with the desired function. We first validate our approach by re-discovering previously described plasticity rules, starting at the single-neuron level and “Oja’s rule”, a simple Hebbian plasticity rule that captures the direction of most variability of inputs to a neuron (i.e., the first principal component). We expand the problem to the network level and ask the framework to find Oja’s rule together with an anti-Hebbian rule such that an initially random two-layer firing-rate network will recover several principal components of the input space after learning. Next, we move to networks of integrate-and-fire neurons with plastic inhibitory afferents. We train for rules that achieve a target firing rate by countering tuned excitation. Our algorithm discovers a specific subset of the manifold of rules that can solve this task. Our work is a proof of principle of an automated and unbiased approach to unveil synaptic plasticity rules that obey biological constraints and can solve complex functions.},
  author       = {Confavreux, Basile J and Zenke, Friedemann and Agnes, Everton J. and Lillicrap, Timothy and Vogels, Tim P},
  booktitle    = {Advances in Neural Information Processing Systems},
  issn         = {1049-5258},
  location     = {Vancouver, Canada},
  pages        = {16398--16408},
  title        = {{A meta-learning approach to (re)discover plasticity rules that carve a desired function into a neural network}},
  volume       = {33},
  year         = {2020},
}

