@phdthesis{10007,
  abstract     = {The present thesis is concerned with the derivation of weak-strong uniqueness principles for curvature driven interface evolution problems not satisfying a comparison principle. The specific examples being treated are two-phase Navier-Stokes flow with surface tension, modeling the evolution of two incompressible, viscous and immiscible fluids separated by a sharp interface, and multiphase mean curvature flow, which serves as an idealized model for the motion of grain boundaries in an annealing polycrystalline material. Our main results - obtained in joint works with Julian Fischer, Tim Laux and Theresa M. Simon - state that prior to the formation of geometric singularities due to topology changes, the weak solution concept of Abels (Interfaces Free Bound. 9, 2007) to two-phase Navier-Stokes flow with surface tension and the weak solution concept of Laux and Otto (Calc. Var. Partial Differential Equations 55, 2016) to multiphase mean curvature flow (for networks in R^2 or double bubbles in R^3) represents the unique solution to these interface evolution problems within the class of classical solutions, respectively. To the best of the author's knowledge, for interface evolution problems not admitting a geometric comparison principle the derivation of a weak-strong uniqueness principle represented an open problem, so that the works contained in the present thesis constitute the first positive results in this direction. The key ingredient of our approach consists of the introduction of a novel concept of relative entropies for a class of curvature driven interface evolution problems, for which the associated energy contains an interfacial contribution being proportional to the surface area of the evolving (network of) interface(s). 
The interfacial part of the relative entropy gives sufficient control on the interface error between a weak and a classical solution, and its time evolution can be computed, at least in principle, for any energy dissipating weak solution concept. A resulting stability estimate for the relative entropy essentially entails the above mentioned weak-strong uniqueness principles. The present thesis contains a detailed introduction to our relative entropy approach, which in particular highlights potential applications to other problems in curvature driven interface evolution not treated in this thesis.},
  author       = {Hensel, Sebastian},
  issn         = {2663-337X},
  pages        = {300},
  publisher    = {Institute of Science and Technology Austria},
  title        = {{Curvature driven interface evolution: Uniqueness properties of weak solution concepts}},
  doi          = {10.15479/at:ista:10007},
  year         = {2021},
}

@phdthesis{10429,
  abstract     = {The scalability of concurrent data structures and distributed algorithms strongly depends on
reducing the contention for shared resources and the costs of synchronization and communication. We show how such cost reductions can be attained by relaxing the strict consistency conditions required by sequential implementations. In the first part of the thesis, we consider relaxation in the context of concurrent data structures. Specifically, in data structures 
such as priority queues, imposing strong semantics renders scalability impossible, since a correct implementation of the remove operation should return only the element with highest priority. Intuitively, attempting to invoke remove operations concurrently  creates a race condition. This bottleneck  can be circumvented by relaxing semantics of the affected data structure, thus allowing removal of the elements which are no longer required to have the highest priority. We prove that the randomized implementations of relaxed data structures provide provable guarantees on the priority of the removed elements even under concurrency. Additionally, we show that in some cases the relaxed data structures can be used to scale the classical algorithms which are usually implemented with the exact ones. In the second part, we study parallel variants of the  stochastic gradient descent (SGD) algorithm, which distribute computation  among the multiple processors, thus reducing the running time. Unfortunately, in order for standard parallel SGD to succeed, each processor has to maintain a local copy of the necessary model parameter, which is identical to the local copies of other processors; the overheads from this perfect consistency in terms of communication and synchronization can negate the speedup gained by distributing the computation. We show that the consistency conditions required by SGD can be  relaxed, allowing the algorithm to be more flexible in terms of tolerating quantized communication, asynchrony, or even crash faults, while its convergence remains asymptotically the same.},
  author       = {Nadiradze, Giorgi},
  issn         = {2663-337X},
  pages        = {132},
  publisher    = {Institute of Science and Technology Austria},
  title        = {{On achieving scalability through relaxation}},
  doi          = {10.15479/at:ista:10429},
  year         = {2021},
}

@inproceedings{10435,
  abstract     = {Decentralized optimization is emerging as a viable alternative for scalable distributed machine learning, but also introduces new challenges in terms of synchronization costs. To this end, several communication-reduction techniques, such as non-blocking communication, quantization, and local steps, have been explored in the decentralized setting. Due to the complexity of analyzing optimization in such a relaxed setting, this line of work often assumes \emph{global} communication rounds, which require additional synchronization. In this paper, we consider decentralized optimization in the simpler, but harder to analyze, \emph{asynchronous gossip} model, in which communication occurs in discrete, randomly chosen pairings among nodes. Perhaps surprisingly, we show that a variant of SGD called \emph{SwarmSGD} still converges in this setting, even if \emph{non-blocking communication}, \emph{quantization}, and \emph{local steps} are all applied \emph{in conjunction}, and even if the node data distributions and underlying graph topology are both \emph{heterogenous}. Our analysis is based on a new connection with multi-dimensional load-balancing processes. We implement this algorithm and deploy it in a super-computing environment, showing that it can outperform previous decentralized methods in terms of end-to-end training time, and that it can even rival carefully-tuned large-batch SGD for certain tasks.},
  author       = {Nadiradze, Giorgi and Sabour, Amirmojtaba and Davies, Peter and Li, Shigang and Alistarh, Dan-Adrian},
  booktitle    = {35th Conference on Neural Information Processing Systems},
  location     = {Virtual},
  publisher    = {Neural Information Processing Systems Foundation},
  title        = {{Asynchronous decentralized SGD with quantized and local updates}},
  year         = {2021},
}

@inproceedings{10432,
  abstract     = {One key element behind the recent progress of machine learning has been the ability to train machine learning models in large-scale distributed shared-memory and message-passing environments. Most of these models are trained employing variants of stochastic gradient descent (SGD) based optimization, but most methods involve some type of consistency relaxation relative to sequential SGD, to mitigate its large communication or synchronization costs at scale. In this paper, we introduce a general consistency condition covering communication-reduced and asynchronous distributed SGD implementations. Our framework, called elastic consistency, decouples the system-specific aspects of the implementation from the SGD convergence requirements, giving a general way to obtain convergence bounds for a wide variety of distributed SGD methods used in practice. Elastic consistency can be used to re-derive or improve several previous convergence bounds in message-passing and shared-memory settings, but also to analyze new models and distribution schemes. As a direct application, we propose and analyze a new synchronization-avoiding scheduling scheme for distributed SGD, and show that it can be used to efficiently train deep convolutional models for image classification.},
  author       = {Nadiradze, Giorgi and Markov, Ilia and Chatterjee, Bapi and Kungurtsev, Vyacheslav and Alistarh, Dan-Adrian},
  booktitle    = {Proceedings of the AAAI Conference on Artificial Intelligence},
  location     = {Virtual},
  number       = {10},
  pages        = {9037--9045},
  title        = {{Elastic consistency: A practical consistency model for distributed stochastic gradient descent}},
  volume       = {35},
  year         = {2021},
}

@unpublished{10013,
  abstract     = {We derive a weak-strong uniqueness principle for BV solutions to multiphase mean curvature flow of triple line clusters in three dimensions. Our proof is based on the explicit construction of a gradient-flow calibration in the sense of the recent work of Fischer et al. [arXiv:2003.05478] for any such cluster. This extends the two-dimensional construction to the three-dimensional case of surfaces meeting along triple junctions.},
  author       = {Hensel, Sebastian and Laux, Tim},
  booktitle    = {arXiv},
  note         = {arXiv preprint arXiv:2108.01733},
  title        = {{Weak-strong uniqueness for the mean curvature flow of double bubbles}},
  doi          = {10.48550/arXiv.2108.01733},
  year         = {2021},
}

@phdthesis{9418,
  abstract     = {Deep learning is best known for its empirical success across a wide range of applications
spanning computer vision, natural language processing and speech. Of equal significance,
though perhaps less known, are its ramifications for learning theory: deep networks have
been observed to perform surprisingly well in the high-capacity regime, aka the overfitting
or underspecified regime. Classically, this regime on the far right of the bias-variance curve
is associated with poor generalisation; however, recent experiments with deep networks
challenge this view.

This thesis is devoted to investigating various aspects of underspecification in deep learning.
First, we argue that deep learning models are underspecified on two levels: a) any given
training dataset can be fit by many different functions, and b) any given function can be
expressed by many different parameter configurations. We refer to the second kind of
underspecification as parameterisation redundancy and we precisely characterise its extent.
Second, we characterise the implicit criteria (the inductive bias) that guide learning in the
underspecified regime. Specifically, we consider a nonlinear but tractable classification
setting, and show that given the choice, neural networks learn classifiers with a large margin.
Third, we consider learning scenarios where the inductive bias is not by itself sufficient to
deal with underspecification. We then study different ways of ‘tightening the specification’: i)
In the setting of representation learning with variational autoencoders, we propose a hand-
crafted regulariser based on mutual information. ii) In the setting of binary classification, we
consider soft-label (real-valued) supervision. We derive a generalisation bound for linear
networks supervised in this way and verify that soft labels facilitate fast learning. Finally, we
explore an application of soft-label supervision to the training of multi-exit models.},
  author       = {Bui Thi Mai, Phuong},
  issn         = {2663-337X},
  pages        = {125},
  publisher    = {Institute of Science and Technology Austria},
  title        = {{Underspecification in deep learning}},
  doi          = {10.15479/at:ista:9418},
  year         = {2021},
}

@phdthesis{9056,
  abstract     = {In this thesis we study persistence of multi-covers of Euclidean balls and the geometric structures underlying their computation, in particular Delaunay mosaics and Voronoi tessellations. The k-fold cover for some discrete input point set consists of the space where at least k balls of radius r around the input points overlap. Persistence is a notion that captures, in some sense, the topology of the shape underlying the input. While persistence is usually computed for the union of balls, the k-fold cover is of interest as it captures local density,
and thus might approximate the shape of the input better if the input data is noisy. To compute persistence of these k-fold covers, we need a discretization that is provided by higher-order Delaunay mosaics. We present and implement a simple and efficient algorithm for the computation of higher-order Delaunay mosaics, and use it to give experimental results for their combinatorial properties. The algorithm makes use of a new geometric structure, the rhomboid tiling. It contains the higher-order Delaunay mosaics as slices, and by introducing a filtration
function on the tiling, we also obtain higher-order α-shapes as slices. These allow us to compute persistence of the multi-covers for varying radius r; the computation for varying k is less straightforward and involves the rhomboid tiling directly. We apply our algorithms to experimental sphere packings to shed light on their structural properties. Finally, inspired by periodic structures in packings and materials, we propose and implement an algorithm for periodic Delaunay triangulations to be integrated into the Computational Geometry Algorithms Library (CGAL), and discuss the implications on persistence for periodic data sets.},
  author       = {Osang, Georg F},
  issn         = {2663-337X},
  pages        = {134},
  publisher    = {Institute of Science and Technology Austria},
  title        = {{Multi-cover persistence and Delaunay mosaics}},
  doi          = {10.15479/at:ista:9056},
  year         = {2021},
}

@inproceedings{9416,
  abstract     = {We study the inductive bias of two-layer ReLU networks trained by gradient flow. We identify a class of easy-to-learn (`orthogonally separable') datasets, and characterise the solution that ReLU networks trained on such datasets converge to. Irrespective of network width, the solution turns out to be a combination of two max-margin classifiers: one corresponding to the positive data subset and one corresponding to the negative data subset. The proof is based on the recently introduced concept of extremal sectors, for which we prove a number of properties in the context of orthogonal separability. In particular, we prove stationarity of activation patterns from some time  onwards, which enables a reduction of the ReLU network to an ensemble of linear subnetworks.},
  author       = {Bui Thi Mai, Phuong and Lampert, Christoph},
  booktitle    = {9th International Conference on Learning Representations},
  location     = {Virtual},
  title        = {{The inductive bias of ReLU networks on orthogonally separable data}},
  year         = {2021},
}

@phdthesis{10035,
  abstract     = {Many security definitions come in two flavors: a stronger “adaptive” flavor, where the adversary can arbitrarily make various choices during the course of the attack, and a weaker “selective” flavor where the adversary must commit to some or all of their choices a-priori. For example, in the context of identity-based encryption, selective security requires the adversary to decide on the identity of the attacked party at the very beginning of the game whereas adaptive security allows the attacker to first see the master public key and some secret keys before making this choice. Often, it appears to be much easier to achieve selective security than it is to achieve adaptive security. A series of several recent works shows how to cleverly achieve adaptive security in several such scenarios including generalized selective decryption [Pan07][FJP15], constrained PRFs [FKPR14], and Yao’s garbled circuits [JW16]. Although the above works expressed vague intuition that they share a common technique, the connection was never made precise. In this work we present a new framework (published at Crypto ’17 [JKK+17a]) that connects all of these works and allows us to present them in a unified and simplified fashion. Having the framework in place, we show how to achieve adaptive security for proxy re-encryption schemes (published at PKC ’19 [FKKP19]) and provide the first adaptive security proofs for continuous group key agreement protocols (published at S&P ’21 [KPW+21]). Questioning optimality of our framework, we then show that currently used proof techniques cannot lead to significantly better security guarantees for "graph-building" games (published at TCC ’21 [KKPW21a]). These games cover generalized selective decryption, as well as the security of prominent constructions for constrained PRFs, continuous group key agreement, and proxy re-encryption. 
Finally, we revisit the adaptive security of Yao’s garbled circuits and extend the analysis of Jafargholi and Wichs in two directions: While they prove adaptive security only for a modified construction with increased online complexity, we provide the first positive results for the original construction by Yao (published at TCC ’21 [KKP21a]). On the negative side, we prove that the results of Jafargholi and Wichs are essentially optimal by showing that no black-box reduction can provide a significantly better security bound (published at Crypto ’21 [KKPW21c]).},
  author       = {Klein, Karen},
  issn         = {2663-337X},
  pages        = {276},
  publisher    = {Institute of Science and Technology Austria},
  title        = {{On the adaptive security of graph-based games}},
  doi          = {10.15479/at:ista:10035},
  year         = {2021},
}

@inproceedings{10041,
  abstract     = {Yao’s garbling scheme is one of the most fundamental cryptographic constructions. Lindell and Pinkas (Journal of Cryptology 2009) gave a formal proof of security in the selective setting where the adversary chooses the challenge inputs before seeing the garbled circuit assuming secure symmetric-key encryption (and hence one-way functions). This was followed by results, both positive and negative, concerning its security in the, stronger, adaptive setting. Applebaum et al. (Crypto 2013) showed that it cannot satisfy adaptive security as is, due to a simple incompressibility argument. Jafargholi and Wichs (TCC 2017) considered a natural adaptation of Yao’s scheme (where the output mapping is sent in the online phase, together with the garbled input) that circumvents this negative result, and proved that it is adaptively secure, at least for shallow circuits. In particular, they showed that for the class of circuits of depth δ, the loss in security is at most exponential in δ. The above results all concern the simulation-based notion of security. In this work, we show that the upper bound of Jafargholi and Wichs is basically optimal in a strong sense. As our main result, we show that there exists a family of Boolean circuits, one for each depth δ∈N, such that any black-box reduction proving the adaptive indistinguishability of the natural adaptation of Yao’s scheme from any symmetric-key encryption has to lose a factor that is exponential in √δ. Since indistinguishability is a weaker notion than simulation, our bound also applies to adaptive simulation. To establish our results, we build on the recent approach of Kamath et al. (Eprint 2021), which uses pebbling lower bounds in conjunction with oracle separations to prove fine-grained lower bounds on loss in cryptographic security.},
  author       = {Kamath Hosdurg, Chethan and Klein, Karen and Pietrzak, Krzysztof Z and Wichs, Daniel},
  booktitle    = {41st Annual International Cryptology Conference, Part II},
  isbn         = {978-3-030-84244-4},
  issn         = {1611-3349},
  location     = {Virtual},
  pages        = {486--515},
  publisher    = {Springer Nature},
  title        = {{Limits on the Adaptive Security of Yao’s Garbling}},
  doi          = {10.1007/978-3-030-84245-1_17},
  volume       = {12826},
  year         = {2021},
}

@inproceedings{10049,
  abstract     = {While messaging systems with strong security guarantees are widely used in practice, designing a protocol that scales efficiently to large groups and enjoys similar security guarantees remains largely open. The two existing proposals to date are ART (Cohn-Gordon et al., CCS18) and TreeKEM (IETF, The Messaging Layer Security Protocol, draft). TreeKEM is the currently considered candidate by the IETF MLS working group, but dynamic group operations (i.e. adding and removing users) can cause efficiency issues. In this paper we formalize and analyze a variant of TreeKEM which we term Tainted TreeKEM (TTKEM for short). The basic idea underlying TTKEM was suggested by Millican (MLS mailing list, February 2018). This version is more efficient than TreeKEM for some natural distributions of group operations, we quantify this through simulations.Our second contribution is two security proofs for TTKEM which establish post compromise and forward secrecy even against adaptive attackers. The security loss (to the underlying PKE) in the Random Oracle Model is a polynomial factor, and a quasipolynomial one in the Standard Model. Our proofs can be adapted to TreeKEM as well. Before our work no security proof for any TreeKEM-like protocol establishing tight security against an adversary who can adaptively choose the sequence of operations was known. We also are the first to prove (or even formalize) active security where the server can arbitrarily deviate from the protocol specification. Proving fully active security – where also the users can arbitrarily deviate – remains open.},
  author       = {Klein, Karen and Pascual Perez, Guillermo and Walter, Michael and Kamath Hosdurg, Chethan and Capretto, Margarita and Cueto Noval, Miguel and Markov, Ilia and Yeo, Michelle X and Alwen, Joel F and Pietrzak, Krzysztof Z},
  booktitle    = {2021 IEEE Symposium on Security and Privacy},
  location     = {San Francisco, CA, United States},
  pages        = {268--284},
  publisher    = {IEEE},
  title        = {{Keep the dirt: tainted TreeKEM, adaptively and actively secure continuous group key agreement}},
  doi          = {10.1109/sp40001.2021.00035},
  year         = {2021},
}

@inproceedings{10048,
  abstract     = {The security of cryptographic primitives and protocols against adversaries that are allowed to make adaptive choices (e.g., which parties to corrupt or which queries to make) is notoriously difficult to establish. A broad theoretical
framework was introduced by Jafargholi et al. [Crypto’17] for this purpose. In this paper we initiate the study of lower bounds on loss in adaptive security for certain cryptographic protocols considered in the framework. We prove lower
bounds that almost match the upper bounds (proven using the framework) for proxy re-encryption, prefix-constrained PRFs and generalized selective decryption, a security game that captures the security of certain group messaging and
broadcast encryption schemes. Those primitives have in common that their security game involves an underlying graph that can be adaptively built by the adversary. Some of our lower bounds only apply to a restricted class of black-box reductions which we term “oblivious” (the existing upper bounds are of this restricted type), some apply to the broader but still restricted class of non-rewinding reductions, while our lower bound for proxy re-encryption applies to all black-box reductions. The fact that some of our lower bounds seem to crucially rely on obliviousness or at least a non-rewinding reduction hints to the exciting possibility that the existing upper bounds can be improved by using more sophisticated reductions. Our main conceptual contribution is a two-player multi-stage game called the Builder-Pebbler Game. We can translate bounds on the winning probabilities for various instantiations of this game into cryptographic lower bounds for the above-mentioned primitives using oracle separation techniques.
},
  author       = {Kamath Hosdurg, Chethan and Klein, Karen and Pietrzak, Krzysztof Z and Walter, Michael},
  booktitle    = {19th Theory of Cryptography Conference 2021},
  location     = {Raleigh, NC, United States},
  publisher    = {International Association for Cryptologic Research},
  title        = {{The cost of adaptivity in security games on graphs}},
  year         = {2021},
}

@inproceedings{10044,
  abstract     = {We show that Yao’s garbling scheme is adaptively indistinguishable for the class of Boolean circuits of size S and treewidth w with only a S^O(w) loss in security. For instance, circuits with constant treewidth are as a result adaptively indistinguishable with only a polynomial loss. This (partially) complements a negative result of Applebaum et al. (Crypto 2013), which showed (assuming one-way functions) that Yao’s garbling scheme cannot be adaptively simulatable. As main technical contributions, we introduce a new pebble game that abstracts out our security reduction and then present a pebbling strategy for this game where the number of pebbles used is roughly O(d w log(S)), d being the fan-out of the circuit. The design of the strategy relies on separators, a graph-theoretic notion with connections to circuit complexity.},
  author       = {Kamath Hosdurg, Chethan and Klein, Karen and Pietrzak, Krzysztof Z},
  booktitle    = {19th Theory of Cryptography Conference 2021},
  location     = {Raleigh, NC, United States},
  publisher    = {International Association for Cryptologic Research},
  title        = {{On treewidth, separators and Yao's garbling}},
  year         = {2021},
}

@article{9928,
  abstract     = {There are two elementary superconducting qubit types that derive directly from the quantum harmonic oscillator. In one, the inductor is replaced by a nonlinear Josephson junction to realize the widely used charge qubits with a compact phase variable and a discrete charge wave function. In the other, the junction is added in parallel, which gives rise to an extended phase variable, continuous wave functions, and a rich energy-level structure due to the loop topology. While the corresponding rf superconducting quantum interference device Hamiltonian was introduced as a quadratic quasi-one-dimensional potential approximation to describe the fluxonium qubit implemented with long Josephson-junction arrays, in this work we implement it directly using a linear superinductor formed by a single uninterrupted aluminum wire. We present a large variety of qubits, all stemming from the same circuit but with drastically different characteristic energy scales. This includes flux and fluxonium qubits but also the recently introduced quasicharge qubit with strongly enhanced zero-point phase fluctuations and a heavily suppressed flux dispersion. The use of a geometric inductor results in high reproducibility of the inductive energy as guaranteed by top-down lithography—a key ingredient for intrinsically protected superconducting qubits.},
  author       = {Peruzzo, Matilda and Hassani, Farid and Szep, Gregory and Trioni, Andrea and Redchenko, Elena and Zemlicka, Martin and Fink, Johannes M},
  issn         = {2691-3399},
  journal      = {PRX Quantum},
  keywords     = {quantum physics, mesoscale and nanoscale physics},
  number       = {4},
  pages        = {040341},
  publisher    = {American Physical Society},
  title        = {{Geometric superinductance qubits: Controlling phase delocalization across a single Josephson junction}},
  doi          = {10.1103/PRXQuantum.2.040341},
  volume       = {2},
  year         = {2021},
}

@phdthesis{9920,
  abstract     = {This work is concerned with two fascinating circuit quantum electrodynamics components, the Josephson junction and the geometric superinductor, and the interesting experiments that can be done by combining the two. The Josephson junction has revolutionized the field of superconducting circuits as a non-linear dissipation-less circuit element and is used in almost all superconducting qubit implementations since the 90s. On the other hand, the superinductor is a relatively new circuit element introduced as a key component of the fluxonium qubit in 2009. This is an inductor with characteristic impedance larger than the resistance quantum and self-resonance frequency in the GHz regime. The combination of these two elements can occur in two fundamental ways: in parallel and in series. When connected in parallel the two create the fluxonium qubit, a loop with large inductance and a rich energy spectrum reliant on quantum tunneling. On the other hand placing the two elements in series aids with the measurement of the IV curve of a single Josephson junction in a high impedance environment. In this limit theory predicts that the junction will behave as its dual element: the phase-slip junction. While the Josephson junction acts as a non-linear inductor the phase-slip junction has the behavior of a non-linear capacitance and can be used to measure new Josephson junction phenomena, namely Coulomb blockade of Cooper pairs and phase-locked Bloch oscillations. The latter experiment allows for a direct link between frequency and current which is an elusive connection in quantum metrology. This work introduces the geometric superinductor, a superconducting circuit element where the high inductance is due to the geometry rather than the material properties of the superconductor, realized from a highly miniaturized superconducting planar coil. 
These structures will be described and characterized as resonators and qubit inductors and progress towards the measurement of phase-locked Bloch oscillations will be presented.},
  author       = {Peruzzo, Matilda},
  isbn         = {978-3-99078-013-8},
  issn         = {2663-337X},
  keywords     = {quantum computing, superinductor, quantum metrology},
  pages        = {149},
  publisher    = {Institute of Science and Technology Austria},
  title        = {{Geometric superinductors and their applications in circuit quantum electrodynamics}},
  doi          = {10.15479/at:ista:9920},
  year         = {2021},
}

@article{10816,
  abstract     = {Pattern separation is a fundamental brain computation that converts small differences in input patterns into large differences in output patterns. Several synaptic mechanisms of pattern separation have been proposed, including code expansion, inhibition and plasticity; however, which of these mechanisms play a role in the entorhinal cortex (EC)–dentate gyrus (DG)–CA3 circuit, a classical pattern separation circuit, remains unclear. Here we show that a biologically realistic, full-scale EC–DG–CA3 circuit model, including granule cells (GCs) and parvalbumin-positive inhibitory interneurons (PV+-INs) in the DG, is an efficient pattern separator. Both external gamma-modulated inhibition and internal lateral inhibition mediated by PV+-INs substantially contributed to pattern separation. Both local connectivity and fast signaling at GC–PV+-IN synapses were important for maximum effectiveness. Similarly, mossy fiber synapses with conditional detonator properties contributed to pattern separation. By contrast, perforant path synapses with Hebbian synaptic plasticity and direct EC–CA3 connection shifted the network towards pattern completion. Our results demonstrate that the specific properties of cells and synapses optimize higher-order computations in biological networks and might be useful to improve the deep learning capabilities of technical networks.},
  author       = {Guzmán, José and Schlögl, Alois and Espinoza Martinez, Claudia and Zhang, Xiaomin and Suter, Benjamin and Jonas, Peter M},
  issn         = {2662-8457},
  journal      = {Nature Computational Science},
  keywords     = {general medicine},
  number       = {12},
  pages        = {830--842},
  publisher    = {Springer Nature},
  title        = {{How connectivity rules and synaptic properties shape the efficacy of pattern separation in the entorhinal cortex–dentate gyrus–CA3 network}},
  doi          = {10.1038/s43588-021-00157-1},
  volume       = {1},
  year         = {2021},
}

@article{9817,
  abstract     = {Elastic bending of initially flat slender elements allows the realization and economic fabrication of intriguing curved shapes. In this work, we derive an intuitive but rigorous geometric characterization of the design space of plane elastic rods with variable stiffness. It enables designers to determine which shapes are physically viable with active bending by visual inspection alone. Building on these insights, we propose a method for efficiently designing the geometry of a flat elastic rod that realizes a target equilibrium curve, which only requires solving a linear program. We implement this method in an interactive computational design tool that gives feedback about the feasibility of a design, and computes the geometry of the structural elements necessary to realize it within an instant. The tool also offers an iterative optimization routine that improves the fabricability of a model while modifying it as little as possible. In addition, we use our geometric characterization to derive an algorithm for analyzing and recovering the stability of elastic curves that would otherwise snap out of their unstable equilibrium shapes by buckling. We show the efficacy of our approach by designing and manufacturing several physical models that are assembled from flat elements.},
  author       = {Hafner, Christian and Bickel, Bernd},
  issn         = {1557-7368},
  journal      = {ACM Transactions on Graphics},
  keywords     = {Computing methodologies, shape modeling, modeling and simulation, theory of computation, computational geometry, mathematics of computing, mathematical optimization},
  number       = {4},
  publisher    = {Association for Computing Machinery},
  title        = {{The design space of plane elastic curves}},
  doi          = {10.1145/3450626.3459800},
  volume       = {40},
  year         = {2021},
}

@misc{10110,
  abstract     = {Pattern separation is a fundamental brain computation that converts small differences in input patterns into large differences in output patterns. Several synaptic mechanisms of pattern separation have been proposed, including code expansion, inhibition and plasticity; however, which of these mechanisms play a role in the entorhinal cortex (EC)–dentate gyrus (DG)–CA3 circuit, a classical pattern separation circuit, remains unclear. Here we show that a biologically realistic, full-scale EC–DG–CA3 circuit model, including granule cells (GCs) and parvalbumin-positive inhibitory interneurons (PV+-INs) in the DG, is an efficient pattern separator. Both external gamma-modulated inhibition and internal lateral inhibition mediated by PV+-INs substantially contributed to pattern separation. Both local connectivity and fast signaling at GC–PV+-IN synapses were important for maximum effectiveness. Similarly, mossy fiber synapses with conditional detonator properties contributed to pattern separation. By contrast, perforant path synapses with Hebbian synaptic plasticity and direct EC–CA3 connection shifted the network towards pattern completion. Our results demonstrate that the specific properties of cells and synapses optimize higher-order computations in biological networks and might be useful to improve the deep learning capabilities of technical networks.},
  author       = {Guzmán, José and Schlögl, Alois and Espinoza Martinez, Claudia and Zhang, Xiaomin and Suter, Benjamin and Jonas, Peter M},
  publisher    = {IST Austria},
  title        = {{How connectivity rules and synaptic properties shape the efficacy of pattern separation in the entorhinal cortex–dentate gyrus–CA3 network}},
  doi          = {10.15479/AT:ISTA:10110},
  year         = {2021},
}

@article{8966,
  author       = {Schauer, Alexandra and Heisenberg, Carl-Philipp J},
  title        = {{Reassembling gastrulation}},
  journal      = {Developmental Biology},
  volume       = {474},
  pages        = {71--81},
  year         = {2021},
  publisher    = {Elsevier},
  issn         = {0012-1606},
  doi          = {10.1016/j.ydbio.2020.12.014},
  keywords     = {Developmental Biology, Cell Biology, Molecular Biology},
  abstract     = {During development, a single cell is transformed into a highly complex organism through progressive cell division, specification and rearrangement. An important prerequisite for the emergence of patterns within the developing organism is to establish asymmetries at various scales, ranging from individual cells to the entire embryo, eventually giving rise to the different body structures. This becomes especially apparent during gastrulation, when the earliest major lineage restriction events lead to the formation of the different germ layers. Traditionally, the unfolding of the developmental program from symmetry breaking to germ layer formation has been studied by dissecting the contributions of different signaling pathways and cellular rearrangements in the in vivo context of intact embryos. Recent efforts, using the intrinsic capacity of embryonic stem cells to self-assemble and generate embryo-like structures de novo, have opened new avenues for understanding the many ways by which an embryo can be built and the influence of extrinsic factors therein. Here, we discuss and compare divergent and conserved strategies leading to germ layer formation in embryos as compared to in vitro systems, their upstream molecular cascades and the role of extrinsic factors in this process.},
}

@article{9438,
  abstract     = {Rigorous investigation of synaptic transmission requires analysis of unitary synaptic events by simultaneous recording from presynaptic terminals and postsynaptic target neurons. However, this has been achieved at only a limited number of model synapses, including the squid giant synapse and the mammalian calyx of Held. Cortical presynaptic terminals have been largely inaccessible to direct presynaptic recording, due to their small size. Here, we describe a protocol for improved subcellular patch-clamp recording in rat and mouse brain slices, with the synapse in a largely intact environment. Slice preparation takes ~2 h, recording ~3 h and post hoc morphological analysis 2 d. Single presynaptic hippocampal mossy fiber terminals are stimulated minimally invasively in the bouton-attached configuration, in which the cytoplasmic content remains unperturbed, or in the whole-bouton configuration, in which the cytoplasmic composition can be precisely controlled. Paired pre–postsynaptic recordings can be integrated with biocytin labeling and morphological analysis, allowing correlative investigation of synapse structure and function. Paired recordings can be obtained from mossy fiber terminals in slices from both rats and mice, implying applicability to genetically modified synapses. Paired recordings can also be performed together with axon tract stimulation or optogenetic activation, allowing comparison of unitary and compound synaptic events in the same target cell. Finally, paired recordings can be combined with spontaneous event analysis, permitting collection of miniature events generated at a single identified synapse. In conclusion, the subcellular patch-clamp techniques detailed here should facilitate analysis of biophysics, plasticity and circuit function of cortical synapses in the mammalian central nervous system.},
  author       = {Vandael, David H and Okamoto, Yuji and Borges Merjane, Carolina and Vargas Barroso, Victor M and Suter, Benjamin and Jonas, Peter M},
  issn         = {1750-2799},
  journal      = {Nature Protocols},
  number       = {6},
  pages        = {2947--2967},
  publisher    = {Springer Nature},
  title        = {{Subcellular patch-clamp techniques for single-bouton stimulation and simultaneous pre- and postsynaptic recording at cortical synapses}},
  doi          = {10.1038/s41596-021-00526-0},
  volume       = {16},
  year         = {2021},
}

