@article{1162,
  abstract     = {Selected universal experimental properties of high-temperature superconducting (HTS) cuprates have been singled out in the last decade. One of the pivotal challenges in this field is the designation of a consistent interpretation framework within which we can describe quantitatively the universal features of those systems. Here we analyze in a detailed manner the principal experimental data and compare them quantitatively with the approach based on a single-band model of strongly correlated electrons supplemented with strong antiferromagnetic (super)exchange interaction (the so-called t−J−U model). The model rationale is provided by estimating its microscopic parameters on the basis of the three-band approach for the Cu-O plane. We use our original full Gutzwiller wave-function solution by going beyond the renormalized mean-field theory (RMFT) in a systematic manner. Our approach reproduces very well the observed hole doping (δ) dependence of the kinetic-energy gain in the superconducting phase, one of the principal non-Bardeen-Cooper-Schrieffer features of the cuprates. The calculated Fermi velocity in the nodal direction is practically δ-independent and its universal value agrees very well with that determined experimentally. Also, a weak doping dependence of the Fermi wave vector leads to an almost constant value of the effective mass in a pure superconducting phase which is both observed in experiment and reproduced within our approach. An assessment of the currently used models (t−J, Hubbard) is carried out and the results of the canonical RMFT as a zeroth-order solution are provided for comparison to illustrate the necessity of the introduced higher-order contributions.},
  author       = {Spałek, Józef and Zegrodnik, Michał and Kaczmarczyk, Jan},
  issn         = {2469-9950},
  journal      = {Physical Review B},
  number       = {2},
  publisher    = {American Physical Society},
  title        = {{Universal properties of high temperature superconductors from real space pairing t-J-U model and its quantitative comparison with experiment}},
  doi          = {10.1103/PhysRevB.95.024506},
  volume       = {95},
  year         = {2017},
}

@article{1168,
  abstract     = {Optimum experimental design theory has recently been extended for parameter estimation in copula models. The use of these models allows one to gain in flexibility by considering the model parameter set split into marginal and dependence parameters. However, this separation also leads to the natural issue of estimating only a subset of all model parameters. In this work, we treat this problem with the application of the $D_s$-optimality to copula models. First, we provide an extension of the corresponding equivalence theory. Then, we analyze a wide range of flexible copula models to highlight the usefulness of $D_s$-optimality in many possible scenarios. Finally, we discuss how the usage of the introduced design criterion also relates to the more general issue of copula selection and optimal design for model discrimination.},
  author       = {Perrone, Elisa and Rappold, Andreas and Müller, Werner},
  journal      = {Statistical Methods and Applications},
  number       = {3},
  pages        = {403 -- 418},
  publisher    = {Springer},
  title        = {{$D_s$-optimality in copula models}},
  doi          = {10.1007/s10260-016-0375-6},
  volume       = {26},
  year         = {2017},
}

@article{1169,
  abstract     = {Dispersal is a crucial factor in natural evolution, since it determines the habitat experienced by any population and defines the spatial scale of interactions between individuals. There is compelling evidence for systematic differences in dispersal characteristics within the same population, i.e., genotype-dependent dispersal. The consequences of genotype-dependent dispersal on other evolutionary phenomena, however, are poorly understood. In this article we investigate the effect of genotype-dependent dispersal on spatial gene frequency patterns, using a generalization of the classical diffusion model of selection and dispersal. Dispersal is characterized by the variance of dispersal (diffusion coefficient) and the mean displacement (directional advection term). We demonstrate that genotype-dependent dispersal may change the qualitative behavior of Fisher waves, which change from being “pulled” to being “pushed” wave fronts as the discrepancy in dispersal between genotypes increases. The speed of any wave is partitioned into components due to selection, genotype-dependent variance of dispersal, and genotype-dependent mean displacement. We apply our findings to wave fronts maintained by selection against heterozygotes. Furthermore, we identify a benefit of increased variance of dispersal, quantify its effect on the speed of the wave, and discuss the implications for the evolution of dispersal strategies.},
  author       = {Novak, Sebastian and Kollár, Richard},
  issn         = {0016-6731},
  journal      = {Genetics},
  number       = {1},
  pages        = {367 -- 374},
  publisher    = {Genetics Society of America},
  title        = {{Spatial gene frequency waves under genotype-dependent dispersal}},
  doi          = {10.1534/genetics.116.193946},
  volume       = {205},
  year         = {2017},
}

@article{1173,
  abstract     = {We introduce the Voronoi functional of a triangulation of a finite set of points in the Euclidean plane and prove that among all geometric triangulations of the point set, the Delaunay triangulation maximizes the functional. This result neither extends to topological triangulations in the plane nor to geometric triangulations in three and higher dimensions.},
  author       = {Edelsbrunner, Herbert and Glazyrin, Alexey and Musin, Oleg and Nikitenko, Anton},
  issn         = {0209-9683},
  journal      = {Combinatorica},
  number       = {5},
  pages        = {887 -- 910},
  publisher    = {Springer},
  title        = {{The Voronoi functional is maximized by the Delaunay triangulation in the plane}},
  doi          = {10.1007/s00493-016-3308-y},
  volume       = {37},
  year         = {2017},
}

@inproceedings{1174,
  abstract     = {Security of cryptographic applications is typically defined by security games. The adversary, within certain resources, cannot win with probability much better than 0 (for unpredictability applications, like one-way functions) or much better than 1/2 (indistinguishability applications for instance encryption schemes). In so called squared-friendly applications the winning probability of the adversary, for different values of the application secret randomness, is not only close to 0 or 1/2 on average, but also concentrated in the sense that its second central moment is small. The class of squared-friendly applications, which contains all unpredictability applications and many indistinguishability applications, is particularly important for key derivation. Barak et al. observed that for square-friendly applications one can beat the ``RT-bound'', extracting secure keys with significantly smaller entropy loss. In turn Dodis and Yu showed that in squared-friendly applications one can directly use a ``weak'' key, which has only high entropy, as a secure key. In this paper we give sharp lower bounds on square security assuming security for ``weak'' keys. We show that any application which is either (a) secure with weak keys or (b) allows for entropy savings for keys derived by universal hashing, must be square-friendly. Quantitatively, our lower bounds match the positive results of Dodis and Yu and Barak et al. (TCC'13, CRYPTO'11) Hence, they can be understood as a general characterization of squared-friendly applications. While the positive results on squared-friendly applications were derived by one clever application of the Cauchy-Schwarz Inequality, for tight lower bounds we need more machinery. In our approach we use convex optimization techniques and some theory of circular matrices.},
  author       = {Skórski, Maciej},
  issn         = {1868-8969},
  location     = {Hannover, Germany},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Lower bounds on key derivation for square-friendly applications}},
  doi          = {10.4230/LIPIcs.STACS.2017.57},
  volume       = {66},
  year         = {2017},
}

@inproceedings{1175,
  abstract     = {We study space complexity and time-space trade-offs with a focus not on peak memory usage but on overall memory consumption throughout the computation.  Such a cumulative space measure was introduced for the computational model of parallel black pebbling by [Alwen and Serbinenko ’15] as a tool for obtaining results in cryptography. We consider instead the non- deterministic black-white pebble game and prove optimal cumulative space lower bounds and trade-offs, where in order to minimize pebbling time the space has to remain large during a significant fraction of the pebbling. We also initiate the study of cumulative space in proof complexity, an area where other space complexity measures have been extensively studied during the last 10–15 years. Using and extending the connection between proof complexity and pebble games in [Ben-Sasson and Nordström ’08, ’11] we obtain several strong cumulative space results for (even parallel versions of) the resolution proof system, and outline some possible future directions of study of this, in our opinion, natural and interesting space measure.},
  author       = {Alwen, Joel F and De Rezende, Susanna and Nordström, Jakob and Vinyals, Marc},
  editor       = {Papadimitriou, Christos},
  issn         = {1868-8969},
  location     = {Berkeley, CA, United States},
  pages        = {38:1--38:21},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Cumulative space in black-white pebbling and resolution}},
  doi          = {10.4230/LIPIcs.ITCS.2017.38},
  volume       = {67},
  year         = {2017},
}

@inproceedings{1176,
  abstract     = {The algorithm Argon2i-B of Biryukov, Dinu and Khovratovich is currently being considered by the IRTF (Internet Research Task Force) as a new de-facto standard for password hashing. An older version (Argon2i-A) of the same algorithm was chosen as the winner of the recent Password Hashing Competition. An important competitor to Argon2i-B is the recently introduced Balloon Hashing (BH) algorithm of Corrigan-Gibbs, Boneh and Schechter. A key security desiderata for any such algorithm is that evaluating it (even using a custom device) requires a large amount of memory amortized across multiple instances. Alwen and Blocki (CRYPTO 2016) introduced a class of theoretical attacks against Argon2i-A and BH. While these attacks yield large asymptotic reductions in the amount of memory, it was not, a priori, clear if (1) they can be extended to the newer Argon2i-B, (2) the attacks are effective on any algorithm for practical parameter ranges (e.g., 1GB of memory) and (3) if they can be effectively instantiated against any algorithm under realistic hardware constraints. In this work we answer all three of these questions in the affirmative for all three algorithms. This is also the first work to analyze the security of Argon2i-B. In more detail, we extend the theoretical attacks of Alwen and Blocki (CRYPTO 2016) to the recent Argon2i-B proposal demonstrating severe asymptotic deficiencies in its security. Next we introduce several novel heuristics for improving the attack's concrete memory efficiency even when on-chip memory bandwidth is bounded. We then simulate our attacks on randomly sampled Argon2i-A, Argon2i-B and BH instances and measure the resulting memory consumption for various practical parameter ranges and for a variety of upper bounds on the amount of parallelism available to the attacker. Finally we describe, implement, and test a new heuristic for applying the Alwen-Blocki attack to functions employing a technique developed by Corrigan-Gibbs et al. for improving concrete security of memory-hard functions. We analyze the collected data and show the effects various parameters have on the memory consumption of the attack. In particular, we can draw several interesting conclusions about the level of security provided by these functions. · For the Alwen-Blocki attack to fail against practical memory parameters, Argon2i-B must be instantiated with more than 10 passes on memory - beyond the "paranoid" parameter setting in the current IRTF proposal. · The technique of Corrigan-Gibbs for improving security can also be overcome by the Alwen-Blocki attack under realistic hardware constraints. · On a positive note, both the asymptotic and concrete security of Argon2i-B seem to improve on that of Argon2i-A.},
  author       = {Alwen, Joel F and Blocki, Jeremiah},
  isbn         = {978-150905761-0},
  location     = {Paris, France},
  publisher    = {IEEE},
  title        = {{Towards practical attacks on Argon2i and balloon hashing}},
  doi          = {10.1109/EuroSP.2017.47},
  year         = {2017},
}

@article{1180,
  abstract     = {In this article we define an algebraic vertex of a generalized polyhedron and show that the set of algebraic vertices is the smallest set of points needed to define the polyhedron. We prove that the indicator function of a generalized polytope P is a linear combination of indicator functions of simplices whose vertices are algebraic vertices of P. We also show that the indicator function of any generalized polyhedron is a linear combination, with integer coefficients, of indicator functions of cones with apices at algebraic vertices and line-cones. The concept of an algebraic vertex is closely related to the Fourier–Laplace transform. We show that a point v is an algebraic vertex of a generalized polyhedron P if and only if the tangent cone of P, at v, has non-zero Fourier–Laplace transform.},
  author       = {Akopyan, Arseniy and Bárány, Imre and Robins, Sinai},
  issn         = {0001-8708},
  journal      = {Advances in Mathematics},
  pages        = {627 -- 644},
  publisher    = {Academic Press},
  title        = {{Algebraic vertices of non-convex polyhedra}},
  doi          = {10.1016/j.aim.2016.12.026},
  volume       = {308},
  year         = {2017},
}

@article{1187,
  abstract     = {We construct efficient authentication protocols and message authentication codes (MACs) whose security can be reduced to the learning parity with noise (LPN) problem. Despite a large body of work—starting with the HB protocol of Hopper and Blum in 2001—until now it was not even known how to construct an efficient authentication protocol from LPN which is secure against man-in-the-middle attacks. A MAC implies such a (two-round) protocol.},
  author       = {Kiltz, Eike and Pietrzak, Krzysztof Z and Venturi, Daniele and Cash, David and Jain, Abhishek},
  journal      = {Journal of Cryptology},
  number       = {4},
  pages        = {1238 -- 1275},
  publisher    = {Springer},
  title        = {{Efficient authentication from hard learning problems}},
  doi          = {10.1007/s00145-016-9247-3},
  volume       = {30},
  year         = {2017},
}

@article{1191,
  abstract     = {Variation in genotypes may be responsible for differences in dispersal rates, directional biases, and growth rates of individuals. These traits may favor certain genotypes and enhance their spatiotemporal spreading into areas occupied by the less advantageous genotypes. We study how these factors influence the speed of spreading in the case of two competing genotypes under the assumption that spatial variation of the total population is small compared to the spatial variation of the frequencies of the genotypes in the population. In that case, the dynamics of the frequency of one of the genotypes is approximately described by a generalized Fisher–Kolmogorov–Petrovskii–Piskunov (F–KPP) equation. This generalized F–KPP equation with (nonlinear) frequency-dependent diffusion and advection terms admits traveling wave solutions that characterize the invasion of the dominant genotype. Our existence results generalize the classical theory for traveling waves for the F–KPP with constant coefficients. Moreover, in the particular case of the quadratic (monostable) nonlinear growth–decay rate in the generalized F–KPP we study in detail the influence of the variance in diffusion and mean displacement rates of the two genotypes on the minimal wave propagation speed.},
  author       = {Kollár, Richard and Novak, Sebastian},
  journal      = {Bulletin of Mathematical Biology},
  number       = {3},
  pages        = {525--559},
  publisher    = {Springer},
  title        = {{Existence of traveling waves for the generalized F–KPP equation}},
  doi          = {10.1007/s11538-016-0244-3},
  volume       = {79},
  year         = {2017},
}

@inproceedings{1192,
  abstract     = {The main result of this paper is a generalization of the classical blossom algorithm for finding perfect matchings. Our algorithm can efficiently solve Boolean CSPs where each variable appears in exactly two constraints (we call it edge CSP) and all constraints are even Δ-matroid relations (represented by lists of tuples). As a consequence of this, we settle the complexity classification of planar Boolean CSPs started by Dvorak and Kupec. Knowing that edge CSP is tractable for even Δ-matroid constraints allows us to extend the tractability result to a larger class of Δ-matroids that includes many classes that were known to be tractable before, namely co-independent, compact, local and binary.},
  author       = {Kazda, Alexandr and Kolmogorov, Vladimir and Rolinek, Michal},
  isbn         = {978-161197478-2},
  location     = {Barcelona, Spain},
  pages        = {307 -- 326},
  publisher    = {SIAM},
  title        = {{Even delta-matroids and the complexity of planar Boolean CSPs}},
  doi          = {10.1137/1.9781611974782.20},
  year         = {2017},
}

@article{1196,
  abstract     = {We define the model-measuring problem: given a model M and specification ϕ, what is the maximal distance ρ such that all models M' within distance ρ from M satisfy (or violate) ϕ. The model-measuring problem presupposes a distance function on models. We concentrate on automatic distance functions, which are defined by weighted automata. The model-measuring problem subsumes several generalizations of the classical model-checking problem, in particular, quantitative model-checking problems that measure the degree of satisfaction of a specification; robustness problems that measure how much a model can be perturbed without violating the specification; and parameter synthesis for hybrid systems. We show that for automatic distance functions, and (a) ω-regular linear-time, (b) ω-regular branching-time, and (c) hybrid specifications, the model-measuring problem can be solved. We use automata-theoretic model-checking methods for model measuring, replacing the emptiness question for word, tree, and hybrid automata by the optimal-value question for the weighted versions of these automata. For automata over words and trees, we consider weighted automata that accumulate weights by maximizing, summing, discounting, and limit averaging. For hybrid automata, we consider monotonic (parametric) hybrid automata, a hybrid counterpart of (discrete) weighted automata. We give several examples of using the model-measuring problem to compute various notions of robustness and quantitative satisfaction for temporal specifications. Further, we propose the modeling framework for model measuring to ease the specification and reduce the likelihood of errors in modeling. Finally, we present a variant of the model-measuring problem, called the model-repair problem. The model-repair problem applies to models that do not satisfy the specification; it can be used to derive restrictions, under which the model satisfies the specification, i.e., to repair the model.},
  author       = {Henzinger, Thomas A and Otop, Jan},
  journal      = {Nonlinear Analysis: Hybrid Systems},
  pages        = {166 -- 190},
  publisher    = {Elsevier},
  title        = {{Model measuring for discrete and hybrid systems}},
  doi          = {10.1016/j.nahs.2016.09.001},
  volume       = {23},
  year         = {2017},
}

@article{1199,
  abstract     = {Much of quantitative genetics is based on the ‘infinitesimal model’, under which selection has a negligible effect on the genetic variance. This is typically justified by assuming a very large number of loci with additive effects. However, it applies even when genes interact, provided that the number of loci is large enough that selection on each of them is weak relative to random drift. In the long term, directional selection will change allele frequencies, but even then, the effects of epistasis on the ultimate change in trait mean due to selection may be modest. Stabilising selection can maintain many traits close to their optima, even when the underlying alleles are weakly selected. However, the number of traits that can be optimised is apparently limited to ~4Ne by the ‘drift load’, and this is hard to reconcile with the apparent complexity of many organisms. Just as for the mutation load, this limit can be evaded by a particular form of negative epistasis. A more robust limit is set by the variance in reproductive success. This suggests that selection accumulates information most efficiently in the infinitesimal regime, when selection on individual alleles is weak, and comparable with random drift. A review of evidence on selection strength suggests that although most variance in fitness may be because of alleles with large Nes, substantial amounts of adaptation may be because of alleles in the infinitesimal regime, in which epistasis has modest effects.},
  author       = {Barton, Nicholas H},
  journal      = {Heredity},
  pages        = {96 -- 109},
  publisher    = {Nature Publishing Group},
  title        = {{How does epistasis influence the response to selection?}},
  doi          = {10.1038/hdy.2016.109},
  volume       = {118},
  year         = {2017},
}

@article{1207,
  abstract     = {The eigenvalue distribution of the sum of two large Hermitian matrices, when one of them is conjugated by a Haar distributed unitary matrix, is asymptotically given by the free convolution of their spectral distributions. We prove that this convergence also holds locally in the bulk of the spectrum, down to the optimal scales larger than the eigenvalue spacing. The corresponding eigenvectors are fully delocalized. Similar results hold for the sum of two real symmetric matrices, when one is conjugated by Haar orthogonal matrix.},
  author       = {Bao, Zhigang and Erdős, László and Schnelli, Kevin},
  issn         = {0010-3616},
  journal      = {Communications in Mathematical Physics},
  number       = {3},
  pages        = {947 -- 990},
  publisher    = {Springer},
  title        = {{Local law of addition of random matrices on optimal scale}},
  doi          = {10.1007/s00220-016-2805-6},
  volume       = {349},
  year         = {2017},
}

@article{1208,
  abstract     = {We study parameter estimation in linear Gaussian covariance models, which are p-dimensional Gaussian models with linear constraints on the covariance matrix. Maximum likelihood estimation for this class of models leads to a non-convex optimization problem which typically has many local maxima. Using recent results on the asymptotic distribution of extreme eigenvalues of the Wishart distribution, we provide sufficient conditions for any hill climbing method to converge to the global maximum. Although we are primarily interested in the case in which n≫p, the proofs of our results utilize large sample asymptotic theory under the scheme n/p→γ>1. Remarkably, our numerical simulations indicate that our results remain valid for p as small as 2. An important consequence of this analysis is that, for sample sizes n≃14p, maximum likelihood estimation for linear Gaussian covariance models behaves as if it were a convex optimization problem. © 2016 The Royal Statistical Society and Blackwell Publishing Ltd.},
  author       = {Zwiernik, Piotr and Uhler, Caroline and Richards, Donald},
  issn         = {1369-7412},
  journal      = {Journal of the Royal Statistical Society. Series B: Statistical Methodology},
  number       = {4},
  pages        = {1269 -- 1292},
  publisher    = {Wiley-Blackwell},
  title        = {{Maximum likelihood estimation for linear Gaussian covariance models}},
  doi          = {10.1111/rssb.12217},
  volume       = {79},
  year         = {2017},
}

@article{1211,
  abstract     = {Systems such as fluid flows in channels and pipes or the complex Ginzburg–Landau system, defined over periodic domains, exhibit both continuous symmetries, translational and rotational, as well as discrete symmetries under spatial reflections or complex conjugation. The simplest, and very common symmetry of this type is the equivariance of the defining equations under the orthogonal group O(2). We formulate a novel symmetry reduction scheme for such systems by combining the method of slices with invariant polynomial methods, and show how it works by applying it to the Kuramoto–Sivashinsky system in one spatial dimension. As an example, we track a relative periodic orbit through a sequence of bifurcations to the onset of chaos. Within the symmetry-reduced state space we are able to compute and visualize the unstable manifolds of relative periodic orbits, their torus bifurcations, a transition to chaos via torus breakdown, and heteroclinic connections between various relative periodic orbits. It would be very hard to carry through such analysis in the full state space, without a symmetry reduction such as the one we present here.},
  author       = {Budanur, Nazmi B and Cvitanović, Predrag},
  journal      = {Journal of Statistical Physics},
  number       = {3-4},
  pages        = {636--655},
  publisher    = {Springer},
  title        = {{Unstable manifolds of relative periodic orbits in the symmetry reduced state space of the Kuramoto–Sivashinsky system}},
  doi          = {10.1007/s10955-016-1672-z},
  volume       = {167},
  year         = {2017},
}

@inbook{1213,
  abstract     = {Bacterial cytokinesis is commonly initiated by the Z-ring, a dynamic cytoskeletal structure that assembles at the site of division. Its primary component is FtsZ, a tubulin-like GTPase, that like its eukaryotic relative forms protein filaments in the presence of GTP. Since the discovery of the Z-ring 25 years ago, various models for the role of FtsZ have been suggested. However, important information about the architecture and dynamics of FtsZ filaments during cytokinesis is still missing. One reason for this lack of knowledge has been the small size of bacteria, which has made it difficult to resolve the orientation and dynamics of individual FtsZ filaments in the Z-ring. While superresolution microscopy experiments have helped to gain more information about the organization of the Z-ring in the dividing cell, they were not yet able to elucidate a mechanism of how FtsZ filaments reorganize during assembly and disassembly of the Z-ring. In this chapter, we explain how to use an in vitro reconstitution approach to investigate the self-organization of FtsZ filaments recruited to a biomimetic lipid bilayer by its membrane anchor FtsA. We show how to perform single-molecule experiments to study the behavior of individual FtsZ monomers during the constant reorganization of the FtsZ-FtsA filament network. We describe how to analyze the dynamics of single molecules and explain why this information can help to shed light onto possible mechanism of Z-ring constriction. We believe that similar experimental approaches will be useful to study the mechanism of membrane-based polymerization of other cytoskeletal systems, not only from prokaryotic but also eukaryotic origin.},
  author       = {Baranova, Natalia and Loose, Martin},
  booktitle    = {Cytokinesis},
  editor       = {Echard, Arnaud },
  issn         = {0091-679X},
  pages        = {355 -- 370},
  publisher    = {Academic Press},
  title        = {{Single-molecule measurements to study polymerization dynamics of FtsZ-FtsA copolymers}},
  doi          = {10.1016/bs.mcb.2016.03.036},
  volume       = {137},
  year         = {2017},
}

@inproceedings{274,
  abstract     = {We consider the problem of estimating the partition function Z(β)=∑_x exp(−βH(x)) of a Gibbs distribution with a Hamiltonian H(⋅), or more precisely the logarithm of the ratio q=lnZ(0)/Z(β). It has been recently shown how to approximate q with high probability assuming the existence of an oracle that produces samples from the Gibbs distribution for a given parameter value in [0,β]. The current best known approach due to Huber [9] uses O(qlnn⋅[lnq+lnlnn+ε−2]) oracle calls on average where ε is the desired accuracy of approximation and H(⋅) is assumed to lie in {0}∪[1,n]. We improve the complexity to O(qlnn⋅ε−2) oracle calls. We also show that the same complexity can be achieved if exact oracles are replaced with approximate sampling oracles that are within O(ε2qlnn) variation distance from exact oracles. Finally, we prove a lower bound of Ω(q⋅ε−2) oracle calls under a natural model of computation.},
  author       = {Kolmogorov, Vladimir},
  booktitle    = {Proceedings of the 31st Conference on Learning Theory},
  pages        = {228--249},
  publisher    = {ML Research Press},
  title        = {{A faster approximation algorithm for the Gibbs partition function}},
  volume       = {75},
  year         = {2018},
}

@inproceedings{313,
  abstract     = {Tunneling of a particle through a potential barrier remains one of the most remarkable quantum phenomena. Owing to advances in laser technology, electric fields comparable to those electrons experience in atoms are readily generated and open opportunities to dynamically investigate the process of electron tunneling through the potential barrier formed by the superposition of both laser and atomic fields. Attosecond-time and angstrom-space resolution of the strong laser-field technique allow to address fundamental questions related to tunneling, which are still open and debated: Which time is spent under the barrier and what momentum is picked up by the particle in the meantime? In this combined experimental and theoretical study we demonstrate that for strong-field ionization the leading quantum mechanical Wigner treatment for the time resolved description of tunneling is valid. We achieve a high sensitivity on the tunneling barrier and unambiguously isolate its effects by performing a differential study of two systems with almost identical tunneling geometry. Moreover, working with a low frequency laser, we essentially limit the non-adiabaticity of the process as a major source of uncertainty. The agreement between experiment and theory implies two substantial corrections with respect to the widely employed quasiclassical treatment: In addition to a non-vanishing longitudinal momentum along the laser field-direction we provide clear evidence for a non-zero tunneling time delay. This addresses also the fundamental question how the transition occurs from the tunnel barrier to free space classical evolution of the ejected electron.},
  author       = {Camus, Nicolas and Yakaboylu, Enderalp and Fechner, Lutz and Klaiber, Michael and Laux, Martin and Mi, Yonghao and Hatsagortsyan, Karen and Pfeifer, Thomas and Keitel, Christoph and Moshammer, Robert},
  issn         = {1742-6588},
  location     = {Kazan, Russian Federation},
  number       = {1},
  publisher    = {IOP Publishing},
  title        = {{Experimental evidence for Wigner's tunneling time}},
  doi          = {10.1088/1742-6596/999/1/012004},
  volume       = {999},
  year         = {2017},
}

@article{1338,
  abstract     = {We present a computer-aided programming approach to concurrency. The approach allows programmers to program assuming a friendly, non-preemptive scheduler, and our synthesis procedure inserts synchronization to ensure that the final program works even with a preemptive scheduler. The correctness specification is implicit, inferred from the non-preemptive behavior. Let us consider sequences of calls that the program makes to an external interface. The specification requires that any such sequence produced under a preemptive scheduler should be included in the set of sequences produced under a non-preemptive scheduler. We guarantee that our synthesis does not introduce deadlocks and that the synchronization inserted is optimal w.r.t. a given objective function. The solution is based on a finitary abstraction, an algorithm for bounded language inclusion modulo an independence relation, and generation of a set of global constraints over synchronization placements. Each model of the global constraints set corresponds to a correctness-ensuring synchronization placement. The placement that is optimal w.r.t. the given objective function is chosen as the synchronization solution. We apply the approach to device-driver programming, where the driver threads call the software interface of the device and the API provided by the operating system. Our experiments demonstrate that our synthesis method is precise and efficient. The implicit specification helped us find one concurrency bug previously missed when model-checking using an explicit, user-provided specification. We implemented objective functions for coarse-grained and fine-grained locking and observed that different synchronization placements are produced for our experiments, favoring a minimal number of synchronization operations or maximum concurrency, respectively.},
  author       = {Cerny, Pavol and Clarke, Edmund and Henzinger, Thomas A and Radhakrishna, Arjun and Ryzhyk, Leonid and Samanta, Roopsha and Tarrach, Thorsten},
  journal      = {Formal Methods in System Design},
  number       = {2-3},
  pages        = {97 -- 139},
  publisher    = {Springer},
  title        = {{From non-preemptive to preemptive scheduling using synchronization synthesis}},
  doi          = {10.1007/s10703-016-0256-5},
  volume       = {50},
  year         = {2017},
}

