@inproceedings{1175,
abstract     = {We study space complexity and time-space trade-offs with a focus not on peak memory usage but on overall memory consumption throughout the computation. Such a cumulative space measure was introduced for the computational model of parallel black pebbling by [Alwen and Serbinenko ’15] as a tool for obtaining results in cryptography. We consider instead the non-deterministic black-white pebble game and prove optimal cumulative space lower bounds and trade-offs, where in order to minimize pebbling time the space has to remain large during a significant fraction of the pebbling. We also initiate the study of cumulative space in proof complexity, an area where other space complexity measures have been extensively studied during the last 10–15 years. Using and extending the connection between proof complexity and pebble games in [Ben-Sasson and Nordström ’08, ’11] we obtain several strong cumulative space results for (even parallel versions of) the resolution proof system, and outline some possible future directions of study of this, in our opinion, natural and interesting space measure.},
author       = {Alwen, Joel F and De Rezende, Susanna and Nordström, Jakob and Vinyals, Marc},
  editor       = {Papadimitriou, Christos},
  issn         = {1868-8969},
  location     = {Berkeley, CA, United States},
pages        = {38:1--38:21},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Cumulative space in black-white pebbling and resolution}},
  doi          = {10.4230/LIPIcs.ITCS.2017.38},
  volume       = {67},
  year         = {2017},
}

@inproceedings{1176,
abstract     = {The algorithm Argon2i-B of Biryukov, Dinu and Khovratovich is currently being considered by the IRTF (Internet Research Task Force) as a new de facto standard for password hashing. An older version (Argon2i-A) of the same algorithm was chosen as the winner of the recent Password Hashing Competition. An important competitor to Argon2i-B is the recently introduced Balloon Hashing (BH) algorithm of Corrigan-Gibbs, Boneh and Schechter. A key security desideratum for any such algorithm is that evaluating it (even using a custom device) requires a large amount of memory amortized across multiple instances. Alwen and Blocki (CRYPTO 2016) introduced a class of theoretical attacks against Argon2i-A and BH. While these attacks yield large asymptotic reductions in the amount of memory, it was not, a priori, clear if (1) they can be extended to the newer Argon2i-B, (2) the attacks are effective on any algorithm for practical parameter ranges (e.g., 1GB of memory) and (3) they can be effectively instantiated against any algorithm under realistic hardware constraints. In this work we answer all three of these questions in the affirmative for all three algorithms. This is also the first work to analyze the security of Argon2i-B. In more detail, we extend the theoretical attacks of Alwen and Blocki (CRYPTO 2016) to the recent Argon2i-B proposal, demonstrating severe asymptotic deficiencies in its security. Next we introduce several novel heuristics for improving the attack's concrete memory efficiency even when on-chip memory bandwidth is bounded. We then simulate our attacks on randomly sampled Argon2i-A, Argon2i-B and BH instances and measure the resulting memory consumption for various practical parameter ranges and for a variety of upper bounds on the amount of parallelism available to the attacker. Finally we describe, implement, and test a new heuristic for applying the Alwen-Blocki attack to functions employing a technique developed by Corrigan-Gibbs et al. for improving concrete security of memory-hard functions. We analyze the collected data and show the effects various parameters have on the memory consumption of the attack. In particular, we can draw several interesting conclusions about the level of security provided by these functions.

· For the Alwen-Blocki attack to fail against practical memory parameters, Argon2i-B must be instantiated with more than 10 passes on memory - beyond the "paranoid" parameter setting in the current IRTF proposal.

· The technique of Corrigan-Gibbs for improving security can also be overcome by the Alwen-Blocki attack under realistic hardware constraints.

· On a positive note, both the asymptotic and concrete security of Argon2i-B seem to improve on that of Argon2i-A.},
  author       = {Alwen, Joel F and Blocki, Jeremiah},
  isbn         = {978-150905761-0},
  location     = {Paris, France},
  publisher    = {IEEE},
title        = {{Towards practical attacks on Argon2i and Balloon Hashing}},
  doi          = {10.1109/EuroSP.2017.47},
  year         = {2017},
}

@inproceedings{11772,
  abstract     = {A dynamic graph algorithm is a data structure that supports operations on dynamically changing graphs.},
  author       = {Henzinger, Monika H},
  booktitle    = {44th International Conference on Current Trends in Theory and Practice of Computer Science},
  isbn         = {9783319731162},
  issn         = {0302-9743},
  location     = {Krems, Austria},
pages        = {40--44},
  publisher    = {Springer Nature},
  title        = {{The state of the art in dynamic graph algorithms}},
  doi          = {10.1007/978-3-319-73117-9_3},
  volume       = {10706},
  year         = {2017},
}

@inproceedings{1178,
abstract     = {For any pair (X, Z) of correlated random variables we can think of Z as a randomized function of X. If the domain of Z is small, one can make this function computationally efficient by allowing it to be only approximately correct. In folklore this problem is known as simulating auxiliary inputs. This idea of simulating auxiliary information turns out to be a very useful tool, finding applications in complexity theory, cryptography, pseudorandomness and zero-knowledge. In this paper we revisit this problem, achieving the following results: (a) We present a novel boosting algorithm for constructing the simulator. This boosting proof is of independent interest, as it shows how to handle “negative mass” issues when constructing probability measures by shifting distinguishers in descent algorithms. Our technique essentially fixes the flaw in the TCC’14 paper “How to Fake Auxiliary Inputs”. (b) The complexity of our simulator is better than in previous works, including results derived from the uniform min-max theorem due to Vadhan and Zheng. To achieve (s, ϵ)-indistinguishability we need the complexity O(s · 2^{5ℓ} · ϵ^{-2}) in time/circuit size, which improves previous bounds by a factor of ϵ^{-2}. In particular, we get meaningful provable security for the EUROCRYPT’09 leakage-resilient stream cipher instantiated with a standard 256-bit block cipher.},
  author       = {Skórski, Maciej},
  pages        = {159 -- 179},
  publisher    = {Springer},
  title        = {{Simulating auxiliary inputs, revisited}},
  doi          = {10.1007/978-3-662-53641-4_7},
  volume       = {9985},
  year         = {2017},
}

@article{1180,
  abstract     = {In this article we define an algebraic vertex of a generalized polyhedron and show that the set of algebraic vertices is the smallest set of points needed to define the polyhedron. We prove that the indicator function of a generalized polytope P is a linear combination of indicator functions of simplices whose vertices are algebraic vertices of P. We also show that the indicator function of any generalized polyhedron is a linear combination, with integer coefficients, of indicator functions of cones with apices at algebraic vertices and line-cones. The concept of an algebraic vertex is closely related to the Fourier–Laplace transform. We show that a point v is an algebraic vertex of a generalized polyhedron P if and only if the tangent cone of P, at v, has non-zero Fourier–Laplace transform.},
  author       = {Akopyan, Arseniy and Bárány, Imre and Robins, Sinai},
  issn         = {0001-8708},
  journal      = {Advances in Mathematics},
  pages        = {627 -- 644},
  publisher    = {Academic Press},
  title        = {{Algebraic vertices of non-convex polyhedra}},
  doi          = {10.1016/j.aim.2016.12.026},
  volume       = {308},
  year         = {2017},
}

@inproceedings{11829,
abstract     = {In recent years it has become popular to study dynamic problems in a sensitivity setting: Instead of allowing for an arbitrary sequence of updates, the sensitivity model only allows applying batch updates of small size to the original input data. The sensitivity model is particularly appealing since recent strong conditional lower bounds ruled out fast algorithms for many dynamic problems, such as shortest paths, reachability, or subgraph connectivity.

In this paper we prove conditional lower bounds for these and additional problems in a sensitivity setting. For example, we show that under the Boolean Matrix Multiplication (BMM) conjecture combinatorial algorithms cannot compute the (4/3-\varepsilon)-approximate diameter of an undirected unweighted dense graph with truly subcubic preprocessing time and truly subquadratic update/query time. This result is surprising since in the static setting it is not clear whether a reduction from BMM to diameter is possible. We further show under the BMM conjecture that many problems, such as reachability or approximate shortest paths, cannot be solved faster than by recomputation from scratch even after only one or two edge insertions. We extend our reduction from BMM to Diameter to give a reduction from All Pairs Shortest Paths to Diameter under one deletion in weighted graphs. This is intriguing, as in the static setting it is a big open problem whether Diameter is as hard as APSP. We further get a nearly tight lower bound for shortest paths after two edge deletions based on the APSP conjecture. We give more lower bounds under the Strong Exponential Time Hypothesis. Many of our lower bounds also hold for static oracle data structures where no sensitivity is required.

Finally, we give the first algorithm for the (1+\varepsilon)-approximate radius, diameter, and eccentricity problems in directed or undirected unweighted graphs in case of single edges failures. The algorithm has a truly subcubic running time for graphs with a truly subquadratic number of edges; it is tight w.r.t. the conditional lower bounds we obtain.},
  author       = {Henzinger, Monika H and Lincoln, Andrea and Neumann, Stefan and Vassilevska Williams, Virginia},
  booktitle    = {8th Innovations in Theoretical Computer Science Conference},
  isbn         = {9783959770293},
  issn         = {1868-8969},
location     = {Berkeley, CA, United States},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Conditional hardness for sensitivity problems}},
  doi          = {10.4230/LIPICS.ITCS.2017.26},
  volume       = {67},
  year         = {2017},
}

@inproceedings{11831,
abstract     = {Graph Sparsification aims at compressing large graphs into smaller ones while (approximately) preserving important characteristics of the input graph. In this work we study Vertex Sparsifiers, i.e., sparsifiers whose goal is to reduce the number of vertices. Given a weighted graph G=(V,E) and a terminal set K with |K|=k, a quality-q vertex cut sparsifier of G is a graph H with K contained in V_H that preserves the value of minimum cuts separating any bipartition of K, up to a factor of q. We show that planar graphs with all the k terminals lying on the same face admit quality-1 vertex cut sparsifiers of size O(k^2) that are also planar. Our result extends to vertex flow and distance sparsifiers. It improves the previous best known bound of O(k^2 2^(2k)) for cut and flow sparsifiers by an exponential factor, and matches an Omega(k^2) lower bound for this class of graphs.

We also study vertex reachability sparsifiers for directed graphs. Given a digraph G=(V,E) and a terminal set K, a vertex reachability sparsifier of G is a digraph H=(V_H,E_H) with K contained in V_H that preserves all reachability information among terminal pairs. We introduce the notion of reachability-preserving minors, i.e., we require H to be a minor of G. Among other results, for general planar digraphs, we construct reachability-preserving minors of size O(k^2 log^2 k). We complement our upper bound by showing that there exists an infinite family of acyclic planar digraphs such that any reachability-preserving minor must have Omega(k^2) vertices.},
  author       = {Goranci, Gramoz and Henzinger, Monika H and Peng, Pan},
  booktitle    = {25th Annual European Symposium on Algorithms},
  isbn         = {978-3-95977-049-1},
  issn         = {1868-8969},
  location     = {Vienna, Austria},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Improved guarantees for vertex sparsification in planar graphs}},
  doi          = {10.4230/LIPICS.ESA.2017.44},
  volume       = {87},
  year         = {2017},
}

@inproceedings{11832,
abstract     = {In this paper, we study the problem of opening centers to cluster a set of clients in a metric space so as to minimize the sum of the costs of the centers and of the cluster radii, in a dynamic environment where clients arrive and depart, and the solution must be updated efficiently while remaining competitive with respect to the current optimal solution. We call this the dynamic sum-of-radii clustering problem.

We present a data structure that maintains a solution whose cost is within a constant factor of the cost of an optimal solution in metric spaces with bounded doubling dimension and whose worst-case update time is logarithmic in the parameters of the problem.},
  author       = {Henzinger, Monika H and Leniowski, Dariusz and Mathieu, Claire},
  booktitle    = {25th Annual European Symposium on Algorithms},
  isbn         = {978-3-95977-049-1},
  issn         = {1868-8969},
  location     = {Vienna, Austria},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Dynamic clustering to minimize the sum of radii}},
  doi          = {10.4230/LIPICS.ESA.2017.48},
  volume       = {87},
  year         = {2017},
}

@inproceedings{11833,
abstract     = {We introduce a new algorithmic framework for designing dynamic graph algorithms in minor-free graphs, by exploiting the structure of such graphs and a tool called vertex sparsification, which is a way to compress large graphs into small ones that preserve relevant properties among a subset of vertices well, and which has previously mainly been used in the design of approximation algorithms.

Using this framework, we obtain a Monte Carlo randomized fully dynamic algorithm for (1 + epsilon)-approximating the energy of electrical flows in n-vertex planar graphs with tilde{O}(r epsilon^{-2}) worst-case update time and tilde{O}((r + n / sqrt{r}) epsilon^{-2}) worst-case query time, for any r larger than some constant. For r=n^{2/3}, this gives tilde{O}(n^{2/3} epsilon^{-2}) update time and tilde{O}(n^{2/3} epsilon^{-2}) query time. We also extend this algorithm to work for minor-free graphs with similar approximation and running time guarantees. Furthermore, we illustrate our framework on the all-pairs max flow and shortest path problems by giving corresponding dynamic algorithms in minor-free graphs with both sublinear update and query times. To the best of our knowledge, our results are the first to systematically establish such a connection between dynamic graph algorithms and vertex sparsification.

We also present both an upper bound and a lower bound for maintaining the energy of electrical flows in the incremental subgraph model, where updates consist of only vertex activations, which might be of independent interest.},
  author       = {Goranci, Gramoz and Henzinger, Monika H and Peng, Pan},
  booktitle    = {25th Annual European Symposium on Algorithms},
  isbn         = {978-3-95977-049-1},
  issn         = {1868-8969},
  location     = {Vienna, Austria},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{The power of vertex sparsifiers in dynamic graph algorithms}},
  doi          = {10.4230/LIPICS.ESA.2017.45},
  volume       = {87},
  year         = {2017},
}

@article{1187,
abstract     = {We construct efficient authentication protocols and message authentication codes (MACs) whose security can be reduced to the learning parity with noise (LPN) problem. Despite a large body of work—starting with the HB protocol of Hopper and Blum in 2001—until now it was not even known how to construct an efficient authentication protocol from LPN which is secure against man-in-the-middle attacks. A MAC implies such a (two-round) protocol.},
  author       = {Kiltz, Eike and Pietrzak, Krzysztof Z and Venturi, Daniele and Cash, David and Jain, Abhishek},
  journal      = {Journal of Cryptology},
  number       = {4},
  pages        = {1238 -- 1275},
  publisher    = {Springer},
  title        = {{Efficient authentication from hard learning problems}},
  doi          = {10.1007/s00145-016-9247-3},
  volume       = {30},
  year         = {2017},
}

@inproceedings{11873,
abstract     = {We study the problem of computing a minimum cut in a simple, undirected graph and give a deterministic O(m log^2 n log log^2 n) time algorithm. This improves both on the best previously known deterministic running time of O(m log^{12} n) (Kawarabayashi and Thorup [12]) and the best previously known randomized running time of O(m log^3 n) (Karger [11]) for this problem, though Karger's algorithm can be further applied to weighted graphs.

Our approach uses the Kawarabayashi and Thorup graph compression technique, which repeatedly finds low-conductance cuts. To find these cuts they use a diffusion-based local algorithm. We instead use a flow-based local algorithm and suitably adjust their framework to work with our flow-based subroutine. Both flow-based and diffusion-based methods have a long history of being applied to finding low-conductance cuts. Diffusion algorithms have several variants that are naturally local, while it is more complicated to make flow methods local. Some prior work has proven nice properties for local flow-based algorithms with respect to improving or cleaning up low-conductance cuts. Our flow subroutine, however, is the first that is both local and produces low-conductance cuts. Thus, it may be of independent interest.},
  author       = {Henzinger, Monika H and Rao, Satish and Wang, Di},
  booktitle    = {28th Annual ACM-SIAM Symposium on Discrete Algorithms},
  location     = {Barcelona, Spain},
  pages        = {1919--1938},
  publisher    = {Society for Industrial and Applied Mathematics},
  title        = {{Local flow partitioning for faster edge connectivity}},
  doi          = {10.1137/1.9781611974782.125},
  year         = {2017},
}

@inproceedings{11874,
abstract     = {We consider the problem of maintaining an approximately maximum (fractional) matching and an approximately minimum vertex cover in a dynamic graph. Starting with the seminal paper by Onak and Rubinfeld [STOC 2010], this problem has received significant attention in recent years. There remains, however, a polynomial gap between the best known worst case update time and the best known amortised update time for this problem, even after allowing for randomisation. Specifically, Bernstein and Stein [ICALP 2015, SODA 2016] have the best known worst case update time. They present a deterministic data structure with approximation ratio (3/2 + ∊) and worst case update time O(m^{1/4}/∊^2), where m is the number of edges in the graph. In the recent past, Gupta and Peng [FOCS 2013] gave a deterministic data structure with approximation ratio (1 + ∊) and worst case update time O(sqrt(m)/∊^2). No known randomised data structure beats the worst case update times of these two results. In contrast, the paper by Onak and Rubinfeld [STOC 2010] gave a randomised data structure with approximation ratio O(1) and amortised update time O(log^2 n), where n is the number of nodes in the graph. This was later improved by Baswana, Gupta and Sen [FOCS 2011] and Solomon [FOCS 2016], leading to a randomised data structure with approximation ratio 2 and amortised update time O(1).

We bridge the polynomial gap between the worst case and amortised update times for this problem, without using any randomisation. We present a deterministic data structure with approximation ratio (2 + ∊) and worst case update time O(log^3 n), for all sufficiently small constants ∊.},
  author       = {Bhattacharya, Sayan and Henzinger, Monika H and Nanongkai, Danupon},
  booktitle    = {28th Annual ACM-SIAM Symposium on Discrete Algorithms},
  location     = {Barcelona, Spain},
  pages        = {470 -- 489},
  publisher    = {Society for Industrial and Applied Mathematics},
title        = {{Fully dynamic approximate maximum matching and minimum vertex cover in O(log^3 n) worst case update time}},
  doi          = {10.1137/1.9781611974782.30},
  year         = {2017},
}

@article{11903,
abstract     = {Online social networks allow the collection of large amounts of data about the influence between users connected by a friendship-like relationship. When distributing items among agents forming a social network, this information allows us to exploit network externalities that each agent receives from his neighbors that get the same item. In this paper we consider Friends-of-Friends (2-hop) network externalities, i.e., externalities that not only depend on the neighbors that get the same item but also on neighbors of neighbors. For these externalities we study a setting where multiple different items are assigned to unit-demand agents. Specifically, we study the problem of welfare maximization under different types of externality functions. Let n be the number of agents and m be the number of items. Our contributions are the following: (1) We show that welfare maximization is APX-hard; we show that even for step functions with 2-hop (and also with 1-hop) externalities it is NP-hard to approximate social welfare better than (1−1/e). (2) On the positive side we present (i) an O(sqrt(n))-approximation algorithm for general concave externality functions, (ii) an O(log m)-approximation algorithm for linear externality functions, and (iii) a (5/18)(1−1/e)-approximation algorithm for 2-hop step function externalities. We also improve the result from [7] for 1-hop step function externalities by giving a (1/2)(1−1/e)-approximation algorithm.},
  author       = {Bhattacharya, Sayan and Dvořák, Wolfgang and Henzinger, Monika H and Starnberger, Martin},
  issn         = {1433-0490},
  journal      = {Theory of Computing Systems},
  number       = {4},
  pages        = {948--986},
  publisher    = {Springer Nature},
  title        = {{Welfare maximization with friends-of-friends network externalities}},
  doi          = {10.1007/s00224-017-9759-8},
  volume       = {61},
  year         = {2017},
}

@article{1191,
  abstract     = {Variation in genotypes may be responsible for differences in dispersal rates, directional biases, and growth rates of individuals. These traits may favor certain genotypes and enhance their spatiotemporal spreading into areas occupied by the less advantageous genotypes. We study how these factors influence the speed of spreading in the case of two competing genotypes under the assumption that spatial variation of the total population is small compared to the spatial variation of the frequencies of the genotypes in the population. In that case, the dynamics of the frequency of one of the genotypes is approximately described by a generalized Fisher–Kolmogorov–Petrovskii–Piskunov (F–KPP) equation. This generalized F–KPP equation with (nonlinear) frequency-dependent diffusion and advection terms admits traveling wave solutions that characterize the invasion of the dominant genotype. Our existence results generalize the classical theory for traveling waves for the F–KPP with constant coefficients. Moreover, in the particular case of the quadratic (monostable) nonlinear growth–decay rate in the generalized F–KPP we study in detail the influence of the variance in diffusion and mean displacement rates of the two genotypes on the minimal wave propagation speed.},
  author       = {Kollár, Richard and Novak, Sebastian},
  journal      = {Bulletin of Mathematical Biology},
  number       = {3},
  pages        = {525--559},
  publisher    = {Springer},
  title        = {{Existence of traveling waves for the generalized F–KPP equation}},
  doi          = {10.1007/s11538-016-0244-3},
  volume       = {79},
  year         = {2017},
}

@inproceedings{1192,
abstract     = {The main result of this paper is a generalization of the classical blossom algorithm for finding perfect matchings. Our algorithm can efficiently solve Boolean CSPs where each variable appears in exactly two constraints (we call it edge CSP) and all constraints are even Δ-matroid relations (represented by lists of tuples). As a consequence of this, we settle the complexity classification of planar Boolean CSPs started by Dvořák and Kupec. Knowing that edge CSP is tractable for even Δ-matroid constraints allows us to extend the tractability result to a larger class of Δ-matroids that includes many classes that were known to be tractable before, namely co-independent, compact, local and binary.},
  author       = {Kazda, Alexandr and Kolmogorov, Vladimir and Rolinek, Michal},
  isbn         = {978-161197478-2},
  location     = {Barcelona, Spain},
  pages        = {307 -- 326},
  publisher    = {SIAM},
  title        = {{Even delta-matroids and the complexity of planar Boolean CSPs}},
  doi          = {10.1137/1.9781611974782.20},
  year         = {2017},
}

@article{1196,
abstract     = {We define the model-measuring problem: given a model M and specification ϕ, what is the maximal distance ρ such that all models M' within distance ρ from M satisfy (or violate) ϕ. The model-measuring problem presupposes a distance function on models. We concentrate on automatic distance functions, which are defined by weighted automata. The model-measuring problem subsumes several generalizations of the classical model-checking problem, in particular, quantitative model-checking problems that measure the degree of satisfaction of a specification; robustness problems that measure how much a model can be perturbed without violating the specification; and parameter synthesis for hybrid systems. We show that for automatic distance functions, and (a) ω-regular linear-time, (b) ω-regular branching-time, and (c) hybrid specifications, the model-measuring problem can be solved. We use automata-theoretic model-checking methods for model measuring, replacing the emptiness question for word, tree, and hybrid automata by the optimal-value question for the weighted versions of these automata. For automata over words and trees, we consider weighted automata that accumulate weights by maximizing, summing, discounting, and limit averaging. For hybrid automata, we consider monotonic (parametric) hybrid automata, a hybrid counterpart of (discrete) weighted automata. We give several examples of using the model-measuring problem to compute various notions of robustness and quantitative satisfaction for temporal specifications. Further, we propose the modeling framework for model measuring to ease the specification and reduce the likelihood of errors in modeling. Finally, we present a variant of the model-measuring problem, called the model-repair problem. The model-repair problem applies to models that do not satisfy the specification; it can be used to derive restrictions, under which the model satisfies the specification, i.e., to repair the model.},
  author       = {Henzinger, Thomas A and Otop, Jan},
  journal      = {Nonlinear Analysis: Hybrid Systems},
  pages        = {166 -- 190},
  publisher    = {Elsevier},
  title        = {{Model measuring for discrete and hybrid systems}},
  doi          = {10.1016/j.nahs.2016.09.001},
  volume       = {23},
  year         = {2017},
}

@article{11961,
  abstract     = {Flow chemistry involves the use of channels or tubing to conduct a reaction in a continuous stream rather than in a flask. Flow equipment provides chemists with unique control over reaction parameters enhancing reactivity or in some cases enabling new reactions. This relatively young technology has received a remarkable amount of attention in the past decade with many reports on what can be done in flow. Until recently, however, the question, “Should we do this in flow?” has merely been an afterthought. This review introduces readers to the basic principles and fundamentals of flow chemistry and critically discusses recent flow chemistry accounts.},
  author       = {Plutschack, Matthew B. and Pieber, Bartholomäus and Gilmore, Kerry and Seeberger, Peter H.},
  issn         = {1520-6890},
  journal      = {Chemical Reviews},
  number       = {18},
  pages        = {11796--11893},
  publisher    = {American Chemical Society},
  title        = {{The Hitchhiker’s Guide to flow chemistry}},
  doi          = {10.1021/acs.chemrev.7b00183},
  volume       = {117},
  year         = {2017},
}

@article{11976,
  abstract     = {The way organic multistep synthesis is performed is changing due to the adoption of flow chemical techniques, which has enabled the development of improved methods to make complex molecules. The modular nature of the technique provides not only access to target molecules via linear flow approaches but also for the targeting of structural cores with single systems. This perspective article summarizes the state of the art of continuous multistep synthesis and discusses the main challenges and opportunities in this area.},
  author       = {Pieber, Bartholomäus and Gilmore, Kerry and Seeberger, Peter H.},
  issn         = {2063-0212},
  journal      = {Journal of Flow Chemistry},
  number       = {3-4},
  pages        = {129--136},
  publisher    = {AKJournals},
  title        = {{Integrated flow processing - challenges in continuous multistep synthesis}},
  doi          = {10.1556/1846.2017.00016},
  volume       = {7},
  year         = {2017},
}

@article{1199,
abstract     = {Much of quantitative genetics is based on the ‘infinitesimal model’, under which selection has a negligible effect on the genetic variance. This is typically justified by assuming a very large number of loci with additive effects. However, it applies even when genes interact, provided that the number of loci is large enough that selection on each of them is weak relative to random drift. In the long term, directional selection will change allele frequencies, but even then, the effects of epistasis on the ultimate change in trait mean due to selection may be modest. Stabilising selection can maintain many traits close to their optima, even when the underlying alleles are weakly selected. However, the number of traits that can be optimised is apparently limited to ~4N_e by the ‘drift load’, and this is hard to reconcile with the apparent complexity of many organisms. Just as for the mutation load, this limit can be evaded by a particular form of negative epistasis. A more robust limit is set by the variance in reproductive success. This suggests that selection accumulates information most efficiently in the infinitesimal regime, when selection on individual alleles is weak, and comparable with random drift. A review of evidence on selection strength suggests that although most variance in fitness may be because of alleles with large N_e s, substantial amounts of adaptation may be because of alleles in the infinitesimal regime, in which epistasis has modest effects.},
  author       = {Barton, Nicholas H},
  journal      = {Heredity},
  pages        = {96 -- 109},
  publisher    = {Nature Publishing Group},
  title        = {{How does epistasis influence the response to selection?}},
  doi          = {10.1038/hdy.2016.109},
  volume       = {118},
  year         = {2017},
}

@article{1207,
abstract     = {The eigenvalue distribution of the sum of two large Hermitian matrices, when one of them is conjugated by a Haar distributed unitary matrix, is asymptotically given by the free convolution of their spectral distributions. We prove that this convergence also holds locally in the bulk of the spectrum, down to the optimal scales larger than the eigenvalue spacing. The corresponding eigenvectors are fully delocalized. Similar results hold for the sum of two real symmetric matrices, when one is conjugated by a Haar orthogonal matrix.},
  author       = {Bao, Zhigang and Erdös, László and Schnelli, Kevin},
  issn         = {0010-3616},
  journal      = {Communications in Mathematical Physics},
  number       = {3},
  pages        = {947 -- 990},
  publisher    = {Springer},
  title        = {{Local law of addition of random matrices on optimal scale}},
  doi          = {10.1007/s00220-016-2805-6},
  volume       = {349},
  year         = {2017},
}

