@article{1156,
  abstract     = {Let k, n, and r be positive integers with k < n and r ≤ ⌊n/k⌋. We determine the facets of the r-stable (n, k)-hypersimplex. As a result, it turns out that the r-stable (n, k)-hypersimplex has exactly 2n facets for every r < ⌊n/k⌋. We then utilize the equations of the facets to study when the r-stable hypersimplex is Gorenstein. For every k > 0 we identify an infinite collection of Gorenstein r-stable hypersimplices, consequently expanding the collection of r-stable hypersimplices known to have unimodal Ehrhart δ-vectors.},
  author       = {Hibi, Takayuki and Solus, Liam},
  journal      = {Annals of Combinatorics},
  number       = {4},
  pages        = {815 -- 829},
  publisher    = {Springer},
  title        = {{Facets of the r-stable (n, k)-hypersimplex}},
  doi          = {10.1007/s00026-016-0325-x},
  volume       = {20},
  year         = {2016},
}

@article{1157,
  abstract     = {We consider sample covariance matrices of the form Q = (σ^{1/2} X)(σ^{1/2} X)*, where the sample X is an M × N random matrix whose entries are real independent random variables with variance 1/N and where σ is an M × M positive-definite deterministic matrix. We analyze the asymptotic fluctuations of the largest rescaled eigenvalue of Q when both M and N tend to infinity with N/M → d ∈ (0, ∞). For a large class of populations σ in the sub-critical regime, we show that the distribution of the largest rescaled eigenvalue of Q is given by the type-1 Tracy-Widom distribution under the additional assumptions that (1) either the entries of X are i.i.d. Gaussians or (2) that σ is diagonal and that the entries of X have a sub-exponential decay.},
  author       = {Lee, Ji Oon and Schnelli, Kevin},
  journal      = {Annals of Applied Probability},
  number       = {6},
  pages        = {3786 -- 3839},
  publisher    = {Institute of Mathematical Statistics},
  title        = {{Tracy-Widom distribution for the largest eigenvalue of real sample covariance matrices with general population}},
  doi          = {10.1214/16-AAP1193},
  volume       = {26},
  year         = {2016},
}

@article{11574,
  abstract     = {We present new results from the widest narrow-band survey search for Lyα emitters at z = 5.7, just after reionization. We survey a total of 7 deg^2 spread over the COSMOS, UDS and SA22 fields. We find over 11 000 line emitters, out of which 514 are robust Lyα candidates at z = 5.7 within a volume of 6.3 × 10^6 Mpc^3. Our Lyα emitters span a wide range in Lyα luminosities, from faint to bright (L_Lyα ∼ 10^{42.5–44} erg s^{−1}) and rest-frame equivalent widths (EW_0 ∼ 25–1000 Å) in a single, homogeneous data set. By combining all our fields, we find that the faint end slope of the z = 5.7 Lyα luminosity function is very steep, with α = −2.3^{+0.4}_{−0.3}. We also present an updated z = 6.6 Lyα luminosity function, based on comparable volumes and obtained with the same methods, which we directly compare with that at z = 5.7. We find a significant decline of the number density of faint Lyα emitters from z = 5.7 to 6.6 (by 0.5 ± 0.1 dex), but no evolution at the bright end/no evolution in L*. Faint Lyα emitters at z = 6.6 show much more extended haloes than those at z = 5.7, suggesting that neutral Hydrogen plays an important role, increasing the scattering and leading to observations missing faint Lyα emission within the epoch of reionization. Altogether, our results suggest that we are observing patchy reionization which happens first around the brightest Lyα emitters, allowing the number densities of those sources to remain unaffected by the increase of neutral Hydrogen fraction from z ∼ 5 to 7.},
  author       = {Santos, Sérgio and Sobral, David and Matthee, Jorryt J},
  issn         = {1365-2966},
  journal      = {Monthly Notices of the Royal Astronomical Society},
  keywords     = {Space and Planetary Science, Astronomy and Astrophysics, galaxies: high-redshift, galaxies: luminosity function, mass function, cosmology: observations, dark ages, reionization, first stars},
  number       = {2},
  pages        = {1678--1691},
  publisher    = {Oxford University Press},
  title        = {{The Lyα luminosity function at z = 5.7–6.6 and the steep drop of the faint end: Implications for reionization}},
  doi          = {10.1093/mnras/stw2076},
  volume       = {463},
  year         = {2016},
}

@article{11575,
  abstract     = {We investigate correlations between different physical properties of star-forming galaxies in the ‘Evolution and Assembly of GaLaxies and their Environments’ (EAGLE) cosmological hydrodynamical simulation suite over the redshift range 0 ≤ z ≤ 4.5. A principal component analysis reveals that neutral gas fraction (f_gas,neutral), stellar mass (M_stellar) and star formation rate (SFR) account for most of the variance seen in the population, with galaxies tracing a two-dimensional, nearly flat, surface in the three-dimensional space of f_gas,neutral–M_stellar–SFR with little scatter. The location of this plane varies little with redshift, whereas galaxies themselves move along the plane as their f_gas,neutral and SFR drop with redshift. The positions of galaxies along the plane are highly correlated with gas metallicity. The metallicity can therefore be robustly predicted from f_gas,neutral, or from the M_stellar and SFR. We argue that the appearance of this ‘Fundamental Plane of star formation’ is a consequence of self-regulation, with the plane's curvature set by the dependence of the SFR on gas density and metallicity. We analyse a large compilation of observations spanning the redshift range 0 ≲ z ≲ 3, and find that such a plane is also present in the data. The properties of the observed Fundamental Plane of star formation are in good agreement with EAGLE's predictions.},
  author       = {Lagos, Claudia del P. and Theuns, Tom and Schaye, Joop and Furlong, Michelle and Bower, Richard G. and Schaller, Matthieu and Crain, Robert A. and Trayford, James W. and Matthee, Jorryt J},
  issn         = {1365-2966},
  journal      = {Monthly Notices of the Royal Astronomical Society},
  keywords     = {Space and Planetary Science, Astronomy and Astrophysics, stars: formation, ISM: evolution, galaxies: evolution, galaxies: formation, galaxies: ISM},
  number       = {3},
  pages        = {2632--2650},
  publisher    = {Oxford University Press},
  title        = {{The Fundamental Plane of star formation in galaxies revealed by the EAGLE hydrodynamical simulations}},
  doi          = {10.1093/mnras/stw717},
  volume       = {459},
  year         = {2016},
}

@article{11576,
  abstract     = {We use new near-infrared spectroscopic observations to investigate the nature and evolution of the most luminous Hα emitters at z ∼ 0.8–2.23, which evolve strongly in number density over this period, and compare them to more typical Hα emitters. We study 59 luminous Hα emitters with L_Hα > L*_Hα, roughly equally split per redshift slice at z ∼ 0.8, 1.47 and 2.23 from the HiZELS and CF-HiZELS surveys. We find that, overall, 30 ± 8 per cent are active galactic nuclei [AGNs; 80 ± 30 per cent of these AGNs are broad-line AGNs, BL-AGNs], and we find little to no evolution in the AGN fraction with redshift, within the errors. However, the AGN fraction increases strongly with Hα luminosity and correlates best with L_Hα/L*_Hα(z). While L_Hα ≤ L*_Hα(z) Hα emitters are largely dominated by star-forming galaxies (>80 per cent), the most luminous Hα emitters (L_Hα > 10 L*_Hα(z)) at any cosmic time are essentially all BL-AGN. Using our AGN-decontaminated sample of luminous star-forming galaxies, and integrating down to a fixed Hα luminosity, we find a factor of ∼1300 evolution in the star formation rate density from z = 0 to 2.23. This is much stronger than the evolution from typical Hα star-forming galaxies and in line with the evolution seen for constant luminosity cuts used to select ‘ultraluminous’ infrared galaxies and/or sub-millimetre galaxies. By taking into account the evolution in the typical Hα luminosity, we show that the most strongly star-forming Hα-selected galaxies at any epoch (L_Hα > L*_Hα(z)) contribute the same fractional amount of ≈15 per cent to the total star formation rate density, at least up to z = 2.23.},
  author       = {Sobral, David and Kohn, Saul A. and Best, Philip N. and Smail, Ian and Harrison, Chris M. and Stott, John and Calhau, João and Matthee, Jorryt J},
  issn         = {1365-2966},
  journal      = {Monthly Notices of the Royal Astronomical Society},
  keywords     = {Space and Planetary Science, Astronomy and Astrophysics, galaxies: evolution, galaxies: high-redshift, cosmology: observations},
  number       = {2},
  pages        = {1739--1752},
  publisher    = {Oxford University Press},
  title        = {{The most luminous Hα emitters at z ∼ 0.8–2.23 from HiZELS: Evolution of AGN and star-forming galaxies}},
  doi          = {10.1093/mnras/stw022},
  volume       = {457},
  year         = {2016},
}

@article{11578,
  abstract     = {We present the first results from our CAlibrating LYMan α with Hα (CALYMHA) pilot survey at the Isaac Newton Telescope. We measure Lyα emission for 488 Hα-selected galaxies at z = 2.23 from the High-z Emission Line Survey in the COSMOS and UDS fields with a specially designed narrow-band filter (λ_c = 3918 Å, Δλ = 52 Å). We find 17 dual Hα-Lyα emitters [f_Lyα > 5 × 10^{−17} erg s^{−1} cm^{−2}, of which five are X-ray active galactic nuclei (AGN)]. For star-forming galaxies, we find a range of Lyα escape fractions (f_esc, measured with 3 arcsec apertures) from 2 to 30 per cent. These galaxies have masses from 3 × 10^8 M⊙ to 10^{11} M⊙ and dust attenuations E(B − V) = 0–0.5. Using stacking, we measure a median escape fraction of 1.6 ± 0.5 per cent (4.0 ± 1.0 per cent without correcting Hα for dust), but show that this depends on galaxy properties. The stacked f_esc tends to decrease with increasing star formation rate and dust attenuation. However, at the highest masses and dust attenuations, we detect individual galaxies with f_esc much higher than the typical values from stacking, indicating significant scatter in the values of f_esc. Relations between f_esc and UV slope are bimodal, with high f_esc for either the bluest or reddest galaxies. We speculate that this bimodality and large scatter in the values of f_esc is due to additional physical mechanisms such as outflows facilitating f_esc for dusty/massive systems. Lyα is significantly more extended than Hα and the UV. f_esc continues to increase up to at least 20 kpc (3σ, 40 kpc [2σ]) for typical star-forming galaxies and thus the aperture is the most important predictor of f_esc.},
  author       = {Matthee, Jorryt J and Sobral, David and Oteo, Iván and Best, Philip and Smail, Ian and Röttgering, Huub and Paulino-Afonso, Ana},
  issn         = {1365-2966},
  journal      = {Monthly Notices of the Royal Astronomical Society},
  keywords     = {Space and Planetary Science, Astronomy and Astrophysics, galaxies: evolution, galaxies: high-redshift, galaxies: ISM},
  number       = {1},
  pages        = {449--467},
  publisher    = {Oxford University Press},
  title        = {{The CALYMHA survey: Lyα escape fraction and its dependence on galaxy properties at z = 2.23}},
  doi          = {10.1093/mnras/stw322},
  volume       = {458},
  year         = {2016},
}

@article{1158,
  abstract     = {Speciation results from the progressive accumulation of mutations that decrease the probability of mating between parental populations or reduce the fitness of hybrids—the so-called species barriers. The speciation genomic literature, however, is mainly a collection of case studies, each with its own approach and specificities, such that a global view of the gradual process of evolution from one to two species is currently lacking. Of primary importance is the prevalence of gene flow between diverging entities, which is central in most species concepts and has been widely discussed in recent years. Here, we explore the continuum of speciation thanks to a comparative analysis of genomic data from 61 pairs of populations/species of animals with variable levels of divergence. Gene flow between diverging gene pools is assessed under an approximate Bayesian computation (ABC) framework. We show that the intermediate "grey zone" of speciation, in which taxonomy is often controversial, spans from 0.5% to 2% of net synonymous divergence, irrespective of species life history traits or ecology. Thanks to appropriate modeling of among-locus variation in genetic drift and introgression rate, we clarify the status of the majority of ambiguous cases and uncover a number of cryptic species. Our analysis also reveals the high incidence in animals of semi-isolated species (when some but not all loci are affected by barriers to gene flow) and highlights the intrinsic difficulty, both statistical and conceptual, of delineating species in the grey zone of speciation.},
  author       = {Roux, Camille and Fraisse, Christelle and Romiguier, Jonathan and Anciaux, Yoann and Galtier, Nicolas and Bierne, Nicolas},
  journal      = {PLoS Biology},
  number       = {12},
  publisher    = {Public Library of Science},
  title        = {{Shedding light on the grey zone of speciation along a continuum of genomic divergence}},
  doi          = {10.1371/journal.pbio.2000234},
  volume       = {14},
  year         = {2016},
}

@inproceedings{1164,
  abstract     = {A drawing of a graph G is radial if the vertices of G are placed on concentric circles C1, … , Ck with common center c, and edges are drawn radially: every edge intersects every circle centered at c at most once. G is radial planar if it has a radial embedding, that is, a crossing-free radial drawing. If the vertices of G are ordered or partitioned into ordered levels (as they are for leveled graphs), we require that the assignment of vertices to circles corresponds to the given ordering or leveling. A pair of edges e and f in a graph is independent if e and f do not share a vertex. We show that a graph G is radial planar if G has a radial drawing in which every two independent edges cross an even number of times; the radial embedding has the same leveling as the radial drawing. In other words, we establish the strong Hanani-Tutte theorem for radial planarity. This characterization yields a very simple algorithm for radial planarity testing.},
  author       = {Fulek, Radoslav and Pelsmajer, Michael and Schaefer, Marcus},
  location     = {Athens, Greece},
  pages        = {468 -- 481},
  publisher    = {Springer},
  title        = {{Hanani-Tutte for radial planarity II}},
  doi          = {10.1007/978-3-319-50106-2_36},
  volume       = {9801},
  year         = {2016},
}

@inproceedings{1165,
  abstract     = {We show that c-planarity is solvable in quadratic time for flat clustered graphs with three clusters if the combinatorial embedding of the underlying graph is fixed. In simpler graph-theoretical terms our result can be viewed as follows. Given a graph G with the vertex set partitioned into three parts embedded on a 2-sphere, our algorithm decides if we can augment G by adding edges without creating an edge-crossing so that in the resulting spherical graph the vertices of each part induce a connected sub-graph. We proceed by a reduction to the problem of testing the existence of a perfect matching in planar bipartite graphs. We formulate our result in a slightly more general setting of cyclic clustered graphs, i.e., the simple graph obtained by contracting each cluster, where we disregard loops and multi-edges, is a cycle.},
  author       = {Fulek, Radoslav},
  location     = {Athens, Greece},
  pages        = {94 -- 106},
  publisher    = {Springer},
  title        = {{C-planarity of embedded cyclic c-graphs}},
  doi          = {10.1007/978-3-319-50106-2_8},
  volume       = {9801},
  year         = {2016},
}

@inproceedings{1166,
  abstract     = {POMDPs are standard models for probabilistic planning problems, where an agent interacts with an uncertain environment. We study the problem of almost-sure reachability, where given a set of target states, the question is to decide whether there is a policy to ensure that the target set is reached with probability 1 (almost-surely). While in general the problem is EXPTIME-complete, in many practical cases policies with a small amount of memory suffice. Moreover, the existing solution to the problem is explicit, which first requires constructing an exponential reduction to a belief-support MDP. In this work, we first study the existence of observation-stationary strategies, which is NP-complete, and then small-memory strategies. We present a symbolic algorithm by an efficient encoding to SAT and using a SAT solver for the problem. We report experimental results demonstrating the scalability of our symbolic (SAT-based) approach.},
  author       = {Chatterjee, Krishnendu and Chmelik, Martin and Davies, Jessica},
  booktitle    = {Proceedings of the Thirtieth AAAI Conference on Artificial Intelligence},
  location     = {Phoenix, AZ, United States},
  pages        = {3225 -- 3232},
  publisher    = {AAAI Press},
  title        = {{A symbolic SAT based algorithm for almost sure reachability with small strategies in POMDPs}},
  doi          = {10.1609/aaai.v30i1.10422},
  volume       = {2016},
  year         = {2016},
}

@article{1167,
  abstract     = {Evolutionary pathways describe trajectories of biological evolution in the space of different variants of organisms (genotypes). The probability of existence and the number of evolutionary pathways that lead from a given genotype to a better-adapted genotype are important measures of accessibility of local fitness optima and the reproducibility of evolution. Both quantities have been studied in simple mathematical models where genotypes are represented as binary sequences of two types of basic units, and the network of permitted mutations between the genotypes is a hypercube graph. However, it is unclear how these results translate to the biologically relevant case in which genotypes are represented by sequences of more than two units, for example four nucleotides (DNA) or 20 amino acids (proteins), and the mutational graph is not the hypercube. Here we investigate accessibility of the best-adapted genotype in the general case of K > 2 units. Using computer generated and experimental fitness landscapes we show that accessibility of the global fitness maximum increases with K and can be much higher than for binary sequences. The increase in accessibility comes from the increase in the number of indirect trajectories exploited by evolution for higher K. As one of the consequences, the fraction of genotypes that are accessible increases by three orders of magnitude when the number of units K increases from 2 to 16 for landscapes of size N ∼ 10^6 genotypes. This suggests that evolution can follow many different trajectories on such landscapes and the reconstruction of evolutionary pathways from experimental data might be an extremely difficult task.},
  author       = {Zagórski, Marcin P and Burda, Zdzisław and Wacław, Bartłomiej},
  journal      = {PLoS Computational Biology},
  number       = {12},
  publisher    = {Public Library of Science},
  title        = {{Beyond the hypercube: Evolutionary accessibility of fitness landscapes with realistic mutational networks}},
  doi          = {10.1371/journal.pcbi.1005218},
  volume       = {12},
  year         = {2016},
}

@article{1170,
  abstract     = {The increasing complexity of dynamic models in systems and synthetic biology poses computational challenges especially for the identification of model parameters. While modularization of the corresponding optimization problems could help reduce the “curse of dimensionality,” abundant feedback and crosstalk mechanisms prohibit a simple decomposition of most biomolecular networks into subnetworks, or modules. Drawing on ideas from network modularization and multiple-shooting optimization, we present here a modular parameter identification approach that explicitly allows for such interdependencies. Interfaces between our modules are given by the experimentally measured molecular species. This definition allows deriving good (initial) estimates for the inter-module communication directly from the experimental data. Given these estimates, the states and parameter sensitivities of different modules can be integrated independently. To achieve consistency between modules, we iteratively adjust the estimates for inter-module communication while optimizing the parameters. After convergence to an optimal parameter set---but not during earlier iterations---the intermodule communication as well as the individual modules' state dynamics agree with the dynamics of the nonmodularized network. Our modular parameter identification approach allows for easy parallelization; it can reduce the computational complexity for larger networks and decrease the probability to converge to suboptimal local minima. We demonstrate the algorithm's performance in parameter estimation for two biomolecular networks, a synthetic genetic oscillator and a mammalian signaling pathway.},
  author       = {Lang, Moritz and Stelling, Jörg},
  journal      = {SIAM Journal on Scientific Computing},
  number       = {6},
  pages        = {B988 -- B1008},
  publisher    = {Society for Industrial and Applied Mathematics},
  title        = {{Modular parameter identification of biomolecular networks}},
  doi          = {10.1137/15M103306X},
  volume       = {38},
  year         = {2016},
}

@article{1171,
  author       = {Tkačik, Gašper},
  journal      = {Physics of Life Reviews},
  pages        = {166 -- 167},
  publisher    = {Elsevier},
  title        = {{Understanding regulatory networks requires more than computing a multitude of graph statistics: Comment on "Drivers of structural features in gene regulatory networks: From biophysical constraints to biological function" by O. C. Martin et al.}},
  doi          = {10.1016/j.plrev.2016.06.005},
  volume       = {17},
  year         = {2016},
}

@article{1172,
  abstract     = {A central issue in cell biology is the physico-chemical basis of organelle biogenesis in intracellular trafficking pathways, its most impressive manifestation being the biogenesis of Golgi cisternae. At a basic level, such morphologically and chemically distinct compartments should arise from an interplay between the molecular transport and chemical maturation. Here, we formulate analytically tractable, minimalist models, that incorporate this interplay between transport and chemical progression in physical space, and explore the conditions for de novo biogenesis of distinct cisternae. We propose new quantitative measures that can discriminate between the various models of transport in a qualitative manner; this includes measures of the dynamics in steady state and the dynamical response to perturbations of the kind amenable to live-cell imaging.},
  author       = {Sachdeva, Himani and Barma, Mustansir and Rao, Madan},
  journal      = {Scientific Reports},
  publisher    = {Nature Publishing Group},
  title        = {{Nonequilibrium description of de novo biogenesis and transport through Golgi-like cisternae}},
  doi          = {10.1038/srep38840},
  volume       = {6},
  year         = {2016},
}

@article{1177,
  abstract     = {Boldyreva, Palacio and Warinschi introduced a multiple forking game as an extension of general forking. The notion of (multiple) forking is a useful abstraction from the actual simulation of a cryptographic scheme to the adversary in a security reduction, and is achieved through the intermediary of a so-called wrapper algorithm. Multiple forking has turned out to be a useful tool in the security argument of several cryptographic protocols. However, a reduction employing multiple forking incurs a significant degradation of (Formula presented.), where (Formula presented.) denotes the upper bound on the underlying random oracle calls and (Formula presented.), the number of forkings. In this work we take a closer look at the reasons for the degradation with a tighter security bound in mind. We nail down the exact set of conditions for success in the multiple forking game. A careful analysis of the cryptographic schemes and corresponding security reduction employing multiple forking leads to the formulation of ‘dependence’ and ‘independence’ conditions pertaining to the output of the wrapper in different rounds. Based on the (in)dependence conditions we propose a general framework of multiple forking and a General Multiple Forking Lemma. Leveraging (in)dependence to the full allows us to improve the degradation factor in the multiple forking game by a factor of (Formula presented.). By implication, the cost of a single forking involving two random oracles (augmented forking) matches that involving a single random oracle (elementary forking). Finally, we study the effect of these observations on the concrete security of existing schemes employing multiple forking. We conclude that by careful design of the protocol (and the wrapper in the security reduction) it is possible to harness our observations to the full extent.},
  author       = {Kamath Hosdurg, Chethan and Chatterjee, Sanjit},
  journal      = {Algorithmica},
  number       = {4},
  pages        = {1321 -- 1362},
  publisher    = {Springer},
  title        = {{A closer look at multiple-forking: Leveraging (in)dependence for a tighter bound}},
  doi          = {10.1007/s00453-015-9997-6},
  volume       = {74},
  year         = {2016},
}

@inproceedings{1179,
  abstract     = {Computational notions of entropy have recently found many applications, including leakage-resilient cryptography, deterministic encryption or memory delegation. The two main types of results which make computational notions so useful are (1) chain rules, which quantify by how much the computational entropy of a variable decreases if conditioned on some other variable, and (2) transformations, which quantify to which extent one type of entropy implies another. Such chain rules and transformations typically lose a significant amount in quality of the entropy, and are the reason why applying these results one gets rather weak quantitative security bounds. In this paper we for the first time prove lower bounds in this context, showing that existing results for transformations are, unfortunately, basically optimal for non-adaptive black-box reductions (and it is hard to imagine how non-black-box reductions or adaptivity could be useful here). A variable X has k bits of HILL entropy of quality (ϵ, s) if there exists a variable Y with k bits of min-entropy which cannot be distinguished from X with advantage ϵ by distinguishing circuits of size s. A weaker notion is Metric entropy, where we switch quantifiers, and only require that for every distinguisher of size s, such a Y exists. We first describe our result concerning transformations. By definition, HILL implies Metric without any loss in quality. Metric entropy often comes up in applications, but must be transformed to HILL for meaningful security guarantees. The best known result states that if a variable X has k bits of Metric entropy of quality (ϵ, s), then it has k bits of HILL entropy with quality (2ϵ, s·ϵ^2). We show that this loss of a factor Ω(ϵ^{−2}) in circuit size is necessary. In fact, we show the stronger result that this loss is already necessary when transforming so-called deterministic real-valued Metric entropy to randomised boolean Metric entropy (both these variants of Metric entropy are implied by HILL without loss in quality). The chain rule for HILL entropy states that if X has k bits of HILL entropy of quality (ϵ, s), then for any variable Z of length m, X conditioned on Z has k − m bits of HILL entropy with quality (ϵ, s·ϵ^2/2^m). We show that a loss of Ω(2^m/ϵ) in circuit size is necessary here. Note that this still leaves a gap of ϵ between the known bound and our lower bound.},
  author       = {Pietrzak, Krzysztof Z and Skorski, Maciej},
  location     = {Beijing, China},
  pages        = {183 -- 203},
  publisher    = {Springer},
  title        = {{Pseudoentropy: Lower-bounds for chain rules and transformations}},
  doi          = {10.1007/978-3-662-53641-4_8},
  volume       = {9985},
  year         = {2016},
}

@article{1181,
  abstract     = {This review accompanies a 2016 SFN mini-symposium presenting examples of current studies that address a central question: How do neural stem cells (NSCs) divide in different ways to produce heterogeneous daughter types at the right time and in proper numbers to build a cerebral cortex with the appropriate size and structure? We will focus on four aspects of corticogenesis: cytokinesis events that follow apical mitoses of NSCs; coordinating abscission with delamination from the apical membrane; timing of neurogenesis and its indirect regulation through emergence of intermediate progenitors; and capacity of single NSCs to generate the correct number and laminar fate of cortical neurons. Defects in these mechanisms can cause microcephaly and other brain malformations, and understanding them is critical to designing diagnostic tools and preventive and corrective therapies.},
  author       = {Dwyer, Noelle and Chen, Bin and Chou, Shen and Hippenmeyer, Simon and Nguyen, Laurent and Ghashghaei, Troy},
  journal      = {Journal of Neuroscience},
  number       = {45},
  pages        = {11394 -- 11401},
  publisher    = {Society for Neuroscience},
  title        = {{Neural stem cells to cerebral cortex: Emerging mechanisms regulating progenitor behavior and productivity}},
  doi          = {10.1523/JNEUROSCI.2359-16.2016},
  volume       = {36},
  year         = {2016},
}

@inproceedings{1182,
  abstract     = {Balanced knockout tournaments are ubiquitous in sports competitions and are also used in decision-making and elections. The traditional computational question, that asks to compute a draw (optimal draw) that maximizes the winning probability for a distinguished player, has received a lot of attention. Previous works consider the problem where the pairwise winning probabilities are known precisely, while we study how robust the winning probability is with respect to small errors in the pairwise winning probabilities. First, we present several illuminating examples to establish: (a) there exist deterministic tournaments (where the pairwise winning probabilities are 0 or 1) where one optimal draw is much more robust than the other; and (b) in general, there exist tournaments with slightly suboptimal draws that are more robust than all the optimal draws. The above examples motivate the study of the computational problem of robust draws that guarantee a specified winning probability. Second, we present a polynomial-time algorithm for approximating the robustness of a draw for sufficiently small errors in pairwise winning probabilities, and obtain that the stated computational problem is NP-complete. We also show that two natural cases of deterministic tournaments where the optimal draw could be computed in polynomial time also admit polynomial-time algorithms to compute robust optimal draws.},
  author       = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Tkadlec, Josef},
  location     = {New York, NY, USA},
  pages        = {172 -- 179},
  publisher    = {AAAI Press},
  title        = {{Robust draws in balanced knockout tournaments}},
  volume       = {2016-January},
  year         = {2016},
}

@inproceedings{11834,
  abstract     = {We present a deterministic incremental algorithm for exactly maintaining the size of a minimum cut with ~O(1) amortized time per edge insertion and O(1) query time. This result partially answers an open question posed by Thorup [Combinatorica 2007]. It also stays in sharp contrast to a polynomial conditional lower-bound for the fully-dynamic weighted minimum cut problem. Our algorithm is obtained by combining a recent sparsification technique of Kawarabayashi and Thorup [STOC 2015] and an exact incremental algorithm of Henzinger [J. of Algorithm 1997].

We also study space-efficient incremental algorithms for the minimum cut problem. Concretely, we show that there exists an O(n log n/epsilon^2) space Monte-Carlo algorithm that can process a stream of edge insertions starting from an empty graph, and with high probability, the algorithm maintains a (1+epsilon)-approximation to the minimum cut. The algorithm has ~O(1) amortized update-time and constant query-time.},
  author       = {Goranci, Gramoz and Henzinger, Monika H and Thorup, Mikkel},
  booktitle    = {24th Annual European Symposium on Algorithms},
  isbn         = {978-3-95977-015-6},
  issn         = {1868-8969},
  location     = {Aarhus, Denmark},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Incremental exact min-cut in poly-logarithmic amortized update time}},
  doi          = {10.4230/LIPICS.ESA.2016.46},
  volume       = {57},
  year         = {2016},
}

@inproceedings{11835,
  abstract     = {During the last 10 years it has become popular to study dynamic graph problems in an emergency planning or sensitivity setting: instead of considering the general fully dynamic problem, we only have to process a single batch update of size d; after the update we have to answer queries.

In this paper, we consider the dynamic subgraph connectivity problem with sensitivity d: We are given a graph of which some vertices are activated and some are deactivated. After that we get a single update in which the states of up to d vertices are changed. Then we get a sequence of connectivity queries in the subgraph of activated vertices.

We present the first fully dynamic algorithm for this problem which has an update and query time only slightly worse than the best decremental algorithm. In addition, we present the first incremental algorithm which is tight with respect to the best known conditional lower bound; moreover, the algorithm is simple and we believe it is implementable and efficient in practice.},
  author       = {Henzinger, Monika H and Neumann, Stefan},
  booktitle    = {24th Annual European Symposium on Algorithms},
  isbn         = {978-3-95977-015-6},
  issn         = {1868-8969},
  location     = {Aarhus, Denmark},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Incremental and fully dynamic subgraph connectivity for emergency planning}},
  doi          = {10.4230/LIPICS.ESA.2016.48},
  volume       = {57},
  year         = {2016},
}

