@article{11578,
  abstract     = {We present the first results from our CAlibrating LYMan α with Hα (CALYMHA) pilot survey at the Isaac Newton Telescope. We measure Lyα emission for 488 Hα selected galaxies at z = 2.23 from High-z Emission Line Survey in the COSMOS and UDS fields with a specially designed narrow-band filter (λc = 3918 Å, Δλ = 52 Å). We find 17 dual Hα-Lyα emitters [fLyα > 5 × 10−17 erg s−1 cm−2, of which five are X-ray active galactic nuclei (AGN)]. For star-forming galaxies, we find a range of Lyα escape fractions (fesc, measured with 3 arcsec apertures) from 2 to 30 per cent. These galaxies have masses from 3 × 108 M⊙ to 1011 M⊙ and dust attenuations E(B − V) = 0–0.5. Using stacking, we measure a median escape fraction of 1.6 ± 0.5 per cent (4.0 ± 1.0 per cent without correcting Hα for dust), but show that this depends on galaxy properties. The stacked fesc tends to decrease with increasing star formation rate and dust attenuation. However, at the highest masses and dust attenuations, we detect individual galaxies with fesc much higher than the typical values from stacking, indicating significant scatter in the values of fesc. Relations between fesc and UV slope are bimodal, with high fesc for either the bluest or reddest galaxies. We speculate that this bimodality and large scatter in the values of fesc is due to additional physical mechanisms such as outflows facilitating fesc for dusty/massive systems. Lyα is significantly more extended than Hα and the UV. fesc continues to increase up to at least 20 kpc (3σ, 40 kpc [2σ]) for typical star-forming galaxies and thus the aperture is the most important predictor of fesc.},
  author       = {Matthee, Jorryt J and Sobral, David and Oteo, Iván and Best, Philip and Smail, Ian and Röttgering, Huub and Paulino-Afonso, Ana},
  issn         = {1365-2966},
  journal      = {Monthly Notices of the Royal Astronomical Society},
  keywords     = {Space and Planetary Science, Astronomy and Astrophysics, galaxies: evolution, galaxies: high-redshift, galaxies: ISM},
  number       = {1},
  pages        = {449--467},
  publisher    = {Oxford University Press},
  title        = {{The CALYMHA survey: Lyα escape fraction and its dependence on galaxy properties at z = 2.23}},
  doi          = {10.1093/mnras/stw322},
  volume       = {458},
  year         = {2016},
}

@article{1158,
  abstract     = {Speciation results from the progressive accumulation of mutations that decrease the probability of mating between parental populations or reduce the fitness of hybrids—the so-called species barriers. The speciation genomic literature, however, is mainly a collection of case studies, each with its own approach and specificities, such that a global view of the gradual process of evolution from one to two species is currently lacking. Of primary importance is the prevalence of gene flow between diverging entities, which is central in most species concepts and has been widely discussed in recent years. Here, we explore the continuum of speciation thanks to a comparative analysis of genomic data from 61 pairs of populations/species of animals with variable levels of divergence. Gene flow between diverging gene pools is assessed under an approximate Bayesian computation (ABC) framework. We show that the intermediate “grey zone” of speciation, in which taxonomy is often controversial, spans from 0.5% to 2% of net synonymous divergence, irrespective of species life history traits or ecology. Thanks to appropriate modeling of among-locus variation in genetic drift and introgression rate, we clarify the status of the majority of ambiguous cases and uncover a number of cryptic species. Our analysis also reveals the high incidence in animals of semi-isolated species (when some but not all loci are affected by barriers to gene flow) and highlights the intrinsic difficulty, both statistical and conceptual, of delineating species in the grey zone of speciation.},
  author       = {Roux, Camille and Fraisse, Christelle and Romiguier, Jonathan and Anciaux, Youann and Galtier, Nicolas and Bierne, Nicolas},
  journal      = {PLoS Biology},
  number       = {12},
  publisher    = {Public Library of Science},
  title        = {{Shedding light on the grey zone of speciation along a continuum of genomic divergence}},
  doi          = {10.1371/journal.pbio.2000234},
  volume       = {14},
  year         = {2016},
}

@inproceedings{1164,
  abstract     = {A drawing of a graph G is radial if the vertices of G are placed on concentric circles C1, … , Ck with common center c, and edges are drawn radially: every edge intersects every circle centered at c at most once. G is radial planar if it has a radial embedding, that is, a crossing-free radial drawing. If the vertices of G are ordered or partitioned into ordered levels (as they are for leveled graphs), we require that the assignment of vertices to circles corresponds to the given ordering or leveling. A pair of edges e and f in a graph is independent if e and f do not share a vertex. We show that a graph G is radial planar if G has a radial drawing in which every two independent edges cross an even number of times; the radial embedding has the same leveling as the radial drawing. In other words, we establish the strong Hanani-Tutte theorem for radial planarity. This characterization yields a very simple algorithm for radial planarity testing.},
  author       = {Fulek, Radoslav and Pelsmajer, Michael and Schaefer, Marcus},
  location     = {Athens, Greece},
  pages        = {468 -- 481},
  publisher    = {Springer},
  title        = {{Hanani-Tutte for radial planarity II}},
  doi          = {10.1007/978-3-319-50106-2_36},
  volume       = {9801},
  year         = {2016},
}

@inproceedings{1165,
  abstract     = {We show that c-planarity is solvable in quadratic time for flat clustered graphs with three clusters if the combinatorial embedding of the underlying graph is fixed. In simpler graph-theoretical terms our result can be viewed as follows. Given a graph G with the vertex set partitioned into three parts embedded on a 2-sphere, our algorithm decides if we can augment G by adding edges without creating an edge-crossing so that in the resulting spherical graph the vertices of each part induce a connected sub-graph. We proceed by a reduction to the problem of testing the existence of a perfect matching in planar bipartite graphs. We formulate our result in a slightly more general setting of cyclic clustered graphs, i.e., the simple graph obtained by contracting each cluster, where we disregard loops and multi-edges, is a cycle.},
  author       = {Fulek, Radoslav},
  location     = {Athens, Greece},
  pages        = {94 -- 106},
  publisher    = {Springer},
  title        = {{C-planarity of embedded cyclic c-graphs}},
  doi          = {10.1007/978-3-319-50106-2_8},
  volume       = {9801},
  year         = {2016},
}

@inproceedings{1166,
  abstract     = {POMDPs are standard models for probabilistic planning problems, where an agent interacts with an uncertain environment. We study the problem of almost-sure reachability, where given a set of target states, the question is to decide whether there is a policy to ensure that the target set is reached with probability 1 (almost-surely). While in general the problem is EXPTIME-complete, in many practical cases policies with a small amount of memory suffice. Moreover, the existing solution to the problem is explicit, which first requires to construct explicitly an exponential reduction to a belief-support MDP. In this work, we first study the existence of observation-stationary strategies, which is NP-complete, and then small-memory strategies. We present a symbolic algorithm by an efficient encoding to SAT and using a SAT solver for the problem. We report experimental results demonstrating the scalability of our symbolic (SAT-based) approach. © 2016, Association for the Advancement of Artificial Intelligence (www.aaai.org). All rights reserved.},
  author       = {Chatterjee, Krishnendu and Chmelik, Martin and Davies, Jessica},
  booktitle    = {Proceedings of the Thirtieth AAAI Conference on Artificial Intelligence},
  location     = {Phoenix, AZ, United States},
  pages        = {3225 -- 3232},
  publisher    = {AAAI Press},
  title        = {{A symbolic SAT based algorithm for almost sure reachability with small strategies in POMDPs}},
  doi          = {10.1609/aaai.v30i1.10422},
  volume       = {2016},
  year         = {2016},
}

@article{1167,
  abstract     = {Evolutionary pathways describe trajectories of biological evolution in the space of different variants of organisms (genotypes). The probability of existence and the number of evolutionary pathways that lead from a given genotype to a better-adapted genotype are important measures of accessibility of local fitness optima and the reproducibility of evolution. Both quantities have been studied in simple mathematical models where genotypes are represented as binary sequences of two types of basic units, and the network of permitted mutations between the genotypes is a hypercube graph. However, it is unclear how these results translate to the biologically relevant case in which genotypes are represented by sequences of more than two units, for example four nucleotides (DNA) or 20 amino acids (proteins), and the mutational graph is not the hypercube. Here we investigate accessibility of the best-adapted genotype in the general case of K > 2 units. Using computer generated and experimental fitness landscapes we show that accessibility of the global fitness maximum increases with K and can be much higher than for binary sequences. The increase in accessibility comes from the increase in the number of indirect trajectories exploited by evolution for higher K. As one of the consequences, the fraction of genotypes that are accessible increases by three orders of magnitude when the number of units K increases from 2 to 16 for landscapes of size N ∼ 10⁶ genotypes. This suggests that evolution can follow many different trajectories on such landscapes and the reconstruction of evolutionary pathways from experimental data might be an extremely difficult task.},
  author       = {Zagórski, Marcin P and Burda, Zdzisław and Wacław, Bartłomiej},
  journal      = {PLoS Computational Biology},
  number       = {12},
  publisher    = {Public Library of Science},
  title        = {{Beyond the hypercube evolutionary accessibility of fitness landscapes with realistic mutational networks}},
  doi          = {10.1371/journal.pcbi.1005218},
  volume       = {12},
  year         = {2016},
}

@article{1170,
  abstract     = {The increasing complexity of dynamic models in systems and synthetic biology poses computational challenges especially for the identification of model parameters. While modularization of the corresponding optimization problems could help reduce the “curse of dimensionality,” abundant feedback and crosstalk mechanisms prohibit a simple decomposition of most biomolecular networks into subnetworks, or modules. Drawing on ideas from network modularization and multiple-shooting optimization, we present here a modular parameter identification approach that explicitly allows for such interdependencies. Interfaces between our modules are given by the experimentally measured molecular species. This definition allows deriving good (initial) estimates for the inter-module communication directly from the experimental data. Given these estimates, the states and parameter sensitivities of different modules can be integrated independently. To achieve consistency between modules, we iteratively adjust the estimates for inter-module communication while optimizing the parameters. After convergence to an optimal parameter set---but not during earlier iterations---the intermodule communication as well as the individual modules' state dynamics agree with the dynamics of the nonmodularized network. Our modular parameter identification approach allows for easy parallelization; it can reduce the computational complexity for larger networks and decrease the probability to converge to suboptimal local minima. We demonstrate the algorithm's performance in parameter estimation for two biomolecular networks, a synthetic genetic oscillator and a mammalian signaling pathway.},
  author       = {Lang, Moritz and Stelling, Jörg},
  journal      = {SIAM Journal on Scientific Computing},
  number       = {6},
  pages        = {B988 -- B1008},
  publisher    = {Society for Industrial and Applied Mathematics},
  title        = {{Modular parameter identification of biomolecular networks}},
  doi          = {10.1137/15M103306X},
  volume       = {38},
  year         = {2016},
}

@article{1171,
  author       = {Tkacik, Gasper},
  journal      = {Physics of Life Reviews},
  pages        = {166 -- 167},
  publisher    = {Elsevier},
  title        = {{Understanding regulatory networks requires more than computing a multitude of graph statistics: Comment on ``Drivers of structural features in gene regulatory networks: From biophysical constraints to biological function'' by O. C. Martin et al.}},
  doi          = {10.1016/j.plrev.2016.06.005},
  volume       = {17},
  year         = {2016},
}

@article{1172,
  abstract     = {A central issue in cell biology is the physico-chemical basis of organelle biogenesis in intracellular trafficking pathways, its most impressive manifestation being the biogenesis of Golgi cisternae. At a basic level, such morphologically and chemically distinct compartments should arise from an interplay between the molecular transport and chemical maturation. Here, we formulate analytically tractable, minimalist models, that incorporate this interplay between transport and chemical progression in physical space, and explore the conditions for de novo biogenesis of distinct cisternae. We propose new quantitative measures that can discriminate between the various models of transport in a qualitative manner-this includes measures of the dynamics in steady state and the dynamical response to perturbations of the kind amenable to live-cell imaging.},
  author       = {Sachdeva, Himani and Barma, Mustansir and Rao, Madan},
  journal      = {Scientific Reports},
  publisher    = {Nature Publishing Group},
  title        = {{Nonequilibrium description of de novo biogenesis and transport through Golgi-like cisternae}},
  doi          = {10.1038/srep38840},
  volume       = {6},
  year         = {2016},
}

@article{1177,
  abstract     = {Boldyreva, Palacio and Warinschi introduced a multiple forking game as an extension of general forking. The notion of (multiple) forking is a useful abstraction from the actual simulation of cryptographic scheme to the adversary in a security reduction, and is achieved through the intermediary of a so-called wrapper algorithm. Multiple forking has turned out to be a useful tool in the security argument of several cryptographic protocols. However, a reduction employing multiple forking incurs a significant degradation of (Formula presented.) , where (Formula presented.) denotes the upper bound on the underlying random oracle calls and (Formula presented.) , the number of forkings. In this work we take a closer look at the reasons for the degradation with a tighter security bound in mind. We nail down the exact set of conditions for success in the multiple forking game. A careful analysis of the cryptographic schemes and corresponding security reduction employing multiple forking leads to the formulation of ‘dependence’ and ‘independence’ conditions pertaining to the output of the wrapper in different rounds. Based on the (in)dependence conditions we propose a general framework of multiple forking and a General Multiple Forking Lemma. Leveraging (in)dependence to the full allows us to improve the degradation factor in the multiple forking game by a factor of (Formula presented.). By implication, the cost of a single forking involving two random oracles (augmented forking) matches that involving a single random oracle (elementary forking). Finally, we study the effect of these observations on the concrete security of existing schemes employing multiple forking. We conclude that by careful design of the protocol (and the wrapper in the security reduction) it is possible to harness our observations to the full extent.},
  author       = {Kamath Hosdurg, Chethan and Chatterjee, Sanjit},
  journal      = {Algorithmica},
  number       = {4},
  pages        = {1321 -- 1362},
  publisher    = {Springer},
  title        = {{A closer look at multiple-forking: Leveraging (in)dependence for a tighter bound}},
  doi          = {10.1007/s00453-015-9997-6},
  volume       = {74},
  year         = {2016},
}

@inproceedings{1179,
  abstract     = {Computational notions of entropy have recently found many applications, including leakage-resilient cryptography, deterministic encryption or memory delegation. The two main types of results which make computational notions so useful are (1) Chain rules, which quantify by how much the computational entropy of a variable decreases if conditioned on some other variable (2) Transformations, which quantify to which extend one type of entropy implies another.

Such chain rules and transformations typically lose a significant amount in quality of the entropy, and are the reason why applying these results one gets rather weak quantitative security bounds. In this paper we for the first time prove lower bounds in this context, showing that existing results for transformations are, unfortunately, basically optimal for non-adaptive black-box reductions (and it’s hard to imagine how non black-box reductions or adaptivity could be useful here.)

A variable X has k bits of HILL entropy of quality (ϵ,s)
if there exists a variable Y with k bits min-entropy which cannot be distinguished from X with advantage ϵ

by distinguishing circuits of size s. A weaker notion is Metric entropy, where we switch quantifiers, and only require that for every distinguisher of size s, such a Y exists.

We first describe our result concerning transformations. By definition, HILL implies Metric without any loss in quality. Metric entropy often comes up in applications, but must be transformed to HILL for meaningful security guarantees. The best known result states that if a variable X has k bits of Metric entropy of quality (ϵ,s)
, then it has k bits of HILL with quality (2ϵ,s⋅ϵ2). We show that this loss of a factor Ω(ϵ−2)

in circuit size is necessary. In fact, we show the stronger result that this loss is already necessary when transforming so called deterministic real valued Metric entropy to randomised boolean Metric (both these variants of Metric entropy are implied by HILL without loss in quality).

The chain rule for HILL entropy states that if X has k bits of HILL entropy of quality (ϵ,s)
, then for any variable Z of length m, X conditioned on Z has k−m bits of HILL entropy with quality (ϵ,s⋅ϵ2/2m). We show that a loss of Ω(2m/ϵ) in circuit size necessary here. Note that this still leaves a gap of ϵ between the known bound and our lower bound.},
  author       = {Pietrzak, Krzysztof Z and Skorski, Maciej},
  location     = {Beijing, China},
  pages        = {183 -- 203},
  publisher    = {Springer},
  title        = {{Pseudoentropy: Lower-bounds for chain rules and transformations}},
  doi          = {10.1007/978-3-662-53641-4_8},
  volume       = {9985},
  year         = {2016},
}

@article{1181,
  abstract     = {This review accompanies a 2016 SFN mini-symposium presenting examples of current studies that address a central question: How do neural stem cells (NSCs) divide in different ways to produce heterogeneous daughter types at the right time and in proper numbers to build a cerebral cortex with the appropriate size and structure? We will focus on four aspects of corticogenesis: cytokinesis events that follow apical mitoses of NSCs; coordinating abscission with delamination from the apical membrane; timing of neurogenesis and its indirect regulation through emergence of intermediate progenitors; and capacity of single NSCs to generate the correct number and laminar fate of cortical neurons. Defects in these mechanisms can cause microcephaly and other brain malformations, and understanding them is critical to designing diagnostic tools and preventive and corrective therapies.},
  author       = {Dwyer, Noelle and Chen, Bin and Chou, Shen and Hippenmeyer, Simon and Nguyen, Laurent and Ghashghaei, Troy},
  journal      = {Journal of Neuroscience},
  number       = {45},
  pages        = {11394 -- 11401},
  publisher    = {Society for Neuroscience},
  title        = {{Neural stem cells to cerebral cortex: Emerging mechanisms regulating progenitor behavior and productivity}},
  doi          = {10.1523/JNEUROSCI.2359-16.2016},
  volume       = {36},
  year         = {2016},
}

@inproceedings{1182,
  abstract     = {Balanced knockout tournaments are ubiquitous in sports competitions and are also used in decisionmaking and elections. The traditional computational question, that asks to compute a draw (optimal draw) that maximizes the winning probability for a distinguished player, has received a lot of attention. Previous works consider the problem where the pairwise winning probabilities are known precisely, while we study how robust is the winning probability with respect to small errors in the pairwise winning probabilities. First, we present several illuminating examples to establish: (a) there exist deterministic tournaments (where the pairwise winning probabilities are 0 or 1) where one optimal draw is much more robust than the other; and (b) in general, there exist tournaments with slightly suboptimal draws that are more robust than all the optimal draws. The above examples motivate the study of the computational problem of robust draws that guarantee a specified winning probability. Second, we present a polynomial-time algorithm for approximating the robustness of a draw for sufficiently small errors in pairwise winning probabilities, and obtain that the stated computational problem is NP-complete. We also show that two natural cases of deterministic tournaments where the optimal draw could be computed in polynomial time also admit polynomial-time algorithms to compute robust optimal draws.},
  author       = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Tkadlec, Josef},
  location     = {New York, NY, USA},
  pages        = {172 -- 179},
  publisher    = {AAAI Press},
  title        = {{Robust draws in balanced knockout tournaments}},
  volume       = {2016-January},
  year         = {2016},
}

@inproceedings{11834,
  abstract     = {We present a deterministic incremental algorithm for exactly maintaining the size of a minimum cut with ~O(1) amortized time per edge insertion and O(1) query time. This result partially answers an open question posed by Thorup [Combinatorica 2007]. It also stays in sharp contrast to a polynomial conditional lower-bound for the fully-dynamic weighted minimum cut problem. Our algorithm is obtained by combining a recent sparsification technique of Kawarabayashi and Thorup [STOC 2015] and an exact incremental algorithm of Henzinger [J. of Algorithm 1997].

We also study space-efficient incremental algorithms for the minimum cut problem. Concretely, we show that there exists an O(n log n/epsilon^2) space Monte-Carlo algorithm that can process a stream of edge insertions starting from an empty graph, and with high probability, the algorithm maintains a (1+epsilon)-approximation to the minimum cut. The algorithm has ~O(1) amortized update-time and constant query-time.},
  author       = {Goranci, Gramoz and Henzinger, Monika H and Thorup, Mikkel},
  booktitle    = {24th Annual European Symposium on Algorithms},
  isbn         = {978-3-95977-015-6},
  issn         = {1868-8969},
  location     = {Aarhus, Denmark},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Incremental exact min-cut in poly-logarithmic amortized update time}},
  doi          = {10.4230/LIPICS.ESA.2016.46},
  volume       = {57},
  year         = {2016},
}

@inproceedings{11835,
  abstract     = {During the last 10 years it has become popular to study dynamic graph problems in a emergency planning or sensitivity setting: Instead of considering the general fully dynamic problem, we only have to process a single batch update of size d; after the update we have to answer queries.

In this paper, we consider the dynamic subgraph connectivity problem with sensitivity d: We are given a graph of which some vertices are activated and some are deactivated. After that we get a single update in which the states of up to $d$ vertices are changed. Then we get a sequence of connectivity queries in the subgraph of activated vertices.

We present the first fully dynamic algorithm for this problem which has an update and query time only slightly worse than the best decremental algorithm. In addition, we present the first incremental algorithm which is tight with respect to the best known conditional lower bound; moreover, the algorithm is simple and we believe it is implementable and efficient in practice.},
  author       = {Henzinger, Monika H and Neumann, Stefan},
  booktitle    = {24th Annual European Symposium on Algorithms},
  isbn         = {978-3-95977-015-6},
  issn         = {1868-8969},
  location     = {Aarhus, Denmark},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Incremental and fully dynamic subgraph connectivity for emergency planning}},
  doi          = {10.4230/LIPICS.ESA.2016.48},
  volume       = {57},
  year         = {2016},
}

@inproceedings{11836,
  abstract     = {Given a graph where vertices are partitioned into k terminals and non-terminals, the goal is to compress the graph (i.e., reduce the number of non-terminals) using minor operations while preserving terminal distances approximately. The distortion of a compressed graph is the maximum multiplicative blow-up of distances between all pairs of terminals. We study the trade-off between the number of non-terminals and the distortion. This problem generalizes the Steiner Point Removal (SPR) problem, in which all non-terminals must be removed.

We introduce a novel black-box reduction to convert any lower bound on distortion for the SPR problem into a super-linear lower bound on the number of non-terminals, with the same distortion, for our problem. This allows us to show that there exist graphs such that every minor with distortion less than 2 / 2.5 / 3 must have Omega(k^2) / Omega(k^{5/4}) / Omega(k^{6/5}) non-terminals, plus more trade-offs in between. The black-box reduction has an interesting consequence: if the tight lower bound on distortion for the SPR problem is super-constant, then allowing any O(k) non-terminals will not help improving the lower bound to a constant.

We also build on the existing results on spanners, distance oracles and connected 0-extensions to show a number of upper bounds for general graphs, planar graphs, graphs that exclude a fixed minor and bounded treewidth graphs. Among others, we show that any graph admits a minor with O(log k) distortion and O(k^2) non-terminals, and any planar graph admits a minor with
1 + epsilon distortion and ~O((k/epsilon)^2) non-terminals.},
  author       = {Cheung, Yun Kuen and Goranci, Gramoz and Henzinger, Monika H},
  booktitle    = {43rd International Colloquium on Automata, Languages, and Programming},
  isbn         = {978-3-95977-013-2},
  issn         = {1868-8969},
  location     = {Rome, Italy},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Graph minors for preserving terminal distances approximately - lower and upper bounds}},
  doi          = {10.4230/LIPICS.ICALP.2016.131},
  volume       = {55},
  year         = {2016},
}

@article{1184,
  abstract     = {Across multicellular organisms, the costs of reproduction and self-maintenance result in a life history trade-off between fecundity and longevity. Queens of perennial social Hymenoptera are both highly fertile and long-lived, and thus, this fundamental trade-off is lacking. Whether social insect males similarly evade the fecundity/longevity trade-off remains largely unstudied. Wingless males of the ant genus Cardiocondyla stay in their natal colonies throughout their relatively long lives and mate with multiple female sexuals. Here, we show that Cardiocondyla obscurior males that were allowed to mate with large numbers of female sexuals had a shortened life span compared to males that mated at a low frequency or virgin males. Although frequent mating negatively affects longevity, males clearly benefit from a “live fast, die young strategy” by inseminating as many female sexuals as possible at a cost to their own survival.},
  author       = {Metzler, Sina and Heinze, Jürgen and Schrempf, Alexandra},
  journal      = {Ecology and Evolution},
  number       = {24},
  pages        = {8903 -- 8906},
  publisher    = {Wiley-Blackwell},
  title        = {{Mating and longevity in ant males}},
  doi          = {10.1002/ece3.2474},
  volume       = {6},
  year         = {2016},
}

@article{1185,
  abstract     = {The developmental programme of the pistil is under the control of both auxin and cytokinin. Crosstalk between these factors converges on regulation of the auxin carrier PIN-FORMED 1 (PIN1). Here, we show that in the triple transcription factor mutant cytokinin response factor 2 (crf2) crf3 crf6 both pistil length and ovule number were reduced. PIN1 expression was also lower in the triple mutant and the phenotypes could not be rescued by exogenous cytokinin application. pin1 complementation studies using genomic PIN1 constructs showed that the pistil phenotypes were only rescued when the PCRE1 domain, to which CRFs bind, was present. Without this domain, pin mutants resemble the crf2 crf3 crf6 triple mutant, indicating the pivotal role of CRFs in auxin-cytokinin crosstalk.},
  author       = {Cucinotta, Mara and Manrique, Silvia and Guazzotti, Andrea and Quadrelli, Nadia and Mendes, Marta and Benková, Eva and Colombo, Lucia},
  journal      = {Development},
  number       = {23},
  pages        = {4419 -- 4424},
  publisher    = {Company of Biologists},
  title        = {{Cytokinin response factors integrate auxin and cytokinin pathways for female reproductive organ development}},
  doi          = {10.1242/dev.143545},
  volume       = {143},
  year         = {2016},
}

@article{1186,
  abstract     = {The human pathogen Streptococcus pneumoniae is decorated with a special class of surface-proteins known as choline-binding proteins (CBPs) attached to phosphorylcholine (PCho) moieties from cell-wall teichoic acids. By a combination of X-ray crystallography, NMR, molecular dynamics techniques and in vivo virulence and phagocytosis studies, we provide structural information of choline-binding protein L (CbpL) and demonstrate its impact on pneumococcal pathogenesis and immune evasion. CbpL is a very elongated three-module protein composed of (i) an Excalibur Ca 2+ -binding domain -reported in this work for the very first time-, (ii) an unprecedented anchorage module showing alternate disposition of canonical and non-canonical choline-binding sites that allows vine-like binding of fully-PCho-substituted teichoic acids (with two choline moieties per unit), and (iii) a Ltp-Lipoprotein domain. Our structural and infection assays indicate an important role of the whole multimodular protein allowing both to locate CbpL at specific places on the cell wall and to interact with host components in order to facilitate pneumococcal lung infection and transmigration from nasopharynx to the lungs and blood. CbpL implication in both resistance against killing by phagocytes and pneumococcal pathogenesis further postulate this surface-protein as relevant among the pathogenic arsenal of the pneumococcus.},
  author       = {Gutierrez-Fernandez, Javier and Saleh, Malek and Alcorlo, Martín and Gómez Mejía, Alejandro and Pantoja Uceda, David and Treviño, Miguel and Voß, Franziska and Abdullah, Mohammed and Galán Bartual, Sergio and Seinen, Jolien and Sánchez Murcia, Pedro and Gago, Federico and Bruix, Marta and Hammerschmidt, Sven and Hermoso, Juan},
  journal      = {Scientific Reports},
  publisher    = {Nature Publishing Group},
  title        = {{Modular architecture and unique teichoic acid recognition features of choline-binding protein L CbpL contributing to pneumococcal pathogenesis}},
  doi          = {10.1038/srep38094},
  volume       = {6},
  year         = {2016},
}

@inproceedings{11866,
  abstract     = {We present a deterministic (1+o(1))-approximation O(n1/2+o(1)+D1+o(1))-time algorithm for solving the single-source shortest paths problem on distributed weighted networks (the CONGEST model); here n is the number of nodes in the network and D is its (hop) diameter. This is the first non-trivial deterministic algorithm for this problem. It also improves (i) the running time of the randomized (1+o(1))-approximation Õ(n1/2D1/4+D)-time algorithm of Nanongkai [STOC 2014] by a factor of as large as n1/8, and (ii) the O(є−1logє−1)-approximation factor of Lenzen and Patt-Shamir’s Õ(n1/2+є+D)-time algorithm [STOC 2013] within the same running time. Our running time matches the known time lower bound of Ω(n1/2/logn + D) [Das Sarma et al. STOC 2011] modulo some lower-order terms, thus essentially settling the status of this problem which was raised at least a decade ago [Elkin SIGACT News 2004]. It also implies a (2+o(1))-approximation O(n1/2+o(1)+D1+o(1))-time algorithm for approximating a network’s weighted diameter which almost matches the lower bound by Holzer et al. [PODC 2012].

In achieving this result, we develop two techniques which might be of independent interest and useful in other settings: (i) a deterministic process that replaces the “hitting set argument” commonly used for shortest paths computation in various settings, and (ii) a simple, deterministic, construction of an (no(1), o(1))-hop set of size O(n1+o(1)). We combine these techniques with many distributed algorithmic techniques, some of which from problems that are not directly related to shortest paths, e.g. ruling sets [Goldberg et al. STOC 1987], source detection [Lenzen, Peleg PODC 2013], and partial distance estimation [Lenzen, Patt-Shamir PODC 2015]. Our hop set construction also leads to single-source shortest paths algorithms in two other settings: (i) a (1+o(1))-approximation O(no(1))-time algorithm on congested cliques, and (ii) a (1+o(1))-approximation O(no(1)logW)-pass O(n1+o(1)logW)-space streaming algorithm, when edge weights are in {1, 2, …, W}. The first result answers an open problem in [Nanongkai, STOC 2014]. The second result partially answers an open problem raised by McGregor in 2006 [sublinear.info, Problem 14].},
  author       = {Henzinger, Monika H and Krinninger, Sebastian and Nanongkai, Danupon},
  booktitle    = {48th Annual ACM SIGACT Symposium on Theory of Computing},
  isbn         = {978-145034132-5},
  issn         = {0737-8017},
  location     = {Cambridge, MA, United States},
  pages        = {489 -- 498},
  publisher    = {Association for Computing Machinery},
  title        = {{A deterministic almost-tight distributed algorithm for approximating single-source shortest paths}},
  doi          = {10.1145/2897518.2897638},
  year         = {2016},
}

