@article{626,
abstract = {Our focus here is on the infinitesimal model. In this model, one or several quantitative traits are described as the sum of a genetic and a non-genetic component, the first being distributed within families as a normal random variable centred at the average of the parental genetic components, and with a variance independent of the parental traits. Thus, the variance that segregates within families is not perturbed by selection, and can be predicted from the variance components. This does not necessarily imply that the trait distribution across the whole population should be Gaussian, and indeed selection or population structure may have a substantial effect on the overall trait distribution. One of our main aims is to identify some general conditions on the allelic effects for the infinitesimal model to be accurate. We first review the long history of the infinitesimal model in quantitative genetics. Then we formulate the model at the phenotypic level in terms of individual trait values and relationships between individuals, but including different evolutionary processes: genetic drift, recombination, selection, mutation, population structure, …. We give a range of examples of its application to evolutionary questions related to stabilising selection, assortative mating, effective population size and response to selection, habitat preference and speciation. We provide a mathematical justification of the model as the limit as the number M of underlying loci tends to infinity of a model with Mendelian inheritance, mutation and environmental noise, when the genetic component of the trait is purely additive. We also show how the model generalises to include epistatic effects. We prove in particular that, within each family, the genetic components of the individual trait values in the current generation are indeed normally distributed with a variance independent of ancestral traits, up to an error of order 1∕M. 
Simulations suggest that in some cases the convergence may be as fast as 1∕M.},
author = {Barton, Nicholas H and Etheridge, Alison and Véber, Amandine},
issn = {00405809},
journal = {Theoretical Population Biology},
pages = {50 -- 73},
publisher = {Academic Press},
title = {{The infinitesimal model: Definition, derivation, and implications}},
doi = {10.1016/j.tpb.2017.06.001},
volume = {118},
year = {2017},
}
@article{627,
abstract = {Beige adipocytes are a new type of recruitable brownish adipocytes, with highly mitochondrial membrane uncoupling protein 1 expression and thermogenesis. Beige adipocytes were found among white adipocytes, especially in subcutaneous white adipose tissue (sWAT). Therefore, beige adipocytes may be involved in the regulation of energy metabolism and fat deposition. Transient receptor potential melastatin 8 (TRPM8), a Ca2+-permeable non-selective cation channel, plays vital roles in the regulation of various cellular functions. It has been reported that TRPM8 activation enhanced the thermogenic function of brown adiposytes. However, the involvement of TRPM8 in the thermogenic function of WAT remains unexplored. Our data revealed that TRPM8 was expressed in mouse white adipocytes at mRNA, protein and functional levels. The mRNA expression of Trpm8 was significantly increased in the differentiated white adipocytes than pre-adipocytes. Moreover, activation of TRPM8 by menthol enhanced the expression of thermogenic genes in cultured white aidpocytes. And menthol-induced increases of the thermogenic genes in white adipocytes was inhibited by either KT5720 (a protein kinase A inhibitor) or BAPTA-AM. In addition, high fat diet (HFD)-induced obesity in mice was significantly recovered by co-treatment with menthol. Dietary menthol enhanced WAT "browning" and improved glucose metabolism in HFD-induced obesity mice as well. Therefore, we concluded that TRPM8 might be involved in WAT "browning" by increasing the expression levels of genes related to thermogenesis and energy metabolism. And dietary menthol could be a novel approach for combating human obesity and related metabolic diseases.},
author = {Jiang, Changyu and Zhai, Ming-Zhu and Yan, Dong and Li, Da and Li, Chen and Zhang, Yonghong and Xiao, Lizu and Xiong, Donglin and Deng, Qiwen and Sun, Wuping},
issn = {19492553},
journal = {Oncotarget},
number = {43},
pages = {75114 -- 75126},
publisher = {Impact Journals LLC},
title = {{Dietary menthol-induced TRPM8 activation enhances WAT ``browning'' and ameliorates diet-induced obesity}},
doi = {10.18632/oncotarget.20540},
volume = {8},
year = {2017},
}
@inproceedings{628,
abstract = {We consider the problem of developing automated techniques for solving recurrence relations to aid the expected-runtime analysis of programs. The motivation is that several classical textbook algorithms have quite efficient expected-runtime complexity, whereas the corresponding worst-case bounds are either inefficient (e.g., Quick-Sort), or completely ineffective (e.g., Coupon-Collector). Since the main focus of expected-runtime analysis is to obtain efficient bounds, we consider bounds that are either logarithmic, linear or almost-linear (O(log n), O(n), O(n · log n), respectively, where n represents the input size). Our main contribution is an efficient (simple linear-time algorithm) sound approach for deriving such expected-runtime bounds for the analysis of recurrence relations induced by randomized algorithms. The experimental results show that our approach can efficiently derive asymptotically optimal expected-runtime bounds for recurrences of classical randomized algorithms, including Randomized-Search, Quick-Sort, Quick-Select, Coupon-Collector, where the worst-case bounds are either inefficient (such as linear as compared to logarithmic expected-runtime complexity, or quadratic as compared to linear or almost-linear expected-runtime complexity), or ineffective.},
author = {Chatterjee, Krishnendu and Fu, Hongfei and Murhekar, Aniket},
editor = {Majumdar, Rupak and Kunčak, Viktor},
isbn = {978-331963386-2},
location = {Heidelberg, Germany},
pages = {118 -- 139},
publisher = {Springer},
title = {{Automated recurrence analysis for almost linear expected runtime bounds}},
doi = {10.1007/978-3-319-63387-9_6},
volume = {10426},
year = {2017},
}
@phdthesis{6287,
abstract = {The main objects considered in the present work are simplicial and CW-complexes with vertices forming a random point cloud. In particular, we consider a Poisson point process in R^n and study Delaunay and Voronoi complexes of the first and higher orders and weighted Delaunay complexes obtained as sections of Delaunay complexes, as well as the Čech complex. Further, we examine the Delaunay complex of a Poisson point process on the sphere S^n, as well as of a uniform point cloud, which is equivalent to the convex hull, providing a connection to the theory of random polytopes. Each of the complexes in question can be endowed with a radius function, which maps its cells to the radii of appropriately chosen circumspheres, called the radius of the cell. Applying and developing discrete Morse theory for these functions, joining it together with probabilistic and sometimes analytic machinery, and developing several integral geometric tools, we aim at getting the distributions of circumradii of typical cells. For all considered complexes, we are able to generalize and obtain up to constants the distribution of radii of typical intervals of all types. In low dimensions the constants can be computed explicitly, thus providing the explicit expressions for the expected numbers of cells. In particular, it allows to find the expected density of simplices of every dimension for a Poisson point process in R^4, whereas the result for R^3 was known already in 1970's.},
author = {Nikitenko, Anton},
pages = {86},
publisher = {IST Austria},
title = {{Discrete Morse theory for random complexes}},
doi = {10.15479/AT:ISTA:th_873},
year = {2017},
}
@inbook{629,
abstract = {Even simple cells like bacteria have precisely regulated cellular anatomies, which allow them to grow, divide and to respond to internal or external cues with high fidelity. How spatial and temporal intracellular organization in prokaryotic cells is achieved and maintained on the basis of locally interacting proteins still remains largely a mystery. Bulk biochemical assays with purified components and in vivo experiments help us to approach key cellular processes from two opposite ends, in terms of minimal and maximal complexity. However, to understand how cellular phenomena emerge, that are more than the sum of their parts, we have to assemble cellular subsystems step by step from the bottom up. Here, we review recent in vitro reconstitution experiments with proteins of the bacterial cell division machinery and illustrate how they help to shed light on fundamental cellular mechanisms that constitute spatiotemporal order and regulate cell division.},
author = {Loose, Martin and Zieske, Katja and Schwille, Petra},
booktitle = {Prokaryotic Cytoskeletons},
pages = {419 -- 444},
publisher = {Springer},
title = {{Reconstitution of protein dynamics involved in bacterial cell division}},
doi = {10.1007/978-3-319-53047-5_15},
volume = {84},
year = {2017},
}
@phdthesis{6291,
abstract = {Bacteria and their pathogens – phages – are the most abundant living entities on Earth. Throughout their coevolution, bacteria have evolved multiple immune systems to overcome the ubiquitous threat from the phages. Although the molecular details of these immune systems’ functions are relatively well understood, their epidemiological consequences for the phage-bacterial communities have been largely neglected. In this thesis we employed both experimental and theoretical methods to explore whether herd and social immunity may arise in bacterial populations. Using our experimental system consisting of Escherichia coli strains with a CRISPR based immunity to the T7 phage we show that herd immunity arises in phage-bacterial communities and that it is accentuated when the populations are spatially structured. By fitting a mathematical model, we inferred expressions for the herd immunity threshold and the velocity of spread of a phage epidemic in partially resistant bacterial populations, which both depend on the bacterial growth rate, phage burst size and phage latent period. We also investigated the potential for social immunity in Streptococcus thermophilus and its phage 2972 using a bioinformatic analysis of potentially coding short open reading frames with a signalling signature, encoded within the CRISPR associated genes. Subsequently, we tested one identified potentially signalling peptide and found that its addition to a phage-challenged culture increases probability of survival of bacteria two fold, although the results were only marginally significant. Together, these results demonstrate that the ubiquitous arms races between bacteria and phages have further consequences at the level of the population.},
author = {Payne, Pavel},
pages = {83},
publisher = {IST Austria},
title = {{Bacterial herd and social immunity to phages}},
year = {2017},
}
@inproceedings{630,
abstract = {Background: Standards have become available to share semantically encoded vital parameters from medical devices, as required for example by personal healthcare records. Standardised sharing of biosignal data largely remains open. Objectives: The goal of this work is to explore available biosignal file format and data exchange standards and profiles, and to conceptualise end-to-end solutions. Methods: The authors reviewed and discussed available biosignal file format standards with other members of international standards development organisations (SDOs). Results: A raw concept for standards based acquisition, storage, archiving and sharing of biosignals was developed. The GDF format may serve for storing biosignals. Signals can then be shared using FHIR resources and may be stored on FHIR servers or in DICOM archives, with DICOM waveforms as one possible format. Conclusion: Currently a group of international SDOs (e.g. HL7, IHE, DICOM, IEEE) is engaged in intensive discussions. This discussion extends existing work that already was adopted by large implementer communities. The concept presented here only reports the current status of the discussion in Austria. The discussion will continue internationally, with results to be expected over the coming years.},
author = {Sauermann, Stefan and David, Veronika and Schlögl, Alois and Egelkraut, Reinhard and Frohner, Matthias and Pohn, Birgit and Urbauer, Philipp and Mense, Alexander},
isbn = {978-161499758-0},
location = {Vienna, Austria},
pages = {356 -- 362},
publisher = {IOS Press},
title = {{Biosignals standards and FHIR: The way to go}},
doi = {10.3233/978-1-61499-759-7-356},
volume = {236},
year = {2017},
}
@inproceedings{631,
abstract = {Template polyhedra generalize intervals and octagons to polyhedra whose facets are orthogonal to a given set of arbitrary directions. They have been employed in the abstract interpretation of programs and, with particular success, in the reachability analysis of hybrid automata. While previously, the choice of directions has been left to the user or a heuristic, we present a method for the automatic discovery of directions that generalize and eliminate spurious counterexamples. We show that for the class of convex hybrid automata, i.e., hybrid automata with (possibly nonlinear) convex constraints on derivatives, such directions always exist and can be found using convex optimization. We embed our method inside a CEGAR loop, thus enabling the time-unbounded reachability analysis of an important and richer class of hybrid automata than was previously possible. We evaluate our method on several benchmarks, demonstrating also its superior efficiency for the special case of linear hybrid automata.},
author = {Bogomolov, Sergiy and Frehse, Goran and Giacobbe, Mirco and Henzinger, Thomas A},
isbn = {978-366254576-8},
location = {Uppsala, Sweden},
pages = {589 -- 606},
publisher = {Springer},
title = {{Counterexample guided refinement of template polyhedra}},
doi = {10.1007/978-3-662-54577-5_34},
volume = {10205},
year = {2017},
}
@article{632,
abstract = {We consider a 2D quantum system of N bosons in a trapping potential |x|s, interacting via a pair potential of the form N2β−1 w(Nβ x). We show that for all 0 < β < (s + 1)/(s + 2), the leading order behavior of ground states of the many-body system is described in the large N limit by the corresponding cubic nonlinear Schrödinger energy functional. Our result covers the focusing case (w < 0) where even the stability of the many-body system is not obvious. This answers an open question mentioned by X. Chen and J. Holmer for harmonic traps (s = 2). Together with the BBGKY hierarchy approach used by these authors, our result implies the convergence of the many-body quantum dynamics to the focusing NLS equation with harmonic trap for all 0 < β < 3/4. },
author = {Lewin, Mathieu and Nam, Phan and Rougerie, Nicolas},
journal = {Proceedings of the American Mathematical Society},
number = {6},
pages = {2441 -- 2454},
publisher = {American Mathematical Society},
title = {{A note on 2D focusing many boson systems}},
doi = {10.1090/proc/13468},
volume = {145},
year = {2017},
}
@inproceedings{633,
abstract = {A Rapidly-exploring Random Tree (RRT) is an algorithm which can search a non-convex region of space by incrementally building a space-filling tree. The tree is constructed from random points drawn from system’s state space and is biased to grow towards large unexplored areas in the system. RRT can provide better coverage of a system’s possible behaviors compared with random simulations, but is more lightweight than full reachability analysis. In this paper, we explore some of the design decisions encountered while implementing a hybrid extension of the RRT algorithm, which have not been elaborated on before. In particular, we focus on handling non-determinism, which arises due to discrete transitions. We introduce the notion of important points to account for this phenomenon. We showcase our ideas using heater and navigation benchmarks.},
author = {Bak, Stanley and Bogomolov, Sergiy and Henzinger, Thomas A and Kumar, Aviral},
editor = {Abate, Alessandro and Bodo, Sylvie},
isbn = {978-331963500-2},
location = {Heidelberg, Germany},
pages = {83 -- 89},
publisher = {Springer},
title = {{Challenges and tool implementation of hybrid rapidly exploring random trees}},
doi = {10.1007/978-3-319-63501-9_6},
volume = {10381},
year = {2017},
}
@inbook{634,
abstract = {As autism spectrum disorder (ASD) is largely regarded as a neurodevelopmental condition, long-time consensus was that its hallmark features are irreversible. However, several studies from recent years using defined mouse models of ASD have provided clear evidence that in mice neurobiological and behavioural alterations can be ameliorated or even reversed by genetic restoration or pharmacological treatment either before or after symptom onset. Here, we review findings on genetic and pharmacological reversibility of phenotypes in mouse models of ASD. Our review should give a comprehensive overview on both aspects and encourage future studies to better understand the underlying molecular mechanisms that might be translatable from animals to humans.},
author = {Schroeder, Jan and Deliu, Elena and Novarino, Gaia and Schmeisser, Michael},
booktitle = {Translational Anatomy and Cell Biology of Autism Spectrum Disorder},
editor = {Schmeisser, Michael and Boekers, Tobias},
pages = {189 -- 211},
publisher = {Springer},
title = {{Genetic and pharmacological reversibility of phenotypes in mouse models of autism spectrum disorder}},
doi = {10.1007/978-3-319-52498-6_10},
volume = {224},
year = {2017},
}
@inproceedings{635,
abstract = {Memory-hard functions (MHFs) are hash algorithms whose evaluation cost is dominated by memory cost. As memory, unlike computation, costs about the same across different platforms, MHFs cannot be evaluated at significantly lower cost on dedicated hardware like ASICs. MHFs have found widespread applications including password hashing, key derivation, and proofs-of-work. This paper focuses on scrypt, a simple candidate MHF designed by Percival, and described in RFC 7914. It has been used within a number of cryptocurrencies (e.g., Litecoin and Dogecoin) and has been an inspiration for Argon2d, one of the winners of the recent password-hashing competition. Despite its popularity, no rigorous lower bounds on its memory complexity are known. We prove that scrypt is optimally memory-hard, i.e., its cumulative memory complexity (cmc) in the parallel random oracle model is Ω(n2w), where w and n are the output length and number of invocations of the underlying hash function, respectively. High cmc is a strong security target for MHFs introduced by Alwen and Serbinenko (STOC’15) which implies high memory cost even for adversaries who can amortize the cost over many evaluations and evaluate the underlying hash functions many times in parallel. Our proof is the first showing optimal memory-hardness for any MHF. Our result improves both quantitatively and qualitatively upon the recent work by Alwen et al. (EUROCRYPT’16) who proved a weaker lower bound of Ω(n2w/ log2 n) for a restricted class of adversaries.},
author = {Alwen, Joel F and Chen, Binchi and Pietrzak, Krzysztof Z and Reyzin, Leonid and Tessaro, Stefano},
editor = {Coron, Jean-Sébastien and Buus Nielsen, Jesper},
isbn = {978-331956616-0},
location = {Paris, France},
pages = {33 -- 62},
publisher = {Springer},
title = {{Scrypt is maximally memory hard}},
doi = {10.1007/978-3-319-56617-7_2},
volume = {10212},
year = {2017},
}
@inproceedings{636,
abstract = {Signal regular expressions can specify sequential properties of real-valued signals based on threshold conditions, regular operations, and duration constraints. In this paper we endow them with a quantitative semantics which indicates how robustly a signal matches or does not match a given expression. First, we show that this semantics is a safe approximation of a distance between the signal and the language defined by the expression. Then, we consider the robust matching problem, that is, computing the quantitative semantics of every segment of a given signal relative to an expression. We present an algorithm that solves this problem for piecewise-constant and piecewise-linear signals and show that for such signals the robustness map is a piecewise-linear function. The availability of an indicator describing how robustly a signal segment matches some regular pattern provides a general framework for quantitative monitoring of cyber-physical systems.},
author = {Bakhirkin, Alexey and Ferrere, Thomas and Maler, Oded and Ulus, Dogan},
editor = {Abate, Alessandro and Geeraerts, Gilles},
isbn = {978-331965764-6},
location = {Berlin, Germany},
pages = {189 -- 206},
publisher = {Springer},
title = {{On the quantitative semantics of regular expressions over real-valued signals}},
doi = {10.1007/978-3-319-65765-3_11},
volume = {10419},
year = {2017},
}
@inproceedings{637,
abstract = {For many cryptographic primitives, it is relatively easy to achieve selective security (where the adversary commits a-priori to some of the choices to be made later in the attack) but appears difficult to achieve the more natural notion of adaptive security (where the adversary can make all choices on the go as the attack progresses). A series of several recent works shows how to cleverly achieve adaptive security in several such scenarios including generalized selective decryption (Panjwani, TCC ’07 and Fuchsbauer et al., CRYPTO ’15), constrained PRFs (Fuchsbauer et al., ASIACRYPT ’14), and Yao garbled circuits (Jafargholi and Wichs, TCC ’16b). Although the above works expressed vague intuition that they share a common technique, the connection was never made precise. In this work we present a new framework that connects all of these works and allows us to present them in a unified and simplified fashion. Moreover, we use the framework to derive a new result for adaptively secure secret sharing over access structures defined via monotone circuits. We envision that further applications will follow in the future. Underlying our framework is the following simple idea. It is well known that selective security, where the adversary commits to n-bits of information about his future choices, automatically implies adaptive security at the cost of amplifying the adversary’s advantage by a factor of up to 2n. However, in some cases the proof of selective security proceeds via a sequence of hybrids, where each pair of adjacent hybrids locally only requires some smaller partial information consisting of m ≪ n bits. The partial information needed might be completely different between different pairs of hybrids, and if we look across all the hybrids we might rely on the entire n-bit commitment. Nevertheless, the above is sufficient to prove adaptive security, at the cost of amplifying the adversary’s advantage by a factor of only 2m ≪ 2n. 
In all of our examples using the above framework, the different hybrids are captured by some sort of a graph pebbling game and the amount of information that the adversary needs to commit to in each pair of hybrids is bounded by the maximum number of pebbles in play at any point in time. Therefore, coming up with better strategies for proving adaptive security translates to various pebbling strategies for different types of graphs.},
author = {Jafargholi, Zahra and Kamath Hosdurg, Chethan and Klein, Karen and Komargodski, Ilan and Pietrzak, Krzysztof Z and Wichs, Daniel},
editor = {Katz, Jonathan and Shacham, Hovav},
isbn = {978-331963687-0},
location = {Santa Barbara, CA, United States},
pages = {133 -- 163},
publisher = {Springer},
title = {{Be adaptive avoid overcommitting}},
doi = {10.1007/978-3-319-63688-7_5},
volume = {10401},
year = {2017},
}
@inproceedings{640,
abstract = {Data-independent Memory Hard Functions (iMHFS) are finding a growing number of applications in security; especially in the domain of password hashing. An important property of a concrete iMHF is specified by fixing a directed acyclic graph (DAG) Gn on n nodes. The quality of that iMHF is then captured by the following two pebbling complexities of Gn: – The parallel cumulative pebbling complexity Π∥cc(Gn) must be as high as possible (to ensure that the amortized cost of computing the function on dedicated hardware is dominated by the cost of memory). – The sequential space-time pebbling complexity Πst(Gn) should be as close as possible to Π∥cc(Gn) (to ensure that using many cores in parallel and amortizing over many instances does not give much of an advantage). In this paper we construct a family of DAGs with best possible parameters in an asymptotic sense, i.e., where Π∥cc(Gn) = Ω(n2/ log(n)) (which matches a known upper bound) and Πst(Gn) is within a constant factor of Π∥cc(Gn). Our analysis relies on a new connection between the pebbling complexity of a DAG and its depth-robustness (DR) – a well studied combinatorial property. We show that high DR is sufficient for high Π∥cc. Alwen and Blocki (CRYPTO’16) showed that high DR is necessary and so, together, these results fully characterize DAGs with high Π∥cc in terms of DR. Complementing these results, we provide new upper and lower bounds on the Π∥cc of several important candidate iMHFs from the literature. We give the first lower bounds on the memory hardness of the Catena and Balloon Hashing functions in a parallel model of computation and we give the first lower bounds of any kind for (a version) of Argon2i. Finally we describe a new class of pebbling attacks improving on those of Alwen and Blocki (CRYPTO’16). By instantiating these attacks we upperbound the Π∥cc of the Password Hashing Competition winner Argon2i and one of the Balloon Hashing functions by O (n1.71). 
We also show an upper bound of O(n1.625) for the Catena functions and the two remaining Balloon Hashing functions.},
author = {Alwen, Joel F and Blocki, Jeremiah and Pietrzak, Krzysztof Z},
editor = {Coron, Jean-Sébastien and Buus Nielsen, Jesper},
isbn = {978-331956616-0},
location = {Paris, France},
pages = {3 -- 32},
publisher = {Springer},
title = {{Depth-robust graphs and their cumulative memory complexity}},
doi = {10.1007/978-3-319-56617-7_1},
volume = {10212},
year = {2017},
}
@inproceedings{641,
abstract = {We introduce two novel methods for learning parameters of graphical models for image labelling. The following two tasks underline both methods: (i) perturb model parameters based on given features and ground truth labelings, so as to exactly reproduce these labelings as optima of the local polytope relaxation of the labelling problem; (ii) train a predictor for the perturbed model parameters so that improved model parameters can be applied to the labelling of novel data. Our first method implements task (i) by inverse linear programming and task (ii) using a regressor e.g. a Gaussian process. Our second approach simultaneously solves tasks (i) and (ii) in a joint manner, while being restricted to linearly parameterised predictors. Experiments demonstrate the merits of both approaches.},
author = {Trajkovska, Vera and Swoboda, Paul and Åström, Freddie and Petra, Stefanie},
editor = {Lauze, François and Dong, Yiqiu and Bjorholm Dahl, Anders},
isbn = {978-331958770-7},
location = {Kolding, Denmark},
pages = {323 -- 334},
publisher = {Springer},
title = {{Graphical model parameter learning by inverse linear programming}},
doi = {10.1007/978-3-319-58771-4_26},
volume = {10302},
year = {2017},
}
@article{642,
abstract = {Cauchy problems with SPDEs on the whole space are localized to Cauchy problems on a ball of radius R. This localization reduces various kinds of spatial approximation schemes to finite dimensional problems. The error is shown to be exponentially small. As an application, a numerical scheme is presented which combines the localization and the space and time discretization, and thus is fully implementable.},
author = {Gerencser, Mate and Gyöngy, István},
issn = {00255718},
journal = {Mathematics of Computation},
number = {307},
pages = {2373 -- 2397},
publisher = {American Mathematical Society},
title = {{Localization errors in solving stochastic partial differential equations in the whole space}},
doi = {10.1090/mcom/3201},
volume = {86},
year = {2017},
}
@misc{6426,
abstract = {Synchronous programs are easy to specify because the side effects of an operation are finished by the time the invocation of the operation returns to the caller. Asynchronous programs, on the other hand, are difficult to specify because there are side effects due to pending computation scheduled as a result of the invocation of an operation. They are also difficult to verify because of the large number of possible interleavings of concurrent asynchronous computation threads. We show that specifications and correctness proofs for asynchronous programs can be structured by introducing the fiction, for proof purposes, that intermediate, non-quiescent states of asynchronous operations can be ignored. Then, the task of specification becomes relatively simple and the task of verification can be naturally decomposed into smaller sub-tasks. The sub-tasks iteratively summarize, guided by the structure of an asynchronous program, the atomic effect of non-atomic operations and the synchronous effect of asynchronous operations. This structuring of specifications and proofs corresponds to the introduction of multiple layers of stepwise refinement for asynchronous programs. We present the first proof rule, called synchronization, to reduce asynchronous invocations on a lower layer to synchronous invocations on a higher layer. We implemented our proof method in CIVL and evaluated it on a collection of benchmark programs.},
author = {Henzinger, Thomas A and Kragl, Bernhard and Qadeer, Shaz},
issn = {2664-1690},
pages = {28},
publisher = {IST Austria},
title = {{Synchronizing the asynchronous}},
doi = {10.15479/AT:IST-2018-853-v2-2},
year = {2017},
}
@article{643,
abstract = {It has been reported that nicotinamide-overload induces oxidative stress associated with insulin resistance, the key feature of type 2 diabetes mellitus (T2DM). This study aimed to investigate the effects of B vitamins in T2DM. Glucose tolerance tests (GTT) were carried out in adult Sprague-Dawley rats treated with or without cumulative doses of B vitamins. More specifically, insulin tolerance tests (ITT) were also carried out in adult Sprague-Dawley rats treated with or without cumulative doses of Vitamin B3. We found that cumulative Vitamin B1 and Vitamin B3 administration significantly increased the plasma H2O2 levels associated with high insulin levels. Only Vitamin B3 reduced muscular and hepatic glycogen contents. Cumulative administration of nicotinic acid, another form of Vitamin B3, also significantly increased plasma insulin level and H2O2 generation. Moreover, cumulative administration of nicotinic acid or nicotinamide impaired glucose metabolism. This study suggested that excess Vitamin B1 and Vitamin B3 caused oxidative stress and insulin resistance.},
author = {Sun, Wuping and Zhai, Ming-Zhu and Zhou, Qian and Qian, Chengrui and Jiang, Changyu},
issn = {03044920},
journal = {Chinese Journal of Physiology},
number = {4},
pages = {207 -- 214},
publisher = {Chinese Physiological Society},
title = {{Effects of B vitamins overload on plasma insulin level and hydrogen peroxide generation in rats}},
doi = {10.4077/CJP.2017.BAF469},
volume = {60},
year = {2017},
}
@article{644,
abstract = {An instance of the valued constraint satisfaction problem (VCSP) is given by a finite set of variables, a finite domain of labels, and a sum of functions, each function depending on a subset of the variables. Each function can take finite values specifying costs of assignments of labels to its variables or the infinite value, which indicates an infeasible assignment. The goal is to find an assignment of labels to the variables that minimizes the sum. We study, assuming that P ≠ NP, how the complexity of this very general problem depends on the set of functions allowed in the instances, the so-called constraint language. The case when all allowed functions take values in {0,1} corresponds to ordinary CSPs, where one deals only with the feasibility issue, and there is no optimization. This case is the subject of the algebraic CSP dichotomy conjecture predicting for which constraint languages CSPs are tractable (i.e., solvable in polynomial time) and for which they are NP-hard. The case when all allowed functions take only finite values corresponds to a finite-valued CSP, where the feasibility aspect is trivial and one deals only with the optimization issue. The complexity of finite-valued CSPs was fully classified by Thapper and Živný. An algebraic necessary condition for tractability of a general-valued CSP with a fixed constraint language was recently given by Kozik and Ochremiak. As our main result, we prove that if a constraint language satisfies this algebraic necessary condition, and the feasibility CSP (i.e., the problem of deciding whether a given instance has a feasible solution) corresponding to the VCSP with this language is tractable, then the VCSP is tractable. The algorithm is a simple combination of the assumed algorithm for the feasibility CSP and the standard LP relaxation. As a corollary, we obtain that a dichotomy for ordinary CSPs would imply a dichotomy for general-valued CSPs.},
author = {Kolmogorov, Vladimir and Krokhin, Andrei and Rolinek, Michal},
journal = {SIAM Journal on Computing},
number = {3},
pages = {1087 -- 1110},
publisher = {SIAM},
title = {{The complexity of general-valued CSPs}},
doi = {10.1137/16M1091836},
volume = {46},
year = {2017},
}
@inproceedings{645,
abstract = {Markov decision processes (MDPs) are standard models for probabilistic systems with non-deterministic behaviours. Long-run average rewards provide a mathematically elegant formalism for expressing long term performance. Value iteration (VI) is one of the simplest and most efficient algorithmic approaches to MDPs with other properties, such as reachability objectives. Unfortunately, a naive extension of VI does not work for MDPs with long-run average rewards, as there is no known stopping criterion. In this work our contributions are threefold. (1) We refute a conjecture related to stopping criteria for MDPs with long-run average rewards. (2) We present two practical algorithms for MDPs with long-run average rewards based on VI. First, we show that a combination of applying VI locally for each maximal end-component (MEC) and VI for reachability objectives can provide approximation guarantees. Second, extending the above approach with a simulation-guided on-demand variant of VI, we present an anytime algorithm that is able to deal with very large models. (3) Finally, we present experimental results showing that our methods significantly outperform the standard approaches on several benchmarks.},
author = {Ashok, Pranav and Chatterjee, Krishnendu and Daca, Przemyslaw and Kretinsky, Jan and Meggendorfer, Tobias},
editor = {Majumdar, Rupak and Kunčak, Viktor},
isbn = {978-331963386-2},
location = {Heidelberg, Germany},
pages = {201 -- 221},
publisher = {Springer},
title = {{Value iteration for long run average reward in Markov decision processes}},
doi = {10.1007/978-3-319-63387-9_10},
volume = {10426},
year = {2017},
}
@inproceedings{646,
abstract = {We present a novel convex relaxation and a corresponding inference algorithm for the non-binary discrete tomography problem, that is, reconstructing discrete-valued images from few linear measurements. In contrast to state of the art approaches that split the problem into a continuous reconstruction problem for the linear measurement constraints and a discrete labeling problem to enforce discrete-valued reconstructions, we propose a joint formulation that addresses both problems simultaneously, resulting in a tighter convex relaxation. For this purpose a constrained graphical model is set up and evaluated using a novel relaxation optimized by dual decomposition. We evaluate our approach experimentally and show superior solutions both mathematically (tighter relaxation) and experimentally in comparison to previously proposed relaxations.},
author = {Kuske, Jan and Swoboda, Paul and Petra, Stefanie},
editor = {Lauze, François and Dong, Yiqiu and Bjorholm Dahl, Anders},
isbn = {978-331958770-7},
location = {Kolding, Denmark},
pages = {235 -- 246},
publisher = {Springer},
title = {{A novel convex relaxation for non binary discrete tomography}},
doi = {10.1007/978-3-319-58771-4_19},
volume = {10302},
year = {2017},
}
@inproceedings{647,
abstract = {Despite researchers’ efforts in the last couple of decades, reachability analysis is still a challenging problem even for linear hybrid systems. Among the existing approaches, the most practical ones are mainly based on bounded-time reachable set over-approximations. For the purpose of unbounded-time analysis, one important strategy is to abstract the original system and find an invariant for the abstraction. In this paper, we propose an approach to constructing a new kind of abstraction called conic abstraction for affine hybrid systems, and to computing reachable sets based on this abstraction. The essential feature of a conic abstraction is that it partitions the state space of a system into a set of convex polyhedral cones which is derived from a uniform conic partition of the derivative space. Such a set of polyhedral cones is able to cut all trajectories of the system into almost straight segments so that every segment of a reach pipe in a polyhedral cone tends to be straight as well, and hence can be over-approximated tightly by polyhedra using similar techniques as HyTech or PHAVer. In particular, for diagonalizable affine systems, our approach can guarantee to find an invariant for unbounded reachable sets, which is beyond the capability of bounded-time reachability analysis tools. We implemented the approach in a tool and experiments on benchmarks show that our approach is more powerful than SpaceEx and PHAVer in dealing with diagonalizable systems.},
author = {Bogomolov, Sergiy and Giacobbe, Mirco and Henzinger, Thomas A and Kong, Hui},
isbn = {978-331965764-6},
location = {Berlin, Germany},
pages = {116 -- 132},
publisher = {Springer},
title = {{Conic abstractions for hybrid systems}},
doi = {10.1007/978-3-319-65765-3_7},
volume = {10419},
year = {2017},
}
@inproceedings{648,
abstract = {Pseudoentropy has found a lot of important applications to cryptography and complexity theory. In this paper we focus on the foundational problem that has not been investigated so far, namely by how much pseudoentropy (the amount seen by computationally bounded attackers) differs from its information-theoretic counterpart (seen by unbounded observers), given certain limits on attacker’s computational power? We provide the following answer for HILL pseudoentropy, which exhibits a threshold behavior around the size exponential in the entropy amount:– If the attacker size (s) and advantage (ε) satisfy s (formula presented) where k is the claimed amount of pseudoentropy, then the pseudoentropy boils down to the information-theoretic smooth entropy. – If s (formula presented) then pseudoentropy could be arbitrarily bigger than the information-theoretic smooth entropy. Besides answering the posted question, we show an elegant application of our result to the complexity theory, namely that it implies the classical result on the existence of functions hard to approximate (due to Pippenger). In our approach we utilize non-constructive techniques: the duality of linear programming and the probabilistic method.},
author = {Skórski, Maciej},
editor = {Jäger, Gerhard and Steila, Silvia},
isbn = {978-331955910-0},
location = {Bern, Switzerland},
pages = {600 -- 613},
publisher = {Springer},
title = {{On the complexity of breaking pseudoentropy}},
doi = {10.1007/978-3-319-55911-7_43},
volume = {10185},
year = {2017},
}
@inproceedings{650,
abstract = {In this work we present a short and unified proof for the Strong and Weak Regularity Lemma, based on the cryptographic technique called low-complexity approximations. In short, both problems reduce to a task of finding constructively an approximation for a certain target function under a class of distinguishers (test functions), where distinguishers are combinations of simple rectangle-indicators. In our case these approximations can be learned by a simple iterative procedure, which yields a unified and simple proof, achieving for any graph with density d and any approximation parameter the partition size. The novelty in our proof is: (a) a simple approach which yields both strong and weaker variant, and (b) improvements when d = o(1). At an abstract level, our proof can be seen as a refinement and simplification of the “analytic” proof given by Lovász and Szegedy.},
author = {Skórski, Maciej},
editor = {Jäger, Gerhard and Steila, Silvia},
issn = {03029743},
location = {Bern, Switzerland},
pages = {586 -- 599},
publisher = {Springer},
title = {{A cryptographic view of regularity lemmas: Simpler unified proofs and refined bounds}},
doi = {10.1007/978-3-319-55911-7_42},
volume = {10185},
year = {2017},
}
@article{651,
abstract = {Superhydrophobic surfaces reduce the frictional drag between water and solid materials, but this effect is often temporary. The realization of sustained drag reduction has applications for water vehicles and pipeline flows.
},
author = {Hof, Björn},
issn = {00280836},
journal = {Nature},
number = {7636},
pages = {161 -- 162},
publisher = {Nature Publishing Group},
title = {{Fluid dynamics: Water flows out of touch}},
doi = {10.1038/541161a},
volume = {541},
year = {2017},
}
@inproceedings{6517,
abstract = {A (possibly degenerate) drawing of a graph G in the plane is approximable by an embedding if it can be turned into an embedding by an arbitrarily small perturbation. We show that testing, whether a drawing of a planar graph G in the plane is approximable by an embedding, can be carried out in polynomial time, if a desired embedding of G belongs to a fixed isotopy class, i.e., the rotation system (or equivalently the faces) of the embedding of G and the choice of outer face are fixed. In other words, we show that c-planarity with embedded pipes is tractable for graphs with fixed embeddings. To the best of our knowledge an analogous result was previously known essentially only when G is a cycle.},
author = {Fulek, Radoslav},
location = {Phuket, Thailand},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Embedding graphs into embedded graphs}},
doi = {10.4230/LIPICS.ISAAC.2017.34},
volume = {92},
year = {2017},
}
@inproceedings{652,
abstract = {We present an approach that enables robots to self-organize their sensorimotor behavior from scratch without providing specific information about neither the robot nor its environment. This is achieved by a simple neural control law that increases the consistency between external sensor dynamics and internal neural dynamics of the utterly simple controller. In this way, the embodiment and the agent-environment coupling are the only source of individual development. We show how an anthropomorphic tendon driven arm-shoulder system develops different behaviors depending on that coupling. For instance: Given a bottle half-filled with water, the arm starts to shake it, driven by the physical response of the water. When attaching a brush, the arm can be manipulated into wiping a table, and when connected to a revolvable wheel it finds out how to rotate it. Thus, the robot may be said to discover the affordances of the world. When allowing two (simulated) humanoid robots to interact physically, they engage into a joint behavior development leading to, for instance, spontaneous cooperation. More social effects are observed if the robots can visually perceive each other. Although, as an observer, it is tempting to attribute an apparent intentionality, there is nothing of the kind put in. As a conclusion, we argue that emergent behavior may be much less rooted in explicit intentions, internal motivations, or specific reward systems than is commonly believed.},
author = {Der, Ralf and Martius, Georg S},
isbn = {978-150905069-7},
location = {Cergy-Pontoise, France},
publisher = {IEEE},
title = {{Dynamical self consistency leads to behavioral development and emergent social interactions in robots}},
doi = {10.1109/DEVLRN.2016.7846789},
year = {2017},
}
@inproceedings{6526,
abstract = {This paper studies the complexity of estimating Rényi divergences of discrete distributions: p observed from samples and the baseline distribution q known a priori. Extending the results of Acharya et al. (SODA'15) on estimating Rényi entropy, we present improved estimation techniques together with upper and lower bounds on the sample complexity. We show that, contrarily to estimating Rényi entropy where a sublinear (in the alphabet size) number of samples suffices, the sample complexity is heavily dependent on events occurring unlikely in q, and is unbounded in general (no matter what an estimation technique is used). For any divergence of integer order bigger than 1, we provide upper and lower bounds on the number of samples dependent on probabilities of p and q (the lower bounds hold for non-integer orders as well). We conclude that the worst-case sample complexity is polynomial in the alphabet size if and only if the probabilities of q are non-negligible. This gives theoretical insights into heuristics used in the applied literature to handle numerical instability, which occurs for small probabilities of q. Our result shows that they should be handled with care not only because of numerical issues, but also because of a blow up in the sample complexity.},
author = {Skórski, Maciej},
booktitle = {2017 IEEE International Symposium on Information Theory (ISIT)},
isbn = {9781509040964},
location = {Aachen, Germany},
publisher = {IEEE},
title = {{On the complexity of estimating Rényi divergences}},
doi = {10.1109/isit.2017.8006529},
year = {2017},
}
@inproceedings{6527,
abstract = {A memory-hard function (MHF) $f_n$ with parameter n can be computed in sequential time and space n. Simultaneously, a high amortized parallel area-time complexity (aAT) is incurred per evaluation. In practice, MHFs are used to limit the rate at which an adversary (using a custom computational device) can evaluate a security sensitive function that still occasionally needs to be evaluated by honest users (using an off-the-shelf general purpose device). The most prevalent examples of such sensitive functions are Key Derivation Functions (KDFs) and password hashing algorithms where rate limits help mitigate off-line dictionary attacks. As the honest users' inputs to these functions are often (low-entropy) passwords special attention is given to a class of side-channel resistant MHFs called iMHFs.
Essentially all iMHFs can be viewed as some mode of operation (making n calls to some round function) given by a directed acyclic graph (DAG) with very low indegree. Recently, a combinatorial property of a DAG has been identified (called "depth-robustness") which results in good provable security for an iMHF based on that DAG. Depth-robust DAGs have also proven useful in other cryptographic applications. Unfortunately, up till now, all known very depth-robust DAGs are impractically complicated and little is known about their exact (i.e. non-asymptotic) depth-robustness both in theory and in practice.
In this work we build and analyze (both formally and empirically) several exceedingly simple and efficient to navigate practical DAGs for use in iMHFs and other applications. For each DAG we:
*Prove that their depth-robustness is asymptotically maximal.
*Prove bounds of at least 3 orders of magnitude better on their exact depth-robustness compared to known bounds for other practical iMHF.
*Implement and empirically evaluate their depth-robustness and aAT against a variety of state-of-the art (and several new) depth-reduction and low aAT attacks.
We find that, against all attacks, the new DAGs perform significantly better in practice than Argon2i, the most widely deployed iMHF in practice.
Along the way we also improve the best known empirical attacks on the aAT of Argon2i by implementing and testing several heuristic versions of a (hitherto purely theoretical) depth-reduction attack. Finally, we demonstrate practicality of our constructions by modifying the Argon2i code base to use one of the new high aAT DAGs. Experimental benchmarks on a standard off-the-shelf CPU show that the new modifications do not adversely affect the impressive throughput of Argon2i (despite seemingly enjoying significantly higher aAT).
},
author = {Alwen, Joel F and Blocki, Jeremiah and Harsha, Ben},
booktitle = {Proceedings of the 2017 ACM SIGSAC Conference on Computer and Communications Security},
isbn = {9781450349468},
location = {Dallas, TX, USA},
pages = {1001 -- 1017},
publisher = {ACM Press},
title = {{Practical graphs for optimal side-channel resistant memory-hard functions}},
doi = {10.1145/3133956.3134031},
year = {2017},
}
@article{654,
abstract = {In November 2016, developmental biologists, synthetic biologists and engineers gathered in Paris for a meeting called ‘Engineering the embryo’. The participants shared an interest in exploring how synthetic systems can reveal new principles of embryonic development, and how the in vitro manipulation and modeling of development using stem cells can be used to integrate ideas and expertise from physics, developmental biology and tissue engineering. As we review here, the conference pinpointed some of the challenges arising at the intersection of these fields, along with great enthusiasm for finding new approaches and collaborations.},
author = {Kicheva, Anna and Rivron, Nicolas},
issn = {09501991},
journal = {Development},
number = {5},
pages = {733 -- 736},
publisher = {Company of Biologists},
title = {{Creating to understand – developmental biology meets engineering in Paris}},
doi = {10.1242/dev.144915},
volume = {144},
year = {2017},
}
@article{655,
abstract = {The bacterial flagellum is a self-assembling nanomachine. The external flagellar filament, several times longer than a bacterial cell body, is made of a few tens of thousands subunits of a single protein: flagellin. A fundamental problem concerns the molecular mechanism of how the flagellum grows outside the cell, where no discernible energy source is available. Here, we monitored the dynamic assembly of individual flagella using in situ labelling and real-time immunostaining of elongating flagellar filaments. We report that the rate of flagellum growth, initially ~1,700 amino acids per second, decreases with length and that the previously proposed chain mechanism does not contribute to the filament elongation dynamics. Inhibition of the proton motive force-dependent export apparatus revealed a major contribution of substrate injection in driving filament elongation. The combination of experimental and mathematical evidence demonstrates that a simple, injection-diffusion mechanism controls bacterial flagella growth outside the cell.},
author = {Renault, Thibaud and Abraham, Anthony and Bergmiller, Tobias and Paradis, Guillaume and Rainville, Simon and Charpentier, Emmanuelle and Guet, Calin C and Tu, Yuhai and Namba, Keiichi and Keener, James and Minamino, Tohru and Erhardt, Marc},
issn = {2050084X},
journal = {eLife},
publisher = {eLife Sciences Publications},
title = {{Bacterial flagella grow through an injection diffusion mechanism}},
doi = {10.7554/eLife.23136},
volume = {6},
year = {2017},
}
@article{656,
abstract = {Human neurons transplanted into a mouse model for Alzheimer’s disease show human-specific vulnerability to β-amyloid plaques and may help to identify new therapeutic targets.},
author = {Novarino, Gaia},
issn = {19466234},
journal = {Science Translational Medicine},
number = {381},
publisher = {American Association for the Advancement of Science},
title = {{Modeling Alzheimer's disease in mice with human neurons}},
doi = {10.1126/scitranslmed.aam9867},
volume = {9},
year = {2017},
}
@article{657,
abstract = {Plant organs are typically organized into three main tissue layers. The middle ground tissue layer comprises the majority of the plant body and serves a wide range of functions, including photosynthesis, selective nutrient uptake and storage, and gravity sensing. Ground tissue patterning and maintenance in Arabidopsis are controlled by a well-established gene network revolving around the key regulator SHORT-ROOT (SHR). In contrast, it is completely unknown how ground tissue identity is first specified from totipotent precursor cells in the embryo. The plant signaling molecule auxin, acting through AUXIN RESPONSE FACTOR (ARF) transcription factors, is critical for embryo patterning. The auxin effector ARF5/MONOPTEROS (MP) acts both cell-autonomously and noncell-autonomously to control embryonic vascular tissue formation and root initiation, respectively. Here we show that auxin response and ARF activity cell-autonomously control the asymmetric division of the first ground tissue cells. By identifying embryonic target genes, we show that MP transcriptionally initiates the ground tissue lineage and acts upstream of the regulatory network that controls ground tissue patterning and maintenance. Strikingly, whereas the SHR network depends on MP, this MP function is, at least in part, SHR independent. Our study therefore identifies auxin response as a regulator of ground tissue specification in the embryonic root, and reveals that ground tissue initiation and maintenance use different regulators and mechanisms. Moreover, our data provide a framework for the simultaneous formation of multiple cell types by the same transcriptional regulator.},
author = {Möller, Barbara and Ten Hove, Colette and Xiang, Daoquan and Williams, Nerys and López, Lorena and Yoshida, Saiko and Smit, Margot and Datla, Raju and Weijers, Dolf},
issn = {00278424},
journal = {PNAS},
number = {12},
pages = {E2533 -- E2539},
publisher = {National Academy of Sciences},
title = {{Auxin response cell autonomously controls ground tissue initiation in the early arabidopsis embryo}},
doi = {10.1073/pnas.1616493114},
volume = {114},
year = {2017},
}
@article{658,
abstract = {With the accelerated development of robot technologies, control becomes one of the central themes of research. In traditional approaches, the controller, by its internal functionality, finds appropriate actions on the basis of specific objectives for the task at hand. While very successful in many applications, self-organized control schemes seem to be favored in large complex systems with unknown dynamics or which are difficult to model. Reasons are the expected scalability, robustness, and resilience of self-organizing systems. The paper presents a self-learning neurocontroller based on extrinsic differential plasticity introduced recently, applying it to an anthropomorphic musculoskeletal robot arm with attached objects of unknown physical dynamics. The central finding of the paper is the following effect: by the mere feedback through the internal dynamics of the object, the robot is learning to relate each of the objects with a very specific sensorimotor pattern. Specifically, an attached pendulum pilots the arm into a circular motion, a half-filled bottle produces axis oriented shaking behavior, a wheel is getting rotated, and wiping patterns emerge automatically in a table-plus-brush setting. By these object-specific dynamical patterns, the robot may be said to recognize the object's identity, or in other words, it discovers dynamical affordances of objects. Furthermore, when including hand coordinates obtained from a camera, a dedicated hand-eye coordination self-organizes spontaneously. These phenomena are discussed from a specific dynamical system perspective. Central is the dedicated working regime at the border to instability with its potentially infinite reservoir of (limit cycle) attractors "waiting" to be excited. Besides converging toward one of these attractors, variate behavior is also arising from a self-induced attractor morphing driven by the learning rule. 
We claim that experimental investigations with this anthropomorphic, self-learning robot not only generate interesting and potentially useful behaviors, but may also help to better understand what subjective human muscle feelings are, how they can be rooted in sensorimotor patterns, and how these concepts may feed back on robotics.},
author = {Der, Ralf and Martius, Georg S},
issn = {16625218},
journal = {Frontiers in Neurorobotics},
number = {MAR},
publisher = {Frontiers Research Foundation},
title = {{Self organized behavior generation for musculoskeletal robots}},
doi = {10.3389/fnbot.2017.00008},
volume = {11},
year = {2017},
}
@article{659,
abstract = {Migration frequently involves Rac-mediated protrusion of lamellipodia, formed by Arp2/3 complex-dependent branching thought to be crucial for force generation and stability of these networks. The formins FMNL2 and FMNL3 are Cdc42 effectors targeting to the lamellipodium tip and shown here to nucleate and elongate actin filaments with complementary activities in vitro. In migrating B16-F1 melanoma cells, both formins contribute to the velocity of lamellipodium protrusion. Loss of FMNL2/3 function in melanoma cells and fibroblasts reduces lamellipodial width, actin filament density and -bundling, without changing patterns of Arp2/3 complex incorporation. Strikingly, in melanoma cells, FMNL2/3 gene inactivation almost completely abolishes protrusion forces exerted by lamellipodia and modifies their ultrastructural organization. Consistently, CRISPR/Cas-mediated depletion of FMNL2/3 in fibroblasts reduces both migration and capability of cells to move against viscous media. Together, we conclude that force generation in lamellipodia strongly depends on FMNL formin activity, operating in addition to Arp2/3 complex-dependent filament branching.},
author = {Kage, Frieda and Winterhoff, Moritz and Dimchev, Vanessa and Müller, Jan and Thalheim, Tobias and Freise, Anika and Brühmann, Stefan and Kollasser, Jana and Block, Jennifer and Dimchev, Georgi A and Geyer, Matthias and Schnittler, Hans and Brakebusch, Cord and Stradal, Theresia and Carlier, Marie and Sixt, Michael K and Käs, Josef and Faix, Jan and Rottner, Klemens},
issn = {20411723},
journal = {Nature Communications},
publisher = {Nature Publishing Group},
title = {{FMNL formins boost lamellipodial force generation}},
doi = {10.1038/ncomms14832},
volume = {8},
year = {2017},
}
@article{660,
abstract = {Growing microtubules are protected from depolymerization by the presence of a GTP or GDP/Pi cap. End-binding proteins of the EB1 family bind to the stabilizing cap, allowing monitoring of its size in real time. The cap size has been shown to correlate with instantaneous microtubule stability. Here we have quantitatively characterized the properties of cap size fluctuations during steady-state growth and have developed a theory predicting their timescale and amplitude from the kinetics of microtubule growth and cap maturation. In contrast to growth speed fluctuations, cap size fluctuations show a characteristic timescale, which is defined by the lifetime of the cap sites. Growth fluctuations affect the amplitude of cap size fluctuations; however, cap size does not affect growth speed, indicating that microtubules are far from instability during most of their time of growth. Our theory provides the basis for a quantitative understanding of microtubule stability fluctuations during steady-state growth.},
author = {Rickman, Jamie and Düllberg, Christian F and Cade, Nicholas and Griffin, Lewis and Surrey, Thomas},
issn = {00278424},
journal = {PNAS},
number = {13},
pages = {3427 -- 3432},
publisher = {National Academy of Sciences},
title = {{Steady state EB cap size fluctuations are determined by stochastic microtubule growth and maturation}},
doi = {10.1073/pnas.1620274114},
volume = {114},
year = {2017},
}
@article{662,
abstract = {We report a direct-numerical-simulation study of the Taylor-Couette flow in the quasi-Keplerian regime at shear Reynolds numbers up to $10^5$. Quasi-Keplerian rotating flow has been investigated for decades as a simplified model system to study the origin of turbulence in accretion disks that is not fully understood. The flow in this study is axially periodic and thus the experimental end-wall effects on the stability of the flow are avoided. Using optimal linear perturbations as initial conditions, our simulations find no sustained turbulence: the strong initial perturbations distort the velocity profile and trigger turbulence that eventually decays.},
author = {Shi, Liang and Hof, Björn and Rampp, Markus and Avila, Marc},
issn = {10706631},
journal = {Physics of Fluids},
number = {4},
publisher = {American Institute of Physics},
title = {{Hydrodynamic turbulence in quasi Keplerian rotating flows}},
doi = {10.1063/1.4981525},
volume = {29},
year = {2017},
}
@inproceedings{663,
abstract = {In this paper, we propose an approach to automatically compute invariant clusters for nonlinear semialgebraic hybrid systems. An invariant cluster for an ordinary differential equation (ODE) is a multivariate polynomial invariant g(u→, x→) = 0, parametric in u→, which can yield an infinite number of concrete invariants by assigning different values to u→ so that every trajectory of the system can be overapproximated precisely by the intersection of a group of concrete invariants. For semialgebraic systems, which involve ODEs with multivariate polynomial right-hand sides, given a template multivariate polynomial g(u→, x→), an invariant cluster can be obtained by first computing the remainder of the Lie derivative of g(u→, x→) divided by g(u→, x→) and then solving the system of polynomial equations obtained from the coefficients of the remainder. Based on invariant clusters and sum-of-squares (SOS) programming, we present a new method for the safety verification of hybrid systems. Experiments on nonlinear benchmark systems from biology and control theory show that our approach is efficient. },
author = {Kong, Hui and Bogomolov, Sergiy and Schilling, Christian and Jiang, Yu and Henzinger, Thomas A},
booktitle = {Proceedings of the 20th International Conference on Hybrid Systems},
isbn = {978-145034590-3},
location = {Pittsburgh, PA, United States},
pages = {163 -- 172},
publisher = {ACM},
title = {{Safety verification of nonlinear hybrid systems based on invariant clusters}},
doi = {10.1145/3049797.3049814},
year = {2017},
}
@article{665,
abstract = {The molecular mechanisms underlying phenotypic variation in isogenic bacterial populations remain poorly understood.We report that AcrAB-TolC, the main multidrug efflux pump of Escherichia coli, exhibits a strong partitioning bias for old cell poles by a segregation mechanism that is mediated by ternary AcrAB-TolC complex formation. Mother cells inheriting old poles are phenotypically distinct and display increased drug efflux activity relative to daughters. Consequently, we find systematic and long-lived growth differences between mother and daughter cells in the presence of subinhibitory drug concentrations. A simple model for biased partitioning predicts a population structure of long-lived and highly heterogeneous phenotypes. This straightforward mechanism of generating sustained growth rate differences at subinhibitory antibiotic concentrations has implications for understanding the emergence of multidrug resistance in bacteria.},
author = {Bergmiller, Tobias and Andersson, Anna M and Tomasek, Kathrin and Balleza, Enrique and Kiviet, Daniel and Hauschild, Robert and Tkacik, Gasper and Guet, Calin C},
issn = {00368075},
journal = {Science},
number = {6335},
pages = {311 -- 315},
publisher = {American Association for the Advancement of Science},
title = {{Biased partitioning of the multidrug efflux pump AcrAB-TolC underlies long-lived phenotypic heterogeneity}},
doi = {10.1126/science.aaf4762},
volume = {356},
year = {2017},
}
@article{666,
abstract = {Antibiotics elicit drastic changes in microbial gene expression, including the induction of stress response genes. While certain stress responses are known to “cross-protect” bacteria from other stressors, it is unclear whether cellular responses to antibiotics have a similar protective role. By measuring the genome-wide transcriptional response dynamics of Escherichia coli to four antibiotics, we found that trimethoprim induces a rapid acid stress response that protects bacteria from subsequent exposure to acid. Combining microfluidics with time-lapse imaging to monitor survival and acid stress response in single cells revealed that the noisy expression of the acid resistance operon gadBC correlates with single-cell survival. Cells with higher gadBC expression following trimethoprim maintain higher intracellular pH and survive the acid stress longer. The seemingly random single-cell survival under acid stress can therefore be predicted from gadBC expression and rationalized in terms of GadB/C molecular function. Overall, we provide a roadmap for identifying the molecular mechanisms of single-cell cross-protection between antibiotics and other stressors.},
author = {Mitosch, Karin and Rieckh, Georg and Bollenbach, Tobias},
issn = {24054712},
journal = {Cell Systems},
number = {4},
pages = {393 -- 403},
publisher = {Cell Press},
title = {{Noisy response to antibiotic stress predicts subsequent single-cell survival in an acidic environment}},
doi = {10.1016/j.cels.2017.03.001},
volume = {4},
year = {2017},
}
@article{667,
abstract = {Perinatal exposure to penicillin may result in long-lasting gut and behavioral changes.},
author = {Novarino, Gaia},
issn = {19466234},
journal = {Science Translational Medicine},
number = {387},
publisher = {American Association for the Advancement of Science},
title = {{The antisocial side of antibiotics}},
doi = {10.1126/scitranslmed.aan2786},
volume = {9},
year = {2017},
}
@inproceedings{6679,
abstract = {Polar codes represent one of the major recent breakthroughs in coding theory and, because of their attractive features, they have been selected for the incoming 5G standard. As such, a lot of attention has been devoted to the development of decoding algorithms with good error performance and efficient hardware implementation. One of the leading candidates in this regard is represented by successive-cancellation list (SCL) decoding. However, its hardware implementation requires a large amount of memory. Recently, a partitioned SCL (PSCL) decoder has been proposed to significantly reduce the memory consumption [1]. In this paper, we examine the paradigm of PSCL decoding from both theoretical and practical standpoints: (i) by changing the construction of the code, we are able to improve the performance at no additional computational, latency or memory cost, (ii) we present an optimal scheme to allocate cyclic redundancy checks (CRCs), and (iii) we provide an upper bound on the list size that allows MAP performance.},
author = {Hashemi, Seyyed Ali and Mondelli, Marco and Hassani, Hamed and Urbanke, Ruediger and Gross, Warren},
booktitle = {2017 IEEE Global Communications Conference},
location = {Singapore, Singapore},
pages = {1--7},
publisher = {IEEE},
title = {{Partitioned list decoding of polar codes: Analysis and improvement of finite-length performance}},
doi = {10.1109/glocom.2017.8254940},
year = {2017},
}
@article{668,
abstract = {Macrophage filopodia, finger-like membrane protrusions, were first implicated in phagocytosis more than 100 years ago, but little is still known about the involvement of these actin-dependent structures in particle clearance. Using spinning disk confocal microscopy to image filopodial dynamics in mouse resident Lifeact-EGFP macrophages, we show that filopodia, or filopodia-like structures, support pathogen clearance by multiple means. Filopodia supported the phagocytic uptake of bacterial (Escherichia coli) particles by (i) capturing along the filopodial shaft and surfing toward the cell body, the most common mode of capture; (ii) capturing via the tip followed by retraction; (iii) combinations of surfing and retraction; or (iv) sweeping actions. In addition, filopodia supported the uptake of zymosan (Saccharomyces cerevisiae) particles by (i) providing fixation, (ii) capturing at the tip and filopodia-guided actin anterograde flow with phagocytic cup formation, and (iii) the rapid growth of new protrusions. To explore the role of filopodia-inducing Cdc42, we generated myeloid-restricted Cdc42 knock-out mice. Cdc42-deficient macrophages exhibited rapid phagocytic cup kinetics, but reduced particle clearance, which could be explained by the marked rounded-up morphology of these cells. Macrophages lacking Myo10, thought to act downstream of Cdc42, had normal morphology, motility, and phagocytic cup formation, but displayed markedly reduced filopodia formation. In conclusion, live-cell imaging revealed multiple mechanisms involving macrophage filopodia in particle capture and engulfment. Cdc42 is not critical for filopodia or phagocytic cup formation, but plays a key role in driving macrophage lamellipodial spreading.},
author = {Horsthemke, Markus and Bachg, Anne and Groll, Katharina and Moyzio, Sven and Müther, Barbara and Hemkemeyer, Sandra and Wedlich Söldner, Roland and Sixt, Michael K and Tacke, Sebastian and Bähler, Martin and Hanley, Peter},
issn = {00219258},
journal = {Journal of Biological Chemistry},
number = {17},
pages = {7258 -- 7273},
publisher = {American Society for Biochemistry and Molecular Biology},
title = {{Multiple roles of filopodial dynamics in particle capture and phagocytosis and phenotypes of Cdc42 and Myo10 deletion}},
doi = {10.1074/jbc.M116.766923},
volume = {292},
year = {2017},
}
@article{669,
abstract = {The exocyst, a eukaryotic tethering complex, coregulates targeted exocytosis as an effector of small GTPases in polarized cell growth. In land plants, several exocyst subunits are encoded by double or triple paralogs, culminating in tens of EXO70 paralogs. Out of 23 Arabidopsis thaliana EXO70 isoforms, we analyzed seven isoforms expressed in pollen. Genetic and microscopic analyses of single mutants in EXO70A2, EXO70C1, EXO70C2, EXO70F1, EXO70H3, EXO70H5, and EXO70H6 genes revealed that only a loss-of-function EXO70C2 allele resulted in a significant male-specific transmission defect (segregation 40%:51%:9%) due to aberrant pollen tube growth. Mutant pollen tubes grown in vitro exhibited an enhanced growth rate and a decreased thickness of the tip cell wall, causing tip bursts. However, exo70C2 pollen tubes could frequently recover and restart their speedy elongation, resulting in a repetitive stop-and-go growth dynamics. A pollenspecific depletion of the closest paralog, EXO70C1, using artificial microRNA in the exo70C2 mutant background, resulted in a complete pollen-specific transmission defect, suggesting redundant functions of EXO70C1 and EXO70C2. Both EXO70C1 and EXO70C2, GFP tagged and expressed under the control of their native promoters, localized in the cytoplasm of pollen grains, pollen tubes, and also root trichoblast cells. The expression of EXO70C2-GFP complemented the aberrant growth of exo70C2 pollen tubes. The absent EXO70C2 interactions with core exocyst subunits in the yeast two-hybrid assay, cytoplasmic localization, and genetic effect suggest an unconventional EXO70 function possibly as a regulator of exocytosis outside the exocyst complex. In conclusion, EXO70C2 is a novel factor contributing to the regulation of optimal tip growth of Arabidopsis pollen tubes. },
author = {Synek, Lukáš and Vukašinović, Nemanja and Kulich, Ivan and Hála, Michal and Aldorfová, Klára and Fendrych, Matyas and Žárský, Viktor},
issn = {00320889},
journal = {Plant Physiology},
number = {1},
pages = {223 -- 240},
publisher = {American Society of Plant Biologists},
title = {{EXO70C2 is a key regulatory factor for optimal tip growth of pollen}},
doi = {10.1104/pp.16.01282},
volume = {174},
year = {2017},
}
@article{670,
abstract = {We propose an efficient method to model paper tearing in the context of interactive modeling. The method uses geometrical information to automatically detect potential starting points of tears. We further introduce a new hybrid geometrical and physical-based method to compute the trajectory of tears while procedurally synthesizing high resolution details of the tearing path using a texture based approach. The results obtained are compared with real paper and with previous studies on the expected geometric paths of paper that tears.},
author = {Schreck, Camille and Rohmer, Damien and Hahmann, Stefanie},
issn = {01677055},
journal = {Computer Graphics Forum},
number = {2},
pages = {95 -- 106},
publisher = {Wiley},
title = {{Interactive paper tearing}},
doi = {10.1111/cgf.13110},
volume = {36},
year = {2017},
}
@article{671,
abstract = {Humans routinely use conditionally cooperative strategies when interacting in repeated social dilemmas. They are more likely to cooperate if others cooperated before, and are ready to retaliate if others defected. To capture the emergence of reciprocity, most previous models consider subjects who can only choose from a restricted set of representative strategies, or who react to the outcome of the very last round only. As players memorize more rounds, the dimension of the strategy space increases exponentially. This increasing computational complexity renders simulations for individuals with higher cognitive abilities infeasible, especially if multiplayer interactions are taken into account. Here, we take an axiomatic approach instead. We propose several properties that a robust cooperative strategy for a repeated multiplayer dilemma should have. These properties naturally lead to a unique class of cooperative strategies, which contains the classical Win-Stay Lose-Shift rule as a special case. A comprehensive numerical analysis for the prisoner's dilemma and for the public goods game suggests that strategies of this class readily evolve across various memory-n spaces. Our results reveal that successful strategies depend not only on how cooperative others were in the past but also on the respective context of cooperation.},
author = {Hilbe, Christian and Martinez-Vaquero, Luis A and Chatterjee, Krishnendu and Nowak, Martin},
issn = {00278424},
journal = {Proceedings of the National Academy of Sciences},
number = {18},
pages = {4715 -- 4720},
publisher = {National Academy of Sciences},
title = {{Memory-n strategies of direct reciprocity}},
doi = {10.1073/pnas.1621239114},
volume = {114},
year = {2017},
}
@article{672,
abstract = {Trafficking cells frequently transmigrate through epithelial and endothelial monolayers. How monolayers cooperate with the penetrating cells to support their transit is poorly understood. We studied dendritic cell (DC) entry into lymphatic capillaries as a model system for transendothelial migration. We find that the chemokine CCL21, which is the decisive guidance cue for intravasation, mainly localizes in the trans-Golgi network and intracellular vesicles of lymphatic endothelial cells. Upon DC transmigration, these Golgi deposits disperse and CCL21 becomes extracellularly enriched at the sites of endothelial cell-cell junctions. When we reconstitute the transmigration process in vitro, we find that secretion of CCL21-positive vesicles is triggered by a DC contact-induced calcium signal, and selective calcium chelation in lymphatic endothelium attenuates transmigration. Altogether, our data demonstrate a chemokine-mediated feedback between DCs and lymphatic endothelium, which facilitates transendothelial migration.},
author = {Vaahtomeri, Kari and Brown, Markus and Hauschild, Robert and De Vries, Ingrid and Leithner, Alexander F and Mehling, Matthias and Kaufmann, Walter and Sixt, Michael K},
issn = {22111247},
journal = {Cell Reports},
number = {5},
pages = {902 -- 909},
publisher = {Cell Press},
title = {{Locally triggered release of the chemokine CCL21 promotes dendritic cell transmigration across lymphatic endothelia}},
doi = {10.1016/j.celrep.2017.04.027},
volume = {19},
year = {2017},
}
@inproceedings{6729,
abstract = {Consider the problem of constructing a polar code of block length N for the transmission over a given channel W. Typically this requires to compute the reliability of all the N synthetic channels and then to include those that are sufficiently reliable. However, we know from [1], [2] that there is a partial order among the synthetic channels. Hence, it is natural to ask whether we can exploit it to reduce the computational burden of the construction problem. We show that, if we take advantage of the partial order [1], [2], we can construct a polar code by computing the reliability of roughly N/ log 3/2 N synthetic channels. Such a set of synthetic channels is universal, in the sense that it allows one to construct polar codes for any W, and it can be identified by solving a maximum matching problem on a bipartite graph. Our proof technique consists in reducing the construction problem to the problem of computing the maximum cardinality of an antichain for a suitable partially ordered set. As such, this method is general and it can be used to further improve the complexity of the construction problem in case a new partial order on the synthetic channels of polar codes is discovered.},
author = {Mondelli, Marco and Hassani, S. Hamed and Urbanke, Ruediger},
booktitle = {2017 IEEE International Symposium on Information Theory },
isbn = {9781509040964},
issn = {2157-8117},
location = {Aachen, Germany},
pages = {1853--1857},
publisher = {IEEE},
title = {{Construction of polar codes with sublinear complexity}},
doi = {10.1109/isit.2017.8006850},
year = {2017},
}
@article{673,
abstract = {We present a numerical study of wavy supercritical cylindrical Couette flow between counter-rotating cylinders in which the wavy pattern propagates either prograde with the inner cylinder or retrograde opposite the rotation of the inner cylinder. The wave propagation reversals from prograde to retrograde and vice versa occur at distinct values of the inner cylinder Reynolds number when the associated frequency of the wavy instability vanishes. The reversal occurs for both twofold and threefold symmetric wavy vortices. Moreover, the wave propagation reversal only occurs for sufficiently strong counter-rotation. The flow pattern reversal appears to be intrinsic in the system as either periodic boundary conditions or fixed end wall boundary conditions for different system sizes always result in the wave propagation reversal. We present a detailed bifurcation sequence and parameter space diagram with respect to retrograde behavior of wavy flows. The retrograde propagation of the instability occurs when the inner Reynolds number is about two times the outer Reynolds number. The mechanism for the retrograde propagation is associated with the inviscidly unstable region near the inner cylinder and the direction of the global average azimuthal velocity. Flow dynamics, spatio-temporal behavior, global mean angular velocity, and torque of the flow with the wavy pattern are explored.},
author = {Altmeyer, Sebastian and Lueptow, Richard},
issn = {24700045},
journal = {Physical Review E - Statistical, Nonlinear, and Soft Matter Physics},
number = {5},
publisher = {American Physical Society},
title = {{Wave propagation reversal for wavy vortices in wide-gap counter-rotating cylindrical Couette flow}},
doi = {10.1103/PhysRevE.95.053103},
volume = {95},
year = {2017},
}
@article{6730,
abstract = {We introduce a new approach to proving that a sequence of deterministic linear codes achieves capacity on an erasure channel under maximum a posteriori decoding. Rather than relying on the precise structure of the codes, our method exploits code symmetry. In particular, the technique applies to any sequence of linear codes where the blocklengths are strictly increasing, the code rates converge, and the permutation group of each code is doubly transitive. In other words, we show that symmetry alone implies near-optimal performance. An important consequence of this result is that a sequence of Reed-Muller codes with increasing block length and converging rate achieves capacity. This possibility has been suggested previously in the literature but it has only been proven for cases where the limiting code rate is 0 or 1. Moreover, these results extend naturally to all affine-invariant codes and, thus, to extended primitive narrow-sense BCH codes. This also resolves, in the affirmative, the existence question for capacity-achieving sequences of binary cyclic codes. The primary tools used in the proof are the sharp threshold property for symmetric monotone Boolean functions and the area theorem for extrinsic information transfer functions.},
author = {Kudekar, Shrinivas and Kumar, Santhosh and Mondelli, Marco and Pfister, Henry D. and Sasoglu, Eren and Urbanke, Ruediger L.},
issn = {1557-9654},
journal = {IEEE Transactions on Information Theory},
number = {7},
pages = {4298--4316},
publisher = {IEEE},
title = {{Reed–Muller codes achieve capacity on erasure channels}},
doi = {10.1109/tit.2017.2673829},
volume = {63},
year = {2017},
}
@inproceedings{6731,
abstract = {We present a rate-compatible polar coding scheme that achieves the capacity of any family of channels. Our solution generalizes the previous results [1], [2] that provide capacity-achieving rate-compatible polar codes for a degraded family of channels. The motivation for our extension comes from the fact that in many practical scenarios, e.g., MIMO systems and non-Gaussian interference, the channels cannot be ordered by degradation. The main technical contribution of this paper consists in removing the degradation condition. To do so, we exploit the ideas coming from the construction of universal polar codes. Our scheme possesses the usual attractive features of polar codes: low complexity code construction, encoding, and decoding; super-polynomial scaling of the error probability with the block length; and absence of error floors. On the negative side, the scaling of the gap to capacity with the block length is slower than in standard polar codes, and we prove an upper bound on the scaling exponent.},
author = {Mondelli, Marco and Hassani, Hamed and Maric, Ivana and Hui, Dennis and Hong, Song-Nam},
booktitle = {2017 IEEE Wireless Communications and Networking Conference Workshops },
isbn = {9781509059089},
location = {San Francisco, CA, USA},
publisher = {IEEE},
title = {{Capacity-achieving rate-compatible polar codes for general channels}},
doi = {10.1109/wcncw.2017.7919107},
year = {2017},
}
@article{674,
abstract = {Navigation of cells along gradients of guidance cues is a determining step in many developmental and immunological processes. Gradients can either be soluble or immobilized to tissues as demonstrated for the haptotactic migration of dendritic cells (DCs) toward higher concentrations of immobilized chemokine CCL21. To elucidate how gradient characteristics govern cellular response patterns, we here introduce an in vitro system allowing to track migratory responses of DCs to precisely controlled immobilized gradients of CCL21. We find that haptotactic sensing depends on the absolute CCL21 concentration and local steepness of the gradient, consistent with a scenario where DC directionality is governed by the signal-to-noise ratio of CCL21 binding to the receptor CCR7. We find that the conditions for optimal DC guidance are perfectly provided by the CCL21 gradients we measure in vivo. Furthermore, we find that CCR7 signal termination by the G-protein-coupled receptor kinase 6 (GRK6) is crucial for haptotactic but dispensable for chemotactic CCL21 gradient sensing in vitro and confirm those observations in vivo. These findings suggest that stable, tissue-bound CCL21 gradients as sustainable “roads” ensure optimal guidance in vivo.},
author = {Schwarz, Jan and Bierbaum, Veronika and Vaahtomeri, Kari and Hauschild, Robert and Brown, Markus and De Vries, Ingrid and Leithner, Alexander F and Reversat, Anne and Merrin, Jack and Tarrant, Teresa and Bollenbach, Tobias and Sixt, Michael K},
issn = {09609822},
journal = {Current Biology},
number = {9},
pages = {1314 -- 1325},
publisher = {Cell Press},
title = {{Dendritic cells interpret haptotactic chemokine gradients in a manner governed by signal to noise ratio and dependent on GRK6}},
doi = {10.1016/j.cub.2017.04.004},
volume = {27},
year = {2017},
}
@article{675,
abstract = {We report the enhancement of infrared absorption of chemisorbed carbon monoxide on platinum in the gap of plasmonic nanoantennas. Our method is based on the self-assembled formation of platinum nanoislands on nanoscopic dipole antenna arrays manufactured via electron beam lithography. We employ systematic variations of the plasmonic antenna resonance to precisely couple to the molecular stretch vibration of carbon monoxide adsorbed on the platinum nanoislands. Ultimately, we reach more than 1500-fold infrared absorption enhancements, allowing for an ultrasensitive detection of a monolayer of chemisorbed carbon monoxide. The developed procedure can be adapted to other metal adsorbents and molecular species and could be utilized for coverage sensing in surface catalytic reactions. },
author = {Haase, Johannes and Bagiante, Salvatore and Sigg, Hans and Van Bokhoven, Jeroen},
journal = {Optics Letters},
number = {10},
pages = {1931 -- 1934},
publisher = {OSA},
title = {{Surface enhanced infrared absorption of chemisorbed carbon monoxide using plasmonic nanoantennas}},
doi = {10.1364/OL.42.001931},
volume = {42},
year = {2017},
}
@article{677,
abstract = {The INO80 complex (INO80-C) is an evolutionarily conserved nucleosome remodeler that acts in transcription, replication, and genome stability. It is required for resistance against genotoxic agents and is involved in the repair of DNA double-strand breaks (DSBs) by homologous recombination (HR). However, the causes of the HR defect in INO80-C mutant cells are controversial. Here, we unite previous findings using a system to study HR with high spatial resolution in budding yeast. We find that INO80-C has at least two distinct functions during HR—DNA end resection and presynaptic filament formation. Importantly, the second function is linked to the histone variant H2A.Z. In the absence of H2A.Z, presynaptic filament formation and HR are restored in INO80-C-deficient mutants, suggesting that presynaptic filament formation is the crucial INO80-C function during HR.},
author = {Lademann, Claudio and Renkawitz, Jörg and Pfander, Boris and Jentsch, Stefan},
issn = {22111247},
journal = {Cell Reports},
number = {7},
pages = {1294 -- 1303},
publisher = {Cell Press},
title = {{The INO80 complex removes H2A.Z to promote presynaptic filament formation during homologous recombination}},
doi = {10.1016/j.celrep.2017.04.051},
volume = {19},
year = {2017},
}
@article{678,
abstract = {The seminal observation that mechanical signals can elicit changes in biochemical signalling within cells, a process commonly termed mechanosensation and mechanotransduction, has revolutionized our understanding of the role of cell mechanics in various fundamental biological processes, such as cell motility, adhesion, proliferation and differentiation. In this Review, we will discuss how the interplay and feedback between mechanical and biochemical signals control tissue morphogenesis and cell fate specification in embryonic development.},
author = {Petridou, Nicoletta and Spiro, Zoltan P and Heisenberg, Carl-Philipp J},
issn = {14657392},
journal = {Nature Cell Biology},
number = {6},
pages = {581 -- 588},
publisher = {Nature Publishing Group},
title = {{Multiscale force sensing in development}},
doi = {10.1038/ncb3524},
volume = {19},
year = {2017},
}
@article{680,
abstract = {In order to respond reliably to specific features of their environment, sensory neurons need to integrate multiple incoming noisy signals. Crucially, they also need to compete for the interpretation of those signals with other neurons representing similar features. The form that this competition should take depends critically on the noise corrupting these signals. In this study we show that for the type of noise commonly observed in sensory systems, whose variance scales with the mean signal, sensory neurons should selectively divide their input signals by their predictions, suppressing ambiguous cues while amplifying others. Any change in the stimulus context alters which inputs are suppressed, leading to a deep dynamic reshaping of neural receptive fields going far beyond simple surround suppression. Paradoxically, these highly variable receptive fields go alongside and are in fact required for an invariant representation of external sensory features. In addition to offering a normative account of context-dependent changes in sensory responses, perceptual inference in the presence of signal-dependent noise accounts for ubiquitous features of sensory neurons such as divisive normalization, gain control and contrast dependent temporal dynamics.},
author = {Chalk, Matthew J and Masset, Paul and Gutkin, Boris and Denève, Sophie},
issn = {1553734X},
journal = {PLoS Computational Biology},
number = {6},
publisher = {Public Library of Science},
title = {{Sensory noise predicts divisive reshaping of receptive fields}},
doi = {10.1371/journal.pcbi.1005582},
volume = {13},
year = {2017},
}
@inproceedings{683,
abstract = {Given a triangulation of a point set in the plane, a flip deletes an edge e whose removal leaves a convex quadrilateral, and replaces e by the opposite diagonal of the quadrilateral. It is well known that any triangulation of a point set can be reconfigured to any other triangulation by some sequence of flips. We explore this question in the setting where each edge of a triangulation has a label, and a flip transfers the label of the removed edge to the new edge. It is not true that every labelled triangulation of a point set can be reconfigured to every other labelled triangulation via a sequence of flips, but we characterize when this is possible. There is an obvious necessary condition: for each label l, if edge e has label l in the first triangulation and edge f has label l in the second triangulation, then there must be some sequence of flips that moves label l from e to f, ignoring all other labels. Bose, Lubiw, Pathak and Verdonschot formulated the Orbit Conjecture, which states that this necessary condition is also sufficient, i.e. that all labels can be simultaneously mapped to their destination if and only if each label individually can be mapped to its destination. We prove this conjecture. Furthermore, we give a polynomial-time algorithm to find a sequence of flips to reconfigure one labelled triangulation to another, if such a sequence exists, and we prove an upper bound of $O(n^7)$ on the length of the flip sequence. Our proof uses the topological result that the sets of pairwise non-crossing edges on a planar point set form a simplicial complex that is homeomorphic to a high-dimensional ball (this follows from a result of Orden and Santos; we give a different proof based on a shelling argument). The dual cell complex of this simplicial ball, called the flip complex, has the usual flip graph as its 1-skeleton. We use properties of the 2-skeleton of the flip complex to prove the Orbit Conjecture.},
author = {Lubiw, Anna and Masárová, Zuzana and Wagner, Uli},
location = {Brisbane, Australia},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{A proof of the orbit conjecture for flipping edge labelled triangulations}},
doi = {10.4230/LIPIcs.SoCG.2017.49},
volume = {77},
year = {2017},
}
@article{684,
abstract = {We generalize winning conditions in two-player games by adding a structural acceptance condition called obligations. Obligations are orthogonal to the linear winning conditions that define whether a play is winning. Obligations are a declaration that player 0 can achieve a certain value from a configuration. If the obligation is met, the value of that configuration for player 0 is 1. We define the value in such games and show that obligation games are determined. For Markov chains with Borel objectives and obligations, and finite turn-based stochastic parity games with obligations we give an alternative and simpler characterization of the value function. Based on this simpler definition we show that the decision problem of winning finite turn-based stochastic parity games with obligations is in NP∩co-NP. We also show that obligation games provide a game framework for reasoning about p-automata. © 2017 The Association for Symbolic Logic.},
author = {Chatterjee, Krishnendu and Piterman, Nir},
issn = {1943-5886},
journal = {Journal of Symbolic Logic},
number = {2},
pages = {420 -- 452},
publisher = {Cambridge University Press},
title = {{Obligation blackwell games and p-automata}},
doi = {10.1017/jsl.2016.71},
volume = {82},
year = {2017},
}
@inproceedings{6841,
abstract = {In classical machine learning, regression is treated as a black box process of identifying a suitable function from a hypothesis set without attempting to gain insight into the mechanism connecting inputs and outputs. In the natural sciences, however, finding an interpretable function for a phenomenon is the prime goal as it allows to understand and generalize results. This paper proposes a novel type of function learning network, called equation learner (EQL), that can learn analytical expressions and is able to extrapolate to unseen domains. It is implemented as an end-to-end differentiable feed-forward network and allows for efficient gradient based training. Due to sparsity regularization concise interpretable expressions can be obtained. Often the true underlying source expression is identified.},
author = {Martius, Georg S and Lampert, Christoph},
booktitle = {5th International Conference on Learning Representations, ICLR 2017 - Workshop Track Proceedings},
location = {Toulon, France},
publisher = {International Conference on Learning Representations},
title = {{Extrapolation and learning equations}},
year = {2017},
}
@article{685,
abstract = {By applying methods and principles from the physical sciences to biological problems, D'Arcy Thompson's On Growth and Form demonstrated how mathematical reasoning reveals elegant, simple explanations for seemingly complex processes. This has had a profound influence on subsequent generations of developmental biologists. We discuss how this influence can be traced through twentieth century morphologists, embryologists and theoreticians to current research that explores the molecular and cellular mechanisms of tissue growth and patterning, including our own studies of the vertebrate neural tube.},
author = {Briscoe, James and Kicheva, Anna},
issn = {09254773},
journal = {Mechanisms of Development},
pages = {26--31},
publisher = {Elsevier},
title = {{The physics of development 100 years after D'Arcy Thompson's “on growth and form”}},
doi = {10.1016/j.mod.2017.03.005},
volume = {145},
year = {2017},
}
@article{686,
abstract = {Tissues are thought to behave like fluids with a given surface tension. Differences in tissue surface tension (TST) have been proposed to trigger cell sorting and tissue envelopment. D'Arcy Thompson in his seminal book ‘On Growth and Form’ has introduced this concept of differential TST as a key physical mechanism dictating tissue formation and organization within the developing organism. Over the past century, many studies have picked up the concept of differential TST and analyzed the role and cell biological basis of TST in development, underlining the importance and influence of this concept in developmental biology.},
author = {Heisenberg, Carl-Philipp J},
issn = {09254773},
journal = {Mechanisms of Development},
pages = {32--37},
publisher = {Elsevier},
title = {{D'Arcy Thompson's ‘on growth and form’: From soap bubbles to tissue self organization}},
doi = {10.1016/j.mod.2017.03.006},
volume = {145},
year = {2017},
}
@article{687,
abstract = {Pursuing the similarity between the Kontsevich-Soibelman construction of the cohomological Hall algebra (CoHA) of BPS states and Lusztig's construction of canonical bases for quantum enveloping algebras, and the similarity between the integrality conjecture for motivic Donaldson-Thomas invariants and the PBW theorem for quantum enveloping algebras, we build a coproduct on the CoHA associated to a quiver with potential. We also prove a cohomological dimensional reduction theorem, further linking a special class of CoHAs with Yangians, and explaining how to connect the study of character varieties with the study of CoHAs.},
author = {Davison, Ben},
issn = {00335606},
journal = {Quarterly Journal of Mathematics},
number = {2},
pages = {635--703},
publisher = {Oxford University Press},
title = {{The critical CoHA of a quiver with potential}},
doi = {10.1093/qmath/haw053},
volume = {68},
year = {2017},
}
@inproceedings{688,
abstract = {We show that the framework of topological data analysis can be extended from metrics to general Bregman divergences, widening the scope of possible applications. Examples are the Kullback--Leibler divergence, which is commonly used for comparing text and images, and the Itakura--Saito divergence, popular for speech and sound. In particular, we prove that appropriately generalized Čech and Delaunay (alpha) complexes capture the correct homotopy type, namely that of the corresponding union of Bregman balls. Consequently, their filtrations give the correct persistence diagram, namely the one generated by the uniformly growing Bregman balls. Moreover, we show that unlike the metric setting, the filtration of Vietoris-Rips complexes may fail to approximate the persistence diagram. We propose algorithms to compute the thus generalized Čech, Vietoris-Rips and Delaunay complexes and experimentally test their efficiency. Lastly, we explain their surprisingly good performance by making a connection with discrete Morse theory.},
author = {Edelsbrunner, Herbert and Wagner, Hubert},
issn = {18688969},
location = {Brisbane, Australia},
pages = {39:1--39:16},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Topological data analysis with Bregman divergences}},
doi = {10.4230/LIPIcs.SoCG.2017.39},
volume = {77},
year = {2017},
}
@article{689,
abstract = {Rett syndrome modeling in monkey mirrors the human disorder.},
author = {Novarino, Gaia},
issn = {19466234},
journal = {Science Translational Medicine},
number = {393},
publisher = {American Association for the Advancement of Science},
title = {{Rett syndrome modeling goes simian}},
doi = {10.1126/scitranslmed.aan8196},
volume = {9},
year = {2017},
}
@article{693,
abstract = {Many central synapses contain a single presynaptic active zone and a single postsynaptic density. Vesicular release statistics at such “simple synapses” indicate that they contain a small complement of docking sites where vesicles repetitively dock and fuse. In this work, we investigate functional and morphological aspects of docking sites at simple synapses made between cerebellar parallel fibers and molecular layer interneurons. Using immunogold labeling of SDS-treated freeze-fracture replicas, we find that Cav2.1 channels form several clusters per active zone with about nine channels per cluster. The mean value and range of intersynaptic variation are similar for Cav2.1 cluster numbers and for functional estimates of docking-site numbers obtained from the maximum numbers of released vesicles per action potential. Both numbers grow in relation with synaptic size and decrease by a similar extent with age between 2 wk and 4 wk postnatal. Thus, the mean docking-site numbers were 3.15 at 2 wk (range: 1–10) and 2.03 at 4 wk (range: 1–4), whereas the mean numbers of Cav2.1 clusters were 2.84 at 2 wk (range: 1–8) and 2.37 at 4 wk (range: 1–5). These changes were accompanied by decreases of miniature current amplitude (from 93 pA to 56 pA), active-zone surface area (from 0.0427 μm2 to 0.0234 μm2), and initial success rate (from 0.609 to 0.353), indicating a tightening of synaptic transmission with development. Altogether, these results suggest a close correspondence between the number of functionally defined vesicular docking sites and that of clusters of voltage-gated calcium channels. },
author = {Miki, Takafumi and Kaufmann, Walter and Malagon, Gerardo and Gomez, Laura and Tabuchi, Katsuhiko and Watanabe, Masahiko and Shigemoto, Ryuichi and Marty, Alain},
issn = {00278424},
journal = {Proceedings of the National Academy of Sciences},
number = {26},
pages = {E5246--E5255},
publisher = {National Academy of Sciences},
title = {{Numbers of presynaptic Ca2+ channel clusters match those of functionally defined vesicular docking sites in single central synapses}},
doi = {10.1073/pnas.1704470114},
volume = {114},
year = {2017},
}
@inproceedings{6932,
abstract = {LCLs or locally checkable labelling problems (e.g. maximal independent set, maximal matching, and vertex colouring) in the LOCAL model of computation are very well-understood in cycles (toroidal 1-dimensional grids): every problem has a complexity of O(1), Θ(log* n), or Θ(n), and the design of optimal algorithms can be fully automated. This work develops the complexity theory of LCL problems for toroidal 2-dimensional grids. The complexity classes are the same as in the 1-dimensional case: O(1), Θ(log* n), and Θ(n). However, given an LCL problem it is undecidable whether its complexity is Θ(log* n) or Θ(n) in 2-dimensional grids.
Nevertheless, if we correctly guess that the complexity of a problem is Θ(log* n), we can completely automate the design of optimal algorithms. For any problem we can find an algorithm that is of a normal form A' o Sk, where A' is a finite function, Sk is an algorithm for finding a maximal independent set in kth power of the grid, and k is a constant.
Finally, partially with the help of automated design tools, we classify the complexity of several concrete LCL problems related to colourings and orientations.},
author = {Brandt, Sebastian and Hirvonen, Juho and Korhonen, Janne H. and Lempiäinen, Tuomo and Östergård, Patric R.J. and Purcell, Christopher and Rybicki, Joel and Suomela, Jukka and Uznański, Przemysław},
isbn = {9781450349925},
location = {Washington, DC, United States},
pages = {101--110},
publisher = {ACM Press},
title = {{LCL problems on grids}},
doi = {10.1145/3087801.3087833},
year = {2017},
}
@article{694,
abstract = {A change regarding the extent of adhesion - hereafter referred to as adhesion plasticity - between adhesive and less-adhesive states of mammalian cells is important for their behavior. To investigate adhesion plasticity, we have selected a stable isogenic subpopulation of human MDA-MB-468 breast carcinoma cells growing in suspension. These suspension cells are unable to re-adhere to various matrices or to contract three-dimensional collagen lattices. By using transcriptome analysis, we identified the focal adhesion protein tensin3 (Tns3) as a determinant of adhesion plasticity. Tns3 is strongly reduced at mRNA and protein levels in suspension cells. Furthermore, by transiently challenging breast cancer cells to grow under non-adherent conditions markedly reduces Tns3 protein expression, which is regained upon re-adhesion. Stable knockdown of Tns3 in parental MDA-MB-468 cells results in defective adhesion, spreading and migration. Tns3-knockdown cells display impaired structure and dynamics of focal adhesion complexes as determined by immunostaining. Restoration of Tns3 protein expression in suspension cells partially rescues adhesion and focal contact composition. Our work identifies Tns3 as a crucial focal adhesion component regulated by, and functionally contributing to, the switch between adhesive and non-adhesive states in MDA-MB-468 cancer cells.},
author = {Veß, Astrid and Blache, Ulrich and Leitner, Laura and Kurz, Angela and Ehrenpfordt, Anja and Sixt, Michael K and Posern, Guido},
issn = {00219533},
journal = {Journal of Cell Science},
number = {13},
pages = {2172--2184},
publisher = {Company of Biologists},
title = {{A dual phenotype of MDA MB 468 cancer cells reveals mutual regulation of tensin3 and adhesion plasticity}},
doi = {10.1242/jcs.200899},
volume = {130},
year = {2017},
}
@article{9065,
abstract = {Magnetic anisotropy in strontium iridate (Sr2IrO4) is found to be large because of the strong spin-orbit interactions. In our work, we studied the in-plane magnetic anisotropy of Sr2IrO4 and traced the anisotropic exchange interactions between the isospins in the crystal. The magnetic-field-dependent torque τ(H) showed a prominent transition from the canted antiferromagnetic state to the weak ferromagnetic (WFM) state. A comprehensive analysis was conducted to examine the isotropic and anisotropic regimes and probe the easy magnetization axis along the a b plane. The angle-dependent torque τ(θ) revealed a deviation from the sinusoidal behavior, and small differences in hysteresis were observed around 0° and 90° in the low-magnetic-field regime. This indicates that the orientation of the easy axis of the FM component is along the b axis, where the antiferromagnetic to WFM spin-flop transition occurs. We compared the coefficients of the magnetic susceptibility tensors and captured the anisotropy of the material. The in-plane τ(θ) revealed a tendency toward isotropic behavior for fields with values above the field value of the WFM transition.},
author = {Nauman, Muhammad and Hong, Yunjeong and Hussain, Tayyaba and Seo, M. S. and Park, S. Y. and Lee, N. and Choi, Y. J. and Kang, Woun and Jo, Younjung},
issn = {2469-9950},
journal = {Physical Review B},
number = {15},
publisher = {American Physical Society},
title = {{In-plane magnetic anisotropy in strontium iridate Sr2IrO4}},
doi = {10.1103/physrevb.96.155102},
volume = {96},
year = {2017},
}
@article{909,
abstract = {We study the lengths of curves passing through a fixed number of points on the boundary of a convex shape in the plane. We show that, for any convex shape K, there exist four points on the boundary of K such that the length of any curve passing through these points is at least half of the perimeter of K. It is also shown that the same statement does not remain valid with the additional constraint that the points are extreme points of K. Moreover, the factor ½ cannot be achieved with any fixed number of extreme points. We conclude the paper with a few other inequalities related to the perimeter of a convex shape.},
author = {Akopyan, Arseniy and Vysotsky, Vladislav},
issn = {00029890},
journal = {The American Mathematical Monthly},
number = {7},
pages = {588--596},
publisher = {Mathematical Association of America},
title = {{On the lengths of curves passing through boundary points of a planar convex shape}},
doi = {10.4169/amer.math.monthly.124.7.588},
volume = {124},
year = {2017},
}
@article{910,
abstract = {Frequency-independent selection is generally considered as a force that acts to reduce the genetic variation in evolving populations, yet rigorous arguments for this idea are scarce. When selection fluctuates in time, it is unclear whether frequency-independent selection may maintain genetic polymorphism without invoking additional mechanisms. We show that constant frequency-independent selection with arbitrary epistasis on a well-mixed haploid population eliminates genetic variation if we assume linkage equilibrium between alleles. To this end, we introduce the notion of frequency-independent selection at the level of alleles, which is sufficient to prove our claim and contains the notion of frequency-independent selection on haploids. When selection and recombination are weak but of the same order, there may be strong linkage disequilibrium; numerical calculations show that stable equilibria are highly unlikely. Using the example of a diallelic two-locus model, we then demonstrate that frequency-independent selection that fluctuates in time can maintain stable polymorphism if linkage disequilibrium changes its sign periodically. We put our findings in the context of results from the existing literature and point out those scenarios in which the possible role of frequency-independent selection in maintaining genetic variation remains unclear.
},
author = {Novak, Sebastian and Barton, Nicholas H},
journal = {Genetics},
number = {2},
pages = {653--668},
publisher = {Genetics Society of America},
title = {{When does frequency-independent selection maintain genetic variation?}},
doi = {10.1534/genetics.117.300129},
volume = {207},
year = {2017},
}
@inproceedings{911,
abstract = {We develop a probabilistic technique for colorizing grayscale natural images. In light of the intrinsic uncertainty of this task, the proposed probabilistic framework has numerous desirable properties. In particular, our model is able to produce multiple plausible and vivid colorizations for a given grayscale image and is one of the first colorization models to provide a proper stochastic sampling scheme. Moreover, our training procedure is supported by a rigorous theoretical framework that does not require any ad hoc heuristics and allows for efficient modeling and learning of the joint pixel color distribution. We demonstrate strong quantitative and qualitative experimental results on the CIFAR-10 dataset and the challenging ILSVRC 2012 dataset.},
author = {Royer, Amélie and Kolesnikov, Alexander and Lampert, Christoph},
location = {London, United Kingdom},
pages = {85.1--85.12},
publisher = {BMVA Press},
title = {{Probabilistic image colorization}},
doi = {10.5244/c.31.85},
year = {2017},
}
@article{912,
abstract = {We consider a many-body system of fermionic atoms interacting via a local pair potential and subject to an external potential within the framework of Bardeen-Cooper-Schrieffer (BCS) theory. We measure the free energy of the whole sample with respect to the free energy of a reference state which allows us to define a BCS functional with boundary conditions at infinity. Our main result is a lower bound for this energy functional in terms of expressions that typically appear in Ginzburg-Landau functionals.
},
author = {Deuchert, Andreas},
issn = {00222488},
journal = {Journal of Mathematical Physics},
number = {8},
publisher = {AIP},
title = {{A lower bound for the BCS functional with boundary conditions at infinity}},
doi = {10.1063/1.4996580},
volume = {58},
year = {2017},
}
@article{9137,
abstract = {Pools of air cooled by partial rain evaporation span up to several hundreds of kilometers in nature and typically last less than 1 day, ultimately losing their identity to the large-scale flow. These fundamentally differ in character from the radiatively-driven dry pools defining convective aggregation. Advancement in remote sensing and in computer capabilities has promoted exploration of how precipitation-induced cold pool processes modify the convective spectrum and life cycle. This contribution surveys current understanding of such cold pools over the tropical and subtropical oceans. In shallow convection with low rain rates, the cold pools moisten, preserving the near-surface equivalent potential temperature or increasing it if the surface moisture fluxes cannot ventilate beyond the new surface layer; both conditions indicate downdraft origin air from within the boundary layer. When rain rates exceed ∼ 2 mm h−1, convective-scale downdrafts can bring down drier air of lower equivalent potential temperature from above the boundary layer. The resulting density currents facilitate the lifting of locally thermodynamically favorable air and can impose an arc-shaped mesoscale cloud organization. This organization allows clouds capable of reaching 4–5 km within otherwise dry environments. These are more commonly observed in the northern hemisphere trade wind regime, where the flow to the intertropical convergence zone is unimpeded by the equator. Their near-surface air properties share much with those shown from cold pools sampled in the equatorial Indian Ocean. Cold pools are most effective at influencing the mesoscale organization when the atmosphere is moist in the lower free troposphere and dry above, suggesting an optimal range of water vapor paths. Outstanding questions on the relationship between cold pools, their accompanying moisture distribution and cloud cover are detailed further. 
Near-surface water vapor rings are documented in one model inside but near the cold pool edge; these are not consistent with observations, but do improve with smaller horizontal grid spacings.},
author = {Zuidema, Paquita and Torri, Giuseppe and Muller, Caroline J and Chandra, Arunchandra},
issn = {0169-3298},
journal = {Surveys in Geophysics},
keywords = {Geochemistry and Petrology, Geophysics},
number = {6},
pages = {1283--1305},
publisher = {Springer Nature},
title = {{A survey of precipitation-induced atmospheric cold pools over oceans and their interactions with the larger-scale environment}},
doi = {10.1007/s10712-017-9447-x},
volume = {38},
year = {2017},
}
@article{9138,
abstract = {Convective self-aggregation, the spontaneous organization of initially scattered convection into isolated convective clusters despite spatially homogeneous boundary conditions and forcing, was first recognized and studied in idealized numerical simulations. While there is a rich history of observational work on convective clustering and organization, there have been only a few studies that have analyzed observations to look specifically for processes related to self-aggregation in models. Here we review observational work in both of these categories and motivate the need for more of this work. We acknowledge that self-aggregation may appear to be far-removed from observed convective organization in terms of time scales, initial conditions, initiation processes, and mean state extremes, but we argue that these differences vary greatly across the diverse range of model simulations in the literature and that these comparisons are already offering important insights into real tropical phenomena. Some preliminary new findings are presented, including results showing that a self-aggregation simulation with square geometry has too broad distribution of humidity and is too dry in the driest regions when compared with radiosonde records from Nauru, while an elongated channel simulation has realistic representations of atmospheric humidity and its variability. We discuss recent work increasing our understanding of how organized convection and climate change may interact, and how model discrepancies related to this question are prompting interest in observational comparisons. We also propose possible future directions for observational work related to convective aggregation, including novel satellite approaches and a ground-based observational network.},
author = {Holloway, Christopher E. and Wing, Allison A. and Bony, Sandrine and Muller, Caroline J and Masunaga, Hirohiko and L’Ecuyer, Tristan S. and Turner, David D. and Zuidema, Paquita},
issn = {0169-3298},
journal = {Surveys in Geophysics},
keywords = {Geochemistry and Petrology, Geophysics},
number = {6},
pages = {1199--1236},
publisher = {Springer Nature},
title = {{Observing convective aggregation}},
doi = {10.1007/s10712-017-9419-1},
volume = {38},
year = {2017},
}
@article{9139,
abstract = {Organized convection in the tropics occurs across a range of spatial and temporal scales and strongly influences cloud cover and humidity. One mode of organization found is “self-aggregation,” in which moist convection spontaneously organizes into one or several isolated clusters despite spatially homogeneous boundary conditions and forcing. Self-aggregation is driven by interactions between clouds, moisture, radiation, surface fluxes, and circulation, and occurs in a wide variety of idealized simulations of radiative–convective equilibrium. Here we provide a review of convective self-aggregation in numerical simulations, including its character, causes, and effects. We describe the evolution of self-aggregation including its time and length scales and the physical mechanisms leading to its triggering and maintenance, and we also discuss possible links to climate and climate change.},
author = {Wing, Allison A. and Emanuel, Kerry and Holloway, Christopher E. and Muller, Caroline J},
issn = {0169-3298},
journal = {Surveys in Geophysics},
keywords = {Geochemistry and Petrology, Geophysics},
number = {6},
pages = {1173--1197},
publisher = {Springer Nature},
title = {{Convective self-aggregation in numerical simulations: A review}},
doi = {10.1007/s10712-017-9408-4},
volume = {38},
year = {2017},
}
@inproceedings{915,
abstract = {We propose a dual decomposition and linear program relaxation of the NP-hard minimum cost multicut problem. Unlike other polyhedral relaxations of the multicut polytope, it is amenable to efficient optimization by message passing. Like other polyhedral relaxations, it can be tightened efficiently by cutting planes. We define an algorithm that alternates between message passing and efficient separation of cycle- and odd-wheel inequalities. This algorithm is more efficient than state-of-the-art algorithms based on linear programming, including algorithms written in the framework of leading commercial software, as we show in experiments with large instances of the problem from applications in computer vision, biomedical image analysis and data mining.},
author = {Swoboda, Paul and Andres, Bjoern},
isbn = {978-153860457-1},
location = {Honolulu, HI, United States},
pages = {4990--4999},
publisher = {IEEE},
title = {{A message passing algorithm for the minimum cost multicut problem}},
doi = {10.1109/CVPR.2017.530},
volume = {2017},
year = {2017},
}
@article{9152,
abstract = {Previous numerical studies of the dissipation of internal tides in idealized settings suggest the existence of a critical latitude (~29°) where dissipation is enhanced. But observations only indicate a modest enhancement at this latitude. To resolve this difference between observational and numerical results, the authors study the latitudinal dependence of internal tides’ dissipation in more realistic conditions. In particular, the ocean is not a quiescent medium; the presence of large-scale currents or mesoscale eddies can impact the propagation and dissipation of internal tides. This paper investigates the impact of a weak background mean current in numerical simulations. The authors focus on the local dissipation of high spatial mode internal waves near their generation site. The vertical profile of dissipation and its variation with latitude without the mean current are consistent with earlier studies. But adding a weak mean current has a major impact on the latitudinal distribution of dissipation. The peak at the critical latitude disappears, and the dissipation is closer to a constant, albeit with two weak peaks at ~25° and ~35° latitude. This disappearance results from the Doppler shift of the internal tides’ frequency, which hinders the nonlinear transfer of energy to small-scale secondary waves via the parametric subharmonic instability (PSI). The new two weak peaks correspond to the Doppler-shifted critical latitudes of the left- and right-propagating waves. The results are confirmed in simulations with simple sinusoidal topography. Thus, although nonlinear transfers via PSI are efficient at dissipating internal tides, the exact location of the dissipation is sensitive to large-scale oceanic conditions.},
author = {Richet, O. and Muller, Caroline J and Chomaz, J.-M.},
issn = {0022-3670},
journal = {Journal of Physical Oceanography},
keywords = {Oceanography},
number = {6},
pages = {1457--1472},
publisher = {American Meteorological Society},
title = {{Impact of a mean current on the internal tide energy dissipation at the critical latitude}},
doi = {10.1175/jpo-d-16-0197.1},
volume = {47},
year = {2017},
}
@inproceedings{916,
abstract = {We study the quadratic assignment problem, in computer vision also known as graph matching. Two leading solvers for this problem optimize the Lagrange decomposition duals with sub-gradient and dual ascent (also known as message passing) updates. We explore this direction further and propose several additional Lagrangean relaxations of the graph matching problem along with corresponding algorithms, which are all based on a common dual ascent framework. Our extensive empirical evaluation gives several theoretical insights and suggests a new state-of-the-art anytime solver for the considered problem. Our improvement over state-of-the-art is particularly visible on a new dataset with large-scale sparse problem instances containing more than 500 graph nodes each.},
author = {Swoboda, Paul and Rother, Carsten and Abu Alhaija, Hassan and Kainmueller, Dagmar and Savchynskyy, Bogdan},
isbn = {978-153860457-1},
location = {Honolulu, HI, United States},
pages = {7062--7071},
publisher = {IEEE},
title = {{A study of lagrangean decompositions and dual ascent solvers for graph matching}},
doi = {10.1109/CVPR.2017.747},
volume = {2017},
year = {2017},
}
@article{9165,
abstract = {Advances in colloidal synthesis allow for the design of particles with controlled patches. This article reviews routes towards colloidal locomotion, where energy is consumed and converted into motion, and its implementation with active patchy particles. A special emphasis is given to phoretic swimmers, where the self-propulsion originates from an interfacial phenomenon, raising experimental challenges and opening up opportunities for particles with controlled anisotropic surface chemistry and novel behaviors.},
author = {Aubret, A. and Ramananarivo, S. and Palacci, Jérémie A},
issn = {1359-0294},
journal = {Current Opinion in Colloid & Interface Science},
pages = {81--89},
publisher = {Elsevier},
title = {{Eppur si muove, and yet it moves: Patchy (phoretic) swimmers}},
doi = {10.1016/j.cocis.2017.05.007},
volume = {30},
year = {2017},
}
@inproceedings{917,
abstract = {We propose a general dual ascent framework for Lagrangean decomposition of combinatorial problems. Although methods of this type have shown their efficiency for a number of problems, so far there was no general algorithm applicable to multiple problem types. In this work, we propose such a general algorithm. It depends on several parameters, which can be used to optimize its performance in each particular setting. We demonstrate efficacy of our method on graph matching and multicut problems, where it outperforms state-of-the-art solvers including those based on subgradient optimization and off-the-shelf linear programming solvers.},
author = {Swoboda, Paul and Kuske, Jan and Savchynskyy, Bogdan},
isbn = {978-153860457-1},
location = {Honolulu, HI, United States},
pages = {4950--4960},
publisher = {IEEE},
title = {{A dual ascent framework for Lagrangean decomposition of combinatorial problems}},
doi = {10.1109/CVPR.2017.526},
volume = {2017},
year = {2017},
}
@article{9190,
abstract = {Plant meristems carry pools of continuously active stem cells, whose activity is controlled by developmental and environmental signals. After stem cell division, daughter cells that exit the stem cell domain acquire transit amplifying cell identity before they are incorporated into organs and differentiate. In this study, we used an integrated approach to elucidate the role of HECATE (HEC) genes in regulating developmental trajectories of shoot stem cells in Arabidopsis thaliana. Our work reveals that HEC function stabilizes cell fate in distinct zones of the shoot meristem thereby controlling the spatio-temporal dynamics of stem cell differentiation. Importantly, this activity is concomitant with the local modulation of cellular responses to cytokinin and auxin, two key phytohormones regulating cell behaviour. Mechanistically, we show that HEC factors transcriptionally control and physically interact with MONOPTEROS (MP), a key regulator of auxin signalling, and modulate the autocatalytic stabilization of auxin signalling output.},
author = {Gaillochet, Christophe and Stiehl, Thomas and Wenzl, Christian and Ripoll, Juan-José and Bailey-Steinitz, Lindsay J and Li, Lanxin and Pfeiffer, Anne and Miotk, Andrej and Hakenjos, Jana P and Forner, Joachim and Yanofsky, Martin F and Marciniak-Czochra, Anna and Lohmann, Jan U},
issn = {2050-084X},
journal = {eLife},
publisher = {eLife Sciences Publications},
title = {{Control of plant cell fate transitions by transcriptional and hormonal signals}},
doi = {10.7554/elife.30135},
volume = {6},
year = {2017},
}
@article{1173,
abstract = {We introduce the Voronoi functional of a triangulation of a finite set of points in the Euclidean plane and prove that among all geometric triangulations of the point set, the Delaunay triangulation maximizes the functional. This result neither extends to topological triangulations in the plane nor to geometric triangulations in three and higher dimensions.},
author = {Edelsbrunner, Herbert and Glazyrin, Alexey and Musin, Oleg and Nikitenko, Anton},
issn = {02099683},
journal = {Combinatorica},
number = {5},
pages = {887--910},
publisher = {Springer},
title = {{The Voronoi functional is maximized by the Delaunay triangulation in the plane}},
doi = {10.1007/s00493-016-3308-y},
volume = {37},
year = {2017},
}
@inproceedings{1174,
abstract = {Security of cryptographic applications is typically defined by security games. The adversary, within certain resources, cannot win with probability much better than 0 (for unpredictability applications, like one-way functions) or much better than 1/2 (indistinguishability applications for instance encryption schemes). In so called squared-friendly applications the winning probability of the adversary, for different values of the application secret randomness, is not only close to 0 or 1/2 on average, but also concentrated in the sense that its second central moment is small. The class of squared-friendly applications, which contains all unpredictability applications and many indistinguishability applications, is particularly important for key derivation. Barak et al. observed that for square-friendly applications one can beat the "RT-bound", extracting secure keys with significantly smaller entropy loss. In turn Dodis and Yu showed that in squared-friendly applications one can directly use a "weak" key, which has only high entropy, as a secure key. In this paper we give sharp lower bounds on square security assuming security for "weak" keys. We show that any application which is either (a) secure with weak keys or (b) allows for entropy savings for keys derived by universal hashing, must be square-friendly. Quantitatively, our lower bounds match the positive results of Dodis and Yu and Barak et al. (TCC'13, CRYPTO'11) Hence, they can be understood as a general characterization of squared-friendly applications. While the positive results on squared-friendly applications were derived by one clever application of the Cauchy-Schwarz Inequality, for tight lower bounds we need more machinery. In our approach we use convex optimization techniques and some theory of circular matrices.},
author = {Skórski, Maciej},
issn = {1868-8969},
location = {Hannover, Germany},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Lower bounds on key derivation for square-friendly applications}},
doi = {10.4230/LIPIcs.STACS.2017.57},
volume = {66},
year = {2017},
}
@inproceedings{1175,
abstract = {We study space complexity and time-space trade-offs with a focus not on peak memory usage but on overall memory consumption throughout the computation. Such a cumulative space measure was introduced for the computational model of parallel black pebbling by [Alwen and Serbinenko ’15] as a tool for obtaining results in cryptography. We consider instead the non- deterministic black-white pebble game and prove optimal cumulative space lower bounds and trade-offs, where in order to minimize pebbling time the space has to remain large during a significant fraction of the pebbling. We also initiate the study of cumulative space in proof complexity, an area where other space complexity measures have been extensively studied during the last 10–15 years. Using and extending the connection between proof complexity and pebble games in [Ben-Sasson and Nordström ’08, ’11] we obtain several strong cumulative space results for (even parallel versions of) the resolution proof system, and outline some possible future directions of study of this, in our opinion, natural and interesting space measure.},
author = {Alwen, Joel F and De Rezende, Susanna and Nordström, Jakob and Vinyals, Marc},
editor = {Papadimitriou, Christos},
issn = {1868-8969},
location = {Berkeley, CA, United States},
pages = {38:1--38:21},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Cumulative space in black-white pebbling and resolution}},
doi = {10.4230/LIPIcs.ITCS.2017.38},
volume = {67},
year = {2017},
}
@inproceedings{1176,
abstract = {The algorithm Argon2i-B of Biryukov, Dinu and Khovratovich is currently being considered by the IRTF (Internet Research Task Force) as a new de-facto standard for password hashing. An older version (Argon2i-A) of the same algorithm was chosen as the winner of the recent Password Hashing Competition. An important competitor to Argon2i-B is the recently introduced Balloon Hashing (BH) algorithm of Corrigan-Gibs, Boneh and Schechter. A key security desiderata for any such algorithm is that evaluating it (even using a custom device) requires a large amount of memory amortized across multiple instances. Alwen and Blocki (CRYPTO 2016) introduced a class of theoretical attacks against Argon2i-A and BH. While these attacks yield large asymptotic reductions in the amount of memory, it was not, a priori, clear if (1) they can be extended to the newer Argon2i-B, (2) the attacks are effective on any algorithm for practical parameter ranges (e.g., 1GB of memory) and (3) if they can be effectively instantiated against any algorithm under realistic hardware constrains. In this work we answer all three of these questions in the affirmative for all three algorithms. This is also the first work to analyze the security of Argon2i-B. In more detail, we extend the theoretical attacks of Alwen and Blocki (CRYPTO 2016) to the recent Argon2i-B proposal demonstrating severe asymptotic deficiencies in its security. Next we introduce several novel heuristics for improving the attack's concrete memory efficiency even when on-chip memory bandwidth is bounded. We then simulate our attacks on randomly sampled Argon2i-A, Argon2i-B and BH instances and measure the resulting memory consumption for various practical parameter ranges and for a variety of upperbounds on the amount of parallelism available to the attacker. Finally we describe, implement, and test a new heuristic for applying the Alwen-Blocki attack to functions employing a technique developed by Corrigan-Gibs et al. 
for improving concrete security of memory-hard functions. We analyze the collected data and show the effects various parameters have on the memory consumption of the attack. In particular, we can draw several interesting conclusions about the level of security provided by these functions. · For the Alwen-Blocki attack to fail against practical memory parameters, Argon2i-B must be instantiated with more than 10 passes on memory - beyond the "paranoid" parameter setting in the current IRTF proposal. · The technique of Corrigan-Gibs for improving security can also be overcome by the Alwen-Blocki attack under realistic hardware constraints. · On a positive note, both the asymptotic and concrete security of Argon2i-B seem to improve on that of Argon2i-A.},
author = {Alwen, Joel F and Blocki, Jeremiah},
isbn = {978-1-5090-5761-0},
location = {Paris, France},
publisher = {IEEE},
title = {{Towards practical attacks on Argon2i and balloon hashing}},
doi = {10.1109/EuroSP.2017.47},
year = {2017},
}
@inproceedings{1178,
abstract = {For any pair (X, Z) of correlated random variables we can think of Z as a randomized function of X. If the domain of Z is small, one can make this function computationally efficient by allowing it to be only approximately correct. In folklore this problem is known as simulating auxiliary inputs. This idea of simulating auxiliary information turns out to be a very useful tool, finding applications in complexity theory, cryptography, pseudorandomness and zero-knowledge. In this paper we revisit this problem, achieving the following results: (a) We present a novel boosting algorithm for constructing the simulator. This boosting proof is of independent interest, as it shows how to handle “negative mass” issues when constructing probability measures by shifting distinguishers in descent algorithms. Our technique essentially fixes the flaw in the TCC’14 paper “How to Fake Auxiliary Inputs”. (b) The complexity of our simulator is better than in previous works, including results derived from the uniform min-max theorem due to Vadhan and Zheng. To achieve $(s,\epsilon)$-indistinguishability we need the complexity $O(s \cdot 2^{5\ell} \epsilon^{-2})$ in time/circuit size, which improves previous bounds by a factor of $\epsilon^{-2}$. In particular, we get meaningful provable security for the EUROCRYPT’09 leakage-resilient stream cipher instantiated with a standard 256-bit block cipher.},
author = {Skórski, Maciej},
pages = {159--179},
publisher = {Springer},
title = {{Simulating auxiliary inputs, revisited}},
doi = {10.1007/978-3-662-53641-4_7},
volume = {9985},
year = {2017},
}
@article{1180,
abstract = {In this article we define an algebraic vertex of a generalized polyhedron and show that the set of algebraic vertices is the smallest set of points needed to define the polyhedron. We prove that the indicator function of a generalized polytope P is a linear combination of indicator functions of simplices whose vertices are algebraic vertices of P. We also show that the indicator function of any generalized polyhedron is a linear combination, with integer coefficients, of indicator functions of cones with apices at algebraic vertices and line-cones. The concept of an algebraic vertex is closely related to the Fourier–Laplace transform. We show that a point v is an algebraic vertex of a generalized polyhedron P if and only if the tangent cone of P, at v, has non-zero Fourier–Laplace transform.},
author = {Akopyan, Arseniy and Bárány, Imre and Robins, Sinai},
issn = {0001-8708},
journal = {Advances in Mathematics},
pages = {627--644},
publisher = {Academic Press},
title = {{Algebraic vertices of non-convex polyhedra}},
doi = {10.1016/j.aim.2016.12.026},
volume = {308},
year = {2017},
}
@article{1187,
abstract = {We construct efficient authentication protocols and message authentication codes (MACs) whose security can be reduced to the learning parity with noise (LPN) problem. Despite a large body of work—starting with the HB protocol of Hopper and Blum in 2001—until now it was not even known how to construct an efficient authentication protocol from LPN which is secure against man-in-the-middle attacks. A MAC implies such a (two-round) protocol.},
author = {Kiltz, Eike and Pietrzak, Krzysztof Z and Venturi, Daniele and Cash, David and Jain, Abhishek},
journal = {Journal of Cryptology},
number = {4},
pages = {1238--1275},
publisher = {Springer},
title = {{Efficient authentication from hard learning problems}},
doi = {10.1007/s00145-016-9247-3},
volume = {30},
year = {2017},
}
@article{1191,
abstract = {Variation in genotypes may be responsible for differences in dispersal rates, directional biases, and growth rates of individuals. These traits may favor certain genotypes and enhance their spatiotemporal spreading into areas occupied by the less advantageous genotypes. We study how these factors influence the speed of spreading in the case of two competing genotypes under the assumption that spatial variation of the total population is small compared to the spatial variation of the frequencies of the genotypes in the population. In that case, the dynamics of the frequency of one of the genotypes is approximately described by a generalized Fisher–Kolmogorov–Petrovskii–Piskunov (F–KPP) equation. This generalized F–KPP equation with (nonlinear) frequency-dependent diffusion and advection terms admits traveling wave solutions that characterize the invasion of the dominant genotype. Our existence results generalize the classical theory for traveling waves for the F–KPP with constant coefficients. Moreover, in the particular case of the quadratic (monostable) nonlinear growth–decay rate in the generalized F–KPP we study in detail the influence of the variance in diffusion and mean displacement rates of the two genotypes on the minimal wave propagation speed.},
author = {Kollár, Richard and Novak, Sebastian},
journal = {Bulletin of Mathematical Biology},
number = {3},
pages = {525--559},
publisher = {Springer},
title = {{Existence of traveling waves for the generalized F–KPP equation}},
doi = {10.1007/s11538-016-0244-3},
volume = {79},
year = {2017},
}
@inproceedings{1192,
abstract = {The main result of this paper is a generalization of the classical blossom algorithm for finding perfect matchings. Our algorithm can efficiently solve Boolean CSPs where each variable appears in exactly two constraints (we call it edge CSP) and all constraints are even Δ-matroid relations (represented by lists of tuples). As a consequence of this, we settle the complexity classification of planar Boolean CSPs started by Dvorak and Kupec. Knowing that edge CSP is tractable for even Δ-matroid constraints allows us to extend the tractability result to a larger class of Δ-matroids that includes many classes that were known to be tractable before, namely co-independent, compact, local and binary.},
author = {Kazda, Alexandr and Kolmogorov, Vladimir and Rolinek, Michal},
isbn = {978-1-61197-478-2},
location = {Barcelona, Spain},
pages = {307--326},
publisher = {SIAM},
title = {{Even delta-matroids and the complexity of planar Boolean CSPs}},
doi = {10.1137/1.9781611974782.20},
year = {2017},
}
@inproceedings{1194,
abstract = {Termination is one of the basic liveness properties, and we study the termination problem for probabilistic programs with real-valued variables. Previous works focused on the qualitative problem that asks whether an input program terminates with probability~1 (almost-sure termination). A powerful approach for this qualitative problem is the notion of ranking supermartingales with respect to a given set of invariants. The quantitative problem (probabilistic termination) asks for bounds on the termination probability. A fundamental and conceptual drawback of the existing approaches to address probabilistic termination is that even though the supermartingales consider the probabilistic behavior of the programs, the invariants are obtained completely ignoring the probabilistic aspect. In this work we address the probabilistic termination problem for linear-arithmetic probabilistic programs with nondeterminism. We define the notion of {\em stochastic invariants}, which are constraints along with a probability bound that the constraints hold. We introduce a concept of {\em repulsing supermartingales}. First, we show that repulsing supermartingales can be used to obtain bounds on the probability of the stochastic invariants. Second, we show the effectiveness of repulsing supermartingales in the following three ways: (1)~With a combination of ranking and repulsing supermartingales we can compute lower bounds on the probability of termination; (2)~repulsing supermartingales provide witnesses for refutation of almost-sure termination; and (3)~with a combination of ranking and repulsing supermartingales we can establish persistence properties of probabilistic programs. We also present results on related computational problems and an experimental evaluation of our approach on academic examples. },
author = {Chatterjee, Krishnendu and Novotny, Petr and Zikelic, Djordje},
issn = {0730-8566},
location = {Paris, France},
number = {1},
pages = {145--160},
publisher = {ACM},
title = {{Stochastic invariants for probabilistic termination}},
doi = {10.1145/3009837.3009873},
volume = {52},
year = {2017},
}
@article{1196,
abstract = {We define the model-measuring problem: given a model M and specification ϕ, what is the maximal distance ρ such that all models M' within distance ρ from M satisfy (or violate) ϕ. The model-measuring problem presupposes a distance function on models. We concentrate on automatic distance functions, which are defined by weighted automata. The model-measuring problem subsumes several generalizations of the classical model-checking problem, in particular, quantitative model-checking problems that measure the degree of satisfaction of a specification; robustness problems that measure how much a model can be perturbed without violating the specification; and parameter synthesis for hybrid systems. We show that for automatic distance functions, and (a) ω-regular linear-time, (b) ω-regular branching-time, and (c) hybrid specifications, the model-measuring problem can be solved. We use automata-theoretic model-checking methods for model measuring, replacing the emptiness question for word, tree, and hybrid automata by the optimal-value question for the weighted versions of these automata. For automata over words and trees, we consider weighted automata that accumulate weights by maximizing, summing, discounting, and limit averaging. For hybrid automata, we consider monotonic (parametric) hybrid automata, a hybrid counterpart of (discrete) weighted automata. We give several examples of using the model-measuring problem to compute various notions of robustness and quantitative satisfaction for temporal specifications. Further, we propose the modeling framework for model measuring to ease the specification and reduce the likelihood of errors in modeling. Finally, we present a variant of the model-measuring problem, called the model-repair problem. The model-repair problem applies to models that do not satisfy the specification; it can be used to derive restrictions, under which the model satisfies the specification, i.e., to repair the model.},
author = {Henzinger, Thomas A and Otop, Jan},
journal = {Nonlinear Analysis: Hybrid Systems},
pages = {166--190},
publisher = {Elsevier},
title = {{Model measuring for discrete and hybrid systems}},
doi = {10.1016/j.nahs.2016.09.001},
volume = {23},
year = {2017},
}
@article{1198,
abstract = {We consider a model of fermions interacting via point interactions, defined via a certain weighted Dirichlet form. While for two particles the interaction corresponds to infinite scattering length, the presence of further particles effectively decreases the interaction strength. We show that the model becomes trivial in the thermodynamic limit, in the sense that the free energy density at any given particle density and temperature agrees with the corresponding expression for non-interacting particles.},
author = {Moser, Thomas and Seiringer, Robert},
issn = {0377-9017},
journal = {Letters in Mathematical Physics},
number = {3},
pages = {533--552},
publisher = {Springer},
title = {{Triviality of a model of particles with point interactions in the thermodynamic limit}},
doi = {10.1007/s11005-016-0915-x},
volume = {107},
year = {2017},
}
@article{1207,
abstract = {The eigenvalue distribution of the sum of two large Hermitian matrices, when one of them is conjugated by a Haar distributed unitary matrix, is asymptotically given by the free convolution of their spectral distributions. We prove that this convergence also holds locally in the bulk of the spectrum, down to the optimal scales larger than the eigenvalue spacing. The corresponding eigenvectors are fully delocalized. Similar results hold for the sum of two real symmetric matrices, when one is conjugated by Haar orthogonal matrix.},
author = {Bao, Zhigang and Erdős, László and Schnelli, Kevin},
issn = {0010-3616},
journal = {Communications in Mathematical Physics},
number = {3},
pages = {947--990},
publisher = {Springer},
title = {{Local law of addition of random matrices on optimal scale}},
doi = {10.1007/s00220-016-2805-6},
volume = {349},
year = {2017},
}
@article{1208,
abstract = {We study parameter estimation in linear Gaussian covariance models, which are p-dimensional Gaussian models with linear constraints on the covariance matrix. Maximum likelihood estimation for this class of models leads to a non-convex optimization problem which typically has many local maxima. Using recent results on the asymptotic distribution of extreme eigenvalues of the Wishart distribution, we provide sufficient conditions for any hill climbing method to converge to the global maximum. Although we are primarily interested in the case in which n≫p, the proofs of our results utilize large sample asymptotic theory under the scheme n/p→γ>1. Remarkably, our numerical simulations indicate that our results remain valid for p as small as 2. An important consequence of this analysis is that, for sample sizes n≃14p, maximum likelihood estimation for linear Gaussian covariance models behaves as if it were a convex optimization problem. © 2016 The Royal Statistical Society and Blackwell Publishing Ltd.},
author = {Zwiernik, Piotr and Uhler, Caroline and Richards, Donald},
issn = {1369-7412},
journal = {Journal of the Royal Statistical Society. Series B: Statistical Methodology},
number = {4},
pages = {1269--1292},
publisher = {Wiley-Blackwell},
title = {{Maximum likelihood estimation for linear Gaussian covariance models}},
doi = {10.1111/rssb.12217},
volume = {79},
year = {2017},
}
@article{1211,
abstract = {Systems such as fluid flows in channels and pipes or the complex Ginzburg–Landau system, defined over periodic domains, exhibit both continuous symmetries, translational and rotational, as well as discrete symmetries under spatial reflections or complex conjugation. The simplest, and very common symmetry of this type is the equivariance of the defining equations under the orthogonal group O(2). We formulate a novel symmetry reduction scheme for such systems by combining the method of slices with invariant polynomial methods, and show how it works by applying it to the Kuramoto–Sivashinsky system in one spatial dimension. As an example, we track a relative periodic orbit through a sequence of bifurcations to the onset of chaos. Within the symmetry-reduced state space we are able to compute and visualize the unstable manifolds of relative periodic orbits, their torus bifurcations, a transition to chaos via torus breakdown, and heteroclinic connections between various relative periodic orbits. It would be very hard to carry through such analysis in the full state space, without a symmetry reduction such as the one we present here.},
author = {Budanur, Nazmi B and Cvitanović, Predrag},
journal = {Journal of Statistical Physics},
number = {3-4},
pages = {636--655},
publisher = {Springer},
title = {{Unstable manifolds of relative periodic orbits in the symmetry reduced state space of the Kuramoto–Sivashinsky system}},
doi = {10.1007/s10955-016-1672-z},
volume = {167},
year = {2017},
}
@inbook{1213,
abstract = {Bacterial cytokinesis is commonly initiated by the Z-ring, a dynamic cytoskeletal structure that assembles at the site of division. Its primary component is FtsZ, a tubulin-like GTPase, that like its eukaryotic relative forms protein filaments in the presence of GTP. Since the discovery of the Z-ring 25 years ago, various models for the role of FtsZ have been suggested. However, important information about the architecture and dynamics of FtsZ filaments during cytokinesis is still missing. One reason for this lack of knowledge has been the small size of bacteria, which has made it difficult to resolve the orientation and dynamics of individual FtsZ filaments in the Z-ring. While superresolution microscopy experiments have helped to gain more information about the organization of the Z-ring in the dividing cell, they were not yet able to elucidate a mechanism of how FtsZ filaments reorganize during assembly and disassembly of the Z-ring. In this chapter, we explain how to use an in vitro reconstitution approach to investigate the self-organization of FtsZ filaments recruited to a biomimetic lipid bilayer by its membrane anchor FtsA. We show how to perform single-molecule experiments to study the behavior of individual FtsZ monomers during the constant reorganization of the FtsZ-FtsA filament network. We describe how to analyze the dynamics of single molecules and explain why this information can help to shed light onto possible mechanism of Z-ring constriction. We believe that similar experimental approaches will be useful to study the mechanism of membrane-based polymerization of other cytoskeletal systems, not only from prokaryotic but also eukaryotic origin.},
author = {Baranova, Natalia and Loose, Martin},
booktitle = {Cytokinesis},
editor = {Echard, Arnaud},
issn = {0091-679X},
pages = {355--370},
publisher = {Academic Press},
title = {{Single-molecule measurements to study polymerization dynamics of FtsZ-FtsA copolymers}},
doi = {10.1016/bs.mcb.2016.03.036},
volume = {137},
year = {2017},
}
@article{123,
abstract = {The Leidenfrost effect occurs when an object near a hot surface vaporizes rapidly enough to lift itself up and hover. Although well understood for liquids and stiff sublimable solids, nothing is known about the effect with materials whose stiffness lies between these extremes. Here we introduce a new phenomenon that occurs with vaporizable soft solids - the elastic Leidenfrost effect. By dropping hydrogel spheres onto hot surfaces we find that, rather than hovering, they energetically bounce several times their diameter for minutes at a time. With high-speed video during a single impact, we uncover high-frequency microscopic gap dynamics at the sphere/substrate interface. We show how these otherwise-hidden agitations constitute work cycles that harvest mechanical energy from the vapour and sustain the bouncing. Our findings suggest a new strategy for injecting mechanical energy into a widely used class of soft materials, with potential relevance to fields such as active matter, soft robotics and microfluidics.},
author = {Waitukaitis, Scott R and Zuiderwijk, Antal and Souslov, Anton and Coulais, Corentin and van Hecke, Martin},
journal = {Nature Physics},
number = {11},
pages = {1095--1099},
publisher = {Nature Publishing Group},
title = {{Coupling the Leidenfrost effect and elastic deformations to power sustained bouncing}},
doi = {10.1038/nphys4194},
volume = {13},
year = {2017},
}
@article{1294,
abstract = {We study controller synthesis problems for finite-state Markov decision processes, where the objective is to optimize the expected mean-payoff performance and stability (also known as variability in the literature). We argue that the basic notion of expressing the stability using the statistical variance of the mean payoff is sometimes insufficient, and propose an alternative definition. We show that a strategy ensuring both the expected mean payoff and the variance below given bounds requires randomization and memory, under both the above definitions. We then show that the problem of finding such a strategy can be expressed as a set of constraints.},
author = {Brázdil, Tomáš and Chatterjee, Krishnendu and Forejt, Vojtěch and Kučera, Antonín},
journal = {Journal of Computer and System Sciences},
pages = {144--170},
publisher = {Elsevier},
title = {{Trading performance for stability in Markov decision processes}},
doi = {10.1016/j.jcss.2016.09.009},
volume = {84},
year = {2017},
}