@article{6228,
abstract = {Following the recent observation that turbulent pipe flow can be relaminarised by a relatively simple modification of the mean velocity profile, we here carry out a quantitative experimental investigation of this phenomenon. Our study confirms that a flat velocity profile leads to a collapse of turbulence and in order to achieve the blunted profile shape, we employ a moving pipe segment that is briefly and rapidly shifted in the streamwise direction. The relaminarisation threshold and the minimum shift length and speeds are determined as a function of Reynolds number. Although turbulence is still active after the acceleration phase, the modulated profile possesses a severely decreased lift-up potential as measured by transient growth. As shown, this results in an exponential decay of fluctuations and the flow relaminarises. While this method can be easily applied at low to moderate flow speeds, the minimum streamwise length over which the acceleration needs to act increases linearly with the Reynolds number.},
author = {Scarselli, Davide and Kühnen, Jakob and Hof, Björn},
issn = {1469-7645},
journal = {Journal of Fluid Mechanics},
pages = {934--948},
publisher = {Cambridge University Press},
title = {{Relaminarising pipe flow by wall movement}},
doi = {10.1017/jfm.2019.191},
volume = {867},
year = {2019},
}
@article{6187,
abstract = {Aberrant display of the truncated core1 O-glycan T-antigen is a common feature of human cancer cells that correlates with metastasis. Here we show that T-antigen in Drosophila melanogaster macrophages is involved in their developmentally programmed tissue invasion. Higher macrophage T-antigen levels require an atypical major facilitator superfamily (MFS) member that we named Minerva which enables macrophage dissemination and invasion. We characterize for the first time the T and Tn glycoform O-glycoproteome of the Drosophila melanogaster embryo, and determine that Minerva increases the presence of T-antigen on proteins in pathways previously linked to cancer, most strongly on the sulfhydryl oxidase Qsox1 which we show is required for macrophage tissue entry. Minerva’s vertebrate ortholog, MFSD1, rescues the minerva mutant’s migration and T-antigen glycosylation defects. We thus identify a key conserved regulator that orchestrates O-glycosylation on a protein subset to activate a program governing migration steps important for both development and cancer metastasis.},
author = {Valosková, Katarina and Biebl, Julia and Roblek, Marko and Emtenani, Shamsi and György, Attila and Misova, Michaela and Ratheesh, Aparna and Rodrigues, Patricia and Shkarina, Katerina and Larsen, Ida Signe Bohse and Vakhrushev, Sergey Y and Clausen, Henrik and Siekhaus, Daria E},
issn = {2050-084X},
journal = {eLife},
publisher = {eLife Sciences Publications},
title = {{A conserved major facilitator superfamily member orchestrates a subset of O-glycosylation to aid macrophage tissue invasion}},
doi = {10.7554/eLife.41801},
volume = {8},
year = {2019},
}
@phdthesis{6546,
abstract = {Invasive migration plays a crucial role not only during development and homeostasis but also in pathological states, such as tumor metastasis. Drosophila macrophage migration into the extended germband is an interesting system to study invasive migration. It carries similarities to immune cell transmigration and cancer cell invasion, therefore studying this process could also bring new understanding of invasion in higher organisms. In our work, we uncover a highly conserved member of the major facilitator family that plays a role in tissue invasion through regulation of glycosylation on a subgroup of proteins and/or by aiding the precise timing of DN-Cadherin downregulation.
Aberrant display of the truncated core1 O-glycan T-antigen is a common feature of human cancer cells that correlates with metastasis. Here we show that T-antigen in Drosophila melanogaster macrophages is involved in their developmentally programmed tissue invasion. Higher macrophage T-antigen levels require an atypical major facilitator superfamily (MFS) member that we named Minerva which enables macrophage dissemination and invasion. We characterize for the first time the T and Tn glycoform O-glycoproteome of the Drosophila melanogaster embryo, and determine that Minerva increases the presence of T-antigen on proteins in pathways previously linked to cancer, most strongly on the sulfhydryl oxidase Qsox1 which we show is required for macrophage tissue entry. Minerva’s vertebrate ortholog, MFSD1, rescues the minerva mutant’s migration and T-antigen glycosylation defects. We thus identify
a key conserved regulator that orchestrates O-glycosylation on a protein subset to activate
a program governing migration steps important for both development and cancer metastasis.
},
author = {Valosková, Katarina},
issn = {2663-337X},
pages = {141},
publisher = {Institute of Science and Technology Austria},
title = {{The role of a highly conserved major facilitator superfamily member in Drosophila embryonic macrophage migration}},
doi = {10.15479/AT:ISTA:6546},
year = {2019},
}
@article{6260,
abstract = {Polar auxin transport plays a pivotal role in plant growth and development. PIN auxin efflux carriers regulate directional auxin movement by establishing local auxin maxima, minima, and gradients that drive multiple developmental processes and responses to environmental signals. Auxin has been proposed to modulate its own transport by regulating subcellular PIN trafficking via processes such as clathrin-mediated PIN endocytosis and constitutive recycling. Here, we further investigated the mechanisms by which auxin affects PIN trafficking by screening auxin analogs and identified pinstatic acid (PISA) as a positive modulator of polar auxin transport in Arabidopsis thaliana. PISA had an auxin-like effect on hypocotyl elongation and adventitious root formation via positive regulation of auxin transport. PISA did not activate SCFTIR1/AFB signaling and yet induced PIN accumulation at the cell surface by inhibiting PIN internalization from the plasma membrane. This work demonstrates PISA to be a promising chemical tool to dissect the regulatory mechanisms behind subcellular PIN trafficking and auxin transport.},
author = {Oochi, A and Hajny, Jakub and Fukui, K and Nakao, Y and Gallei, Michelle C and Quareshy, M and Takahashi, K and Kinoshita, T and Harborough, SR and Kepinski, S and Kasahara, H and Napier, RM and Friml, Jiří and Hayashi, KI},
issn = {1532-2548},
journal = {Plant Physiology},
number = {2},
pages = {1152--1165},
publisher = {ASPB},
title = {{Pinstatic acid promotes auxin transport by inhibiting PIN internalization}},
doi = {10.1104/pp.19.00201},
volume = {180},
year = {2019},
}
@phdthesis{6392,
abstract = {The regulation of gene expression is one of the most fundamental processes in living systems. In recent years, thanks to advances in sequencing technology and automation, it has become possible to study gene expression quantitatively, genome-wide and in high-throughput. This leads to the possibility of exploring changes in gene expression in the context of many external perturbations and their combinations, and thus of characterising the basic principles governing gene regulation. In this thesis, I present quantitative experimental approaches to studying transcriptional and protein level changes in response to combinatorial drug treatment, as well as a theoretical data-driven approach to analysing thermodynamic principles guiding transcription of protein coding genes.
In the first part of this work, I present a novel methodological framework for quantifying gene expression changes in drug combinations, termed isogrowth profiling. External perturbations through small molecule drugs influence the growth rate of the cell, leading to wide-ranging changes in cellular physiology and gene expression. This confounds the gene expression changes specifically elicited by the particular drug. Combinatorial perturbations, owing to the increased stress they exert, influence the growth rate even more strongly and hence suffer the convolution problem to a greater extent when measuring gene expression changes. Isogrowth profiling is a way to experimentally abstract non-specific, growth rate related changes, by performing the measurement using varying ratios of two drugs at such concentrations that the overall inhibition rate is constant. Using a robotic setup for automated high-throughput re-dilution culture of Saccharomyces cerevisiae, the budding yeast, I investigate all pairwise interactions of four small molecule drugs through sequencing RNA along a growth isobole. Through principal component analysis, I demonstrate here that isogrowth profiling can uncover drug-specific as well as drug-interaction-specific gene expression changes. I show that drug-interaction-specific gene expression changes can be used for prediction of higher-order drug interactions. I propose a simplified generalised framework of isogrowth profiling, with few measurements needed for each drug pair, enabling the broad application of isogrowth profiling to high-throughput screening of inhibitors of cellular growth and beyond. Such high-throughput screenings of gene expression changes specific to pairwise drug interactions will be instrumental for predicting the higher-order interactions of the drugs.
In the second part of this work, I extend isogrowth profiling to single-cell measurements of gene expression, characterising population heterogeneity in the budding yeast in response to combinatorial drug perturbation while controlling for non-specific growth rate effects. Through flow cytometry of strains with protein products fused to green fluorescent protein, I discover multiple proteins with bi-modally distributed expression levels in the population in response to drug treatment. I characterize more closely the effect of an ionic stressor, lithium chloride, and find that it inhibits the splicing of mRNA, most strongly affecting ribosomal protein transcripts and leading to a bi-stable behaviour of a small ribosomal subunit protein Rps22B. Time-lapse microscopy of a microfluidic culture system revealed that the induced Rps22B heterogeneity leads to preferential survival of Rps22B-low cells after long starvation, but to preferential proliferation of Rps22B-high cells after short starvation. Overall, this suggests that yeast cells might use splicing of ribosomal genes for bet-hedging in fluctuating environments. I give specific examples of how further exploration of cellular heterogeneity in yeast in response to external perturbation has the potential to reveal yet-undiscovered gene regulation circuitry.
In the last part of this thesis, a re-analysis of a published sequencing dataset of nascent elongating transcripts is used to characterise the thermodynamic constraints for RNA polymerase II (RNAP) elongation. Population-level data on RNAP position throughout the transcribed genome with single nucleotide resolution are used to infer the sequence specific thermodynamic determinants of RNAP pausing and backtracking. This analysis reveals that the basepairing strength of the eight nucleotide-long RNA:DNA duplex relative to the basepairing strength of the same sequence when in DNA:DNA duplex, and the change in this quantity during RNA polymerase movement, is the key determinant of RNAP pausing. This is true for RNAP pausing while elongating, but also of RNAP pausing while backtracking and of the backtracking length. The quantitative dependence of RNAP pausing on basepairing energetics is used to infer the increase in pausing due to transcriptional mismatches, leading to a hypothesis that pervasive RNA polymerase II pausing is due to basepairing energetics, as an evolutionary cost for increased RNA polymerase II fidelity.
This work advances our understanding of the general principles governing gene expression, with the goal of making computational predictions of single-cell gene expression responses to combinatorial perturbations based on the individual perturbations possible. This ability would substantially facilitate the design of drug combination treatments and, in the long term, lead to our increased ability to more generally design targeted manipulations to any biological system. },
author = {Lukacisin, Martin},
isbn = {978-3-99078-001-5},
issn = {2663-337X},
pages = {103},
publisher = {Institute of Science and Technology Austria},
title = {{Quantitative investigation of gene expression principles through combinatorial drug perturbation and theory}},
doi = {10.15479/AT:ISTA:6392},
year = {2019},
}
@inproceedings{6780,
abstract = {In this work, we consider the almost-sure termination problem for probabilistic programs that asks whether a
given probabilistic program terminates with probability 1. Scalable approaches for program analysis often
rely on modularity as their theoretical basis. In non-probabilistic programs, the classical variant rule (V-rule)
of Floyd-Hoare logic provides the foundation for modular analysis. Extension of this rule to almost-sure
termination of probabilistic programs is quite tricky, and a probabilistic variant was proposed in [16]. While the
proposed probabilistic variant cautiously addresses the key issue of integrability, we show that the proposed
modular rule is still not sound for almost-sure termination of probabilistic programs.
Besides establishing unsoundness of the previous rule, our contributions are as follows: First, we present a
sound modular rule for almost-sure termination of probabilistic programs. Our approach is based on a novel
notion of descent supermartingales. Second, for algorithmic approaches, we consider descent supermartingales
that are linear and show that they can be synthesized in polynomial time. Finally, we present experimental
results on a variety of benchmarks and several natural examples that model various types of nested while
loops in probabilistic programs and demonstrate that our approach is able to efficiently prove their almost-sure
termination property.},
author = {Huang, Mingzhang and Fu, Hongfei and Chatterjee, Krishnendu and Goharshady, Amir Kafshdar},
booktitle = {Proceedings of the 34th ACM International Conference on Object-Oriented Programming, Systems, Languages, and Applications},
location = {Athens, Greece},
publisher = {ACM},
title = {{Modular verification for almost-sure termination of probabilistic programs}},
doi = {10.1145/3360555},
volume = {3},
year = {2019},
}
@article{6380,
abstract = {There is a huge gap between the speeds of modern caches and main memories, and therefore cache misses account for a considerable loss of efficiency in programs. The predominant technique to address this issue has been Data Packing: data elements that are frequently accessed within time proximity are packed into the same cache block, thereby minimizing accesses to the main memory. We consider the algorithmic problem of Data Packing on a two-level memory system. Given a reference sequence R of accesses to data elements, the task is to partition the elements into cache blocks such that the number of cache misses on R is minimized. The problem is notoriously difficult: it is NP-hard even when the cache has size 1, and is hard to approximate for any cache size larger than 4. Therefore, all existing techniques for Data Packing are based on heuristics and lack theoretical guarantees. In this work, we present the first positive theoretical results for Data Packing, along with new and stronger negative results. We consider the problem under the lens of the underlying access hypergraphs, which are hypergraphs of affinities between the data elements, where the order of an access hypergraph corresponds to the size of the affinity group. We study the problem parameterized by the treewidth of access hypergraphs, which is a standard notion in graph theory to measure the closeness of a graph to a tree. Our main results are as follows: We show there is a number q* depending on the cache parameters such that (a) if the access hypergraph of order q* has constant treewidth, then there is a linear-time algorithm for Data Packing; (b) the Data Packing problem remains NP-hard even if the access hypergraph of order q*-1 has constant treewidth. Thus, we establish a fine-grained dichotomy depending on a single parameter, namely, the highest order among access hypergraphs that have constant treewidth; and establish the optimal value q* of this parameter.
Finally, we present an experimental evaluation of a prototype implementation of our algorithm. Our results demonstrate that, in practice, access hypergraphs of many commonly-used algorithms have small treewidth. We compare our approach with several state-of-the-art heuristic-based algorithms and show that our algorithm leads to significantly fewer cache-misses. },
author = {Chatterjee, Krishnendu and Goharshady, Amir Kafshdar and Okati, Nastaran and Pavlogiannis, Andreas},
issn = {2475-1421},
journal = {Proceedings of the ACM on Programming Languages},
number = {POPL},
publisher = {ACM},
title = {{Efficient parameterized algorithms for data packing}},
doi = {10.1145/3290366},
volume = {3},
year = {2019},
}
@inproceedings{6056,
abstract = {In today's programmable blockchains, smart contracts are limited to being deterministic and non-probabilistic. This lack of randomness is a consequential limitation, given that a wide variety of real-world financial contracts, such as casino games and lotteries, depend entirely on randomness. As a result, several ad-hoc random number generation approaches have been developed to be used in smart contracts. These include ideas such as using an oracle or relying on the block hash. However, these approaches are manipulatable, i.e. their output can be tampered with by parties who might not be neutral, such as the owner of the oracle or the miners. We propose a novel game-theoretic approach for generating provably unmanipulatable pseudorandom numbers on the blockchain. Our approach allows smart contracts to access a trustworthy source of randomness that does not rely on potentially compromised miners or oracles, hence enabling the creation of a new generation of smart contracts that are not limited to being non-probabilistic and can be drawn from the much more general class of probabilistic programs.},
author = {Chatterjee, Krishnendu and Goharshady, Amir Kafshdar and Pourdamghani, Arash},
booktitle = {IEEE International Conference on Blockchain and Cryptocurrency},
location = {Seoul, Korea},
publisher = {IEEE},
title = {{Probabilistic smart contracts: Secure randomness on the blockchain}},
doi = {10.1109/BLOC.2019.8751326},
year = {2019},
}
@inproceedings{6378,
abstract = {In today's cryptocurrencies, Hashcash proof of work is the most commonly-adopted approach to mining. In Hashcash, when a miner decides to add a block to the chain, she has to solve the difficult computational puzzle of inverting a hash function. While Hashcash has been successfully adopted in both Bitcoin and Ethereum, it has attracted significant and harsh criticism due to its massive waste of electricity, its carbon footprint and environmental effects, and the inherent lack of usefulness in inverting a hash function. Various other mining protocols have been suggested, including proof of stake, in which a miner's chance of adding the next block is proportional to her current balance. However, such protocols lead to a higher entry cost for new miners who might not still have any stake in the cryptocurrency, and can in the worst case lead to an oligopoly, where the rich have complete control over mining. In this paper, we propose Hybrid Mining: a new mining protocol that combines solving real-world useful problems with Hashcash. Our protocol allows new miners to join the network by taking part in Hashcash mining without having to own an initial stake. It also allows nodes of the network to submit hard computational problems whose solutions are of interest in the real world, e.g.~protein folding problems. Then, miners can choose to compete in solving these problems, in lieu of Hashcash, for adding a new block. Hence, Hybrid Mining incentivizes miners to solve useful problems, such as hard computational problems arising in biology, in a distributed manner. It also gives researchers in other areas an easy-to-use tool to outsource their hard computations to the blockchain network, which has enormous computational power, by paying a reward to the miner who solves the problem for them. Moreover, our protocol provides strong security guarantees and is at least as resilient to double spending as Bitcoin.},
author = {Chatterjee, Krishnendu and Goharshady, Amir Kafshdar and Pourdamghani, Arash},
booktitle = {Proceedings of the 34th ACM Symposium on Applied Computing},
isbn = {9781450359337},
location = {Limassol, Cyprus},
pages = {374--381},
publisher = {ACM},
title = {{Hybrid Mining: Exploiting blockchain’s computational power for distributed problem solving}},
doi = {10.1145/3297280.3297319},
volume = {Part F147772},
year = {2019},
}
@inproceedings{6175,
abstract = {We consider the problem of expected cost analysis over nondeterministic probabilistic programs,
which aims at automated methods for analyzing the resource-usage of such programs.
Previous approaches for this problem could only handle nonnegative bounded costs.
However, in many scenarios, such as queuing networks or analysis of cryptocurrency protocols,
both positive and negative costs are necessary and the costs are unbounded as well.
In this work, we present a sound and efficient approach to obtain polynomial bounds on the
expected accumulated cost of nondeterministic probabilistic programs.
Our approach can handle (a) general positive and negative costs with bounded updates in
variables; and (b) nonnegative costs with general updates to variables.
We show that several natural examples which could not be
handled by previous approaches are captured in our framework.
Moreover, our approach leads to an efficient polynomial-time algorithm, while no
previous approach for cost analysis of probabilistic programs could guarantee polynomial runtime.
Finally, we show the effectiveness of our approach using experimental results on a variety of programs for which we efficiently synthesize tight resource-usage bounds.},
author = {Wang, Peixin and Fu, Hongfei and Goharshady, Amir Kafshdar and Chatterjee, Krishnendu and Qin, Xudong and Shi, Wenjun},
booktitle = {PLDI 2019: Proceedings of the 40th ACM SIGPLAN Conference on Programming Language Design and Implementation},
keywords = {Program Cost Analysis, Program Termination, Probabilistic Programs, Martingales},
location = {Phoenix, AZ, United States},
pages = {204--220},
publisher = {ACM},
title = {{Cost analysis of nondeterministic probabilistic programs}},
doi = {10.1145/3314221.3314581},
year = {2019},
}
@inproceedings{6490,
abstract = {Smart contracts are programs that are stored and executed on the Blockchain and can receive, manage and transfer money (cryptocurrency units). Two important problems regarding smart contracts are formal analysis and compiler optimization. Formal analysis is extremely important, because smart contracts hold funds worth billions of dollars and their code is immutable after deployment. Hence, an undetected bug can cause significant financial losses. Compiler optimization is also crucial, because every action of a smart contract has to be executed by every node in the Blockchain network. Therefore, optimizations in compiling smart contracts can lead to significant savings in computation, time and energy.
Two classical approaches in program analysis and compiler optimization are intraprocedural and interprocedural analysis. In intraprocedural analysis, each function is analyzed separately, while interprocedural analysis considers the entire program. In both cases, the analyses are usually reduced to graph problems over the control flow graph (CFG) of the program. These graph problems are often computationally expensive. Hence, there has been ample research on exploiting structural properties of CFGs for efficient algorithms. One such well-studied property is the treewidth, which is a measure of tree-likeness of graphs. It is known that intraprocedural CFGs of structured programs have treewidth at most 6, whereas the interprocedural treewidth cannot be bounded. This result has been used as a basis for many efficient intraprocedural analyses.
In this paper, we explore the idea of exploiting the treewidth of smart contracts for formal analysis and compiler optimization. First, similar to classical programs, we show that the intraprocedural treewidth of structured Solidity and Vyper smart contracts is at most 9. Second, for global analysis, we prove that the interprocedural treewidth of structured smart contracts is bounded by 10 and, in sharp contrast with classical programs, treewidth-based algorithms can be easily applied for interprocedural analysis. Finally, we supplement our theoretical results with experiments using a tool we implemented for computing treewidth of smart contracts and show that the treewidth is much lower in practice. We use 36,764 real-world Ethereum smart contracts as benchmarks and find that they have an average treewidth of at most 3.35 for the intraprocedural case and 3.65 for the interprocedural case.
},
author = {Chatterjee, Krishnendu and Goharshady, Amir Kafshdar and Goharshady, Ehsan Kafshdar},
booktitle = {Proceedings of the 34th ACM Symposium on Applied Computing},
isbn = {9781450359337},
location = {Limassol, Cyprus},
pages = {400--408},
publisher = {ACM},
title = {{The treewidth of smart contracts}},
doi = {10.1145/3297280.3297322},
volume = {Part F147772},
year = {2019},
}
@article{7158,
abstract = {Interprocedural analysis is at the heart of numerous applications in programming languages, such as alias analysis, constant propagation, and so on. Recursive state machines (RSMs) are standard models for interprocedural analysis. We consider a general framework with RSMs where the transitions are labeled from a semiring and path properties are algebraic with semiring operations. RSMs with algebraic path properties can model interprocedural dataflow analysis problems, the shortest path problem, the most probable path problem, and so on. The traditional algorithms for interprocedural analysis focus on path properties where the starting point is fixed as the entry point of a specific method. In this work, we consider possible multiple queries as required in many applications such as in alias analysis. The study of multiple queries allows us to bring in an important algorithmic distinction between the resource usage of the one-time preprocessing vs for each individual query. The second aspect we consider is that the control flow graphs for most programs have constant treewidth.
Our main contributions are simple and implementable algorithms that support multiple queries for algebraic path properties for RSMs that have constant treewidth. Our theoretical results show that our algorithms have small additional one-time preprocessing but can answer subsequent queries significantly faster as compared to the current algorithmic solutions for interprocedural dataflow analysis. We have also implemented our algorithms and evaluated their performance for performing on-demand interprocedural dataflow analysis on various domains, such as for live variable analysis and reaching definitions, on a standard benchmark set. Our experimental results align with our theoretical statements and show that after a lightweight preprocessing, on-demand queries are answered much faster than the standard existing algorithmic approaches.
},
author = {Chatterjee, Krishnendu and Goharshady, Amir Kafshdar and Goyal, Prateesh and Ibsen-Jensen, Rasmus and Pavlogiannis, Andreas},
issn = {0164-0925},
journal = {ACM Transactions on Programming Languages and Systems},
number = {4},
publisher = {ACM},
title = {{Faster algorithms for dynamic algebraic queries in basic RSMs with constant treewidth}},
doi = {10.1145/3363525},
volume = {41},
year = {2019},
}
@article{7014,
abstract = {We study the problem of developing efficient approaches for proving
worst-case bounds of non-deterministic recursive programs. Ranking functions
are sound and complete for proving termination and worst-case bounds of
nonrecursive programs. First, we apply ranking functions to recursion,
resulting in measure functions. We show that measure functions provide a sound
and complete approach to prove worst-case bounds of non-deterministic recursive
programs. Our second contribution is the synthesis of measure functions in
nonpolynomial forms. We show that non-polynomial measure functions with
logarithm and exponentiation can be synthesized through abstraction of
logarithmic or exponentiation terms, Farkas' Lemma, and Handelman's Theorem
using linear programming. While previous methods obtain worst-case polynomial
bounds, our approach can synthesize bounds of the form $\mathcal{O}(n\log n)$
as well as $\mathcal{O}(n^r)$ where $r$ is not an integer. We present
experimental results to demonstrate that our approach can obtain efficiently
worst-case bounds of classical recursive algorithms such as (i) Merge-Sort, the
divide-and-conquer algorithm for the Closest-Pair problem, where we obtain
$\mathcal{O}(n \log n)$ worst-case bound, and (ii) Karatsuba's algorithm for
polynomial multiplication and Strassen's algorithm for matrix multiplication,
where we obtain $\mathcal{O}(n^r)$ bound such that $r$ is not an integer and
close to the best-known bounds for the respective algorithms.},
author = {Chatterjee, Krishnendu and Fu, Hongfei and Goharshady, Amir Kafshdar},
journal = {ACM Transactions on Programming Languages and Systems},
number = {4},
publisher = {ACM},
title = {{Non-polynomial worst-case analysis of recursive programs}},
doi = {10.1145/3339984},
volume = {41},
year = {2019},
}
@article{10286,
abstract = {In this paper, we evaluate clock signals generated in ring oscillators and self-timed rings and the way their jitter can be transformed into random numbers. We show that counting the periods of the jittery clock signal produces random numbers of significantly better quality than the methods in which the jittery signal is simply sampled (the case in almost all current methods). Moreover, we use the counter values to characterize and continuously monitor the source of randomness. However, instead of using the widely used statistical variance, we propose to use Allan variance to do so. There are two main advantages: Allan variance is insensitive to low frequency noises such as flicker noise that are known to be autocorrelated and significantly less circuitry is required for its computation than that used to compute commonly used variance. We also show that it is essential to use a differential principle of randomness extraction from the jitter based on the use of two identical oscillators to avoid autocorrelations originating from external and internal global jitter sources and that this fact is valid for both kinds of rings. Last but not least, we propose a method of statistical testing based on high order Markov model to show the reduced dependencies when the proposed randomness extraction is applied.},
author = {Allini, Elie Noumon and Skórski, Maciej and Petura, Oto and Bernard, Florent and Laban, Marek and Fischer, Viktor},
issn = {2569-2925},
journal = {IACR Transactions on Cryptographic Hardware and Embedded Systems},
number = {3},
pages = {214--242},
publisher = {International Association for Cryptologic Research},
title = {{Evaluation and monitoring of free running oscillators serving as source of randomness}},
doi = {10.13154/tches.v2018.i3.214-242},
volume = {2018},
year = {2018},
}
@inproceedings{10883,
abstract = {Solving parity games, which are equivalent to modal μ-calculus model checking, is a central algorithmic problem in formal methods, with applications in reactive synthesis, program repair, verification of branching-time properties, etc. Besides the standard computation model with the explicit representation of games, another important theoretical model of computation is that of set-based symbolic algorithms. Set-based symbolic algorithms use basic set operations and one-step predecessor operations on the implicit description of games, rather than the explicit representation. The significance of symbolic algorithms is that they provide scalable algorithms for large finite-state systems, as well as for infinite-state systems with finite quotient. Consider parity games on graphs with n vertices and parity conditions with d priorities. While there is a rich literature of explicit algorithms for parity games, the main results for set-based symbolic algorithms are as follows: (a) the basic algorithm that requires O(n^d) symbolic operations and O(d) symbolic space; and (b) an improved algorithm that requires O(n^(d/3+1)) symbolic operations and O(n) symbolic space. In this work, our contributions are as follows: (1) We present a black-box set-based symbolic algorithm based on the explicit progress measure algorithm. Two important consequences of our algorithm are as follows: (a) a set-based symbolic algorithm for parity games that requires quasi-polynomially many symbolic operations and O(n) symbolic space; and (b) any future improvement in progress measure based explicit algorithms immediately imply an efficiency improvement in our set-based symbolic algorithm for parity games. (2) We present a set-based symbolic algorithm that requires quasi-polynomially many symbolic operations and O(d · log n) symbolic space. 
Moreover, for the important special case of d ≤ log n, our algorithm requires only polynomially many symbolic operations and poly-logarithmic symbolic space.},
author = {Chatterjee, Krishnendu and Dvořák, Wolfgang and Henzinger, Monika H and Svozil, Alexander},
booktitle = {22nd International Conference on Logic for Programming, Artificial Intelligence and Reasoning},
issn = {2398-7340},
location = {Awassa, Ethiopia},
pages = {233--253},
publisher = {EasyChair},
title = {{Quasipolynomial set-based symbolic algorithms for parity games}},
doi = {10.29007/5z5k},
volume = {57},
year = {2018},
}
@inproceedings{11,
abstract = {We report on a novel strategy to derive mean-field limits of quantum mechanical systems in which a large number of particles weakly couple to a second-quantized radiation field. The technique combines the method of counting and the coherent state approach to study the growth of the correlations among the particles and in the radiation field. As an instructional example, we derive the Schrödinger–Klein–Gordon system of equations from the Nelson model with ultraviolet cutoff and possibly massless scalar field. In particular, we prove the convergence of the reduced density matrices (of the nonrelativistic particles and the field bosons) associated with the exact time evolution to the projectors onto the solutions of the Schrödinger–Klein–Gordon equations in trace norm. Furthermore, we derive explicit bounds on the rate of convergence of the one-particle reduced density matrix of the nonrelativistic particles in Sobolev norm.},
author = {Leopold, Nikolai K and Pickl, Peter},
location = {Munich, Germany},
pages = {185 -- 214},
publisher = {Springer},
title = {{Mean-field limits of particles in interaction with quantised radiation fields}},
doi = {10.1007/978-3-030-01602-9_9},
volume = {270},
year = {2018},
}
@article{1215,
abstract = {Two generalizations of Itô formula to infinite-dimensional spaces are given.
The first one, in Hilbert spaces, extends the classical one by taking advantage of
cancellations when they occur in examples and it is applied to the case of a group
generator. The second one, based on the previous one and a limit procedure, is an Itô
formula in a special class of Banach spaces having a product structure with the noise
in a Hilbert component; again the key point is the extension due to a cancellation. This
extension to Banach spaces and in particular the specific cancellation are motivated
by path-dependent Itô calculus.},
author = {Flandoli, Franco and Russo, Francesco and Zanco, Giovanni A},
journal = {Journal of Theoretical Probability},
number = {2},
pages = {789--826},
publisher = {Springer},
title = {{Infinite-dimensional calculus under weak spatial regularity of the processes}},
doi = {10.1007/s10959-016-0724-2},
volume = {31},
year = {2018},
}
@article{180,
abstract = {In this paper we define and study the classical Uniform Electron Gas (UEG), a system of infinitely many electrons whose density is constant everywhere in space. The UEG is defined differently from Jellium, which has a positive constant background but no constraint on the density. We prove that the UEG arises in Density Functional Theory in the limit of a slowly varying density, minimizing the indirect Coulomb energy. We also construct the quantum UEG and compare it to the classical UEG at low density.},
author = {Lewin, Mathieu and Lieb, Elliott and Seiringer, Robert},
journal = {Journal de l'École Polytechnique - Mathématiques},
pages = {79 -- 116},
publisher = {École Polytechnique},
title = {{Statistical mechanics of the uniform electron gas}},
doi = {10.5802/jep.64},
volume = {5},
year = {2018},
}
@inproceedings{185,
abstract = {We resolve in the affirmative conjectures of A. Skopenkov and Repovš (1998), and M. Skopenkov (2003) generalizing the classical Hanani-Tutte theorem to the setting of approximating maps of graphs on 2-dimensional surfaces by embeddings. Our proof of this result is constructive and almost immediately implies an efficient algorithm for testing whether a given piecewise linear map of a graph in a surface is approximable by an embedding. More precisely, an instance of this problem consists of (i) a graph G whose vertices are partitioned into clusters and whose inter-cluster edges are partitioned into bundles, and (ii) a region R of a 2-dimensional compact surface M given as the union of a set of pairwise disjoint discs corresponding to the clusters and a set of pairwise disjoint "pipes" corresponding to the bundles, connecting certain pairs of these discs. We are to decide whether G can be embedded inside M so that the vertices in every cluster are drawn in the corresponding disc, the edges in every bundle pass only through its corresponding pipe, and every edge crosses the boundary of each disc at most once.},
author = {Fulek, Radoslav and Kynčl, Jan},
isbn = {978-3-95977-066-8},
location = {Budapest, Hungary},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Hanani-Tutte for approximating maps of graphs}},
doi = {10.4230/LIPIcs.SoCG.2018.39},
volume = {99},
year = {2018},
}
@inproceedings{188,
abstract = {Smallest enclosing spheres of finite point sets are central to methods in topological data analysis. Focusing on Bregman divergences to measure dissimilarity, we prove bounds on the location of the center of a smallest enclosing sphere. These bounds depend on the range of radii for which Bregman balls are convex.},
author = {Edelsbrunner, Herbert and Virk, Ziga and Wagner, Hubert},
location = {Budapest, Hungary},
pages = {35:1 -- 35:13},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Smallest enclosing spheres and Chernoff points in Bregman geometry}},
doi = {10.4230/LIPIcs.SoCG.2018.35},
volume = {99},
year = {2018},
}
@article{284,
abstract = {Borel probability measures living on metric spaces are fundamental
mathematical objects. There are several meaningful distance functions that make the collection of the probability measures living on a certain space a metric space. We are interested in the description of the structure of the isometries of such metric spaces. We overview some of the recent results of the topic and we also provide some new ones concerning the Wasserstein distance. More specifically, we consider the space of all Borel probability measures on the unit sphere of a Euclidean space endowed with the Wasserstein metric {$W_p$} for arbitrary {$p \geq 1$}, and we show that the action of a Wasserstein isometry on the set of Dirac measures is induced by an isometry of the underlying unit sphere.},
author = {Virosztek, Daniel},
journal = {Acta Scientiarum Mathematicarum (Szeged)},
number = {1-2},
pages = {65 -- 80},
publisher = {Bolyai Institute},
title = {{Maps on probability measures preserving certain distances - a survey and some new results}},
doi = {10.14232/actasm-018-753-y},
volume = {84},
year = {2018},
}
@article{306,
abstract = {A cornerstone of statistical inference, the maximum entropy framework is being increasingly applied to construct descriptive and predictive models of biological systems, especially complex biological networks, from large experimental data sets. Both its broad applicability and the success it obtained in different contexts hinge upon its conceptual simplicity and mathematical soundness. Here we try to concisely review the basic elements of the maximum entropy principle, starting from the notion of ‘entropy’, and describe its usefulness for the analysis of biological systems. As examples, we focus specifically on the problem of reconstructing gene interaction networks from expression data and on recent work attempting to expand our system-level understanding of bacterial metabolism. Finally, we highlight some extensions and potential limitations of the maximum entropy approach, and point to more recent developments that are likely to play a key role in the upcoming challenges of extracting structures and information from increasingly rich, high-throughput biological data.},
author = {De Martino, Andrea and De Martino, Daniele},
journal = {Heliyon},
number = {4},
publisher = {Elsevier},
title = {{An introduction to the maximum entropy approach and its application to inference problems in biology}},
doi = {10.1016/j.heliyon.2018.e00596},
volume = {4},
year = {2018},
}
@book{3300,
abstract = {This book first explores the origins of this idea, grounded in theoretical work on temporal logic and automata. The editors and authors are among the world's leading researchers in this domain, and they contributed 32 chapters representing a thorough view of the development and application of the technique. Topics covered include binary decision diagrams, symbolic model checking, satisfiability modulo theories, partial-order reduction, abstraction, interpolation, concurrency, security protocols, games, probabilistic model checking, and process algebra, and chapters on the transfer of theory to industrial practice, property specification languages for hardware, and verification of real-time systems and hybrid systems.
The book will be valuable for researchers and graduate students engaged with the development of formal methods and verification tools.},
author = {Clarke, Edmund M. and Henzinger, Thomas A and Veith, Helmut and Bloem, Roderick},
isbn = {978-3-319-10574-1},
pages = {XLVIII, 1212},
publisher = {Springer Nature},
title = {{Handbook of Model Checking}},
doi = {10.1007/978-3-319-10575-8},
year = {2018},
}
@inbook{37,
abstract = {Developmental processes are inherently dynamic and understanding them requires quantitative measurements of gene and protein expression levels in space and time. While live imaging is a powerful approach for obtaining such data, it is still a challenge to apply it over long periods of time to large tissues, such as the embryonic spinal cord in mouse and chick. Nevertheless, dynamics of gene expression and signaling activity patterns in this organ can be studied by collecting tissue sections at different developmental stages. In combination with immunohistochemistry, this allows for measuring the levels of multiple developmental regulators in a quantitative manner with high spatiotemporal resolution. The mean protein expression levels over time, as well as embryo-to-embryo variability can be analyzed. A key aspect of the approach is the ability to compare protein levels across different samples. This requires a number of considerations in sample preparation, imaging and data analysis. Here we present a protocol for obtaining time course data of dorsoventral expression patterns from mouse and chick neural tube in the first 3 days of neural tube development. The described workflow starts from embryo dissection and ends with a processed dataset. Software scripts for data analysis are included. The protocol is adaptable and instructions that allow the user to modify different steps are provided. Thus, the procedure can be altered for analysis of time-lapse images and applied to systems other than the neural tube.},
author = {Zagórski, Marcin P and Kicheva, Anna},
booktitle = {Morphogen Gradients},
isbn = {978-1-4939-8771-9},
issn = {1064-3745},
pages = {47 -- 63},
publisher = {Springer Nature},
title = {{Measuring dorsoventral pattern and morphogen signaling profiles in the growing neural tube}},
doi = {10.1007/978-1-4939-8772-6_4},
volume = {1863},
year = {2018},
}
@article{417,
abstract = {We introduce a Diagrammatic Monte Carlo (DiagMC) approach to complex molecular impurities with rotational degrees of freedom interacting with a many-particle environment. The treatment is based on the diagrammatic expansion that merges the usual Feynman diagrams with the angular momentum diagrams known from atomic and nuclear structure theory, thereby incorporating the non-Abelian algebra inherent to quantum rotations. Our approach works at arbitrary coupling, is free of systematic errors and of finite size effects, and naturally provides access to the impurity Green function. We exemplify the technique by obtaining an all-coupling solution of the angulon model, however, the method is quite general and can be applied to a broad variety of quantum impurities possessing angular momentum degrees of freedom. },
author = {Bighin, Giacomo and Tscherbul, Timur and Lemeshko, Mikhail},
journal = {Physical Review Letters},
number = {16},
publisher = {APS Physics},
title = {{Diagrammatic Monte Carlo approach to rotating molecular impurities}},
doi = {10.1103/PhysRevLett.121.165301},
volume = {121},
year = {2018},
}
@article{305,
abstract = {The hanging-drop network (HDN) is a technology platform based on a completely open microfluidic network at the bottom of an inverted, surface-patterned substrate. The platform is predominantly used for the formation, culturing, and interaction of self-assembled spherical microtissues (spheroids) under precisely controlled flow conditions. Here, we describe design, fabrication, and operation of microfluidic hanging-drop networks.},
author = {Misun, Patrick and Birchler, Axel and Lang, Moritz and Hierlemann, Andreas and Frey, Olivier},
journal = {Methods in Molecular Biology},
pages = {183 -- 202},
publisher = {Springer},
title = {{Fabrication and operation of microfluidic hanging drop networks}},
doi = {10.1007/978-1-4939-7792-5_15},
volume = {1771},
year = {2018},
}
@inproceedings{325,
abstract = {Probabilistic programs extend classical imperative programs with real-valued random variables and random branching. The most basic liveness property for such programs is the termination property. The qualitative (aka almost-sure) termination problem asks whether a given program program terminates with probability 1. While ranking functions provide a sound and complete method for non-probabilistic programs, the extension of them to probabilistic programs is achieved via ranking supermartingales (RSMs). Although deep theoretical results have been established about RSMs, their application to probabilistic programs with nondeterminism has been limited only to programs of restricted control-flow structure. For non-probabilistic programs, lexicographic ranking functions provide a compositional and practical approach for termination analysis of real-world programs. In this work we introduce lexicographic RSMs and show that they present a sound method for almost-sure termination of probabilistic programs with nondeterminism. We show that lexicographic RSMs provide a tool for compositional reasoning about almost-sure termination, and for probabilistic programs with linear arithmetic they can be synthesized efficiently (in polynomial time). We also show that with additional restrictions even asymptotic bounds on expected termination time can be obtained through lexicographic RSMs. Finally, we present experimental results on benchmarks adapted from previous work to demonstrate the effectiveness of our approach.},
author = {Agrawal, Sheshansh and Chatterjee, Krishnendu and Novotny, Petr},
location = {Los Angeles, CA, USA},
number = {POPL},
publisher = {ACM},
title = {{Lexicographic ranking supermartingales: an efficient approach to termination of probabilistic programs}},
doi = {10.1145/3158122},
volume = {2},
year = {2018},
}
@inbook{408,
abstract = {Adventitious roots (AR) are de novo formed roots that emerge from any part of the plant or from callus in tissue culture, except root tissue. The plant tissue origin and the method by which they are induced determine the physiological properties of emerged ARs. Hence, a standard method encompassing all types of AR does not exist. Here we describe a method for the induction and analysis of AR that emerge from the etiolated hypocotyl of dicot plants. The hypocotyl is formed during embryogenesis and shows a determined developmental pattern which usually does not involve AR formation. However, the hypocotyl shows propensity to form de novo roots under specific circumstances such as removal of the root system, high humidity or flooding, or during de-etiolation. The hypocotyl AR emerge from a pericycle-like cell layer surrounding the vascular tissue of the central cylinder, which is reminiscent to the developmental program of lateral roots. Here we propose an easy protocol for in vitro hypocotyl AR induction from etiolated Arabidopsis seedlings.},
author = {Trinh, Hoang and Verstraeten, Inge and Geelen, Danny},
booktitle = {Root Development},
issn = {1064-3745},
pages = {95 -- 102},
publisher = {Springer Nature},
title = {{In vitro assay for induction of adventitious rooting on intact arabidopsis hypocotyls}},
doi = {10.1007/978-1-4939-7747-5_7},
volume = {1761},
year = {2018},
}
@inbook{411,
abstract = {Immunolocalization is a valuable tool for cell biology research that allows to rapidly determine the localization and expression levels of endogenous proteins. In plants, whole-mount in situ immunolocalization remains a challenging method, especially in tissues protected by waxy layers and complex cell wall carbohydrates. Here, we present a robust method for whole-mount in situ immunolocalization in primary root meristems and lateral root primordia in Arabidopsis thaliana. For good epitope preservation, fixation is done in an alkaline paraformaldehyde/glutaraldehyde mixture. This fixative is suitable for detecting a wide range of proteins, including integral transmembrane proteins and proteins peripherally attached to the plasma membrane. From initiation until emergence from the primary root, lateral root primordia are surrounded by several layers of differentiated tissues with a complex cell wall composition that interferes with the efficient penetration of all buffers. Therefore, immunolocalization in early lateral root primordia requires a modified method, including a strong solvent treatment for removal of hydrophobic barriers and a specific cocktail of cell wall-degrading enzymes. The presented method allows for easy, reliable, and high-quality in situ detection of the subcellular localization of endogenous proteins in primary and lateral root meristems without the need of time-consuming crosses or making translational fusions to fluorescent proteins.},
author = {Karampelias, Michael and Tejos, Ricardo and Friml, Jiří and Vanneste, Steffen},
booktitle = {Root Development. Methods and Protocols},
editor = {Ristova, Daniela and Barbez, Elke},
pages = {131 -- 143},
publisher = {Springer},
title = {{Optimized whole mount in situ immunolocalization for Arabidopsis thaliana root meristems and lateral root primordia}},
doi = {10.1007/978-1-4939-7747-5_10},
volume = {1761},
year = {2018},
}
@article{456,
abstract = {Inhibition of the endoplasmic reticulum stress pathway may hold the key to Zika virus-associated microcephaly treatment. },
author = {Novarino, Gaia},
journal = {Science Translational Medicine},
number = {423},
publisher = {American Association for the Advancement of Science},
title = {{Zika-associated microcephaly: Reduce the stress and race for the treatment}},
doi = {10.1126/scitranslmed.aar7514},
volume = {10},
year = {2018},
}
@article{53,
abstract = {In 2013, a publication repository was implemented at IST Austria and 2015 after a thorough preparation phase a data repository was implemented - both based on the Open Source Software EPrints. In this text, designed as field report, we will reflect on our experiences with Open Source Software in general and specifically with EPrints regarding technical aspects but also regarding their characteristics of the user community. The second part is a pleading for including the end users in the process of implementation, adaption and evaluation.},
author = {Petritsch, Barbara and Porsche, Jana},
journal = {VÖB Mitteilungen},
number = {1},
pages = {199 -- 206},
publisher = {Vereinigung Österreichischer Bibliothekarinnen und Bibliothekare},
title = {{IST PubRep and IST DataRep: the institutional repositories at IST Austria}},
doi = {10.31263/voebm.v71i1.1993},
volume = {71},
year = {2018},
}
@article{536,
abstract = {We consider the problem of consensus in the challenging classic model. In this model, the adversary is adaptive; it can choose which processors crash at any point during the course of the algorithm. Further, communication is via asynchronous message passing: there is no known upper bound on the time to send a message from one processor to another, and all messages and coin flips are seen by the adversary. We describe a new randomized consensus protocol with expected message complexity O(n² log² n) when fewer than n / 2 processes may fail by crashing. This is an almost-linear improvement over the best previously known protocol, and within logarithmic factors of a known Ω(n²) message lower bound. The protocol further ensures that no process sends more than O(n log³ n) messages in expectation, which is again within logarithmic factors of optimal. We also present a generalization of the algorithm to an arbitrary number of failures t, which uses expected O(nt + t² log² t) total messages. Our approach is to build a message-efficient, resilient mechanism for aggregating individual processor votes, implementing the message-passing equivalent of a weak shared coin. Roughly, in our protocol, a processor first announces its votes to small groups, then propagates them to increasingly larger groups as it generates more and more votes. To bound the number of messages that an individual process might have to send or receive, the protocol progressively increases the weight of generated votes. The main technical challenge is bounding the impact of votes that are still “in flight” (generated, but not fully propagated) on the final outcome of the shared coin, especially since such votes might have different weights. We achieve this by leveraging the structure of the algorithm, and a technical argument based on martingale concentration bounds. 
Overall, we show that it is possible to build an efficient message-passing implementation of a shared coin, and in the process (almost-optimally) solve the classic consensus problem in the asynchronous message-passing model.},
author = {Alistarh, Dan-Adrian and Aspnes, James and King, Valerie and Saia, Jared},
issn = {01782770},
journal = {Distributed Computing},
number = {6},
pages = {489--501},
publisher = {Springer},
title = {{Communication-efficient randomized consensus}},
doi = {10.1007/s00446-017-0315-1},
volume = {31},
year = {2018},
}
@article{554,
abstract = {We analyse the canonical Bogoliubov free energy functional in three dimensions at low temperatures in the dilute limit. We prove existence of a first-order phase transition and, in the limit (Formula presented.), we determine the critical temperature to be (Formula presented.) to leading order. Here, (Formula presented.) is the critical temperature of the free Bose gas, ρ is the density of the gas and a is the scattering length of the pair-interaction potential V. We also prove asymptotic expansions for the free energy. In particular, we recover the Lee–Huang–Yang formula in the limit (Formula presented.).},
author = {Napiórkowski, Marcin M and Reuvers, Robin and Solovej, Jan},
issn = {00103616},
journal = {Communications in Mathematical Physics},
number = {1},
pages = {347--403},
publisher = {Springer},
title = {{The Bogoliubov free energy functional II: The dilute Limit}},
doi = {10.1007/s00220-017-3064-x},
volume = {360},
year = {2018},
}
@misc{5573,
abstract = {Graph matching problems for large displacement optical flow of RGB-D images.},
author = {Alhaija, Hassan and Sellent, Anita and Kondermann, Daniel and Rother, Carsten},
keywords = {graph matching, quadratic assignment problem},
publisher = {IST Austria},
title = {{Graph matching problems for GraphFlow – 6D Large Displacement Scene Flow}},
doi = {10.15479/AT:ISTA:82},
year = {2018},
}
@misc{5577,
abstract = {Data on Austrian open access publication output at Emerald from 2013-2017 including data analysis.},
author = {Villányi, Márton},
keywords = {Publication analysis, Bibliography, Open Access},
publisher = {IST Austria},
title = {{Emerald Austrian Publications 2013-2017}},
doi = {10.15479/AT:ISTA:89},
year = {2018},
}
@misc{5574,
abstract = {Comparison of Scopus' and publisher's data on Austrian publication output at IOP. },
author = {Villányi, Márton},
keywords = {Publication analysis, Bibliography, Open Access},
publisher = {IST Austria},
title = {{Data Check IOP Scopus vs. Publisher}},
doi = {10.15479/AT:ISTA:86},
year = {2018},
}
@misc{5578,
abstract = {Data on Austrian open access publication output at IOP from 2012-2015 including data analysis.},
author = {Villányi, Márton},
keywords = {Publication analysis, Bibliography, Open Access},
publisher = {IST Austria},
title = {{IOP Austrian Publications 2012-2015}},
doi = {10.15479/AT:ISTA:90},
year = {2018},
}
@misc{5579,
abstract = {Data on Austrian open access publication output at RSC from 2013-2017 including data analysis.},
author = {Villányi, Márton},
keywords = {Publication analysis, Bibliography, Open Access},
publisher = {IST Austria},
title = {{RSC Austrian Publications 2013-2017}},
doi = {10.15479/AT:ISTA:91},
year = {2018},
}
@misc{5576,
abstract = {Comparison of Scopus' and FWF's data on Austrian publication output at T&F.},
author = {Villányi, Márton},
keywords = {Publication analysis, Bibliography, Open Access},
publisher = {IST Austria},
title = {{Data Check T&F Scopus vs. FWF}},
doi = {10.15479/AT:ISTA:88},
year = {2018},
}
@misc{5575,
abstract = {Comparison of Scopus' and FWF's data on Austrian publication output at RSC. },
author = {Villányi, Márton},
keywords = {Publication analysis, Bibliography, Open Access},
publisher = {IST Austria},
title = {{Data Check RSC Scopus vs. FWF}},
doi = {10.15479/AT:ISTA:87},
year = {2018},
}
@phdthesis{278,
abstract = {Consortial subscription contracts regulate the digital access to publications between publishers and scientific libraries. However, since a couple of years the tendency towards a freely accessible publishing (Open Access) intensifies. As a consequence of this trend the contractual relationship between licensor and licensee is gradually changing as well: More and more contracts exercise influence on open access publishing. The present study attempts to compare Austrian examples of consortial licence contracts, which include components of open access. It describes the difference between pure subscription contracts and differing innovative deals including open access components. Thereby it becomes obvious that for the evaluation of this licence contracts new methods are needed. An essential new element of such analyses is the evaluation of the open access publication numbers. So this study tries to carry out such publication analyses for Austrian open access deals focusing on quantitative questions: How does the number of publications evolve? How does the open access share change? Publications reports of the publishers and database queries from Scopus form the data basis. The analysis of the data points out that differing approaches of contracts result in highly divergent results: Particular deals can prioritize a saving in costs or else the increase of the open access rate. It is to be assumed that within the following years further numerous open access deals will be negotiated. The finding of this study shall provide guidance.},
author = {Villányi, Márton},
pages = {94},
publisher = {Universität Wien},
title = {{Lizenzverträge mit Open-Access-Komponenten an österreichischen Bibliotheken}},
year = {2018},
}
@misc{5582,
abstract = {Data on Austrian open access publication output at Taylor&Francis from 2013-2017 including data analysis.},
author = {Villányi, Márton},
keywords = {Publication analysis, Bibliography, Open Access},
publisher = {IST Austria},
title = {{Taylor&Francis Austrian Publications 2013-2017}},
doi = {10.15479/AT:ISTA:94},
year = {2018},
}
@misc{5581,
abstract = {Data on Austrian open access publication output at Springer from 2013-2016 including data analysis.},
author = {Villányi, Márton},
keywords = {Publication analysis, Bibliography, Open Access},
publisher = {IST Austria},
title = {{Springer Austrian Publications 2013-2016}},
doi = {10.15479/AT:ISTA:93},
year = {2018},
}
@misc{5580,
abstract = {Data on Austrian open access publication output at SAGE from 2013-2017 including data analysis.},
author = {Villányi, Márton},
keywords = {Publication analysis, Bibliography, Open Access},
publisher = {IST Austria},
title = {{SAGE Austrian Publications 2013-2017}},
doi = {10.15479/AT:ISTA:92},
year = {2018},
}
@misc{5588,
abstract = {Script to perform a simple exponential lifetime fit of a ROI on time stacks acquired with a FLIM X16 TCSPC detector (+example data)},
author = {Hauschild, Robert},
keywords = {FLIM, FRET, fluorescence lifetime imaging},
publisher = {IST Austria},
title = {{Fluorescence lifetime analysis of FLIM X16 TCSPC data}},
doi = {10.15479/AT:ISTA:0113},
year = {2018},
}
@inbook{562,
abstract = {Primary neuronal cell culture preparations are widely used to investigate synaptic functions. This chapter describes a detailed protocol for the preparation of a neuronal cell culture in which giant calyx-type synaptic terminals are formed. This chapter also presents detailed protocols for utilizing the main technical advantages provided by such a preparation, namely, labeling and imaging of synaptic organelles and electrophysiological recordings directly from presynaptic terminals.},
author = {Dimitrov, Dimitar and Guillaud, Laurent and Eguchi, Kohgaku and Takahashi, Tomoyuki},
booktitle = {Neurotrophic Factors},
editor = {Skaper, Stephen D.},
pages = {201 -- 215},
publisher = {Springer},
title = {{Culture of mouse giant central nervous system synapses and application for imaging and electrophysiological analyses}},
doi = {10.1007/978-1-4939-7571-6_15},
volume = {1727},
year = {2018},
}
@article{5677,
abstract = {Recently, contract-based design has been proposed as an “orthogonal” approach that complements system design methodologies proposed so far to cope with the complexity of system design. Contract-based design provides a rigorous scaffolding for verification, analysis, abstraction/refinement, and even synthesis. A number of results have been obtained in this domain but a unified treatment of the topic that can help put contract-based design in perspective was missing. This monograph intends to provide such a treatment where contracts are precisely defined and characterized so that they can be used in design methodologies with no ambiguity. In particular, this monograph identifies the essence of complex system design using contracts through a mathematical “meta-theory”, where all the properties of the methodology are derived from a very abstract and generic notion of contract. We show that the meta-theory provides deep and illuminating links with existing contract and interface theories, as well as guidelines for designing new theories. Our study encompasses contracts for both software and systems, with emphasis on the latter. We illustrate the use of contracts with two examples: requirement engineering for a parking garage management, and the development of contracts for timing and scheduling in the context of the Autosar methodology in use in the automotive sector.},
author = {Benveniste, Albert and Nickovic, Dejan and Caillaud, Benoît and Passerone, Roberto and Raclet, Jean Baptiste and Reinkemeier, Philipp and Sangiovanni-Vincentelli, Alberto and Damm, Werner and Henzinger, Thomas A and Larsen, Kim G.},
issn = {15513939},
journal = {Foundations and Trends in Electronic Design Automation},
number = {2-3},
pages = {124--400},
publisher = {Now Publishers Inc},
title = {{Contracts for system design}},
doi = {10.1561/1000000053},
volume = {12},
year = {2018},
}
@inbook{59,
abstract = {Graph-based games are an important tool in computer science. They have applications in synthesis, verification, refinement, and far beyond. We review graphbased games with objectives on infinite plays. We give definitions and algorithms to solve the games and to give a winning strategy. The objectives we consider are mostly Boolean, but we also look at quantitative graph-based games and their objectives. Synthesis aims to turn temporal logic specifications into correct reactive systems. We explain the reduction of synthesis to graph-based games (or equivalently tree automata) using synthesis of LTL specifications as an example. We treat the classical approach that uses determinization of parity automata and more modern approaches.},
author = {Bloem, Roderick and Chatterjee, Krishnendu and Jobstmann, Barbara},
booktitle = {Handbook of Model Checking},
editor = {Henzinger, Thomas A and Clarke, Edmund M. and Veith, Helmut and Bloem, Roderick},
isbn = {978-3-319-10574-1},
pages = {921--962},
publisher = {Springer},
title = {{Graph games and reactive synthesis}},
doi = {10.1007/978-3-319-10575-8_27},
year = {2018},
}
@inbook{60,
abstract = {Model checking is a computer-assisted method for the analysis of dynamical systems that can be modeled by state-transition systems. Drawing from research traditions in mathematical logic, programming languages, hardware design, and theoretical computer science, model checking is now widely used for the verification of hardware and software in industry. This chapter is an introduction and short survey of model checking. The chapter aims to motivate and link the individual chapters of the handbook, and to provide context for readers who are not familiar with model checking.},
author = {Clarke, Edmund and Henzinger, Thomas A and Veith, Helmut},
booktitle = {Handbook of Model Checking},
editor = {Clarke, Edmund M. and Henzinger, Thomas A and Veith, Helmut and Bloem, Roderick},
pages = {1--26},
publisher = {Springer},
title = {{Introduction to model checking}},
doi = {10.1007/978-3-319-10575-8_1},
year = {2018},
}
@inbook{61,
abstract = {We prove that there is no strongly regular graph (SRG) with parameters (460; 153; 32; 60). The proof is based on a recent lower bound on the number of 4-cliques in a SRG and some applications of Euclidean representation of SRGs. },
author = {Bondarenko, Andriy and Mellit, Anton and Prymak, Andriy and Radchenko, Danylo and Viazovska, Maryna},
booktitle = {Contemporary Computational Mathematics},
pages = {131--134},
publisher = {Springer},
title = {{There is no strongly regular graph with parameters (460; 153; 32; 60)}},
doi = {10.1007/978-3-319-72456-0_7},
year = {2018},
}
@article{6354,
abstract = {Blood platelets are critical for hemostasis and thrombosis, but also play diverse roles during immune responses. We have recently reported that platelets migrate at sites of infection in vitro and in vivo. Importantly, platelets use their ability to migrate to collect and bundle fibrin (ogen)-bound bacteria accomplishing efficient intravascular bacterial trapping. Here, we describe a method that allows analyzing platelet migration in vitro, focusing on their ability to collect bacteria and trap bacteria under flow.},
author = {Fan, Shuxia and Lorenz, Michael and Massberg, Steffen and Gärtner, Florian R},
issn = {2331-8325},
journal = {Bio-Protocol},
keywords = {Platelets, Cell migration, Bacteria, Shear flow, Fibrinogen, E. coli},
number = {18},
publisher = {Bio-Protocol},
title = {{Platelet migration and bacterial trapping assay under flow}},
doi = {10.21769/bioprotoc.3018},
volume = {8},
year = {2018},
}
@misc{6459,
author = {Petritsch, Barbara},
keywords = {Open Access, Publication Analysis},
location = {Graz, Austria},
publisher = {IST Austria},
title = {{Open Access at IST Austria 2009-2017}},
doi = {10.5281/zenodo.1410279},
year = {2018},
}
@inbook{6525,
abstract = {This chapter finds an agreement of equivariant indices of semi-classical homomorphisms between pairwise mirror branes in the GL2 Higgs moduli space on a Riemann surface. On one side of the agreement, components of the Lagrangian brane of U(1,1) Higgs bundles, whose mirror was proposed by Hitchin to be certain even exterior powers of the hyperholomorphic Dirac bundle on the SL2 Higgs moduli space, are present. The agreement arises from a mysterious functional equation. This gives strong computational evidence for Hitchin’s proposal.},
author = {Hausel, Tamás and Mellit, Anton and Pei, Du},
booktitle = {Geometry and Physics: Volume I},
isbn = {9780198802013},
pages = {189--218},
publisher = {Oxford University Press},
title = {{Mirror symmetry with branes by equivariant Verlinde formulas}},
doi = {10.1093/oso/9780198802013.003.0009},
year = {2018},
}
@techreport{5686,
author = {Danowski, Patrick},
pages = {5},
title = {{An Austrian proposal for the Classification of Open Access Tuples (COAT) - Distinguish different Open Access types beyond colors}},
doi = {10.5281/zenodo.1244154},
year = {2018},
}
@article{690,
abstract = {We consider spectral properties and the edge universality of sparse random matrices, the class of random matrices that includes the adjacency matrices of the Erdős–Rényi graph model G(N, p). We prove a local law for the eigenvalue density up to the spectral edges. Under a suitable condition on the sparsity, we also prove that the rescaled extremal eigenvalues exhibit GOE Tracy–Widom fluctuations if a deterministic shift of the spectral edge due to the sparsity is included. For the adjacency matrix of the Erdős–Rényi graph this establishes the Tracy–Widom fluctuations of the second largest eigenvalue when p is much larger than N−2/3 with a deterministic shift of order (Np)−1.},
author = {Lee, Ji Oon and Schnelli, Kevin},
journal = {Probability Theory and Related Fields},
number = {1-2},
publisher = {Springer},
title = {{Local law and Tracy–Widom limit for sparse random matrices}},
doi = {10.1007/s00440-017-0787-8},
volume = {171},
year = {2018},
}
@article{703,
abstract = {We consider the NP-hard problem of MAP-inference for undirected discrete graphical models. We propose a polynomial time and practically efficient algorithm for finding a part of its optimal solution. Specifically, our algorithm marks some labels of the considered graphical model either as (i) optimal, meaning that they belong to all optimal solutions of the inference problem; (ii) non-optimal if they provably do not belong to any solution. With access to an exact solver of a linear programming relaxation to the MAP-inference problem, our algorithm marks the maximal possible (in a specified sense) number of labels. We also present a version of the algorithm, which has access to a suboptimal dual solver only and still can ensure the (non-)optimality for the marked labels, although the overall number of the marked labels may decrease. We propose an efficient implementation, which runs in time comparable to a single run of a suboptimal dual solver. Our method is well-scalable and shows state-of-the-art results on computational benchmarks from machine learning and computer vision.},
author = {Shekhovtsov, Alexander and Swoboda, Paul and Savchynskyy, Bogdan},
issn = {01628828},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
number = {7},
pages = {1668--1682},
publisher = {IEEE},
title = {{Maximum persistency via iterative relaxed inference with graphical models}},
doi = {10.1109/TPAMI.2017.2730884},
volume = {40},
year = {2018},
}
@inproceedings{7116,
abstract = {Training deep learning models has received tremendous research interest recently. In particular, there has been intensive research on reducing the communication cost of training when using multiple computational devices, through reducing the precision of the underlying data representation. Naturally, such methods induce system trade-offs—lowering communication precision could de-crease communication overheads and improve scalability; but, on the other hand, it can also reduce the accuracy of training. In this paper, we study this trade-off space, and ask:Can low-precision communication consistently improve the end-to-end performance of training modern neural networks, with no accuracy loss?From the performance point of view, the answer to this question may appear deceptively easy: compressing communication through low precision should help when the ratio between communication and computation is high. However, this answer is less straightforward when we try to generalize this principle across various neural network architectures (e.g., AlexNet vs. ResNet),number of GPUs (e.g., 2 vs. 8 GPUs), machine configurations(e.g., EC2 instances vs. NVIDIA DGX-1), communication primitives (e.g., MPI vs. NCCL), and even different GPU architectures(e.g., Kepler vs. Pascal). Currently, it is not clear how a realistic realization of all these factors maps to the speed up provided by low-precision communication. In this paper, we conduct an empirical study to answer this question and report the insights.},
author = {Grubic, Demjan and Tam, Leo and Alistarh, Dan-Adrian and Zhang, Ce},
booktitle = {Proceedings of the 21st International Conference on Extending Database Technology},
isbn = {9783893180783},
issn = {2367-2005},
location = {Vienna, Austria},
pages = {145--156},
publisher = {OpenProceedings},
title = {{Synchronous multi-GPU training for deep learning with low-precision communications: An empirical study}},
doi = {10.5441/002/EDBT.2018.14},
year = {2018},
}
@article{738,
abstract = {This paper is devoted to automatic competitive analysis of real-time scheduling algorithms for firm-deadline tasksets, where only completed tasks contribute some utility to the system. Given such a taskset T, the competitive ratio of an on-line scheduling algorithm A for T is the worst-case utility ratio of A over the utility achieved by a clairvoyant algorithm. We leverage the theory of quantitative graph games to address the competitive analysis and competitive synthesis problems. For the competitive analysis case, given any taskset T and any finite-memory on-line scheduling algorithm A, we show that the competitive ratio of A in T can be computed in polynomial time in the size of the state space of A. Our approach is flexible as it also provides ways to model meaningful constraints on the released task sequences that determine the competitive ratio. We provide an experimental study of many well-known on-line scheduling algorithms, which demonstrates the feasibility of our competitive analysis approach that effectively replaces human ingenuity (required for finding worst-case scenarios) by computing power. For the competitive synthesis case, we are just given a taskset T, and the goal is to automatically synthesize an optimal on-line scheduling algorithm A, i.e., one that guarantees the largest competitive ratio possible for T. We show how the competitive synthesis problem can be reduced to a two-player graph game with partial information, and establish that the computational complexity of solving this game is NP-complete. The competitive synthesis problem is hence in NP in the size of the state space of the non-deterministic labeled transition system encoding the taskset. Overall, the proposed framework assists in the selection of suitable scheduling algorithms for a given taskset, which is in fact the most common situation in real-time systems design.},
author = {Chatterjee, Krishnendu and Pavlogiannis, Andreas and Kößler, Alexander and Schmid, Ulrich},
journal = {Real-Time Systems},
number = {1},
pages = {166 -- 207},
publisher = {Springer},
title = {{Automated competitive analysis of real time scheduling with graph games}},
doi = {10.1007/s11241-017-9293-4},
volume = {54},
year = {2018},
}
@inproceedings{7407,
abstract = {Proofs of space (PoS) [Dziembowski et al., CRYPTO'15] are proof systems where a prover can convince a verifier that he "wastes" disk space. PoS were introduced as a more ecological and economical replacement for proofs of work which are currently used to secure blockchains like Bitcoin. In this work we investigate extensions of PoS which allow the prover to embed useful data into the dedicated space, which later can be recovered. Our first contribution is a security proof for the original PoS from CRYPTO'15 in the random oracle model (the original proof only applied to a restricted class of adversaries which can store a subset of the data an honest prover would store). When this PoS is instantiated with recent constructions of maximally depth robust graphs, our proof implies basically optimal security. As a second contribution we show three different extensions of this PoS where useful data can be embedded into the space required by the prover. Our security proof for the PoS extends (non-trivially) to these constructions. We discuss how some of these variants can be used as proofs of catalytic space (PoCS), a notion we put forward in this work, and which basically is a PoS where most of the space required by the prover can be used to backup useful data. Finally we discuss how one of the extensions is a candidate construction for a proof of replication (PoR), a proof system recently suggested in the Filecoin whitepaper. },
author = {Pietrzak, Krzysztof Z},
booktitle = {10th Innovations in Theoretical Computer Science Conference (ITCS 2019)},
isbn = {978-3-95977-095-8},
issn = {1868-8969},
location = {San Diego, CA, United States},
pages = {59:1--59:25},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Proofs of catalytic space}},
doi = {10.4230/LIPICS.ITCS.2019.59},
volume = {124},
year = {2018},
}
@article{742,
abstract = {We give a detailed and easily accessible proof of Gromov’s Topological Overlap Theorem. Let X be a finite simplicial complex or, more generally, a finite polyhedral cell complex of dimension d. Informally, the theorem states that if X has sufficiently strong higher-dimensional expansion properties (which generalize edge expansion of graphs and are defined in terms of cellular cochains of X) then X has the following topological overlap property: for every continuous map (Formula presented.) there exists a point (Formula presented.) that is contained in the images of a positive fraction (Formula presented.) of the d-cells of X. More generally, the conclusion holds if (Formula presented.) is replaced by any d-dimensional piecewise-linear manifold M, with a constant (Formula presented.) that depends only on d and on the expansion properties of X, but not on M.},
author = {Dotterrer, Dominic and Kaufman, Tali and Wagner, Uli},
journal = {Geometriae Dedicata},
number = {1},
pages = {307--317},
publisher = {Springer},
title = {{On expansion and topological overlap}},
doi = {10.1007/s10711-017-0291-4},
volume = {195},
year = {2018},
}
@article{6001,
abstract = {The concurrent memory reclamation problem is that of devising a way for a deallocating thread to verify that no other concurrent threads hold references to a memory block being deallocated. To date, in the absence of automatic garbage collection, there is no satisfactory solution to this problem; existing tracking methods like hazard pointers, reference counters, or epoch-based techniques like RCU are either prohibitively expensive or require significant programming expertise to the extent that implementing them efficiently can be worthy of a publication. None of the existing techniques are automatic or even semi-automated.
In this article, we take a new approach to concurrent memory reclamation. Instead of manually tracking access to memory locations as done in techniques like hazard pointers, or restricting shared accesses to specific epoch boundaries as in RCU, our algorithm, called ThreadScan, leverages operating system signaling to automatically detect which memory locations are being accessed by concurrent threads.
Initial empirical evidence shows that ThreadScan scales surprisingly well and requires negligible programming effort beyond the standard use of Malloc and Free.},
author = {Alistarh, Dan-Adrian and Leiserson, William and Matveev, Alexander and Shavit, Nir},
issn = {2329-4949},
journal = {ACM Transactions on Parallel Computing},
number = {4},
publisher = {Association for Computing Machinery},
title = {{ThreadScan: Automatic and scalable memory reclamation}},
doi = {10.1145/3201897},
volume = {4},
year = {2018},
}
@inproceedings{7812,
abstract = {Deep neural networks (DNNs) continue to make significant advances, solving tasks from image classification to translation or reinforcement learning. One aspect of the field receiving considerable attention is efficiently executing deep models in resource-constrained environments, such as mobile or embedded devices. This paper focuses on this problem, and proposes two new compression methods, which jointly leverage weight quantization and distillation of larger teacher networks into smaller student networks. The first method we propose is called quantized distillation and leverages distillation during the training process, by incorporating distillation loss, expressed with respect to the teacher, into the training of a student network whose weights are quantized to a limited set of levels. The second method, differentiable quantization, optimizes the location of quantization points through stochastic gradient descent, to better fit the behavior of the teacher model. We validate both methods through experiments on convolutional and recurrent architectures. We show that quantized shallow students can reach similar accuracy levels to full-precision teacher models, while providing order of magnitude compression, and inference speedup that is linear in the depth reduction. In sum, our results enable DNNs for resource-constrained environments to leverage architecture and accuracy advances developed on more powerful devices.},
author = {Polino, Antonio and Pascanu, Razvan and Alistarh, Dan-Adrian},
booktitle = {6th International Conference on Learning Representations},
location = {Vancouver, Canada},
title = {{Model compression via distillation and quantization}},
year = {2018},
}
@unpublished{8547,
abstract = {The cerebral cortex contains multiple hierarchically organized areas with distinctive cytoarchitectonical patterns, but the cellular mechanisms underlying the emergence of this diversity remain unclear. Here, we have quantitatively investigated the neuronal output of individual progenitor cells in the ventricular zone of the developing mouse neocortex using a combination of methods that together circumvent the biases and limitations of individual approaches. We found that individual cortical progenitor cells show a high degree of stochasticity and generate pyramidal cell lineages that adopt a wide range of laminar configurations. Mathematical modelling these lineage data suggests that a small number of progenitor cell populations, each generating pyramidal cells following different stochastic developmental programs, suffice to generate the heterogenous complement of pyramidal cell lineages that collectively build the complex cytoarchitecture of the neocortex.},
author = {Llorca, Alfredo and Ciceri, Gabriele and Beattie, Robert J and Wong, Fong K. and Diana, Giovanni and Serafeimidou, Eleni and Fernández-Otero, Marian and Streicher, Carmen and Arnold, Sebastian J. and Meyer, Martin and Hippenmeyer, Simon and Maravall, Miguel and Marín, Oscar},
booktitle = {bioRxiv},
publisher = {Cold Spring Harbor Laboratory},
title = {{Heterogeneous progenitor cell behaviors underlie the assembly of neocortical cytoarchitecture}},
doi = {10.1101/494088},
year = {2018},
}
@inbook{86,
abstract = {Responsiveness—the requirement that every request to a system be eventually handled—is one of the fundamental liveness properties of a reactive system. Average response time is a quantitative measure for the responsiveness requirement used commonly in performance evaluation. We show how average response time can be computed on state-transition graphs, on Markov chains, and on game graphs. In all three cases, we give polynomial-time algorithms.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Otop, Jan},
booktitle = {Principles of Modeling},
editor = {Lohstroh, Marten and Derler, Patricia and Sirjani, Marjan},
pages = {143--161},
publisher = {Springer},
title = {{Computing average response time}},
doi = {10.1007/978-3-319-95246-8_9},
volume = {10760},
year = {2018},
}
@article{9229,
author = {Danzl, Johann G},
issn = {2500-2295},
journal = {Opera Medica et Physiologica},
number = {S1},
pages = {11},
publisher = {Lobachevsky State University of Nizhny Novgorod},
title = {{Diffraction-unlimited optical imaging for synaptic physiology}},
doi = {10.20388/omp2018.00s1.001},
volume = {4},
year = {2018},
}
@inproceedings{6005,
abstract = {Network games are widely used as a model for selfish resource-allocation problems. In the classicalmodel, each player selects a path connecting her source and target vertices. The cost of traversingan edge depends on theload; namely, number of players that traverse it. Thus, it abstracts the factthat different users may use a resource at different times and for different durations, which playsan important role in determining the costs of the users in reality. For example, when transmittingpackets in a communication network, routing traffic in a road network, or processing a task in aproduction system, actual sharing and congestion of resources crucially depends on time.In [13], we introducedtimed network games, which add a time component to network games.Each vertexvin the network is associated with a cost function, mapping the load onvto theprice that a player pays for staying invfor one time unit with this load. Each edge in thenetwork is guarded by the time intervals in which it can be traversed, which forces the players tospend time in the vertices. In this work we significantly extend the way time can be referred toin timed network games. In the model we study, the network is equipped withclocks, and, as intimed automata, edges are guarded by constraints on the values of the clocks, and their traversalmay involve a reset of some clocks. We argue that the stronger model captures many realisticnetworks. The addition of clocks breaks the techniques we developed in [13] and we developnew techniques in order to show that positive results on classic network games carry over to thestronger timed setting.},
author = {Avni, Guy and Guha, Shibashis and Kupferman, Orna},
issn = {1868-8969},
booktitle = {43rd International Symposium on Mathematical Foundations of Computer Science (MFCS 2018)},
location = {Liverpool, United Kingdom},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Timed network games with clocks}},
doi = {10.4230/LIPICS.MFCS.2018.23},
volume = {117},
year = {2018},
}
@article{315,
abstract = {More than 100 years after Grigg’s influential analysis of species’ borders, the causes of limits to species’ ranges still represent a puzzle that has never been understood with clarity. The topic has become especially important recently as many scientists have become interested in the potential for species’ ranges to shift in response to climate change—and yet nearly all of those studies fail to recognise or incorporate evolutionary genetics in a way that relates to theoretical developments. I show that range margins can be understood based on just two measurable parameters: (i) the fitness cost of dispersal—a measure of environmental heterogeneity—and (ii) the strength of genetic drift, which reduces genetic diversity. Together, these two parameters define an ‘expansion threshold’: adaptation fails when genetic drift reduces genetic diversity below that required for adaptation to a heterogeneous environment. When the key parameters drop below this expansion threshold locally, a sharp range margin forms. When they drop below this threshold throughout the species’ range, adaptation collapses everywhere, resulting in either extinction or formation of a fragmented metapopulation. Because the effects of dispersal differ fundamentally with dimension, the second parameter—the strength of genetic drift—is qualitatively different compared to a linear habitat. In two-dimensional habitats, genetic drift becomes effectively independent of selection. It decreases with ‘neighbourhood size’—the number of individuals accessible by dispersal within one generation. Moreover, in contrast to earlier predictions, which neglected evolution of genetic variance and/or stochasticity in two dimensions, dispersal into small marginal populations aids adaptation. This is because the reduction of both genetic and demographic stochasticity has a stronger effect than the cost of dispersal through increased maladaptation. 
The expansion threshold thus provides a novel, theoretically justified, and testable prediction for formation of the range margin and collapse of the species’ range.},
author = {Polechova, Jitka},
issn = {15449173},
journal = {PLoS Biology},
number = {6},
publisher = {Public Library of Science},
title = {{Is the sky the limit? On the expansion threshold of a species’ range}},
doi = {10.1371/journal.pbio.2005372},
volume = {16},
year = {2018},
}
@article{913,
abstract = {Coordinated cell polarization in developing tissues is a recurrent theme in multicellular organisms. In plants, a directional distribution of the plant hormone auxin is at the core of many developmental programs. A feedback regulation of auxin on the polarized localization of PIN auxin transporters in individual cells has been proposed as a self-organizing mechanism for coordinated tissue polarization, but the molecular mechanisms linking auxin signalling to PIN-dependent auxin transport remain unknown. We performed a microarray-based approach to find regulators of the auxin-induced PIN relocation in the Arabidopsis thaliana root. We identified a subset of a family of phosphatidylinositol transfer proteins (PITP), the PATELLINs (PATL). Here, we show that PATLs are expressed in partially overlapping cells types in different tissues going through mitosis or initiating differentiation programs. PATLs are plasma membrane-associated proteins accumulated in Arabidopsis embryos, primary roots, lateral root primordia, and developing stomata. Higher order patl mutants display reduced PIN1 repolarization in response to auxin, shorter root apical meristem, and drastic defects in embryo and seedling development. This suggests PATLs redundantly play a crucial role in polarity and patterning in Arabidopsis.},
author = {Tejos, Ricardo and Rodríguez Furlán, Cecilia and Adamowski, Maciek and Sauer, Michael and Norambuena, Lorena and Friml, Jiří},
issn = {00219533},
journal = {Journal of Cell Science},
number = {2},
publisher = {Company of Biologists},
title = {{PATELLINS are regulators of auxin mediated PIN1 relocation and plant development in Arabidopsis thaliana}},
doi = {10.1242/jcs.204198},
volume = {131},
year = {2018},
}
@article{9471,
abstract = {The DEMETER (DME) DNA glycosylase catalyzes genome-wide DNA demethylation and is required for endosperm genomic imprinting and embryo viability. Targets of DME-mediated DNA demethylation reside in small, euchromatic, AT-rich transposons and at the boundaries of large transposons, but how DME interacts with these diverse chromatin states is unknown. The STRUCTURE SPECIFIC RECOGNITION PROTEIN 1 (SSRP1) subunit of the chromatin remodeler FACT (facilitates chromatin transactions), was previously shown to be involved in the DME-dependent regulation of genomic imprinting in Arabidopsis endosperm. Therefore, to investigate the interaction between DME and chromatin, we focused on the activity of the two FACT subunits, SSRP1 and SUPPRESSOR of TY16 (SPT16), during reproduction in Arabidopsis. We found that FACT colocalizes with nuclear DME in vivo, and that DME has two classes of target sites, the first being euchromatic and accessible to DME, but the second, representing over half of DME targets, requiring the action of FACT for DME-mediated DNA demethylation genome-wide. Our results show that the FACT-dependent DME targets are GC-rich heterochromatin domains with high nucleosome occupancy enriched with H3K9me2 and H3K27me1. Further, we demonstrate that heterochromatin-associated linker histone H1 specifically mediates the requirement for FACT at a subset of DME-target loci. Overall, our results demonstrate that FACT is required for DME targeting by facilitating its access to heterochromatin.},
author = {Frost, Jennifer M. and Kim, M. Yvonne and Park, Guen Tae and Hsieh, Ping-Hung and Nakamura, Miyuki and Lin, Samuel J. H. and Yoo, Hyunjin and Choi, Jaemyung and Ikeda, Yoko and Kinoshita, Tetsu and Choi, Yeonhee and Zilberman, Daniel and Fischer, Robert L.},
issn = {1091-6490},
journal = {Proceedings of the National Academy of Sciences},
keywords = {Multidisciplinary},
number = {20},
pages = {E4720--E4729},
publisher = {National Academy of Sciences},
title = {{FACT complex is required for DNA demethylation at heterochromatin during reproduction in Arabidopsis}},
doi = {10.1073/pnas.1713333115},
volume = {115},
year = {2018},
}
@article{556,
abstract = {We investigate the free boundary Schur process, a variant of the Schur process introduced by Okounkov and Reshetikhin, where we allow the first and the last partitions to be arbitrary (instead of empty in the original setting). The pfaffian Schur process, previously studied by several authors, is recovered when just one of the boundary partitions is left free. We compute the correlation functions of the process in all generality via the free fermion formalism, which we extend with the thorough treatment of “free boundary states.” For the case of one free boundary, our approach yields a new proof that the process is pfaffian. For the case of two free boundaries, we find that the process is not pfaffian, but a closely related process is. We also study three different applications of the Schur process with one free boundary: fluctuations of symmetrized last passage percolation models, limit shapes and processes for symmetric plane partitions and for plane overpartitions.},
author = {Betea, Dan and Bouttier, Jeremie and Nejjar, Peter and Vuletic, Mirjana},
issn = {1424-0637},
journal = {Annales Henri Poincaré},
number = {12},
pages = {3663--3742},
publisher = {Springer Nature},
title = {{The free boundary Schur process and applications I}},
doi = {10.1007/s00023-018-0723-1},
volume = {19},
year = {2018},
}
@inproceedings{186,
abstract = {A drawing of a graph on a surface is independently even if every pair of nonadjacent edges in the drawing crosses an even number of times. The ℤ_2-genus of a graph G is the minimum g such that G has an independently even drawing on the orientable surface of genus g. An unpublished result by Robertson and Seymour implies that for every t, every graph of sufficiently large genus contains as a minor a projective t × t grid or one of the following so-called t-Kuratowski graphs: K_{3,t}, or t copies of K_5 or K_{3,3} sharing at most 2 common vertices. We show that the ℤ_2-genus of graphs in these families is unbounded in t; in fact, equal to their genus. Together, this implies that the genus of a graph is bounded from above by a function of its ℤ_2-genus, solving a problem posed by Schaefer and Štefankovič, and giving an approximate version of the Hanani-Tutte theorem on orientable surfaces.},
author = {Fulek, Radoslav and Kynčl, Jan},
location = {Budapest, Hungary},
pages = {40.1--40.14},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{The ℤ2-Genus of Kuratowski minors}},
doi = {10.4230/LIPIcs.SoCG.2018.40},
volume = {99},
year = {2018},
}
@inproceedings{433,
abstract = {A thrackle is a graph drawn in the plane so that every pair of its edges meet exactly once: either at a common end vertex or in a proper crossing. We prove that any thrackle of n vertices has at most 1.3984n edges. Quasi-thrackles are defined similarly, except that every pair of edges that do not share a vertex are allowed to cross an odd number of times. It is also shown that the maximum number of edges of a quasi-thrackle on n vertices is 3/2(n-1), and that this bound is best possible for infinitely many values of n.},
author = {Fulek, Radoslav and Pach, János},
location = {Boston, MA, United States},
pages = {160--166},
publisher = {Springer},
title = {{Thrackles: An improved upper bound}},
doi = {10.1007/978-3-319-73915-1_14},
volume = {10692},
year = {2018},
}
@misc{5757,
abstract = {File S1. Variant Calling Format file of the ingroup: 197 haploid sequences of D. melanogaster from Zambia (Africa) aligned to the D. melanogaster 5.57 reference genome.
File S2. Variant Calling Format file of the outgroup: 1 haploid sequence of D. simulans aligned to the D. melanogaster 5.57 reference genome.
File S3. Annotations of each transcript in coding regions with SNPeff: Ps (# of synonymous polymorphic sites); Pn (# of non-synonymous polymorphic sites); Ds (# of synonymous divergent sites); Dn (# of non-synonymous divergent sites); DoS; α MK . All variants were included.
File S4. Annotations of each transcript in non-coding regions with SNPeff: Ps (# of synonymous polymorphic sites); Pu (# of UTR polymorphic sites); Ds (# of synonymous divergent sites); Du (# of UTR divergent sites); DoS; α MK . All variants were included.
File S5. Annotations of each transcript in coding regions with SNPGenie: Ps (# of synonymous polymorphic sites); πs (synonymous diversity); Ss_p (total # of synonymous sites in the polymorphism data); Pn (# of non-synonymous polymorphic sites); πn (non-synonymous diversity); Sn_p (total # of non-synonymous sites in the polymorphism data); Ds (# of synonymous divergent sites); ks (synonymous evolutionary rate); Ss_d (total # of synonymous sites in the divergence data); Dn (# of non-synonymous divergent sites); kn (non-synonymous evolutionary rate); Sn_d (total # of non-
synonymous sites in the divergence data); DoS; α MK . All variants were included.
File S6. Gene expression values (RPKM summed over all transcripts) for each sample. Values were quantile-normalized across all samples.
File S7. Final dataset with all covariates, α MK , ωA MK and DoS for coding sites, excluding variants below 5% frequency.
File S8. Final dataset with all covariates, α MK , ωA MK and DoS for non-coding sites, excluding variants below 5%
frequency.
File S9. Final dataset with all covariates, α EWK , ωA EWK and deleterious SFS for coding sites obtained with the Eyre-Walker and Keightley method on binned data and using all variants.},
author = {Fraisse, Christelle},
keywords = {(mal)adaptation, pleiotropy, selective constraint, evo-devo, gene expression, Drosophila melanogaster},
publisher = {IST Austria},
title = {{Supplementary Files for "Pleiotropy modulates the efficacy of selection in Drosophila melanogaster"}},
doi = {10.15479/at:ista:5757},
year = {2018},
}
@misc{9837,
abstract = {Both classical and recent studies suggest that chromosomal inversion polymorphisms are important in adaptation and speciation. However, biases in discovery and reporting of inversions make it difficult to assess their prevalence and biological importance. Here, we use an approach based on linkage disequilibrium among markers genotyped for samples collected across a transect between contrasting habitats to detect chromosomal rearrangements de novo. We report 17 polymorphic rearrangements in a single locality for the coastal marine snail, Littorina saxatilis. Patterns of diversity in the field and of recombination in controlled crosses provide strong evidence that at least the majority of these rearrangements are inversions. Most show clinal changes in frequency between habitats, suggestive of divergent selection, but only one appears to be fixed for different arrangements in the two habitats. Consistent with widespread evidence for balancing selection on inversion polymorphisms, we argue that a combination of heterosis and divergent selection can explain the observed patterns and should be considered in other systems spanning environmental gradients.},
author = {Faria, Rui and Chaube, Pragya and Morales, Hernán E. and Larsson, Tomas and Lemmon, Alan R. and Lemmon, Emily M. and Rafajlović, Marina and Panova, Marina and Ravinet, Mark and Johannesson, Kerstin and Westram, Anja M and Butlin, Roger K.},
publisher = {Dryad},
title = {{Data from: Multiple chromosomal rearrangements in a hybrid zone between Littorina saxatilis ecotypes}},
doi = {10.5061/dryad.72cg113},
year = {2018},
}
@inbook{10864,
abstract = {We prove that every congruence distributive variety has directed Jónsson terms, and every congruence modular variety has directed Gumm terms. The directed terms we construct witness every case of absorption witnessed by the original Jónsson or Gumm terms. This result is equivalent to a pair of claims about absorption for admissible preorders in congruence distributive and congruence modular varieties, respectively. For finite algebras, these absorption theorems have already seen significant applications, but until now, it was not clear if the theorems hold for general algebras as well. Our method also yields a novel proof of a result by P. Lipparini about the existence of a chain of terms (which we call Pixley terms) in varieties that are at the same time congruence distributive and k-permutable for some k.},
author = {Kazda, Alexandr and Kozik, Marcin and McKenzie, Ralph and Moore, Matthew},
booktitle = {Don Pigozzi on Abstract Algebraic Logic, Universal Algebra, and Computer Science},
editor = {Czelakowski, Janusz},
isbn = {9783319747712},
issn = {2211-2766},
pages = {203--220},
publisher = {Springer Nature},
title = {{Absorption and directed Jónsson terms}},
doi = {10.1007/978-3-319-74772-9_7},
volume = {16},
year = {2018},
}
@inproceedings{184,
abstract = {We prove that for every d ≥ 2, deciding if a pure, d-dimensional, simplicial complex is shellable is NP-hard, hence NP-complete. This resolves a question raised, e.g., by Danaraj and Klee in 1978. Our reduction also yields that for every d ≥ 2 and k ≥ 0, deciding if a pure, d-dimensional, simplicial complex is k-decomposable is NP-hard. For d ≥ 3, both problems remain NP-hard when restricted to contractible pure d-dimensional complexes.},
author = {Goaoc, Xavier and Paták, Pavel and Patakova, Zuzana and Tancer, Martin and Wagner, Uli},
location = {Budapest, Hungary},
pages = {41:1--41:16},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Shellability is NP-complete}},
doi = {10.4230/LIPIcs.SoCG.2018.41},
volume = {99},
year = {2018},
}
@inproceedings{285,
abstract = {In graph theory, as well as in 3-manifold topology, there exist several width-type parameters to describe how "simple" or "thin" a given graph or 3-manifold is. These parameters, such as pathwidth or treewidth for graphs, or the concept of thin position for 3-manifolds, play an important role when studying algorithmic problems; in particular, there is a variety of problems in computational 3-manifold topology - some of them known to be computationally hard in general - that become solvable in polynomial time as soon as the dual graph of the input triangulation has bounded treewidth. In view of these algorithmic results, it is natural to ask whether every 3-manifold admits a triangulation of bounded treewidth. We show that this is not the case, i.e., that there exists an infinite family of closed 3-manifolds not admitting triangulations of bounded pathwidth or treewidth (the latter implies the former, but we present two separate proofs). We derive these results from work of Agol and of Scharlemann and Thompson, by exhibiting explicit connections between the topology of a 3-manifold M on the one hand and width-type parameters of the dual graphs of triangulations of M on the other hand, answering a question that had been raised repeatedly by researchers in computational 3-manifold topology. In particular, we show that if a closed, orientable, irreducible, non-Haken 3-manifold M has a triangulation of treewidth (resp. pathwidth) k then the Heegaard genus of M is at most 48(k+1) (resp. 4(3k+1)).},
author = {Huszár, Kristóf and Spreer, Jonathan and Wagner, Uli},
issn = {1868-8969},
location = {Budapest, Hungary},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{On the treewidth of triangulated 3-manifolds}},
doi = {10.4230/LIPIcs.SoCG.2018.46},
volume = {99},
year = {2018},
}
@misc{13059,
abstract = {This dataset contains a GitHub repository containing all the data, analysis, Nextflow workflows and Jupyter notebooks to replicate the manuscript titled "Fast and accurate large multiple sequence alignments with a root-to-leaf regressive method".
It also contains the Multiple Sequence Alignments (MSAs) generated and well as the main figures and tables from the manuscript.
The repository is also available at GitHub (https://github.com/cbcrg/dpa-analysis) release `v1.2`.
For details on how to use the regressive alignment algorithm, see the T-Coffee software suite (https://github.com/cbcrg/tcoffee).},
author = {Garriga, Edgar and di Tommaso, Paolo and Magis, Cedrik and Erb, Ionas and Mansouri, Leila and Baltzis, Athanasios and Laayouni, Hafid and Kondrashov, Fyodor and Floden, Evan and Notredame, Cedric},
publisher = {Zenodo},
title = {{Fast and accurate large multiple sequence alignments with a root-to-leaf regressive method}},
doi = {10.5281/ZENODO.2025846},
year = {2018},
}
@phdthesis{49,
abstract = {Nowadays, quantum computation is receiving more and more attention as an alternative to the classical way of computing. For realizing a quantum computer, different devices are investigated as potential quantum bits. In this thesis, the focus is on Ge hut wires, which turned out to be promising candidates for implementing hole spin quantum bits. The advantages of Ge as a material system are the low hyperfine interaction for holes and the strong spin orbit coupling, as well as the compatibility with the highly developed CMOS processes in industry. In addition, Ge can also be isotopically purified which is expected to boost the spin coherence times. The strong spin orbit interaction for holes in Ge on the one hand enables the full electrical control of the quantum bit and on the other hand should allow short spin manipulation times. Starting with a bare Si wafer, this work covers the entire process reaching from growth over the fabrication and characterization of hut wire devices up to the demonstration of hole spin resonance. From experiments with single quantum dots, a large g-factor anisotropy between the in-plane and the out-of-plane direction was found. A comparison to a theoretical model unveiled the heavy-hole character of the lowest energy states. The second part of the thesis addresses double quantum dot devices, which were realized by adding two gate electrodes to a hut wire. In such devices, Pauli spin blockade was observed, which can serve as a read-out mechanism for spin quantum bits. Applying oscillating electric fields in spin blockade allowed the demonstration of continuous spin rotations and the extraction of a lower bound for the spin dephasing time. Despite the strong spin orbit coupling in Ge, the obtained value for the dephasing time is comparable to what has been recently reported for holes in Si. 
All in all, the presented results point out the high potential of Ge hut wires as a platform for long-lived, fast and fully electrically tunable hole spin quantum bits.},
author = {Watzinger, Hannes},
issn = {2663-337X},
pages = {77},
publisher = {Institute of Science and Technology Austria},
title = {{Ge hut wires - from growth to hole spin resonance}},
doi = {10.15479/AT:ISTA:th_1033},
year = {2018},
}
@phdthesis{201,
abstract = {We describe arrangements of three-dimensional spheres from a geometrical and topological point of view. Real data (fitting this setup) often consist of soft spheres which show certain degree of deformation while strongly packing against each other. In this context, we answer the following questions: If we model a soft packing of spheres by hard spheres that are allowed to overlap, can we measure the volume in the overlapped areas? Can we be more specific about the overlap volume, i.e. quantify how much volume is there covered exactly twice, three times, or k times? What would be a good optimization criteria that rule the arrangement of soft spheres while making a good use of the available space? Fixing a particular criterion, what would be the optimal sphere configuration? The first result of this thesis are short formulas for the computation of volumes covered by at least k of the balls. The formulas exploit information contained in the order-k Voronoi diagrams and its closely related Level-k complex. The used complexes lead to a natural generalization into poset diagrams, a theoretical formalism that contains the order-k and degree-k diagrams as special cases. In parallel, we define different criteria to determine what could be considered an optimal arrangement from a geometrical point of view. Fixing a criterion, we find optimal soft packing configurations in 2D and 3D where the ball centers lie on a lattice. As a last step, we use tools from computational topology on real physical data, to show the potentials of higher-order diagrams in the description of melting crystals. The results of the experiments leaves us with an open window to apply the theories developed in this thesis in real applications.},
author = {Iglesias Ham, Mabel},
issn = {2663-337X},
pages = {171},
publisher = {Institute of Science and Technology Austria},
title = {{Multiple covers with balls}},
doi = {10.15479/AT:ISTA:th_1026},
year = {2018},
}
@phdthesis{68,
abstract = {The most common assumption made in statistical learning theory is the assumption of the independent and identically distributed (i.i.d.) data. While being very convenient mathematically, it is often very clearly violated in practice. This disparity between the machine learning theory and applications underlies a growing demand in the development of algorithms that learn from dependent data and theory that can provide generalization guarantees similar to the independent situations. This thesis is dedicated to two variants of dependencies that can arise in practice. One is a dependence on the level of samples in a single learning task. Another dependency type arises in the multi-task setting when the tasks are dependent on each other even though the data for them can be i.i.d. In both cases we model the data (samples or tasks) as stochastic processes and introduce new algorithms for both settings that take into account and exploit the resulting dependencies. We prove the theoretical guarantees on the performance of the introduced algorithms under different evaluation criteria and, in addition, we compliment the theoretical study by the empirical one, where we evaluate some of the algorithms on two real world datasets to highlight their practical applicability.},
author = {Zimin, Alexander},
issn = {2663-337X},
pages = {92},
publisher = {Institute of Science and Technology Austria},
title = {{Learning from dependent data}},
doi = {10.15479/AT:ISTA:TH1048},
year = {2018},
}
@phdthesis{83,
abstract = {A proof system is a protocol between a prover and a verifier over a common input in which an honest prover convinces the verifier of the validity of true statements. Motivated by the success of decentralized cryptocurrencies, exemplified by Bitcoin, the focus of this thesis will be on proof systems which found applications in some sustainable alternatives to Bitcoin, such as the Spacemint and Chia cryptocurrencies. In particular, we focus on proofs of space and proofs of sequential work.
Proofs of space (PoSpace) were suggested as more ecological, economical, and egalitarian alternative to the energy-wasteful proof-of-work mining of Bitcoin. However, the state-of-the-art constructions of PoSpace are based on sophisticated graph pebbling lower bounds, and are therefore complex. Moreover, when these PoSpace are used in cryptocurrencies like Spacemint, miners can only start mining after ensuring that a commitment to their space is already added in a special transaction to the blockchain. Proofs of sequential work (PoSW) are proof systems in which a prover, upon receiving a statement x and a time parameter T, computes a proof which convinces the verifier that T time units had passed since x was received. Whereas Spacemint assumes synchrony to retain some interesting Bitcoin dynamics, Chia requires PoSW with unique proofs, i.e., PoSW in which it is hard to come up with more than one accepting proof for any true statement. In this thesis we construct simple and practically-efficient PoSpace and PoSW. When using our PoSpace in cryptocurrencies, miners can start mining on the fly, like in Bitcoin, and unlike current constructions of PoSW, which either achieve efficient verification of sequential work, or faster-than-recomputing verification of correctness of proofs, but not both at the same time, ours achieve the best of these two worlds.},
author = {Abusalah, Hamza M},
issn = {2663-337X},
pages = {59},
publisher = {Institute of Science and Technology Austria},
title = {{Proof systems for sustainable decentralized cryptocurrencies}},
doi = {10.15479/AT:ISTA:TH_1046},
year = {2018},
}
@unpublished{6183,
abstract = {We study the unique solution $m$ of the Dyson equation \[ -m(z)^{-1} = z - a
+ S[m(z)] \] on a von Neumann algebra $\mathcal{A}$ with the constraint
$\mathrm{Im}\,m\geq 0$. Here, $z$ lies in the complex upper half-plane, $a$ is
a self-adjoint element of $\mathcal{A}$ and $S$ is a positivity-preserving
linear operator on $\mathcal{A}$. We show that $m$ is the Stieltjes transform
of a compactly supported $\mathcal{A}$-valued measure on $\mathbb{R}$. Under
suitable assumptions, we establish that this measure has a uniformly
$1/3$-H\"{o}lder continuous density with respect to the Lebesgue measure, which
is supported on finitely many intervals, called bands. In fact, the density is
analytic inside the bands with a square-root growth at the edges and internal
cubic root cusps whenever the gap between two bands vanishes. The shape of
these singularities is universal and no other singularity may occur. We give a
precise asymptotic description of $m$ near the singular points. These
asymptotics generalize the analysis at the regular edges given in the companion
paper on the Tracy-Widom universality for the edge eigenvalue statistics for
correlated random matrices [arXiv:1804.07744] and they play a key role in the
proof of the Pearcey universality at the cusp for Wigner-type matrices
[arXiv:1809.03971,arXiv:1811.04055]. We also extend the finite dimensional band
mass formula from [arXiv:1804.07744] to the von Neumann algebra setting by
showing that the spectral mass of the bands is topologically rigid under
deformations and we conclude that these masses are quantized in some important
cases.},
author = {Alt, Johannes and Erdős, László and Krüger, Torben H},
booktitle = {arXiv},
title = {{The Dyson equation with linear self-energy: Spectral bands, edges and cusps}},
year = {2018},
}
@phdthesis{197,
abstract = {Modern computer vision systems heavily rely on statistical machine learning models, which typically require large amounts of labeled data to be learned reliably. Moreover, very recently computer vision research widely adopted techniques for representation learning, which further increase the demand for labeled data. However, for many important practical problems there is relatively small amount of labeled data available, so it is problematic to leverage full potential of the representation learning methods. One way to overcome this obstacle is to invest substantial resources into producing large labelled datasets. Unfortunately, this can be prohibitively expensive in practice. In this thesis we focus on the alternative way of tackling the aforementioned issue. We concentrate on methods, which make use of weakly-labeled or even unlabeled data. Specifically, the first half of the thesis is dedicated to the semantic image segmentation task. We develop a technique, which achieves competitive segmentation performance and only requires annotations in a form of global image-level labels instead of dense segmentation masks. Subsequently, we present a new methodology, which further improves segmentation performance by leveraging tiny additional feedback from a human annotator. By using our methods practitioners can greatly reduce the amount of data annotation effort, which is required to learn modern image segmentation models. In the second half of the thesis we focus on methods for learning from unlabeled visual data. We study a family of autoregressive models for modeling structure of natural images and discuss potential applications of these models. Moreover, we conduct in-depth study of one of these applications, where we develop the state-of-the-art model for the probabilistic image colorization task.},
author = {Kolesnikov, Alexander},
issn = {2663-337X},
pages = {113},
publisher = {Institute of Science and Technology Austria},
title = {{Weakly-Supervised Segmentation and Unsupervised Modeling of Natural Images}},
doi = {10.15479/AT:ISTA:th_1021},
year = {2018},
}
@article{6774,
abstract = {A central problem of algebraic topology is to understand the homotopy groups 𝜋𝑑(𝑋) of a topological space X. For the computational version of the problem, it is well known that there is no algorithm to decide whether the fundamental group 𝜋1(𝑋) of a given finite simplicial complex X is trivial. On the other hand, there are several algorithms that, given a finite simplicial complex X that is simply connected (i.e., with 𝜋1(𝑋) trivial), compute the higher homotopy group 𝜋𝑑(𝑋) for any given 𝑑≥2 . However, these algorithms come with a caveat: They compute the isomorphism type of 𝜋𝑑(𝑋) , 𝑑≥2 as an abstract finitely generated abelian group given by generators and relations, but they work with very implicit representations of the elements of 𝜋𝑑(𝑋) . Converting elements of this abstract group into explicit geometric maps from the d-dimensional sphere 𝑆𝑑 to X has been one of the main unsolved problems in the emerging field of computational homotopy theory. Here we present an algorithm that, given a simply connected space X, computes 𝜋𝑑(𝑋) and represents its elements as simplicial maps from a suitable triangulation of the d-sphere 𝑆𝑑 to X. For fixed d, the algorithm runs in time exponential in size(𝑋) , the number of simplices of X. Moreover, we prove that this is optimal: For every fixed 𝑑≥2 , we construct a family of simply connected spaces X such that for any simplicial map representing a generator of 𝜋𝑑(𝑋) , the size of the triangulation of 𝑆𝑑 on which the map is defined, is exponential in size(𝑋) .},
author = {Filakovský, Marek and Franek, Peter and Wagner, Uli and Zhechev, Stephan Y},
issn = {2367-1734},
journal = {Journal of Applied and Computational Topology},
number = {3-4},
pages = {177--231},
publisher = {Springer},
title = {{Computing simplicial representatives of homotopy group elements}},
doi = {10.1007/s41468-018-0021-5},
volume = {2},
year = {2018},
}
@unpublished{75,
abstract = {We prove that any convex body in the plane can be partitioned into m convex parts of equal areas and perimeters for any integer m≥2; this result was previously known for prime powers m=p^k. We also give a higher-dimensional generalization.},
author = {Akopyan, Arseniy and Avvakumov, Sergey and Karasev, Roman},
pages = {11},
publisher = {arXiv},
title = {{Convex fair partitions into arbitrary number of pieces}},
year = {2018},
}
@inproceedings{133,
abstract = {Synchronous programs are easy to specify because the side effects of an operation are finished by the time the invocation of the operation returns to the caller. Asynchronous programs, on the other hand, are difficult to specify because there are side effects due to pending computation scheduled as a result of the invocation of an operation. They are also difficult to verify because of the large number of possible interleavings of concurrent computation threads. We present synchronization, a new proof rule that simplifies the verification of asynchronous programs by introducing the fiction, for proof purposes, that asynchronous operations complete synchronously. Synchronization summarizes an asynchronous computation as immediate atomic effect. Modular verification is enabled via pending asynchronous calls in atomic summaries, and a complementary proof rule that eliminates pending asynchronous calls when components and their specifications are composed. We evaluate synchronization in the context of a multi-layer refinement verification methodology on a collection of benchmark programs.},
author = {Kragl, Bernhard and Qadeer, Shaz and Henzinger, Thomas A},
issn = {1868-8969},
location = {Beijing, China},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Synchronizing the asynchronous}},
doi = {10.4230/LIPIcs.CONCUR.2018.21},
volume = {118},
year = {2018},
}
@inproceedings{187,
abstract = {Given a locally finite X ⊆ ℝ^d and a radius r ≥ 0, the k-fold cover of X and r consists of all points in ℝ^d that have k or more points of X within distance r. We consider two filtrations - one in scale obtained by fixing k and increasing r, and the other in depth obtained by fixing r and decreasing k - and we compute the persistence diagrams of both. While standard methods suffice for the filtration in scale, we need novel geometric and topological concepts for the filtration in depth. In particular, we introduce a rhomboid tiling in ℝ^{d+1} whose horizontal integer slices are the order-k Delaunay mosaics of X, and construct a zigzag module from Delaunay mosaics that is isomorphic to the persistence module of the multi-covers.},
author = {Edelsbrunner, Herbert and Osang, Georg F},
location = {Budapest, Hungary},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{The multi-cover persistence of Euclidean balls}},
doi = {10.4230/LIPIcs.SoCG.2018.34},
volume = {99},
year = {2018},
}
@article{692,
abstract = {We consider families of confocal conics and two pencils of Apollonian circles having the same foci. We will show that these families of curves generate trivial 3-webs and find the exact formulas describing them.},
author = {Akopyan, Arseniy},
journal = {Geometriae Dedicata},
number = {1},
pages = {55--64},
publisher = {Springer},
title = {{3-Webs generated by confocal conics and circles}},
doi = {10.1007/s10711-017-0265-6},
volume = {194},
year = {2018},
}
@article{77,
abstract = {Holes confined in quantum dots have gained considerable interest in the past few years due to their potential as spin qubits. Here we demonstrate two-axis control of a spin 3/2 qubit in natural Ge. The qubit is formed in a hut wire double quantum dot device. The Pauli spin blockade principle allowed us to demonstrate electric dipole spin resonance by applying a radio frequency electric field to one of the electrodes defining the double quantum dot. Coherent hole spin oscillations with Rabi frequencies reaching 140 MHz are demonstrated and dephasing times of 130 ns are measured. The reported results emphasize the potential of Ge as a platform for fast and electrically tunable hole spin qubit devices.},
author = {Watzinger, Hannes and Kukucka, Josip and Vukusic, Lada and Gao, Fei and Wang, Ting and Schäffler, Friedrich and Zhang, Jian and Katsaros, Georgios},
journal = {Nature Communications},
number = {3902},
publisher = {Nature Publishing Group},
title = {{A germanium hole spin qubit}},
doi = {10.1038/s41467-018-06418-4},
volume = {9},
year = {2018},
}
@article{691,
abstract = {Background: Transport protein particle (TRAPP) is a multisubunit complex that regulates membrane trafficking through the Golgi apparatus. The clinical phenotype associated with mutations in various TRAPP subunits has allowed elucidation of their functions in specific tissues. The role of some subunits in human disease, however, has not been fully established, and their functions remain uncertain.
Objective: We aimed to expand the range of neurodevelopmental disorders associated with mutations in TRAPP subunits by exome sequencing of consanguineous families.
Methods: Linkage and homozygosity mapping and candidate gene analysis were used to identify homozygous mutations in families. Patient fibroblasts were used to study splicing defect and zebrafish to model the disease.
Results: We identified six individuals from three unrelated families with a founder homozygous splice mutation in TRAPPC6B, encoding a core subunit of the complex TRAPP I. Patients manifested a neurodevelopmental disorder characterised by microcephaly, epilepsy and autistic features, and showed splicing defect. Zebrafish trappc6b morphants replicated the human phenotype, displaying decreased head size and neuronal hyperexcitability, leading to a lower seizure threshold.
Conclusion: This study provides clinical and functional evidence of the role of TRAPPC6B in brain development and function.},
author = {Marin Valencia, Isaac and Novarino, Gaia and Johansen, Anide and Rosti, Başak and Issa, Mahmoud and Musaev, Damir and Bhat, Gifty and Scott, Eric and Silhavy, Jennifer and Stanley, Valentina and Rosti, Rasim and Gleeson, Jeremy and Imam, Farhad and Zaki, Maha and Gleeson, Joseph},
journal = {Journal of Medical Genetics},
number = {1},
pages = {48--54},
publisher = {BMJ Publishing Group},
title = {{A homozygous founder mutation in TRAPPC6B associates with a neurodevelopmental disorder characterised by microcephaly epilepsy and autistic features}},
doi = {10.1136/jmedgenet-2017-104627},
volume = {55},
year = {2018},
}
@article{401,
abstract = {The actomyosin cytoskeleton, a key stress-producing unit in epithelial cells, oscillates spontaneously in a wide variety of systems. Although much of the signal cascade regulating myosin activity has been characterized, the origin of such oscillatory behavior is still unclear. Here, we show that basal myosin II oscillation in Drosophila ovarian epithelium is not controlled by actomyosin cortical tension, but instead relies on a biochemical oscillator involving ROCK and myosin phosphatase. Key to this oscillation is a diffusive ROCK flow, linking junctional Rho1 to medial actomyosin cortex, and dynamically maintained by a self-activation loop reliant on ROCK kinase activity. In response to the resulting myosin II recruitment, myosin phosphatase is locally enriched and shuts off ROCK and myosin II signals. Coupling Drosophila genetics, live imaging, modeling, and optogenetics, we uncover an intrinsic biochemical oscillator at the core of myosin II regulatory network, shedding light on the spatio-temporal dynamics of force generation.},
author = {Qin, Xiang and Hannezo, Edouard B and Mangeat, Thomas and Liu, Chang and Majumder, Pralay and Liu, Jiaying and Choesmel Cadamuro, Valerie and McDonald, Jocelyn and Liu, Yinyao and Yi, Bin and Wang, Xiaobo},
journal = {Nature Communications},
number = {1},
publisher = {Nature Publishing Group},
title = {{A biochemical network controlling basal myosin oscillation}},
doi = {10.1038/s41467-018-03574-5},
volume = {9},
year = {2018},
}
@article{318,
abstract = {The insect’s fat body combines metabolic and immunological functions. In this issue of Developmental Cell, Franz et al. (2018) show that in Drosophila, cells of the fat body are not static, but can actively “swim” toward sites of epithelial injury, where they physically clog the wound and locally secrete antimicrobial peptides.},
author = {Casano, Alessandra M and Sixt, Michael K},
journal = {Developmental Cell},
number = {4},
pages = {405--406},
publisher = {Cell Press},
title = {{A fat lot of good for wound healing}},
doi = {10.1016/j.devcel.2018.02.009},
volume = {44},
year = {2018},
}
@article{410,
abstract = {Lesion verification and quantification is traditionally done via histological examination of sectioned brains, a time-consuming process that relies heavily on manual estimation. Such methods are particularly problematic in posterior cortical regions (e.g. visual cortex), where sectioning leads to significant damage and distortion of tissue. Even more challenging is the post hoc localization of micro-electrodes, which relies on the same techniques, suffers from similar drawbacks and requires even higher precision. Here, we propose a new, simple method for quantitative lesion characterization and electrode localization that is less labor-intensive and yields more detailed results than conventional methods. We leverage staining techniques standard in electron microscopy with the use of commodity micro-CT imaging. We stain whole rat and zebra finch brains in osmium tetroxide, embed these in resin and scan entire brains in a micro-CT machine. The scans result in 3D reconstructions of the brains with section thickness dependent on sample size (12–15 and 5–6 microns for rat and zebra finch respectively) that can be segmented manually or automatically. Because the method captures the entire intact brain volume, comparisons within and across studies are more tractable, and the extent of lesions and electrodes may be studied with higher accuracy than with current methods.},
author = {Masís, Javier and Mankus, David and Wolff, Steffen and Guitchounts, Grigori and Jösch, Maximilian A and Cox, David},
journal = {Scientific Reports},
number = {1},
publisher = {Nature Publishing Group},
title = {{A micro-CT-based method for quantitative brain lesion characterization and electrode localization}},
doi = {10.1038/s41598-018-23247-z},
volume = {8},
year = {2018},
}
@article{277,
abstract = {Arabidopsis and human ARM protein interact with telomerase. Deregulated mRNA levels of DNA repair and ribosomal protein genes in an Arabidopsis arm mutant suggest non-telomeric ARM function. The human homolog ARMC6 interacts with hTRF2. Abstract: Telomerase maintains telomeres and has proposed non-telomeric functions. We previously identified interaction of the C-terminal domain of Arabidopsis telomerase reverse transcriptase (AtTERT) with an armadillo/β-catenin-like repeat (ARM) containing protein. Here we explore protein–protein interactions of the ARM protein, AtTERT domains, POT1a, TRF-like family and SMH family proteins, and the chromatin remodeling protein CHR19 using bimolecular fluorescence complementation (BiFC), yeast two-hybrid (Y2H) analysis, and co-immunoprecipitation. The ARM protein interacts with both the N- and C-terminal domains of AtTERT in different cellular compartments. ARM interacts with CHR19 and TRF-like I family proteins that also bind AtTERT directly or through interaction with POT1a. The putative human ARM homolog co-precipitates telomerase activity and interacts with hTRF2 protein in vitro. Analysis of Arabidopsis arm mutants shows no obvious changes in telomere length or telomerase activity, suggesting that ARM is not essential for telomere maintenance. The observed interactions with telomerase and Myb-like domain proteins (TRF-like family I) may therefore reflect possible non-telomeric functions. Transcript levels of several DNA repair and ribosomal genes are affected in arm mutants, and ARM, likely in association with other proteins, suppressed expression of XRCC3 and RPSAA promoter constructs in luciferase reporter assays. In conclusion, ARM can participate in non-telomeric functions of telomerase, and can also perform its own telomerase-independent functions.},
author = {Dokládal, Ladislav and Benková, Eva and Honys, David and Dupláková, Nikoleta and Lee, Lan and Gelvin, Stanton and Sýkorová, Eva},
journal = {Plant Molecular Biology},
number = {5},
pages = {407--420},
publisher = {Springer},
title = {{An armadillo-domain protein participates in a telomerase interaction network}},
doi = {10.1007/s11103-018-0747-4},
volume = {97},
year = {2018},
}
@inproceedings{299,
abstract = {We introduce in this paper AMT 2.0, a tool for qualitative and quantitative analysis of hybrid continuous and Boolean signals that combine numerical values and discrete events. The evaluation of the signals is based on rich temporal specifications expressed in extended Signal Temporal Logic (xSTL), which integrates Timed Regular Expressions (TRE) within Signal Temporal Logic (STL). The tool features qualitative monitoring (property satisfaction checking), trace diagnostics for explaining and justifying property violations and specification-driven measurement of quantitative features of the signal.},
author = {Nickovic, Dejan and Lebeltel, Olivier and Maler, Oded and Ferrere, Thomas and Ulus, Dogan},
editor = {Beyer, Dirk and Huisman, Marieke},
location = {Thessaloniki, Greece},
pages = {303--319},
publisher = {Springer},
title = {{AMT 2.0: Qualitative and quantitative trace analysis with extended signal temporal logic}},
doi = {10.1007/978-3-319-89963-3_18},
volume = {10806},
year = {2018},
}
@article{413,
abstract = {Being cared for when sick is a benefit of sociality that can reduce disease and improve survival of group members. However, individuals providing care risk contracting infectious diseases themselves. If they contract a low pathogen dose, they may develop low-level infections that do not cause disease but still affect host immunity by either decreasing or increasing the host’s vulnerability to subsequent infections. Caring for contagious individuals can thus significantly alter the future disease susceptibility of caregivers. Using ants and their fungal pathogens as a model system, we tested if the altered disease susceptibility of experienced caregivers, in turn, affects their expression of sanitary care behavior. We found that low-level infections contracted during sanitary care had protective or neutral effects on secondary exposure to the same (homologous) pathogen but consistently caused high mortality on superinfection with a different (heterologous) pathogen. In response to this risk, the ants selectively adjusted the expression of their sanitary care. Specifically, the ants performed less grooming and more antimicrobial disinfection when caring for nestmates contaminated with heterologous pathogens compared with homologous ones. By modulating the components of sanitary care in this way the ants acquired less infectious particles of the heterologous pathogens, resulting in reduced superinfection. The performance of risk-adjusted sanitary care reveals the remarkable capacity of ants to react to changes in their disease susceptibility, according to their own infection history and to flexibly adjust collective care to individual risk.},
author = {Konrad, Matthias and Pull, Christopher and Metzler, Sina and Seif, Katharina and Naderlinger, Elisabeth and Grasse, Anna V and Cremer, Sylvia},
journal = {Proceedings of the National Academy of Sciences},
number = {11},
pages = {2782--2787},
publisher = {National Academy of Sciences},
title = {{Ants avoid superinfections by performing risk-adjusted sanitary care}},
doi = {10.1073/pnas.1713501115},
volume = {115},
year = {2018},
}
@article{163,
abstract = {For ultrafast fixation of biological samples to avoid artifacts, high-pressure freezing (HPF) followed by freeze substitution (FS) is preferred over chemical fixation at room temperature. After HPF, samples are maintained at low temperature during dehydration and fixation, while avoiding damaging recrystallization. This is a notoriously slow process. McDonald and Webb demonstrated, in 2011, that sample agitation during FS dramatically reduces the necessary time. Then, in 2015, we (H.G. and S.R.) introduced an agitation module into the cryochamber of an automated FS unit and demonstrated that the preparation of algae could be shortened from days to a couple of hours. We argued that variability in the processing, reproducibility, and safety issues are better addressed using automated FS units. For dissemination, we started low-cost manufacturing of agitation modules for two of the most widely used FS units, the Automatic Freeze Substitution Systems, AFS(1) and AFS2, from Leica Microsystems, using three dimensional (3D)-printing of the major components. To test them, several labs independently used the modules on a wide variety of specimens that had previously been processed by manual agitation, or without agitation. We demonstrate that automated processing with sample agitation saves time, increases flexibility with respect to sample requirements and protocols, and produces data of at least as good quality as other approaches.},
author = {Reipert, Siegfried and Goldammer, Helmuth and Richardson, Christine and Goldberg, Martin and Hawkins, Timothy and Hollergschwandtner, Elena and Kaufmann, Walter and Antreich, Sebastian and Stierhof, York},
journal = {Journal of Histochemistry and Cytochemistry},
number = {12},
pages = {903--921},
publisher = {Histochemical Society},
title = {{Agitation modules: Flexible means to accelerate automated freeze substitution}},
doi = {10.1369/0022155418786698},
volume = {66},
year = {2018},
}
@article{195,
abstract = {We demonstrate that identical impurities immersed in a two-dimensional many-particle bath can be viewed as flux-tube-charged-particle composites described by fractional statistics. In particular, we find that the bath manifests itself as an external magnetic flux tube with respect to the impurities, and hence the time-reversal symmetry is broken for the effective Hamiltonian describing the impurities. The emerging flux tube acts as a statistical gauge field after a certain critical coupling. This critical coupling corresponds to the intersection point between the quasiparticle state and the phonon wing, where the angular momentum is transferred from the impurity to the bath. This amounts to a novel configuration with emerging anyons. The proposed setup paves the way to realizing anyons using electrons interacting with superfluid helium or lattice phonons, as well as using atomic impurities in ultracold gases.},
author = {Yakaboylu, Enderalp and Lemeshko, Mikhail},
journal = {Physical Review B - Condensed Matter and Materials Physics},
number = {4},
publisher = {American Physical Society},
title = {{Anyonic statistics of quantum impurities in two dimensions}},
doi = {10.1103/PhysRevB.98.045402},
volume = {98},
year = {2018},
}
@inproceedings{144,
abstract = {The task of a monitor is to watch, at run-time, the execution of a reactive system, and signal the occurrence of a safety violation in the observed sequence of events. While finite-state monitors have been studied extensively, in practice, monitoring software also makes use of unbounded memory. We define a model of automata equipped with integer-valued registers which can execute only a bounded number of instructions between consecutive events, and thus can form the theoretical basis for the study of infinite-state monitors. We classify these register monitors according to the number k of available registers, and the type of register instructions. In stark contrast to the theory of computability for register machines, we prove that for every k ≥ 1, monitors with k + 1 counters (with instruction set 〈+1, =〉) are strictly more expressive than monitors with k counters. We also show that adder monitors (with instruction set 〈1, +, =〉) are strictly more expressive than counter monitors, but are complete for monitoring all computable safety ω-languages for k = 6. Real-time monitors are further required to signal the occurrence of a safety violation as soon as it occurs. The expressiveness hierarchy for counter monitors carries over to real-time monitors. We then show that 2 adders cannot simulate 3 counters in real-time. Finally, we show that real-time adder monitors with inequalities are as expressive as real-time Turing machines.},
author = {Ferrere, Thomas and Henzinger, Thomas A and Saraç, Ege},
location = {Oxford, UK},
pages = {394--403},
publisher = {IEEE},
title = {{A theory of register monitors}},
doi = {10.1145/3209108.3209194},
volume = {Part F138033},
year = {2018},
}