@article{2211,
abstract = {In two-player finite-state stochastic games of partial observation on graphs, in every state of the graph, the players simultaneously choose an action, and their joint actions determine a probability distribution over the successor states. The game is played for infinitely many rounds and thus the players construct an infinite path in the graph. We consider reachability objectives where the first player tries to ensure a target state to be visited almost-surely (i.e., with probability 1) or positively (i.e., with positive probability), no matter the strategy of the second player. We classify such games according to the information and to the power of randomization available to the players. On the basis of information, the game can be one-sided with either (a) player 1, or (b) player 2 having partial observation (and the other player has perfect observation), or two-sided with (c) both players having partial observation. On the basis of randomization, (a) the players may not be allowed to use randomization (pure strategies), or (b) they may choose a probability distribution over actions but the actual random choice is external and not visible to the player (actions invisible), or (c) they may use full randomization. Our main results for pure strategies are as follows: (1) For one-sided games with player 2 having perfect observation we show that (in contrast to full randomized strategies) belief-based (subset-construction based) strategies are not sufficient, and we present an exponential upper bound on memory both for almost-sure and positive winning strategies; we show that the problem of deciding the existence of almost-sure and positive winning strategies for player 1 is EXPTIME-complete and present symbolic algorithms that avoid the explicit exponential construction. (2) For one-sided games with player 1 having perfect observation we show that nonelementary memory is both necessary and sufficient for both almost-sure and positive winning strategies.
(3) We show that for the general (two-sided) case finite-memory strategies are sufficient for both positive and almost-sure winning, and at least nonelementary memory is required. We establish the equivalence of the almost-sure winning problems for pure strategies and for randomized strategies with actions invisible. Our equivalence result exhibits serious flaws in previous results of the literature: we show a nonelementary memory lower bound for almost-sure winning whereas an exponential upper bound was previously claimed.},
author = {Chatterjee, Krishnendu and Doyen, Laurent},
journal = {ACM Transactions on Computational Logic (TOCL)},
number = {2},
publisher = {ACM},
title = {{Partial-observation stochastic games: How to win when belief fails}},
doi = {10.1145/2579821},
volume = {15},
year = {2014},
}
@article{2038,
abstract = {Recently, there has been an effort to add quantitative objectives to formal verification and synthesis. We introduce and investigate the extension of temporal logics with quantitative atomic assertions. At the heart of quantitative objectives lies the accumulation of values along a computation. It is often the accumulated sum, as with energy objectives, or the accumulated average, as with mean-payoff objectives. We investigate the extension of temporal logics with the prefix-accumulation assertions Sum(v) ≥ c and Avg(v) ≥ c, where v is a numeric (or Boolean) variable of the system, c is a constant rational number, and Sum(v) and Avg(v) denote the accumulated sum and average of the values of v from the beginning of the computation up to the current point in time. We also allow the path-accumulation assertions LimInfAvg(v) ≥ c and LimSupAvg(v) ≥ c, referring to the average value along an entire infinite computation. We study the border of decidability for such quantitative extensions of various temporal logics. In particular, we show that extending the fragment of CTL that has only the EX, EF, AX, and AG temporal modalities with both prefix-accumulation assertions, or extending LTL with both path-accumulation assertions, results in temporal logics whose model-checking problem is decidable. Moreover, the prefix-accumulation assertions may be generalized with "controlled accumulation," allowing, for example, to specify constraints on the average waiting time between a request and a grant. On the negative side, we show that this branching-time logic is, in a sense, the maximal logic with one or both of the prefix-accumulation assertions that permits a decidable model-checking procedure. Extending a temporal logic that has the EG or EU modalities, such as CTL or LTL, makes the problem undecidable.},
author = {Boker, Udi and Chatterjee, Krishnendu and Henzinger, Thomas A. and Kupferman, Orna},
journal = {ACM Transactions on Computational Logic (TOCL)},
number = {4},
publisher = {ACM},
title = {{Temporal specifications with accumulative values}},
doi = {10.1145/2629686},
volume = {15},
year = {2014},
}
@inproceedings{2162,
abstract = {We study two-player (zero-sum) concurrent mean-payoff games played on a finite-state graph. We focus on the important sub-class of ergodic games where all states are visited infinitely often with probability 1. The algorithmic study of ergodic games was initiated in a seminal work of Hoffman and Karp in 1966, but all basic complexity questions have remained unresolved. Our main results for ergodic games are as follows: We establish (1) an optimal exponential bound on the patience of stationary strategies (where patience of a distribution is the inverse of the smallest positive probability and represents a complexity measure of a stationary strategy); (2) the approximation problem lies in FNP; (3) the approximation problem is at least as hard as the decision problem for simple stochastic games (for which NP ∩ coNP is the long-standing best known bound). We present a variant of the strategy-iteration algorithm by Hoffman and Karp; show that both our algorithm and the classical value-iteration algorithm can approximate the value in exponential time; and identify a subclass where the value-iteration algorithm is a FPTAS. We also show that the exact value can be expressed in the existential theory of the reals, and establish square-root sum hardness for a related class of games.},
author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus},
booktitle = {Lecture Notes in Computer Science},
location = {Copenhagen, Denmark},
number = {Part 2},
pages = {122--133},
publisher = {Springer},
title = {{The complexity of ergodic mean payoff games}},
doi = {10.1007/978-3-662-43951-7_11},
volume = {8573},
year = {2014},
}
@inproceedings{2213,
abstract = {We consider two-player partial-observation stochastic games on finite-state graphs where player 1 has partial observation and player 2 has perfect observation. The winning conditions we study are ω-regular conditions specified as parity objectives. The qualitative-analysis problem given a partial-observation stochastic game and a parity objective asks whether there is a strategy to ensure that the objective is satisfied with probability 1 (resp. positive probability). These qualitative-analysis problems are known to be undecidable. However in many applications the relevant question is the existence of finite-memory strategies, and the qualitative-analysis problems under finite-memory strategies was recently shown to be decidable in 2EXPTIME. We improve the complexity and show that the qualitative-analysis problems for partial-observation stochastic parity games under finite-memory strategies are EXPTIME-complete; and also establish optimal (exponential) memory bounds for finite-memory strategies required for qualitative analysis.},
author = {Chatterjee, Krishnendu and Doyen, Laurent and Nain, Sumit and Vardi, Moshe},
booktitle = {Lecture Notes in Computer Science},
location = {Grenoble, France},
pages = {242--257},
publisher = {Springer},
title = {{The complexity of partial-observation stochastic parity games with finite-memory strategies}},
doi = {10.1007/978-3-642-54830-7_16},
volume = {8412},
year = {2014},
}
@inproceedings{2212,
abstract = {The theory of graph games is the foundation for modeling and synthesizing reactive processes. In the synthesis of stochastic processes, we use 2 1/2-player games where some transitions of the game graph are controlled by two adversarial players, the System and the Environment, and the other transitions are determined probabilistically. We consider 2 1/2-player games where the objective of the System is the conjunction of a qualitative objective (specified as a parity condition) and a quantitative objective (specified as a mean-payoff condition). We establish that the problem of deciding whether the System can ensure that the probability to satisfy the mean-payoff parity objective is at least a given threshold is in NP ∩ coNP, matching the best known bound in the special case of 2-player games (where all transitions are deterministic). We present an algorithm running in time O(d·n^{2d}·MeanGame) to compute the set of almost-sure winning states from which the objective can be ensured with probability 1, where n is the number of states of the game, d the number of priorities of the parity objective, and MeanGame is the complexity to compute the set of almost-sure winning states in 2 1/2-player mean-payoff games. Our results are useful in the synthesis of stochastic reactive systems with both functional requirement (given as a qualitative objective) and performance requirement (given as a quantitative objective).},
author = {Chatterjee, Krishnendu and Doyen, Laurent and Gimbert, Hugo and Oualhadj, Youssouf},
booktitle = {Lecture Notes in Computer Science},
location = {Grenoble, France},
pages = {210--225},
publisher = {Springer},
title = {{Perfect-information stochastic mean-payoff parity games}},
doi = {10.1007/978-3-642-54830-7_14},
volume = {8412},
year = {2014},
}
@inproceedings{2216,
abstract = {The edit distance between two (untimed) traces is the minimum cost of a sequence of edit operations (insertion, deletion, or substitution) needed to transform one trace to the other. Edit distances have been extensively studied in the untimed setting, and form the basis for approximate matching of sequences in different domains such as coding theory, parsing, and speech recognition. In this paper, we lift the study of edit distances from untimed languages to the timed setting. We define an edit distance between timed words which incorporates both the edit distance between the untimed words and the absolute difference in time stamps. Our edit distance between two timed words is computable in polynomial time. Further, we show that the edit distance between a timed word and a timed language generated by a timed automaton, defined as the edit distance between the word and the closest word in the language, is PSPACE-complete. While computing the edit distance between two timed automata is undecidable, we show that the approximate version, where we decide if the edit distance between two timed automata is either less than a given parameter or more than δ away from the parameter, for δ > 0, can be solved in exponential space and is EXPSPACE-hard. Our definitions and techniques can be generalized to the setting of hybrid systems, and analogous decidability results hold for rectangular automata.},
author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Majumdar, Ritankar},
booktitle = {Proceedings of the 17th International Conference on Hybrid Systems: Computation and Control},
location = {Berlin, Germany},
pages = {303--312},
publisher = {ACM},
title = {{Edit distance for timed automata}},
doi = {10.1145/2562059.2562141},
year = {2014},
}
@inproceedings{2167,
abstract = {Model-based testing is a promising technology for black-box software and hardware testing, in which test cases are generated automatically from high-level specifications. Nowadays, systems typically consist of multiple interacting components and, due to their complexity, testing presents a considerable portion of the effort and cost in the design process. Exploiting the compositional structure of system specifications can considerably reduce the effort in model-based testing. Moreover, inferring properties about the system from testing its individual components allows the designer to reduce the amount of integration testing. In this paper, we study compositional properties of the ioco-testing theory. We propose a new approach to composition and hiding operations, inspired by contract-based design and interface theories. These operations preserve behaviors that are compatible under composition and hiding, and prune away incompatible ones. The resulting specification characterizes the input sequences for which the unit testing of components is sufficient to infer the correctness of component integration without the need for further tests. We provide a methodology that uses these results to minimize integration testing effort, but also to detect potential weaknesses in specifications. While we focus on asynchronous models and the ioco conformance relation, the resulting methodology can be applied to a broader class of systems.},
author = {Daca, Przemyslaw and Henzinger, Thomas A. and Krenn, Willibald and Nickovic, Dejan},
booktitle = {IEEE 7th International Conference on Software Testing, Verification and Validation},
isbn = {978-1-4799-2255-0},
issn = {2159-4848},
location = {Cleveland, USA},
publisher = {IEEE},
title = {{Compositional specifications for IOCO testing}},
doi = {10.1109/ICST.2014.50},
year = {2014},
}
@misc{5411,
abstract = {Model-based testing is a promising technology for black-box software and hardware testing, in which test cases are generated automatically from high-level specifications. Nowadays, systems typically consist of multiple interacting components and, due to their complexity, testing presents a considerable portion of the effort and cost in the design process. Exploiting the compositional structure of system specifications can considerably reduce the effort in model-based testing. Moreover, inferring properties about the system from testing its individual components allows the designer to reduce the amount of integration testing.
In this paper, we study compositional properties of the IOCO-testing theory. We propose a new approach to composition and hiding operations, inspired by contract-based design and interface theories. These operations preserve behaviors that are compatible under composition and hiding, and prune away incompatible ones. The resulting specification characterizes the input sequences for which the unit testing of components is sufficient to infer the correctness of component integration without the need for further tests. We provide a methodology that uses these results to minimize integration testing effort, but also to detect potential weaknesses in specifications. While we focus on asynchronous models and the IOCO conformance relation, the resulting methodology can be applied to a broader class of systems.},
author = {Daca, Przemyslaw and Henzinger, Thomas A. and Krenn, Willibald and Nickovic, Dejan},
issn = {2664-1690},
pages = {20},
publisher = {IST Austria},
title = {{Compositional specifications for IOCO testing}},
doi = {10.15479/AT:IST-2014-148-v2-1},
year = {2014},
}
@inproceedings{2063,
abstract = {We consider Markov decision processes (MDPs) which are a standard model for probabilistic systems. We focus on qualitative properties for MDPs that can express that desired behaviors of the system arise almost-surely (with probability 1) or with positive probability. We introduce a new simulation relation to capture the refinement relation of MDPs with respect to qualitative properties, and present discrete graph theoretic algorithms with quadratic complexity to compute the simulation relation. We present an automated technique for assume-guarantee style reasoning for compositional analysis of MDPs with qualitative properties by giving a counterexample guided abstraction-refinement approach to compute our new simulation relation. We have implemented our algorithms and show that the compositional analysis leads to significant improvements.},
author = {Chatterjee, Krishnendu and Chmelik, Martin and Daca, Przemyslaw},
location = {Vienna, Austria},
pages = {473--490},
publisher = {Springer},
title = {{CEGAR for qualitative analysis of probabilistic systems}},
doi = {10.1007/978-3-319-08867-9_31},
volume = {8559},
year = {2014},
}
@misc{5413,
abstract = {We consider Markov decision processes (MDPs) which are a standard model for probabilistic systems. We focus on qualitative properties for MDPs that can express that desired behaviors of the system arise almost-surely (with probability 1) or with positive probability.
We introduce a new simulation relation to capture the refinement relation of MDPs with respect to qualitative properties, and present discrete graph theoretic algorithms with quadratic complexity to compute the simulation relation.
We present an automated technique for assume-guarantee style reasoning for compositional analysis of MDPs with qualitative properties by giving a counterexample guided abstraction-refinement approach to compute our new simulation relation. We have implemented our algorithms and show that the compositional analysis leads to significant improvements.},
author = {Chatterjee, Krishnendu and Daca, Przemyslaw and Chmelik, Martin},
issn = {2664-1690},
pages = {33},
publisher = {IST Austria},
title = {{CEGAR for qualitative analysis of probabilistic systems}},
doi = {10.15479/AT:IST-2014-153-v2-2},
year = {2014},
}
@misc{5414,
abstract = {We consider Markov decision processes (MDPs) which are a standard model for probabilistic systems. We focus on qualitative properties for MDPs that can express that desired behaviors of the system arise almost-surely (with probability 1) or with positive probability.
We introduce a new simulation relation to capture the refinement relation of MDPs with respect to qualitative properties, and present discrete graph theoretic algorithms with quadratic complexity to compute the simulation relation.
We present an automated technique for assume-guarantee style reasoning for compositional analysis of MDPs with qualitative properties by giving a counterexample guided abstraction-refinement approach to compute our new simulation relation.
We have implemented our algorithms and show that the compositional analysis leads to significant improvements.},
author = {Chatterjee, Krishnendu and Daca, Przemyslaw and Chmelik, Martin},
issn = {2664-1690},
pages = {33},
publisher = {IST Austria},
title = {{CEGAR for qualitative analysis of probabilistic systems}},
doi = {10.15479/AT:IST-2014-153-v3-1},
year = {2014},
}
@misc{5412,
abstract = {We consider Markov decision processes (MDPs) which are a standard model for probabilistic systems. We focus on qualitative properties for MDPs that can express that desired behaviors of the system arise almost-surely (with probability 1) or with positive probability.
We introduce a new simulation relation to capture the refinement relation of MDPs with respect to qualitative properties, and present discrete graph theoretic algorithms with quadratic complexity to compute the simulation relation.
We present an automated technique for assume-guarantee style reasoning for compositional analysis of MDPs with qualitative properties by giving a counterexample guided abstraction-refinement approach to compute our new simulation relation. We have implemented our algorithms and show that the compositional analysis leads to significant improvements.},
author = {Chatterjee, Krishnendu and Daca, Przemyslaw and Chmelik, Martin},
issn = {2664-1690},
pages = {31},
publisher = {IST Austria},
title = {{CEGAR for qualitative analysis of probabilistic systems}},
doi = {10.15479/AT:IST-2014-153-v1-1},
year = {2014},
}
@inproceedings{2163,
abstract = {We consider multi-player graph games with partial-observation and parity objective. While the decision problem for three-player games with a coalition of the first and second players against the third player is undecidable in general, we present a decidability result for partial-observation games where the first and third player are in a coalition against the second player, thus where the second player is adversarial but weaker due to partial-observation. We establish tight complexity bounds in the case where player 1 is less informed than player 2, namely 2-EXPTIME-completeness for parity objectives. The symmetric case of player 1 more informed than player 2 is much more complicated, and we show that already in the case where player 1 has perfect observation, memory of size non-elementary is necessary in general for reachability objectives, and the problem is decidable for safety and reachability objectives. From our results we derive new complexity results for partial-observation stochastic games.},
author = {Chatterjee, Krishnendu and Doyen, Laurent},
booktitle = {Lecture Notes in Computer Science},
location = {Copenhagen, Denmark},
number = {Part 2},
pages = {110--121},
publisher = {Springer},
title = {{Games with a weak adversary}},
doi = {10.1007/978-3-662-43951-7_10},
volume = {8573},
year = {2014},
}
@misc{5419,
abstract = {We consider the reachability and shortest path problems on low tree-width graphs, with n nodes, m edges, and tree-width t, on a standard RAM with wordsize W. We use O to hide polynomial factors of the inverse of the Ackermann function. Our main contributions are threefold:
1. For reachability, we present an algorithm that requires O(n·t^2·log(n/t)) preprocessing time, O(n·(t·log(n/t))/W) space, and O(t/W) time for pair queries and O((n·t)/W) time for single-source queries. Note that for constant t our algorithm uses O(n·log n) time for preprocessing; and O(n/W) time for single-source queries, which is faster than depth-first search/breadth-first search (after the preprocessing).
2. We present an algorithm for shortest path that requires O(n·t^2) preprocessing time, O(n·t) space, and O(t^2) time for pair queries and O(n·t) time for single-source queries.
3. We give a space versus query time trade-off algorithm for shortest path that, given any constant ε > 0, requires O(n·t^2) preprocessing time, O(n·t^2) space, and O(n^{1−ε}·t^2) time for pair queries.
Our algorithms improve all existing results, and use very simple data structures.},
author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Pavlogiannis, Andreas},
issn = {2664-1690},
pages = {34},
publisher = {IST Austria},
title = {{Improved algorithms for reachability and shortest path on low tree-width graphs}},
doi = {10.15479/AT:IST-2014-187-v1-1},
year = {2014},
}
@inproceedings{2217,
abstract = {As hybrid systems involve continuous behaviors, they should be evaluated by quantitative methods, rather than qualitative methods. In this paper we adapt a quantitative framework, called model measuring, to the hybrid systems domain. The model-measuring problem asks, given a model M and a specification, what is the maximal distance such that all models within that distance from M satisfy (or violate) the specification. A distance function on models is given as part of the input of the problem. Distances, especially related to continuous behaviors are more natural in the hybrid case than the discrete case. We are interested in distances represented by monotonic hybrid automata, a hybrid counterpart of (discrete) weighted automata, whose recognized timed languages are monotone (w.r.t. inclusion) in the values of parameters.
The contributions of this paper are twofold. First, we give sufficient conditions under which the model-measuring problem can be solved. Second, we discuss the modeling of distances and applications of the model-measuring problem.},
author = {Henzinger, Thomas A. and Otop, Jan},
booktitle = {Proceedings of the 17th International Conference on Hybrid Systems: Computation and Control},
location = {Berlin, Germany},
pages = {213--222},
publisher = {ACM},
title = {{Model measuring for hybrid systems}},
doi = {10.1145/2562059.2562130},
year = {2014},
}
@misc{5417,
abstract = {We define the model-measuring problem: given a model M and specification φ, what is the maximal distance ρ such that all models M' within distance ρ from M satisfy (or violate) φ. The model measuring problem presupposes a distance function on models. We concentrate on automatic distance functions, which are defined by weighted automata.
The model-measuring problem subsumes several generalizations of the classical model-checking problem, in particular, quantitative model-checking problems that measure the degree of satisfaction of a specification, and robustness problems that measure how much a model can be perturbed without violating the specification.
We show that for automatic distance functions, and ω-regular linear-time and branching-time specifications, the model-measuring problem can be solved.
We use automata-theoretic model-checking methods for model measuring, replacing the emptiness question for standard word and tree automata by the optimal-weight question for the weighted versions of these automata. We consider weighted automata that accumulate weights by maximizing, summing, discounting, and limit averaging.
We give several examples of using the model-measuring problem to compute various notions of robustness and quantitative satisfaction for temporal specifications.},
author = {Henzinger, Thomas A. and Otop, Jan},
issn = {2664-1690},
pages = {14},
publisher = {IST Austria},
title = {{From model checking to model measuring}},
doi = {10.15479/AT:IST-2014-172-v1-1},
year = {2014},
}
@misc{5416,
abstract = {As hybrid systems involve continuous behaviors, they should be evaluated by quantitative methods, rather than qualitative methods. In this paper we adapt a quantitative framework, called model measuring, to the hybrid systems domain. The model-measuring problem asks, given a model M and a specification, what is the maximal distance such that all models within that distance from M satisfy (or violate) the specification. A distance function on models is given as part of the input of the problem. Distances, especially related to continuous behaviors are more natural in the hybrid case than the discrete case. We are interested in distances represented by monotonic hybrid automata, a hybrid counterpart of (discrete) weighted automata, whose recognized timed languages are monotone (w.r.t. inclusion) in the values of parameters. The contributions of this paper are twofold. First, we give sufficient conditions under which the model-measuring problem can be solved. Second, we discuss the modeling of distances and applications of the model-measuring problem.},
author = {Henzinger, Thomas A. and Otop, Jan},
issn = {2664-1690},
pages = {22},
publisher = {IST Austria},
title = {{Model measuring for hybrid systems}},
doi = {10.15479/AT:IST-2014-171-v1-1},
year = {2014},
}
@misc{5418,
author = {Chatterjee, Krishnendu and Doyen, Laurent},
title = {{Games with a weak adversary}},
abstract = {We consider multi-player graph games with partial-observation and parity objective. While the decision problem for three-player games with a coalition of the first and second players against the third player is undecidable, we present a decidability result for partial-observation games where the first and third player are in a coalition against the second player, thus where the second player is adversarial but weaker due to partial-observation. We establish tight complexity bounds in the case where player 1 is less informed than player 2, namely 2-EXPTIME-completeness for parity objectives. The symmetric case of player 1 more informed than player 2 is much more complicated, and we show that already in the case where player 1 has perfect observation, memory of size non-elementary is necessary in general for reachability objectives, and the problem is decidable for safety and reachability objectives. Our results have tight connections with partial-observation stochastic games for which we derive new complexity results.},
publisher = {IST Austria},
issn = {2664-1690},
pages = {18},
doi = {10.15479/AT:IST-2014-176-v1-1},
year = {2014},
}
@misc{5420,
author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus},
title = {{The value 1 problem for concurrent mean-payoff games}},
abstract = {We consider concurrent mean-payoff games, a very well-studied class of two-player (player 1 vs player 2) zero-sum games on finite-state graphs where every transition is assigned a reward between 0 and 1, and the payoff function is the long-run average of the rewards. The value is the maximal expected payoff that player 1 can guarantee against all strategies of player 2. We consider the computation of the set of states with value 1 under finite-memory strategies for player 1, and our main results for the problem are as follows: (1) we present a polynomial-time algorithm; (2) we show that whenever there is a finite-memory strategy, there is a stationary strategy that does not need memory at all; and (3) we present an optimal bound (which is double exponential) on the patience of stationary strategies (where patience of a distribution is the inverse of the smallest positive probability and represents a complexity measure of a stationary strategy).},
publisher = {IST Austria},
issn = {2664-1690},
pages = {49},
doi = {10.15479/AT:IST-2014-191-v1-1},
year = {2014},
}
@techreport{5422,
abstract = {Notes from the Third Plenary for the Research Data Alliance in Dublin, Ireland on March 26 to 28, 2014 with focus on starting an institutional research data repository.},
author = {Porsche, Jana},
institution = {IST Austria},
title = {{Notes from Research Data Alliance Plenary Meeting in Dublin, Ireland}},
year = {2014},
}
@misc{5424,
abstract = {We consider partially observable Markov decision processes (POMDPs), which are a standard framework for robotics applications to model uncertainties present in the real world, with temporal logic specifications. All temporal logic specifications in linear-time temporal logic (LTL) can be expressed as parity objectives. We study the qualitative analysis problem for POMDPs with parity objectives that asks whether there is a controller (policy) to ensure that the objective holds with probability 1 (almost-surely). While the qualitative analysis of POMDPs with parity objectives is undecidable, recent results show that when restricted to finite-memory policies the problem is EXPTIME-complete. While the problem is intractable in theory, we present a practical approach to solve the qualitative analysis problem. We designed several heuristics to deal with the exponential complexity, and have used our implementation on a number of well-known POMDP examples for robotics applications. Our results provide the first practical approach to solve the qualitative analysis of robot motion planning with LTL properties in the presence of uncertainty.},
author = {Chatterjee, Krishnendu and Chmelik, Martin and Gupta, Raghav and Kanodia, Ayush},
issn = {2664-1690},
pages = {12},
publisher = {IST Austria},
title = {{Qualitative analysis of POMDPs with temporal logic specifications for robotics applications}},
doi = {10.15479/AT:IST-2014-305-v1-1},
year = {2014},
}
@misc{5426,
abstract = {We consider partially observable Markov decision processes (POMDPs), that are a standard framework for robotics applications to model uncertainties present in the real world, with temporal logic specifications. All temporal logic specifications in linear-time temporal logic (LTL) can be expressed as parity objectives. We study the qualitative analysis problem for POMDPs with parity objectives that asks whether there is a controller (policy) to ensure that the objective holds with probability 1 (almost-surely). While the qualitative analysis of POMDPs with parity objectives is undecidable, recent results show that when restricted to finite-memory policies the problem is EXPTIME-complete. While the problem is intractable in theory, we present a practical approach to solve the qualitative analysis problem. We designed several heuristics to deal with the exponential complexity, and have used our implementation on a number of well-known POMDP examples for robotics applications. Our results provide the first practical approach to solve the qualitative analysis of robot motion planning with LTL properties in the presence of uncertainty.},
author = {Chatterjee, Krishnendu and Chmelik, Martin and Gupta, Raghav and Kanodia, Ayush},
issn = {2664-1690},
pages = {10},
publisher = {IST Austria},
title = {{Qualitative analysis of POMDPs with temporal logic specifications for robotics applications}},
doi = {10.15479/AT:IST-2014-305-v2-1},
year = {2014},
}
@misc{5423,
abstract = {We present a flexible framework for the automated competitive analysis of on-line scheduling algorithms for firm- deadline real-time tasks based on multi-objective graphs: Given a taskset and an on-line scheduling algorithm specified as a labeled transition system, along with some optional safety, liveness, and/or limit-average constraints for the adversary, we automatically compute the competitive ratio of the algorithm w.r.t. a clairvoyant scheduler. We demonstrate the flexibility and power of our approach by comparing the competitive ratio of several on-line algorithms, including D(over), that have been proposed in the past, for various tasksets. Our experimental results reveal that none of these algorithms is universally optimal, in the sense that there are tasksets where other schedulers provide better performance. Our framework is hence a very useful design tool for selecting optimal algorithms for a given application. },
author = {Chatterjee, Krishnendu and Kössler, Alexander and Pavlogiannis, Andreas and Schmid, Ulrich},
issn = {2664-1690},
pages = {14},
publisher = {IST Austria},
title = {{A framework for automated competitive analysis of on-line scheduling of firm-deadline tasks}},
doi = {10.15479/AT:IST-2014-300-v1-1},
year = {2014},
}
@misc{5427,
abstract = {We consider graphs with n nodes together with their tree-decomposition that has b = O(n) bags and width t, on the standard RAM computational model with wordsize W = Θ(log n). Our contributions are two-fold: Our first contribution is an algorithm that given a graph and its tree-decomposition as input, computes a binary and balanced tree-decomposition of width at most 4 · t + 3 of the graph in O(b) time and space, improving a long-standing (from 1992) bound of O(n · log n) time for constant treewidth graphs. Our second contribution is on reachability queries for low treewidth graphs. We build on our tree-balancing algorithm and present a data-structure for graph reachability that requires O(n · t²) preprocessing time, O(n · t) space, and O(⌈t/log n⌉) time for pair queries, and O(n · t · log t/log n) time for single-source queries. For constant t our data-structure uses O(n) time for preprocessing, O(1) time for pair queries, and O(n/log n) time for single-source queries. This is (asymptotically) optimal and is faster than DFS/BFS when answering more than a constant number of single-source queries.},
author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Pavlogiannis, Andreas},
issn = {2664-1690},
pages = {24},
publisher = {IST Austria},
title = {{Optimal tree-decomposition balancing and reachability on low treewidth graphs}},
doi = {10.15479/AT:IST-2014-314-v1-1},
year = {2014},
}
@misc{5428,
abstract = {Simulation is an attractive alternative for language inclusion for automata as it is an under-approximation of language inclusion, but usually has much lower complexity. For non-deterministic automata, while language inclusion is PSPACE-complete, simulation can be computed in polynomial time. Simulation has also been extended in two orthogonal directions, namely, (1) fair simulation, for simulation over specified set of infinite runs; and (2) quantitative simulation, for simulation between weighted automata. Again, while fair trace inclusion is PSPACE-complete, fair simulation can be computed in polynomial time. For weighted automata, the (quantitative) language inclusion problem is undecidable for mean-payoff automata and the decidability is open for discounted-sum automata, whereas the (quantitative) simulation reduce to mean-payoff games and discounted-sum games, which admit pseudo-polynomial time algorithms.
In this work, we study (quantitative) simulation for weighted automata with Büchi acceptance conditions, i.e., we generalize fair simulation from non-weighted automata to weighted automata. We show that imposing Büchi acceptance conditions on weighted automata changes many fundamental properties of the simulation games. For example, whereas for mean-payoff and discounted-sum games, the players do not need memory to play optimally; we show in contrast that for simulation games with Büchi acceptance conditions, (i) for mean-payoff objectives, optimal strategies for both players require infinite memory in general, and (ii) for discounted-sum objectives, optimal strategies need not exist for both players. While the simulation games with Büchi acceptance conditions are more complicated (e.g., due to infinite-memory requirements for mean-payoff objectives) as compared to their counterpart without Büchi acceptance conditions, we still present pseudo-polynomial time algorithms to solve simulation games with Büchi acceptance conditions for both weighted mean-payoff and weighted discounted-sum automata.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Otop, Jan and Velner, Yaron},
issn = {2664-1690},
pages = {26},
publisher = {IST Austria},
title = {{Quantitative fair simulation games}},
doi = {10.15479/AT:IST-2014-315-v1-1},
year = {2014},
}
@misc{5415,
abstract = {Recently there has been a significant effort to add quantitative properties in formal verification and synthesis. While weighted automata over finite and infinite words provide a natural and flexible framework to express quantitative properties, perhaps surprisingly, several basic system properties such as average response time cannot be expressed with weighted automata. In this work, we introduce nested weighted automata as a new formalism for expressing important quantitative properties such as average response time. We establish an almost complete decidability picture for the basic decision problems for nested weighted automata, and illustrate its applicability in several domains. },
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Otop, Jan},
issn = {2664-1690},
pages = {27},
publisher = {IST Austria},
title = {{Nested weighted automata}},
doi = {10.15479/AT:IST-2014-170-v1-1},
year = {2014},
}
@misc{5421,
abstract = {Evolution occurs in populations of reproducing individuals. The structure of the population affects the outcome of the evolutionary process. Evolutionary graph theory is a powerful approach to study this phenomenon. There are two graphs. The interaction graph specifies who interacts with whom in the context of evolution. The replacement graph specifies who competes with whom for reproduction. The vertices of the two graphs are the same, and each vertex corresponds to an individual. A key quantity is the fixation probability of a new mutant. It is defined as the probability that a newly introduced mutant (on a single vertex) generates a lineage of offspring which eventually takes over the entire population of resident individuals. The basic computational questions are as follows: (i) the qualitative question asks whether the fixation probability is positive; and (ii) the quantitative approximation question asks for an approximation of the fixation probability. Our main results are: (1) We show that the qualitative question is NP-complete and the quantitative approximation question is #P-hard in the special case when the interaction and the replacement graphs coincide and even with the restriction that the resident individuals do not reproduce (which corresponds to an invading population taking over an empty structure). (2) We show that in general the qualitative question is PSPACE-complete and the quantitative approximation question is PSPACE-hard and can be solved in exponential time.},
author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Nowak, Martin},
issn = {2664-1690},
pages = {27},
publisher = {IST Austria},
title = {{The complexity of evolution on graphs}},
doi = {10.15479/AT:IST-2014-190-v2-2},
year = {2014},
}
@article{2257,
abstract = {Maximum entropy models are the least structured probability distributions that exactly reproduce a chosen set of statistics measured in an interacting network. Here we use this principle to construct probabilistic models which describe the correlated spiking activity of populations of up to 120 neurons in the salamander retina as it responds to natural movies. Already in groups as small as 10 neurons, interactions between spikes can no longer be regarded as small perturbations in an otherwise independent system; for 40 or more neurons pairwise interactions need to be supplemented by a global interaction that controls the distribution of synchrony in the population. Here we show that such “K-pairwise” models—being systematic extensions of the previously used pairwise Ising models—provide an excellent account of the data. We explore the properties of the neural vocabulary by: 1) estimating its entropy, which constrains the population's capacity to represent visual information; 2) classifying activity patterns into a small set of metastable collective modes; 3) showing that the neural codeword ensembles are extremely inhomogenous; 4) demonstrating that the state of individual neurons is highly predictable from the rest of the population, allowing the capacity for error correction.},
author = {Tkacik, Gasper and Marre, Olivier and Amodei, Dario and Schneidman, Elad and Bialek, William and Berry, Michael},
issn = {1553-734X},
journal = {PLoS Computational Biology},
number = {1},
publisher = {Public Library of Science},
title = {{Searching for collective behavior in a large network of sensory neurons}},
doi = {10.1371/journal.pcbi.1003408},
volume = {10},
year = {2014},
}
@inbook{6178,
abstract = {Mechanically coupled cells can generate forces driving cell and tissue morphogenesis during development. Visualization and measuring of these forces is of major importance to better understand the complexity of the biomechanic processes that shape cells and tissues. Here, we describe how UV laser ablation can be utilized to quantitatively assess mechanical tension in different tissues of the developing zebrafish and in cultures of primary germ layer progenitor cells ex vivo.},
author = {Smutny, Michael and Behrndt, Martin and Campinho, Pedro and Ruprecht, Verena and Heisenberg, Carl-Philipp J},
booktitle = {Tissue Morphogenesis},
editor = {Nelson, Celeste},
isbn = {9781493911639},
issn = {1064-3745},
pages = {219--235},
publisher = {Springer},
title = {{UV laser ablation to measure cell and tissue-generated forces in the zebrafish embryo in vivo and ex vivo}},
doi = {10.1007/978-1-4939-1164-6_15},
volume = {1189},
year = {2014},
}
@inproceedings{10885,
abstract = {Two-player games on graphs provide the theoretical framework for many important problems such as reactive synthesis. While the traditional study of two-player zero-sum games has been extended to multi-player games with several notions of equilibria, they are decidable only for perfect-information games, whereas several applications require imperfect-information games.
In this paper we propose a new notion of equilibria, called doomsday equilibria, which is a strategy profile such that all players satisfy their own objective, and if any coalition of players deviates and violates even one of the players objective, then the objective of every player is violated.
We present algorithms and complexity results for deciding the existence of doomsday equilibria for various classes of ω-regular objectives, both for imperfect-information games, and for perfect-information games.We provide optimal complexity bounds for imperfect-information games, and in most cases for perfect-information games.},
author = {Chatterjee, Krishnendu and Doyen, Laurent and Filiot, Emmanuel and Raskin, Jean-François},
booktitle = {VMCAI 2014: Verification, Model Checking, and Abstract Interpretation},
isbn = {9783642540127},
issn = {1611-3349},
location = {San Diego, CA, United States},
pages = {78--97},
publisher = {Springer Nature},
title = {{Doomsday equilibria for omega-regular games}},
doi = {10.1007/978-3-642-54013-4_5},
volume = {8318},
year = {2014},
}
@book{6853,
abstract = {This monograph presents a short course in computational geometry and topology. In the first part the book covers Voronoi diagrams and Delaunay triangulations, then it presents the theory of alpha complexes which play a crucial role in biology. The central part of the book is the homology theory and their computation, including the theory of persistence which is indispensable for applications, e.g. shape reconstruction. The target audience comprises researchers and practitioners in mathematics, biology, neuroscience and computer science, but the book may also be beneficial to graduate students of these fields.},
author = {Edelsbrunner, Herbert},
isbn = {978-3-319-05956-3},
issn = {2191-5318},
pages = {IX, 110},
publisher = {Springer Nature},
title = {{A Short Course in Computational Geometry and Topology}},
doi = {10.1007/978-3-319-05957-0},
year = {2014},
}
@techreport{7038,
author = {Huszár, Kristóf and Rolinek, Michal},
pages = {5},
institution = {IST Austria},
title = {{Playful Math - An introduction to mathematical games}},
year = {2014},
}
@inproceedings{8044,
abstract = {Many questions concerning models in quantum mechanics require a detailed analysis of the spectrum of the corresponding Hamiltonian, a linear operator on a suitable Hilbert space. Of particular relevance for an understanding of the low-temperature properties of a system is the structure of the excitation spectrum, which is the part of the spectrum close to the spectral bottom. We present recent progress on this question for bosonic many-body quantum systems with weak two-body interactions. Such system are currently of great interest, due to their experimental realization in ultra-cold atomic gases. We investigate the accuracy of the Bogoliubov approximations, which predicts that the low-energy spectrum is made up of sums of elementary excitations, with linear dispersion law at low momentum. The latter property is crucial for the superfluid behavior the system.},
author = {Seiringer, Robert},
booktitle = {Proceedings of the International Congress of Mathematicians},
isbn = {9788961058063},
location = {Seoul, South Korea},
pages = {1175--1194},
publisher = {Kyung Moon SA},
title = {{Structure of the excitation spectrum for many-body quantum systems}},
volume = {3},
year = {2014},
}
@article{2001,
abstract = {Antibiotics affect bacterial cell physiology at many levels. Rather than just compensating for the direct cellular defects caused by the drug, bacteria respond to antibiotics by changing their morphology, macromolecular composition, metabolism, gene expression and possibly even their mutation rate. Inevitably, these processes affect each other, resulting in a complex response with changes in the expression of numerous genes. Genome‐wide approaches can thus help in gaining a comprehensive understanding of bacterial responses to antibiotics. In addition, a combination of experimental and theoretical approaches is needed for identifying general principles that underlie these responses. Here, we review recent progress in our understanding of bacterial responses to antibiotics and their combinations, focusing on effects at the levels of growth rate and gene expression. We concentrate on studies performed in controlled laboratory conditions, which combine promising experimental techniques with quantitative data analysis and mathematical modeling. While these basic research approaches are not immediately applicable in the clinic, uncovering the principles and mechanisms underlying bacterial responses to antibiotics may, in the long term, contribute to the development of new treatment strategies to cope with and prevent the rise of resistant pathogenic bacteria.},
author = {Mitosch, Karin and Bollenbach, Tobias},
journal = {Environmental Microbiology Reports},
number = {6},
pages = {545--557},
publisher = {Wiley},
title = {{Bacterial responses to antibiotics and their combinations}},
doi = {10.1111/1758-2229.12190},
volume = {6},
year = {2014},
}
@inproceedings{2082,
abstract = {NMAC is a mode of operation which turns a fixed input-length keyed hash function f into a variable input-length function. A practical single-key variant of NMAC called HMAC is a very popular and widely deployed message authentication code (MAC). Security proofs and attacks for NMAC can typically be lifted to HMAC. NMAC was introduced by Bellare, Canetti and Krawczyk [Crypto'96], who proved it to be a secure pseudorandom function (PRF), and thus also a MAC, assuming that (1) f is a PRF and (2) the function we get when cascading f is weakly collision-resistant. Unfortunately, HMAC is typically instantiated with cryptographic hash functions like MD5 or SHA-1 for which (2) has been found to be wrong. To restore the provable guarantees for NMAC, Bellare [Crypto'06] showed its security based solely on the assumption that f is a PRF, albeit via a non-uniform reduction. - Our first contribution is a simpler and uniform proof for this fact: If f is an ε-secure PRF (against q queries) and a δ-non-adaptively secure PRF (against q queries), then NMAC f is an (ε+ℓqδ)-secure PRF against q queries of length at most ℓ blocks each. - We then show that this ε+ℓqδ bound is basically tight. For the most interesting case where ℓqδ ≥ ε we prove this by constructing an f for which an attack with advantage ℓqδ exists. This also violates the bound O(ℓε) on the PRF-security of NMAC recently claimed by Koblitz and Menezes. - Finally, we analyze the PRF-security of a modification of NMAC called NI [An and Bellare, Crypto'99] that differs mainly by using a compression function with an additional keying input. This avoids the constant rekeying on multi-block messages in NMAC and allows for a security proof starting by the standard switch from a PRF to a random function, followed by an information-theoretic analysis. We carry out such an analysis, obtaining a tight ℓq²/2^c bound for this step, improving over the trivial bound of ℓ²q²/2^c. 
The proof borrows combinatorial techniques originally developed for proving the security of CBC-MAC [Bellare et al., Crypto'05].},
author = {Gazi, Peter and Pietrzak, Krzysztof Z and Rybar, Michal},
booktitle = {Advances in Cryptology -- CRYPTO 2014},
editor = {Garay, Juan and Gennaro, Rosario},
location = {Santa Barbara, USA},
number = {1},
pages = {113--130},
publisher = {Springer},
title = {{The exact PRF-security of NMAC and HMAC}},
doi = {10.1007/978-3-662-44371-2_7},
volume = {8616},
year = {2014},
}
@article{9519,
abstract = {Transposons are selfish genetic sequences that can increase their copy number and inflict substantial damage on their hosts. To combat these genomic parasites, plants have evolved multiple pathways to identify and silence transposons by methylating their DNA. Plants have also evolved mechanisms to limit the collateral damage from the antitransposon machinery. In this review, we examine recent developments that have elucidated many of the molecular workings of these pathways. We also highlight the evidence that the methylation and demethylation pathways interact, indicating that plants have a highly sophisticated, integrated system of transposon defense that has an important role in the regulation of gene expression.},
author = {Kim, M. Yvonne and Zilberman, Daniel},
issn = {1878-4372},
journal = {Trends in Plant Science},
number = {5},
pages = {320--326},
publisher = {Elsevier},
title = {{DNA methylation as a system of plant genomic immunity}},
doi = {10.1016/j.tplants.2014.01.014},
volume = {19},
year = {2014},
}
@article{1912,
abstract = {Kupffer's vesicle (KV) is the zebrafish organ of laterality, patterning the embryo along its left-right (LR) axis. Regional differences in cell shape within the lumen-lining KV epithelium are essential for its LR patterning function. However, the processes by which KV cells acquire their characteristic shapes are largely unknown. Here, we show that the notochord induces regional differences in cell shape within KV by triggering extracellular matrix (ECM) accumulation adjacent to anterior-dorsal (AD) regions of KV. This localized ECM deposition restricts apical expansion of lumen-lining epithelial cells in AD regions of KV during lumen growth. Our study provides mechanistic insight into the processes by which KV translates global embryonic patterning into regional cell shape differences required for its LR symmetry-breaking function.},
author = {Compagnon, Julien and Barone, Vanessa and Rajshekar, Srivarsha and Kottmeier, Rita and Pranjic-Ferscha, Kornelija and Behrndt, Martin and Heisenberg, Carl-Philipp J},
journal = {Developmental Cell},
number = {6},
pages = {774--783},
publisher = {Cell Press},
title = {{The notochord breaks bilateral symmetry by controlling cell shapes in the Zebrafish laterality organ}},
doi = {10.1016/j.devcel.2014.11.003},
volume = {31},
year = {2014},
}
@article{2083,
abstract = {Understanding the effects of sex and migration on adaptation to novel environments remains a key problem in evolutionary biology. Using a single-cell alga Chlamydomonas reinhardtii, we investigated how sex and migration affected rates of evolutionary rescue in a sink environment, and subsequent changes in fitness following evolutionary rescue. We show that sex and migration affect both the rate of evolutionary rescue and subsequent adaptation. However, their combined effects change as the populations adapt to a sink habitat. Both sex and migration independently increased rates of evolutionary rescue, but the effect of sex on subsequent fitness improvements, following initial rescue, changed with migration, as sex was beneficial in the absence of migration but constraining adaptation when combined with migration. These results suggest that sex and migration are beneficial during the initial stages of adaptation, but can become detrimental as the population adapts to its environment.},
author = {Lagator, Mato and Morgan, Andrew and Neve, Paul and Colegrave, Nick},
journal = {Evolution},
number = {8},
pages = {2296--2305},
publisher = {Wiley},
title = {{Role of sex and migration in adaptation to sink environments}},
doi = {10.1111/evo.12440},
volume = {68},
year = {2014},
}
@misc{9747,
abstract = {Understanding the effects of sex and migration on adaptation to novel environments remains a key problem in evolutionary biology. Using a single-cell alga Chlamydomonas reinhardtii, we investigated how sex and migration affected rates of evolutionary rescue in a sink environment, and subsequent changes in fitness following evolutionary rescue. We show that sex and migration affect both the rate of evolutionary rescue and subsequent adaptation. However, their combined effects change as the populations adapt to a sink habitat. Both sex and migration independently increased rates of evolutionary rescue, but the effect of sex on subsequent fitness improvements, following initial rescue, changed with migration, as sex was beneficial in the absence of migration but constraining adaptation when combined with migration. These results suggest that sex and migration are beneficial during the initial stages of adaptation, but can become detrimental as the population adapts to its environment.},
author = {Lagator, Mato and Morgan, Andrew and Neve, Paul and Colegrave, Nick},
publisher = {Dryad},
title = {{Data from: Role of sex and migration in adaptation to sink environments}},
doi = {10.5061/dryad.s42n1},
year = {2014},
}
@article{2086,
abstract = {Pathogens may gain a fitness advantage through manipulation of the behaviour of their hosts. Likewise, host behavioural changes can be a defence mechanism, counteracting the impact of pathogens on host fitness. We apply harmonic radar technology to characterize the impact of an emerging pathogen - Nosema ceranae (Microsporidia) - on honeybee (Apis mellifera) flight and orientation performance in the field. Honeybees are the most important commercial pollinators. Emerging diseases have been proposed to play a prominent role in colony decline, partly through sub-lethal behavioural manipulation of their hosts. We found that homing success was significantly reduced in diseased (65.8%) versus healthy foragers (92.5%). Although lost bees had significantly reduced continuous flight times and prolonged resting times, other flight characteristics and navigational abilities showed no significant difference between infected and non-infected bees. Our results suggest that infected bees express normal flight characteristics but are constrained in their homing ability, potentially compromising the colony by reducing its resource inputs, but also counteracting the intra-colony spread of infection. We provide the first high-resolution analysis of sub-lethal effects of an emerging disease on insect flight behaviour. The potential causes and the implications for both host and parasite are discussed.},
author = {Wolf, Stephan and McMahon, Dino and Lim, Ka and Pull, Christopher and Clark, Suzanne and Paxton, Robert and Osborne, Juliet},
journal = {PLoS One},
number = {8},
publisher = {Public Library of Science},
title = {{So near and yet so far: Harmonic radar reveals reduced homing ability of Nosema infected honeybees}},
doi = {10.1371/journal.pone.0103989},
volume = {9},
year = {2014},
}
@misc{9888,
abstract = {Detailed description of the experimental prodedures, data analyses and additional statistical analyses of the results.},
author = {Wolf, Stephan and McMahon, Dino and Lim, Ka and Pull, Christopher and Clark, Suzanne and Paxton, Robert and Osborne, Juliet},
publisher = {Public Library of Science},
title = {{Supporting information}},
doi = {10.1371/journal.pone.0103989.s003},
year = {2014},
}
@article{9458,
abstract = {Dnmt1 epigenetically propagates symmetrical CG methylation in many eukaryotes. Their genomes are typically depleted of CG dinucleotides because of imperfect repair of deaminated methylcytosines. Here, we extensively survey diverse species lacking Dnmt1 and show that, surprisingly, symmetrical CG methylation is nonetheless frequently present and catalyzed by a different DNA methyltransferase family, Dnmt5. Numerous Dnmt5-containing organisms that diverged more than a billion years ago exhibit clustered methylation, specifically in nucleosome linkers. Clustered methylation occurs at unprecedented densities and directly disfavors nucleosomes, contributing to nucleosome positioning between clusters. Dense methylation is enabled by a regime of genomic sequence evolution that enriches CG dinucleotides and drives the highest CG frequencies known. Species with linker methylation have small, transcriptionally active nuclei that approach the physical limits of chromatin compaction. These features constitute a previously unappreciated genome architecture, in which dense methylation influences nucleosome positions, likely facilitating nuclear processes under extreme spatial constraints.},
author = {Huff, Jason T. and Zilberman, Daniel},
issn = {1097-4172},
journal = {Cell},
number = {6},
pages = {1286--1297},
publisher = {Elsevier},
title = {{Dnmt1-independent CG methylation contributes to nucleosome positioning in diverse eukaryotes}},
doi = {10.1016/j.cell.2014.01.029},
volume = {156},
year = {2014},
}
@article{9479,
abstract = {Centromeres mediate chromosome segregation and are defined by the centromere-specific histone H3 variant (CenH3)/centromere protein A (CENP-A). Removal of CenH3 from centromeres is a general property of terminally differentiated cells, and the persistence of CenH3 increases the risk of diseases such as cancer. However, active mechanisms of centromere disassembly are unknown. Nondividing Arabidopsis pollen vegetative cells, which transport engulfed sperm by extended tip growth, undergo loss of CenH3; centromeric heterochromatin decondensation; and bulk activation of silent rRNA genes, accompanied by their translocation into the nucleolus. Here, we show that these processes are blocked by mutations in the evolutionarily conserved AAA-ATPase molecular chaperone, CDC48A, homologous to yeast Cdc48 and human p97 proteins, both of which are implicated in ubiquitin/small ubiquitin-like modifier (SUMO)-targeted protein degradation. We demonstrate that CDC48A physically associates with its heterodimeric cofactor UFD1-NPL4, known to bind ubiquitin and SUMO, as well as with SUMO1-modified CenH3 and mutations in NPL4 phenocopy cdc48a mutations. In WT vegetative cell nuclei, genetically unlinked ribosomal DNA (rDNA) loci are uniquely clustered together within the nucleolus and all major rRNA gene variants, including those rDNA variants silenced in leaves, are transcribed. In cdc48a mutant vegetative cell nuclei, however, these rDNA loci frequently colocalized with condensed centromeric heterochromatin at the external periphery of the nucleolus. Our results indicate that the CDC48ANPL4 complex actively removes sumoylated CenH3 from centromeres and disrupts centromeric heterochromatin to release bulk rRNA genes into the nucleolus for ribosome production, which fuels single nucleus-driven pollen tube growth and is essential for plant reproduction.},
author = {Mérai, Zsuzsanna and Chumak, Nina and García-Aguilar, Marcelina and Hsieh, Tzung-Fu and Nishimura, Toshiro and Schoft, Vera K. and Bindics, János and Ślusarz, Lucyna and Arnoux, Stéphanie and Opravil, Susanne and Mechtler, Karl and Zilberman, Daniel and Fischer, Robert L. and Tamaru, Hisashi},
issn = {1091-6490},
journal = {Proceedings of the National Academy of Sciences},
number = {45},
pages = {16166--16171},
publisher = {National Academy of Sciences},
title = {{The AAA-ATPase molecular chaperone Cdc48/p97 disassembles sumoylated centromeres, decondenses heterochromatin, and activates ribosomal RNA genes}},
doi = {10.1073/pnas.1418564111},
volume = {111},
year = {2014},
}
@article{2004,
abstract = {We have assembled a network of cell-fate determining transcription factors that play a key role in the specification of the ventral neuronal subtypes of the spinal cord on the basis of published transcriptional interactions. Asynchronous Boolean modelling of the network was used to compare simulation results with reported experimental observations. Such comparison highlighted the need to include additional regulatory connections in order to obtain the fixed point attractors of the model associated with the five known progenitor cell types located in the ventral spinal cord. The revised gene regulatory network reproduced previously observed cell state switches between progenitor cells observed in knock-out animal models or in experiments where the transcription factors were overexpressed. Furthermore the network predicted the inhibition of Irx3 by Nkx2.2 and this prediction was tested experimentally. Our results provide evidence for the existence of an as yet undescribed inhibitory connection which could potentially have significance beyond the ventral spinal cord. The work presented in this paper demonstrates the strength of Boolean modelling for identifying gene regulatory networks.},
author = {Lovrics, Anna and Gao, Yu and Juhász, Bianka and Bock, István and Byrne, Helen and Dinnyés, András and Kovács, Krisztián},
journal = {PLoS One},
number = {11},
publisher = {Public Library of Science},
title = {{Boolean modelling reveals new regulatory connections between transcription factors orchestrating the development of the ventral spinal cord}},
doi = {10.1371/journal.pone.0111430},
volume = {9},
year = {2014},
}
@misc{9722,
author = {Lovrics, Anna and Gao, Yu and Juhász, Bianka and Bock, István and Byrne, Helen M. and Dinnyés, András and Kovács, Krisztián},
publisher = {Public Library of Science},
title = {{Transition probability between TF expression states when Dbx2 inhibits Nkx2.2}},
doi = {10.1371/journal.pone.0111430.s006},
year = {2014},
}
@article{2039,
abstract = {A fundamental question in biology is the following: what is the time scale that is needed for evolutionary innovations? There are many results that characterize single steps in terms of the fixation time of new mutants arising in populations of certain size and structure. But here we ask a different question, which is concerned with the much longer time scale of evolutionary trajectories: how long does it take for a population exploring a fitness landscape to find target sequences that encode new biological functions? Our key variable is the length, (Formula presented.) of the genetic sequence that undergoes adaptation. In computer science there is a crucial distinction between problems that require algorithms which take polynomial or exponential time. The latter are considered to be intractable. Here we develop a theoretical approach that allows us to estimate the time of evolution as function of (Formula presented.) We show that adaptation on many fitness landscapes takes time that is exponential in (Formula presented.) even if there are broad selection gradients and many targets uniformly distributed in sequence space. These negative results lead us to search for specific mechanisms that allow evolution to work on polynomial time scales. We study a regeneration process and show that it enables evolution to work in polynomial time.},
author = {Chatterjee, Krishnendu and Pavlogiannis, Andreas and Adlam, Ben and Nowak, Martin},
journal = {PLoS Computational Biology},
number = {9},
publisher = {Public Library of Science},
title = {{The time scale of evolutionary innovation}},
doi = {10.1371/journal.pcbi.1003818},
volume = {10},
year = {2014},
}
@article{2161,
abstract = {Repeated pathogen exposure is a common threat in colonies of social insects, posing selection pressures on colony members to respond with improved disease-defense performance. We here tested whether experience gained by repeated tending of low-level fungus-exposed (Metarhizium robertsii) larvae may alter the performance of sanitary brood care in the clonal ant, Platythyrea punctata. We trained ants individually over nine consecutive trials to either sham-treated or fungus-exposed larvae. We then compared the larval grooming behavior of naive and trained ants and measured how effectively they removed infectious fungal conidiospores from the fungus-exposed larvae. We found that the ants changed the duration of larval grooming in response to both, larval treatment and their level of experience: (1) sham-treated larvae received longer grooming than the fungus-exposed larvae and (2) trained ants performed less self-grooming but longer larval grooming than naive ants, which was true for both, ants trained to fungus-exposed and also to sham-treated larvae. Ants that groomed the fungus-exposed larvae for longer periods removed a higher number of fungal conidiospores from the surface of the fungus-exposed larvae. As experienced ants performed longer larval grooming, they were more effective in fungal removal, thus making them better caretakers under pathogen attack of the colony. By studying this clonal ant, we can thus conclude that even in the absence of genetic variation between colony members, differences in experience levels of brood care may affect performance of sanitary brood care in social insects.},
author = {Westhus, Claudia and Ugelvig, Line V and Tourdot, Edouard and Heinze, Jürgen and Doums, Claudie and Cremer, Sylvia},
issn = {0340-5443},
journal = {Behavioral Ecology and Sociobiology},
number = {10},
pages = {1701--1710},
publisher = {Springer},
title = {{Increased grooming after repeated brood care provides sanitary benefits in a clonal ant}},
doi = {10.1007/s00265-014-1778-8},
volume = {68},
year = {2014},
}
@article{2036,
abstract = { In rapidly changing environments, selection history may impact the dynamics of adaptation. Mutations selected in one environment may result in pleiotropic fitness trade-offs in subsequent novel environments, slowing the rates of adaptation. Epistatic interactions between mutations selected in sequential stressful environments may slow or accelerate subsequent rates of adaptation, depending on the nature of that interaction. We explored the dynamics of adaptation during sequential exposure to herbicides with different modes of action in Chlamydomonas reinhardtii. Evolution of resistance to two of the herbicides was largely independent of selection history. For carbetamide, previous adaptation to other herbicide modes of action positively impacted the likelihood of adaptation to this herbicide. Furthermore, while adaptation to all individual herbicides was associated with pleiotropic fitness costs in stress-free environments, we observed that accumulation of resistance mechanisms was accompanied by a reduction in overall fitness costs. We suggest that antagonistic epistasis may be a driving mechanism that enables populations to more readily adapt in novel environments. These findings highlight the potential for sequences of xenobiotics to facilitate the rapid evolution of multiple-drug and -pesticide resistance, as well as the potential for epistatic interactions between adaptive mutations to facilitate evolutionary rescue in rapidly changing environments. },
author = {Lagator, Mato and Colegrave, Nick and Neve, Paul},
journal = {Proceedings of the Royal Society B: Biological Sciences},
number = {1794},
publisher = {The Royal Society},
title = {{Selection history and epistatic interactions impact dynamics of adaptation to novel environmental stresses}},
doi = {10.1098/rspb.2014.1679},
volume = {281},
year = {2014},
}
@misc{9740,
abstract = {The fitness effects of symbionts on their hosts can be context-dependent, with usually benign symbionts causing detrimental effects when their hosts are stressed, or typically parasitic symbionts providing protection towards their hosts (e.g. against pathogen infection). Here, we studied the novel association between the invasive garden ant Lasius neglectus and its fungal ectosymbiont Laboulbenia formicarum for potential costs and benefits. We tested ants with different Laboulbenia levels for their survival and immunity under resource limitation and exposure to the obligate killing entomopathogen Metarhizium brunneum. While survival of L. neglectus workers under starvation was significantly decreased with increasing Laboulbenia levels, host survival under Metarhizium exposure increased with higher levels of the ectosymbiont, suggesting a symbiont-mediated anti-pathogen protection, which seems to be driven mechanistically by both improved sanitary behaviours and an upregulated immune system. Ants with high Laboulbenia levels showed significantly longer self-grooming and elevated expression of immune genes relevant for wound repair and antifungal responses (β-1,3-glucan binding protein, Prophenoloxidase), compared with ants carrying low Laboulbenia levels. This suggests that the ectosymbiont Laboulbenia formicarum weakens its ant host by either direct resource exploitation or the costs of an upregulated behavioural and immunological response, which, however, provides a prophylactic protection upon later exposure to pathogens.},
author = {Konrad, Matthias and Grasse, Anna V and Tragust, Simon and Cremer, Sylvia},
publisher = {Dryad},
title = {{Data from: Anti-pathogen protection versus survival costs mediated by an ectosymbiont in an ant host}},
doi = {10.5061/dryad.vm0vc},
year = {2014},
}
@misc{9741,
abstract = {In rapidly changing environments, selection history may impact the dynamics of adaptation. Mutations selected in one environment may result in pleiotropic fitness trade-offs in subsequent novel environments, slowing the rates of adaptation. Epistatic interactions between mutations selected in sequential stressful environments may slow or accelerate subsequent rates of adaptation, depending on the nature of that interaction. We explored the dynamics of adaptation during sequential exposure to herbicides with different modes of action in Chlamydomonas reinhardtii. Evolution of resistance to two of the herbicides was largely independent of selection history. For carbetamide, previous adaptation to other herbicide modes of action positively impacted the likelihood of adaptation to this herbicide. Furthermore, while adaptation to all individual herbicides was associated with pleiotropic fitness costs in stress-free environments, we observed that accumulation of resistance mechanisms was accompanied by a reduction in overall fitness costs. We suggest that antagonistic epistasis may be a driving mechanism that enables populations to more readily adapt in novel environments. These findings highlight the potential for sequences of xenobiotics to facilitate the rapid evolution of multiple-drug and -pesticide resistance, as well as the potential for epistatic interactions between adaptive mutations to facilitate evolutionary rescue in rapidly changing environments.},
author = {Lagator, Mato and Colegrave, Nick and Neve, Paul},
publisher = {Dryad},
title = {{Data from: Selection history and epistatic interactions impact dynamics of adaptation to novel environmental stresses}},
doi = {10.5061/dryad.85dn7},
year = {2014},
}
@misc{9739,
author = {Chatterjee, Krishnendu and Pavlogiannis, Andreas and Adlam, Ben and Nowak, Martin},
publisher = {Public Library of Science},
title = {{Detailed proofs for “The time scale of evolutionary innovation”}},
doi = {10.1371/journal.pcbi.1003818.s001},
year = {2014},
}
@article{2170,
abstract = { Short-read sequencing technologies have in principle made it feasible to draw detailed inferences about the recent history of any organism. In practice, however, this remains challenging due to the difficulty of genome assembly in most organisms and the lack of statistical methods powerful enough to discriminate between recent, nonequilibrium histories. We address both the assembly and inference challenges. We develop a bioinformatic pipeline for generating outgroup-rooted alignments of orthologous sequence blocks from de novo low-coverage short-read data for a small number of genomes, and show how such sequence blocks can be used to fit explicit models of population divergence and admixture in a likelihood framework. To illustrate our approach, we reconstruct the Pleistocene history of an oak-feeding insect (the oak gallwasp Biorhiza pallida), which, in common with many other taxa, was restricted during Pleistocene ice ages to a longitudinal series of southern refugia spanning the Western Palaearctic. Our analysis of sequence blocks sampled from a single genome from each of three major glacial refugia reveals support for an unexpected history dominated by recent admixture. Despite the fact that 80% of the genome is affected by admixture during the last glacial cycle, we are able to infer the deeper divergence history of these populations. These inferences are robust to variation in block length, mutation model and the sampling location of individual genomes within refugia. This combination of de novo assembly and numerical likelihood calculation provides a powerful framework for estimating recent population history that can be applied to any organism without the need for prior genetic resources.},
author = {Hearn, Jack and Stone, Graham and Bunnefeld, Lynsey and Nicholls, James and Barton, Nicholas H and Lohse, Konrad},
journal = {Molecular Ecology},
number = {1},
pages = {198--211},
publisher = {Wiley-Blackwell},
title = {{Likelihood-based inference of population history from low-coverage de novo genome assemblies}},
doi = {10.1111/mec.12578},
volume = {23},
year = {2014},
}
@misc{9753,
abstract = {Background: The brood of ants and other social insects is highly susceptible to pathogens, particularly those that penetrate the soft larval and pupal cuticle. We here test whether the presence of a pupal cocoon, which occurs in some ant species but not in others, affects the sanitary brood care and fungal infection patterns after exposure to the entomopathogenic fungus Metarhizium brunneum. We use a) a comparative approach analysing four species with either naked or cocooned pupae and b) a within-species analysis of a single ant species, in which both pupal types co-exist in the same colony. Results: We found that the presence of a cocoon did not compromise fungal pathogen detection by the ants and that species with cocooned pupae increased brood grooming after pathogen exposure. All tested ant species further removed brood from their nests, which was predominantly expressed towards larvae and naked pupae treated with the live fungal pathogen. In contrast, cocooned pupae exposed to live fungus were not removed at higher rates than cocooned pupae exposed to dead fungus or a sham control. Consistent with this, exposure to the live fungus caused high numbers of infections and fungal outgrowth in larvae and naked pupae, but not in cocooned pupae. Moreover, the ants consistently removed the brood prior to fungal outgrowth, ensuring a clean brood chamber. Conclusion: Our study suggests that the pupal cocoon has a protective effect against fungal infection, causing an adaptive change in sanitary behaviours by the ants. It further demonstrates that brood removal - originally described for honeybees as “hygienic behaviour” – is a widespread sanitary behaviour in ants, which likely has important implications on disease dynamics in social insect colonies.},
author = {Tragust, Simon and Ugelvig, Line V and Chapuisat, Michel and Heinze, Jürgen and Cremer, Sylvia},
publisher = {Dryad},
title = {{Data from: Pupal cocoons affect sanitary brood care and limit fungal infections in ant colonies}},
doi = {10.5061/dryad.nc0gc},
year = {2014},
}
@misc{9752,
abstract = {Redundancies and correlations in the responses of sensory neurons may seem to waste neural resources, but they can also carry cues about structured stimuli and may help the brain to correct for response errors. To investigate the effect of stimulus structure on redundancy in retina, we measured simultaneous responses from populations of retinal ganglion cells presented with natural and artificial stimuli that varied greatly in correlation structure; these stimuli and recordings are publicly available online. Responding to spatio-temporally structured stimuli such as natural movies, pairs of ganglion cells were modestly more correlated than in response to white noise checkerboards, but they were much less correlated than predicted by a non-adapting functional model of retinal response. Meanwhile, responding to stimuli with purely spatial correlations, pairs of ganglion cells showed increased correlations consistent with a static, non-adapting receptive field and nonlinearity. We found that in response to spatio-temporally correlated stimuli, ganglion cells had faster temporal kernels and tended to have stronger surrounds. These properties of individual cells, along with gain changes that opposed changes in effective contrast at the ganglion cell input, largely explained the pattern of pairwise correlations across stimuli where receptive field measurements were possible.},
author = {Simmons, Kristina and Prentice, Jason and Tkačik, Gašper and Homann, Jan and Yee, Heather and Palmer, Stephanie and Nelson, Philip and Balasubramanian, Vijay},
publisher = {Dryad},
title = {{Data from: Transformation of stimulus correlations by the retina}},
doi = {10.5061/dryad.246qg},
year = {2014},
}
@article{9931,
abstract = {Gene duplication is important in evolution, because it provides new raw material for evolutionary adaptations. Several existing hypotheses about the causes of duplicate retention and diversification differ in their emphasis on gene dosage, subfunctionalization, and neofunctionalization. Little experimental data exist on the relative importance of gene expression changes and changes in coding regions for the evolution of duplicate genes. Furthermore, we do not know how strongly the environment could affect this importance. To address these questions, we performed evolution experiments with the TEM-1 beta lactamase gene in Escherichia coli to study the initial stages of duplicate gene evolution in the laboratory. We mimicked tandem duplication by inserting two copies of the TEM-1 gene on the same plasmid. We then subjected these copies to repeated cycles of mutagenesis and selection in various environments that contained antibiotics in different combinations and concentrations. Our experiments showed that gene dosage is the most important factor in the initial stages of duplicate gene evolution, and overshadows the importance of point mutations in the coding region.},
author = {Dhar, Riddhiman and Bergmiller, Tobias and Wagner, Andreas},
issn = {1558-5646},
journal = {Evolution},
number = {6},
pages = {1775--1791},
publisher = {Wiley},
title = {{Increased gene dosage plays a predominant role in the initial stages of evolution of duplicate TEM-1 beta lactamase genes}},
doi = {10.1111/evo.12373},
volume = {68},
year = {2014},
}
@misc{9932,
abstract = {Gene duplication is important in evolution, because it provides new raw material for evolutionary adaptations. Several existing hypotheses about the causes of duplicate retention and diversification differ in their emphasis on gene dosage, sub-functionalization, and neo-functionalization. Little experimental data exists on the relative importance of gene expression changes and changes in coding regions for the evolution of duplicate genes. Furthermore, we do not know how strongly the environment could affect this importance. To address these questions, we performed evolution experiments with the TEM-1 beta lactamase gene in E. coli to study the initial stages of duplicate gene evolution in the laboratory. We mimicked tandem duplication by inserting two copies of the TEM-1 gene on the same plasmid. We then subjected these copies to repeated cycles of mutagenesis and selection in various environments that contained antibiotics in different combinations and concentrations. Our experiments showed that gene dosage is the most important factor in the initial stages of duplicate gene evolution, and overshadows the importance of point mutations in the coding region.},
author = {Dhar, Riddhiman and Bergmiller, Tobias and Wagner, Andreas},
publisher = {Dryad},
title = {{Data from: Increased gene dosage plays a predominant role in the initial stages of evolution of duplicate TEM-1 beta lactamase genes}},
doi = {10.5061/dryad.jc402},
year = {2014},
}
@article{1999,
abstract = {Selection for disease control is believed to have contributed to shape the organisation of insect societies — leading to interaction patterns that mitigate disease transmission risk within colonies, conferring them ‘organisational immunity’. Recent studies combining epidemiological models with social network analysis have identified general properties of interaction networks that may hinder propagation of infection within groups. These can be prophylactic and/or induced upon pathogen exposure. Here we review empirical evidence for these two types of organisational immunity in social insects and describe the individual-level behaviours that underlie it. We highlight areas requiring further investigation, and emphasise the need for tighter links between theory and empirical research and between individual-level and collective-level analyses.},
author = {Stroeymeyt, Nathalie and Casillas Perez, Barbara E and Cremer, Sylvia},
journal = {Current Opinion in Insect Science},
number = {1},
pages = {1--15},
publisher = {Elsevier},
title = {{Organisational immunity in social insects}},
doi = {10.1016/j.cois.2014.09.001},
volume = {5},
year = {2014},
}
@article{10396,
abstract = {Stimfit is a free cross-platform software package for viewing and analyzing electrophysiological data. It supports most standard file types for cellular neurophysiology and other biomedical formats. Its analysis algorithms have been used and validated in several experimental laboratories. Its embedded Python scripting interface makes Stimfit highly extensible and customizable.},
author = {Schlögl, Alois and Jonas, Peter M and Schmidt-Hieber, C. and Guzman, S. J.},
issn = {1862-278X},
journal = {Biomedical Engineering / Biomedizinische Technik},
keywords = {biomedical engineering, data analysis, free software},
location = {Graz, Austria},
number = {SI-1-Track-G},
publisher = {De Gruyter},
title = {{Stimfit: A fast visualization and analysis environment for cellular neurophysiology}},
doi = {10.1515/bmt-2013-4181},
volume = {58},
year = {2013},
}
@article{10895,
abstract = {Due to their sessile lifestyles, plants need to deal with the limitations and stresses imposed by the changing environment. Plants cope with these by a remarkable developmental flexibility, which is embedded in their strategy to survive. Plants can adjust their size, shape and number of organs, bend according to gravity and light, and regenerate tissues that were damaged, utilizing a coordinating, intercellular signal, the plant hormone, auxin. Another versatile signal is the cation, Ca2+, which is a crucial second messenger for many rapid cellular processes during responses to a wide range of endogenous and environmental signals, such as hormones, light, drought stress and others. Auxin is a good candidate for one of these Ca2+-activating signals. However, the role of auxin-induced Ca2+ signaling is poorly understood. Here, we will provide an overview of possible developmental and physiological roles, as well as mechanisms underlying the interconnection of Ca2+ and auxin signaling. },
author = {Vanneste, Steffen and Friml, Jiří},
issn = {2223-7747},
journal = {Plants},
keywords = {Plant Science, Ecology, Ecology, Evolution, Behavior and Systematics},
number = {4},
pages = {650--675},
publisher = {MDPI},
title = {{Calcium: The missing link in auxin action}},
doi = {10.3390/plants2040650},
volume = {2},
year = {2013},
}
@inproceedings{10902,
abstract = {We consider how to edit strings from a source language so that the edited strings belong to a target language, where the languages are given as deterministic finite automata. Non-streaming (or offline) transducers perform edits given the whole source string. We show that the class of deterministic one-pass transducers with registers along with increment and min operation suffices for computing optimal edit distance, whereas the same class of transducers without the min operation is not sufficient. Streaming (or online) transducers perform edits as the letters of the source string are received. We present a polynomial time algorithm for the partial-repair problem that given a bound α asks for the construction of a deterministic streaming transducer (if one exists) that ensures that the ‘maximum fraction’ η of the strings of the source language are edited, within cost α, to the target language.},
author = {Chatterjee, Krishnendu and Chaubal, Siddhesh and Rubin, Sasha},
booktitle = {7th International Conference on Language and Automata Theory and Applications},
isbn = {9783642370632},
issn = {0302-9743},
location = {Bilbao, Spain},
pages = {214--225},
publisher = {Springer Nature},
title = {{How to travel between languages}},
doi = {10.1007/978-3-642-37064-9_20},
volume = {7810},
year = {2013},
}
@inbook{10900,
abstract = {Leukocyte migration through the interstitial space is crucial for the maintenance of tolerance and immunity. The main cues for leukocyte trafficking are chemokines thought to directionally guide these cells towards their targets. However, model systems that facilitate quantification of chemokine-guided leukocyte migration in vivo are uncommon. Here we describe an ex vivo crawl-in assay using explanted mouse ears that allows the visualization of chemokine-dependent dendritic cell (DC) motility in the dermal interstitium in real time. We present methods for the preparation of mouse ear sheets and their use in multidimensional confocal imaging experiments to monitor and analyze the directional migration of fluorescently labelled DCs through the dermis and into afferent lymphatic vessels. The assay provides a more physiological approach to study leukocyte migration than in vitro three-dimensional (3D) or 2-dimensional (2D) migration assays such as collagen gels and transwell assays.},
author = {Weber, Michele and Sixt, Michael K},
booktitle = {Chemokines},
editor = {Cardona, Astrid and Ubogu, Eroboghene},
isbn = {9781627034258},
issn = {1064-3745},
pages = {215--226},
publisher = {Humana Press},
title = {{Live Cell Imaging of Chemotactic Dendritic Cell Migration in Explanted Mouse Ear Preparations}},
doi = {10.1007/978-1-62703-426-5_14},
volume = {1013},
year = {2013},
}
@inproceedings{10897,
abstract = {Taking images is an efficient way to collect data about the physical world. It can be done fast and in exquisite detail. By definition, image processing is the field that concerns itself with the computation aimed at harnessing the information contained in images [10]. This talk is concerned with topological information. Our main thesis is that persistent homology [5] is a useful method to quantify and summarize topological information, building a bridge that connects algebraic topology with applications. We provide supporting evidence for this thesis by touching upon four technical developments in the overlap between persistent homology and image processing.},
author = {Edelsbrunner, Herbert},
booktitle = {Graph-Based Representations in Pattern Recognition},
isbn = {9783642382208},
issn = {0302-9743},
location = {Vienna, Austria},
pages = {182--183},
publisher = {Springer Nature},
title = {{Persistent homology in image processing}},
doi = {10.1007/978-3-642-38221-5_19},
volume = {7877},
year = {2013},
}
@inproceedings{10898,
abstract = {A prominent remedy to multicore scalability issues in concurrent data structure implementations is to relax the sequential specification of the data structure. We present distributed queues (DQ), a new family of relaxed concurrent queue implementations. DQs implement relaxed queues with linearizable emptiness check and either configurable or bounded out-of-order behavior or pool behavior. Our experiments show that DQs outperform and outscale in micro- and macrobenchmarks all strict and relaxed queue as well as pool implementations that we considered.},
author = {Haas, Andreas and Lippautz, Michael and Henzinger, Thomas A and Payer, Hannes and Sokolova, Ana and Kirsch, Christoph M. and Sezgin, Ali},
booktitle = {Proceedings of the ACM International Conference on Computing Frontiers - CF '13},
isbn = {978-145032053-5},
location = {Ischia, Italy},
number = {5},
publisher = {ACM Press},
title = {{Distributed queues in shared memory: Multicore performance and scalability through quantitative relaxation}},
doi = {10.1145/2482767.2482789},
year = {2013},
}
@inbook{10899,
author = {Barton, Nicholas H},
booktitle = {Encyclopedia of Biodiversity},
isbn = {978-0-12-384720-1},
keywords = {Adaptive landscape, Cline, Coalescent process, Gene flow, Hybrid zone, Local adaptation, Natural selection, Neutral theory, Population structure, Speciation},
pages = {508--515},
publisher = {Elsevier},
title = {{Differentiation}},
doi = {10.1016/b978-0-12-384719-5.00031-9},
year = {2013},
}
@inproceedings{1374,
abstract = {We study two-player zero-sum games over infinite-state graphs equipped with ωB and finitary conditions. Our first contribution is about the strategy complexity, i.e the memory required for winning strategies: we prove that over general infinite-state graphs, memoryless strategies are sufficient for finitary Büchi, and finite-memory suffices for finitary parity games. We then study pushdown games with boundedness conditions, with two contributions. First we prove a collapse result for pushdown games with ωB-conditions, implying the decidability of solving these games. Second we consider pushdown games with finitary parity along with stack boundedness conditions, and show that solving these games is EXPTIME-complete.},
author = {Chatterjee, Krishnendu and Fijalkow, Nathanaël},
booktitle = {22nd EACSL Annual Conference on Computer Science Logic},
location = {Torino, Italy},
pages = {181--196},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Infinite-state games with finitary conditions}},
doi = {10.4230/LIPIcs.CSL.2013.181},
volume = {23},
year = {2013},
}
@inproceedings{1385,
abstract = {It is often difficult to correctly implement a Boolean controller for a complex system, especially when concurrency is involved. Yet, it may be easy to formally specify a controller. For instance, for a pipelined processor it suffices to state that the visible behavior of the pipelined system should be identical to a non-pipelined reference system (Burch-Dill paradigm). We present a novel procedure to efficiently synthesize multiple Boolean control signals from a specification given as a quantified first-order formula (with a specific quantifier structure). Our approach uses uninterpreted functions to abstract details of the design. We construct an unsatisfiable SMT formula from the given specification. Then, from just one proof of unsatisfiability, we use a variant of Craig interpolation to compute multiple coordinated interpolants that implement the Boolean control signals. Our method avoids iterative learning and back-substitution of the control functions. We applied our approach to synthesize a controller for a simple two-stage pipelined processor, and present first experimental results.},
author = {Hofferek, Georg and Gupta, Ashutosh and Könighofer, Bettina and Jiang, Jie and Bloem, Roderick},
booktitle = {2013 Formal Methods in Computer-Aided Design},
location = {Portland, OR, United States},
pages = {77--84},
publisher = {IEEE},
title = {{Synthesizing multiple boolean functions using interpolation on a single proof}},
doi = {10.1109/FMCAD.2013.6679394},
year = {2013},
}
@inproceedings{1387,
abstract = {Choices made by nondeterministic word automata depend on both the past (the prefix of the word read so far) and the future (the suffix yet to be read). In several applications, most notably synthesis, the future is diverse or unknown, leading to algorithms that are based on deterministic automata. Hoping to retain some of the advantages of nondeterministic automata, researchers have studied restricted classes of nondeterministic automata. Three such classes are nondeterministic automata that are good for trees (GFT; i.e., ones that can be expanded to tree automata accepting the derived tree languages, thus whose choices should satisfy diverse futures), good for games (GFG; i.e., ones whose choices depend only on the past), and determinizable by pruning (DBP; i.e., ones that embody equivalent deterministic automata). The theoretical properties and relative merits of the different classes are still open, having vagueness on whether they really differ from deterministic automata. In particular, while DBP ⊆ GFG ⊆ GFT, it is not known whether every GFT automaton is GFG and whether every GFG automaton is DBP. Also open is the possible succinctness of GFG and GFT automata compared to deterministic automata. We study these problems for ω-regular automata with all common acceptance conditions. We show that GFT=GFG⊃DBP, and describe a determinization construction for GFG automata.},
author = {Boker, Udi and Kuperberg, Denis and Kupferman, Orna and Skrzypczak, Michał},
location = {Riga, Latvia},
number = {PART 2},
pages = {89 -- 100},
publisher = {Springer},
title = {{Nondeterminism in the presence of a diverse or unknown future}},
doi = {10.1007/978-3-642-39212-2_11},
volume = {7966},
year = {2013},
}
@phdthesis{1406,
abstract = {Epithelial spreading is a critical part of various developmental and wound repair processes. Here we use zebrafish epiboly as a model system to study the cellular and molecular mechanisms underlying the spreading of epithelial sheets. During zebrafish epiboly the enveloping cell layer (EVL), a simple squamous epithelium, spreads over the embryo to eventually cover the entire yolk cell by the end of gastrulation. The EVL leading edge is anchored through tight junctions to the yolk syncytial layer (YSL), where directly adjacent to the EVL margin a contractile actomyosin ring is formed that is thought to drive EVL epiboly. The prevalent view in the field was that the contractile ring exerts a pulling force on the EVL margin, which pulls the EVL towards the vegetal pole. However, how this force is generated and how it affects EVL morphology still remains elusive. Moreover, the cellular mechanisms mediating the increase in EVL surface area, while maintaining tissue integrity and function are still unclear. Here we show that the YSL actomyosin ring pulls on the EVL margin by two distinct force-generating mechanisms. One mechanism is based on contraction of the ring around its circumference, as previously proposed. The second mechanism is based on actomyosin retrograde flows, generating force through resistance against the substrate. The latter can function at any epiboly stage even in situations where the contraction-based mechanism is unproductive. Additionally, we demonstrate that during epiboly the EVL is subjected to anisotropic tension, which guides the orientation of EVL cell division along the main axis (animal-vegetal) of tension. The influence of tension in cell division orientation involves cell elongation and requires myosin-2 activity for proper spindle alignment. Strikingly, we reveal that tension-oriented cell divisions release anisotropic tension within the EVL and that in the absence of such divisions, EVL cells undergo ectopic fusions.
We conclude that forces applied to the EVL by the action of the YSL actomyosin ring generate a tension anisotropy in the EVL that orients cell divisions, which in turn limit tissue tension increase thereby facilitating tissue spreading.},
author = {Campinho, Pedro},
pages = {123},
publisher = {IST Austria},
title = {{Mechanics of zebrafish epiboly: Tension-oriented cell divisions limit anisotropic tissue tension in epithelial spreading}},
year = {2013},
}
@article{2010,
abstract = {Many algorithms for inferring causality rely heavily on the faithfulness assumption. The main justification for imposing this assumption is that the set of unfaithful distributions has Lebesgue measure zero, since it can be seen as a collection of hypersurfaces in a hypercube. However, due to sampling error the faithfulness condition alone is not sufficient for statistical estimation, and strong-faithfulness has been proposed and assumed to achieve uniform or high-dimensional consistency. In contrast to the plain faithfulness assumption, the set of distributions that is not strong-faithful has nonzero Lebesgue measure and in fact, can be surprisingly large as we show in this paper. We study the strong-faithfulness condition from a geometric and combinatorial point of view and give upper and lower bounds on the Lebesgue measure of strong-faithful distributions for various classes of directed acyclic graphs. Our results imply fundamental limitations for the PC-algorithm and potentially also for other algorithms based on partial correlation testing in the Gaussian case.},
author = {Uhler, Caroline and Raskutti, Garvesh and Bühlmann, Peter and Yu, Bin},
journal = {The Annals of Statistics},
number = {2},
pages = {436 -- 463},
publisher = {Institute of Mathematical Statistics},
title = {{Geometry of the faithfulness assumption in causal inference}},
doi = {10.1214/12-AOS1080},
volume = {41},
year = {2013},
}
@article{2009,
abstract = {Traditional statistical methods for confidentiality protection of statistical databases do not scale well to deal with GWAS databases especially in terms of guarantees regarding protection from linkage to external information. The more recent concept of differential privacy, introduced by the cryptographic community, is an approach which provides a rigorous definition of privacy with meaningful privacy guarantees in the presence of arbitrary external information, although the guarantees may come at a serious price in terms of data utility. Building on such notions, we propose new methods to release aggregate GWAS data without compromising an individual’s privacy. We present methods for releasing differentially private minor allele frequencies, chi-square statistics and p-values. We compare these approaches on simulated data and on a GWAS study of canine hair length involving 685 dogs. We also propose a privacy-preserving method for finding genome-wide associations based on a differentially-private approach to penalized logistic regression.},
author = {Uhler, Caroline and Slavkovic, Aleksandra and Fienberg, Stephen},
journal = {Journal of Privacy and Confidentiality},
number = {1},
pages = {137 -- 166},
publisher = {Carnegie Mellon University},
title = {{Privacy-preserving data sharing for genome-wide association studies}},
doi = {10.29012/jpc.v5i1.629},
volume = {5},
year = {2013},
}
@inproceedings{2181,
abstract = {There is a trade-off between performance and correctness in implementing concurrent data structures. Better performance may be achieved at the expense of relaxing correctness, by redefining the semantics of data structures. We address such a redefinition of data structure semantics and present a systematic and formal framework for obtaining new data structures by quantitatively relaxing existing ones. We view a data structure as a sequential specification S containing all "legal" sequences over an alphabet of method calls. Relaxing the data structure corresponds to defining a distance from any sequence over the alphabet to the sequential specification: the k-relaxed sequential specification contains all sequences over the alphabet within distance k from the original specification. In contrast to other existing work, our relaxations are semantic (distance in terms of data structure states). As an instantiation of our framework, we present two simple yet generic relaxation schemes, called out-of-order and stuttering relaxation, along with several ways of computing distances. We show that the out-of-order relaxation, when further instantiated to stacks, queues, and priority queues, amounts to tolerating bounded out-of-order behavior, which cannot be captured by a purely syntactic relaxation (distance in terms of sequence manipulation, e.g. edit distance). We give concurrent implementations of relaxed data structures and demonstrate that bounded relaxations provide the means for trading correctness for performance in a controlled way. The relaxations are monotonic which further highlights the trade-off: increasing k increases the number of permitted sequences, which as we demonstrate can lead to better performance. Finally, since a relaxed stack or queue also implements a pool, we actually have new concurrent pool implementations that outperform the state-of-the-art ones.},
author = {Henzinger, Thomas A and Kirsch, Christoph and Payer, Hannes and Sezgin, Ali and Sokolova, Ana},
booktitle = {Proceedings of the 40th annual ACM SIGPLAN-SIGACT symposium on Principles of programming languages},
isbn = {978-1-4503-1832-7},
location = {Rome, Italy},
pages = {317 -- 328},
publisher = {ACM},
title = {{Quantitative relaxation of concurrent data structures}},
doi = {10.1145/2429069.2429109},
year = {2013},
}
@inproceedings{2182,
abstract = {We propose a general framework for abstraction with respect to quantitative properties, such as worst-case execution time, or power consumption. Our framework provides a systematic way for counter-example guided abstraction refinement for quantitative properties. The salient aspect of the framework is that it allows anytime verification, that is, verification algorithms that can be stopped at any time (for example, due to exhaustion of memory), and report approximations that improve monotonically when the algorithms are given more time. We instantiate the framework with a number of quantitative abstractions and refinement schemes, which differ in terms of how much quantitative information they keep from the original system. We introduce both state-based and trace-based quantitative abstractions, and we describe conditions that define classes of quantitative properties for which the abstractions provide over-approximations. We give algorithms for evaluating the quantitative properties on the abstract systems. We present algorithms for counter-example based refinements for quantitative properties for both state-based and segment-based abstractions. We perform a case study on worst-case execution time of executables to evaluate the anytime verification aspect and the quantitative abstractions we proposed.},
author = {Cerny, Pavol and Henzinger, Thomas A and Radhakrishna, Arjun},
booktitle = {Proceedings of the 40th annual ACM SIGPLAN-SIGACT symposium on Principles of programming languages},
location = {Rome, Italy},
pages = {115 -- 128},
publisher = {ACM},
title = {{Quantitative abstraction refinement}},
doi = {10.1145/2429069.2429085},
year = {2013},
}
@inproceedings{2209,
abstract = {A straight skeleton is a well-known geometric structure, and several algorithms exist to construct the straight skeleton for a given polygon or planar straight-line graph. In this paper, we ask the reverse question: Given the straight skeleton (in form of a planar straight-line graph, with some rays to infinity), can we reconstruct a planar straight-line graph for which this was the straight skeleton? We show how to reduce this problem to the problem of finding a line that intersects a set of convex polygons. We can find these convex polygons and all such lines in $O(n \log n)$ time in the Real RAM computer model, where $n$ denotes the number of edges of the input graph. We also explain how our approach can be used for recognizing Voronoi diagrams of points, thereby completing a partial solution provided by Ash and Bolker in 1985.
},
author = {Biedl, Therese and Held, Martin and Huber, Stefan},
location = {St. Petersburg, Russia},
pages = {37 -- 46},
publisher = {IEEE},
title = {{Recognizing straight skeletons and Voronoi diagrams and reconstructing their input}},
doi = {10.1109/ISVD.2013.11},
year = {2013},
}
@inproceedings{2210,
abstract = {A straight skeleton is a well-known geometric structure, and several algorithms exist to construct the straight skeleton for a given polygon. In this paper, we ask the reverse question: Given the straight skeleton (in form of a tree with a drawing in the plane, but with the exact position of the leaves unspecified), can we reconstruct the polygon? We show that in most cases there exists at most one polygon; in the remaining case there is an infinite number of polygons determined by one angle that can range in an interval. We can find this (set of) polygon(s) in linear time in the Real RAM computer model.},
author = {Biedl, Therese and Held, Martin and Huber, Stefan},
booktitle = {29th European Workshop on Computational Geometry},
location = {Braunschweig, Germany},
pages = {95 -- 98},
publisher = {TU Braunschweig},
title = {{Reconstructing polygons from embedded straight skeletons}},
year = {2013},
}
@inproceedings{2237,
abstract = {We describe new extensions of the Vampire theorem prover for computing tree interpolants. These extensions generalize Craig interpolation in Vampire, and can also be used to derive sequence interpolants. We evaluated our implementation on a large number of examples over the theory of linear integer arithmetic and integer-indexed arrays, with and without quantifiers. When compared to other methods, our experiments show that some examples could only be solved by our implementation.},
author = {Blanc, Régis and Gupta, Ashutosh and Kovács, Laura and Kragl, Bernhard},
location = {Stellenbosch, South Africa},
pages = {173 -- 181},
publisher = {Springer},
title = {{Tree interpolation in Vampire}},
doi = {10.1007/978-3-642-45221-5_13},
volume = {8312},
year = {2013},
}
@inproceedings{2238,
abstract = {We study the problem of achieving a given value in Markov decision processes (MDPs) with several independent discounted reward objectives. We consider a generalised version of discounted reward objectives, in which the amount of discounting depends on the states visited and on the objective. This definition extends the usual definition of discounted reward, and allows to capture the systems in which the value of different commodities diminish at different and variable rates.
We establish results for two prominent subclasses of the problem, namely state-discount models where the discount factors are only dependent on the state of the MDP (and independent of the objective), and reward-discount models where they are only dependent on the objective (but not on the state of the MDP). For the state-discount models we use a straightforward reduction to expected total reward and show that the problem whether a value is achievable can be solved in polynomial time. For the reward-discount model we show that memory and randomisation of the strategies are required, but nevertheless that the problem is decidable and it is sufficient to consider strategies which after a certain number of steps behave in a memoryless way.
For the general case, we show that when restricted to graphs (i.e. MDPs with no randomisation), pure strategies and discount factors of the form 1/n where n is an integer, the problem is in PSPACE and finite memory suffices for achieving a given value. We also show that when the discount factors are not of the form 1/n, the memory required by a strategy can be infinite.
},
author = {Chatterjee, Krishnendu and Forejt, Vojtěch and Wojtczak, Dominik},
location = {Stellenbosch, South Africa},
pages = {228 -- 242},
publisher = {Springer},
title = {{Multi-objective discounted reward verification in graphs and MDPs}},
doi = {10.1007/978-3-642-45221-5_17},
volume = {8312},
year = {2013},
}
@inproceedings{2243,
abstract = {We show that modal logic over universally first-order definable classes of transitive frames is decidable. More precisely, let K be an arbitrary class of transitive Kripke frames definable by a universal first-order sentence. We show that the global and finite global satisfiability problems of modal logic over K are decidable in NP, regardless of choice of K. We also show that the local satisfiability and the finite local satisfiability problems of modal logic over K are decidable in NEXPTIME.},
author = {Michaliszyn, Jakub and Otop, Jan},
location = {Torino, Italy},
pages = {563 -- 577},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Elementary modal logics over transitive structures}},
doi = {10.4230/LIPIcs.CSL.2013.563},
volume = {23},
year = {2013},
}
@inproceedings{2244,
abstract = {We consider two systems (α1,...,αm) and (β1,...,βn) of curves drawn on a compact two-dimensional surface ℳ with boundary. Each αi and each βj is either an arc meeting the boundary of ℳ at its two endpoints, or a closed curve. The αi are pairwise disjoint except for possibly sharing endpoints, and similarly for the βj. We want to "untangle" the βj from the αi by a self-homeomorphism of ℳ; more precisely, we seek an homeomorphism φ: ℳ → ℳ fixing the boundary of ℳ pointwise such that the total number of crossings of the αi with the φ(βj) is as small as possible. This problem is motivated by an application in the algorithmic theory of embeddings and 3-manifolds. We prove that if ℳ is planar, i.e., a sphere with h ≥ 0 boundary components ("holes"), then O(mn) crossings can be achieved (independently of h), which is asymptotically tight, as an easy lower bound shows. In general, for an arbitrary (orientable or nonorientable) surface ℳ with h holes and of (orientable or nonorientable) genus g ≥ 0, we obtain an O((m + n)^4) upper bound, again independent of h and g. },
author = {Matoušek, Jiří and Sedgwick, Eric and Tancer, Martin and Wagner, Uli},
location = {Bordeaux, France},
pages = {472 -- 483},
publisher = {Springer},
title = {{Untangling two systems of noncrossing curves}},
doi = {10.1007/978-3-319-03841-4_41},
volume = {8242},
year = {2013},
}
@inproceedings{2259,
abstract = {The learning with rounding (LWR) problem, introduced by Banerjee, Peikert and Rosen at EUROCRYPT ’12, is a variant of learning with errors (LWE), where one replaces random errors with deterministic rounding. The LWR problem was shown to be as hard as LWE for a setting of parameters where the modulus and modulus-to-error ratio are super-polynomial. In this work we resolve the main open problem and give a new reduction that works for a larger range of parameters, allowing for a polynomial modulus and modulus-to-error ratio. In particular, a smaller modulus gives us greater efficiency, and a smaller modulus-to-error ratio gives us greater security, which now follows from the worst-case hardness of GapSVP with polynomial (rather than super-polynomial) approximation factors.
As a tool in the reduction, we show that there is a “lossy mode” for the LWR problem, in which LWR samples only reveal partial information about the secret. This property gives us several interesting new applications, including a proof that LWR remains secure with weakly random secrets of sufficient min-entropy, and very simple constructions of deterministic encryption, lossy trapdoor functions and reusable extractors.
Our approach is inspired by a technique of Goldwasser et al. from ICS ’10, which implicitly showed the existence of a “lossy mode” for LWE. By refining this technique, we also improve on the parameters of that work to only requiring a polynomial (instead of super-polynomial) modulus and modulus-to-error ratio.
},
author = {Alwen, Joel F and Krenn, Stephan and Pietrzak, Krzysztof Z and Wichs, Daniel},
location = {Santa Barbara, CA, United States},
number = {1},
pages = {57 -- 74},
publisher = {Springer},
title = {{Learning with rounding, revisited: New reduction properties and applications}},
doi = {10.1007/978-3-642-40041-4_4},
volume = {8042},
year = {2013},
}
@inproceedings{2258,
abstract = {In a digital signature scheme with message recovery, rather than transmitting the message m and its signature σ, a single enhanced signature τ is transmitted. The verifier is able to recover m from τ and at the same time verify its authenticity. The two most important parameters of such a scheme are its security and overhead |τ| − |m|. A simple argument shows that for any scheme with “n bits security” |τ| − |m| ≥ n, i.e., the overhead is lower bounded by the security parameter n. Currently, the best known constructions in the random oracle model are far from this lower bound requiring an overhead of n + log q_h, where q_h is the number of queries to the random oracle. In this paper we give a construction which basically matches the n bit lower bound. We propose a simple digital signature scheme with n + o(log q_h) bits overhead, where q_h denotes the number of random oracle queries.
Our construction works in two steps. First, we propose a signature scheme with message recovery having optimal overhead in a new ideal model, the random invertible function model. Second, we show that a four-round Feistel network with random oracles as round functions is tightly “public-indifferentiable” from a random invertible function. At the core of our indifferentiability proof is an almost tight upper bound for the expected number of edges of the densest “small” subgraph of a random Cayley graph, which may be of independent interest.
},
author = {Kiltz, Eike and Pietrzak, Krzysztof Z and Szegedy, Mario},
location = {Santa Barbara, CA, United States},
pages = {571 -- 588},
publisher = {Springer},
title = {{Digital signatures with minimal overhead from indifferentiable random invertible functions}},
doi = {10.1007/978-3-642-40041-4_31},
volume = {8042},
year = {2013},
}
@article{2256,
abstract = {Linked (Open) Data - bibliographic data on the Semantic Web. Report of the Working Group on Linked Data to the plenary assembly of the Austrian Library Network (translation of the title). Linked Data stands for a certain approach to publishing data on the Web. The underlying idea is to harmonise heterogeneous data sources of different origin in order to improve their accessibility and interoperability, effectively making them queryable as a big distributed database. This report summarises relevant developments in Europe as well as the Linked Data Working Group’s strategic and technical considerations regarding the publishing of the Austrian Library Network’s (OBV’s) bibliographic datasets. It concludes with the mutual agreement that the implementation of Linked Data principles within the OBV can only be taken into consideration accompanied by a discussion about the provision of the datasets under a free license.},
author = {Danowski, Patrick and Goldfarb, Doron and Schaffner, Verena and Seidler, Wolfram},
journal = {VÖB Mitteilungen},
number = {3/4},
pages = {559 -- 587},
publisher = {Verein Österreichischer Bibliothekarinnen und Bibliothekare},
title = {{Linked (Open) Data - Bibliographische Daten im Semantic Web}},
volume = {66},
year = {2013},
}
@inproceedings{2260,
abstract = {Direct Anonymous Attestation (DAA) is one of the most complex cryptographic protocols deployed in practice. It allows an embedded secure processor known as a Trusted Platform Module (TPM) to attest to the configuration of its host computer without violating the owner’s privacy. DAA has been standardized by the Trusted Computing Group and ISO/IEC.
The security of the DAA standard and all existing schemes is analyzed in the random-oracle model. We provide the first constructions of DAA in the standard model, that is, without relying on random oracles. Our constructions use new building blocks, including the first efficient signatures of knowledge in the standard model, which have many applications beyond DAA.
},
author = {Bernhard, David and Fuchsbauer, Georg and Ghadafi, Essam},
location = {Banff, AB, Canada},
pages = {518 -- 533},
publisher = {Springer},
title = {{Efficient signatures of knowledge and DAA in the standard model}},
doi = {10.1007/978-3-642-38980-1_33},
volume = {7954},
year = {2013},
}
@article{2264,
abstract = {Faithful progression through the cell cycle is crucial to the maintenance and developmental potential of stem cells. Here, we demonstrate that neural stem cells (NSCs) and intermediate neural progenitor cells (NPCs) employ a zinc-finger transcription factor specificity protein 2 (Sp2) as a cell cycle regulator in two temporally and spatially distinct progenitor domains. Differential conditional deletion of Sp2 in early embryonic cerebral cortical progenitors, and perinatal olfactory bulb progenitors disrupted transitions through G1, G2 and M phases, whereas DNA synthesis appeared intact. Cell-autonomous function of Sp2 was identified by deletion of Sp2 using mosaic analysis with double markers, which clearly established that conditional Sp2-null NSCs and NPCs are M phase arrested in vivo. Importantly, conditional deletion of Sp2 led to a decline in the generation of NPCs and neurons in the developing and postnatal brains. Our findings implicate Sp2-dependent mechanisms as novel regulators of cell cycle progression, the absence of which disrupts neurogenesis in the embryonic and postnatal brain.},
author = {Liang, Huixuan and Xiao, Guanxi and Yin, Haifeng and Hippenmeyer, Simon and Horowitz, Jonathan and Ghashghaei, Troy},
journal = {Development},
number = {3},
pages = {552 -- 561},
publisher = {Company of Biologists},
title = {{Neural development is dependent on the function of specificity protein 2 in cell cycle progression}},
doi = {10.1242/dev.085621},
volume = {140},
year = {2013},
}
@inproceedings{2272,
abstract = {We consider Conditional Random Fields (CRFs) with pattern-based potentials defined on a chain. In this model the energy of a string (labeling) x_1...x_n is the sum of terms over intervals [i,j] where each term is non-zero only if the substring x_i...x_j equals a prespecified pattern α. Such CRFs can be naturally applied to many sequence tagging problems.
We present efficient algorithms for the three standard inference tasks in a CRF, namely computing (i) the partition function, (ii) marginals, and (iii) computing the MAP. Their complexities are respectively O(nL), O(nL ℓ_max) and O(nL min{|D|, log(ℓ_max+1)}) where L is the combined length of input patterns, ℓ_max is the maximum length of a pattern, and D is the input alphabet. This improves on the previous algorithms of (Ye et al., 2009) whose complexities are respectively O(nL|D|), O(n|Γ| L^2 ℓ_max^2) and O(nL|D|), where |Γ| is the number of input patterns.
In addition, we give an efficient algorithm for sampling. Finally, we consider the case of non-positive weights. (Komodakis \& Paragios, 2009) gave an O(nL) algorithm for computing the MAP. We present a modification that has the same worst-case complexity but can beat it in the best case. },
author = {Takhanov, Rustem and Kolmogorov, Vladimir},
booktitle = {ICML'13 Proceedings of the 30th International Conference on Machine Learning},
location = {Atlanta, GA, USA},
number = {3},
pages = {145 -- 153},
publisher = {International Machine Learning Society},
title = {{Inference algorithms for pattern-based CRFs on sequence data}},
volume = {28},
year = {2013},
}
@inproceedings{2270,
abstract = {Representation languages for coalitional games are a key research area in algorithmic game theory. There is an inherent tradeoff between how general a language is, allowing it to capture more elaborate games, and how hard it is computationally to optimize and solve such games. One prominent such language is the simple yet expressive Weighted Graph Games (WGGs) representation (Deng and Papadimitriou 1994), which maintains knowledge about synergies between agents in the form of an edge weighted graph. We consider the problem of finding the optimal coalition structure in WGGs. The agents in such games are vertices in a graph, and the value of a coalition is the sum of the weights of the edges present between coalition members. The optimal coalition structure is a partition of the agents to coalitions, that maximizes the sum of utilities obtained by the coalitions. We show that finding the optimal coalition structure is not only hard for general graphs, but is also intractable for restricted families such as planar graphs which are amenable for many other combinatorial problems. We then provide algorithms with constant factor approximations for planar, minor-free and bounded degree graphs.},
author = {Bachrach, Yoram and Kohli, Pushmeet and Kolmogorov, Vladimir and Zadimoghaddam, Morteza},
location = {Bellevue, WA, United States},
pages = {81 -- 87},
publisher = {AAAI Press},
title = {{Optimal Coalition Structures in Cooperative Graph Games}},
year = {2013},
}
@techreport{2274,
abstract = {Proofs of work (PoW) have been suggested by Dwork and Naor (Crypto'92) as protection to a shared resource. The basic idea is to ask the service requestor to dedicate some non-trivial amount of computational work to every request. The original applications included prevention of spam and protection against denial of service attacks. More recently, PoWs have been used to prevent double spending in the Bitcoin digital currency system.
In this work, we put forward an alternative concept for PoWs -- so-called proofs of space (PoS), where a service requestor must dedicate a significant amount of disk space as opposed to computation. We construct secure PoS schemes in the random oracle model, using graphs with high "pebbling complexity" and Merkle hash-trees. },
author = {Dziembowski, Stefan and Faust, Sebastian and Kolmogorov, Vladimir and Pietrzak, Krzysztof Z},
publisher = {IST Austria},
title = {{Proofs of Space}},
year = {2013},
}
@techreport{2273,
abstract = {We propose a new family of message passing techniques for MAP estimation in graphical models which we call Sequential Reweighted Message Passing (SRMP). Special cases include well-known techniques such as Min-Sum Diffusion (MSD) and a faster Sequential Tree-Reweighted Message Passing (TRW-S). Importantly, our derivation is simpler than the original derivation of TRW-S, and does not involve a decomposition into trees. This allows easy generalizations. We present such a generalization for the case of higher-order graphical models, and test it on several real-world problems with promising results.},
author = {Kolmogorov, Vladimir},
publisher = {IST Austria},
title = {{Reweighted message passing revisited}},
year = {2013},
}
@article{2278,
abstract = {It is firmly established that interactions between neurons and glia are fundamental across species for the correct establishment of a functional brain. Here, we found that the glia of the Drosophila larval brain display an essential non-autonomous role during the development of the optic lobe. The optic lobe develops from neuroepithelial cells that proliferate by dividing symmetrically until they switch to asymmetric/differentiative divisions that generate neuroblasts. The proneural gene lethal of scute (l(1)sc) is transiently activated by the epidermal growth factor receptor (EGFR)-Ras signal transduction pathway at the leading edge of a proneural wave that sweeps from medial to lateral neuroepithelium, promoting this switch. This process is tightly regulated by the tissue-autonomous function within the neuroepithelium of multiple signaling pathways, including EGFR-Ras and Notch. This study shows that the Notch ligand Serrate (Ser) is expressed in the glia and it forms a complex in vivo with Notch and Canoe, which colocalize at the adherens junctions of neuroepithelial cells. This complex is crucial for interactions between glia and neuroepithelial cells during optic lobe development. Ser is tissue-autonomously required in the glia where it activates Notch to regulate its proliferation, and non-autonomously in the neuroepithelium where Ser induces Notch signaling to avoid the premature activation of the EGFR-Ras pathway and hence of l(1)sc. Interestingly, different Notch activity reporters showed very different expression patterns in the glia and in the neuroepithelium, suggesting the existence of tissue-specific factors that promote the expression of particular Notch target genes or/and a reporter response dependent on different thresholds of Notch signaling.},
author = {Pérez Gómez, Raquel and Slovakova, Jana and Rives Quinto, Noemí and Krejčí, Alena and Carmena, Ana},
journal = {Journal of Cell Science},
number = {21},
pages = {4873 -- 4884},
publisher = {Company of Biologists},
title = {{A serrate-notch-canoe complex mediates essential interactions between glia and neuroepithelial cells during Drosophila optic lobe development}},
doi = {10.1242/jcs.125617},
volume = {126},
year = {2013},
}
@inproceedings{2276,
abstract = {The problem of minimizing the Potts energy function frequently occurs in computer vision applications. One way to tackle this NP-hard problem was proposed by Kovtun [19, 20]. It identifies a part of an optimal solution by running k maxflow computations, where k is the number of labels. The number of “labeled” pixels can be significant in some applications, e.g. 50-93% in our tests for stereo. We show how to reduce the runtime to O (log k) maxflow computations (or one parametric maxflow computation). Furthermore, the output of our algorithm allows to speed-up the subsequent alpha expansion for the unlabeled part, or can be used as it is for time-critical applications. To derive our technique, we generalize the algorithm of Felzenszwalb et al. [7] for Tree Metrics . We also show a connection to k-submodular functions from combinatorial optimization, and discuss k-submodular relaxations for general energy functions.},
author = {Gridchyn, Igor and Kolmogorov, Vladimir},
title = {{Potts model, parametric maxflow and k-submodular functions}},
pages = {2320 -- 2327},
publisher = {IEEE},
location = {Sydney, Australia},
doi = {10.1109/ICCV.2013.288},
year = {2013},
}
@article{2280,
abstract = {The problem of packing ellipsoids of different sizes and shapes into an ellipsoidal container so as to minimize a measure of overlap between ellipsoids is considered. A bilevel optimization formulation is given, together with an algorithm for the general case and a simpler algorithm for the special case in which all ellipsoids are in fact spheres. Convergence results are proved and computational experience is described and illustrated. The motivating application-chromosome organization in the human cell nucleus-is discussed briefly, and some illustrative results are presented.},
author = {Uhler, Caroline and Wright, Stephen},
journal = {SIAM Review},
number = {4},
pages = {671 -- 706},
publisher = {Society for Industrial and Applied Mathematics},
title = {{Packing ellipsoids with overlap}},
doi = {10.1137/120872309},
volume = {55},
year = {2013},
}
@article{2287,
abstract = {Negative frequency-dependent selection should result in equal sex ratios in large populations of dioecious flowering plants, but deviations from equality are commonly reported. A variety of ecological and genetic factors can explain biased sex ratios, although the mechanisms involved are not well understood. Most dioecious species are long-lived and/or clonal complicating efforts to identify stages during the life cycle when biases develop. We investigated the demographic correlates of sex-ratio variation in two chromosome races of Rumex hastatulus, an annual, wind-pollinated colonizer of open habitats from the southern USA. We examined sex ratios in 46 populations and evaluated the hypothesis that the proximity of males in the local mating environment, through its influence on gametophytic selection, is the primary cause of female-biased sex ratios. Female-biased sex ratios characterized most populations of R. hastatulus (mean sex ratio = 0.62), with significant female bias in 89% of populations. Large, high-density populations had the highest proportion of females, whereas smaller, low-density populations had sex ratios closer to equality. Progeny sex ratios were more female biased when males were in closer proximity to females, a result consistent with the gametophytic selection hypothesis. Our results suggest that interactions between demographic and genetic factors are probably the main cause of female-biased sex ratios in R. hastatulus. The annual life cycle of this species may limit the scope for selection against males and may account for the weaker degree of bias in comparison with perennial Rumex species.},
author = {Pickup, Melinda and Barrett, Spencer},
title = {{The influence of demography and local mating environment on sex ratios in a wind-pollinated dioecious plant}},
journal = {Ecology and Evolution},
volume = {3},
number = {3},
pages = {629 -- 639},
publisher = {Wiley-Blackwell},
doi = {10.1002/ece3.465},
year = {2013},
}
@article{2282,
abstract = {Epithelial spreading is a common and fundamental aspect of various developmental and disease-related processes such as epithelial closure and wound healing. A key challenge for epithelial tissues undergoing spreading is to increase their surface area without disrupting epithelial integrity. Here we show that orienting cell divisions by tension constitutes an efficient mechanism by which the enveloping cell layer (EVL) releases anisotropic tension while undergoing spreading during zebrafish epiboly. The control of EVL cell-division orientation by tension involves cell elongation and requires myosin II activity to align the mitotic spindle with the main tension axis. We also found that in the absence of tension-oriented cell divisions and in the presence of increased tissue tension, EVL cells undergo ectopic fusions, suggesting that the reduction of tension anisotropy by oriented cell divisions is required to prevent EVL cells from fusing. We conclude that cell-division orientation by tension constitutes a key mechanism for limiting tension anisotropy and thus promoting tissue spreading during EVL epiboly.},
author = {Campinho, Pedro and Behrndt, Martin and Ranft, Jonas and Risler, Thomas and Minc, Nicolas and Heisenberg, Carl-Philipp J},
title = {{Tension-oriented cell divisions limit anisotropic tissue tension in epithelial spreading during zebrafish epiboly}},
journal = {Nature Cell Biology},
volume = {15},
pages = {1405 -- 1414},
publisher = {Nature Publishing Group},
doi = {10.1038/ncb2869},
year = {2013},
}
@article{2283,
abstract = {Pathogens exert a strong selection pressure on organisms to evolve effective immune defences. In addition to individual immunity, social organisms can act cooperatively to produce collective defences. In many ant species, queens have the option to found a colony alone or in groups with other, often unrelated, conspecifics. These associations are transient, usually lasting only as long as each queen benefits from the presence of others. In fact, once the first workers emerge, queens fight to the death for dominance. One potential advantage of co-founding may be that queens benefit from collective disease defences, such as mutual grooming, that act against common soil pathogens. We test this hypothesis by exposing single and co-founding queens to a fungal parasite, in order to assess whether queens in co-founding associations have improved survival. Surprisingly, co-foundresses exposed to the entomopathogenic fungus Metarhizium did not engage in cooperative disease defences, and consequently, we find no direct benefit of multiple queens on survival. However, an indirect benefit was observed, with parasite-exposed queens producing more brood when they co-founded, than when they were alone. We suggest this is due to a trade-off between reproduction and immunity. Additionally, we report an extraordinary ability of the queens to tolerate an infection for long periods after parasite exposure. Our study suggests that there are no social immunity benefits for co-founding ant queens, but that in parasite-rich environments, the presence of additional queens may nevertheless improve the chances of colony founding success.},
author = {Pull, Christopher and Hughes, William and Brown, Markus},
journal = {Naturwissenschaften},
number = {12},
pages = {1125 -- 1136},
publisher = {Springer},
title = {{Tolerating an infection: an indirect benefit of co-founding queen associations in the ant Lasius niger}},
doi = {10.1007/s00114-013-1115-5},
volume = {100},
year = {2013},
}
@article{2286,
abstract = {The spatiotemporal control of cell divisions is a key factor in epithelial morphogenesis and patterning. Mao et al (2013) now describe how differential rates of proliferation within the Drosophila wing disc epithelium give rise to anisotropic tissue tension in peripheral/proximal regions of the disc. Such global tissue tension anisotropy in turn determines the orientation of cell divisions by controlling epithelial cell elongation.},
author = {Campinho, Pedro and Heisenberg, Carl-Philipp J},
title = {{The force and effect of cell proliferation}},
journal = {EMBO Journal},
volume = {32},
number = {21},
pages = {2783 -- 2784},
publisher = {Wiley-Blackwell},
doi = {10.1038/emboj.2013.225},
year = {2013},
}
@article{2289,
abstract = {Formal verification aims to improve the quality of software by detecting errors before they do harm. At the basis of formal verification is the logical notion of correctness, which purports to capture whether or not a program behaves as desired. We suggest that the boolean partition of software into correct and incorrect programs falls short of the practical need to assess the behavior of software in a more nuanced fashion against multiple criteria. We therefore propose to introduce quantitative fitness measures for programs, specifically for measuring the function, performance, and robustness of reactive programs such as concurrent processes. This article describes the goals of the ERC Advanced Investigator Project QUAREM. The project aims to build and evaluate a theory of quantitative fitness measures for reactive models. Such a theory must strive to obtain quantitative generalizations of the paradigms that have been success stories in qualitative reactive modeling, such as compositionality, property-preserving abstraction and abstraction refinement, model checking, and synthesis. The theory will be evaluated not only in the context of software and hardware engineering, but also in the context of systems biology. In particular, we will use the quantitative reactive models and fitness measures developed in this project for testing hypotheses about the mechanisms behind data from biological experiments.},
author = {Henzinger, Thomas A},
title = {{Quantitative reactive modeling and verification}},
journal = {Computer Science Research and Development},
volume = {28},
number = {4},
pages = {331 -- 344},
publisher = {Springer},
doi = {10.1007/s00450-013-0251-7},
year = {2013},
}
@article{2290,
abstract = {The plant hormone indole-acetic acid (auxin) is essential for many aspects of plant development. Auxin-mediated growth regulation typically involves the establishment of an auxin concentration gradient mediated by polarly localized auxin transporters. The localization of auxin carriers and their amount at the plasma membrane are controlled by membrane trafficking processes such as secretion, endocytosis, and recycling. In contrast to endocytosis or recycling, how the secretory pathway mediates the localization of auxin carriers is not well understood. In this study we have used the differential cell elongation process during apical hook development to elucidate the mechanisms underlying the post-Golgi trafficking of auxin carriers in Arabidopsis. We show that differential cell elongation during apical hook development is defective in Arabidopsis mutant echidna (ech). ECH protein is required for the trans-Golgi network (TGN)-mediated trafficking of the auxin influx carrier AUX1 to the plasma membrane. In contrast, ech mutation only marginally perturbs the trafficking of the highly related auxin influx carrier LIKE-AUX1-3 or the auxin efflux carrier PIN-FORMED-3, both also involved in hook development. Electron tomography reveals that the trafficking defects in ech mutant are associated with the perturbation of secretory vesicle genesis from the TGN. Our results identify differential mechanisms for the post-Golgi trafficking of de novo-synthesized auxin carriers to plasma membrane from the TGN and reveal how trafficking of auxin influx carriers mediates the control of differential cell elongation in apical hook development.},
author = {Boutté, Yohann and Jonsson, Kristoffer and McFarlane, Heather and Johnson, Errin and Gendre, Delphine and Swarup, Ranjan and Friml, Jiří and Samuels, Lacey and Robert, Stéphanie and Bhalerao, Rishikesh},
journal = {PNAS},
number = {40},
pages = {16259 -- 16264},
publisher = {National Academy of Sciences},
title = {{ECHIDNA mediated post Golgi trafficking of auxin carriers for differential cell elongation}},
doi = {10.1073/pnas.1309057110},
volume = {110},
year = {2013},
}
@inproceedings{2294,
abstract = {In this work we propose a system for automatic classification of Drosophila embryos into developmental stages. While the system is designed to solve an actual problem in biological research, we believe that the principle underlying it is interesting not only for biologists, but also for researchers in computer vision. The main idea is to combine two orthogonal sources of information: one is a classifier trained on strongly invariant features, which makes it applicable to images of very different conditions, but also leads to rather noisy predictions. The other is a label propagation step based on a more powerful similarity measure that however is only consistent within specific subsets of the data at a time. In our biological setup, the information sources are the shape and the staining patterns of embryo images. We show experimentally that while neither of the methods can be used by itself to achieve satisfactory results, their combination achieves prediction quality comparable to human performance.},
author = {Kazmar, Tomas and Kvon, Evgeny and Stark, Alexander and Lampert, Christoph},
location = {Sydney, Australia},
publisher = {IEEE},
title = {{Drosophila Embryo Stage Annotation using Label Propagation}},
doi = {10.1109/ICCV.2013.139},
year = {2013},
}
@proceedings{2292,
abstract = {This book constitutes the thoroughly refereed conference proceedings of the 38th International Symposium on Mathematical Foundations of Computer Science, MFCS 2013, held in Klosterneuburg, Austria, in August 2013. The 67 revised full papers presented together with six invited talks were carefully selected from 191 submissions. Topics covered include algorithmic game theory, algorithmic learning theory, algorithms and data structures, automata, formal languages, bioinformatics, complexity, computational geometry, computer-assisted reasoning, concurrency theory, databases and knowledge-based systems, foundations of computing, logic in computer science, models of computation, semantics and verification of programs, and theoretical issues in artificial intelligence.},
editor = {Chatterjee, Krishnendu and Sgall, Jiri},
title = {{Mathematical Foundations of Computer Science 2013}},
publisher = {Springer},
location = {Klosterneuburg, Austria},
volume = {8087},
pages = {VI -- 854},
isbn = {978-3-642-40312-5},
doi = {10.1007/978-3-642-40313-2},
year = {2013},
}
@inproceedings{2293,
abstract = {Many computer vision problems have an asymmetric distribution of information between training and test time. In this work, we study the case where we are given additional information about the training data, which however will not be available at test time. This situation is called learning using privileged information (LUPI). We introduce two maximum-margin techniques that are able to make use of this additional source of information, and we show that the framework is applicable to several scenarios that have been studied in computer vision before. Experiments with attributes, bounding boxes, image tags and rationales as additional information in object classification show promising results.},
author = {Sharmanska, Viktoriia and Quadrianto, Novi and Lampert, Christoph},
title = {{Learning to rank using privileged information}},
pages = {825 -- 832},
publisher = {IEEE},
location = {Sydney, Australia},
doi = {10.1109/ICCV.2013.107},
year = {2013},
}
@inproceedings{2291,
abstract = {Cryptographic access control promises to offer easily distributed trust and broader applicability, while reducing reliance on low-level online monitors. Traditional implementations of cryptographic access control rely on simple cryptographic primitives whereas recent endeavors employ primitives with richer functionality and security guarantees. Worryingly, few of the existing cryptographic access-control schemes come with precise guarantees, the gap between the policy specification and the implementation being analyzed only informally, if at all. In this paper we begin addressing this shortcoming. Unlike prior work that targeted ad-hoc policy specification, we look at the well-established Role-Based Access Control (RBAC) model, as used in a typical file system. In short, we provide a precise syntax for a computational version of RBAC, offer rigorous definitions for cryptographic policy enforcement of a large class of RBAC security policies, and demonstrate that an implementation based on attribute-based encryption meets our security notions. We view our main contribution as being at the conceptual level. Although we work with RBAC for concreteness, our general methodology could guide future research for uses of cryptography in other access-control models.},
author = {Ferrara, Anna and Fuchsbauer, Georg and Warinschi, Bogdan},
location = {New Orleans, LA, United States},
pages = {115 -- 129},
publisher = {IEEE},
title = {{Cryptographically enforced RBAC}},
doi = {10.1109/CSF.2013.15},
year = {2013},
}