@inproceedings{3365,
  abstract     = {We present the tool Quasy, a quantitative synthesis tool. Quasy takes qualitative and quantitative specifications and automatically constructs a system that satisfies the qualitative specification and optimizes the quantitative specification, if such a system exists. The user can choose between a system that satisfies and optimizes the specifications (a) under all possible environment behaviors or (b) under the most-likely environment behaviors given as a probability distribution on the possible input sequences. Quasy solves these two quantitative synthesis problems by reduction to instances of 2-player games and Markov Decision Processes (MDPs) with quantitative winning objectives. Quasy can also be seen as a game solver for quantitative games. Most notably, it can solve lexicographic mean-payoff games with 2 players, MDPs with mean-payoff objectives, and ergodic MDPs with mean-payoff parity objectives.},
  author       = {Chatterjee, Krishnendu and Henzinger, Thomas A. and Jobstmann, Barbara and Singh, Rohit},
  booktitle    = {Tools and Algorithms for the Construction and Analysis of Systems (TACAS)},
  location     = {Saarbrücken, Germany},
  pages        = {267--271},
  publisher    = {Springer},
  title        = {{QUASY}: Quantitative Synthesis Tool},
  doi          = {10.1007/978-3-642-19835-9_24},
  volume       = {6605},
  year         = {2011},
}

@inproceedings{3366,
  abstract     = {We present an algorithmic method for the quantitative, performance-aware synthesis of concurrent programs. The input consists of a nondeterministic partial program and of a parametric performance model. The nondeterminism allows the programmer to omit which (if any) synchronization construct is used at a particular program location. The performance model, specified as a weighted automaton, can capture system architectures by assigning different costs to actions such as locking, context switching, and memory and cache accesses. The quantitative synthesis problem is to automatically resolve the nondeterminism of the partial program so that both correctness is guaranteed and performance is optimal. As is standard for shared memory concurrency, correctness is formalized ``specification free'', in particular as race freedom or deadlock freedom. For worst-case (average-case) performance, we show that the problem can be reduced to 2-player graph games (with probabilistic transitions) with quantitative objectives. While we show, using game-theoretic methods, that the synthesis problem is Nexp-complete, we present an algorithmic method and an implementation that works efficiently for concurrent programs and performance models of practical interest. We have implemented a prototype tool and used it to synthesize finite-state concurrent programs that exhibit different programming patterns, for several performance models representing different architectures.},
  author       = {Cerny, Pavol and Chatterjee, Krishnendu and Henzinger, Thomas A. and Radhakrishna, Arjun and Singh, Rohit},
  booktitle    = {Computer Aided Verification (CAV)},
  editor       = {Gopalakrishnan, Ganesh and Qadeer, Shaz},
  location     = {Snowbird, USA},
  pages        = {243--259},
  publisher    = {Springer},
  title        = {Quantitative Synthesis for Concurrent Programs},
  doi          = {10.1007/978-3-642-22110-1_20},
  volume       = {6806},
  year         = {2011},
}

@article{3381,
  abstract     = {In this survey, we compare several languages for specifying Markovian population models such as queuing networks and chemical reaction networks. All these languages — matrix descriptions, stochastic Petri nets, stoichiometric equations, stochastic process algebras, and guarded command models — describe continuous-time Markov chains, but they differ according to important properties, such as compositionality, expressiveness and succinctness, executability, and ease of use. Moreover, they provide different support for checking the well-formedness of a model and for analyzing a model.},
  author       = {Henzinger, Thomas A. and Jobstmann, Barbara and Wolf, Verena},
  journal      = {International Journal of Foundations of Computer Science},
  number       = {4},
  pages        = {823--841},
  publisher    = {World Scientific Publishing},
  title        = {Formalisms for Specifying {Markovian} Population Models},
  doi          = {10.1142/S0129054111008441},
  volume       = {22},
  year         = {2011},
}

@article{531,
  abstract     = {Software transactional memories (STM) are described in the literature with assumptions of sequentially consistent program execution and atomicity of high level operations like read, write, and abort. However, in a realistic setting, processors use relaxed memory models to optimize hardware performance. Moreover, the atomicity of operations depends on the underlying hardware. This paper presents the first approach to verify STMs under relaxed memory models with atomicity of 32 bit loads and stores, and read-modify-write operations. We describe RML, a simple language for expressing concurrent programs. We develop a semantics of RML parametrized by a relaxed memory model. We then present our tool, FOIL, which takes as input the RML description of an STM algorithm restricted to two threads and two variables, and the description of a memory model, and automatically determines the locations of fences, which if inserted, ensure the correctness of the restricted STM algorithm under the given memory model. We use FOIL to verify DSTM, TL2, and McRT STM under the memory models of sequential consistency, total store order, partial store order, and relaxed memory order for two threads and two variables. Finally, we extend the verification results for DSTM and TL2 to an arbitrary number of threads and variables by manually proving that the structural properties of STMs are satisfied at the hardware level of atomicity under the considered relaxed memory models.},
  author       = {Guerraoui, Rachid and Henzinger, Thomas A. and Singh, Vasu},
  journal      = {Formal Methods in System Design},
  number       = {3},
  pages        = {297--331},
  publisher    = {Springer},
  title        = {Verification of {STM} on Relaxed Memory Models},
  doi          = {10.1007/s10703-011-0131-3},
  volume       = {39},
  year         = {2011},
}

@techreport{5383,
  abstract     = {We present a new decidable logic called TREX for expressing constraints about imperative tree data structures. In particular, TREX supports a transitive closure operator that can express reachability constraints, which often appear in data structure invariants. We show that our logic is closed under weakest precondition computation, which enables its use for automated software verification. We further show that satisfiability of formulas in TREX is decidable in NP. The low complexity makes it an attractive alternative to more expensive logics such as monadic second-order logic (MSOL) over trees, which have been traditionally used for reasoning about tree data structures.},
  author       = {Wies, Thomas and Muñiz, Marco and Kuncak, Viktor},
  institution  = {IST Austria},
  issn         = {2664-1690},
  number       = {IST-2011-0005},
  pages        = {25},
  title        = {On an Efficient Decision Procedure for Imperative Tree Data Structures},
  doi          = {10.15479/AT:IST-2011-0005},
  year         = {2011},
}

@techreport{5385,
  abstract     = {There is recently a significant effort to add quantitative objectives to formal verification and synthesis. We introduce and investigate the extension of temporal logics with quantitative atomic assertions, aiming for a general and flexible framework for quantitative-oriented specifications. In the heart of quantitative objectives lies the accumulation of values along a computation. It is either the accumulated summation, as with the energy objectives, or the accumulated average, as with the mean-payoff objectives. We investigate the extension of temporal logics with the prefix-accumulation assertions Sum(v) ≥ c and Avg(v) ≥ c, where v is a numeric variable of the system, c is a constant rational number, and Sum(v) and Avg(v) denote the accumulated sum and average of the values of v from the beginning of the computation up to the current point of time. We also allow the path-accumulation assertions LimInfAvg(v) ≥ c and LimSupAvg(v) ≥ c, referring to the average value along an entire computation. We study the border of decidability for extensions of various temporal logics. In particular, we show that extending the fragment of CTL that has only the EX, EF, AX, and AG temporal modalities by prefix-accumulation assertions and extending LTL with path-accumulation assertions, result in temporal logics whose model-checking problem is decidable. The extended logics allow to significantly extend the currently known energy and mean-payoff objectives. Moreover, the prefix-accumulation assertions may be refined with “controlled-accumulation”, allowing, for example, to specify constraints on the average waiting time between a request and a grant. On the negative side, we show that the fragment we point to is, in a sense, the maximal logic whose extension with prefix-accumulation assertions permits a decidable model-checking procedure. Extending a temporal logic that has the EG or EU modalities, and in particular CTL and LTL, makes the problem undecidable.},
  author       = {Boker, Udi and Chatterjee, Krishnendu and Henzinger, Thomas A. and Kupferman, Orna},
  institution  = {IST Austria},
  issn         = {2664-1690},
  number       = {IST-2011-0003},
  pages        = {14},
  title        = {Temporal Specifications with Accumulative Values},
  doi          = {10.15479/AT:IST-2011-0003},
  year         = {2011},
}

@inproceedings{10908,
  abstract     = {We present ABC, a software tool for automatically computing symbolic upper bounds on the number of iterations of nested program loops. The system combines static analysis of programs with symbolic summation techniques to derive loop invariant relations between program variables. Iteration bounds are obtained from the inferred invariants, by replacing variables with bounds on their greatest values. We have successfully applied ABC to a large number of examples. The derived symbolic bounds express non-trivial polynomial relations over loop variables. We also report on results to automatically infer symbolic expressions over harmonic numbers as upper bounds on loop iteration counts.},
  author       = {Blanc, Régis and Henzinger, Thomas A. and Hottelier, Thibaud and Kovács, Laura},
  booktitle    = {Logic for Programming, Artificial Intelligence, and Reasoning},
  editor       = {Clarke, Edmund M. and Voronkov, Andrei},
  isbn         = {9783642175107},
  issn         = {1611-3349},
  location     = {Dakar, Senegal},
  pages        = {103--118},
  publisher    = {Springer Nature},
  title        = {{ABC}: Algebraic Bound Computation for Loops},
  doi          = {10.1007/978-3-642-17511-4_7},
  volume       = {6355},
  year         = {2010},
}

@inproceedings{488,
  abstract     = {Streaming string transducers [1] define (partial) functions from input strings to output strings. A streaming string transducer makes a single pass through the input string and uses a finite set of variables that range over strings from the output alphabet. At every step, the transducer processes an input symbol, and updates all the variables in parallel using assignments whose right-hand-sides are concatenations of output symbols and variables with the restriction that a variable can be used at most once in a right-hand-side expression. It has been shown that streaming string transducers operating on strings over infinite data domains are of interest in algorithmic verification of list-processing programs, as they lead to PSPACE decision procedures for checking pre/post conditions and for checking semantic equivalence, for a well-defined class of heap-manipulating programs. In order to understand the theoretical expressiveness of streaming transducers, we focus on streaming transducers processing strings over finite alphabets, given the existence of a robust and well-studied class of ``regular'' transductions for this case. Such regular transductions can be defined either by two-way deterministic finite-state transducers, or using a logical MSO-based characterization. Our main result is that the expressiveness of streaming string transducers coincides exactly with this class of regular transductions.},
  author       = {Alur, Rajeev and Cerny, Pavol},
  booktitle    = {Foundations of Software Technology and Theoretical Computer Science (FSTTCS)},
  location     = {Chennai, India},
  pages        = {1--12},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {Expressiveness of Streaming String Transducers},
  doi          = {10.4230/LIPIcs.FSTTCS.2010.1},
  volume       = {8},
  year         = {2010},
}

@article{533,
  abstract     = {Any programming error that can be revealed before compiling a program saves precious time for the programmer. While integrated development environments already do a good job by detecting, e.g., data-flow abnormalities, current static analysis tools suffer from false positives (``noise'') or require strong user interaction. We propose to avoid this deficiency by defining a new class of errors. A program fragment is doomed if its execution will inevitably fail, regardless of which state it is started in. We use a formal verification method to identify such errors fully automatically and, most significantly, without producing noise. We report on experiments with a prototype tool.},
  author       = {Hoenicke, Jochen and Leino, K. Rustan M. and Podelski, Andreas and Schäf, Martin and Wies, Thomas},
  journal      = {Formal Methods in System Design},
  number       = {2--3},
  pages        = {171--199},
  publisher    = {Springer},
  title        = {Doomed Program Points},
  doi          = {10.1007/s10703-010-0102-0},
  volume       = {37},
  year         = {2010},
}

@techreport{5388,
  abstract     = {We present an algorithmic method for the synthesis of concurrent programs that are optimal with respect to quantitative performance measures. The input consists of a sequential sketch, that is, a program that does not contain synchronization constructs, and of a parametric performance model that assigns costs to actions such as locking, context switching, and idling. The quantitative synthesis problem is to automatically introduce synchronization constructs into the sequential sketch so that both correctness is guaranteed and worst-case (or average-case) performance is optimized. Correctness is formalized as race freedom or linearizability.

We show that for worst-case performance, the problem can be modeled
as a 2-player graph game with quantitative (limit-average) objectives, and
for average-case performance, as a 2 1/2 -player graph game (with probabilistic transitions). In both cases, the optimal correct program is derived from an optimal strategy in the corresponding quantitative game. We prove that the respective game problems are computationally expensive (NP-complete), and present several techniques that overcome the theoretical difficulty in cases of concurrent programs of practical interest.

We have implemented a prototype tool and used it for the automatic synthesis of programs that access a concurrent list. For certain parameter values, our method automatically synthesizes various classical synchronization schemes for implementing a concurrent list, such as fine-grained locking or a lazy algorithm. For other parameter values, a new, hybrid synchronization style is synthesized, which uses both the lazy approach and coarse-grained locks (instead of standard fine-grained locks). The trade-off occurs because while fine-grained locking tends to decrease the cost that is due to waiting for locks, it increases cache size requirements.},
  author       = {Chatterjee, Krishnendu and Cerny, Pavol and Henzinger, Thomas A. and Radhakrishna, Arjun and Singh, Rohit},
  institution  = {IST Austria},
  issn         = {2664-1690},
  number       = {IST-2010-0004},
  pages        = {17},
  title        = {Quantitative Synthesis for Concurrent Programs},
  doi          = {10.15479/AT:IST-2010-0004},
  year         = {2010},
}

@techreport{5389,
  abstract     = {Boolean notions of correctness are formalized by preorders on systems. Quantitative measures of correctness can be formalized by real-valued distance functions between systems, where the distance between implementation and specification provides a measure of “fit” or “desirability.” We extend the simulation preorder to the quantitative setting, by making each player of a simulation game pay a certain price for her choices. We use the resulting games with quantitative objectives to define three different simulation distances. The correctness distance measures how much the specification must be changed in order to be satisfied by the implementation. The coverage distance measures how much the implementation restricts the degrees of freedom offered by the specification. The robustness distance measures how much a system can deviate from the implementation description without violating the specification. We consider these distances for safety as well as liveness specifications. The distances can be computed in polynomial time for safety specifications, and for liveness specifications given by weak fairness constraints. We show that the distance functions satisfy the triangle inequality, that the distance between two systems does not increase under parallel composition with a third system, and that the distance between two systems can be bounded from above and below by distances between abstractions of the two systems. These properties suggest that our simulation distances provide an appropriate basis for a quantitative theory of discrete systems. We also demonstrate how the robustness distance can be used to measure how many transmission errors are tolerated by error correcting codes.},
  author       = {Cerny, Pavol and Henzinger, Thomas A. and Radhakrishna, Arjun},
  institution  = {IST Austria},
  issn         = {2664-1690},
  number       = {IST-2010-0003},
  pages        = {24},
  title        = {Simulation Distances},
  doi          = {10.15479/AT:IST-2010-0003},
  year         = {2010},
}

@techreport{5391,
  abstract     = {Concurrent data structures with fine-grained synchronization are notoriously difficult to implement correctly. The difficulty of reasoning about these implementations does not stem from the number of variables or the program size, but rather from the large number of possible interleavings. These implementations are therefore prime candidates for model checking. We introduce an algorithm for verifying linearizability of singly-linked heap-based concurrent data structures. We consider a model consisting of an unbounded heap where each node contains an element from an unbounded data domain, with a restricted set of operations for testing and updating pointers and data elements. Our main result is that linearizability is decidable for programs that invoke a fixed number of methods, possibly in parallel. This decidable fragment covers many of the common implementation techniques — fine-grained locking, lazy synchronization, and lock-free synchronization. We also show how the technique can be used to verify optimistic implementations with the help of programmer annotations. We developed a verification tool CoLT and evaluated it on a representative sample of Java implementations of the concurrent set data structure. The tool verified linearizability of a number of implementations, found a known error in a lock-free implementation and proved that the corrected version is linearizable.},
  author       = {Cerny, Pavol and Radhakrishna, Arjun and Zufferey, Damien and Chaudhuri, Swarat and Alur, Rajeev},
  institution  = {IST Austria},
  issn         = {2664-1690},
  number       = {IST-2010-0001},
  pages        = {27},
  title        = {Model Checking of Linearizability of Concurrent List Implementations},
  doi          = {10.15479/AT:IST-2010-0001},
  year         = {2010},
}

@inproceedings{3719,
  abstract     = {The induction of a signaling pathway is characterized by transient complex formation and mutual posttranslational modification of proteins. To faithfully capture this combinatorial process in a mathematical model is an important challenge in systems biology. Exploiting the limited context on which most binding and modification events are conditioned, attempts have been made to reduce the combinatorial complexity by quotienting the reachable set of molecular species, into species aggregates while preserving the deterministic semantics of the thermodynamic limit. Recently we proposed a quotienting that also preserves the stochastic semantics and that is complete in the sense that the semantics of individual species can be recovered from the aggregate semantics. In this paper we prove that this quotienting yields a sufficient condition for weak lumpability and that it gives rise to a backward Markov bisimulation between the original and aggregated transition system. We illustrate the framework on a case study of the EGF/insulin receptor crosstalk.},
  author       = {Feret, Jérôme and Henzinger, Thomas A. and Koeppl, Heinz and Petrov, Tatjana},
  internal-note = {Required booktitle is missing — volume 40 with Open Publishing Association suggests an EPTCS proceedings (likely MeCBIC 2010, Jena); verify against the publisher record and add booktitle/doi.},
  location     = {Jena, Germany},
  pages        = {142--161},
  publisher    = {Open Publishing Association},
  title        = {Lumpability Abstractions of Rule-Based Systems},
  volume       = {40},
  year         = {2010},
}

@article{3834,
  abstract     = {Background

The chemical master equation (CME) is a system of ordinary differential equations that describes the evolution of a network of chemical reactions as a stochastic process. Its solution yields the probability density vector of the system at each point in time. Solving the CME numerically is in many cases computationally expensive or even infeasible as the number of reachable states can be very large or infinite. We introduce the sliding window method, which computes an approximate solution of the CME by performing a sequence of local analysis steps. In each step, only a manageable subset of states is considered, representing a ``window'' into the state space. In subsequent steps, the window follows the direction in which the probability mass moves, until the time period of interest has elapsed. We construct the window based on a deterministic approximation of the future behavior of the system by estimating upper and lower bounds on the populations of the chemical species.
Results

In order to show the effectiveness of our approach, we apply it to several examples previously described in the literature. The experimental results show that the proposed method speeds up the analysis considerably, compared to a global analysis, while still providing high accuracy.


Conclusions

The sliding window method is a novel approach to address the performance problems of numerical algorithms for the solution of the chemical master equation. The method efficiently approximates the probability distributions at the time points of interest for a variety of chemically reacting systems, including systems for which no upper bound on the population sizes of the chemical species is known a priori.},
  author       = {Wolf, Verena and Goel, Rushil and Mateescu, Maria and Henzinger, Thomas A.},
  journal      = {BMC Systems Biology},
  number       = {42},
  pages        = {1--19},
  publisher    = {BioMed Central},
  title        = {Solving the Chemical Master Equation Using Sliding Windows},
  doi          = {10.1186/1752-0509-4-42},
  volume       = {4},
  year         = {2010},
}

@inproceedings{3838,
  abstract     = {We present a numerical approximation technique for the analysis of continuous-time Markov chains that describe networks of biochemical reactions and play an important role in the stochastic modeling of biological systems. Our approach is based on the construction of a stochastic hybrid model in which certain discrete random variables of the original Markov chain are approximated by continuous deterministic variables. We compute the solution of the stochastic hybrid model using a numerical algorithm that discretizes time and in each step performs a mutual update of the transient probability distribution of the discrete stochastic variables and the values of the continuous deterministic variables. We implemented the algorithm and we demonstrate its usefulness and efficiency on several case studies from systems biology.},
  author       = {Henzinger, Thomas A. and Mateescu, Maria and Mikeev, Linar and Wolf, Verena},
  booktitle    = {Proceedings of the 8th International Conference on Computational Methods in Systems Biology (CMSB)},
  location     = {Trento, Italy},
  pages        = {55--65},
  publisher    = {ACM},
  title        = {Hybrid Numerical Solution of the Chemical Master Equation},
  doi          = {10.1145/1839764.1839772},
  year         = {2010},
}

@inproceedings{3839,
  abstract     = {We present a loop property generation method for loops iterating over multi-dimensional arrays. When used on matrices, our method is able to infer their shapes (also called types), such as upper-triangular, diagonal, etc. To generate loop properties, we first transform a nested loop iterating over a multi-dimensional array into an equivalent collection of unnested loops. Then, we infer quantified loop invariants for each unnested loop using a generalization of a recurrence-based invariant generation technique. These loop invariants give us conditions on matrices from which we can derive matrix types automatically using theorem provers. Invariant generation is implemented in the software package Aligator and types are derived by theorem provers and SMT solvers, including Vampire and Z3. When run on the Java matrix package JAMA, our tool was able to infer automatically all matrix types describing the matrix shapes guaranteed by JAMA’s API.},
  author       = {Henzinger, Thomas A. and Hottelier, Thibaud and Kovács, Laura and Voronkov, Andrei},
  booktitle    = {Verification, Model Checking, and Abstract Interpretation (VMCAI)},
  location     = {Madrid, Spain},
  pages        = {163--179},
  publisher    = {Springer},
  title        = {Invariant and Type Inference for Matrices},
  doi          = {10.1007/978-3-642-11319-2_14},
  volume       = {5944},
  year         = {2010},
}

@inproceedings{3840,
  abstract     = {Classical formalizations of systems and properties are boolean: given a system and a property, the property is either true or false of the system. Correspondingly, classical methods for system analysis determine the truth value of a property, preferably giving a proof if the property is true, and a counterexample if the property is false; classical methods for system synthesis construct a system for which a property is true; classical methods for system transformation, composition, and abstraction aim to preserve the truth of properties. The boolean view is prevalent even if the system, the property, or both refer to numerical quantities, such as the times or probabilities of events. For example, a timed automaton either satisfies or violates a formula of a real-time logic; a stochastic process either satisfies or violates a formula of a probabilistic logic. The classical black-and-white view partitions the world into ``correct'' and ``incorrect'' systems, offering few nuances. In reality, of several systems that satisfy a property in the boolean sense, often some are more desirable than others, and of the many systems that violate a property, usually some are less objectionable than others. For instance, among the systems that satisfy the response property that every request be granted, we may prefer systems that grant requests quickly (the quicker, the better), or we may prefer systems that issue few unnecessary grants (the fewer, the better); and among the systems that violate the response property, we may prefer systems that serve many initial requests (the more, the better), or we may prefer systems that serve many requests in the long run (the greater the fraction of served to unserved requests, the better).
Formally, while a boolean notion of correctness is given by a preorder on systems and properties, a quantitative notion of correctness is defined by a directed metric on systems and properties, where the distance between a system and a property provides a measure of ``fit'' or ``desirability.'' There are many ways how such distances can be defined. In a linear-time framework, one assigns numerical values to individual behaviors before assigning values to systems and properties, which are sets of behaviors. For example, the value of a single behavior may be a discounted value, which is largely determined by a prefix of the behavior, e.g., by the number of requests that are granted before the first request that is not granted; or a limit value, which is independent of any finite prefix. A limit value may be an average, such as the average response time over an infinite sequence of requests and grants, or a supremum, such as the worst-case response time. Similarly, the value of a set of behaviors may be an extremum or an average across the values of all behaviors in the set: in this way one can measure the worst of all possible average-case response times, or the average of all possible worst-case response times, etc. Accordingly, the distance between two sets of behaviors may be defined as the worst or average difference between the values of corresponding behaviors. In summary, we propagate replacing boolean specifications for the correctness of systems with quantitative measures for the desirability of systems. In quantitative analysis, the aim is to compute the distance between a system and a property (or between two systems, or two properties); in quantitative synthesis, the objective is to construct a system that has minimal distance from a given property. Multiple quantitative measures can be prioritized (e.g., combined lexicographically into a single measure) or studied along the Pareto curve.
Quantitative transformations, compositions, and abstractions of systems are useful if they allow us to bound the induced change in distance from a property. We present some initial results in some of these directions. We also give some potential applications, which not only generalize traditional correctness concerns in the functional, timed, and probabilistic domains, but also capture such system measures as resource use, performance, cost, reliability, and robustness.},
  author       = {Henzinger, Thomas A.},
  booktitle    = {Proceedings of the 37th ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages (POPL)},
  location     = {Madrid, Spain},
  number       = {1},
  pages        = {157--158},
  publisher    = {ACM},
  title        = {From {Boolean} to Quantitative Notions of Correctness},
  doi          = {10.1145/1706299.1706319},
  volume       = {45},
  year         = {2010},
}

@article{3842,
  abstract     = {Within systems biology there is an increasing interest in the stochastic behavior of biochemical reaction networks. An appropriate stochastic description is provided by the chemical master equation, which represents a continuous-time Markov chain (CTMC). The uniformization technique is an efficient method to compute probability distributions of a CTMC if the number of states is manageable. However, the size of a CTMC that represents a biochemical reaction network is usually far beyond what is feasible. In this paper we present an on-the-fly variant of uniformization, where we improve the original algorithm at the cost of a small approximation error. By means of several examples, we show that our approach is particularly well-suited for biochemical reaction networks.},
  author       = {Didier, Frédéric and Henzinger, Thomas A. and Mateescu, Maria and Wolf, Verena},
  journal      = {IET Systems Biology},
  number       = {6},
  pages        = {441--452},
  publisher    = {Institution of Engineering and Technology},
  title        = {Fast Adaptive Uniformization of the Chemical Master Equation},
  doi          = {10.1049/iet-syb.2010.0005},
  volume       = {4},
  year         = {2010},
}

@inproceedings{3845,
  abstract     = {This paper presents Aligators, a tool for the generation of universally quantified array invariants. Aligators leverages recurrence solving and algebraic techniques to carry out inductive reasoning over array content. The Aligators’ loop extraction module allows treatment of multi-path loops by exploiting their commutativity and serializability properties. Our experience in applying Aligators on a collection of loops from open source software projects indicates the applicability of recurrence and algebraic solving techniques for reasoning about arrays.},
  author       = {Henzinger, Thomas A. and Hottelier, Thibaud and Kovács, Laura and Rybalchenko, Andrey},
  booktitle    = {Logic for Programming, Artificial Intelligence, and Reasoning (LPAR)},
  location     = {Yogyakarta, Indonesia},
  pages        = {348--356},
  publisher    = {Springer},
  title        = {{Aligators} for Arrays},
  doi          = {10.1007/978-3-642-16242-8_25},
  volume       = {6397},
  year         = {2010},
}

@inproceedings{3847,
  abstract     = {The importance of stochasticity within biological systems has been shown repeatedly during the last years and has raised the need for efficient stochastic tools. We present SABRE, a tool for stochastic analysis of biochemical reaction networks. SABRE implements fast adaptive uniformization (FAU), a direct numerical approximation algorithm for computing transient solutions of biochemical reaction networks. Biochemical reactions networks represent biological systems studied at a molecular level and these reactions can be modeled as transitions of a Markov chain. SABRE accepts as input the formalism of guarded commands, which it interprets either as continuous-time or as discrete-time Markov chains. Besides operating in a stochastic mode, SABRE may also perform a deterministic analysis by directly computing a mean-field approximation of the system under study. We illustrate the different functionalities of SABRE by means of biological case studies.},
  author       = {Didier, Frédéric and Henzinger, Thomas A. and Mateescu, Maria and Wolf, Verena},
  booktitle    = {Proceedings of the 7th International Conference on Quantitative Evaluation of Systems (QEST)},
  location     = {Williamsburg, USA},
  pages        = {193--194},
  publisher    = {IEEE},
  title        = {{SABRE}: A Tool for the Stochastic Analysis of Biochemical Reaction Networks},
  doi          = {10.1109/QEST.2010.33},
  year         = {2010},
}

