@inproceedings{1636,
  abstract     = {Constraint Satisfaction Problem (CSP) is a fundamental algorithmic problem that appears in many areas of Computer Science. It can be equivalently stated as computing a homomorphism R→Γ between two relational structures, e.g. between two directed graphs. Analyzing its complexity has been a prominent research direction, especially for the fixed template CSPs where the right side Γ is fixed and the left side R is unconstrained.

Far fewer results are known for the hybrid setting that restricts both sides simultaneously. It assumes that R belongs to a certain class of relational structures (called a structural restriction in this paper). We study which structural restrictions are effective, i.e. there exists a fixed template Γ (from a certain class of languages) for which the problem is tractable when R is restricted, and NP-hard otherwise. We provide a characterization for structural restrictions that are closed under inverse homomorphisms. The criterion is based on the chromatic number of a relational structure defined in this paper; it generalizes the standard chromatic number of a graph.

As our main tool, we use the algebraic machinery developed for fixed template CSPs. To apply it to our case, we introduce a new construction called a “lifted language”. We also give a characterization for structural restrictions corresponding to minor-closed families of graphs, extend results to certain Valued CSPs (namely conservative valued languages), and state implications for (valued) CSPs with ordered variables and for the maximum weight independent set problem on some restricted families of graphs.},
  author       = {Kolmogorov, Vladimir and Rolinek, Michal and Takhanov, Rustem},
  booktitle    = {26th International Symposium on Algorithms and Computation (ISAAC 2015)},
  isbn         = {978-3-662-48970-3},
  location     = {Nagoya, Japan},
  pages        = {566 -- 577},
  publisher    = {Springer Nature},
  title        = {{Effectiveness of structural restrictions for hybrid CSPs}},
  doi          = {10.1007/978-3-662-48971-0_48},
  volume       = {9472},
  year         = {2015},
}

@inproceedings{1637,
  abstract     = {An instance of the Valued Constraint Satisfaction Problem (VCSP) is given by a finite set of variables, a finite domain of labels, and a sum of functions, each function depending on a subset of the variables. Each function can take finite values specifying costs of assignments of labels to its variables or the infinite value, which indicates an infeasible assignment. The goal is to find an assignment of labels to the variables that minimizes the sum. We study, assuming that P ≠ NP, how the complexity of this very general problem depends on the set of functions allowed in the instances, the so-called constraint language. The case when all allowed functions take values in {0, ∞} corresponds to ordinary CSPs, where one deals only with the feasibility issue and there is no optimization. This case is the subject of the Algebraic CSP Dichotomy Conjecture predicting for which constraint languages CSPs are tractable (i.e. solvable in polynomial time) and for which NP-hard. The case when all allowed functions take only finite values corresponds to finite-valued CSP, where the feasibility aspect is trivial and one deals only with the optimization issue. The complexity of finite-valued CSPs was fully classified by Thapper and Zivny. An algebraic necessary condition for tractability of a general-valued CSP with a fixed constraint language was recently given by Kozik and Ochremiak. As our main result, we prove that if a constraint language satisfies this algebraic necessary condition, and the feasibility CSP (i.e. the problem of deciding whether a given instance has a feasible solution) corresponding to the VCSP with this language is tractable, then the VCSP is tractable. The algorithm is a simple combination of the assumed algorithm for the feasibility CSP and the standard LP relaxation. As a corollary, we obtain that a dichotomy for ordinary CSPs would imply a dichotomy for general-valued CSPs.},
  author       = {Kolmogorov, Vladimir and Krokhin, Andrei and Rolinek, Michal},
  location     = {Berkeley, CA, United States},
  pages        = {1246 -- 1258},
  publisher    = {IEEE},
  title        = {{The complexity of general-valued CSPs}},
  doi          = {10.1109/FOCS.2015.80},
  year         = {2015},
}

@article{1639,
  abstract     = {In this paper the optimal transport and the metamorphosis perspectives are combined. For a pair of given input images geodesic paths in the space of images are defined as minimizers of a resulting path energy. To this end, the underlying Riemannian metric measures the rate of transport cost and the rate of viscous dissipation. Furthermore, the model is capable of dealing with strongly varying image contrast and explicitly allows for sources and sinks in the transport equations, which are incorporated in the metric related to the metamorphosis approach by Trouvé and Younes. In the non-viscous case with source term, existence of geodesic paths is proven in the space of measures. The proposed model is explored on the range from merely optimal transport to strongly dissipative dynamics. For this model a robust and effective variational time discretization of geodesic paths is proposed. This requires minimizing a discrete path energy consisting of a sum of consecutive image matching functionals. These functionals are defined on corresponding pairs of intensity functions and on associated pairwise matching deformations. Existence of time discrete geodesics is demonstrated. Furthermore, a finite element implementation is proposed and applied to instructive test cases and to real images. In the non-viscous case this is compared to the algorithm proposed by Benamou and Brenier including a discretization of the source term. Finally, the model is generalized to define discrete weighted barycentres with applications to textures and objects.},
  author       = {Maas, Jan and Rumpf, Martin and Schönlieb, Carola and Simon, Stefan},
  journal      = {ESAIM: Mathematical Modelling and Numerical Analysis},
  number       = {6},
  pages        = {1745 -- 1769},
  publisher    = {EDP Sciences},
  title        = {{A generalized model for optimal transport of images including dissipation and density modulation}},
  doi          = {10.1051/m2an/2015043},
  volume       = {49},
  year         = {2015},
}

@article{1640,
  abstract     = {Auxin and cytokinin are key endogenous regulators of plant development. Although cytokinin-mediated modulation of auxin distribution is a developmentally crucial hormonal interaction, its molecular basis is largely unknown. Here we show a direct regulatory link between cytokinin signalling and the auxin transport machinery uncovering a mechanistic framework for cytokinin-auxin cross-talk. We show that the CYTOKININ RESPONSE FACTORS (CRFs), transcription factors downstream of cytokinin perception, transcriptionally control genes encoding PIN-FORMED (PIN) auxin transporters at a specific PIN CYTOKININ RESPONSE ELEMENT (PCRE) domain. Removal of this cis-regulatory element effectively uncouples PIN transcription from the CRF-mediated cytokinin regulation and attenuates plant cytokinin sensitivity. We propose that CRFs represent a missing cross-talk component that fine-tunes auxin transport capacity downstream of cytokinin signalling to control plant development.},
  author       = {Šimášková, Mária and O'Brien, José and Khan-Djamei, Mamoona and Van Noorden, Giel and Ötvös, Krisztina and Vieten, Anne and De Clercq, Inge and Van Haperen, Johanna and Cuesta, Candela and Hoyerová, Klára and Vanneste, Steffen and Marhavy, Peter and Wabnik, Krzysztof T and Van Breusegem, Frank and Nowack, Moritz and Murphy, Angus and Friml, Jiří and Weijers, Dolf and Beeckman, Tom and Benková, Eva},
  journal      = {Nature Communications},
  publisher    = {Nature Publishing Group},
  title        = {{Cytokinin response factors regulate PIN-FORMED auxin transporters}},
  doi          = {10.1038/ncomms9717},
  volume       = {6},
  year         = {2015},
}

@article{1642,
  abstract     = {The Hanani-Tutte theorem is a classical result proved for the first time in the 1930s that characterizes planar graphs as graphs that admit a drawing in the plane in which every pair of edges not sharing a vertex cross an even number of times. We generalize this result to clustered graphs with two disjoint clusters, and show that a straightforward extension to flat clustered graphs with three or more disjoint clusters is not possible. For general clustered graphs we show a variant of the Hanani-Tutte theorem in the case when each cluster induces a connected subgraph. Di Battista and Frati proved that clustered planarity of embedded clustered graphs whose every face is incident to at most five vertices can be tested in polynomial time. We give a new and short proof of this result, using the matroid intersection algorithm.},
  author       = {Fulek, Radoslav and Kynčl, Jan and Malinovič, Igor and Pálvölgyi, Dömötör},
  issn         = {1077-8926},
  journal      = {Electronic Journal of Combinatorics},
  number       = {4},
  publisher    = {Electronic Journal of Combinatorics},
  title        = {{Clustered planarity testing revisited}},
  doi          = {10.37236/5002},
  volume       = {22},
  year         = {2015},
}

@inproceedings{1644,
  abstract     = {Increasing the computational complexity of evaluating a hash function, both for the honest users as well as for an adversary, is a useful technique employed for example in password-based cryptographic schemes to impede brute-force attacks, and also in so-called proofs of work (used in protocols like Bitcoin) to show that a certain amount of computation was performed by a legitimate user. A natural approach to adjust the complexity of a hash function is to iterate it c times, for some parameter c, in the hope that any query to the scheme requires c evaluations of the underlying hash function. However, results by Dodis et al. (Crypto 2012) imply that plain iteration falls short of achieving this goal, and designing schemes which provably have such a desirable property remained an open problem. This paper formalizes explicitly what it means for a given scheme to amplify the query complexity of a hash function. In the random oracle model, the goal of a secure query-complexity amplifier (QCA) scheme is captured as transforming, in the sense of indifferentiability, a random oracle allowing R queries (for the adversary) into one provably allowing only r < R queries. Turned around, this means that making r queries to the scheme requires at least R queries to the actual random oracle. Second, a new scheme, called collision-free iteration, is proposed and proven to achieve c-fold QCA for both the honest parties and the adversary, for any fixed parameter c.},
  author       = {Demay, Grégory and Gazi, Peter and Maurer, Ueli and Tackmann, Björn},
  location     = {Lugano, Switzerland},
  pages        = {159 -- 180},
  publisher    = {Springer},
  title        = {{Query-complexity amplification for random oracles}},
  doi          = {10.1007/978-3-319-17470-9_10},
  volume       = {9063},
  year         = {2015},
}

@inproceedings{1646,
  abstract     = {A pseudorandom function (PRF) is a keyed function F : K × X → Y where, for a random key k ∈ K, the function F(k, ·) is indistinguishable from a uniformly random function, given black-box access. A key-homomorphic PRF has the additional feature that for any keys k, k' and any input x, we have F(k+k', x) = F(k, x)⊕F(k', x) for some group operations +,⊕ on K and Y, respectively. A constrained PRF for a family of sets S ⊆ P(X) has the property that, given any key k and set S ∈ S, one can efficiently compute a “constrained” key k_S that enables evaluation of F(k, x) on all inputs x ∈ S, while the values F(k, x) for x /∈ S remain pseudorandom even given k_S. In this paper we construct PRFs that are simultaneously constrained and key-homomorphic, where the homomorphic property holds even for constrained keys. We first show that the multilinear map-based bit-fixing and circuit-constrained PRFs of Boneh and Waters (Asiacrypt 2013) can be modified to also be key-homomorphic. We then show that the LWE-based key-homomorphic PRFs of Banerjee and Peikert (Crypto 2014) are essentially already prefix-constrained PRFs, using a (non-obvious) definition of constrained keys and associated group operation. Moreover, the constrained keys themselves are pseudorandom, and the constraining and evaluation functions can all be computed in low depth. As an application of key-homomorphic constrained PRFs, we construct a proxy re-encryption scheme with fine-grained access control. This scheme allows storing encrypted data on an untrusted server, where each file can be encrypted relative to some attributes, so that only parties whose constrained keys match the attributes can decrypt. Moreover, the server can re-key (arbitrary subsets of) the ciphertexts without learning anything about the plaintexts, thus permitting efficient and fine-grained revocation.},
  author       = {Banerjee, Abhishek and Fuchsbauer, Georg and Peikert, Chris and Pietrzak, Krzysztof Z and Stevens, Sophie},
  booktitle    = {12th Theory of Cryptography Conference},
  isbn         = {978-3-662-46496-0},
  location     = {Warsaw, Poland},
  pages        = {31 -- 60},
  publisher    = {Springer Nature},
  title        = {{Key-homomorphic constrained pseudorandom functions}},
  doi          = {10.1007/978-3-662-46497-7_2},
  volume       = {9015},
  year         = {2015},
}

@inproceedings{1647,
  abstract     = {Round-optimal blind signatures are notoriously hard to construct in the standard model, especially in the malicious-signer model, where blindness must hold under adversarially chosen keys. This is substantiated by several impossibility results. The only construction that can be termed theoretically efficient, by Garg and Gupta (Eurocrypt’14), requires complexity leveraging, inducing an exponential security loss. We present a construction of practically efficient round-optimal blind signatures in the standard model. It is conceptually simple and builds on the recent structure-preserving signatures on equivalence classes (SPS-EQ) from Asiacrypt’14. While the traditional notion of blindness follows from standard assumptions, we prove blindness under adversarially chosen keys under an interactive variant of DDH. However, we neither require non-uniform assumptions nor complexity leveraging. We then show how to extend our construction to partially blind signatures and to blind signatures on message vectors, which yield a construction of one-show anonymous credentials à la “anonymous credentials light” (CCS’13) in the standard model. Furthermore, we give the first SPS-EQ construction under non-interactive assumptions and show how SPS-EQ schemes imply conventional structure-preserving signatures, which allows us to apply optimality results for the latter to SPS-EQ.},
  author       = {Fuchsbauer, Georg and Hanser, Christian and Slamanig, Daniel},
  location     = {Santa Barbara, CA, United States},
  pages        = {233 -- 253},
  publisher    = {Springer},
  title        = {{Practical round-optimal blind signatures in the standard model}},
  doi          = {10.1007/978-3-662-48000-7_12},
  volume       = {9216},
  year         = {2015},
}

@inproceedings{1648,
  abstract     = {Generalized Selective Decryption (GSD), introduced by Panjwani [TCC’07], is a game for a symmetric encryption scheme Enc that captures the difficulty of proving adaptive security of certain protocols, most notably the Logical Key Hierarchy (LKH) multicast encryption protocol. In the GSD game there are n keys k_1, ..., k_n, which the adversary may adaptively corrupt (learn); moreover, it can ask for encryptions Enc_{k_i}(k_j) of keys under other keys. The adversary’s task is to distinguish keys (which it cannot trivially compute) from random. Proving the hardness of GSD assuming only IND-CPA security of Enc is surprisingly hard. Using “complexity leveraging” loses a factor exponential in n, which makes the proof practically meaningless. We can think of the GSD game as building a graph on n vertices, where we add an edge i → j when the adversary asks for an encryption of k_j under k_i. If restricted to graphs of depth ℓ, Panjwani gave a reduction that loses only a factor exponential in ℓ (not n). To date, this is the only non-trivial result known for GSD. In this paper we give almost-polynomial reductions for large classes of graphs. Most importantly, we prove the security of the GSD game restricted to trees losing only a quasi-polynomial factor n^{3 log n + 5}. Trees are an important special case capturing real-world protocols like the LKH protocol. Our new bound improves upon Panjwani’s on some LKH variants proposed in the literature where the underlying tree is not balanced. Our proof builds on ideas from the “nested hybrids” technique recently introduced by Fuchsbauer et al. [Asiacrypt’14] for proving the adaptive security of constrained PRFs.},
  author       = {Fuchsbauer, Georg and Jafargholi, Zahra and Pietrzak, Krzysztof Z},
  location     = {Santa Barbara, CA, USA},
  pages        = {601 -- 620},
  publisher    = {Springer},
  title        = {{A quasipolynomial reduction for generalized selective decryption on trees}},
  doi          = {10.1007/978-3-662-47989-6_29},
  volume       = {9215},
  year         = {2015},
}

@inproceedings{1649,
  abstract     = {We extend a commitment scheme based on the learning with errors over rings (RLWE) problem, and present efficient companion zero-knowledge proofs of knowledge. Our scheme maps elements from the ring (or equivalently, n elements from },
  author       = {Benhamouda, Fabrice and Krenn, Stephan and Lyubashevsky, Vadim and Pietrzak, Krzysztof Z},
  location     = {Vienna, Austria},
  pages        = {305 -- 325},
  publisher    = {Springer},
  title        = {{Efficient zero-knowledge proofs for commitments from learning with errors over rings}},
  doi          = {10.1007/978-3-319-24174-6_16},
  volume       = {9326},
  year         = {2015},
}

@inproceedings{1650,
  abstract     = {We consider the task of deriving a key with high HILL entropy (i.e., being computationally indistinguishable from a key with high min-entropy) from an unpredictable source.

Previous to this work, the only known way to transform unpredictability into a key that was ϵ-indistinguishable from having min-entropy was via pseudorandomness, for example by Goldreich-Levin (GL) hardcore bits. This approach has the inherent limitation that from a source with k bits of unpredictability entropy one can derive a key of length (and thus HILL entropy) at most k−2log(1/ϵ) bits. In many settings, e.g. when dealing with biometric data, such a 2log(1/ϵ) bit entropy loss is not an option. Our main technical contribution is a theorem that states that in the high entropy regime, unpredictability implies HILL entropy. Concretely, any variable K with |K|−d bits of unpredictability entropy has the same amount of so-called metric entropy (against real-valued, deterministic distinguishers), which is known to imply the same amount of HILL entropy. The loss in circuit size in this argument is exponential in the entropy gap d, and thus this result only applies for small d (i.e., where the size of distinguishers considered is exponential in d).

To overcome the above restriction, we investigate if it’s possible to first “condense” unpredictability entropy and make the entropy gap small. We show that any source with k bits of unpredictability can be condensed into a source of length k with k−3 bits of unpredictability entropy. Our condenser simply “abuses” the GL construction and derives a k bit key from a source with k bits of unpredictability. The original GL theorem implies nothing when extracting that many bits, but we show that in this regime, GL still behaves like a “condenser” for unpredictability. This result comes with two caveats: (1) the loss in circuit size is exponential in k and (2) we require that the source we start with has no HILL entropy (equivalently, one can efficiently check if a guess is correct). We leave it as an intriguing open problem to overcome these restrictions or to prove they’re inherent.},
  author       = {Skórski, Maciej and Golovnev, Alexander and Pietrzak, Krzysztof Z},
  location     = {Kyoto, Japan},
  pages        = {1046 -- 1057},
  publisher    = {Springer},
  title        = {{Condensed unpredictability}},
  doi          = {10.1007/978-3-662-47672-7_85},
  volume       = {9134},
  year         = {2015},
}

@inproceedings{1651,
  abstract     = {Cryptographic e-cash allows off-line electronic transactions between a bank, users and merchants in a secure and anonymous fashion. A plethora of e-cash constructions has been proposed in the literature; however, these traditional e-cash schemes only allow coins to be transferred once between users and merchants. Ideally, we would like users to be able to transfer coins between each other multiple times before deposit, as happens with physical cash. “Transferable” e-cash schemes are the solution to this problem. Unfortunately, the currently proposed schemes are either completely impractical or do not achieve the desirable anonymity properties without compromises, such as assuming the existence of a trusted “judge” who can trace all coins and users in the system. This paper presents the first efficient and fully anonymous transferable e-cash scheme without any trusted third parties. We start by revising the security and anonymity properties of transferable e-cash to capture issues that were previously overlooked. For our construction we use the recently proposed malleable signatures by Chase et al. to allow the secure and anonymous transfer of coins, combined with a new efficient double-spending detection mechanism. Finally, we discuss an instantiation of our construction.},
  author       = {Baldimtsi, Foteini and Chase, Melissa and Fuchsbauer, Georg and Kohlweiss, Markulf},
  booktitle    = {Public-Key Cryptography - PKC 2015},
  isbn         = {978-3-662-46446-5},
  location     = {Gaithersburg, MD, United States},
  pages        = {101 -- 124},
  publisher    = {Springer},
  title        = {{Anonymous transferable e-cash}},
  doi          = {10.1007/978-3-662-46447-2_5},
  volume       = {9020},
  year         = {2015},
}

@inproceedings{1652,
  abstract     = {We develop new theoretical tools for proving lower-bounds on the (amortized) complexity of certain functions in models of parallel computation. We apply the tools to construct a class of functions with high amortized memory complexity in the parallel Random Oracle Model (pROM), a variant of the standard ROM allowing for batches of simultaneous queries. In particular we obtain a new, more robust, type of Memory-Hard Functions (MHF), a security primitive which has recently been gaining acceptance in practice as an effective means of countering brute-force attacks on security-relevant functions. Along the way we also demonstrate an important shortcoming of previous definitions of MHFs and give a new definition addressing the problem. The tools we develop represent an adaptation of the powerful pebbling paradigm (initially introduced by Hewitt and Paterson [HP70] and Cook [Coo73]) to a simple and intuitive parallel setting. We define a simple pebbling game G_p over graphs which aims to abstract parallel computation in an intuitive way. As a conceptual contribution we define a measure of pebbling complexity for graphs called cumulative complexity (CC) and show how it overcomes a crucial shortcoming (in the parallel setting) exhibited by more traditional complexity measures used in the past. As a main technical contribution we give an explicit construction of a constant in-degree family of graphs whose CC in G_p approaches maximality to within a polylogarithmic factor for any graph of equal size (analogous to the graphs of Tarjan et al. [PTC76, LT82] for sequential pebbling games). Finally, for a given graph G and related function f_G, we derive a lower-bound on the amortized memory complexity of f_G in the pROM in terms of the CC of G in the game G_p.},
  author       = {Alwen, Joel F and Serbinenko, Vladimir},
  booktitle    = {Proceedings of the 47th annual ACM symposium on Theory of computing},
  location     = {Portland, OR, United States},
  pages        = {595 -- 603},
  publisher    = {ACM},
  title        = {{High parallel complexity graphs and memory-hard functions}},
  doi          = {10.1145/2746539.2746622},
  year         = {2015},
}

@inproceedings{1654,
  abstract     = {HMAC and its variant NMAC are the most popular approaches to deriving a MAC (and more generally, a PRF) from a cryptographic hash function. Despite nearly two decades of research, their exact security still remains far from understood in many different contexts. Indeed, recent works have renewed interest in {\em generic} attacks, i.e., attacks that treat the compression function of the underlying hash function as a black box.

Generic security can be proved in a model where the underlying compression function is modeled as a random function -- yet, to date, the question of proving tight, non-trivial bounds on the generic security of HMAC/NMAC even as a PRF remains a challenging open question.

In this paper, we ask the question of whether a small modification to HMAC and NMAC can allow us to exactly characterize the security of the resulting constructions, while only incurring little penalty with respect to efficiency. To this end, we present simple variants of NMAC and HMAC, for which we prove tight bounds on the generic PRF security, expressed in terms of numbers of construction and compression function queries necessary to break the construction. All of our constructions are obtained via a (near) {\em black-box} modification of NMAC and HMAC, which can be interpreted as an initial step of key-dependent message pre-processing.

While our focus is on PRF security, a further attractive feature of our new constructions is that they clearly defeat all recent generic attacks against properties such as state recovery and universal forgery. These exploit properties of the so-called ``functional graph'' which are not directly accessible in our new constructions. },
  author       = {Gazi, Peter and Pietrzak, Krzysztof Z and Tessaro, Stefano},
  location     = {Auckland, New Zealand},
  pages        = {85 -- 109},
  publisher    = {Springer},
  title        = {{Generic security of NMAC and HMAC with input whitening}},
  doi          = {10.1007/978-3-662-48800-3_4},
  volume       = {9453},
  year         = {2015},
}

@article{1655,
  abstract     = {Quantifying behaviors of robots which were generated autonomously from task-independent objective functions is an important prerequisite for objective comparisons of algorithms and movements of animals. The temporal sequence of such a behavior can be considered as a time series and hence complexity measures developed for time series are natural candidates for its quantification. The predictive information and the excess entropy are such complexity measures. They measure the amount of information the past contains about the future and thus quantify the nonrandom structure in the temporal sequence. However, when using these measures for systems with continuous states one has to deal with the fact that their values will depend on the resolution with which the system's states are observed. For deterministic systems both measures will diverge with increasing resolution. We therefore propose a new decomposition of the excess entropy into resolution-dependent and resolution-independent parts and discuss how they depend on the dimensionality of the dynamics, correlations and the noise level. For the practical estimation we propose to use estimates based on the correlation integral instead of the direct estimation of the mutual information based on next-neighbor statistics, because the latter allows less control of the scale dependencies. Using our algorithm we are able to show how autonomous learning generates behavior of increasing complexity with increasing learning duration.},
  author       = {Martius, Georg S and Olbrich, Eckehard},
  journal      = {Entropy},
  number       = {10},
  pages        = {7266 -- 7297},
  publisher    = {MDPI},
  title        = {{Quantifying emergent behavior of autonomous robots}},
  doi          = {10.3390/e17107266},
  volume       = {17},
  year         = {2015},
}

@inproceedings{1656,
  abstract     = {Recently there has been a significant effort to handle quantitative properties in formal verification and synthesis. While weighted automata over finite and infinite words provide a natural and flexible framework to express quantitative properties, perhaps surprisingly, some basic system properties such as average response time cannot be expressed using weighted automata, nor in any other known decidable formalism. In this work, we introduce nested weighted automata as a natural extension of weighted automata which makes it possible to express important quantitative properties such as average response time. In nested weighted automata, a master automaton spins off and collects results from weighted slave automata, each of which computes a quantity along a finite portion of an infinite word. Nested weighted automata can be viewed as the quantitative analogue of monitor automata, which are used in run-time verification. We establish an almost complete decidability picture for the basic decision problems about nested weighted automata, and illustrate their applicability in several domains. In particular, nested weighted automata can be used to decide average response time properties.},
  author       = {Chatterjee, Krishnendu and Henzinger, Thomas A and Otop, Jan},
  booktitle    = {Proceedings - Symposium on Logic in Computer Science},
  location     = {Kyoto, Japan},
  publisher    = {IEEE},
  title        = {{Nested weighted automata}},
  doi          = {10.1109/LICS.2015.72},
  volume       = {2015-July},
  year         = {2015},
}

@inproceedings{1657,
  abstract     = {We consider Markov decision processes (MDPs) with multiple limit-average (or mean-payoff) objectives. There exist two different views: (i) the expectation semantics, where the goal is to optimize the expected mean-payoff objective, and (ii) the satisfaction semantics, where the goal is to maximize the probability of runs such that the mean-payoff value stays above a given vector. We consider optimization with respect to both objectives at once, thus unifying the existing semantics. Precisely, the goal is to optimize the expectation while ensuring the satisfaction constraint. Our problem captures the notion of optimization with respect to strategies that are risk-averse (i.e., ensure a certain probabilistic guarantee). Our main results are as follows: First, we present algorithms for the decision problems, which are always polynomial in the size of the MDP. We also show that an approximation of the Pareto curve can be computed in time polynomial in the size of the MDP and the approximation factor, but exponential in the number of dimensions. Second, we present a complete characterization of the strategy complexity (in terms of memory bounds and randomization) required to solve our problem.},
  author       = {Chatterjee, Krishnendu and Komárková, Zuzana and Kretinsky, Jan},
  location     = {Kyoto, Japan},
  pages        = {244 -- 256},
  publisher    = {IEEE},
  title        = {{Unifying two views on multiple mean-payoff objectives in Markov decision processes}},
  doi          = {10.1109/LICS.2015.32},
  year         = {2015},
}

@inproceedings{1659,
  abstract     = {The target discounted-sum problem is the following: Given a rational discount factor 0 < λ < 1 and three rational values a, b, and t, does there exist a finite or an infinite sequence w ∈ {a, b}* or w ∈ {a, b}^ω, such that Σ_{i=0}^{|w|} w(i)λ^i equals t? The problem turns out to relate to many fields of mathematics and computer science, and its decidability question is surprisingly hard to solve. We solve the finite version of the problem, and show the hardness of the infinite version, linking it to various areas and open problems in mathematics and computer science: β-expansions, discounted-sum automata, piecewise affine maps, and generalizations of the Cantor set. We provide some partial results to the infinite version, among which are solutions to its restriction to eventually-periodic sequences and to the cases that λ ≥ 1/2 or λ = 1/n, for every n ∈ N. We use our results for solving some open problems on discounted-sum automata, among which are the exact-value problem for nondeterministic automata over finite words and the universality and inclusion problems for functional automata.},
  author       = {Boker, Udi and Henzinger, Thomas A and Otop, Jan},
  booktitle    = {Proceedings - Symposium on Logic in Computer Science},
  issn         = {1043-6871 },
  location     = {Kyoto, Japan},
  pages        = {750 -- 761},
  publisher    = {IEEE},
  title        = {{The target discounted-sum problem}},
  doi          = {10.1109/LICS.2015.74},
  year         = {2015},
}

@inproceedings{1660,
  abstract     = {We study the pattern frequency vector for runs in probabilistic Vector Addition Systems with States (pVASS). Intuitively, each configuration of a given pVASS is assigned one of finitely many patterns, and every run can thus be seen as an infinite sequence of these patterns. The pattern frequency vector assigns to each run the limit of pattern frequencies computed for longer and longer prefixes of the run. If the limit does not exist, then the vector is undefined. We show that for one-counter pVASS, the pattern frequency vector is defined and takes one of finitely many values for almost all runs. Further, these values and their associated probabilities can be approximated up to an arbitrarily small relative error in polynomial time. For stable two-counter pVASS, we show the same result, but we do not provide any upper complexity bound. As a byproduct of our study, we discover counterexamples falsifying some classical results about stochastic Petri nets published in the 80s.},
  author       = {Brázdil, Tomáš and Kiefer, Stefan and Kučera, Antonín and Novotny, Petr},
  location     = {Kyoto, Japan},
  pages        = {44 -- 55},
  publisher    = {IEEE},
  title        = {{Long-run average behaviour of probabilistic vector addition systems}},
  doi          = {10.1109/LICS.2015.15},
  year         = {2015},
}

@inproceedings{1661,
  abstract     = {The computation of the winning set for one-pair Streett objectives and for k-pair Streett objectives in (standard) graphs as well as in game graphs is a central problem in computer-aided verification, with applications to the verification of closed systems with strong fairness conditions, the verification of open systems, checking interface compatibility, well-formedness of specifications, and the synthesis of reactive systems. We give faster algorithms for the computation of the winning set for (1) one-pair Streett objectives (aka the parity-3 problem) in game graphs and (2) k-pair Streett objectives in graphs. For both problems this represents the first improvement in asymptotic running time in 15 years.},
  author       = {Chatterjee, Krishnendu and Henzinger, Monika H and Loitzenbauer, Veronika},
  booktitle    = {Proceedings - Symposium on Logic in Computer Science},
  location     = {Kyoto, Japan},
  publisher    = {IEEE},
  title        = {{Improved algorithms for one-pair and k-pair Streett objectives}},
  doi          = {10.1109/LICS.2015.34},
  volume       = {2015-July},
  year         = {2015},
}

