@inproceedings{14888, abstract = {A face in a curve arrangement is called popular if it is bounded by the same curve multiple times. Motivated by the automatic generation of curved nonogram puzzles, we investigate possibilities to eliminate the popular faces in an arrangement by inserting a single additional curve. This turns out to be NP-hard; however, it becomes tractable when the number of popular faces is small: We present a probabilistic FPT-approach in the number of popular faces.}, author = {De Nooijer, Phoebe and Terziadis, Soeren and Weinberger, Alexandra and Masárová, Zuzana and Mchedlidze, Tamara and Löffler, Maarten and Rote, Günter}, booktitle = {31st International Symposium on Graph Drawing and Network Visualization}, isbn = {9783031492747}, issn = {1611-3349}, location = {Isola delle Femmine, Palermo, Italy}, pages = {18--33}, publisher = {Springer Nature}, title = {{Removing popular faces in curve arrangements}}, doi = {10.1007/978-3-031-49275-4_2}, volume = {14466}, year = {2024}, } @inproceedings{12854, abstract = {The main idea behind BUBAAK is to run multiple program analyses in parallel and use runtime monitoring and enforcement to observe and control their progress in real time. The analyses send information about (un)explored states of the program and discovered invariants to a monitor. The monitor processes the received data and can force an analysis to stop the search of certain program parts (which have already been analyzed by other analyses), or to make it utilize a program invariant found by another analysis. At SV-COMP 2023, the implementation of data exchange between the monitor and the analyses was not yet completed, which is why BUBAAK only ran several analyses in parallel, without any coordination. Still, BUBAAK won the meta-category FalsificationOverall and placed very well in several other (sub)-categories of the competition.}, author = {Chalupa, Marek and Henzinger, Thomas A}, booktitle = {Tools and Algorithms for the Construction and Analysis of Systems}, isbn = {9783031308192}, issn = {1611-3349}, location = {Paris, France}, pages = {535--540}, publisher = {Springer Nature}, title = {{Bubaak: Runtime monitoring of program verifiers}}, doi = {10.1007/978-3-031-30820-8_32}, volume = {13994}, year = {2023}, } @inproceedings{12856, abstract = {As the complexity and criticality of software increase every year, so does the importance of run-time monitoring. Third-party monitoring, with limited knowledge of the monitored software, and best-effort monitoring, which keeps pace with the monitored software, are especially valuable, yet underexplored areas of run-time monitoring. Most existing monitoring frameworks do not support their combination because they either require access to the monitored code for instrumentation purposes or the processing of all observed events, or both. We present a middleware framework, VAMOS, for the run-time monitoring of software which is explicitly designed to support third-party and best-effort scenarios. The design goals of VAMOS are (i) efficiency (keeping pace at low overhead), (ii) flexibility (the ability to monitor black-box code through a variety of different event channels, and the connectability to monitors written in different specification languages), and (iii) ease-of-use. To achieve its goals, VAMOS combines aspects of event broker and event recognition systems with aspects of stream processing systems. 
We implemented a prototype toolchain for VAMOS and conducted experiments including a case study of monitoring for data races. The results indicate that VAMOS enables writing useful yet efficient monitors, is compatible with a variety of event sources and monitor specifications, and simplifies key aspects of setting up a monitoring system from scratch.}, author = {Chalupa, Marek and Mühlböck, Fabian and Muroya Lei, Stefanie and Henzinger, Thomas A}, booktitle = {Fundamental Approaches to Software Engineering}, isbn = {9783031308253}, issn = {1611-3349}, location = {Paris, France}, pages = {260--281}, publisher = {Springer Nature}, title = {{Vamos: Middleware for best-effort third-party monitoring}}, doi = {10.1007/978-3-031-30826-0_15}, volume = {13991}, year = {2023}, } @inproceedings{13143, abstract = {GIMPS and PrimeGrid are large-scale distributed projects dedicated to searching for giant prime numbers, usually of special forms like Mersenne and Proth primes. The numbers in the current search-space are millions of digits long and the participating volunteers need to run resource-consuming primality tests. Once a candidate prime N has been found, the only way for another party to independently verify the primality of N used to be by repeating the expensive primality test. To avoid the need for a second computation of each primality test, these projects have recently adopted certifying mechanisms that enable efficient verification of performed tests. However, the mechanisms presently in place only detect benign errors and there is no guarantee against adversarial behavior: a malicious volunteer can mislead the project into rejecting a giant prime as non-prime. In this paper, we propose a practical, cryptographically-sound mechanism for certifying the non-primality of Proth numbers. That is, a volunteer can – in parallel with running the primality test for N – generate, at little extra cost, an efficiently verifiable proof certifying that N is not prime. The interactive protocol has statistical soundness and can be made non-interactive using the Fiat-Shamir heuristic. Our approach is based on a cryptographic primitive called Proof of Exponentiation (PoE) which, for a group G, certifies that a tuple (x, y, T) ∈ G² × N satisfies x^(2^T) = y (Pietrzak, ITCS 2019 and Wesolowski, J. Cryptol. 2020). In particular, we show how to adapt Pietrzak’s PoE at a moderate additional cost to make it a cryptographically-sound certificate of non-primality.}, author = {Hoffmann, Charlotte and Hubáček, Pavel and Kamath, Chethan and Pietrzak, Krzysztof Z}, booktitle = {Public-Key Cryptography - PKC 2023}, isbn = {9783031313677}, issn = {1611-3349}, location = {Atlanta, GA, United States}, pages = {530--553}, publisher = {Springer Nature}, title = {{Certifying giant nonprimes}}, doi = {10.1007/978-3-031-31368-4_19}, volume = {13940}, year = {2023}, } @inproceedings{13142, abstract = {Reinforcement learning has received much attention for learning controllers of deterministic systems. We consider a learner-verifier framework for stochastic control systems and survey recent methods that formally guarantee a conjunction of reachability and safety properties. Given a property and a lower bound on the probability of the property being satisfied, our framework jointly learns a control policy and a formal certificate to ensure the satisfaction of the property with a desired probability threshold. 
Both the control policy and the formal certificate are continuous functions from states to reals, which are learned as parameterized neural networks. While in the deterministic case the certificates are invariants and barrier functions for safety, or Lyapunov and ranking functions for liveness, in the stochastic case the certificates are supermartingales. For certificate verification, we use interval arithmetic abstract interpretation to bound the expected values of neural network functions.}, author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Lechner, Mathias and Zikelic, Dorde}, booktitle = {Tools and Algorithms for the Construction and Analysis of Systems}, isbn = {9783031308222}, issn = {1611-3349}, location = {Paris, France}, pages = {3--25}, publisher = {Springer Nature}, title = {{A learner-verifier framework for neural network controllers and certificates of stochastic systems}}, doi = {10.1007/978-3-031-30823-9_1}, volume = {13993}, year = {2023}, } @inproceedings{13141, abstract = {We automatically compute a new class of environment assumptions in two-player turn-based finite graph games which characterize an “adequate cooperation” needed from the environment to allow the system player to win. Given an ω-regular winning condition Φ for the system player, we compute an ω-regular assumption Ψ for the environment player, such that (i) every environment strategy compliant with Ψ allows the system to fulfill Φ (sufficiency), (ii) Ψ can be fulfilled by the environment for every strategy of the system (implementability), and (iii) Ψ does not prevent any cooperative strategy choice (permissiveness). For parity games, which are canonical representations of ω-regular games, we present a polynomial-time algorithm for the symbolic computation of adequately permissive assumptions and show that our algorithm runs faster and produces better assumptions than existing approaches, both theoretically and empirically. To the best of our knowledge, for ω-regular games, we provide the first algorithm to compute sufficient and implementable environment assumptions that are also permissive.}, author = {Anand, Ashwani and Mallik, Kaushik and Nayak, Satya Prakash and Schmuck, Anne-Kathrin}, booktitle = {TACAS 2023: Tools and Algorithms for the Construction and Analysis of Systems}, isbn = {9783031308192}, issn = {1611-3349}, location = {Paris, France}, pages = {211--228}, publisher = {Springer Nature}, title = {{Computing adequately permissive assumptions for synthesis}}, doi = {10.1007/978-3-031-30820-8_15}, volume = {13994}, year = {2023}, } @inproceedings{12467, abstract = {Safety and liveness are elementary concepts of computation, and the foundation of many verification paradigms. The safety-liveness classification of boolean properties characterizes whether a given property can be falsified by observing a finite prefix of an infinite computation trace (always for safety, never for liveness). In quantitative specification and verification, properties assign not truth values, but quantitative values to infinite traces (e.g., a cost, or the distance to a boolean property). We introduce quantitative safety and liveness, and we prove that our definitions induce conservative quantitative generalizations of both (1) the safety-progress hierarchy of boolean properties and (2) the safety-liveness decomposition of boolean properties. In particular, we show that every quantitative property can be written as the pointwise minimum of a quantitative safety property and a quantitative liveness property. 
Consequently, like boolean properties, quantitative properties can also be min-decomposed into safety and liveness parts or, alternatively, max-decomposed into co-safety and co-liveness parts. Moreover, quantitative properties can be approximated naturally. We prove that every quantitative property that has both safe and co-safe approximations can be monitored arbitrarily precisely by a monitor that uses only a finite number of states.}, author = {Henzinger, Thomas A and Mazzocchi, Nicolas Adrien and Sarac, Naci E}, booktitle = {26th International Conference on Foundations of Software Science and Computation Structures}, isbn = {9783031308284}, issn = {1611-3349}, location = {Paris, France}, pages = {349--370}, publisher = {Springer Nature}, title = {{Quantitative safety and liveness}}, doi = {10.1007/978-3-031-30829-1_17}, volume = {13992}, year = {2023}, } @inproceedings{13310, abstract = {Machine-learned systems are in widespread use for making decisions about humans, and it is important that they are fair, i.e., not biased against individuals based on sensitive attributes. We present runtime verification of algorithmic fairness for systems whose models are unknown, but are assumed to have a Markov chain structure. We introduce a specification language that can model many common algorithmic fairness properties, such as demographic parity, equal opportunity, and social burden. We build monitors that observe a long sequence of events as generated by a given system, and output, after each observation, a quantitative estimate of how fair or biased the system was on that run until that point in time. The estimate is proven to be correct modulo a variable error bound and a given confidence level, where the error bound gets tighter as the observed sequence gets longer. Our monitors are of two types, and use, respectively, frequentist and Bayesian statistical inference techniques. While the frequentist monitors compute estimates that are objectively correct with respect to the ground truth, the Bayesian monitors compute estimates that are correct subject to a given prior belief about the system’s model. Using a prototype implementation, we show how we can monitor if a bank is fair in giving loans to applicants from different social backgrounds, and if a college is fair in admitting students while maintaining a reasonable financial burden on society. Although they exhibit different theoretical complexities in certain cases, in our experiments, both frequentist and Bayesian monitors took less than a millisecond to update their verdicts after each observation.}, author = {Henzinger, Thomas A and Karimi, Mahyar and Kueffner, Konstantin and Mallik, Kaushik}, booktitle = {Computer Aided Verification}, isbn = {9783031377020}, issn = {1611-3349}, location = {Paris, France}, pages = {358--382}, publisher = {Springer Nature}, title = {{Monitoring algorithmic fairness}}, doi = {10.1007/978-3-031-37703-7_17}, volume = {13965}, year = {2023}, } @inproceedings{14259, abstract = {We provide a learning-based technique for guessing a winning strategy in a parity game originating from an LTL synthesis problem. A cheaply obtained guess can be useful in several applications. Not only can the guessed strategy be applied as best-effort in cases where the game’s huge size prohibits rigorous approaches, but it can also increase the scalability of rigorous LTL synthesis in several ways. Firstly, checking whether a guessed strategy is winning is easier than constructing one. 
Secondly, even if the guess is wrong in some places, it can be fixed by strategy iteration faster than constructing one from scratch. Thirdly, the guess can be used in on-the-fly approaches to prioritize exploration in the most fruitful directions. In contrast to previous works, we (i) reflect the highly structured logical information in the game’s states, the so-called semantic labelling, coming from the recent LTL-to-automata translations, and (ii) learn to reflect it properly by learning from previously solved games, bringing the solving process closer to human-like reasoning.}, author = {Kretinsky, Jan and Meggendorfer, Tobias and Prokop, Maximilian and Rieder, Sabine}, booktitle = {35th International Conference on Computer Aided Verification}, isbn = {9783031377051}, issn = {1611-3349}, location = {Paris, France}, pages = {390--414}, publisher = {Springer Nature}, title = {{Guessing winning policies in LTL synthesis by semantic learning}}, doi = {10.1007/978-3-031-37706-8_20}, volume = {13964}, year = {2023}, } @inproceedings{14318, abstract = {Probabilistic recurrence relations (PRRs) are a standard formalism for describing the runtime of a randomized algorithm. Given a PRR and a time limit κ, we consider the tail probability Pr[T≥κ], i.e., the probability that the randomized runtime T of the PRR exceeds κ. Our focus is the formal analysis of tail bounds that aims at finding a tight asymptotic upper bound u≥Pr[T≥κ]. To address this problem, the classical and most well-known approach is the cookbook method by Karp (JACM 1994), while other approaches are mostly limited to deriving tail bounds of specific PRRs via involved custom analysis. In this work, we propose a novel approach for deriving the common exponentially-decreasing tail bounds for PRRs whose preprocessing time and random passed sizes observe a discrete or (piecewise) uniform distribution and whose recursive call is either a single procedure call or a divide-and-conquer. We first establish a theoretical approach via Markov’s inequality, and then instantiate the theoretical approach with a template-based algorithmic approach via a refined treatment of exponentiation. Experimental evaluation shows that our algorithmic approach is capable of deriving tail bounds that (i) are asymptotically tighter than Karp’s method, (ii) match the best-known manually-derived asymptotic tail bound for QuickSelect, and (iii) are only slightly worse (by a log log n factor) than the manually-proven optimal asymptotic tail bound for QuickSort. Moreover, our algorithmic approach handles all examples (including realistic PRRs such as QuickSort, QuickSelect, DiameterComputation, etc.) in less than 0.1 s, showing that our approach is efficient in practice.}, author = {Sun, Yican and Fu, Hongfei and Chatterjee, Krishnendu and Goharshady, Amir Kafshdar}, booktitle = {Computer Aided Verification}, isbn = {9783031377082}, issn = {1611-3349}, location = {Paris, France}, pages = {16--39}, publisher = {Springer Nature}, title = {{Automated tail bound analysis for probabilistic recurrence relations}}, doi = {10.1007/978-3-031-37709-9_2}, volume = {13966}, year = {2023}, } @inproceedings{14317, abstract = {Markov decision processes can be viewed as transformers of probability distributions. While this view is useful from a practical standpoint to reason about trajectories of distributions, basic reachability and safety problems are known to be computationally intractable (i.e., Skolem-hard) to solve in such models. 
Further, we show that even for simple examples of MDPs, strategies for safety objectives over distributions can require infinite memory and randomization. In light of this, we present a novel overapproximation approach to synthesize strategies in an MDP, such that a safety objective over the distributions is met. More precisely, we develop a new framework for template-based synthesis of certificates as affine distributional and inductive invariants for safety objectives in MDPs. We provide two algorithms within this framework. One can only synthesize memoryless strategies, but has relative completeness guarantees, while the other can synthesize general strategies. The runtime complexity of both algorithms is in PSPACE. We implement these algorithms and show that they can solve several non-trivial examples.}, author = {Akshay, S. and Chatterjee, Krishnendu and Meggendorfer, Tobias and Zikelic, Dorde}, booktitle = {International Conference on Computer Aided Verification}, isbn = {9783031377082}, issn = {1611-3349}, location = {Paris, France}, pages = {86--112}, publisher = {Springer Nature}, title = {{MDPs as distribution transformers: Affine invariant synthesis for safety objectives}}, doi = {10.1007/978-3-031-37709-9_5}, volume = {13966}, year = {2023}, } @inproceedings{14410, abstract = {This paper focuses on the implementation details of the baseline methods and a recent lightweight conditional model extrapolation algorithm LIMES [5] for streaming data under class-prior shift. LIMES achieves superior performance over the baseline methods, especially concerning the minimum-across-day accuracy, which is important for the users of the system. In this work, the key measures to facilitate reproducibility and enhance the credibility of the results are described.}, author = {Tomaszewska, Paulina and Lampert, Christoph}, booktitle = {International Workshop on Reproducible Research in Pattern Recognition}, isbn = {9783031407727}, issn = {1611-3349}, location = {Montreal, Canada}, pages = {67--73}, publisher = {Springer Nature}, title = {{On the implementation of baselines and lightweight conditional model extrapolation (LIMES) under class-prior shift}}, doi = {10.1007/978-3-031-40773-4_6}, volume = {14068}, year = {2023}, } @inproceedings{14428, abstract = {Suppose we have two hash functions h1 and h2, but we trust the security of only one of them. To mitigate this worry, we wish to build a hash combiner C^{h1,h2} which is secure so long as one of the underlying hash functions is. This question has been well-studied in the regime of collision resistance. In this case, concatenating the two hash function outputs clearly works. Unfortunately, a long series of works (Boneh and Boyen, CRYPTO’06; Pietrzak, Eurocrypt’07; Pietrzak, CRYPTO’08) showed that no (noticeably) shorter combiner for collision resistance is possible. In this work, we revisit this pessimistic state of affairs, motivated by the observation that collision-resistance is insufficient for many interesting applications of cryptographic hash functions anyway. We argue the right formulation of the “hash combiner” is to build what we call random oracle (RO) combiners, utilizing stronger assumptions for stronger constructions. Indeed, we circumvent the previous lower bounds for collision resistance by constructing a simple length-preserving RO combiner C̃^{h1,h2}_{Z1,Z2}(M) = h1(M, Z1) ⊕ h2(M, Z2), where Z1, Z2 are random salts of appropriate length. 
We show that this extra randomness is necessary for RO combiners, and indeed our construction is somewhat tight with this lower bound. On the negative side, we show that one cannot generically apply the composition theorem to further replace “monolithic” hash functions h1 and h2 by some simpler indifferentiable construction (such as the Merkle-Damgård transformation) from smaller components, such as fixed-length compression functions. Finally, despite this issue, we directly prove collision resistance of the Merkle-Damgård variant of our combiner, where h1 and h2 are replaced by iterative Merkle-Damgård hashes applied to a fixed-length compression function. Thus, we can still subvert the concatenation barrier for collision-resistance combiners while utilizing practically small fixed-length components underneath.}, author = {Dodis, Yevgeniy and Ferguson, Niels and Goldin, Eli and Hall, Peter and Pietrzak, Krzysztof Z}, booktitle = {43rd Annual International Cryptology Conference}, isbn = {9783031385445}, issn = {1611-3349}, location = {Santa Barbara, CA, United States}, pages = {514--546}, publisher = {Springer Nature}, title = {{Random oracle combiners: Breaking the concatenation barrier for collision-resistance}}, doi = {10.1007/978-3-031-38545-2_17}, volume = {14082}, year = {2023}, } @inproceedings{14457, abstract = {Threshold secret sharing allows a dealer to split a secret s into n shares, such that any t shares allow for reconstructing s, but no t-1 shares reveal any information about s. Leakage-resilient secret sharing requires that the secret remains hidden, even when an adversary additionally obtains a limited amount of leakage from every share. Benhamouda et al. (CRYPTO’18) proved that Shamir’s secret sharing scheme is one-bit leakage-resilient for reconstruction threshold t≥0.85n and conjectured that the same holds for t = c·n for any constant 0≤c≤1. Nielsen and Simkin (EUROCRYPT’20) showed that this is the best one can hope for by proving that Shamir’s scheme is not secure against one-bit leakage when t ≤ c·n/log(n). In this work, we strengthen the lower bound of Nielsen and Simkin. We consider noisy leakage-resilience, where a random subset of leakages is replaced by uniformly random noise. We prove a lower bound for Shamir’s secret sharing, similar to that of Nielsen and Simkin, which holds even when a constant fraction of leakages is replaced by random noise. To this end, we first prove a lower bound on the share size of any noisy-leakage-resilient sharing scheme. We then use this lower bound to show that there exist universal constants c₁, c₂, such that for sufficiently large n it holds that Shamir’s secret sharing scheme is not noisy-leakage-resilient for t ≤ c₁·n/log(n), even when a c₂ fraction of leakages are replaced by random noise.}, author = {Hoffmann, Charlotte and Simkin, Mark}, booktitle = {8th International Conference on Cryptology and Information Security in Latin America}, isbn = {9783031444685}, issn = {1611-3349}, location = {Quito, Ecuador}, pages = {215--228}, publisher = {Springer Nature}, title = {{Stronger lower bounds for leakage-resilient secret sharing}}, doi = {10.1007/978-3-031-44469-2_11}, volume = {14168}, year = {2023}, } @inproceedings{14454, abstract = {As AI and machine-learned software are used increasingly for making decisions that affect humans, it is imperative that they remain fair and unbiased in their decisions. 
To complement design-time bias mitigation measures, runtime verification techniques have been introduced recently to monitor the algorithmic fairness of deployed systems. Previous monitoring techniques assume full observability of the states of the (unknown) monitored system. Moreover, they can monitor only fairness properties that are specified as arithmetic expressions over the probabilities of different events. In this work, we extend fairness monitoring to systems modeled as partially observed Markov chains (POMC), and to specifications containing arithmetic expressions over the expected values of numerical functions on event sequences. The only assumptions we make are that the underlying POMC is aperiodic and starts in the stationary distribution, with a bound on its mixing time being known. These assumptions enable us to estimate a given property for the entire distribution of possible executions of the monitored POMC, by observing only a single execution. Our monitors observe a long run of the system and, after each new observation, output updated PAC-estimates of how fair or biased the system is. The monitors are computationally lightweight and, using a prototype implementation, we demonstrate their effectiveness on several real-world examples.}, author = {Henzinger, Thomas A and Kueffner, Konstantin and Mallik, Kaushik}, booktitle = {23rd International Conference on Runtime Verification}, isbn = {9783031442667}, issn = {1611-3349}, location = {Thessaloniki, Greece}, pages = {291--311}, publisher = {Springer Nature}, title = {{Monitoring algorithmic fairness under partial observations}}, doi = {10.1007/978-3-031-44267-4_15}, volume = {14245}, year = {2023}, } @inproceedings{14559, abstract = {We consider the problem of learning control policies in discrete-time stochastic systems which guarantee that the system stabilizes within some specified stabilization region with probability 1. Our approach is based on the novel notion of stabilizing ranking supermartingales (sRSMs) that we introduce in this work. Our sRSMs overcome the limitation of methods proposed in previous works whose applicability is restricted to systems in which the stabilizing region cannot be left once entered under any control policy. We present a learning procedure that learns a control policy together with an sRSM that formally certifies probability 1 stability, both learned as neural networks. We show that this procedure can also be adapted to formally verifying that, under a given Lipschitz continuous control policy, the stochastic system stabilizes within some stabilizing region with probability 1. Our experimental evaluation shows that our learning procedure can successfully learn provably stabilizing policies in practice.}, author = {Ansaripour, Matin and Chatterjee, Krishnendu and Henzinger, Thomas A and Lechner, Mathias and Zikelic, Dorde}, booktitle = {21st International Symposium on Automated Technology for Verification and Analysis}, isbn = {9783031453281}, issn = {1611-3349}, location = {Singapore, Singapore}, pages = {357--379}, publisher = {Springer Nature}, title = {{Learning provably stabilizing neural controllers for discrete-time stochastic systems}}, doi = {10.1007/978-3-031-45329-8_17}, volume = {14215}, year = {2023}, } @inproceedings{13238, abstract = {We consider a natural problem dealing with weighted packet selection across a rechargeable link, which e.g., finds applications in cryptocurrency networks. 
The capacity of a link (u, v) is determined by how much nodes u and v allocate for this link. Specifically, the input is a finite ordered sequence of packets that arrive in both directions along a link. Given (u, v) and a packet of weight x going from u to v, node u can either accept or reject the packet. If u accepts the packet, the capacity on link (u, v) decreases by x. Correspondingly, v’s capacity on (u, v) increases by x. If a node rejects the packet, this will entail a cost affinely linear in the weight of the packet. A link is “rechargeable” in the sense that the total capacity of the link has to remain constant, but the allocation of capacity at the ends of the link can depend arbitrarily on the nodes’ decisions. The goal is to minimise the sum of the capacity injected into the link and the cost of rejecting packets. We show that the problem is NP-hard, but can be approximated efficiently with a ratio of (1+ε)⋅(1+√3) for an arbitrary ε>0.}, author = {Schmid, Stefan and Svoboda, Jakub and Yeo, Michelle X}, booktitle = {SIROCCO 2023: Structural Information and Communication Complexity}, isbn = {9783031327322}, issn = {1611-3349}, location = {Alcala de Henares, Spain}, pages = {576--594}, publisher = {Springer Nature}, title = {{Weighted packet selection for rechargeable links in cryptocurrency networks: Complexity and approximation}}, doi = {10.1007/978-3-031-32733-9_26}, volume = {13892}, year = {2023}, } @inproceedings{14693, abstract = {Lucas sequences are constant-recursive integer sequences with a long history of applications in cryptography, both in the design of cryptographic schemes and in cryptanalysis. In this work, we study the sequential hardness of computing Lucas sequences over an RSA modulus. First, we show that modular Lucas sequences are at least as sequentially hard as the classical delay function given by iterated modular squaring proposed by Rivest, Shamir, and Wagner (MIT Tech. Rep. 1996) in the context of time-lock puzzles. Moreover, there is no obvious reduction in the other direction, which suggests that the assumption of sequential hardness of modular Lucas sequences is strictly weaker than that of iterated modular squaring. In other words, the sequential hardness of modular Lucas sequences might hold even in the case of an algorithmic improvement violating the sequential hardness of iterated modular squaring. Second, we demonstrate the feasibility of constructing practically-efficient verifiable delay functions based on the sequential hardness of modular Lucas sequences. Our construction builds on the work of Pietrzak (ITCS 2019) by leveraging the intrinsic connection between the problem of computing modular Lucas sequences and exponentiation in an appropriate extension field.}, author = {Hoffmann, Charlotte and Hubáček, Pavel and Kamath, Chethan and Krňák, Tomáš}, booktitle = {21st International Conference on Theory of Cryptography}, isbn = {9783031486234}, issn = {1611-3349}, location = {Taipei, Taiwan}, pages = {336--362}, publisher = {Springer Nature}, title = {{(Verifiable) delay functions from Lucas sequences}}, doi = {10.1007/978-3-031-48624-1_13}, volume = {14372}, year = {2023}, } @inproceedings{14691, abstract = {Continuous Group-Key Agreement (CGKA) allows a group of users to maintain a shared key. 
It is the fundamental cryptographic primitive underlying group messaging schemes and related protocols, most notably TreeKEM, the underlying key agreement protocol of the Messaging Layer Security (MLS) protocol, a standard for group messaging by the IETF. CGKA works in an asynchronous setting where parties only occasionally must come online, and their messages are relayed by an untrusted server. The most expensive operation provided by CGKA is the one that allows a user to refresh their key material in order to achieve forward secrecy (old messages are secure when a user is compromised) and post-compromise security (users can heal from compromise). One caveat of early CGKA protocols is that these update operations had to be performed sequentially, with any user wanting to update their key material having to first receive and process all previous updates. Later versions of TreeKEM do allow for concurrent updates at the cost of a communication overhead per update message that is linear in the number of updating parties. This was shown to be indeed necessary when achieving PCS in just two rounds of communication by [Bienstock et al. TCC’20]. The recently proposed protocol CoCoA [Alwen et al. Eurocrypt’22], however, shows that this overhead can be reduced if PCS requirements are relaxed, and only a logarithmic number of rounds is required. The natural question, thus, is whether CoCoA is optimal in this setting. In this work we answer this question, providing a lower bound on the cost (concretely, the amount of data to be uploaded to the server) for CGKA protocols that heal in an arbitrary number k of rounds, which shows that CoCoA is very close to optimal. Additionally, we extend CoCoA to heal in an arbitrary number of rounds, and propose a modification of it, with a reduced communication cost for certain k. We prove our bound in a combinatorial setting where the state of the protocol progresses in rounds, and the state of the protocol in each round is captured by a set system, each set specifying a set of users who share a secret key. We show this combinatorial model is equivalent to a symbolic model capturing building blocks including PRFs and public-key encryption, related to the one used by Bienstock et al. Our lower bound is of order k⋅n^(1+1/(k-1))/log(k), where 2 ≤ k ≤ log(n) is the number of updates per user the protocol requires to heal. This generalizes the n² bound for k = 2 from Bienstock et al. This bound almost matches the k⋅n^(1+2/(k-1)) or k²⋅n^(1+1/(k-1)) efficiency we get for the variants of the CoCoA protocol also introduced in this paper.}, author = {Auerbach, Benedikt and Cueto Noval, Miguel and Pascual Perez, Guillermo and Pietrzak, Krzysztof Z}, booktitle = {21st International Conference on Theory of Cryptography}, isbn = {9783031486203}, issn = {1611-3349}, location = {Taipei, Taiwan}, pages = {271--300}, publisher = {Springer Nature}, title = {{On the cost of post-compromise security in concurrent Continuous Group-Key Agreement}}, doi = {10.1007/978-3-031-48621-0_10}, volume = {14371}, year = {2023}, } @inproceedings{14692, abstract = {The generic-group model (GGM) aims to capture algorithms working over groups of prime order that only rely on the group operation, but do not exploit any additional structure given by the concrete implementation of the group. In it, it is possible to prove information-theoretic lower bounds on the hardness of problems like the discrete logarithm (DL) or computational Diffie-Hellman (CDH). 
Thus, since its introduction, it has served as a valuable tool to assess the concrete security provided by cryptographic schemes based on such problems. A work on the related algebraic-group model (AGM) introduced a method, used by many subsequent works, to adapt GGM lower bounds for one problem to another, by means of conceptually simple reductions. In this work, we propose an alternative approach to extend GGM bounds from one problem to another. Following an idea by Yun [EC15], we show that, in the GGM, the security of a large class of problems can be reduced to that of geometric search-problems. By reducing the security of the resulting geometric-search problems to variants of the search-by-hypersurface problem, for which information-theoretic lower bounds exist, we give alternative proofs of several results that used the AGM approach. The main advantage of our approach is that our reduction from geometric search-problems works, as well, for the GGM with preprocessing (more precisely the bit-fixing GGM introduced by Coretti, Dodis and Guo [Crypto18]). As a consequence, this opens up the possibility of transferring preprocessing GGM bounds from one problem to another, also by means of simple reductions. Concretely, we prove novel preprocessing bounds on the hardness of the d-strong discrete logarithm, the d-strong Diffie-Hellman inversion, and multi-instance CDH problems, as well as a large class of Uber assumptions. Additionally, our approach applies to Shoup’s GGM without additional restrictions on the query behavior of the adversary, while the recent works of Zhang, Zhou, and Katz [AC22] and Zhandry [Crypto22] highlight that this is not the case for the AGM approach.}, author = {Auerbach, Benedikt and Hoffmann, Charlotte and Pascual Perez, Guillermo}, booktitle = {21st International Conference on Theory of Cryptography}, isbn = {9783031486203}, issn = {1611-3349}, location = {Taipei, Taiwan}, pages = {301--330}, publisher = {Springer Nature}, title = {{Generic-group lower bounds via reductions between geometric-search problems: With and without preprocessing}}, doi = {10.1007/978-3-031-48621-0_11}, volume = {14371}, year = {2023}, } @inproceedings{14736, abstract = {Payment channel networks (PCNs) are a promising technology to improve the scalability of cryptocurrencies. PCNs, however, face the challenge that the frequent usage of certain routes may deplete channels in one direction, and hence prevent further transactions. In order to reap the full potential of PCNs, recharging and rebalancing mechanisms are required to provision channels, as well as an admission control logic to decide which transactions to reject in case capacity is insufficient. This paper presents a formal model of this optimisation problem. In particular, we consider an online algorithms perspective, where transactions arrive over time in an unpredictable manner. Our main contributions are competitive online algorithms which come with provable guarantees over time. We empirically evaluate our algorithms on randomly generated transactions to compare the average performance of our algorithms to our theoretical bounds. 
We also show how this model and approach differs from related problems in classic communication networks.}, author = {Bastankhah, Mahsa and Chatterjee, Krishnendu and Maddah-Ali, Mohammad Ali and Schmid, Stefan and Svoboda, Jakub and Yeo, Michelle X}, booktitle = {27th International Conference on Financial Cryptography and Data Security}, isbn = {9783031477539}, issn = {1611-3349}, location = {Bol, Brac, Croatia}, pages = {309--325}, publisher = {Springer Nature}, title = {{R2: Boosting liquidity in payment channel networks with online admission control}}, doi = {10.1007/978-3-031-47754-6_18}, volume = {13950}, year = {2023}, } @inproceedings{14744, abstract = {Sharding distributed ledgers is a promising on-chain solution for scaling blockchains but lacks formal grounds, nurturing skepticism on whether such complex systems can scale blockchains securely. We fill this gap by introducing the first formal framework as well as a roadmap to robust sharding. In particular, we first define the properties sharded distributed ledgers should fulfill. We build upon and extend the Bitcoin backbone protocol by defining consistency and scalability. Consistency encompasses the need for atomic execution of cross-shard transactions to preserve safety, whereas scalability encapsulates the speedup a sharded system can gain in comparison to a non-sharded system. Using our model, we explore the limitations of sharding. We show that a sharded ledger with n participants cannot scale under a fully adaptive adversary, but it can scale up to m shards where n=c'm log m, under an epoch-adaptive adversary; the constant c' encompasses the trade-off between security and scalability. This is possible only if the sharded ledgers create succinct proofs of the valid state updates at every epoch. We leverage our results to identify the sufficient components for robust sharding, which we incorporate in a protocol abstraction termed Divide & Scale. To demonstrate the power of our framework, we analyze the most prominent sharded blockchains (Elastico, Monoxide, OmniLedger, RapidChain) and pinpoint where they fail to meet the desired properties.}, author = {Avarikioti, Zeta and Desjardins, Antoine and Kokoris Kogias, Eleftherios and Wattenhofer, Roger}, booktitle = {30th International Colloquium on Structural Information and Communication Complexity}, isbn = {9783031327322}, issn = {1611-3349}, location = {Alcalá de Henares, Spain}, pages = {199--245}, publisher = {Springer Nature}, title = {{Divide & Scale: Formalization and roadmap to robust sharding}}, doi = {10.1007/978-3-031-32733-9_10}, volume = {13892}, year = {2023}, } @inproceedings{14456, abstract = {In this paper, we present novel algorithms that efficiently compute a shortest reconfiguration sequence between two given dominating sets in trees and interval graphs under the TOKEN SLIDING model. In this problem, a graph is provided along with its two dominating sets, which can be imagined as tokens placed on vertices. The objective is to find a shortest sequence of dominating sets that transforms one set into the other, with each set in the sequence resulting from sliding a single token in the previous set. 
While identifying any sequence has been well studied, our work presents the first polynomial-time algorithms for this optimization variant in the context of dominating sets.}, author = {Křišťan, Jan Matyáš and Svoboda, Jakub}, booktitle = {24th International Symposium on Fundamentals of Computation Theory}, isbn = {9783031435867}, issn = {1611-3349}, location = {Trier, Germany}, pages = {333--347}, publisher = {Springer Nature}, title = {{Shortest dominating set reconfiguration under token sliding}}, doi = {10.1007/978-3-031-43587-4_24}, volume = {14292}, year = {2023}, } @inproceedings{14829, abstract = {This paper explores a modular design architecture aimed at helping blockchains (and other SMR implementations) to scale to a very large number of processes. This comes in contrast to existing monolithic architectures that interleave transaction dissemination, ordering, and execution in a single functionality. To achieve this we first split the monolith into multiple layers which can use existing distributed computing primitives. The exact specifications of the data dissemination part are formally defined by the Proof of Availability & Retrieval (PoA&R) abstraction. Solutions to the PoA&R problem contain two related sub-protocols: one that “pushes” information into the network and another that “pulls” this information. Regarding the latter, there is a dearth of research literature, which is rectified in this paper. We present a family of pulling sub-protocols and rigorously analyze them. Extensive simulations support the theoretical claims of efficiency and robustness in the case of a very large number of players. Finally, actual implementation and deployment on a small number of machines (roughly the size of several industrial systems) demonstrates the viability of the architecture’s paradigm.}, author = {Cohen, Shir and Goren, Guy and Kokoris Kogias, Eleftherios and Sonnino, Alberto and Spiegelman, Alexander}, booktitle = {27th International Conference on Financial Cryptography and Data Security}, isbn = {9783031477508}, issn = {1611-3349}, location = {Bol, Brac, Croatia}, pages = {36--53}, publisher = {Springer Nature}, title = {{Proof of availability and retrieval in a modular blockchain architecture}}, doi = {10.1007/978-3-031-47751-5_3}, volume = {13951}, year = {2023}, } @inproceedings{14411, abstract = {Partially specified Boolean networks (PSBNs) represent a promising framework for the qualitative modelling of biological systems in which the logic of interactions is not completely known. Phenotype control aims to stabilise the network in states exhibiting specific traits. In this paper, we define the phenotype control problem in the context of asynchronous PSBNs and propose a novel semi-symbolic algorithm for solving this problem with permanent variable perturbations.}, author = {Beneš, Nikola and Brim, Luboš and Pastva, Samuel and Šafránek, David and Šmijáková, Eva}, booktitle = {21st International Conference on Computational Methods in Systems Biology}, isbn = {9783031426964}, issn = {1611-3349}, location = {Luxembourg City, Luxembourg}, pages = {18--35}, publisher = {Springer Nature}, title = {{Phenotype control of partially specified Boolean networks}}, doi = {10.1007/978-3-031-42697-1_2}, volume = {14137}, year = {2023}, } @inproceedings{14758, abstract = {We present a flexible and efficient toolchain to symbolically solve (standard) Rabin games, fair-adversarial Rabin games, and 2½-player Rabin games. 
To the best of our knowledge, our tools are the first ones able to solve these problems. Furthermore, using these flexible game solvers as a back-end, we implemented a tool for computing correct-by-construction controllers for stochastic dynamical systems under LTL specifications. Our implementations use the recent theoretical result that all of these games can be solved using the same symbolic fixpoint algorithm but utilizing different, domain-specific calculations of the involved predecessor operators. The main feature of our toolchain is the utilization of two programming abstractions: one to separate the symbolic fixpoint computations from the predecessor calculations, and another one to allow the integration of different BDD libraries as back-ends. In particular, we employ a multi-threaded execution of the fixpoint algorithm by using the multi-threaded BDD library Sylvan, which leads to enormous computational savings.}, author = {Majumdar, Rupak and Mallik, Kaushik and Rychlicki, Mateusz and Schmuck, Anne-Kathrin and Soudjani, Sadegh}, booktitle = {35th International Conference on Computer Aided Verification}, isbn = {9783031377082}, issn = {1611-3349}, location = {Paris, France}, pages = {3--15}, publisher = {Springer Nature}, title = {{A flexible toolchain for symbolic Rabin games under fair and stochastic uncertainties}}, doi = {10.1007/978-3-031-37709-9_1}, volume = {13966}, year = {2023}, } @inproceedings{13139, abstract = {A classical problem for Markov chains is determining their stationary (or steady-state) distribution. This problem has an equally classical solution based on eigenvectors and linear equation systems. However, this approach does not scale to large instances, and iterative solutions are desirable. It turns out that a naive approach, as used by current model checkers, may yield completely wrong results. We present a new approach, which utilizes recent advances in partial exploration and mean payoff computation to obtain a correct, converging approximation.}, author = {Meggendorfer, Tobias}, booktitle = {TACAS 2023: Tools and Algorithms for the Construction and Analysis of Systems}, isbn = {9783031308222}, issn = {1611-3349}, location = {Paris, France}, pages = {489--507}, publisher = {Springer Nature}, title = {{Correct approximation of stationary distributions}}, doi = {10.1007/978-3-031-30823-9_25}, volume = {13993}, year = {2023}, } @inproceedings{14260, abstract = {This paper presents Lincheck, a new practical and user-friendly framework for testing concurrent algorithms on the Java Virtual Machine (JVM). Lincheck provides a simple and declarative way to write concurrent tests: instead of describing how to perform the test, users specify what to test by declaring all the operations to examine; the framework automatically handles the rest. As a result, tests written with Lincheck are concise and easy to understand. The framework automatically generates a set of concurrent scenarios, examines them using stress-testing or bounded model checking, and verifies that the results of each invocation are correct. Notably, if an error is detected via model checking, Lincheck provides an easy-to-follow trace to reproduce it, significantly simplifying the bug investigation. To the best of our knowledge, Lincheck is the first production-ready tool on the JVM that offers such a simple way of writing concurrent tests, without requiring special skills or expertise. 
We successfully integrated Lincheck into the development process of several large projects, such as Kotlin Coroutines, and identified new bugs in popular concurrency libraries, such as a race in Java’s standard ConcurrentLinkedDeque and a liveness bug in Java’s AbstractQueuedSynchronizer framework, which is used in most of the synchronization primitives. We believe that Lincheck can significantly improve the quality and productivity of concurrent algorithms research and development and become the state-of-the-art tool for checking their correctness.}, author = {Koval, Nikita and Fedorov, Alexander and Sokolova, Maria and Tsitelov, Dmitry and Alistarh, Dan-Adrian}, booktitle = {35th International Conference on Computer Aided Verification}, isbn = {9783031377051}, issn = {1611-3349}, location = {Paris, France}, pages = {156--169}, publisher = {Springer Nature}, title = {{Lincheck: A practical framework for testing concurrent data structures on JVM}}, doi = {10.1007/978-3-031-37706-8_8}, volume = {13964}, year = {2023}, } @inproceedings{13236, abstract = {We present an auction algorithm using multiplicative instead of constant weight updates to compute a (1−ε)-approximate maximum weight matching (MWM) in a bipartite graph with n vertices and m edges in time O(mε⁻¹ log(ε⁻¹)), matching the running time of the linear-time approximation algorithm of Duan and Pettie [JACM ’14]. Our algorithm is very simple and it can be extended to give a dynamic data structure that maintains a (1−ε)-approximate maximum weight matching under (1) one-sided vertex deletions (with incident edges) and (2) one-sided vertex insertions (with incident edges sorted by weight) to the other side. The total time used is O(mε⁻¹ log(ε⁻¹)), where m is the sum of the number of initially existing and inserted edges.}, author = {Zheng, Da Wei and Henzinger, Monika H}, booktitle = {International Conference on Integer Programming and Combinatorial Optimization}, isbn = {9783031327254}, issn = {1611-3349}, location = {Madison, WI, United States}, pages = {453--465}, publisher = {Springer Nature}, title = {{Multiplicative auction algorithm for approximate maximum weight bipartite matching}}, doi = {10.1007/978-3-031-32726-1_32}, volume = {13904}, year = {2023}, } @book{11429, abstract = {This book constitutes the refereed proceedings of the 18th International Symposium on Web and Wireless Geographical Information Systems, W2GIS 2022, held in Konstanz, Germany, in April 2022. The 7 full papers presented together with 6 short papers in the volume were carefully reviewed and selected from 16 submissions. The papers cover topics that range from mobile GIS and Location-Based Services to Spatial Information Retrieval and Wireless Sensor Networks.}, editor = {Karimipour, Farid and Storandt, Sabine}, isbn = {9783031062445}, issn = {1611-3349}, pages = {153}, publisher = {Springer Nature}, title = {{Web and Wireless Geographical Information Systems}}, doi = {10.1007/978-3-031-06245-2}, volume = {13238}, year = {2022}, } @inproceedings{12171, abstract = {We propose an algorithmic approach for synthesizing linear hybrid automata from time-series data. Unlike existing approaches, our approach provides a whole family of models with the same discrete structure but different dynamics. Each model in the family is guaranteed to capture the input data up to a precision error ε, in the following sense: For each time series, the model contains an execution that is ε-close to the data points. 
Our construction allows us to effectively choose a model from this family with minimal precision error ε. We demonstrate the algorithm’s efficiency and its ability to find precise models in two case studies.}, author = {Garcia Soto, Miriam and Henzinger, Thomas A and Schilling, Christian}, booktitle = {20th International Symposium on Automated Technology for Verification and Analysis}, isbn = {9783031199912}, issn = {1611-3349}, location = {Virtual}, pages = {337--353}, publisher = {Springer Nature}, title = {{Synthesis of parametric hybrid automata from time series}}, doi = {10.1007/978-3-031-19992-9_22}, volume = {13505}, year = {2022}, } @inproceedings{10891, abstract = {We present a formal framework for the online black-box monitoring of software using monitors with quantitative verdict functions. Quantitative verdict functions have several advantages. First, quantitative monitors can be approximate, i.e., the value of the verdict function does not need to correspond exactly to the value of the property under observation. Second, quantitative monitors can be quantified universally, i.e., for every possible observed behavior, the monitor tries to make the best effort to estimate the value of the property under observation. Third, quantitative monitors can watch boolean as well as quantitative properties, such as average response time. Fourth, quantitative monitors can use non-finite-state resources, such as counters. As a consequence, quantitative monitors can be compared according to how many resources they use (e.g., the number of counters) and how precisely they approximate the property under observation. This allows for a rich spectrum of cost-precision trade-offs in monitoring software.}, author = {Henzinger, Thomas A}, booktitle = {Software Verification}, isbn = {9783030955601}, issn = {1611-3349}, location = {New Haven, CT, United States}, pages = {3--6}, publisher = {Springer Nature}, title = {{Quantitative monitoring of software}}, doi = {10.1007/978-3-030-95561-8_1}, volume = {13124}, year = {2022}, } @inproceedings{11355, abstract = {Contract-based design is a promising methodology for taming the complexity of developing sophisticated systems. A formal contract distinguishes between assumptions, which are constraints that the designer of a component puts on the environments in which the component can be used safely, and guarantees, which are promises that the designer asks from the team that implements the component. A theory of formal contracts can be formalized as an interface theory, which supports the composition and refinement of both assumptions and guarantees. Although there is a rich landscape of contract-based design methods that address functional and extra-functional properties, we present the first interface theory that is designed for ensuring system-wide security properties. Our framework provides a refinement relation and a composition operation that support both incremental design and independent implementability. We develop our theory for both stateless and stateful interfaces. 
We illustrate the applicability of our framework with an example inspired by the automotive domain.}, author = {Bartocci, Ezio and Ferrere, Thomas and Henzinger, Thomas A and Nickovic, Dejan and Da Costa, Ana Oliveira}, booktitle = {Fundamental Approaches to Software Engineering}, isbn = {9783030994280}, issn = {1611-3349}, location = {Munich, Germany}, pages = {3--22}, publisher = {Springer Nature}, title = {{Information-flow interfaces}}, doi = {10.1007/978-3-030-99429-7_1}, volume = {13241}, year = {2022}, } @inproceedings{11476, abstract = {Messaging platforms like Signal are widely deployed and provide strong security in an asynchronous setting. It is a challenging problem to construct a protocol with similar security guarantees that can efficiently scale to large groups. A major bottleneck is the frequent key rotations users need to perform to achieve post-compromise forward security. In current proposals – most notably in TreeKEM (which is part of the IETF’s Messaging Layer Security (MLS) protocol draft) – for users in a group of size n to rotate their keys, they must each craft a message of size log(n) to be broadcast to the group using an (untrusted) delivery server. In larger groups, having users sequentially rotate their keys requires too much bandwidth (or takes too long), so variants allowing any T≤n users to simultaneously rotate their keys in just 2 communication rounds have been suggested (e.g. “Propose and Commit” by MLS). Unfortunately, 2-round concurrent updates are either damaging or expensive (or both); i.e., they either result in future operations being more costly (e.g. via “blanking” or “tainting”) or are costly themselves, requiring Ω(T) communication for each user [Bienstock et al., TCC’20]. In this paper we propose CoCoA, a new scheme that allows for T concurrent updates that are neither damaging nor costly. That is, they add no cost to future operations yet they only require O(log²(n)) communication per user. To circumvent the [Bienstock et al.] lower bound, CoCoA increases the number of rounds needed to complete all updates from 2 up to (at most) log(n), though typically fewer rounds are needed. The key insight of our protocol is the following: in the (non-concurrent version of) TreeKEM, a delivery server which gets T concurrent update requests will approve one and reject the remaining T−1. In contrast, our server attempts to apply all of them. If more than one user requests to rotate the same key during a round, the server arbitrarily picks a winner. Surprisingly, we prove that regardless of how the server chooses the winners, all previously compromised users will recover after at most log(n) such update rounds. To keep the communication complexity low, CoCoA is a server-aided CGKA. That is, the delivery server no longer blindly forwards packets, but instead actively computes individualized packets tailored to each user. 
As the server is untrusted, this change requires us to develop new mechanisms ensuring robustness of the protocol.}, author = {Alwen, Joël and Auerbach, Benedikt and Cueto Noval, Miguel and Klein, Karen and Pascual Perez, Guillermo and Pietrzak, Krzysztof Z and Walter, Michael}, booktitle = {Advances in Cryptology – EUROCRYPT 2022}, isbn = {9783031070846}, issn = {1611-3349}, location = {Trondheim, Norway}, pages = {815--844}, publisher = {Springer Nature}, title = {{CoCoA: Concurrent continuous group key agreement}}, doi = {10.1007/978-3-031-07085-3_28}, volume = {13276}, year = {2022}, } @inproceedings{11707, abstract = {In this work we introduce the graph-theoretic notion of mendability: for each locally checkable graph problem we can define its mending radius, which captures the idea of how far one needs to modify a partial solution in order to “patch a hole.” We explore how mendability is connected to the existence of efficient algorithms, especially in distributed, parallel, and fault-tolerant settings. It is easy to see that O(1)-mendable problems are also solvable in O(log∗ n) rounds in the LOCAL model of distributed computing. One of the surprises is that in paths and cycles, a converse also holds in the following sense: if a problem Π can be solved in O(log∗ n) rounds, there is always a restriction Π′⊆Π that is still efficiently solvable but that is also O(1)-mendable. We also explore the structure of the landscape of mendability. For example, we show that in trees, the mending radius of any locally checkable problem is O(1), Θ(log n), or Θ(n), while in general graphs the structure is much more diverse.}, author = {Balliu, Alkida and Hirvonen, Juho and Melnyk, Darya and Olivetti, Dennis and Rybicki, Joel and Suomela, Jukka}, booktitle = {International Colloquium on Structural Information and Communication Complexity}, editor = {Parter, Merav}, isbn = {9783031099922}, issn = {1611-3349}, location = {Paderborn, Germany}, pages = {1--20}, publisher = {Springer Nature}, title = {{Local mending}}, doi = {10.1007/978-3-031-09993-9_1}, volume = {13298}, year = {2022}, } @inproceedings{12516, abstract = {The homogeneous continuous LWE (hCLWE) problem is to distinguish samples of a specific high-dimensional Gaussian mixture from standard normal samples. It was shown to be at least as hard as Learning with Errors, but no reduction in the other direction is currently known. We present four new public-key encryption schemes based on the hardness of hCLWE, with varying tradeoffs between decryption and security errors, and different discretization techniques. Our schemes yield a polynomial-time algorithm for solving hCLWE using a Statistical Zero-Knowledge oracle.}, author = {Bogdanov, Andrej and Cueto Noval, Miguel and Hoffmann, Charlotte and Rosen, Alon}, booktitle = {Theory of Cryptography}, isbn = {9783031223648}, issn = {1611-3349}, location = {Chicago, IL, United States}, pages = {565--592}, publisher = {Springer Nature}, title = {{Public-Key Encryption from Homogeneous CLWE}}, doi = {10.1007/978-3-031-22365-5_20}, volume = {13748}, year = {2022}, } @inproceedings{12167, abstract = {Payment channels effectively move the transaction load off-chain, thereby successfully addressing the inherent scalability problem most cryptocurrencies face. A major drawback of payment channels is the need to “top up” funds on-chain when a channel is depleted. Rebalancing was proposed to alleviate this issue, where parties with depleting channels move their funds along a cycle to replenish their channels off-chain.
Protocols for rebalancing so far either introduce local solutions or compromise privacy. In this work, we present an opt-in rebalancing protocol that is both private and globally optimal, meaning our protocol maximizes the total amount of rebalanced funds. We study rebalancing from the framework of linear programming. To obtain full privacy guarantees, we leverage multi-party computation in solving the linear program, which is executed by selected participants to maintain efficiency. Finally, we efficiently decompose the rebalancing solution into incentive-compatible cycles which conserve user balances when executed atomically.}, author = {Avarikioti, Georgia and Pietrzak, Krzysztof Z and Salem, Iosif and Schmid, Stefan and Tiwari, Samarth and Yeo, Michelle X}, booktitle = {Financial Cryptography and Data Security}, isbn = {9783031182822}, issn = {1611-3349}, location = {Grenada}, pages = {358--373}, publisher = {Springer Nature}, title = {{Hide & Seek: Privacy-preserving rebalancing on payment channel networks}}, doi = {10.1007/978-3-031-18283-9_17}, volume = {13411}, year = {2022}, } @inproceedings{12302, abstract = {We propose a novel algorithm to decide language inclusion between (nondeterministic) Büchi automata, a PSPACE-complete problem. Our approach, like others before, leverages a notion of quasiorder to prune the search for a counterexample by discarding candidates that are subsumed by others with respect to the quasiorder. Discarded candidates are guaranteed not to compromise the completeness of the algorithm. The novelty of our work lies in the quasiorder used to discard candidates. We introduce FORQs (families of right quasiorders), which we obtain by adapting the notion of family of right congruences put forward by Maler and Staiger in 1993. We define a FORQ-based inclusion algorithm, which we prove correct, and instantiate it for a specific FORQ, called the structural FORQ, induced by the Büchi automaton to the right of the inclusion sign. The resulting implementation, called FORKLIFT, scales up better than the state of the art on a variety of benchmarks including benchmarks from program verification and theorem proving for word combinatorics. Artifact: https://doi.org/10.5281/zenodo.6552870}, author = {Doveri, Kyveli and Ganty, Pierre and Mazzocchi, Nicolas Adrien}, booktitle = {Computer Aided Verification}, isbn = {9783031131875}, issn = {1611-3349}, location = {Haifa, Israel}, pages = {109--129}, publisher = {Springer Nature}, title = {{FORQ-based language inclusion formal testing}}, doi = {10.1007/978-3-031-13188-2_6}, volume = {13372}, year = {2022}, } @inproceedings{12176, abstract = {A proof of exponentiation (PoE) in a group G of unknown order allows a prover to convince a verifier that a tuple (x,q,T,y)∈G×N×N×G satisfies x^(q^T)=y. This primitive has recently found exciting applications in the constructions of verifiable delay functions and succinct arguments of knowledge. The most practical PoEs only achieve soundness either under computational assumptions, i.e., they are arguments (Wesolowski, Journal of Cryptology 2020), or in groups that come with the promise of not having any small subgroups (Pietrzak, ITCS 2019). The only statistically-sound PoE in general groups of unknown order is due to Block et al. (CRYPTO 2021), and can be seen as an elaborate parallel repetition of Pietrzak’s PoE: to achieve λ bits of security, say λ=80, the number of repetitions required (and thus the blow-up in communication) is as large as λ.
In this work, we propose a statistically-sound PoE for the case where the exponent q is the product of all primes up to some bound B. We show that, in this case, it suffices to run only λ/log(B) parallel instances of Pietrzak’s PoE, which reduces the concrete proof-size compared to Block et al. by an order of magnitude. Furthermore, we show that in the known applications where PoEs are used as a building block, such structured exponents are viable. Finally, we also discuss batching of our PoE, showing that many proofs (for the same G and q but different x and T) can be batched by adding only a single element to the proof per additional statement.}, author = {Hoffmann, Charlotte and Hubáček, Pavel and Kamath, Chethan and Klein, Karen and Pietrzak, Krzysztof Z}, booktitle = {Advances in Cryptology – CRYPTO 2022}, isbn = {9783031159787}, issn = {1611-3349}, location = {Santa Barbara, CA, United States}, pages = {370--399}, publisher = {Springer Nature}, title = {{Practical statistically-sound proofs of exponentiation in any group}}, doi = {10.1007/978-3-031-15979-4_13}, volume = {13508}, year = {2022}, } @inproceedings{12298, abstract = {Existing committee-based Byzantine state machine replication (SMR) protocols, typically deployed in production blockchains, face a clear trade-off: (1) they either achieve linear communication cost in the steady state, but sacrifice liveness during periods of asynchrony, or (2) they are robust (progress with probability one) but pay quadratic communication cost. We believe this trade-off is unwarranted since existing linear protocols still have asymptotically quadratic cost in the worst case. We design Ditto, a Byzantine SMR protocol that enjoys the best of both worlds: optimal communication on and off the steady state (linear and quadratic, respectively) and a progress guarantee under asynchrony and DDoS attacks. We achieve this by replacing the view-synchronization of partially synchronous protocols with an asynchronous fallback mechanism at no extra asymptotic cost. Specifically, we start from HotStuff, a state-of-the-art linear protocol, and gradually build Ditto. As a separate contribution and an intermediate step, we design a 2-chain version of HotStuff, Jolteon, which leverages a quadratic view-change mechanism to reduce the latency of the standard 3-chain HotStuff. We implement and experimentally evaluate all our systems to prove that breaking the robustness-efficiency trade-off is in the realm of practicality.}, author = {Gelashvili, Rati and Kokoris Kogias, Eleftherios and Sonnino, Alberto and Spiegelman, Alexander and Xiang, Zhuolun}, booktitle = {Financial Cryptography and Data Security}, isbn = {9783031182822}, issn = {1611-3349}, location = {Radisson Grenada Beach Resort, Grenada}, pages = {296--315}, publisher = {Springer Nature}, title = {{Jolteon and ditto: Network-adaptive efficient consensus with asynchronous fallback}}, doi = {10.1007/978-3-031-18283-9_14}, volume = {13411}, year = {2022}, } @inproceedings{12168, abstract = {Advances in blockchains have influenced the State-Machine-Replication (SMR) world, and many state-of-the-art blockchain-SMR solutions are based on two pillars: Chaining and Leader-rotation. A predetermined round-robin mechanism used for Leader-rotation, however, has an undesirable behavior: crashed parties become designated leaders infinitely often, slowing down overall system performance.
In this paper, we provide a new Leader-Aware SMR framework that, among other desirable properties, formalizes a Leader-utilization requirement that bounds the number of rounds whose leaders are faulty in crash-only executions. We introduce Carousel, a novel, reputation-based Leader-rotation solution to achieve Leader-Aware SMR. The challenge in adaptive Leader-rotation is that it cannot rely on consensus to determine a leader, since consensus itself needs a leader. Carousel uses the available on-chain information to determine a leader locally and achieves Liveness despite this difficulty. A HotStuff implementation fitted with Carousel demonstrates drastic performance improvements: it increases throughput by over 2x in faultless settings and provides a 20x throughput increase and a 5x latency reduction in the presence of faults.}, author = {Cohen, Shir and Gelashvili, Rati and Kokoris Kogias, Eleftherios and Li, Zekun and Malkhi, Dahlia and Sonnino, Alberto and Spiegelman, Alexander}, booktitle = {International Conference on Financial Cryptography and Data Security}, isbn = {9783031182822}, issn = {1611-3349}, location = {Grenada}, pages = {279--295}, publisher = {Springer Nature}, title = {{Be aware of your leaders}}, doi = {10.1007/978-3-031-18283-9_13}, volume = {13411}, year = {2022}, } @inproceedings{12170, abstract = {We present PET, a specialized and highly optimized framework for partial exploration on probabilistic systems. Over the last decade, several significant advances in the analysis of Markov decision processes employed partial exploration. In a nutshell, this idea allows focusing computation on specific parts of the system, guided by heuristics, while maintaining correctness. In particular, only relevant parts of the system are constructed on demand, which in turn potentially allows omitting the construction of large parts of the system. Depending on the model, this leads to dramatic speed-ups, in extreme cases even up to an arbitrary factor. PET unifies several previous implementations and provides a flexible framework to easily implement partial exploration for many further problems. Our experimental evaluation shows significant improvements compared to the previous implementations while vastly reducing the overhead required to add support for additional properties.}, author = {Meggendorfer, Tobias}, booktitle = {20th International Symposium on Automated Technology for Verification and Analysis}, isbn = {9783031199912}, issn = {1611-3349}, location = {Virtual}, pages = {320--326}, publisher = {Springer Nature}, title = {{PET – A partial exploration tool for probabilistic verification}}, doi = {10.1007/978-3-031-19992-9_20}, volume = {13505}, year = {2022}, } @inproceedings{12175, abstract = {An automaton is history-deterministic (HD) if one can safely resolve its non-deterministic choices on the fly. In a recent paper, Henzinger, Lehtinen and Totzke studied this in the context of Timed Automata [9], where it was conjectured that the class of timed ω-languages recognised by HD-timed automata strictly extends that of deterministic ones.
We provide a proof of this fact.}, author = {Bose, Sougata and Henzinger, Thomas A and Lehtinen, Karoliina and Schewe, Sven and Totzke, Patrick}, booktitle = {16th International Conference on Reachability Problems}, isbn = {9783031191343}, issn = {1611-3349}, location = {Kaiserslautern, Germany}, pages = {67--76}, publisher = {Springer Nature}, title = {{History-deterministic timed automata are not determinizable}}, doi = {10.1007/978-3-031-19135-0_5}, volume = {13608}, year = {2022}, } @inproceedings{11185, abstract = {Bundling crossings is a strategy which can enhance the readability of graph drawings. In this paper we consider bundlings for families of pseudosegments, i.e., simple curves such that any two share at most one point, at which they cross. Our main result is that there is a polynomial-time algorithm to compute an 8-approximation of the bundled crossing number of such instances (up to adding a term depending on the facial structure). This 8-approximation also holds for bundlings of good drawings of graphs. In the special case of circular drawings the approximation factor is 8 (no extra term); this improves upon the 10-approximation of Fink et al. [6]. We also show how to compute a 9/2-approximation when the intersection graph of the pseudosegments is bipartite.}, author = {Arroyo Guevara, Alan M and Felsner, Stefan}, booktitle = {WALCOM 2022: Algorithms and Computation}, isbn = {9783030967307}, issn = {1611-3349}, location = {Jember, Indonesia}, pages = {383--395}, publisher = {Springer Nature}, title = {{Approximating the bundled crossing number}}, doi = {10.1007/978-3-030-96731-4_31}, volume = {13174}, year = {2022}, } @inproceedings{12000, abstract = {We consider the quantitative problem of obtaining lower-bounds on the probability of termination of a given non-deterministic probabilistic program. Specifically, given a non-termination threshold p∈[0,1], we aim for certificates proving that the program terminates with probability at least 1−p. The basic idea of our approach is to find a terminating stochastic invariant, i.e., a subset SI of program states such that (i) the probability of the program ever leaving SI is no more than p, and (ii) almost-surely, the program either leaves SI or terminates. While stochastic invariants are already well-known, we provide the first proof that the idea above is not only sound, but also complete for quantitative termination analysis. We then introduce a novel sound and complete characterization of stochastic invariants that enables template-based approaches for easy synthesis of quantitative termination certificates, especially in affine or polynomial forms. Finally, by combining this idea with the existing martingale-based methods that are relatively complete for qualitative termination analysis, we obtain the first automated, sound, and relatively complete algorithm for quantitative termination analysis. Notably, our completeness guarantees for quantitative termination analysis are as strong as those of the best-known methods for the qualitative variant. Our prototype implementation demonstrates the effectiveness of our approach on various probabilistic programs.
We also demonstrate that our algorithm certifies lower bounds on termination probability for probabilistic programs that are beyond the reach of previous methods.}, author = {Chatterjee, Krishnendu and Goharshady, Amir Kafshdar and Meggendorfer, Tobias and Zikelic, Dorde}, booktitle = {Proceedings of the 34th International Conference on Computer Aided Verification}, isbn = {9783031131844}, issn = {1611-3349}, location = {Haifa, Israel}, pages = {55--78}, publisher = {Springer}, title = {{Sound and complete certificates for quantitative termination analysis of probabilistic programs}}, doi = {10.1007/978-3-031-13185-1_4}, volume = {13371}, year = {2022}, } @inproceedings{11771, abstract = {Classic dynamic data structure problems maintain a data structure subject to a sequence S of updates and they answer queries using the latest version of the data structure, i.e., the data structure after processing the whole sequence. To handle operations that change the sequence S of updates, Demaine et al. [7] introduced retroactive data structures (RDS). A retroactive operation modifies the update sequence S in a given position t, called time, and either creates or cancels an update in S at time t. A fully retroactive data structure supports queries at any time t: a query at time t is answered using only the updates of S up to time t. While efficient RDS have been proposed for classic data structures, e.g., stack, priority queue and binary search tree, the retroactive versions of graph problems are rarely studied. In this paper we study retroactive graph problems including connectivity, minimum spanning forest (MSF), maximum degree, etc. We show that under the OMv conjecture (proposed by Henzinger et al. [15]), there does not exist a fully RDS maintaining connectivity or MSF, or an incremental fully RDS maintaining the maximum degree, with O(n^(1−ϵ)) time per operation, for any constant ϵ>0. Furthermore, we provide RDS with almost tight time per operation. We give fully RDS for maintaining the maximum degree, connectivity and MSF in Õ(n) time per operation. We also give an algorithm for the incremental (insertion-only) fully retroactive connectivity with Õ(1) time per operation, showing that the lower bound cannot be extended to this setting. We also study a restricted version of RDS, where the only change to S is the swap of neighboring updates, and show that for this problem we can beat the above hardness result. This also implies the first non-trivial dynamic Reeb graph computation algorithm.}, author = {Henzinger, Monika H and Wu, Xiaowei}, booktitle = {17th International Symposium on Algorithms and Data Structures}, isbn = {9783030835071}, issn = {1611-3349}, location = {Virtual}, pages = {471–484}, publisher = {Springer Nature}, title = {{Upper and lower bounds for fully retroactive graph problems}}, doi = {10.1007/978-3-030-83508-8_34}, volume = {12808}, year = {2021}, } @inproceedings{9210, abstract = {Modern neural networks can easily fit their training set perfectly. Surprisingly, despite being “overfit” in this way, they tend to generalize well to future data, thereby defying the classic bias–variance trade-off of machine learning theory. Of the many possible explanations, a prevalent one is that training by stochastic gradient descent (SGD) imposes an implicit bias that leads it to learn simple functions, and these simple functions generalize well. However, the specifics of this implicit bias are not well understood.
In this work, we explore the smoothness conjecture, which states that SGD is implicitly biased towards learning functions that are smooth. We propose several measures to formalize the intuitive notion of smoothness, and we conduct experiments to determine whether SGD indeed implicitly optimizes for these measures. Our findings rule out the possibility that smoothness measures based on first-order derivatives are being implicitly enforced. They are supportive, though, of the smoothness conjecture for measures based on second-order derivatives.}, author = {Volhejn, Vaclav and Lampert, Christoph}, booktitle = {42nd German Conference on Pattern Recognition}, isbn = {9783030712778}, issn = {1611-3349}, location = {Tübingen, Germany}, pages = {246--259}, publisher = {Springer}, title = {{Does SGD implicitly optimize for smoothness?}}, doi = {10.1007/978-3-030-71278-5_18}, volume = {12544}, year = {2021}, } @inproceedings{9620, abstract = {In this note, we introduce a distributed twist on the classic coupon collector problem: a set of m collectors wish to each obtain a set of n coupons; for this, they can each sample coupons uniformly at random, but can also meet in pairwise interactions, during which they can exchange coupons. By doing so, they hope to reduce the number of coupons that must be sampled by each collector in order to obtain a full set. This extension is natural when considering real-world manifestations of the coupon collector phenomenon, and has been remarked upon and studied empirically (Hayes and Hannigan 2006, Ahmad et al. 2014, Delmarcelle 2019). We provide the first theoretical analysis of such a scenario. We find that “coupon collecting with friends” can indeed significantly reduce the number of coupons each collector must sample, and that it raises interesting connections to the more traditional variants of the problem. While our analysis is in most cases asymptotically tight, several open questions remain regarding finer-grained analysis both of “coupon collecting with friends” and of a long-studied variant of the original problem in which a collector requires multiple full sets of coupons.}, author = {Alistarh, Dan-Adrian and Davies, Peter}, booktitle = {Structural Information and Communication Complexity}, isbn = {9783030795269}, issn = {1611-3349}, location = {Wrocław, Poland}, pages = {3--12}, publisher = {Springer Nature}, title = {{Collecting coupons is faster with friends}}, doi = {10.1007/978-3-030-79527-6_1}, volume = {12810}, year = {2021}, } @inproceedings{12767, abstract = {Several problems in planning and reactive synthesis can be reduced to the analysis of two-player quantitative graph games. Optimization is one form of analysis. We argue that in many cases it may be better to replace the optimization problem with the satisficing problem, where instead of searching for optimal solutions, the goal is to search for solutions that adhere to a given threshold bound. This work defines and investigates the satisficing problem on a two-player graph game with the discounted-sum cost model. We show that while the satisficing problem can be solved using numerical methods just like the optimization problem, this approach does not yield compelling benefits over optimization. When the discount factor is, however, an integer, we present another approach to satisficing, which is purely based on automata methods.
We show that this approach is algorithmically more performant – both theoretically and empirically – and demonstrates the broader applicability of satisficing over optimization.}, author = {Bansal, Suguman and Chatterjee, Krishnendu and Vardi, Moshe Y.}, booktitle = {27th International Conference on Tools and Algorithms for the Construction and Analysis of Systems}, isbn = {9783030720155}, issn = {1611-3349}, location = {Luxembourg City, Luxembourg}, pages = {20--37}, publisher = {Springer Nature}, title = {{On satisficing in quantitative games}}, doi = {10.1007/978-3-030-72016-2}, volume = {12651}, year = {2021}, } @inproceedings{10076, abstract = {We present a novel approach for blockchain asset owners to reclaim their funds in case of accidental private-key loss or transfer to a mistyped address. Our solution can be deployed upon failure or absence of proactively implemented backup mechanisms, such as secret sharing and cold storage. The main advantage over previous proposals is that it does not require any prior action from users and works with both single-key and multi-sig accounts. We achieve this via a 3-phase Commit()→Reveal()→Claim()−or−Challenge() smart contract that enables accessing funds of addresses for which the spending key is not available. We provide an analysis of the threat and incentive models and formalize the concept of reactive KEy-Loss Protection (KELP).}, author = {Blackshear, Sam and Chalkias, Konstantinos and Chatzigiannis, Panagiotis and Faizullabhoy, Riyaz and Khaburzaniya, Irakliy and Kokoris Kogias, Eleftherios and Lind, Joshua and Wong, David and Zakian, Tim}, booktitle = {FC 2021 Workshops}, isbn = {978-3-6626-3957-3}, issn = {1611-3349}, location = {Virtual}, pages = {431--450}, publisher = {Springer Nature}, title = {{Reactive key-loss protection in blockchains}}, doi = {10.1007/978-3-662-63958-0_34}, volume = {12676}, year = {2021}, } @inproceedings{10108, abstract = {We argue that the time is ripe to investigate differential monitoring, in which the specification of a program's behavior is implicitly given by a second program implementing the same informal specification. Similar ideas have been proposed before, and are currently implemented in restricted form for testing and specialized run-time analyses, aspects of which we combine. We discuss the challenges of implementing differential monitoring as a general-purpose, black-box run-time monitoring framework, and present promising results of a preliminary implementation, showing low monitoring overheads for diverse programs.}, author = {Mühlböck, Fabian and Henzinger, Thomas A}, booktitle = {International Conference on Runtime Verification}, isbn = {978-3-030-88493-2}, issn = {1611-3349}, keywords = {run-time verification, software engineering, implicit specification}, location = {Virtual}, pages = {231--243}, publisher = {Springer Nature}, title = {{Differential monitoring}}, doi = {10.1007/978-3-030-88494-9_12}, volume = {12974}, year = {2021}, } @inproceedings{10325, abstract = {Since the inception of Bitcoin, a plethora of distributed ledgers differing in design and purpose has been created. While, by design, blockchains provide no means to securely communicate with external systems, numerous attempts towards trustless cross-chain communication have been proposed over the years. Today, cross-chain communication (CCC) plays a fundamental role in cryptocurrency exchanges, scalability efforts via sharding, extension of existing systems through sidechains, and bootstrapping of new blockchains.
Unfortunately, existing proposals are designed ad-hoc for specific use-cases, making it hard to gain confidence in their correctness and composability. We provide the first systematic exposition of cross-chain communication protocols. We formalize the underlying research problem and show that CCC is impossible without a trusted third party, contrary to common beliefs in the blockchain community. With this result in mind, we develop a framework to design new and evaluate existing CCC protocols, focusing on the inherent trust assumptions thereof, and derive a classification covering the field of cross-chain communication to date. We conclude by discussing open challenges for CCC research and the implications of interoperability for the security and privacy of blockchains.}, author = {Zamyatin, Alexei and Al-Bassam, Mustafa and Zindros, Dionysis and Kokoris Kogias, Eleftherios and Moreno-Sanchez, Pedro and Kiayias, Aggelos and Knottenbelt, William J.}, booktitle = {25th International Conference on Financial Cryptography and Data Security}, isbn = {9-783-6626-4330-3}, issn = {1611-3349}, location = {Virtual}, pages = {3--36}, publisher = {Springer Nature}, title = {{SoK: Communication across distributed ledgers}}, doi = {10.1007/978-3-662-64331-0_1}, volume = {12675}, year = {2021}, } @inproceedings{10324, abstract = {Off-chain protocols (channels) are a promising solution to the scalability and privacy challenges of blockchain payments. Current proposals, however, require synchrony assumptions to preserve the safety of a channel, leaking to an adversary the exact amount of time needed to control the network for a successful attack. In this paper, we introduce Brick, the first payment channel that remains secure under network asynchrony and concurrently provides correct incentives. The core idea is to incorporate the conflict resolution process within the channel by introducing a rational committee of external parties, called wardens. Hence, if a party wants to close a channel unilaterally, it can get the committee’s approval only for the last valid state. Additionally, Brick provides sub-second latency because it does not employ heavy-weight consensus. Instead, Brick uses consistent broadcast to announce updates and close the channel, a light-weight abstraction that is powerful enough to preserve safety and liveness for any rational parties. We formally define and prove for Brick the properties a payment channel construction should fulfill. We also design incentives for Brick such that honest and rational behavior aligns. Finally, we provide a reference implementation of the smart contracts in Solidity.}, author = {Avarikioti, Zeta and Kokoris Kogias, Eleftherios and Wattenhofer, Roger and Zindros, Dionysis}, booktitle = {25th International Conference on Financial Cryptography and Data Security}, isbn = {9-783-6626-4330-3}, issn = {1611-3349}, location = {Virtual}, pages = {209--230}, publisher = {Springer Nature}, title = {{Brick: Asynchronous incentive-compatible payment channels}}, doi = {10.1007/978-3-662-64331-0_11}, volume = {12675}, year = {2021}, } @inproceedings{10407, abstract = {Digital hardware Trojans are integrated circuits whose implementation differs from the specification in an arbitrary and malicious way. For example, the circuit can differ from its specified input/output behavior after some fixed number of queries (known as “time bombs”) or on some particular input (known as “cheat codes”).
To detect such Trojans, countermeasures using multiparty computation (MPC) or verifiable computation (VC) have been proposed. On a high level, to realize a circuit with specification F one has more sophisticated circuits F⋄ manufactured (where F⋄ specifies an MPC or VC of F), and then embeds these F⋄’s into a master circuit, which must be trusted but is relatively simple compared to F. Those solutions impose a significant overhead, as F⋄ is much more complex than F; moreover, the master circuits are not exactly trivial. In this work, we show that in restricted settings, where F has no evolving state and is queried on independent inputs, we can achieve a relaxed security notion using very simple constructions. In particular, we do not change the specification of the circuit at all (i.e., F=F⋄). Moreover, the master circuit basically just queries a subset of its manufactured circuits and checks if their answers are all the same. The security we achieve guarantees that, if the manufactured circuits are initially tested on up to T inputs, the master circuit will catch Trojans that try to deviate on significantly more than a 1/T fraction of the inputs. This bound is optimal for the type of construction considered, and we provably achieve it using a construction where 12 instantiations of F need to be embedded into the master. We also discuss an extremely simple construction with just 2 instantiations for which we conjecture that it already achieves the optimal bound.}, author = {Chakraborty, Suvradip and Dziembowski, Stefan and Gałązka, Małgorzata and Lizurej, Tomasz and Pietrzak, Krzysztof Z and Yeo, Michelle X}, isbn = {9-783-0309-0452-4}, issn = {1611-3349}, location = {Raleigh, NC, United States}, pages = {397--428}, publisher = {Springer Nature}, title = {{Trojan-resilience without cryptography}}, doi = {10.1007/978-3-030-90453-1_14}, volume = {13043}, year = {2021}, } @inproceedings{10408, abstract = {Key trees are often the best solution in terms of transmission cost and storage requirements for managing keys in a setting where a group needs to share a secret key, while being able to efficiently rotate the key material of users (in order to recover from a potential compromise, or to add or remove users). Applications include multicast encryption protocols like LKH (Logical Key Hierarchies) or group messaging like the current IETF proposal TreeKEM. A key tree is a (typically balanced) binary tree, where each node is identified with a key: leaf nodes hold users’ secret keys while the root is the shared group key. For a group of size N, each user just holds log(N) keys (the keys on the path from its leaf to the root) and its entire key material can be rotated by broadcasting 2log(N) ciphertexts (encrypting each fresh key on the path under the keys of its parents). In this work we consider the natural setting where we have many groups with partially overlapping sets of users, and ask if we can find solutions where the cost of rotating a key is better than in the trivial one where we have a separate key tree for each group. We show that in an asymptotic setting (where the number m of groups is fixed while the number N of users grows) there exist more general key graphs whose cost converges to the cost of a single group, thus saving a factor linear in the number of groups over the trivial solution. As our asymptotic “solution” converges very slowly and performs poorly on concrete examples, we propose an algorithm that uses a natural heuristic to compute a key graph for any given group structure.
Our algorithm combines two greedy algorithms, and is thus very efficient: it first converts the group structure into a “lattice graph”, which is then turned into a key graph by repeatedly applying the algorithm for constructing a Huffman code. To better understand how far our proposal is from an optimal solution, we prove lower bounds on the update cost of continuous group-key agreement and multicast encryption in a symbolic model admitting (asymmetric) encryption, pseudorandom generators, and secret sharing as building blocks.}, author = {Alwen, Joel F and Auerbach, Benedikt and Baig, Mirza Ahad and Cueto Noval, Miguel and Klein, Karen and Pascual Perez, Guillermo and Pietrzak, Krzysztof Z and Walter, Michael}, booktitle = {19th International Conference}, isbn = {9-783-0309-0455-5}, issn = {1611-3349}, location = {Raleigh, NC, United States}, pages = {222--253}, publisher = {Springer Nature}, title = {{Grafting key trees: Efficient key management for overlapping groups}}, doi = {10.1007/978-3-030-90456-2_8}, volume = {13044}, year = {2021}, } @inproceedings{10409, abstract = {We show that Yao’s garbling scheme is adaptively indistinguishable for the class of Boolean circuits of size S and treewidth w with only an S^(O(w)) loss in security. For instance, circuits with constant treewidth are as a result adaptively indistinguishable with only a polynomial loss. This (partially) complements a negative result of Applebaum et al. (Crypto 2013), which showed (assuming one-way functions) that Yao’s garbling scheme cannot be adaptively simulatable. As main technical contributions, we introduce a new pebble game that abstracts out our security reduction and then present a pebbling strategy for this game where the number of pebbles used is roughly O(δ·w·log(S)), δ being the fan-out of the circuit. The design of the strategy relies on separators, a graph-theoretic notion with connections to circuit complexity.}, author = {Kamath Hosdurg, Chethan and Klein, Karen and Pietrzak, Krzysztof Z}, booktitle = {19th International Conference}, isbn = {9-783-0309-0452-4}, issn = {1611-3349}, location = {Raleigh, NC, United States}, pages = {486--517}, publisher = {Springer Nature}, title = {{On treewidth, separators and Yao’s garbling}}, doi = {10.1007/978-3-030-90453-1_17}, volume = {13043}, year = {2021}, } @inproceedings{10609, abstract = {We study multi-party computation (MPC) in the setting of subversion, where the adversary tampers with the machines of honest parties. Our goal is to construct actively secure MPC protocols where parties are corrupted adaptively by an adversary (as in the standard adaptive security setting), and in addition, honest parties’ machines are compromised.
The idea of reverse firewalls (RF) was introduced at EUROCRYPT’15 by Mironov and Stephens-Davidowitz as an approach to protecting protocols against corruption of honest parties’ devices. Intuitively, an RF for a party P is an external entity that sits between P and the outside world and whose role is to sanitize P’s incoming and outgoing messages in the face of subversion of P’s machine. Mironov and Stephens-Davidowitz constructed a protocol for passively-secure two-party computation. At CRYPTO’20, Chakraborty, Dziembowski and Nielsen constructed a protocol for secure computation with firewalls that improved on this result, both by extending it to a multi-party computation protocol and by considering active security in the presence of static corruptions. In this paper, we initiate the study of RF for MPC in the adaptive setting. We put forward a definition for adaptively secure MPC in the reverse firewall setting, explore relationships among the security notions, and then construct reverse firewalls for MPC in this stronger setting of adaptive security. We also resolve the open question of Chakraborty, Dziembowski and Nielsen by removing the need for a trusted setup in constructing RF for MPC. Towards this end, we construct reverse firewalls for adaptively secure augmented coin tossing and adaptively secure zero-knowledge protocols and obtain a constant-round adaptively secure MPC protocol in the reverse firewall setting without setup. Along the way, we propose a new multi-party adaptively secure coin tossing protocol in the plain model, which is of independent interest.}, author = {Chakraborty, Suvradip and Ganesh, Chaya and Pancholi, Mahak and Sarkar, Pratik}, booktitle = {27th International Conference on the Theory and Application of Cryptology and Information Security}, isbn = {978-3-030-92074-6}, issn = {1611-3349}, location = {Virtual, Singapore}, pages = {335--364}, publisher = {Springer Nature}, title = {{Reverse firewalls for adaptively secure MPC without setup}}, doi = {10.1007/978-3-030-92075-3_12}, volume = {13091}, year = {2021}, } @inproceedings{9987, abstract = {Stateless model checking (SMC) is one of the standard approaches to the verification of concurrent programs. As scheduling non-determinism creates exponentially large spaces of thread interleavings, SMC attempts to partition this space into equivalence classes and explore only a few representatives from each class. The efficiency of this approach depends on two factors: (a) the coarseness of the partitioning, and (b) the time to generate representatives in each class. For this reason, the search for coarse partitionings that are efficiently explorable is an active research challenge. In this work we present RVF-SMC, a new SMC algorithm that uses a novel reads-value-from (RVF) partitioning. Intuitively, two interleavings are deemed equivalent if they agree on the value obtained in each read event, and read events induce consistent causal orderings between them. The RVF partitioning is provably coarser than recent approaches based on Mazurkiewicz and “reads-from” partitionings. Our experimental evaluation reveals that RVF is quite often a very effective equivalence, as the underlying partitioning is exponentially coarser than other approaches.
Moreover, RVF-SMC generates representatives very efficiently, as the reduction in the partitioning is often met with significant speed-ups in the model checking task.}, author = {Agarwal, Pratyush and Chatterjee, Krishnendu and Pathak, Shreya and Pavlogiannis, Andreas and Toman, Viktor}, booktitle = {33rd International Conference on Computer-Aided Verification}, isbn = {978-3-030-81684-1}, issn = {1611-3349}, location = {Virtual}, pages = {341--366}, publisher = {Springer Nature}, title = {{Stateless model checking under a reads-value-from equivalence}}, doi = {10.1007/978-3-030-81685-8_16}, volume = {12759}, year = {2021}, } @inproceedings{10041, abstract = {Yao’s garbling scheme is one of the most fundamental cryptographic constructions. Lindell and Pinkas (Journal of Cryptology 2009) gave a formal proof of security in the selective setting where the adversary chooses the challenge inputs before seeing the garbled circuit assuming secure symmetric-key encryption (and hence one-way functions). This was followed by results, both positive and negative, concerning its security in the stronger adaptive setting. Applebaum et al. (Crypto 2013) showed that it cannot satisfy adaptive security as is, due to a simple incompressibility argument. Jafargholi and Wichs (TCC 2017) considered a natural adaptation of Yao’s scheme (where the output mapping is sent in the online phase, together with the garbled input) that circumvents this negative result, and proved that it is adaptively secure, at least for shallow circuits. In particular, they showed that for the class of circuits of depth δ, the loss in security is at most exponential in δ. The above results all concern the simulation-based notion of security. In this work, we show that the upper bound of Jafargholi and Wichs is basically optimal in a strong sense. As our main result, we show that there exists a family of Boolean circuits, one for each depth δ∈N, such that any black-box reduction proving the adaptive indistinguishability of the natural adaptation of Yao’s scheme from any symmetric-key encryption has to lose a factor that is exponential in √δ. Since indistinguishability is a weaker notion than simulation, our bound also applies to adaptive simulation. To establish our results, we build on the recent approach of Kamath et al. (Eprint 2021), which uses pebbling lower bounds in conjunction with oracle separations to prove fine-grained lower bounds on loss in cryptographic security.}, author = {Kamath Hosdurg, Chethan and Klein, Karen and Pietrzak, Krzysztof Z and Wichs, Daniel}, booktitle = {41st Annual International Cryptology Conference, Part II}, isbn = {978-3-030-84244-4}, issn = {1611-3349}, location = {Virtual}, pages = {486--515}, publisher = {Springer Nature}, title = {{Limits on the Adaptive Security of Yao’s Garbling}}, doi = {10.1007/978-3-030-84245-1_17}, volume = {12826}, year = {2021}, } @inproceedings{9227, abstract = {In the multiway cut problem we are given a weighted undirected graph G=(V,E) and a set T⊆V of k terminals. The goal is to find a minimum weight set of edges E′⊆E with the property that by removing E′ from G all the terminals become disconnected. In this paper we present a simple local search approximation algorithm for the multiway cut problem with approximation ratio 2−2/k. We present an experimental evaluation of the performance of our local search algorithm and show that it greatly outperforms the isolation heuristic of Dahlhaus et al.
and that it performs similarly to the much more complex algorithms of Calinescu et al., Sharma and Vondrak, and Buchbinder et al., which achieve the currently best known approximation ratios for this problem.}, author = {Bloch-Hansen, Andrew and Samei, Nasim and Solis-Oba, Roberto}, booktitle = {Conference on Algorithms and Discrete Applied Mathematics}, isbn = {9783030678982}, issn = {1611-3349}, location = {Rupnagar, India}, pages = {346--358}, publisher = {Springer Nature}, title = {{Experimental evaluation of a local search approximation algorithm for the multiway cut problem}}, doi = {10.1007/978-3-030-67899-9_28}, volume = {12601}, year = {2021}, } @inproceedings{10410, abstract = {The security of cryptographic primitives and protocols against adversaries that are allowed to make adaptive choices (e.g., which parties to corrupt or which queries to make) is notoriously difficult to establish. A broad theoretical framework was introduced by Jafargholi et al. [Crypto’17] for this purpose. In this paper we initiate the study of lower bounds on loss in adaptive security for certain cryptographic protocols considered in the framework. We prove lower bounds that almost match the upper bounds (proven using the framework) for proxy re-encryption, prefix-constrained PRFs and generalized selective decryption, a security game that captures the security of certain group messaging and broadcast encryption schemes. Those primitives have in common that their security game involves an underlying graph that can be adaptively built by the adversary. Some of our lower bounds only apply to a restricted class of black-box reductions which we term “oblivious” (the existing upper bounds are of this restricted type), some apply to the broader but still restricted class of non-rewinding reductions, while our lower bound for proxy re-encryption applies to all black-box reductions. The fact that some of our lower bounds seem to crucially rely on obliviousness or at least a non-rewinding reduction hints at the exciting possibility that the existing upper bounds can be improved by using more sophisticated reductions. Our main conceptual contribution is a two-player multi-stage game called the Builder-Pebbler Game. We can translate bounds on the winning probabilities for various instantiations of this game into cryptographic lower bounds for the above-mentioned primitives using oracle separation techniques.}, author = {Kamath Hosdurg, Chethan and Klein, Karen and Pietrzak, Krzysztof Z and Walter, Michael}, booktitle = {19th International Conference}, isbn = {9-783-0309-0452-4}, issn = {1611-3349}, location = {Raleigh, NC, United States}, pages = {550--581}, publisher = {Springer Nature}, title = {{The cost of adaptivity in security games on graphs}}, doi = {10.1007/978-3-030-90453-1_19}, volume = {13043}, year = {2021}, } @inproceedings{10414, abstract = {We consider the almost-sure (a.s.) termination problem for probabilistic programs, which are a stochastic extension of classical imperative programs. Lexicographic ranking functions provide a sound and practical approach for proving termination of non-probabilistic programs, and their extension to probabilistic programs is achieved via lexicographic ranking supermartingales (LexRSMs). However, LexRSMs introduced in the previous work have a limitation that impedes their automation: all of their components have to be non-negative in all reachable states. This might result in a LexRSM not existing even for simple terminating programs.
Our contributions are twofold: First, we introduce a generalization of LexRSMs which allows for some components to be negative. This standard feature of non-probabilistic termination proofs was hitherto not known to be sound in the probabilistic setting, as the soundness proof requires a careful analysis of the underlying stochastic process. Second, we present polynomial-time algorithms using our generalized LexRSMs for proving a.s. termination in broad classes of linear-arithmetic programs.}, author = {Chatterjee, Krishnendu and Goharshady, Ehsan Kafshdar and Novotný, Petr and Zárevúcky, Jiří and Zikelic, Dorde}, booktitle = {24th International Symposium on Formal Methods}, isbn = {9-783-0309-0869-0}, issn = {1611-3349}, location = {Virtual}, pages = {619--639}, publisher = {Springer Nature}, title = {{On lexicographic proof rules for probabilistic termination}}, doi = {10.1007/978-3-030-90870-6_33}, volume = {13047}, year = {2021}, } @inproceedings{10206, abstract = {Neural-network classifiers achieve high accuracy when predicting the class of an input that they were trained to identify. Maintaining this accuracy in dynamic environments, where inputs frequently fall outside the fixed set of initially known classes, remains a challenge. The typical approach is to detect inputs from novel classes and retrain the classifier on an augmented dataset. However, not only the classifier but also the detection mechanism needs to adapt in order to distinguish between newly learned and yet unknown input classes. To address this challenge, we introduce an algorithmic framework for active monitoring of a neural network. A monitor wrapped in our framework operates in parallel with the neural network and interacts with a human user via a series of interpretable labeling queries for incremental adaptation. In addition, we propose an adaptive quantitative monitor to improve precision. An experimental evaluation on a diverse set of benchmarks with varying numbers of classes confirms the benefits of our active monitoring framework in dynamic scenarios.}, author = {Lukina, Anna and Schilling, Christian and Henzinger, Thomas A}, booktitle = {21st International Conference on Runtime Verification}, isbn = {9-783-0308-8493-2}, issn = {1611-3349}, keywords = {monitoring, neural networks, novelty detection}, location = {Virtual}, pages = {42--61}, publisher = {Springer Nature}, title = {{Into the unknown: active monitoring of neural networks}}, doi = {10.1007/978-3-030-88494-9_3}, volume = {12974}, year = {2021}, } @inproceedings{9299, abstract = {We call a multigraph non-homotopic if it can be drawn in the plane in such a way that no two edges connecting the same pair of vertices can be continuously transformed into each other without passing through a vertex, and no loop can be shrunk to its end-vertex in the same way. It is easy to see that a non-homotopic multigraph on n>1 vertices can have arbitrarily many edges. We prove that the number of crossings between the edges of a non-homotopic multigraph with n vertices and m>4n edges is larger than c·m²/n for some constant c>0, and that this bound is tight up to a polylogarithmic factor.
We also show that the lower bound is not asymptotically sharp as n is fixed and m → ∞.}, author = {Pach, János and Tardos, Gábor and Tóth, Géza}, booktitle = {28th International Symposium on Graph Drawing and Network Visualization}, isbn = {9783030687656}, issn = {1611-3349}, location = {Virtual, Online}, pages = {359--371}, publisher = {Springer Nature}, title = {{Crossings between non-homotopic edges}}, doi = {10.1007/978-3-030-68766-3_28}, volume = {12590}, year = {2020}, } @inproceedings{7966, abstract = {For 1≤m≤n, we consider a natural m-out-of-n multi-instance scenario for a public-key encryption (PKE) scheme. An adversary, given n independent instances of PKE, wins if he breaks at least m out of the n instances. In this work, we are interested in the scaling factor of PKE schemes, SF, which measures how well the difficulty of breaking m out of the n instances scales in m. That is, a scaling factor SF=ℓ indicates that breaking m out of n instances is at least ℓ times more difficult than breaking one single instance. A PKE scheme with small scaling factor hence provides an ideal target for mass surveillance. In fact, the Logjam attack (CCS 2015) implicitly exploited, among other things, an almost constant scaling factor of ElGamal over finite fields (with shared group parameters). For Hashed ElGamal over elliptic curves, we use the generic group model to argue that the scaling factor depends on the scheme's granularity. In low granularity, meaning each public key contains its independent group parameter, the scheme has optimal scaling factor SF=m; in medium and high granularity, meaning all public keys share the same group parameter, the scheme still has a reasonable scaling factor SF=√m. Our findings underline that instantiating ElGamal over elliptic curves should be preferred to finite fields in a multi-instance scenario. As our main technical contribution, we derive new generic-group lower bounds of Ω(√(mp)) on the difficulty of solving both the m-out-of-n Gap Discrete Logarithm and the m-out-of-n Gap Computational Diffie-Hellman problem over groups of prime order p, extending a recent result by Yun (EUROCRYPT 2015). We establish the lower bound by studying the hardness of a related computational problem which we call the search-by-hypersurface problem.}, author = {Auerbach, Benedikt and Giacon, Federico and Kiltz, Eike}, booktitle = {Advances in Cryptology – EUROCRYPT 2020}, isbn = {9783030457266}, issn = {1611-3349}, pages = {475--506}, publisher = {Springer Nature}, title = {{Everybody’s a target: Scalability in public-key encryption}}, doi = {10.1007/978-3-030-45727-3_16}, volume = {12107}, year = {2020}, } @inproceedings{8623, abstract = {We introduce the monitoring of trace properties under assumptions. An assumption limits the space of possible traces that the monitor may encounter. An assumption may result from knowledge about the system that is being monitored, about the environment, or about another, connected monitor. We define monitorability under assumptions and study its theoretical properties. In particular, we show that for every assumption A, the Boolean combinations of properties that are safe or co-safe relative to A are monitorable under A.
We give several examples and constructions showing how an assumption can make a non-monitorable property monitorable, and how an assumption can make a monitorable property monitorable with fewer resources, such as integer registers.}, author = {Henzinger, Thomas A and Sarac, Naci E}, booktitle = {Runtime Verification}, isbn = {9783030605070}, issn = {1611-3349}, location = {Los Angeles, CA, United States}, pages = {3--18}, publisher = {Springer Nature}, title = {{Monitorability under assumptions}}, doi = {10.1007/978-3-030-60508-7_1}, volume = {12399}, year = {2020}, } @inproceedings{8732, abstract = {A simple drawing D(G) of a graph G is one where each pair of edges shares at most one point: either a common endpoint or a proper crossing. An edge e in the complement of G can be inserted into D(G) if there exists a simple drawing of G+e extending D(G). As a result of Levi’s Enlargement Lemma, if a drawing is rectilinear (pseudolinear), that is, the edges can be extended into an arrangement of lines (pseudolines), then any edge in the complement of G can be inserted. In contrast, we show that it is NP-complete to decide whether one edge can be inserted into a simple drawing. This remains true even if we assume that the drawing is pseudocircular, that is, the edges can be extended to an arrangement of pseudocircles. On the positive side, we show that, given an arrangement of pseudocircles A and a pseudosegment σ, it can be decided in polynomial time whether there exists a pseudocircle Φσ extending σ for which A∪{Φσ} is again an arrangement of pseudocircles.}, author = {Arroyo Guevara, Alan M and Klute, Fabian and Parada, Irene and Seidel, Raimund and Vogtenhuber, Birgit and Wiedera, Tilo}, booktitle = {Graph-Theoretic Concepts in Computer Science}, isbn = {9783030604394}, issn = {1611-3349}, location = {Leeds, United Kingdom}, pages = {325--338}, publisher = {Springer Nature}, title = {{Inserting one edge into a simple drawing is hard}}, doi = {10.1007/978-3-030-60440-0_26}, volume = {12301}, year = {2020}, } @inbook{10865, abstract = {We introduce the notion of Witness Maps as a cryptographic notion of a proof system. A Unique Witness Map (UWM) deterministically maps all witnesses for an NP statement to a single representative witness, resulting in a computationally sound, deterministic-prover, non-interactive witness-independent proof system. A relaxation of UWM, called Compact Witness Map (CWM), maps all the witnesses to a small number of witnesses, resulting in a “lossy” deterministic-prover, non-interactive proof-system. We also define a Dual Mode Witness Map (DMWM) which adds an “extractable” mode to a CWM. Our main construction is a DMWM for all NP relations, assuming sub-exponentially secure indistinguishability obfuscation (iO), along with standard cryptographic assumptions. The DMWM construction relies on a CWM and a new primitive called Cumulative All-Lossy-But-One Trapdoor Functions (C-ALBO-TDF), both of which are in turn instantiated based on iO and other primitives. Our instantiation of a CWM is in fact a UWM; in turn, we show that a UWM implies Witness Encryption. Along the way to constructing UWM and C-ALBO-TDF, we also construct, from standard assumptions, Puncturable Digital Signatures and a new primitive called Cumulative Lossy Trapdoor Functions (C-LTDF). The former improves upon a construction of Bellare et al. (Eurocrypt 2016), who relied on sub-exponentially secure iO and sub-exponentially secure OWF.
As an application of our constructions, we show how to use a DMWM to construct the first leakage and tamper-resilient signatures with a deterministic signer, thereby solving a decade-old open problem posed by Katz and Vaikuntanathan (Asiacrypt 2009), by Boyle, Segev and Wichs (Eurocrypt 2011), as well as by Faonio and Venturi (Asiacrypt 2016). Our construction achieves the optimal leakage rate of 1−o(1).}, author = {Chakraborty, Suvradip and Prabhakaran, Manoj and Wichs, Daniel}, booktitle = {Public-Key Cryptography}, editor = {Kiayias, A}, isbn = {9783030453732}, issn = {1611-3349}, pages = {220--246}, publisher = {Springer Nature}, title = {{Witness maps and applications}}, doi = {10.1007/978-3-030-45374-9_8}, volume = {12110}, year = {2020}, } @inproceedings{8195, abstract = {This paper presents a foundation for refining concurrent programs with structured control flow. The verification problem is decomposed into subproblems that aid interactive program development, proof reuse, and automation. The formalization in this paper is the basis of a new design and implementation of the Civl verifier.}, author = {Kragl, Bernhard and Qadeer, Shaz and Henzinger, Thomas A}, booktitle = {Computer Aided Verification}, isbn = {9783030532871}, issn = {1611-3349}, pages = {275--298}, publisher = {Springer Nature}, title = {{Refinement for structured concurrent programs}}, doi = {10.1007/978-3-030-53288-8_14}, volume = {12224}, year = {2020}, } @inproceedings{8728, abstract = {Discrete-time Markov Chains (MCs) and Markov Decision Processes (MDPs) are two standard formalisms in system analysis. Their main associated quantitative objectives are hitting probabilities, discounted sum, and mean payoff. Although there are many techniques for computing these objectives in general MCs/MDPs, they have not been thoroughly studied in terms of parameterized algorithms, particularly when treewidth is used as the parameter. This is in sharp contrast to qualitative objectives for MCs, MDPs and graph games, for which treewidth-based algorithms yield significant complexity improvements. In this work, we show that treewidth can also be used to obtain faster algorithms for the quantitative problems. For an MC with n states and m transitions, we show that each of the classical quantitative objectives can be computed in O((n+m)⋅t²) time, given a tree decomposition of the MC with width t. Our results also imply a bound of O(κ⋅(n+m)⋅t²) for each objective on MDPs, where κ is the number of strategy-iteration refinements required for the given input and objective. Finally, we experimentally evaluate our new algorithms on low-treewidth MCs and MDPs obtained from the DaCapo benchmark suite. Our experiments show that on low-treewidth MCs and MDPs, our algorithms outperform existing well-established methods by one or more orders of magnitude.}, author = {Asadi, Ali and Chatterjee, Krishnendu and Goharshady, Amir Kafshdar and Mohammadi, Kiarash and Pavlogiannis, Andreas}, booktitle = {Automated Technology for Verification and Analysis}, isbn = {9783030591519}, issn = {1611-3349}, location = {Hanoi, Vietnam}, pages = {253--270}, publisher = {Springer Nature}, title = {{Faster algorithms for quantitative analysis of MCs and MDPs with small treewidth}}, doi = {10.1007/978-3-030-59152-6_14}, volume = {12302}, year = {2020}, } @inproceedings{7147, abstract = {The expression of a gene is characterised by its transcription factors and the function processing them.
If the transcription factors are not affected by gene products, the regulating function is often represented as a combinational logic circuit, where the outputs (product) are determined by current input values (transcription factors) only, and are hence independent of their relative arrival times. However, the simultaneous arrival of transcription factors (TFs) in genetic circuits is a strong assumption, given that the processes of transcription and translation of a gene into a protein introduce intrinsic time delays and that there is no global synchronisation among the arrival times of different molecular species at molecular targets. In this paper, we construct an experimentally implementable genetic circuit with two inputs and a single output, such that, in the presence of small delays in input arrival, the circuit exhibits qualitatively distinct observable phenotypes. In particular, these phenotypes are long-lived transients: they all converge to a single value, but so slowly that they seem stable for an extended time period, longer than the typical experiment duration. We used a rule-based language to prototype our circuit, and we implemented a search for the parameter combinations that give rise to the phenotypes of interest. The behaviour of our prototype circuit has wide implications. First, it suggests that GRNs can exploit event timing to create phenotypes. Second, it opens the possibility that GRNs are using event timing to react to stimuli and memorise events, without explicit feedback in regulation. From the modelling perspective, our prototype circuit demonstrates the critical importance of analysing the transient dynamics at the promoter binding sites of the DNA, before applying rapid equilibrium assumptions.}, author = {Guet, Calin C and Henzinger, Thomas A and Igler, Claudia and Petrov, Tatjana and Sezgin, Ali}, booktitle = {17th International Conference on Computational Methods in Systems Biology}, isbn = {9783030313036}, issn = {1611-3349}, location = {Trieste, Italy}, pages = {155--187}, publisher = {Springer Nature}, title = {{Transient memory in gene regulation}}, doi = {10.1007/978-3-030-31304-3_9}, volume = {11773}, year = {2019}, } @inproceedings{7228, abstract = {Traditional concurrent programming involves manipulating shared mutable state. Alternatives to this programming style are communicating sequential processes (CSP) and actor models, which share data via explicit communication. These models have been known for almost half a century, and have recently started to gain significant traction among modern programming languages. The common abstraction for communication between several processes is the channel. Although channels are similar to producer-consumer data structures, they have different semantics and support additional operations, such as the select expression. Despite their growing popularity, most known implementations of channels use lock-based data structures and can be rather inefficient. In this paper, we present the first efficient lock-free algorithm for implementing a communication channel for CSP programming. We provide implementations and experimental results in the Kotlin and Go programming languages. Our new algorithm outperforms existing implementations on many workloads, while providing a non-blocking progress guarantee. Our design can serve as an example of how to construct general communication data structures for CSP and actor models.
}, author = {Koval, Nikita and Alistarh, Dan-Adrian and Elizarov, Roman}, booktitle = {25th Anniversary of Euro-Par}, isbn = {978-3-0302-9399-4}, issn = {1611-3349}, location = {Göttingen, Germany}, pages = {317--333}, publisher = {Springer Nature}, title = {{Scalable FIFO channels for programming via communicating sequential processes}}, doi = {10.1007/978-3-030-29400-7_23}, volume = {11725}, year = {2019}, } @inproceedings{7231, abstract = {Piecewise Barrier Tubes (PBT) is a new technique for flowpipe overapproximation for nonlinear systems with polynomial dynamics, which leverages a combination of barrier certificates. PBT has advantages over traditional time-step-based methods in dealing with those nonlinear dynamical systems in which there is a large difference in speed between trajectories, producing an overapproximation that is time-independent. However, the existing approach for PBT is not efficient due to the application of interval methods for enclosure-box computation, and it can only deal with continuous dynamical systems without uncertainty. In this paper, we extend the approach with the ability to handle both continuous and hybrid dynamical systems with uncertainty that can reside in parameters and/or noise. We also improve the efficiency of the method significantly, by avoiding the use of interval-based methods for the enclosure-box computation without losing soundness. We have developed a C++ prototype implementing the proposed approach and we evaluate it on several benchmarks. The experiments show that our approach is more efficient and precise than other methods in the literature.}, author = {Kong, Hui and Bartocci, Ezio and Jiang, Yu and Henzinger, Thomas A}, booktitle = {17th International Conference on Formal Modeling and Analysis of Timed Systems}, isbn = {978-3-0302-9661-2}, issn = {1611-3349}, location = {Amsterdam, The Netherlands}, pages = {123--141}, publisher = {Springer Nature}, title = {{Piecewise robust barrier tubes for nonlinear hybrid systems with uncertainty}}, doi = {10.1007/978-3-030-29662-9_8}, volume = {11750}, year = {2019}, } @inproceedings{7230, abstract = {Simple drawings of graphs are those in which each pair of edges shares at most one point, either a common endpoint or a proper crossing. In this paper we study the problem of extending a simple drawing D(G) of a graph G by inserting a set of edges from the complement of G into D(G) such that the result is a simple drawing. In the context of rectilinear drawings, the problem is trivial. For pseudolinear drawings, the existence of such an extension follows from Levi’s enlargement lemma. In contrast, we prove that deciding if a given set of edges can be inserted into a simple drawing is NP-complete. Moreover, we show that the maximization version of the problem is APX-hard.
We also present a polynomial-time algorithm for deciding whether one edge uv can be inserted into D(G) when {u,v} is a dominating set for the graph G.}, author = {Arroyo Guevara, Alan M and Derka, Martin and Parada, Irene}, booktitle = {27th International Symposium on Graph Drawing and Network Visualization}, isbn = {978-3-0303-5801-3}, issn = {1611-3349}, location = {Prague, Czech Republic}, pages = {230--243}, publisher = {Springer Nature}, title = {{Extending simple drawings}}, doi = {10.1007/978-3-030-35802-0_18}, volume = {11904}, year = {2019}, } @inproceedings{7232, abstract = {We present Mixed-time Signal Temporal Logic (STL-MX), a specification formalism which extends STL by capturing the discrete/continuous time duality found in many cyber-physical systems (CPS), as well as mixed-signal electronic designs. In STL-MX, properties of components with continuous dynamics are expressed in STL, while specifications of components with discrete dynamics are written in LTL. To combine the two layers, we evaluate formulas on two traces, discrete- and continuous-time, and introduce two interface operators that map signals, properties and their satisfaction signals across the two time domains. We show that STL-MX has the expressive power of STL supplemented with an implicit T-periodic clock signal. We develop and implement an algorithm for monitoring STL-MX formulas and illustrate the approach using a mixed-signal example.}, author = {Ferrere, Thomas and Maler, Oded and Nickovic, Dejan}, booktitle = {17th International Conference on Formal Modeling and Analysis of Timed Systems}, isbn = {978-3-0302-9661-2}, issn = {1611-3349}, location = {Amsterdam, The Netherlands}, pages = {59--75}, publisher = {Springer Nature}, title = {{Mixed-time signal temporal logic}}, doi = {10.1007/978-3-030-29662-9_4}, volume = {11750}, year = {2019}, } @inproceedings{7411, abstract = {Proofs of sequential work (PoSW) are proof systems where a prover, upon receiving a statement χ and a time parameter T, computes a proof ϕ(χ,T) which is efficiently and publicly verifiable. The proof can be computed in T sequential steps, but not much less, even by a malicious party having large parallelism. A PoSW thus serves as a proof that T units of time have passed since χ was received. PoSW were introduced by Mahmoody, Moran and Vadhan [MMV11]; a simple and practical construction was only recently proposed by Cohen and Pietrzak [CP18]. In this work we construct a new simple PoSW in the random permutation model which is almost as simple and efficient as [CP18] but conceptually very different. Whereas the structure underlying [CP18] is a hash tree, our construction is based on skip lists and has the interesting property that computing the PoSW is a reversible computation. The fact that the construction is reversible can potentially be used for new applications like constructing proofs of replication.
We also show how to “embed” the sloth function of Lenstra and Wesolowski [LW17] into our PoSW to get a PoSW where one can additionally verify the correctness of the output much more efficiently than by recomputing it (though recent constructions of “verifiable delay functions” subsume most of the applications this construction was aiming at).}, author = {Abusalah, Hamza M and Kamath Hosdurg, Chethan and Klein, Karen and Pietrzak, Krzysztof Z and Walter, Michael}, booktitle = {Advances in Cryptology – EUROCRYPT 2019}, isbn = {9783030176556}, issn = {1611-3349}, location = {Darmstadt, Germany}, pages = {277--291}, publisher = {Springer International Publishing}, title = {{Reversible proofs of sequential work}}, doi = {10.1007/978-3-030-17656-3_10}, volume = {11477}, year = {2019}, } @inproceedings{6482, abstract = {Computer vision systems for automatic image categorization have become accurate and reliable enough that they can run continuously for days or even years as components of real-world commercial applications. A major open problem in this context, however, is quality control. Good classification performance can only be expected if systems run under the specific conditions, in particular data distributions, that they were trained for. Surprisingly, none of the currently used deep network architectures have a built-in functionality that could detect if a network operates on data from a distribution it was not trained for, so that a warning to the human users could potentially be triggered. In this work, we describe KS(conf), a procedure for detecting such outside-of-specifications (out-of-specs) operation, based on statistical testing of the network outputs. We show by extensive experiments using the ImageNet, AwA2 and DAVIS datasets on a variety of ConvNet architectures that KS(conf) reliably detects out-of-specs situations. It furthermore has a number of properties that make it a promising candidate for practical deployment: it is easy to implement, adds almost no overhead to the system, works with all networks, including pretrained ones, and requires no a priori knowledge of how the data distribution could change.}, author = {Sun, Rémy and Lampert, Christoph}, isbn = {9783030129385}, issn = {1611-3349}, location = {Stuttgart, Germany}, pages = {244--259}, publisher = {Springer Nature}, title = {{KS(conf): A light-weight test if a ConvNet operates outside of its specifications}}, doi = {10.1007/978-3-030-12939-2_18}, volume = {11269}, year = {2019}, } @inproceedings{6164, abstract = {In this paper, we propose an algorithm to build a discrete spherical shell having an integer center and real-valued inner and outer radii on the face-centered cubic (FCC) grid. We address the problem by mapping it to a 2D scenario and building the shell layer by layer on hexagonal grids with additive manufacturing in mind. The layered hexagonal grids get shifted as needed as we move from one layer to another and form the FCC grid in 3D.
However, we restrict our computation strictly to 2D in order to exploit symmetry and keep the computation simple.}, author = {Koshti, Girish and Biswas, Ranita and Largeteau-Skapin, Gaëlle and Zrour, Rita and Andres, Eric and Bhowmick, Partha}, booktitle = {19th International Workshop}, isbn = {978-3-030-05287-4}, issn = {1611-3349}, location = {Porto, Portugal}, pages = {82--96}, publisher = {Springer}, title = {{Sphere construction on the FCC grid interpreted as layered hexagonal grids in 3D}}, doi = {10.1007/978-3-030-05288-1_7}, volume = {11255}, year = {2018}, } @inproceedings{6941, abstract = {Bitcoin has become the most successful cryptocurrency ever deployed, and its most distinctive feature is that it is decentralized. Its underlying protocol (Nakamoto consensus) achieves this by using proof of work, which has the drawback that it causes the consumption of vast amounts of energy to maintain the ledger. Moreover, Bitcoin mining dynamics have become less distributed over time. Towards addressing these issues, we propose SpaceMint, a cryptocurrency based on proofs of space instead of proofs of work. Miners in SpaceMint dedicate disk space rather than computation. We argue that SpaceMint’s design solves or alleviates several of Bitcoin’s issues: most notably, its large energy consumption. SpaceMint also rewards smaller miners fairly according to their contribution to the network, thus incentivizing more distributed participation. This paper adapts proof of space to enable its use in cryptocurrency, studies the attacks that can arise against a Bitcoin-like blockchain that uses proof of space, and proposes a new blockchain format and transaction types to address these attacks. Our prototype shows that initializing 1 TB for mining takes about a day (a one-off setup cost), and miners spend on average just a fraction of a second per block mined. Finally, we provide a game-theoretic analysis modeling SpaceMint as an extensive game (the canonical game-theoretic notion for games that take place over time) and show that this stylized game satisfies a strong equilibrium notion, thereby arguing for SpaceMint’s stability and consensus.}, author = {Park, Sunoo and Kwon, Albert and Fuchsbauer, Georg and Gazi, Peter and Alwen, Joel F and Pietrzak, Krzysztof Z}, booktitle = {22nd International Conference on Financial Cryptography and Data Security}, isbn = {9783662583869}, issn = {1611-3349}, location = {Nieuwpoort, Curacao}, pages = {480--499}, publisher = {Springer Nature}, title = {{SpaceMint: A cryptocurrency based on proofs of space}}, doi = {10.1007/978-3-662-58387-6_26}, volume = {10957}, year = {2018}, } @inproceedings{5801, abstract = {Space filling circles and spheres have various applications in mathematical imaging and physical modeling. In this paper, we first show how the thinnest (i.e., 2-minimal) model of a digital sphere can be augmented to a space filling model by fixing certain “simple voxels” and “filler voxels” associated with it. Based on elementary number-theoretic properties of such voxels, we design an efficient incremental algorithm for the generation of these space filling spheres with successively increasing radii. The novelty of the proposed technique is established further through circular space filling on a 3D digital plane.
As evident from a preliminary set of experimental results, this can be particularly useful for the parallel computation of 3D Voronoi diagrams in the digital space.}, author = {Dwivedi, Shivam and Gupta, Aniket and Roy, Siddhant and Biswas, Ranita and Bhowmick, Partha}, booktitle = {20th IAPR International Conference}, isbn = {978-3-319-66271-8}, issn = {1611-3349}, location = {Vienna, Austria}, pages = {347--359}, publisher = {Springer Nature}, title = {{Fast and Efficient Incremental Algorithms for Circular and Spherical Propagation in Integer Space}}, doi = {10.1007/978-3-319-66272-5_28}, volume = {10502}, year = {2017}, } @inproceedings{5802, abstract = {This paper introduces a definition of digital primitives based on focal points and weighted distances (with positive weights). The proposed definition is applicable to general dimensions and covers in its gamut various regular curves and surfaces like circles, ellipses, digital spheres and hyperspheres, ellipsoids and k-ellipsoids, Cartesian k-ovals, etc. Several interesting properties are presented for this class of digital primitives, such as space partitioning, topological separation, and connectivity properties. To demonstrate further the potential of this new way of defining digital primitives, we propose, as an extension, another class of digital conics defined by a focus-directrix combination.}, author = {Andres, Eric and Biswas, Ranita and Bhowmick, Partha}, booktitle = {20th IAPR International Conference}, isbn = {978-3-319-66271-8}, issn = {1611-3349}, location = {Vienna, Austria}, pages = {388--398}, publisher = {Springer Nature}, title = {{Digital primitives defined by weighted focal set}}, doi = {10.1007/978-3-319-66272-5_31}, volume = {10502}, year = {2017}, } @inproceedings{13160, abstract = {Transforming deterministic ω-automata into deterministic parity automata is traditionally done using variants of appearance records. We present a more efficient variant of this approach, tailored to Rabin automata, and several optimizations applicable to all appearance records. We compare the methods experimentally and find that our method produces smaller automata than previous approaches. Moreover, the experiments demonstrate the potential of our method for LTL synthesis, using LTL-to-Rabin translators. It leads to significantly smaller parity automata when compared to state-of-the-art approaches on complex formulae.}, author = {Kretinsky, Jan and Meggendorfer, Tobias and Waldmann, Clara and Weininger, Maximilian}, booktitle = {Tools and Algorithms for the Construction and Analysis of Systems}, isbn = {9783662545768}, issn = {1611-3349}, location = {Uppsala, Sweden}, pages = {443--460}, publisher = {Springer}, title = {{Index appearance record for transforming Rabin automata into parity automata}}, doi = {10.1007/978-3-662-54577-5_26}, volume = {10205}, year = {2017}, } @inbook{5805, abstract = {Discretization of a sphere in the integer space follows a particular discretization scheme, which, in principle, conforms to some topological model. This eventually gives rise to interesting topological properties of a discrete spherical surface, which need to be investigated for its analytical characterization. This paper presents some novel results on the local topological properties of the naive model of a discrete sphere. They follow from the bijection of each quadraginta octant of a naive sphere with its projection map, called the f-map, on the corresponding functional plane, and from the characterization of certain jumps in the f-map.
As an application, we have shown how these properties can be used in designing an efficient reconstruction algorithm for a naive spherical surface from an input voxel set when it is sparse or noisy.}, author = {Sen, Nabhasmita and Biswas, Ranita and Bhowmick, Partha}, booktitle = {Computational Topology in Image Context}, isbn = {978-3-319-39440-4}, issn = {1611-3349}, location = {Marseille, France}, pages = {253--264}, publisher = {Springer Nature}, title = {{On some local topological properties of naive discrete sphere}}, doi = {10.1007/978-3-319-39441-1_23}, volume = {9667}, year = {2016}, } @inbook{5809, abstract = {A discrete spherical circle is a topologically well-connected 3D circle in the integer space, which belongs to a discrete sphere as well as a discrete plane. It is one of the most important 3D geometric primitives, but it has possibly not yet been studied to the extent it merits. This paper is a maiden exposition of some of its elementary properties, which indicate its profound theoretical prospects in the framework of digital geometry. We have shown how different types of discretization can lead to forbidden and admissible classes, when one attempts to define the discretization of a spherical circle in terms of the intersection between a discrete sphere and a discrete plane. Several fundamental theoretical results have been presented, an algorithm for the construction of discrete spherical circles has been discussed, and some test results have been furnished to demonstrate its practicality and usefulness.}, author = {Biswas, Ranita and Bhowmick, Partha and Brimkov, Valentin E.}, booktitle = {Combinatorial image analysis}, isbn = {978-3-319-26144-7}, issn = {1611-3349}, location = {Kolkata, India}, pages = {86--100}, publisher = {Springer Nature}, title = {{On the connectivity and smoothness of discrete spherical circles}}, doi = {10.1007/978-3-319-26145-4_7}, volume = {9448}, year = {2016}, } @inbook{1094, abstract = {Immunogold labeling of freeze-fracture replicas has recently been used for high-resolution visualization of protein localization in electron microscopy. This method has a higher labeling efficiency than conventional immunogold methods for membrane molecules, allowing precise quantitative measurements. However, one of the limitations of freeze-fracture replica immunolabeling is the difficulty of keeping structural orientation and identifying labeled profiles in complex tissues like the brain. The difficulty is partly due to fragmentation of freeze-fracture replica preparations during labeling procedures and limited morphological clues on the replica surface. To overcome these issues, we introduce here a grid-glued replica method combined with SEM observation. This method allows histological staining before dissolving the tissue and easy handling of replicas during immunogold labeling, and keeps the whole replica surface intact without fragmentation.
The procedure described here is also useful for matched double-replica analysis, allowing further identification of labeled profiles in the corresponding P-face and E-face.}, author = {Harada, Harumi and Shigemoto, Ryuichi}, booktitle = {High-Resolution Imaging of Cellular Proteins}, issn = {1611-3349}, pages = {203--216}, publisher = {Springer}, title = {{Immunogold protein localization on grid-glued freeze-fracture replicas}}, doi = {10.1007/978-1-4939-6352-2_12}, volume = {1474}, year = {2016}, } @inproceedings{10884, abstract = {We revisit the parameterized model checking problem for token-passing systems and specifications in indexed CTL*\X. Emerson and Namjoshi (1995, 2003) have shown that parameterized model checking of indexed CTL*\X in uni-directional token rings can be reduced to checking rings up to some cutoff size. Clarke et al. (2004) have shown a similar result for general topologies and indexed LTL\X, provided processes cannot choose the directions for sending or receiving the token. We unify and substantially extend these results by systematically exploring fragments of indexed CTL*\X with respect to general topologies. For each fragment we establish whether a cutoff exists, and for some concrete topologies, such as rings, cliques and stars, we infer small cutoffs. Finally, we show that the problem becomes undecidable, and thus no cutoffs exist, if processes are allowed to choose the directions in which they send or from which they receive the token.}, author = {Aminof, Benjamin and Jacobs, Swen and Khalimov, Ayrat and Rubin, Sasha}, booktitle = {Verification, Model Checking, and Abstract Interpretation}, isbn = {9783642540127}, issn = {1611-3349}, location = {San Diego, CA, United States}, pages = {262--281}, publisher = {Springer Nature}, title = {{Parameterized model checking of token-passing systems}}, doi = {10.1007/978-3-642-54013-4_15}, volume = {8318}, year = {2014}, } @inproceedings{10892, abstract = {In this paper, we introduce planar matchings on directed pseudo-line arrangements, which yield a planar set of pseudo-line segments such that only matching-partners are adjacent. By translating the planar matching problem into a corresponding stable roommates problem, we show that such matchings always exist. Using our new framework, we establish, for the first time, a complete, rigorous definition of weighted straight skeletons, which are based on a so-called wavefront propagation process. We present a generalized and unified approach to treat structural changes in the wavefront that focuses on the restoration of weak planarity by finding planar matchings.}, author = {Biedl, Therese and Huber, Stefan and Palfrader, Peter}, booktitle = {25th International Symposium, ISAAC 2014}, isbn = {9783319130743}, issn = {1611-3349}, location = {Jeonju, Korea}, pages = {117--127}, publisher = {Springer Nature}, title = {{Planar matchings for weighted straight skeletons}}, doi = {10.1007/978-3-319-13075-0_10}, volume = {8889}, year = {2014}, } @inproceedings{10885, abstract = {Two-player games on graphs provide the theoretical framework for many important problems such as reactive synthesis. While the traditional study of two-player zero-sum games has been extended to multi-player games with several notions of equilibria, these equilibria are decidable only for perfect-information games, whereas several applications require imperfect-information games.
In this paper we propose a new notion of equilibrium, called a doomsday equilibrium, which is a strategy profile such that all players satisfy their own objective, and if any coalition of players deviates and violates even one player's objective, then the objective of every player is violated. We present algorithms and complexity results for deciding the existence of doomsday equilibria for various classes of ω-regular objectives, both for imperfect-information games and for perfect-information games. We provide optimal complexity bounds for imperfect-information games, and in most cases for perfect-information games.}, author = {Chatterjee, Krishnendu and Doyen, Laurent and Filiot, Emmanuel and Raskin, Jean-François}, booktitle = {VMCAI 2014: Verification, Model Checking, and Abstract Interpretation}, isbn = {9783642540127}, issn = {1611-3349}, location = {San Diego, CA, United States}, pages = {78--97}, publisher = {Springer Nature}, title = {{Doomsday equilibria for omega-regular games}}, doi = {10.1007/978-3-642-54013-4_5}, volume = {8318}, year = {2014}, } @inproceedings{10894, abstract = {PHAT is a C++ library for the computation of persistent homology by matrix reduction. We aim for a simple generic design that decouples algorithms from data structures without sacrificing efficiency or user-friendliness. This makes PHAT a versatile platform for experimenting with algorithmic ideas and comparing them to state-of-the-art implementations.}, author = {Bauer, Ulrich and Kerber, Michael and Reininghaus, Jan and Wagner, Hubert}, booktitle = {ICMS 2014: International Congress on Mathematical Software}, isbn = {9783662441985}, issn = {1611-3349}, location = {Seoul, South Korea}, pages = {137--143}, publisher = {Springer Berlin Heidelberg}, title = {{PHAT – Persistent Homology Algorithms Toolbox}}, doi = {10.1007/978-3-662-44199-2_24}, volume = {8592}, year = {2014}, } @inbook{5747, author = {Dragoi, Cezara and Gupta, Ashutosh and Henzinger, Thomas A}, booktitle = {Computer Aided Verification}, isbn = {9783642397981}, issn = {1611-3349}, location = {Saint Petersburg, Russia}, pages = {174--190}, publisher = {Springer Berlin Heidelberg}, title = {{Automatic Linearizability Proofs of Concurrent Objects with Cooperating Updates}}, doi = {10.1007/978-3-642-39799-8_11}, volume = {8044}, year = {2013}, } @inproceedings{10902, abstract = {We consider how to edit strings from a source language so that the edited strings belong to a target language, where the languages are given as deterministic finite automata. Non-streaming (or offline) transducers perform edits given the whole source string. We show that the class of deterministic one-pass transducers with registers, along with increment and min operations, suffices for computing optimal edit distance, whereas the same class of transducers without the min operation is not sufficient. Streaming (or online) transducers perform edits as the letters of the source string are received.
We present a polynomial-time algorithm for the partial-repair problem, which, given a bound α, asks for the construction of a deterministic streaming transducer (if one exists) that ensures that the ‘maximum fraction’ η of the strings of the source language are edited, within cost α, to the target language.}, author = {Chatterjee, Krishnendu and Chaubal, Siddhesh and Rubin, Sasha}, booktitle = {7th International Conference on Language and Automata Theory and Applications}, isbn = {9783642370632}, issn = {1611-3349}, location = {Bilbao, Spain}, pages = {214--225}, publisher = {Springer Nature}, title = {{How to travel between languages}}, doi = {10.1007/978-3-642-37064-9_20}, volume = {7810}, year = {2013}, } @inproceedings{10897, abstract = {Taking images is an efficient way to collect data about the physical world. It can be done fast and in exquisite detail. By definition, image processing is the field that concerns itself with the computation aimed at harnessing the information contained in images [10]. This talk is concerned with topological information. Our main thesis is that persistent homology [5] is a useful method to quantify and summarize topological information, building a bridge that connects algebraic topology with applications. We provide supporting evidence for this thesis by touching upon four technical developments in the overlap between persistent homology and image processing.}, author = {Edelsbrunner, Herbert}, booktitle = {Graph-Based Representations in Pattern Recognition}, isbn = {9783642382208}, issn = {1611-3349}, location = {Vienna, Austria}, pages = {182--183}, publisher = {Springer Nature}, title = {{Persistent homology in image processing}}, doi = {10.1007/978-3-642-38221-5_19}, volume = {7877}, year = {2013}, } @inproceedings{10903, abstract = {We propose a logic-based framework for automated reasoning about sequential programs manipulating singly-linked lists and arrays with unbounded data. We introduce the logic SLAD, which allows combining shape constraints, written in a fragment of Separation Logic, with data and size constraints. We address the problem of checking the entailment between SLAD formulas, which is crucial in performing pre-post condition reasoning. Although this problem is undecidable in general for SLAD, we propose a sound and powerful procedure that is able to solve this problem for a large class of formulas, beyond the capabilities of existing techniques and tools. We prove that this procedure is complete, i.e., it is actually a decision procedure for this problem, for an important fragment of SLAD including known decidable logics. We implemented this procedure and demonstrated its precision and efficiency on a significant benchmark of formulas.}, author = {Bouajjani, Ahmed and Dragoi, Cezara and Enea, Constantin and Sighireanu, Mihaela}, booktitle = {Automated Technology for Verification and Analysis}, isbn = {9783642333859}, issn = {1611-3349}, location = {Thiruvananthapuram, India}, pages = {167--182}, publisher = {Springer}, title = {{Accurate invariant checking for programs manipulating lists and arrays with infinite data}}, doi = {10.1007/978-3-642-33386-6_14}, volume = {7561}, year = {2012}, } @inproceedings{10905, abstract = {Energy games belong to a class of turn-based two-player infinite-duration games played on a weighted directed graph. It is one of the rare and intriguing combinatorial problems that lie in NP ∩ co-NP, but are not known to be in P.
While the existence of polynomial-time algorithms has been a major open problem for decades, there is no algorithm that solves any non-trivial subclass in polynomial time. In this paper, we give several results based on the weight structures of the graph. First, we identify a notion of penalty and present a polynomial-time algorithm when the penalty is large. Our algorithm is the first polynomial-time algorithm on a large class of weighted graphs. This class includes several counterexamples showing that many previous algorithms, such as value iteration and random facet algorithms, require at least sub-exponential time. Our main technique is developing the first non-trivial approximation algorithm and showing how to convert it to an exact algorithm. Moreover, we show that in a practical case in verification where weights are clustered around a constant number of values, the energy game problem can be solved in polynomial time. We also show that the problem is still as hard as in general when the clique-width is bounded or the graph is strongly ergodic, suggesting that restricting graph structures need not help.}, author = {Chatterjee, Krishnendu and Henzinger, Monika H and Krinninger, Sebastian and Nanongkai, Danupon}, booktitle = {Algorithms – ESA 2012}, isbn = {9783642330896}, issn = {1611-3349}, location = {Ljubljana, Slovenia}, pages = {301--312}, publisher = {Springer}, title = {{Polynomial-time algorithms for energy games with special weight structures}}, doi = {10.1007/978-3-642-33090-2_27}, volume = {7501}, year = {2012}, } @inproceedings{10906, abstract = {HSF(C) is a tool that automates verification of safety and liveness properties for C programs. This paper describes the verification approach taken by HSF(C) and provides instructions on how to install and use the tool.}, author = {Grebenshchikov, Sergey and Gupta, Ashutosh and Lopes, Nuno P. and Popeea, Corneliu and Rybalchenko, Andrey}, booktitle = {Tools and Algorithms for the Construction and Analysis of Systems}, editor = {Flanagan, Cormac and König, Barbara}, isbn = {9783642287558}, issn = {1611-3349}, location = {Tallinn, Estonia}, pages = {549--551}, publisher = {Springer}, title = {{HSF(C): A software verifier based on Horn clauses}}, doi = {10.1007/978-3-642-28756-5_46}, volume = {7214}, year = {2012}, } @inbook{5745, author = {Gupta, Ashutosh}, booktitle = {Automated Technology for Verification and Analysis}, isbn = {9783642333859}, issn = {1611-3349}, location = {Thiruvananthapuram, Kerala, India}, pages = {107--121}, publisher = {Springer Berlin Heidelberg}, title = {{Improved Single Pass Algorithms for Resolution Proof Reduction}}, doi = {10.1007/978-3-642-33386-6_10}, volume = {7561}, year = {2012}, } @inproceedings{10907, abstract = {This paper presents a method to create a model of an articulated object using the planar motion in an initialization video. The model consists of rigid parts connected by points of articulation. The rigid parts are described by the positions of salient feature-points tracked throughout the video. Following a filtering step that identifies points that belong to different objects, rigid parts are found by a grouping process in a graph pyramid. Valid articulation points are selected by verifying multiple hypotheses for each pair of parts.}, author = {Artner, Nicole M.
and Ion, Adrian and Kropatsch, Walter G.}, booktitle = {Graph-Based Representations in Pattern Recognition}, editor = {Jiang, Xiaoyi and Ferrer, Miquel and Torsello, Andrea}, isbn = {9783642208430}, issn = {1611-3349}, location = {Münster, Germany}, pages = {215--224}, publisher = {Springer}, title = {{Spatio-temporal extraction of articulated models in a graph pyramid}}, doi = {10.1007/978-3-642-20844-7_22}, volume = {6658}, year = {2011}, } @inproceedings{10908, abstract = {We present ABC, a software tool for automatically computing symbolic upper bounds on the number of iterations of nested program loops. The system combines static analysis of programs with symbolic summation techniques to derive loop invariant relations between program variables. Iteration bounds are obtained from the inferred invariants, by replacing variables with bounds on their greatest values. We have successfully applied ABC to a large number of examples. The derived symbolic bounds express non-trivial polynomial relations over loop variables. We also report on results to automatically infer symbolic expressions over harmonic numbers as upper bounds on loop iteration counts.}, author = {Blanc, Régis and Henzinger, Thomas A and Hottelier, Thibaud and Kovács, Laura}, booktitle = {Logic for Programming, Artificial Intelligence, and Reasoning}, editor = {Clarke, Edmund M and Voronkov, Andrei}, isbn = {9783642175107}, issn = {1611-3349}, location = {Dakar, Senegal}, pages = {103--118}, publisher = {Springer Nature}, title = {{ABC: Algebraic Bound Computation for loops}}, doi = {10.1007/978-3-642-17511-4_7}, volume = {6355}, year = {2010}, } @inproceedings{11801, abstract = {Web search engines have emerged as one of the central applications on the Internet. In fact, search has become one of the most important activities that people engage in on the Internet. Even beyond becoming the number one source of information, a growing number of businesses are depending on web search engines for customer acquisition. In this talk I will briefly review the history of web search engines: The first generation of web search engines used text-only retrieval techniques. Google revolutionized the field by deploying the PageRank technology – an eigenvector-based analysis of the hyperlink structure – to analyze the web in order to produce relevant results. Moving forward, our goal is to achieve a better understanding of a page with a view towards producing even more relevant results. Google is powered by a large number of PCs. Using this infrastructure and striving to be as efficient as possible poses challenging systems problems but also various algorithmic challenges. I will discuss some of them in my talk.}, author = {Henzinger, Monika H}, booktitle = {12th Annual European Symposium on Algorithms}, isbn = {3540230254}, issn = {1611-3349}, location = {Bergen, Norway}, pages = {3}, publisher = {Springer Nature}, title = {{Algorithmic aspects of web search engines}}, doi = {10.1007/978-3-540-30140-0_2}, volume = {3221}, year = {2004}, } @inproceedings{11800, abstract = {Web search engines have emerged as one of the central applications on the Internet. In fact, search has become one of the most important activities that people engage in on the Internet. Even beyond becoming the number one source of information, a growing number of businesses are depending on web search engines for customer acquisition. The first generation of web search engines used text-only retrieval techniques.
Google revolutionized the field by deploying the PageRank technology – an eigenvector-based analysis of the hyperlink structure – to analyze the web in order to produce relevant results. Moving forward, our goal is to achieve a better understanding of a page with a view towards producing even more relevant results.}, author = {Henzinger, Monika H}, booktitle = {31st International Colloquium on Automata, Languages and Programming}, issn = {1611-3349}, location = {Turku, Finland}, pages = {3}, publisher = {Springer Nature}, title = {{The past, present, and future of web search engines}}, doi = {10.1007/978-3-540-27836-8_2}, volume = {3142}, year = {2004}, }