@inproceedings{19743,
  abstract     = {The possibility of errors in human-engineered formal verification software, such as model checkers, poses a serious threat to the purpose of these tools. An established approach to mitigate this problem are certificates—lightweight, easy-to-check proofs of the verification results. In this paper, we develop novel certificates for model checking of Markov decision processes (MDPs) with quantitative reachability and expected reward properties. Our approach is conceptually simple and relies almost exclusively on elementary fixed point theory. Our certificates work for arbitrary finite MDPs and can be readily computed with little overhead using standard algorithms. We formalize the soundness of our certificates in Isabelle/HOL and provide a formally verified certificate checker. Moreover, we augment existing algorithms in the probabilistic model checker Storm with the ability to produce certificates and demonstrate practical applicability by conducting the first formal certification of the reference results in the Quantitative Verification Benchmark Set.},
  author       = {Chatterjee, Krishnendu and Quatmann, Tim and Schäffeler, Maximilian and Weininger, Maximilian and Winkler, Tobias and Zilken, Daniel},
  booktitle    = {31st International Conference on Tools and Algorithms for the Construction and Analysis of Systems},
  isbn         = {9783031906527},
  issn         = {1611-3349},
  location     = {Hamilton, ON, Canada},
  pages        = {130--151},
  publisher    = {Springer Nature},
  title        = {{Fixed point certificates for reachability and expected rewards in MDPs}},
  doi          = {10.1007/978-3-031-90653-4_7},
  volume       = {15697},
  year         = {2025},
}

@inproceedings{19744,
  abstract     = {We consider the problem of refuting equivalence of probabilistic programs, i.e., the problem of proving that two probabilistic programs induce different output distributions. We study this problem in the context of programs with conditioning (i.e., with observe and score statements), where the output distribution is conditioned by the event that all the observe statements along a run evaluate to true, and where the probability densities of different runs may be updated via the score statements. Building on a recent work on programs without conditioning, we present a new equivalence refutation method for programs with conditioning. Our method is based on weighted restarting, a novel transformation of probabilistic programs with conditioning to the output equivalent probabilistic programs without conditioning that we introduce in this work. Our method is the first to be both a) fully automated, and b) providing provably correct answers. We demonstrate the applicability of our method on a set of programs from the probabilistic inference literature.},
  author       = {Chatterjee, Krishnendu and Kafshdar Goharshady, Ehsan and Novotný, Petr and Zikelic, Dorde},
  booktitle    = {31st International Conference on Tools and Algorithms for the Construction and Analysis of Systems},
  isbn         = {9783031906527},
  issn         = {1611-3349},
  location     = {Hamilton, ON, Canada},
  pages        = {279--300},
  publisher    = {Springer Nature},
  title        = {{Refuting equivalence in probabilistic programs with conditioning}},
  doi          = {10.1007/978-3-031-90653-4_14},
  volume       = {15697},
  year         = {2025},
}

@inproceedings{20844,
  abstract     = {We introduce and construct a new proof system called Non-interactive Arguments of Knowledge or Space (NArKoS), where a space-bounded prover can convince a verifier they know a secret, while having access to sufficient space allows one to forge indistinguishable proofs without the secret.
An application of NArKoS are space-deniable proofs, which are proofs of knowledge (say for authentication in access control) that are sound when executed by a lightweight device like a smart-card or an RFID chip that cannot have much storage, but are deniable (in the strong sense of online deniability) as the verifier, like a card reader, can efficiently forge such proofs.
We construct NArKoS in the random oracle model using an OR-proof combining a sigma protocol (for the proof of knowledge of the secret) with a new proof system called simulatable Proof of Transient Space (simPoTS). We give two different constructions of simPoTS, one based on labelling graphs with high pebbling complexity, a technique used in the construction of memory-hard functions and proofs of space, and a more practical construction based on the verifiable space-hard functions from TCC’24 where a prover must compute a root of a sparse polynomial. In both cases, the main challenge is making the proofs efficiently simulatable.},
  author       = {Dujmovic, Jesko and Günther, Christoph Ullrich and Pietrzak, Krzysztof Z},
  booktitle    = {23rd International Conference on Theory of Cryptography},
  isbn         = {9783032122896},
  issn         = {1611-3349},
  location     = {Aarhus, Denmark},
  pages        = {171--202},
  publisher    = {Springer Nature},
  title        = {{Space-deniable proofs}},
  doi          = {10.1007/978-3-032-12290-2_6},
  volume       = {16271},
  year         = {2025},
}

@inproceedings{20845,
  abstract     = {We develop new attacks against the Evasive LWE family of assumptions, in both the public and private-coin regime. To the best of our knowledge, ours are the first attacks against Evasive LWE in the public-coin regime, for any instantiation from the family. Our attacks are summarized below.

Public-Coin Attacks.
1. The recent work by Hsieh, Lin and Luo [17] constructed the first Attribute Based Encryption (ABE) for unbounded depth circuits by relying on the “circular” evasive LWE assumption. This assumption has been popularly considered as a safe, public-coin instance of Evasive LWE in contrast to its “private-coin” cousins (for instance, see [10, 11]).
We provide the first attack against this assumption, challenging the widely held belief that this is a public-coin assumption.
2. We demonstrate a counter-example against vanilla public-coin evasive LWE by Wee [26] in an unnatural parameter regime. Our attack crucially relies on the error in the pre-condition being larger than the error in the post-condition, necessitating a refinement of the assumption.

Private-Coin Attacks.
1. The recent work by Agrawal, Kumari and Yamada [2] constructed the first functional encryption scheme for pseudorandom functionalities (PRFE) and extended this to obfuscation for pseudorandom functionalities (PRIO) [4] by relying on private-coin evasive LWE. We provide a new attack against the assumption stated in the first posting of their work (subsequently refined to avoid these attacks).
2. The recent work by Branco et al. [8] (concurrently to [4]) provides a construction of obfuscation for pseudorandom functionalities by relying on private-coin evasive LWE. We provide a new attack against their stated assumption.
3. Branco et al. [8] showed that there exist contrived, “self-referential” classes of pseudorandom functionalities for which pseudorandom obfuscation cannot exist. We extend their techniques to develop an analogous result for pseudorandom functional encryption.

While Evasive LWE was developed to specifically avoid “zeroizing attacks”, our work shows that in certain settings, such attacks can still apply.},
  author       = {Agrawal, Shweta and Modi, Anuja and Yadav, Anshu and Yamada, Shota},
  booktitle    = {23rd International Conference on Theory of Cryptography},
  isbn         = {9783032122926},
  issn         = {1611-3349},
  location     = {Aarhus, Denmark},
  pages        = {259--290},
  publisher    = {Springer Nature},
  title        = {{Zeroizing attacks against evasive and circular evasive LWE}},
  doi          = {10.1007/978-3-032-12293-3_9},
  volume       = {16269},
  year         = {2025},
}

@inproceedings{20846,
  abstract     = {CVRFs are PRFs that unify the properties of verifiable and constrained PRFs. Since they were introduced concurrently by Fuchsbauer and Chandran-Raghuraman-Vinayagamurthy in 2014, it has been an open problem to construct CVRFs without using heavy machinery such as multilinear maps, obfuscation or functional encryption.
We solve this problem by constructing a prefix-constrained verifiable PRF that does not rely on the aforementioned assumptions. Essentially, our construction is a verifiable version of the Goldreich-Goldwasser-Micali PRF. To achieve verifiability we leverage degree-2 algebraic PRGs and bilinear groups. In short, proofs consist of intermediate values of the Goldreich-Goldwasser-Micali PRF raised to the exponents of group elements. These outputs can be verified using pairings since the underlying PRG is of degree 2.
We prove the selective security of our construction under the Decisional Square Diffie-Hellman (DSDH) assumption and a new assumption, which we dub recursive Decisional Diffie-Hellman (recursive DDH).
We prove the soundness of recursive DDH in the generic group model assuming the hardness of the Multivariate Quadratic (MQ) problem and a new variant thereof, which we call MQ+.
Last, in terms of applications, we observe that our CVRF is also an exponent (C)VRF in the plain model. Exponent VRFs were recently introduced by Boneh et al. (Eurocrypt’25) with various applications to threshold cryptography in mind. In addition to that, we give further applications for prefix-CVRFs in the blockchain setting, namely, stake-pooling and compressible randomness beacons.},
  author       = {Brandt, Nicholas and Cueto Noval, Miguel and Günther, Christoph Ullrich and Ünal, Akin and Wohnig, Stella},
  booktitle    = {23rd International Conference on Theory of Cryptography},
  isbn         = {9783032122896},
  issn         = {1611-3349},
  location     = {Aarhus, Denmark},
  pages        = {478--511},
  publisher    = {Springer Nature},
  title        = {{Constrained verifiable random functions without obfuscation and friends}},
  doi          = {10.1007/978-3-032-12290-2_16},
  volume       = {16271},
  year         = {2025},
}

@inproceedings{21090,
  abstract     = {Fairness in AI is traditionally studied as a static property evaluated once, over a fixed dataset. However, real-world AI systems operate sequentially, with outcomes and environments evolving over time. This paper proposes a framework for analysing fairness as a runtime property. Using a minimal yet expressive model based on sequences of coin tosses with possibly evolving biases, we study the problems of monitoring and enforcing fairness expressed in either toss outcomes or coin biases. Since there is no one-size-fits-all solution for either problem, we provide a summary of monitoring and enforcement strategies, parametrised by environment dynamics, prediction horizon, and confidence thresholds. For both problems, we present general results under simple or minimal assumptions. We survey existing solutions for the monitoring problem for Markovian and additive dynamics, and existing solutions for the enforcement problem in static settings with known dynamics.},
  author       = {Cano Cordoba, Filip and Henzinger, Thomas A and Kueffner, Konstantin},
  booktitle    = {25th International Conference on Runtime Verification},
  issn         = {1611-3349},
  location     = {Graz, Austria},
  pages        = {1--21},
  publisher    = {Springer Nature},
  title        = {{Algorithmic fairness: A runtime perspective}},
  doi          = {10.1007/978-3-032-05435-7_1},
  volume       = {16087},
  year         = {2025},
}

@inproceedings{21091,
  abstract     = {Neural certificates have emerged as a powerful tool in cyber-physical systems control, providing witnesses of correctness. These certificates, such as barrier functions, often learned alongside control policies, once verified, serve as mathematical proofs of system safety. However, traditional formal verification of their defining conditions typically faces scalability challenges due to exhaustive state-space exploration. To address this challenge, we propose a lightweight runtime monitoring framework that integrates real-time verification and does not require access to the underlying control policy. Our monitor observes the system during deployment and performs on-the-fly verification of the certificate over a lookahead region to ensure safety within a finite prediction horizon. We instantiate this framework for ReLU-based control barrier functions and demonstrate its practical effectiveness in a case study. Our approach enables timely detection of safety violations and incorrect certificates with minimal overhead, providing an effective but lightweight alternative to the static verification of the certificates.},
  author       = {Henzinger, Thomas A and Kueffner, Konstantin and Yu, Zhengqi},
  booktitle    = {25th International Conference on Runtime Verification},
  issn         = {1611-3349},
  location     = {Graz, Austria},
  pages        = {54--72},
  publisher    = {Springer Nature},
  title        = {{Formal verification of neural certificates done dynamically}},
  doi          = {10.1007/978-3-032-05435-7_4},
  volume       = {16087},
  year         = {2025},
}

@inproceedings{21092,
  abstract     = {Formal verification provides assurances that a probabilistic system satisfies its specification—conditioned on the system model being aligned with reality. We propose alignment monitoring to watch that this assumption is justified. We consider a probabilistic model well aligned if it accurately predicts the behaviour of an uncertain system in advance. An alignment score measures this by quantifying the similarity between the model’s predicted and the system’s (unknown) actual distributions. An alignment monitor observes the system at runtime; at each point in time it uses the current state and the model to predict the next state. After the next state is observed, the monitor updates the verdict, which is a high-probability interval estimate for the true alignment score. We utilize tools from sequential forecasting to construct our alignment monitors. Besides a monitor for measuring the expected alignment score, we introduce a differential alignment monitor, designed for comparing two models, and a weighted alignment monitor, which permits task-specific alignment monitoring. We evaluate our monitors experimentally on the PRISM benchmark suite. They are fast, memory-efficient, and detect misalignment early.},
  author       = {Henzinger, Thomas A and Kueffner, Konstantin and Singh, Vasu and Sun, I},
  booktitle    = {25th International Conference on Runtime Verification},
  issn         = {1611-3349},
  location     = {Graz, Austria},
  pages        = {140--159},
  publisher    = {Springer Nature},
  title        = {{Alignment monitoring}},
  doi          = {10.1007/978-3-032-05435-7_9},
  volume       = {16087},
  year         = {2025},
}

@inproceedings{21093,
  abstract     = {We propose a monitoring approach for hyperproperties where the system’s observations range over infinite domains. The specifications are given as formulas of symbolic hypernode logic, an extension of earlier versions of hypernode logic that supports events with data. We demonstrate how to translate terms of symbolic hypernode logic into multi-tape symbolic transducers and we present a monitoring algorithm for universally quantified formulas that is based on this translation. We evaluate our approach against the previous approach for monitoring hypernode logic, and we also compare it to other monitors for hyperproperties.},
  author       = {Chalupa, Marek and Henzinger, Thomas A and Oliveira da Costa, Ana A},
  booktitle    = {25th International Conference on Runtime Verification},
  issn         = {1611-3349},
  location     = {Graz, Austria},
  pages        = {417--437},
  publisher    = {Springer Nature},
  title        = {{Monitoring hypernode logic over infinite domains}},
  doi          = {10.1007/978-3-032-05435-7_23},
  volume       = {16087},
  year         = {2025},
}

@inproceedings{21262,
  abstract     = {Continuous Group Key Agreement (CGKA) is the primitive underlying secure group messaging. It allows a large group of N users to maintain a shared secret key that is frequently rotated by the
group members in order to achieve forward secrecy and post compromise security. The group messaging scheme Messaging Layer Security (MLS) standardized by the IETF makes use of a CGKA called TreeKEM which arranges the N group members in a binary tree. Here, each node is associated with a public-key, each user is assigned one of the leaves, and a user knows the corresponding secret keys from their leaf to the root. To update the key material known to them, a user must just replace keys at log(N) nodes, which requires them to create and upload log(N) ciphertexts. Such updates must be processed sequentially by all users, which for large groups is impractical. To allow for concurrent updates, TreeKEM uses the “propose and commit” paradigm, where multiple users can concurrently propose to update (by just sampling a fresh leaf key), and a single user can then commit to all proposals at once. Unfortunately, this process destroys the binary tree structure as the tree gets pruned and some nodes must be “blanked” at the cost of increasing the in-degree of others, which makes the commit operation, as well as, future commits more costly. In the worst case, the update cost (in terms of uploaded ciphertexts) per user can grow from log(N) to Ω(N). In this work we provide two main contributions. First, we show that MLS’ communication complexity is bad not only in the worst case but also if the proposers and committers are chosen at random: even if there’s just one update proposal for every commit the expected cost is already over √N, and it approaches N as this ratio changes towards more proposals. Our second contribution is a new variant of propose and commit for
TreeKEM which for moderate amounts of update proposals per commit provably achieves an update cost of Θ(log(N)) assuming the proposers and committers are chosen at random.},
  author       = {Auerbach, Benedikt and Cueto Noval, Miguel and Erol, Boran and Pietrzak, Krzysztof Z},
  booktitle    = {45th Annual International Cryptology Conference},
  isbn         = {9783032019127},
  issn         = {1611-3349},
  location     = {Santa Barbara, CA, United States},
  pages        = {141--172},
  publisher    = {Springer Nature},
  title        = {{Continuous group-key agreement: Concurrent updates without pruning}},
  doi          = {10.1007/978-3-032-01913-4_5},
  volume       = {16007},
  year         = {2025},
}

@inproceedings{21323,
  abstract     = {We present a unifying framework for proving the knowledge-soundness of KZG-like polynomial commitment schemes, encompassing both univariate and multivariate variants. By conceptualizing the proof technique of Lipmaa, Parisella, and Siim for the univariate KZG scheme (EUROCRYPT 2024), we present tools and falsifiable hardness assumptions that permit black-box extraction of the multivariate KZG scheme. Central to our approach is the notion of a canonical Proof-of-Knowledge of a Polynomial (PoKoP) of a polynomial commitment scheme, which we use to capture the extractability notion required in constructions of practical zk-SNARKs. We further present an explicit polynomial decomposition lemma for multivariate polynomials, enabling a more direct analysis of interpolating extractors and bridging the gap between univariate and multivariate commitments. Our results provide the first standard-model proofs of extractability for the multivariate KZG scheme and many of its variants under falsifiable assumptions.},
  author       = {Belohorec, Juraj and Dvořák, Pavel and Hoffmann, Charlotte and Hubáček, Pavel and Mašková, Kristýna and Pastyřík, Martin},
  booktitle    = {45th Annual International Cryptology Conference},
  isbn         = {9783032018861},
  issn         = {1611-3349},
  location     = {Santa Barbara, CA, United States},
  pages        = {584--616},
  publisher    = {Springer Nature},
  title        = {{On extractability of the KZG family of polynomial commitment schemes}},
  doi          = {10.1007/978-3-032-01887-8_19},
  volume       = {16005},
  year         = {2025},
}

@inproceedings{19741,
  abstract     = {Quantitative automata model beyond-boolean aspects of systems: every execution is mapped to a real number by incorporating weighted transitions and value functions that generalize acceptance conditions of boolean ω-automata. Despite the theoretical advances in systems analysis through quantitative automata, the first comprehensive software tool for quantitative automata (Quantitative Automata Kit, or QuAK) was developed only recently. QuAK implements algorithms for solving standard decision problems, e.g., emptiness and universality, as well as constructions for safety and liveness of quantitative automata. We present the architecture of QuAK, which reflects that all of these problems reduce to either checking inclusion between two quantitative automata or computing the highest value achievable by an automaton—its so-called top value. We improve QuAK by extending these two algorithms with an option to return, alongside their results, an ultimately periodic word witnessing the algorithm’s output, as well as implementing a new safety-liveness decomposition algorithm that can handle nondeterministic automata, making QuAK more informative and capable.},
  author       = {Chalupa, Marek and Henzinger, Thomas A and Mazzocchi, Nicolas Adrien and Sarac, Naci E},
  booktitle    = {31st International Conference on Tools and Algorithms for the Construction and Analysis of Systems},
  isbn         = {9783031906428},
  issn         = {1611-3349},
  location     = {Hamilton, ON, Canada},
  pages        = {303--312},
  publisher    = {Springer Nature},
  title        = {{Automating the analysis of quantitative automata with QuAK}},
  doi          = {10.1007/978-3-031-90643-5_16},
  volume       = {15696},
  year         = {2025},
}

@inproceedings{19778,
  abstract     = {A verifiable delay function VDF(x, T)->(y, π) maps an input x and time parameter T to an output y together with an efficiently verifiable proof π certifying that y was correctly computed. The function runs in T sequential steps, and it should not be possible to compute y much faster than that. The only known practical VDFs use sequential squaring in groups of unknown order as the sequential function, i.e., y = x^2^T. There are two constructions for the proof of exponentiation (PoE) certifying that y = x^2^T, with Wesolowski (Eurocrypt’19) having very short proofs, but they are more expensive to compute and the soundness relies on stronger assumptions than the PoE proposed by Pietrzak (ITCS’19).
A recent application of VDFs by Arun, Bonneau and Clark (Asiacrypt’22) are short-lived proofs and signatures, which are proofs and signatures that are only sound for some time t, but after that can be forged by anyone. For this they rely on “watermarkable VDFs”, where the proof embeds a prover chosen watermark. To achieve stronger notions of proofs/signatures with reusable forgeability, they rely on “zero-knowledge VDFs”, where instead of the output y, one just proves knowledge of this output. The existing proposals for watermarkable and zero-knowledge VDFs all build on Wesolowski’s PoE, for the watermarkable VDFs there’s currently no security proof.

In this work we give the first constructions that transform any PoEs in hidden order groups into watermarkable VDFs and into zkVDFs, solving an open question by Arun et al. Unlike our watermarkable VDF, the zkVDF (required for reusable forgeability) is not very practical as the number of group elements in the proof is a security parameter. To address this, we introduce the notion of zero-knowledge proofs of sequential work (zkPoSW), a notion that relaxes zkVDFs by not requiring that the output is unique. We show that zkPoSW are sufficient to construct proofs or signatures with reusable forgeability, and construct efficient zkPoSW from any PoE, ultimately achieving short lived proofs and signatures that improve upon Arun et al.’s construction in several dimensions (faster forging times, arguably weaker assumptions).
A key idea underlying our constructions is to not directly construct a (watermarked or zk) proof for y = x^2^T, but instead give a (watermarked or zk) proof for the more basic statement that 
x^l, y^l satisfy x^l = x^r, y^l = y^r for some r, together with a normal PoE for y^l = (x^l)^2^T.},
  author       = {Hoffmann, Charlotte and Pietrzak, Krzysztof Z},
  booktitle    = {28th IACR International Conference on Practice and Theory of Public-Key Cryptography},
  isbn         = {9783031918193},
  issn         = {1611-3349},
  location     = {Roros, Norway},
  pages        = {36--66},
  publisher    = {Springer Nature},
  title        = {{Watermarkable and zero-knowledge Verifiable Delay Functions from any proof of exponentiation}},
  doi          = {10.1007/978-3-031-91820-9_2},
  volume       = {15674},
  year         = {2025},
}

@inproceedings{18155,
  abstract     = {We study the classical problem of verifying programs with respect to formal specifications given in the linear temporal logic (LTL). We first present novel sound and complete witnesses for LTL verification over imperative programs. Our witnesses are applicable to both verification (proving) and refutation (finding bugs) settings. We then consider LTL formulas in which atomic propositions can be polynomial constraints and turn our focus to polynomial arithmetic programs, i.e. programs in which every assignment and guard consists only of polynomial expressions. For this setting, we provide an efficient algorithm to automatically synthesize such LTL witnesses. Our synthesis procedure is both sound and semi-complete. Finally, we present experimental results demonstrating the effectiveness of our approach and that it can handle programs which were beyond the reach of previous state-of-the-art tools.},
  author       = {Chatterjee, Krishnendu and Goharshady, Amir Kafshdar and Goharshady, Ehsan and Karrabi, Mehrdad and Zikelic, Dorde},
  booktitle    = {26th International Symposium on Formal Methods},
  isbn         = {9783031711619},
  issn         = {1611-3349},
  location     = {Milan, Italy},
  pages        = {600--619},
  publisher    = {Springer Nature},
  title        = {{Sound and complete witnesses for template-based verification of LTL properties on polynomial programs}},
  doi          = {10.1007/978-3-031-71162-6_31},
  volume       = {14933},
  year         = {2024},
}

@inproceedings{18177,
  abstract     = {Partially Specified Boolean Networks (PSBNs) represent a family of Boolean models resulting from possible interpretations of unknown update logics. Hybrid extension of CTL (HCTL) has the power to express complex dynamical phenomena, such as oscillations or stability. We present BNClassifier to classify Boolean Networks corresponding to a given PSBN according to criteria specified in HCTL. The implementation of the tool is fully symbolic (based on BDDs). The results are visualised using the machine-learning-based technology of decision trees.},
  author       = {Beneš, Nikola and Brim, Luboš and Huvar, Ondřej and Pastva, Samuel and Šafránek, David},
  booktitle    = {Computational Methods in Systems Biology},
  isbn         = {9783031716706},
  issn         = {1611-3349},
  pages        = {19--26},
  publisher    = {Springer Nature},
  title        = {{BNClassifier: Classifying boolean models by dynamic properties}},
  doi          = {10.1007/978-3-031-71671-3_2},
  volume       = {14971},
  year         = {2024},
}

@inproceedings{18206,
  abstract     = {In the context of in vitro fertilization (IVF), selecting embryos for transfer is critical in determining pregnancy outcomes, with implantation as the essential first milestone for a successful pregnancy. This study introduces the Bonna algorithm, an advanced deep-learning framework engineered to predict embryo implantation probabilities. The algorithm employs a sophisticated integration of machine-learning techniques, utilizing MobileNetV2 for pixel and context embedding, a custom Pix2Pix model for precise segmentation, and a Vision Transformer for additional depth in embedding. MobileNetV2 was chosen for its robust feature extraction capabilities, focusing on textures and edges. The custom Pix2Pix model is adapted for precise segmentation of significant biological features such as the zona pellucida and blastocyst cavity. The Vision Transformer adds a global perspective, capturing complex patterns not apparent in local image segments. Tested on a dataset of images of human blastocysts collected from Ukraine, Israel, and Spain, the Bonna algorithm was rigorously validated through 10-fold cross-validation to ensure its robustness and reliability. It demonstrates superior performance with a mean area under the receiver operating characteristic curve (AUC) of 0.754, significantly outperforming existing models. The study not only advances predictive accuracy in embryo selection but also highlights the algorithm’s clinical applicability due to reliable confidence reporting.},
  author       = {Rave, Gilad and Fordham, Daniel E. and Bronstein, Alexander and Silver, David H.},
  booktitle    = {First International Conference on Artificial Intelligence in Healthcare},
  isbn         = {9783031672842},
  issn         = {1611-3349},
  location     = {Swansea, United Kingdom},
  pages        = {160--171},
  publisher    = {Springer Nature},
  title        = {{Enhancing predictive accuracy in embryo implantation: The Bonna algorithm and its clinical implications}},
  doi          = {10.1007/978-3-031-67285-9_12},
  volume       = {14976},
  year         = {2024},
}

@inproceedings{18521,
  abstract     = {In distributed systems with processes that do not share a global clock, partial synchrony is achieved by clock synchronization that guarantees bounded clock skew among all applications. Existing solutions for distributed runtime verification under partial synchrony against temporal logic specifications are exact but suffer from significant computational overhead. In this paper, we propose an approximate distributed monitoring algorithm for Signal Temporal Logic (STL) that mitigates this issue by abstracting away potential interleaving behaviors. This conservative abstraction enables a significant speedup of the distributed monitors, albeit with a tradeoff in accuracy. We address this tradeoff with a methodology that combines our approximate monitor with its exact counterpart, resulting in enhanced efficiency without sacrificing precision. We evaluate our approach with multiple experiments, showcasing its efficacy in both real-world applications and synthetic examples.},
  author       = {Bonakdarpour, Borzoo and Momtaz, Anik and Nickovic, Dejan and Sarac, Naci E},
  booktitle    = {24th International Conference on Runtime Verification},
  isbn         = {9783031742330},
  issn         = {1611-3349},
  location     = {Istanbul, Turkey},
  pages        = {282--301},
  publisher    = {Springer Nature},
  title        = {{Approximate distributed monitoring under partial synchrony: Balancing speed & accuracy}},
  doi          = {10.1007/978-3-031-74234-7_18},
  volume       = {15191},
  year         = {2024},
}

@incollection{18563,
  abstract     = {I give a personal account about the wave of new research activities that rose in the 1990s on the specification, verification, and control of real-time systems.},
  author       = {Henzinger, Thomas A},
  booktitle    = {Real Time and Such},
  editor       = {Graf, Susanne and Pettersson, Paul and Steffen, Bernhard},
  isbn         = {9783031737503},
  issn         = {1611-3349},
  pages        = {154--164},
  publisher    = {Springer Nature},
  title        = {{Reminiscences of a Real-Time Researcher}},
  doi          = {10.1007/978-3-031-73751-0_12},
  volume       = {15230},
  year         = {2024},
}

@inproceedings{18599,
  abstract     = {Hypernode logic can reason about the prefix relation on stutter-reduced finite traces through the stutter-reduced prefix predicate. We increase the expressiveness of hypernode logic in two ways. First, we split the stutter-reduced prefix predicate into an explicit stutter-reduction operator and the classical prefix predicate on words. This change gives hypernode logic the ability to combine synchronous and asynchronous reasoning by explicitly stating which parts of traces can stutter. Second, we allow the use of regular expressions in formulas to reason about the structure of traces. This change enables hypernode logic to describe a mixture of trace properties and hyperproperties.

We show how to translate extended hypernode logic formulas into multi-track automata, which are automata that read multiple input words. Then we describe a fully online monitoring algorithm for monitoring k-safety hyperproperties specified in the logic. We have implemented the monitoring algorithm, and evaluated it on monitoring synchronous and asynchronous versions of observational determinism, and on checking the privacy preservation by compiler optimizations.},
  author       = {Chalupa, Marek and Henzinger, Thomas A and Oliveira da Costa, Ana},
  booktitle    = {Integrated Formal Methods},
  isbn         = {9783031765537},
  issn         = {1611-3349},
  pages        = {151--171},
  publisher    = {Springer Nature},
  title        = {{Monitoring extended hypernode logic}},
  doi          = {10.1007/978-3-031-76554-4_9},
  volume       = {15234},
  year         = {2024},
}

@inproceedings{18600,
  abstract     = {The analysis of formal models that include quantitative aspects such as timing or probabilistic choices is performed by quantitative verification tools. Broad and mature tool support is available for computing basic properties such as expected rewards on basic models such as Markov chains. Previous editions of QComp, the comparison of tools for the analysis of quantitative formal models, focused on this setting. Many application scenarios, however, require more advanced property types such as LTL and parameter synthesis queries as well as advanced models like stochastic games and partially observable MDPs. For these, tool support is in its infancy today. This paper presents the outcomes of QComp 2023: a survey of the state of the art in quantitative verification tool support for advanced property types and models. With tools ranging from first research prototypes to well-supported integrations into established toolsets, this report highlights today’s active areas and tomorrow’s challenges in tool-focused research for quantitative verification.},
  author       = {Andriushchenko, Roman and Bork, Alexander and Budde, Carlos E. and Češka, Milan and Grover, Kush and Hahn, Ernst Moritz and Hartmanns, Arnd and Israelsen, Bryant and Jansen, Nils and Jeppson, Joshua and Junges, Sebastian and Köhl, Maximilian A. and Könighofer, Bettina and Kretinsky, Jan and Meggendorfer, Tobias and Parker, David and Pranger, Stefan and Quatmann, Tim and Ruijters, Enno and Taylor, Landon and Volk, Matthias and Weininger, Maximilian and Zhang, Zhen},
  booktitle    = {TOOLympics Challenge 2023},
  isbn         = {9783031676949},
  issn         = {1611-3349},
  pages        = {90--146},
  publisher    = {Springer Nature},
  title        = {{Tools at the Frontiers of Quantitative Verification: QComp 2023 Competition Report}},
  doi          = {10.1007/978-3-031-67695-6_4},
  volume       = {14550},
  year         = {2024},
}

