@inproceedings{21042,
  abstract     = {Many blockchains such as Ethereum execute all incoming transactions sequentially significantly limiting the potential throughput. A common approach to scale execution is parallel execution engines that fully utilize modern multi-core architectures. Parallel execution is then either done optimistically, by executing transactions in parallel and detecting conflicts on the fly, or guided, by requiring exhaustive client transaction hints and scheduling transactions accordingly.

However, recent studies have shown that the performance of parallel execution engines depends on the nature of the underlying workload. In fact, in some cases, only a 60% speed-up compared to sequential execution could be obtained. This is the case, as transactions that access the same resources must be executed sequentially. For example, if 10% of the transactions in a block access the same resource, the execution cannot meaningfully scale beyond 10 cores. Therefore, a single popular application can bottleneck the execution and limit the potential throughput.

In this paper, we introduce Anthemius, a block construction algorithm that optimizes parallel transaction execution throughput. We evaluate Anthemius exhaustively under a range of workloads, and show that Anthemius enables the underlying parallel execution engine to process over twice as many transactions.},
  author       = {Neiheiser, Ray and Kokoris Kogias, Eleftherios},
  booktitle    = {29th International Conference on Financial Cryptography and Data Security},
  isbn         = {9783032070234},
  issn         = {1611-3349},
  location     = {Miyakojima, Japan},
  pages        = {307--323},
  publisher    = {Springer Nature},
  title        = {{Anthemius: Efficient and modular block assembly for concurrent execution}},
  doi          = {10.1007/978-3-032-07024-1_18},
  volume       = {15751},
  year         = {2026},
}

@inproceedings{21134,
  abstract     = {The Nakamoto consensus protocol underlying the Bitcoin blockchain uses proof of work as a voting mechanism. Honest miners who contribute hashing power towards securing the chain try to extend the longest chain they are aware of. Despite its simplicity, Nakamoto consensus achieves meaningful security guarantees assuming that at any point in time, a majority of the hashing power is controlled by honest parties. This also holds under “resource variability”, i.e., if the total hashing power varies greatly over time.
Proofs of space (PoSpace) have been suggested as a more sustainable replacement for proofs of work. Unfortunately, no construction of a “longest-chain” blockchain based on PoSpace, that is secure under dynamic availability, is known. In this work, we prove that without additional assumptions no such protocol exists. We exactly quantify this impossibility result by proving a bound on the length of the fork required for double spending as a function of the adversarial capabilities. This bound holds for any chain selection rule, and we also show a chain selection rule (albeit a very strange one) that almost matches this bound.
The Nakamoto consensus protocol underlying the Bitcoin blockchain uses proof of work as a voting mechanism. Honest miners who contribute hashing power towards securing the chain try to extend the longest chain they are aware of. Despite its simplicity, Nakamoto consensus achieves meaningful security guarantees assuming that at any point in time, a majority of the hashing power is controlled by honest parties. This also holds under “resource variability”, i.e., if the total hashing power varies greatly over time.

Proofs of space (PoSpace) have been suggested as a more sustainable replacement for proofs of work. Unfortunately, no construction of a “longest-chain” blockchain based on PoSpace, that is secure under dynamic availability, is known. In this work, we prove that without additional assumptions no such protocol exists. We exactly quantify this impossibility result by proving a bound on the length of the fork required for double spending as a function of the adversarial capabilities. This bound holds for any chain selection rule, and we also show a chain selection rule (albeit a very strange one) that almost matches this bound.

Concretely, we consider a security game in which the honest parties at any point control φ > 1 times more space than the adversary. The adversary can change the honest space by a factor 1 ± ε with every block (dynamic availability), and “replotting” the space (which allows answering two challenges using the same space) takes as much time as p blocks.
We prove that no matter what chain selection rule is used, in this game the adversary can create a fork of length φ² · p/ε that will be picked as the winner by the chain selection rule.
We also provide an upper bound that matches the lower bound up to a factor φ. There exists a chain selection rule (albeit a very strange one) which in the above game requires forks of length at least φ · p/ε.
Our results show the necessity of additional assumptions to create a secure PoSpace based longest-chain blockchain. The Chia network in addition to PoSpace uses a verifiable delay function. Our bounds show that an additional primitive like that is necessary.},
  author       = {Baig, Mirza Ahad and Pietrzak, Krzysztof Z},
  booktitle    = {29th International Conference on Financial Cryptography and Data Security},
  isbn         = {9783032070340},
  issn         = {1611-3349},
  location     = {Miyakojima, Japan},
  pages        = {127--142},
  publisher    = {Springer Nature},
  title        = {{On the (in)security of Proofs-of-space based longest-chain blockchains}},
  doi          = {10.1007/978-3-032-07035-7_8},
  volume       = {15752},
  year         = {2026},
}

@phdthesis{21651,
  abstract     = {Blockchains enable distributed consensus in permissionless settings, where participants
are unknown, dynamically changing, and do not trust each other. While Bitcoin,
based on Proof-of-Work (PoW), was the first protocol in this model, significant
research has focused on permissionless protocols using alternative physical resources,
specifically Proof-of-Space (PoSpace) and Verifiable Delay Functions (VDFs). This
thesis investigates the theoretical limits and design space of longest-chain protocols in
the fully permissionless and dynamically available settings using these three resources.
First, we address the feasibility of blockchains relying solely on storage as a resource.
We prove a fundamental impossibility result: there exists no secure longest-chain
protocol based exclusively on Proof-of-Space in the fully permissionless or dynamically
available settings. Further, we quantify the adversarial capabilities required to execute
a double-spend attack. Our result formally justifies the necessity of coupling PoSpace
with time-dependent primitives (such as VDFs) or to move to less permissive settings
(quasi-permissionless or permissioned) to ensure security.
Second, we generalize Nakamoto-like heaviest chain consensus to protocols utilizing
combinations of multiple physical resources. We analyze chain selection rules governed
by a weight function Γ(S, V,W), which assigns weight to blocks based on recorded
Space (S), VDF speed (V ), and Work (W). We provide a complete classification
of secure weight functions, proving that a weight function is secure against private
double-spend attacks if and only if it is homogeneous in the timed resources (V,W)
and sub-homogeneous in S. This framework unifies existing protocols like Bitcoin and
Chia under a single theoretical model and provides a powerful tool for designing new
longest-chain blockchains from a mix of physical resources.},
  author       = {Baig, Mirza Ahad},
  isbn         = {978-3-99078-078-7},
  issn         = {2663-337X},
  publisher    = {Institute of Science and Technology Austria},
  title        = {{On secure chain selection rules from physical resources in a permissionless setting}},
  doi          = {10.15479/AT-ISTA-21651},
  year         = {2026},
}

@inproceedings{20053,
  abstract     = {Liquid democracy is a transitive vote delegation mechanism over voting graphs. It enables each voter to delegate their vote(s) to another better-informed voter, with the goal of collectively making a better decision. The question of whether liquid democracy outperforms direct voting has been previously studied in the context of local delegation mechanisms (where voters can only delegate to someone in their neighbourhood) and binary decision problems. It has previously been shown that it is impossible for local delegation mechanisms to outperform direct voting in general graphs. This raises the question: for which classes of graphs do local delegation mechanisms yield good results?
In this work, we analyse (1) properties of specific graphs and (2) properties of local delegation mechanisms on these graphs, determining where local delegation actually outperforms direct voting. We show that a critical graph property enabling liquid democracy is that the voting outcome of local delegation mechanisms preserves a sufficient amount of variance, thereby avoiding situations where delegation falls behind direct voting. These insights allow us to prove our main results, namely that there exist local delegation mechanisms that perform no worse and in fact quantitatively better than direct voting in natural graph topologies like complete, random d-regular, and bounded degree graphs, lending a more nuanced perspective to previous impossibility results.
  author       = {Chatterjee, Krishnendu and Gilbert, Seth and Schmid, Stefan and Svoboda, Jakub and Yeo, Michelle X},
  booktitle    = {Proceedings of the ACM Symposium on Principles of Distributed Computing},
  isbn         = {9798400718854},
  location     = {Huatulco, Mexico},
  pages        = {241--251},
  publisher    = {Association for Computing Machinery},
  title        = {{When is liquid democracy possible?: On the manipulation of variance}},
  doi          = {10.1145/3732772.3733544},
  year         = {2025},
}

@inproceedings{19600,
  abstract     = {In this work, we explore route discovery in private payment channel networks. We first determine what “ideal” privacy for a routing protocol means in this setting. We observe that protocols achieving this strong privacy definition exist by leveraging Multi-Party Computation but they are inherently inefficient as they must involve the entire network. We then present protocols with weaker privacy guarantees but much better efficiency (involving only a small fraction of the nodes). The core idea is that both sender and receiver gossip a message which propagates through the network, and the moment any node in the network receives both messages, a path is found. In our first protocol the message is always sent to all neighbouring nodes with a delay proportional to the fees of that edge. In our second protocol the message is only sent to one neighbour chosen randomly with a probability proportional to its degree. We additionally propose a more realistic notion of privacy in order to measure the privacy leakage of our protocols in practice. Our realistic notion of privacy challenges an adversary that joins the network with a fixed budget to create channels to guess the sender and receiver of a transaction upon receiving messages from our protocols. Simulations of our protocols on the Lightning network topology (for random transactions and uniform fees) show that 1) forming edges with high degree nodes is a more effective attack strategy for the adversary, 2) there is a tradeoff between the number of nodes involved in our protocols (privacy) and the optimality of the discovered path, and 3) our protocols involve a very small fraction of the network on average.},
  author       = {Avarikioti, Zeta and Bastankhah, Mahsa and Maddah-Ali, Mohammad Ali and Pietrzak, Krzysztof Z and Svoboda, Jakub and Yeo, Michelle X},
  booktitle    = {Computer Security. ESORICS 2024 International Workshops},
  isbn         = {9783031823480},
  issn         = {1611-3349},
  location     = {Bydgoszcz, Poland},
  pages        = {207--223},
  publisher    = {Springer Nature},
  title        = {{Route discovery in private payment channel networks}},
  doi          = {10.1007/978-3-031-82349-7_15},
  volume       = {15263},
  year         = {2025},
}

@inproceedings{19712,
  abstract     = {We study recent algebraic attacks (Briaud-Øygarden EC’23) on the Regular Syndrome Decoding (RSD) problem and the assumptions underlying the correctness of their attacks’ complexity estimates. By relating these assumptions to interesting algebraic-combinatorial problems, we prove that they do not hold in full generality. However, we show that they are (asymptotically) true for most parameter sets, supporting the soundness of algebraic attacks on RSD. Further, we prove—without any heuristics or assumptions—that RSD can be broken in polynomial time whenever the number of error blocks times the square of the size of error blocks is larger than 2 times the square of the dimension of the code.
Additionally, we use our methodology to attack a variant of the Learning With Errors problem where each error term lies in a fixed set of constant size. We prove that this problem can be broken in polynomial time, given a sufficient number of samples. This result improves on the seminal work by Arora and Ge (ICALP’11), as the attack’s time complexity is independent of the LWE modulus.},
  author       = {Cueto Noval, Miguel and Merz, Simon-Philipp and Stählin, Patrick and Ünal, Akin},
  booktitle    = {44th Annual International Conference on the Theory and Applications of Cryptographic Techniques},
  isbn         = {9783031910944},
  issn         = {1611-3349},
  location     = {Madrid, Spain},
  pages        = {385--415},
  publisher    = {Springer Nature},
  title        = {{On the soundness of algebraic attacks against code-based assumptions}},
  doi          = {10.1007/978-3-031-91095-1_14},
  volume       = {15606},
  year         = {2025},
}

@inproceedings{19738,
  abstract     = {Garbling is a fundamental cryptographic primitive, with numerous theoretical and practical applications. Since the first construction by Yao (FOCS’82, ’86), a line of work has concerned itself with reducing the communication and computational complexity of that construction. One of the most efficient garbling schemes presently is the ‘Half Gates’ scheme by Zahur, Rosulek, and Evans (Eurocrypt’15). Despite its widespread adoption, the provable security of this scheme has been based on assumptions whose only instantiations are in idealized models. For example, in their original paper, Zahur, Rosulek, and Evans showed that hash functions satisfying a notion called circular correlation robustness (CCR) suffice for this task, and then proved that CCR secure hash functions can be instantiated in the random permutation model.
In this work, we show how to securely instantiate the Half Gates scheme in the standard model. To this end, we first show how this scheme can be securely instantiated given a (family of) weak CCR hash function, a notion that we introduce. Furthermore, we show how a weak CCR hash function can be used to securely instantiate other efficient garbling schemes, namely the ones by Rosulek and Roy (Crypto’21) and Heath (Eurocrypt’24). Thus we believe this notion to be of independent interest.
Finally, we construct such weak CCR hash functions using indistinguishability obfuscation and one-way functions. The security proof of this construction constitutes our main technical contribution. While our construction is not practical, it serves as a proof of concept supporting the soundness of these garbling schemes, which we regard to be particularly important given the recent initiative by NIST to standardize garbling, and the optimizations in Half Gates being potentially adopted.},
  author       = {Acharya, Anasuya and Azari, Karen and Baig, Mirza Ahad and Hofheinz, Dennis and Kamath, Chethan},
  booktitle    = {28th IACR International Conference on Practice and Theory of Public-Key Cryptography},
  isbn         = {9783031918285},
  issn         = {1611-3349},
  location     = {Røros, Norway},
  pages        = {37--75},
  publisher    = {Springer Nature},
  title        = {{Securely instantiating ‘Half Gates’ garbling in the standard model}},
  doi          = {10.1007/978-3-031-91829-2_2},
  volume       = {15677},
  year         = {2025},
}

@inproceedings{20844,
  abstract     = {We introduce and construct a new proof system called Non-interactive Arguments of Knowledge or Space (NArKoS), where a space-bounded prover can convince a verifier they know a secret, while having access to sufficient space allows one to forge indistinguishable proofs without the secret.
An application of NArKoS are space-deniable proofs, which are proofs of knowledge (say for authentication in access control) that are sound when executed by a lightweight device like a smart-card or an RFID chip that cannot have much storage, but are deniable (in the strong sense of online deniability) as the verifier, like a card reader, can efficiently forge such proofs.
We construct NArKoS in the random oracle model using an OR-proof combining a sigma protocol (for the proof of knowledge of the secret) with a new proof system called simulatable Proof of Transient Space (simPoTS). We give two different constructions of simPoTS, one based on labelling graphs with high pebbling complexity, a technique used in the construction of memory-hard functions and proofs of space, and a more practical construction based on the verifiable space-hard functions from TCC’24 where a prover must compute a root of a sparse polynomial. In both cases, the main challenge is making the proofs efficiently simulatable.},
  author       = {Dujmovic, Jesko and Günther, Christoph Ullrich and Pietrzak, Krzysztof Z},
  booktitle    = {23rd International Conference on Theory of Cryptography},
  isbn         = {9783032122896},
  issn         = {1611-3349},
  location     = {Aarhus, Denmark},
  pages        = {171--202},
  publisher    = {Springer Nature},
  title        = {{Space-deniable proofs}},
  doi          = {10.1007/978-3-032-12290-2_6},
  volume       = {16271},
  year         = {2025},
}

@inproceedings{20845,
  abstract     = {We develop new attacks against the Evasive LWE family of assumptions, in both the public and private-coin regime. To the best of our knowledge, ours are the first attacks against Evasive LWE in the public-coin regime, for any instantiation from the family. Our attacks are summarized below.

Public-Coin Attacks.
1. The recent work by Hsieh, Lin and Luo [17] constructed the first Attribute Based Encryption (ABE) for unbounded depth circuits by relying on the “circular” evasive LWE assumption. This assumption has been popularly considered as a safe, public-coin instance of Evasive LWE in contrast to its “private-coin” cousins (for instance, see [10, 11]).
We provide the first attack against this assumption, challenging the widely held belief that this is a public-coin assumption.
2. We demonstrate a counter-example against vanilla public-coin evasive LWE by Wee [26] in an unnatural parameter regime. Our attack crucially relies on the error in the pre-condition being larger than the error in the post-condition, necessitating a refinement of the assumption.

Private-Coin Attacks.
1. The recent work by Agrawal, Kumari and Yamada [2] constructed the first functional encryption scheme for pseudorandom functionalities (PRFE) and extended this to obfuscation for pseudorandom functionalities (PRIO) [4] by relying on private-coin evasive LWE. We provide a new attack against the assumption stated in the first posting of their work (subsequently refined to avoid these attacks).
2. The recent work by Branco et al. [8] (concurrently to [4]) provides a construction of obfuscation for pseudorandom functionalities by relying on private-coin evasive LWE. We provide a new attack against their stated assumption.
3. Branco et al. [8] showed that there exist contrived, “self-referential” classes of pseudorandom functionalities for which pseudorandom obfuscation cannot exist. We extend their techniques to develop an analogous result for pseudorandom functional encryption.

While Evasive LWE was developed to specifically avoid “zeroizing attacks”, our work shows that in certain settings, such attacks can still apply.},
  author       = {Agrawal, Shweta and Modi, Anuja and Yadav, Anshu and Yamada, Shota},
  booktitle    = {23rd International Conference on Theory of Cryptography},
  isbn         = {9783032122926},
  issn         = {1611-3349},
  location     = {Aarhus, Denmark},
  pages        = {259--290},
  publisher    = {Springer Nature},
  title        = {{Zeroizing attacks against evasive and circular evasive LWE}},
  doi          = {10.1007/978-3-032-12293-3_9},
  volume       = {16269},
  year         = {2025},
}

@inproceedings{20846,
  abstract     = {CVRFs are PRFs that unify the properties of verifiable and constrained PRFs. Since they were introduced concurrently by Fuchsbauer and Chandran-Raghuraman-Vinayagamurthy in 2014, it has been an open problem to construct CVRFs without using heavy machinery such as multilinear maps, obfuscation or functional encryption.
We solve this problem by constructing a prefix-constrained verifiable PRF that does not rely on the aforementioned assumptions. Essentially, our construction is a verifiable version of the Goldreich-Goldwasser-Micali PRF. To achieve verifiability we leverage degree-2 algebraic PRGs and bilinear groups. In short, proofs consist of intermediate values of the Goldreich-Goldwasser-Micali PRF raised to the exponents of group elements. These outputs can be verified using pairings since the underlying PRG is of degree 2.
We prove the selective security of our construction under the Decisional Square Diffie-Hellman (DSDH) assumption and a new assumption, which we dub recursive Decisional Diffie-Hellman (recursive DDH).
We prove the soundness of recursive DDH in the generic group model assuming the hardness of the Multivariate Quadratic (MQ) problem and a new variant thereof, which we call MQ+.
Last, in terms of applications, we observe that our CVRF is also an exponent (C)VRF in the plain model. Exponent VRFs were recently introduced by Boneh et al. (Eurocrypt’25) with various applications to threshold cryptography in mind. In addition to that, we give further applications for prefix-CVRFs in the blockchain setting, namely, stake-pooling and compressible randomness beacons.},
  author       = {Brandt, Nicholas and Cueto Noval, Miguel and Günther, Christoph Ullrich and Ünal, Akin and Wohnig, Stella},
  booktitle    = {23rd International Conference on Theory of Cryptography},
  isbn         = {9783032122896},
  issn         = {1611-3349},
  location     = {Aarhus, Denmark},
  pages        = {478--511},
  publisher    = {Springer Nature},
  title        = {{Constrained verifiable random functions without obfuscation and friends}},
  doi          = {10.1007/978-3-032-12290-2_16},
  volume       = {16271},
  year         = {2025},
}

@article{21017,
  abstract     = {With the growing interest in blockchains, permissioned approaches to consensus have received increasing attention. Unfortunately, the BFT consensus algorithms that are the backbone of most of these blockchains scale poorly and offer limited throughput. In fact, many state-of-the-art BFT consensus algorithms require a single leader process to receive and validate votes from a quorum of processes and then broadcast the result, which is inherently non-scalable. Recent approaches avoid this bottleneck by using dissemination/aggregation trees to propagate values and collect and validate votes. However, the use of trees increases the round latency, which limits the throughput for deeper trees. In this paper we propose Kauri, a BFT communication abstraction that sustains high throughput as the system size grows by leveraging a novel pipelining technique to perform scalable dissemination and aggregation on trees. Furthermore, when the number of faults is moderate (arguably the most common case in practice), our construction is able to recover from faults in an optimal number of reconfiguration steps. We implemented and experimentally evaluated Kauri with up to 800 processes. Our results show that Kauri outperforms the throughput of state-of-the-art permissioned blockchain protocols, by up to 58x without compromising latency. Interestingly, in some cases, the parallelization provided by Kauri can also decrease the latency.},
  author       = {Neiheiser, Ray and Matos, Miguel and Rodrigues, Luis},
  issn         = {1557-7333},
  journal      = {ACM Transactions on Computer Systems},
  publisher    = {Association for Computing Machinery},
  title        = {{Kauri: BFT consensus with pipelined tree-based dissemination and aggregation}},
  doi          = {10.1145/3769423},
  year         = {2025},
}

@inproceedings{21262,
  abstract     = {Continuous Group Key Agreement (CGKA) is the primitive underlying secure group messaging. It allows a large group of N users to maintain a shared secret key that is frequently rotated by the
group members in order to achieve forward secrecy and post compromise security. The group messaging scheme Messaging Layer Security (MLS) standardized by the IETF makes use of a CGKA called TreeKEM which arranges the N group members in a binary tree. Here, each node is associated with a public-key, each user is assigned one of the leaves, and a user knows the corresponding secret keys from their leaf to the root. To update the key material known to them, a user must just replace keys at log(N) nodes, which requires them to create and upload log(N) ciphertexts. Such updates must be processed sequentially by all users, which for large groups is impractical. To allow for concurrent updates, TreeKEM uses the “propose and commit” paradigm, where multiple users can concurrently propose to update (by just sampling a fresh leaf key), and a single user can then commit to all proposals at once. Unfortunately, this process destroys the binary tree structure as the tree gets pruned and some nodes must be “blanked” at the cost of increasing the in-degree of others, which makes the commit operation, as well as, future commits more costly. In the worst case, the update cost (in terms of uploaded ciphertexts) per user can grow from log(N) to Ω(N). In this work we provide two main contributions. First, we show that MLS’ communication complexity is bad not only in the worst case but also if the proposers and committers are chosen at random: even if there’s just one update proposal for every commit the expected cost is already over √N, and it approaches N as this ratio changes towards more proposals. Our second contribution is a new variant of propose and commit for
TreeKEM which for moderate amounts of update proposals per commit provably achieves an update cost of Θ(log(N)) assuming the proposers and committers are chosen at random.},
  author       = {Auerbach, Benedikt and Cueto Noval, Miguel and Erol, Boran and Pietrzak, Krzysztof Z},
  booktitle    = {45th Annual International Cryptology Conference},
  isbn         = {9783032019127},
  issn         = {1611-3349},
  location     = {Santa Barbara, CA, United States},
  pages        = {141--172},
  publisher    = {Springer Nature},
  title        = {{Continuous group-key agreement: Concurrent updates without pruning}},
  doi          = {10.1007/978-3-032-01913-4_5},
  volume       = {16007},
  year         = {2025},
}

@inproceedings{21323,
  abstract     = {We present a unifying framework for proving the knowledge-soundness of KZG-like polynomial commitment schemes, encompassing both univariate and multivariate variants. By conceptualizing the proof technique of Lipmaa, Parisella, and Siim for the univariate KZG scheme (EUROCRYPT 2024), we present tools and falsifiable hardness assumptions that permit black-box extraction of the multivariate KZG scheme. Central to our approach is the notion of a canonical Proof-of-Knowledge of a Polynomial (PoKoP) of a polynomial commitment scheme, which we use to capture the extractability notion required in constructions of practical zk-SNARKs. We further present an explicit polynomial decomposition lemma for multivariate polynomials, enabling a more direct analysis of interpolating extractors and bridging the gap between univariate and multivariate commitments. Our results provide the first standard-model proofs of extractability for the multivariate KZG scheme and many of its variants under falsifiable assumptions.},
  author       = {Belohorec, Juraj and Dvořák, Pavel and Hoffmann, Charlotte and Hubáček, Pavel and Mašková, Kristýna and Pastyřík, Martin},
  booktitle    = {45th Annual International Cryptology Conference},
  isbn         = {9783032018861},
  issn         = {1611-3349},
  location     = {Santa Barbara, CA, United States},
  pages        = {584--616},
  publisher    = {Springer Nature},
  title        = {{On extractability of the KZG family of polynomial commitment schemes}},
  doi          = {10.1007/978-3-032-01887-8_19},
  volume       = {16005},
  year         = {2025},
}

@inproceedings{20587,
  abstract     = {The blocks in the Bitcoin blockchain "record" the amount of work W that went into creating them through proofs of work. When honest parties control a majority of the work, consensus is achieved by picking the chain with the highest recorded weight. Resources other than work have been considered to secure such longest-chain blockchains. In Chia, blocks record the amount of disk-space S (via a proof of space) and sequential computational steps V (through a VDF).
In this paper, we ask what weight functions Γ(S,V,W) (that assign a weight to a block as a function of the recorded space, speed, and work) are secure in the sense that whenever the weight of the resources controlled by honest parties is larger than the weight of adversarial parties, the blockchain is secure against private double-spending attacks.
We completely classify such functions in an idealized "continuous" model: Γ(S,V,W) is secure against private double-spending attacks if and only if it is homogeneous of degree one in the "timed" resources V and W, i.e., αΓ(S,V,W) = Γ(S,α V, α W). This includes the Bitcoin rule Γ(S,V,W) = W and the Chia rule Γ(S,V,W) = S ⋅ V. In a more realistic model where blocks are created at discrete time-points, one additionally needs some mild assumptions on the dependency on S (basically, the weight should not grow too much if S is slightly increased, say linear as in Chia).
Our classification is more general and allows various instantiations of the same resource. It provides a powerful tool for designing new longest-chain blockchains. E.g., consider combining different PoWs to counter centralization, say the Bitcoin PoW W₁ and a memory-hard PoW W₂. Previous work suggested to use W₁+W₂ as weight. Our results show that using e.g., √{W₁}⋅ √{W₂} or min{W₁,W₂} are also secure, and we argue that in practice these are much better choices.},
  author       = {Baig, Mirza Ahad and Günther, Christoph Ullrich and Pietrzak, Krzysztof Z},
  booktitle    = {7th Conference on Advances in Financial Technologies},
  isbn         = {9783959774000},
  issn         = {1868-8969},
  location     = {Pittsburgh, PA, United States},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Nakamoto consensus from multiple resources}},
  doi          = {10.4230/LIPIcs.AFT.2025.16},
  volume       = {354},
  year         = {2025},
}

@phdthesis{20920,
  abstract     = {Verifiable Delay Functions (VDFs) introduced by Boneh et al. (CRYPTO'18) are functions that require a prescribed number of sequential steps T to evaluate, yet their output can be verified in time much faster than T. Since their introduction, VDFs have gained a lot of attention due to their applications in blockchain protocols, randomness beacons, timestamping and deniability. This thesis explores the theory and applications of VDFs, focusing on enhancing their soundness, efficiency and practicality.

The only practical VDFs known to date are based on repeated squaring in hidden order groups. Consider the function VDF(x,T)=x^(2^T).
The iterated squaring assumption states that, for a random group element x, the result of VDF cannot be computed significantly faster than performing T sequential squarings if the group order is unknown. To make the result verifiable a prover can compute a proof of exponentiation (PoE) \pi. Given \pi, the output of VDF can be verified in time much less than T.

We first present new constructions of statistically sound proofs of exponentiation, which are an important building block in the construction of SNARKs (Succinct Non-Interactive Argument of Knowledge). Statistical soundness means that the proofs remain secure against computationally unbounded adversaries, in particular, it remains secure even when the group order is known. We thereby address limitations in previous PoE protocols which either required (non-standard) hardness assumptions or a lot of parallel repetitions. Our construction significantly reduces the proof size of statistically sound PoEs that allow for a structured exponent, which leads to better efficiency of SNARKs and other applications.

Secondly, we introduce improved batching techniques for PoEs, which allow multiple proofs to be aggregated and verified with minimal overhead. These protocols optimize communication and computation complexity in large-scale blockchain environments and enable scalable remote benchmarking of parallel computation resources.

We then construct VDFs with enhanced properties such as zero-knowledge and watermarkability. It was shown by Arun, Bonneau and Clark (ASIACRYPT'22) that these features enable new cryptographic primitives called short-lived proofs and signatures. The validity of such proofs and signatures expires after a predefined amount of time T, i.e., they are deniable after time T. Our constructions improve upon the constructions by Arun, Bonneau and Clark in several dimensions (faster forging times, arguably weaker assumptions).

Finally, we apply PoEs in the realm of primality testing, providing cryptographically sound proofs of non-primality for large Proth numbers. This work gives a surprising application of VDFs in the area of computational number theory.

Together, our contributions advance both the theoretical foundations and the real-world usability of VDFs in general and in particular of PoEs, making them more adaptable and secure for current and emerging cryptographic applications.},
  author       = {Hoffmann, Charlotte},
  issn         = {2663-337X},
  pages        = {116},
  publisher    = {Institute of Science and Technology Austria},
  title        = {{Theory and applications of verifiable delay functions}},
  doi          = {10.15479/AT-ISTA-20920},
  year         = {2025},
}

@phdthesis{20556,
  abstract     = {Verifiable Delay Functions (VDFs) introduced by Boneh et al. (CRYPTO'18) are functions that require a prescribed number of sequential steps T to evaluate, yet their output can be verified in time much faster than T. Since their introduction, VDFs have gained a lot of attention due to their applications in blockchain protocols, randomness beacons, timestamping and deniability. This thesis explores the theory and applications of VDFs, focusing on enhancing their soundness, efficiency and practicality.

The only practical VDFs known to date are based on repeated squaring in hidden order groups. Consider the function VDF(x,T)=x^(2^T).
The iterated squaring assumption states that, for a random group element x, the result of VDF cannot be computed significantly faster than performing T sequential squarings if the group order is unknown. To make the result verifiable a prover can compute a proof of exponentiation (PoE) \pi. Given \pi, the output of VDF can be verified in time much less than T.

We first present new constructions of statistically sound proofs of exponentiation, which are an important building block in the construction of SNARKs (Succinct Non-Interactive Argument of Knowledge). Statistical soundness means that the proofs remain secure against computationally unbounded adversaries, in particular, it remains secure even when the group order is known. We thereby address limitations in previous PoE protocols which either required (non-standard) hardness assumptions or a lot of parallel repetitions. Our construction significantly reduces the proof size of statistically sound PoEs that allow for a structured exponent, which leads to better efficiency of SNARKs and other applications.

Secondly, we introduce improved batching techniques for PoEs, which allow multiple proofs to be aggregated and verified with minimal overhead. These protocols optimize communication and computation complexity in large-scale blockchain environments and enable scalable remote benchmarking of parallel computation resources.

We then construct VDFs with enhanced properties such as zero-knowledge and watermarkability. It was shown by Arun, Bonneau and Clark (ASIACRYPT'22) that these features enable new cryptographic primitives called short-lived proofs and signatures. The validity of such proofs and signatures expires after a predefined amount of time T, i.e., they are deniable after time T. Our constructions improve upon the constructions by Arun, Bonneau and Clark in several dimensions (faster forging times, arguably weaker assumptions).

Finally, we apply PoEs in the realm of primality testing, providing cryptographically sound proofs of non-primality for large Proth numbers. This work gives a surprising application of VDFs in the area of computational number theory.

Together, our contributions advance both the theoretical foundations and the real-world usability of VDFs in general and in particular of PoEs, making them more adaptable and secure for current and emerging cryptographic applications.},
  author       = {Hoffmann, Charlotte},
  issn         = {2663-337X},
  pages        = {116},
  publisher    = {Institute of Science and Technology Austria},
  title        = {{Theory and applications of verifiable delay functions}},
  doi          = {10.15479/AT-ISTA-20556},
  internal-note = {Possible duplicate of entry 20920: identical author, title, abstract, pages, and year; only the key/DOI differ. Verify which repository record is canonical and remove the other.},
  year         = {2025},
}

@inproceedings{19778,
  abstract     = {A verifiable delay function VDF(x, T)->(y, π) maps an input x and time parameter T to an output y together with an efficiently verifiable proof π certifying that y was correctly computed. The function runs in T sequential steps, and it should not be possible to compute y much faster than that. The only known practical VDFs use sequential squaring in groups of unknown order as the sequential function, i.e., y = x^2^T. There are two constructions for the proof of exponentiation (PoE) certifying that y = x^2^T, with Wesolowski (Eurocrypt’19) having very short proofs, but they are more expensive to compute and the soundness relies on stronger assumptions than the PoE proposed by Pietrzak (ITCS’19).
A recent application of VDFs by Arun, Bonneau and Clark (Asiacrypt’22) are short-lived proofs and signatures, which are proofs and signatures that are only sound for some time t, but after that can be forged by anyone. For this they rely on “watermarkable VDFs”, where the proof embeds a prover chosen watermark. To achieve stronger notions of proofs/signatures with reusable forgeability, they rely on “zero-knowledge VDFs”, where instead of the output y, one just proves knowledge of this output. The existing proposals for watermarkable and zero-knowledge VDFs all build on Wesolowski’s PoE, for the watermarkable VDFs there’s currently no security proof.

In this work we give the first constructions that transform any PoEs in hidden order groups into watermarkable VDFs and into zkVDFs, solving an open question by Arun et al. Unlike our watermarkable VDF, the zkVDF (required for reusable forgeability) is not very practical as the number of group elements in the proof is a security parameter. To address this, we introduce the notion of zero-knowledge proofs of sequential work (zkPoSW), a notion that relaxes zkVDFs by not requiring that the output is unique. We show that zkPoSW are sufficient to construct proofs or signatures with reusable forgeability, and construct efficient zkPoSW from any PoE, ultimately achieving short lived proofs and signatures that improve upon Arun et al.’s construction in several dimensions (faster forging times, arguably weaker assumptions).
A key idea underlying our constructions is to not directly construct a (watermarked or zk) proof for y = x^2^T, but instead give a (watermarked or zk) proof for the more basic statement that 
x^l, y^l satisfy x^l = x^r, y^l = y^r for some r, together with a normal PoE for y^l = (x^l)^2^T.},
  author       = {Hoffmann, Charlotte and Pietrzak, Krzysztof Z},
  booktitle    = {28th IACR International Conference on Practice and Theory of Public-Key Cryptography},
  isbn         = {9783031918193},
  issn         = {1611-3349},
  location     = {Røros, Norway},
  pages        = {36--66},
  publisher    = {Springer Nature},
  title        = {{Watermarkable and zero-knowledge Verifiable Delay Functions from any proof of exponentiation}},
  doi          = {10.1007/978-3-031-91820-9_2},
  volume       = {15674},
  year         = {2025},
}

@inproceedings{18702,
  abstract     = {In this work we prove lower bounds on the (communication) cost of maintaining a shared key among a dynamic group of users. Being “dynamic” means one can add and remove users from the group. This captures important protocols like multicast encryption (ME) and continuous group-key agreement (CGKA), which is the primitive underlying many group messaging applications. We prove our bounds in a combinatorial setting where the state of the protocol progresses in rounds. The state of the protocol in each round is captured by a set system, with each of its elements specifying a set of users who share a secret key. We show this combinatorial model implies bounds in symbolic models for ME and CGKA that capture, as building blocks, PRGs, PRFs, dual PRFs, secret sharing, and symmetric encryption in the setting of ME, and PRGs, PRFs, dual PRFs, secret sharing, public-key encryption, and key-updatable public-key encryption in the setting of CGKA. The models are related to the ones used by Micciancio and Panjwani (Eurocrypt’04) and Bienstock et al. (TCC’20) to analyze ME and CGKA, respectively. We prove – using the Bollobás’ Set Pairs Inequality – that the cost (number of uploaded ciphertexts) for replacing a set of d users in a group of size n is Ω(dln(n/d)). Our lower bound is asymptotically tight and both improves on a bound of Ω(d) by Bienstock et al. (TCC’20), and generalizes a result by Micciancio and Panjwani (Eurocrypt’04), who proved a lower bound of Ω(log(n)) for d=1. },
  author       = {Anastos, Michael and Auerbach, Benedikt and Baig, Mirza Ahad and Cueto Noval, Miguel and Kwan, Matthew Alan and Pascual Perez, Guillermo and Pietrzak, Krzysztof Z},
  booktitle    = {22nd International Conference on Theory of Cryptography},
  isbn         = {9783031780103},
  issn         = {1611-3349},
  location     = {Milan, Italy},
  pages        = {413--443},
  publisher    = {Springer Nature},
  title        = {{The cost of maintaining keys in dynamic groups with applications to multicast encryption and group messaging}},
  doi          = {10.1007/978-3-031-78011-0_14},
  volume       = {15364},
  year         = {2024},
}

@inproceedings{18755,
  abstract     = {A universal thresholdizer (UT), constructed from a threshold fully homomorphic encryption by Boneh et al., Crypto 2018, is a general framework for universally thresholdizing many cryptographic schemes. However, their framework is insufficient to construct strongly secure threshold schemes, such as threshold signatures and threshold public-key encryption, etc.

In this paper, we strengthen the security definition for a universal thresholdizer and propose a scheme which satisfies our stronger security notion. Our UT scheme is an improvement of Boneh et al.’s construction at the level of threshold fully homomorphic encryption using a key homomorphic pseudorandom function. We apply our strongly secure UT scheme to construct strongly secure threshold signatures and threshold public-key encryption.},
  author       = {Ebrahimi, Ehsan and Yadav, Anshu},
  booktitle    = {30th International Conference on the Theory and Application of Cryptology and Information Security},
  isbn         = {9789819608904},
  issn         = {1611-3349},
  location     = {Kolkata, India},
  pages        = {207--239},
  publisher    = {Springer Nature},
  title        = {{Strongly secure universal thresholdizer}},
  doi          = {10.1007/978-981-96-0891-1_7},
  volume       = {15486},
  year         = {2024},
}

@inproceedings{18756,
  abstract     = {The evasive LWE assumption, proposed by Wee [Eurocrypt’22 Wee] for constructing a lattice-based optimal broadcast encryption, has shown to be a powerful assumption, adopted by subsequent works to construct advanced primitives ranging from ABE variants to obfuscation for null circuits. However, a closer look reveals significant differences among the precise assumption statements involved in different works, leading to the fundamental question of how these assumptions compare to each other. In this work, we initiate a more systematic study on evasive LWE assumptions:
(i) Based on the standard LWE assumption, we construct simple counterexamples against three private-coin evasive LWE variants, used in [Crypto’22 Tsabary, Asiacrypt’22 VWW, Crypto’23 ARYY] respectively, showing that these assumptions are unlikely to hold.

(ii) Based on existing evasive LWE variants and our counterexamples, we propose and define three classes of plausible evasive LWE assumptions, suitably capturing all existing variants for which we are not aware of non-obfuscation-based counterexamples.

(iii) We show that under our assumption formulations, the security proofs of [Asiacrypt’22 VWW] and [Crypto’23 ARYY] can be recovered, and we reason why the security proof of [Crypto’22 Tsabary] is also plausibly repairable using an appropriate evasive LWE assumption.},
  author       = {Brzuska, Chris and Ünal, Akin and Woo, Ivy K.Y.},
  booktitle    = {30th International Conference on the Theory and Application of Cryptology and Information Security},
  isbn         = {9789819608935},
  issn         = {1611-3349},
  location     = {Kolkata, India},
  pages        = {418--449},
  publisher    = {Springer Nature},
  title        = {{Evasive LWE assumptions: Definitions, classes, and counterexamples}},
  doi          = {10.1007/978-981-96-0894-2_14},
  volume       = {15487},
  year         = {2024},
}

