@phdthesis{20556,
  abstract     = {Verifiable Delay Functions (VDFs) introduced by Boneh et al. (CRYPTO'18) are functions that require a prescribed number of sequential steps T to evaluate, yet their output can be verified in time much faster than T. Since their introduction, VDFs have gained a lot of attention due to their applications in blockchain protocols, randomness beacons, timestamping and deniability. This thesis explores the theory and applications of VDFs, focusing on enhancing their soundness, efficiency and practicality.

The only practical VDFs known to date are based on repeated squaring in hidden order groups. Consider the function VDF(x,T)=x^(2^T).
The iterated squaring assumption states that, for a random group element x, the result of VDF cannot be computed significantly faster than performing T sequential squarings if the group order is unknown. To make the result verifiable a prover can compute a proof of exponentiation (PoE) \pi. Given \pi, the output of VDF can be verified in time much less than T.

We first present new constructions of statistically sound proofs of exponentiation, which are an important building block in the construction of SNARKs (Succinct Non-Interactive Argument of Knowledge). Statistical soundness means that the proofs remain secure against computationally unbounded adversaries, in particular, it remains secure even when the group order is known. We thereby address limitations in previous PoE protocols which either required (non-standard) hardness assumptions or a lot of parallel repetitions. Our construction significantly reduces the proof size of statistically sound PoEs that allow for a structured exponent, which leads to better efficiency of SNARKs and other applications.

Secondly, we introduce improved batching techniques for PoEs, which allow multiple proofs to be aggregated and verified with minimal overhead. These protocols optimize communication and computation complexity in large-scale blockchain environments and enable scalable remote benchmarking of parallel computation resources.

We then construct VDFs with enhanced properties such as zero-knowledge and watermarkability. It was shown by Arun, Bonneau and Clark (ASIACRYPT'22) that these features enable new cryptographic primitives called short-lived proofs and signatures. The validity of such proofs and signatures expires after a predefined amount of time T, i.e., they are deniable after time T. Our constructions improve upon the constructions by Arun, Bonneau and Clark in several dimensions (faster forging times, arguably weaker assumptions).

Finally, we apply PoEs in the realm of primality testing, providing cryptographically sound proofs of non-primality for large Proth numbers. This work gives a surprising application of VDFs in the area of computational number theory.

Together, our contributions advance both the theoretical foundations and the real-world usability of VDFs in general and in particular of PoEs, making them more adaptable and secure for current and emerging cryptographic applications.},
  author       = {Hoffmann, Charlotte},
  issn         = {2663-337X},
  pages        = {116},
  publisher    = {Institute of Science and Technology Austria},
  school       = {Institute of Science and Technology Austria},
  title        = {{Theory and applications of verifiable delay functions}},
  doi          = {10.15479/AT-ISTA-20556},
  year         = {2025},
}

@phdthesis{20920,
  abstract     = {Verifiable Delay Functions (VDFs) introduced by Boneh et al. (CRYPTO'18) are functions that require a prescribed number of sequential steps T to evaluate, yet their output can be verified in time much faster than T. Since their introduction, VDFs have gained a lot of attention due to their applications in blockchain protocols, randomness beacons, timestamping and deniability. This thesis explores the theory and applications of VDFs, focusing on enhancing their soundness, efficiency and practicality.

The only practical VDFs known to date are based on repeated squaring in hidden order groups. Consider the function VDF(x,T)=x^(2^T).
The iterated squaring assumption states that, for a random group element x, the result of VDF cannot be computed significantly faster than performing T sequential squarings if the group order is unknown. To make the result verifiable a prover can compute a proof of exponentiation (PoE) \pi. Given \pi, the output of VDF can be verified in time much less than T.

We first present new constructions of statistically sound proofs of exponentiation, which are an important building block in the construction of SNARKs (Succinct Non-Interactive Argument of Knowledge). Statistical soundness means that the proofs remain secure against computationally unbounded adversaries, in particular, it remains secure even when the group order is known. We thereby address limitations in previous PoE protocols which either required (non-standard) hardness assumptions or a lot of parallel repetitions. Our construction significantly reduces the proof size of statistically sound PoEs that allow for a structured exponent, which leads to better efficiency of SNARKs and other applications.

Secondly, we introduce improved batching techniques for PoEs, which allow multiple proofs to be aggregated and verified with minimal overhead. These protocols optimize communication and computation complexity in large-scale blockchain environments and enable scalable remote benchmarking of parallel computation resources.

We then construct VDFs with enhanced properties such as zero-knowledge and watermarkability. It was shown by Arun, Bonneau and Clark (ASIACRYPT'22) that these features enable new cryptographic primitives called short-lived proofs and signatures. The validity of such proofs and signatures expires after a predefined amount of time T, i.e., they are deniable after time T. Our constructions improve upon the constructions by Arun, Bonneau and Clark in several dimensions (faster forging times, arguably weaker assumptions).

Finally, we apply PoEs in the realm of primality testing, providing cryptographically sound proofs of non-primality for large Proth numbers. This work gives a surprising application of VDFs in the area of computational number theory.

Together, our contributions advance both the theoretical foundations and the real-world usability of VDFs in general and in particular of PoEs, making them more adaptable and secure for current and emerging cryptographic applications.},
  author       = {Hoffmann, Charlotte},
  internal-note = {NOTE(review): apparent duplicate of entry 20556 (identical author, title, year, pages, abstract); only the DOI suffix differs. Verify which DOI resolves to the thesis record and remove the redundant entry.},
  issn         = {2663-337X},
  pages        = {116},
  publisher    = {Institute of Science and Technology Austria},
  school       = {Institute of Science and Technology Austria},
  title        = {{Theory and applications of verifiable delay functions}},
  doi          = {10.15479/AT-ISTA-20920},
  year         = {2025},
}

@phdthesis{18088,
  abstract     = {Instant messaging applications like Whatsapp, Signal or Telegram have become ubiquitous in today's society.
Many of them provide not only end-to-end encryption, but also security guarantees even when the key material gets compromised.
These are achieved through frequent key update performed by users.
In particular, the compromise of a group key should preserve confidentiality of previously exchanged messages (forward secrecy), and a subsequent key update will ensure security for future ones (post-compromise security).
Though great protocols for one-on-one communication have been known for some time, the design of ones that scale efficiently for larger groups while achieving akin security guarantees is a hard problem.
A great deal of research has been aimed at this topic, much of it under the umbrella of the Messaging Layer Security (MLS) working group at the IETF.
Started in 2018, this joint effort by academics and industry culminated in 2023 with the publication of the first standard for secure group messaging [IETF, RFC9420].

At the core of secure group messaging is a cryptographic primitive termed Continuous Group Key Agreement, or CGKA [Alwen et al. 2021], that essentially allows a changing group of users to agree on a common key with the added functionality that security against compromises is achieved by users asynchronously issuing a key update. In this thesis we contribute to the understanding of CGKA across different angles.
First, we present a new technique to effect dynamic operations in groups, i.e., add or remove members, that can be more efficient than the one employed by MLS in certain settings.
Considering the setting of users belonging to multiple overlapping groups, we then show lower bounds on the communication cost of constructions that leverage said overlap, at the same time showing protocols that are asymptotically optimal and efficient for practical settings, respectively. Along the way, we show that the communication cost of key updates in MLS is average-cost optimal.
An important feature in CGKA protocols, particularly for big groups, is the possibility of executing several group operations concurrently. While later versions of MLS support this, they do at the cost of worsening the communication efficiency of future group operations.
In this thesis we introduce two new protocols that permit concurrency without any negative effect on efficiency. Our protocols circumvent previously existing lower bounds by satisfying a new notion of post-compromise security that only asks for security to be re-established after a certain number of key updates have taken place. While this can be slower than MLS in terms of rounds of communication, we show that it leads to more efficient overall communication.
Additionally, we introduce a new technique that allows group members to decrease the information they need to store and download, which makes one of our protocols enjoy much lower download cost than any other existing CGKA constructions.},
  author       = {Pascual Perez, Guillermo},
  issn         = {2663-337X},
  pages        = {239},
  publisher    = {Institute of Science and Technology Austria},
  school       = {Institute of Science and Technology Austria},
  title        = {{On the efficiency and security of secure group messaging}},
  doi          = {10.15479/at:ista:18088},
  year         = {2024},
}

@phdthesis{14506,
  abstract     = {Payment channel networks are a promising approach to improve the scalability bottleneck
of cryptocurrencies. Two design principles behind payment channel networks are
efficiency and privacy. Payment channel networks improve efficiency by allowing users
to transact in a peer-to-peer fashion along multi-hop routes in the network, avoiding
the lengthy process of consensus on the blockchain. Transacting over payment channel
networks also improves privacy as these transactions are not broadcast to the blockchain.
Despite the influx of recent protocols built on top of payment channel networks and
their analysis, a common shortcoming of many of these protocols is that they typically
focus only on either improving efficiency or privacy, but not both. Another limitation
on the efficiency front is that the models used to model actions, costs and utilities of
users are limited or come with unrealistic assumptions.
This thesis aims to address some of the shortcomings of recent protocols and algorithms
on payment channel networks, particularly in their privacy and efficiency aspects. We
first present a payment route discovery protocol based on hub labelling and private
information retrieval that hides the route query and is also efficient. We then present
a rebalancing protocol that formulates the rebalancing problem as a linear program
and solves the linear program using multiparty computation so as to hide the channel
balances. The rebalancing solution as output by our protocol is also globally optimal.
We go on to develop more realistic models of the action space, costs, and utilities of
both existing and new users that want to join the network. In each of these settings,
we also develop algorithms to optimise the utility of these users with good guarantees
on the approximation and competitive ratios.},
  author       = {Yeo, Michelle X},
  internal-note = {NOTE(review): DOI pattern differs from sibling entries (expected form 10.15479/at:ista:NNNNN); confirm that 10.15479/14506 resolves before publishing.},
  issn         = {2663-337X},
  pages        = {162},
  publisher    = {Institute of Science and Technology Austria},
  school       = {Institute of Science and Technology Austria},
  title        = {{Advances in efficiency and privacy in payment channel network analysis}},
  doi          = {10.15479/14506},
  year         = {2023},
}

@phdthesis{10035,
  abstract     = {Many security definitions come in two flavors: a stronger “adaptive” flavor, where the adversary can arbitrarily make various choices during the course of the attack, and a weaker “selective” flavor where the adversary must commit to some or all of their choices a-priori. For example, in the context of identity-based encryption, selective security requires the adversary to decide on the identity of the attacked party at the very beginning of the game whereas adaptive security allows the attacker to first see the master public key and some secret keys before making this choice. Often, it appears to be much easier to achieve selective security than it is to achieve adaptive security. A series of several recent works shows how to cleverly achieve adaptive security in several such scenarios including generalized selective decryption [Pan07][FJP15], constrained PRFs [FKPR14], and Yao’s garbled circuits [JW16]. Although the above works expressed vague intuition that they share a common technique, the connection was never made precise. In this work we present a new framework (published at Crypto ’17 [JKK+17a]) that connects all of these works and allows us to present them in a unified and simplified fashion. Having the framework in place, we show how to achieve adaptive security for proxy re-encryption schemes (published at PKC ’19 [FKKP19]) and provide the first adaptive security proofs for continuous group key agreement protocols (published at S&P ’21 [KPW+21]). Questioning optimality of our framework, we then show that currently used proof techniques cannot lead to significantly better security guarantees for "graph-building" games (published at TCC ’21 [KKPW21a]). These games cover generalized selective decryption, as well as the security of prominent constructions for constrained PRFs, continuous group key agreement, and proxy re-encryption. 
Finally, we revisit the adaptive security of Yao’s garbled circuits and extend the analysis of Jafargholi and Wichs in two directions: While they prove adaptive security only for a modified construction with increased online complexity, we provide the first positive results for the original construction by Yao (published at TCC ’21 [KKP21a]). On the negative side, we prove that the results of Jafargholi and Wichs are essentially optimal by showing that no black-box reduction can provide a significantly better security bound (published at Crypto ’21 [KKPW21c]).},
  author       = {Klein, Karen},
  issn         = {2663-337X},
  pages        = {276},
  publisher    = {Institute of Science and Technology Austria},
  school       = {Institute of Science and Technology Austria},
  title        = {{On the adaptive security of graph-based games}},
  doi          = {10.15479/at:ista:10035},
  year         = {2021},
}

@phdthesis{7896,
  abstract     = {A search problem lies in the complexity class FNP if a solution to the given instance of the problem can be verified efficiently. The complexity class TFNP consists of all search problems in FNP that are total in the sense that a solution is guaranteed to exist. TFNP contains a host of interesting problems from fields such as algorithmic game theory, computational topology, number theory and combinatorics. Since TFNP is a semantic class, it is unlikely to have a complete problem. Instead, one studies its syntactic subclasses which are defined based on the combinatorial principle used to argue totality. Of particular interest is the subclass PPAD, which contains important problems
like computing Nash equilibrium for bimatrix games and computational counterparts of several fixed-point theorems as complete. In the thesis, we undertake the study of average-case hardness of TFNP, and in particular its subclass PPAD.
Almost nothing was known about average-case hardness of PPAD before a series of recent results showed how to achieve it using a cryptographic primitive called program obfuscation.
However, it is currently not known how to construct program obfuscation from standard cryptographic assumptions. Therefore, it is desirable to relax the assumption under which average-case hardness of PPAD can be shown. In the thesis we take a step in this direction. First, we show that assuming the (average-case) hardness of a number-theoretic
problem related to factoring of integers, which we call Iterated-Squaring, PPAD is hard-on-average in the random-oracle model. Then we strengthen this result to show that the average-case hardness of PPAD reduces to the (adaptive) soundness of the Fiat-Shamir Transform, a well-known technique used to compile a public-coin interactive protocol into a non-interactive one. As a corollary, we obtain average-case hardness for PPAD in the random-oracle model assuming the worst-case hardness of #SAT. Moreover, the above results can all be strengthened to obtain average-case hardness for the class CLS ⊆ PPAD.
Our main technical contribution is constructing incrementally-verifiable procedures for computing Iterated-Squaring and #SAT. By incrementally-verifiable, we mean that every intermediate state of the computation includes a proof of its correctness, and the proof can be updated and verified in polynomial time. Previous constructions of such procedures relied on strong, non-standard assumptions. Instead, we introduce a technique called recursive proof-merging to obtain the same from weaker assumptions.},
  author       = {Kamath Hosdurg, Chethan},
  issn         = {2663-337X},
  pages        = {126},
  publisher    = {Institute of Science and Technology Austria},
  school       = {Institute of Science and Technology Austria},
  title        = {{On the average-case hardness of total search problems}},
  doi          = {10.15479/AT:ISTA:7896},
  year         = {2020},
}

@phdthesis{83,
  abstract     = {A proof system is a protocol between a prover and a verifier over a common input in which an honest prover convinces the verifier of the validity of true statements. Motivated by the success of decentralized cryptocurrencies, exemplified by Bitcoin, the focus of this thesis will be on proof systems which found applications in some sustainable alternatives to Bitcoin, such as the Spacemint and Chia cryptocurrencies. In particular, we focus on proofs of space and proofs of sequential work.
Proofs of space (PoSpace) were suggested as more ecological, economical, and egalitarian alternative to the energy-wasteful proof-of-work mining of Bitcoin. However, the state-of-the-art constructions of PoSpace are based on sophisticated graph pebbling lower bounds, and are therefore complex. Moreover, when these PoSpace are used in cryptocurrencies like Spacemint, miners can only start mining after ensuring that a commitment to their space is already added in a special transaction to the blockchain. Proofs of sequential work (PoSW) are proof systems in which a prover, upon receiving a statement x and a time parameter T, computes a proof which convinces the verifier that T time units had passed since x was received. Whereas Spacemint assumes synchrony to retain some interesting Bitcoin dynamics, Chia requires PoSW with unique proofs, i.e., PoSW in which it is hard to come up with more than one accepting proof for any true statement. In this thesis we construct simple and practically-efficient PoSpace and PoSW. When using our PoSpace in cryptocurrencies, miners can start mining on the fly, like in Bitcoin, and unlike current constructions of PoSW, which either achieve efficient verification of sequential work, or faster-than-recomputing verification of correctness of proofs, but not both at the same time, ours achieve the best of these two worlds.},
  author       = {Abusalah, Hamza M},
  issn         = {2663-337X},
  pages        = {59},
  publisher    = {Institute of Science and Technology Austria},
  school       = {Institute of Science and Technology Austria},
  title        = {{Proof systems for sustainable decentralized cryptocurrencies}},
  doi          = {10.15479/AT:ISTA:TH_1046},
  year         = {2018},
}

