@article{18855,
  abstract     = {A central problem in computational statistics is to convert a procedure for sampling combinatorial objects into a procedure for counting those objects, and vice versa. We consider sampling problems which come from Gibbs distributions, which are families of probability distributions over a discrete space Ω with probability mass function of the form μ^Ω_β(ω) ∝ e^{β H(ω)} for β in an interval [β_min, β_max] and H(ω) ∈ {0} ∪ [1, n]. Two important parameters are the partition function, which is the normalization factor Z(β) = ∑_{ω ∈ Ω} e^{β H(ω)}, and the vector of pre-image counts c_x = |H^{-1}(x)|.
We develop black-box sampling algorithms to estimate the counts using roughly Õ(n²/ε²) samples for integer-valued distributions and Õ(q/ε²) samples for general distributions, where q = (log Z(β_max))/Z(β_min) (ignoring some second-order terms and parameters). We show this is optimal up to logarithmic factors. We illustrate with improved algorithms for counting connected subgraphs, independent sets, and perfect matchings. As a key subroutine, we estimate all values of the partition function using Õ(n²/ε²) samples for integer-valued distributions and Õ(q/ε²) samples for general distributions. This improves over a prior algorithm of Huber (2015) which computes a single point estimate Z(β_max) and which uses a slightly larger amount of samples. We show matching lower bounds, demonstrating this complexity is optimal as a function of n and q up to logarithmic terms.},
  author       = {Harris, David G. and Kolmogorov, Vladimir},
  issn         = {1549-6333},
  journal      = {ACM Transactions on Algorithms},
  number       = {1},
  publisher    = {Association for Computing Machinery},
  title        = {Parameter Estimation for {Gibbs} Distributions},
  doi          = {10.1145/3685676},
  volume       = {21},
  year         = {2025},
}

@article{21007,
  abstract     = {Currently, the best known tradeoff between approximation ratio and complexity for the Sparsest Cut problem is achieved by the algorithm in [Sherman, FOCS 2009]: it computes O(√(log n)/ε)-approximation using O(n^ε log^{O(1)} n) maxflows for any ε ∈ [Θ(1/log n), Θ(1)]. It works by solving the SDP relaxation of [Arora-Rao-Vazirani, STOC 2004] using the Multiplicative Weights Update algorithm (MW) of [Arora-Kale, JACM 2016]. To implement one MW step, Sherman approximately solves a multicommodity flow problem using another application of MW. Nested MW steps are solved via a certain "chaining" algorithm that combines results of multiple calls to the maxflow algorithm. We present an alternative approach that avoids solving the multicommodity flow problem and instead computes "violating paths". This simplifies Sherman's algorithm by removing a need for a nested application of MW, and also allows parallelization: we show how to compute O(√(log n)/ε)-approximation via O(log^{O(1)} n) maxflows using O(n^ε) processors. We also revisit Sherman's chaining algorithm, and present a simpler version together with a new analysis.},
  author       = {Kolmogorov, Vladimir},
  issn         = {1549-6333},
  journal      = {ACM Transactions on Algorithms},
  number       = {4},
  pages        = {1--22},
  publisher    = {Association for Computing Machinery},
  title        = {A Simpler and Parallelizable {$O(\sqrt{\log n})$}-Approximation Algorithm for {Sparsest Cut}},
  doi          = {10.1145/3748723},
  volume       = {21},
  year         = {2025},
}

@article{11662,
  abstract     = {We give a fully dynamic (Las-Vegas style) algorithm with constant expected amortized time per update that maintains a proper (Δ+1)-vertex coloring of a graph with maximum degree at most Δ. This improves upon the previous O(log Δ)-time algorithm by Bhattacharya et al. (SODA 2018). Our algorithm uses an approach based on assigning random ranks to vertices and does not need to maintain a hierarchical graph decomposition. We show that our result does not only have optimal running time but is also optimal in the sense that already deciding whether a Δ-coloring exists in a dynamically changing graph with maximum degree at most Δ takes Ω(log n) time per operation.},
  author       = {Henzinger, Monika H. and Peng, Pan},
  issn         = {1549-6333},
  journal      = {ACM Transactions on Algorithms},
  number       = {2},
  publisher    = {Association for Computing Machinery},
  title        = {Constant-Time Dynamic {$(\Delta+1)$}-Coloring},
  doi          = {10.1145/3501403},
  volume       = {18},
  year         = {2022},
}

@article{11663,
  abstract     = {Many dynamic graph algorithms have an amortized update time, rather than a stronger worst-case guarantee. But amortized data structures are not suitable for real-time systems, where each individual operation has to be executed quickly. For this reason, there exist many recent randomized results that aim to provide a guarantee stronger than amortized expected. The strongest possible guarantee for a randomized algorithm is that it is always correct (Las Vegas) and has high-probability worst-case update time, which gives a bound on the time for each individual operation that holds with high probability.

In this article, we present the first polylogarithmic high-probability worst-case time bounds for the dynamic spanner and the dynamic maximal matching problem.

(1)

For dynamic spanner, the only known o(n) worst-case bounds were O(n^{3/4}) high-probability worst-case update time for maintaining a 3-spanner and O(n^{5/9}) for maintaining a 5-spanner. We give a O(1)^k log^3(n) high-probability worst-case time bound for maintaining a (2k-1)-spanner, which yields the first worst-case polylog update time for all constant k. (All the results above maintain the optimal tradeoff of stretch 2k-1 and Õ(n^{1+1/k}) edges.)

(2)

For dynamic maximal matching, or dynamic 2-approximate maximum matching, no algorithm with o(n) worst-case time bound was known and we present an algorithm with O(log^5(n)) high-probability worst-case time; similar worst-case bounds existed only for maintaining a matching that was (2+ϵ)-approximate, and hence not maximal.

Our results are achieved using a new approach for converting amortized guarantees to worst-case ones for randomized data structures by going through a third type of guarantee, which is a middle ground between the two above: An algorithm is said to have worst-case expected update time α if for every update σ, the expected time to process σ is at most α. Although stronger than amortized expected, the worst-case expected guarantee does not resolve the fundamental problem of amortization: A worst-case expected update time of O(1) still allows for the possibility that every 1/f(n) updates requires Θ(f(n)) time to process, for arbitrarily high f(n). In this article, we present a black-box reduction that converts any data structure with worst-case expected update time into one with a high-probability worst-case update time: The query time remains the same, while the update time increases by a factor of O(log^2(n)).

Thus, we achieve our results in two steps:

(1) First, we show how to convert existing dynamic graph algorithms with amortized expected polylogarithmic running times into algorithms with worst-case expected polylogarithmic running times.

(2) Then, we use our black-box reduction to achieve the polylogarithmic high-probability worst-case time bound. All our algorithms are Las-Vegas-type algorithms.},
  author       = {Bernstein, Aaron and Forster, Sebastian and Henzinger, Monika H.},
  issn         = {1549-6333},
  journal      = {ACM Transactions on Algorithms},
  number       = {4},
  publisher    = {Association for Computing Machinery},
  title        = {A Deamortization Approach for Dynamic Spanner and Dynamic Maximal Matching},
  doi          = {10.1145/3469833},
  volume       = {17},
  year         = {2021},
}

@article{9541,
  abstract     = {The Massively Parallel Computation (MPC) model is an emerging model that distills core aspects of distributed and parallel computation, developed as a tool to solve combinatorial (typically graph) problems in systems of many machines with limited space. Recent work has focused on the regime in which machines have sublinear (in n, the number of nodes in the input graph) space, with randomized algorithms presented for the fundamental problems of Maximal Matching and Maximal Independent Set. However, there have been no prior corresponding deterministic algorithms. A major challenge underlying the sublinear space setting is that the local space of each machine might be too small to store all edges incident to a single node. This poses a considerable obstacle compared to classical models in which each node is assumed to know and have easy access to its incident edges. To overcome this barrier, we introduce a new graph sparsification technique that deterministically computes a low-degree subgraph, with the additional property that solving the problem on this subgraph provides significant progress towards solving the problem for the original input graph. Using this framework to derandomize the well-known algorithm of Luby [SICOMP’86], we obtain O(log Δ + log log n)-round deterministic MPC algorithms for solving the problems of Maximal Matching and Maximal Independent Set with O(n^ε) space on each machine for any constant ε > 0. These algorithms also run in O(log Δ) rounds in the closely related model of CONGESTED CLIQUE, improving upon the state-of-the-art bound of O(log^2 Δ) rounds by Censor-Hillel et al. [DISC’17].},
  author       = {Czumaj, Artur and Davies, Peter and Parter, Merav},
  issn         = {1549-6333},
  journal      = {ACM Transactions on Algorithms},
  number       = {2},
  publisher    = {Association for Computing Machinery},
  title        = {Graph Sparsification for Derandomizing Massively Parallel Computation with Low Space},
  doi          = {10.1145/3451992},
  volume       = {17},
  year         = {2021},
}

@article{11664,
  abstract     = {We present a deterministic incremental algorithm for exactly maintaining the size of a minimum cut with O(log^3 n log log^2 n) amortized time per edge insertion and O(1) query time. This result partially answers an open question posed by Thorup (2007). It also stays in sharp contrast to a polynomial conditional lower bound for the fully dynamic weighted minimum cut problem. Our algorithm is obtained by combining a sparsification technique of Kawarabayashi and Thorup (2015) or its recent improvement by Henzinger, Rao, and Wang (2017), and an exact incremental algorithm of Henzinger (1997).

We also study space-efficient incremental algorithms for the minimum cut problem. Concretely, we show that there exists an O(n log n/ε^2) space Monte Carlo algorithm that can process a stream of edge insertions starting from an empty graph, and with high probability, the algorithm maintains a (1+ε)-approximation to the minimum cut. The algorithm has O((α(n) log^3 n)/ε^2) amortized update time and constant query time, where α(n) stands for the inverse of Ackermann function.},
  author       = {Goranci, Gramoz and Henzinger, Monika H. and Thorup, Mikkel},
  issn         = {1549-6333},
  journal      = {ACM Transactions on Algorithms},
  number       = {2},
  publisher    = {Association for Computing Machinery},
  title        = {Incremental Exact Min-Cut in Polylogarithmic Amortized Update Time},
  doi          = {10.1145/3174803},
  volume       = {14},
  year         = {2018},
}

@article{11665,
  abstract     = {We study the problem of maintaining a breadth-first spanning tree (BFS tree) in partially dynamic distributed networks modeling a sequence of either failures or additions of communication links (but not both). We present deterministic (1+ϵ)-approximation algorithms whose amortized time (over some number of link changes) is sublinear in D, the maximum diameter of the network.

Our technique also leads to a deterministic (1+ϵ)-approximate incremental algorithm for single-source shortest paths in the sequential (usual RAM) model. Prior to our work, the state of the art was the classic exact algorithm of Even and Shiloach (1981), which is optimal under some assumptions (Roditty and Zwick 2011; Henzinger et al. 2015). Our result is the first to show that, in the incremental setting, this bound can be beaten in certain cases if some approximation is allowed.},
  author       = {Henzinger, Monika H. and Krinninger, Sebastian and Nanongkai, Danupon},
  issn         = {1549-6333},
  journal      = {ACM Transactions on Algorithms},
  number       = {4},
  publisher    = {Association for Computing Machinery},
  title        = {Sublinear-Time Maintenance of Breadth-First Spanning Trees in Partially Dynamic Networks},
  doi          = {10.1145/3146550},
  volume       = {13},
  year         = {2017},
}

