@inproceedings{21140,
  abstract     = {We consider several problems related to packing forests in graphs. The first one is to find k edge-disjoint forests in a directed graph G of maximal size such that the indegree of each vertex in these forests is at most k. We describe a min-max characterization for this problem and show that it can be solved in almost linear time for fixed k, extending the algorithm of [Gabow, 1995]. Specifically, the complexity is O(kδm log n), where n, m are the number of vertices and edges in G respectively, and δ = max{1, k − kG}, where kG is the edge connectivity of the graph. Using our solution to this problem, we improve complexities for two existing applications:(1) k-forest problem: find k forests in an undirected graph G maximizing the number of edges in their union. We show how to solve this problem in O(k3 min{kn, m} log2 n + k · MAXFLOW(m, m) log n) time, breaking the Ok(n3/2) complexity barrier of previously known approaches.(2) Directed edge-connectivity augmentation problem: find a smallest set of directed edges whose addition to the given directed graph makes it strongly k-connected. We improve the deterministic complexity for this problem from O(kδ(m + δn) log n) [Gabow, STOC 1994] to O(kδm log n). A similar approach with the same complexity also works for the undirected version of the problem.},
  author       = {Arkhipov, Pavel and Kolmogorov, Vladimir},
  booktitle    = {Proceedings of the 2026 Annual ACM-SIAM Symposium on Discrete Algorithms},
  location     = {Vancouver, Canada},
  pages        = {4023--4042},
  publisher    = {Society for Industrial and Applied Mathematics},
  title        = {{Faster algorithms for packing forests in graphs and related problems}},
  doi          = {10.1137/1.9781611978971.148},
  year         = {2026},
}

@unpublished{21400,
  abstract     = {This document is a blueprint for the formalization in Lean of the structural theory of regular matroids underlying Seymour's decomposition theorem. We present a modular account of regularity via totally unimodular representations, show that regularity is preserved under 1-, 2-, and 3-sums, and establish regularity for several special classes of matroids, including graphic, cographic, and the matroid R10. The blueprint records the logical structure of the proof, the precise dependencies between results, and their correspondence with Lean declarations. It is intended both as a guide for the ongoing formalization effort and as a human-readable reference for the organization of the proof.},
  author       = {Sergeev, Ivan and Dvorak, Martin and Rampell, Cameron and Sandey, Mark and Monticone, Pietro},
  eprint       = {2601.01255},
  eprinttype   = {arXiv},
  note         = {arXiv preprint},
  pages        = {18},
  title        = {{A blueprint for the formalization of Seymour's matroid decomposition theorem}},
  doi          = {10.48550/arXiv.2601.01255},
  year         = {2026},
}

@phdthesis{21393,
  abstract     = {This thesis documents a voyage towards truth and beauty via formal verification of theorems. To this end, we develop libraries in Lean 4 that present definitions and results from diverse areas of MathematiCS (i.e., Mathematics and Computer Science). The aim is to create code that is understandable, believable, useful, and elegant. The code should stand for itself as much as possible without a need for documentation; however, this text redundantly documents our code artifacts and provides additional context that isn’t present in the code. This thesis is written for readers who know Lean 4 but are not familiar with any of the topics presented. We manifest truth and beauty in three formalized areas of MathematiCS.

We formalize general grammars in Lean 4 and use grammars to show closure of the class of type-0 languages under four operations: union, reversal, concatenation, and the Kleene star.

Our second stop is the theory of optimization. Farkas established that a system of linear inequalities has a solution if and only if we cannot obtain a contradiction by taking a linear combination of the inequalities. We state and formally prove several Farkas-like theorems over linearly ordered fields in Lean 4. Furthermore, we extend duality theory to the case when some coefficients are allowed to take “infinite values”. Additionally, we develop the basics of the theory of optimization in terms of the framework called General-Valued Constraint Satisfaction Problems, and we prove that, if a Rational-Valued Constraint Satisfaction Problem template has symmetric fractional polymorphisms of all arities, then its basic LP relaxation is tight.

Our third stop is matroid theory. Seymour’s decomposition theorem is a hallmark result in matroid theory, presenting a structural characterization of the class of regular matroids. We aim to formally verify Seymour’s theorem in Lean 4. First, we build a library for working with totally unimodular matrices. We define binary matroids and their standard representations, and we prove that they form a matroid in the sense in which Mathlib defines matroids. We define regular matroids to be matroids for which there exists a full representation rational matrix that is totally unimodular, and we prove that all regular matroids are binary. We define 1-sum, 2-sum, and 3-sum of binary matroids as specific ways to compose their standard representation matrices. We prove that the 1-sum, the 2-sum, and the 3-sum of regular matroids are a regular matroid, which concludes the composition direction of Seymour’s theorem. The (more difficult) decomposition direction remains unproved.

In the pursuit of truth, we focus on identifying the trusted code in each project and presenting it faithfully. We emphasize the readability and believability of definitions rather than choosing definitions that are easier to work with. In search for beauty, we focus on the philosophical framework of Roger Scruton, who emphasizes that beauty is not a mere decoration but, most importantly, beauty is the means for shaping our place in the world and a source of redemption, where it can be viewed as a substitute for religion.},
  author       = {Dvorak, Martin},
  isbn         = {978-3-99078-074-9},
  issn         = {2663-337X},
  pages        = {160},
  publisher    = {Institute of Science and Technology Austria},
  school       = {Institute of Science and Technology Austria},
  title        = {{Pursuit of truth and beauty in Lean 4: Formally verified theory of grammars, optimization, matroids}},
  doi          = {10.15479/AT-ISTA-21393},
  year         = {2026},
}

@article{18855,
  abstract     = {A central problem in computational statistics is to convert a procedure for sampling combinatorial objects into a procedure for counting those objects, and vice versa. We consider sampling problems which come from Gibbs distributions, which are families of probability distributions over a discrete space Ω with probability mass function of the form μ^Ω_β(ω) ∝ e^{β H(ω)} for β in an interval [β_min, β_max] and H(ω) ∈ {0} ∪ [1, n]. Two important parameters are the partition function, which is the normalization factor Z(β) = ∑_{ω ∈ Ω} e^{β H(ω)}, and the vector of pre-image counts c_x=|H^-1(x)|.
We develop black-box sampling algorithms to estimate the counts using roughly Õ(n²/ε²) samples for integer-valued distributions and Õ(q/ε²) samples for general distributions, where q = log(Z(β_max)/Z(β_min)) (ignoring some second-order terms and parameters). We show this is optimal up to logarithmic factors. We illustrate with improved algorithms for counting connected subgraphs, independent sets, and perfect matchings. As a key subroutine, we estimate all values of the partition function using Õ(n²/ε²) samples for integer-valued distributions and Õ(q/ε²) samples for general distributions. This improves over a prior algorithm of Huber (2015) which computes a single point estimate Z(β_max) and which uses a slightly larger amount of samples. We show matching lower bounds, demonstrating this complexity is optimal as a function of n and q up to logarithmic terms.},
  author       = {Harris, David G. and Kolmogorov, Vladimir},
  issn         = {1549-6333},
  journal      = {ACM Transactions on Algorithms},
  number       = {1},
  publisher    = {Association for Computing Machinery},
  title        = {{Parameter estimation for Gibbs distributions}},
  doi          = {10.1145/3685676},
  volume       = {21},
  year         = {2025},
}

@article{10045,
  abstract     = {Given a fixed finite metric space (V,μ), the {\em minimum 0-extension problem}, denoted as 0-Ext[μ], is equivalent to the following optimization problem: minimize function of the form minx∈Vn∑ifi(xi)+∑ijcijμ(xi,xj) where cij,cvi are given nonnegative costs and fi:V→R are functions given by fi(xi)=∑v∈Vcviμ(xi,v). The computational complexity of 0-Ext[μ] has been recently established by Karzanov and by Hirai: if metric μ is {\em orientable modular} then 0-Ext[μ] can be solved in polynomial time, otherwise 0-Ext[μ] is NP-hard. To prove the tractability part, Hirai developed a theory of discrete convex functions on orientable modular graphs generalizing several known classes of functions in discrete convex analysis, such as L♮-convex functions. We consider a more general version of the problem in which unary functions fi(xi) can additionally have terms of the form cuv;iμ(xi,{u,v}) for {u,v}∈F, where set F⊆(V2) is fixed. We extend the complexity classification above by providing an explicit condition on (μ,F) for the problem to be tractable. In order to prove the tractability part, we generalize Hirai's theory and define a larger class of discrete convex functions. It covers, in particular, another well-known class of functions, namely submodular functions on an integer lattice. Finally, we improve the complexity of Hirai's algorithm for solving 0-Ext on orientable modular graphs.
},
  author       = {Dvorak, Martin and Kolmogorov, Vladimir},
  issn         = {1436-4646},
  journal      = {Mathematical Programming},
  keywords     = {minimum 0-extension problem, metric labeling problem, discrete metric spaces, metric extensions, computational complexity, valued constraint satisfaction problems, discrete convex analysis, L-convex functions},
  pages        = {279--322},
  publisher    = {Springer Nature},
  title        = {{Generalized minimum 0-extension problem and discrete convexity}},
  doi          = {10.1007/s10107-024-02064-5},
  volume       = {209},
  year         = {2025},
}

@article{21007,
  abstract     = {Currently, the best known tradeoff between approximation ratio and complexity for the Sparsest Cut problem is achieved by the algorithm in [Sherman, FOCS 2009]: it computes O(√(log n)/ε)-approximation using O(nε logO(1) n) maxflows for any ε∈[Θ(1/log n),Θ(1)]. It works by solving the SDP relaxation of [Arora-Rao-Vazirani, STOC 2004] using the Multiplicative Weights Update algorithm (MW) of [Arora-Kale, JACM 2016]. To implement one MW step, Sherman approximately solves a multicommodity flow problem using another application of MW. Nested MW steps are solved via a certain "chaining" algorithm that combines results of multiple calls to the maxflow algorithm. We present an alternative approach that avoids solving the multicommodity flow problem and instead computes "violating paths". This simplifies Sherman's algorithm by removing a need for a nested application of MW, and also allows parallelization: we show how to compute O(√(log n)/ε)-approximation via O(logO(1) n) maxflows using O(nε) processors. We also revisit Sherman's chaining algorithm, and present a simpler version together with a new analysis.},
  author       = {Kolmogorov, Vladimir},
  issn         = {1549-6333},
  journal      = {ACM Transactions on Algorithms},
  number       = {4},
  pages        = {1--22},
  publisher    = {Association for Computing Machinery},
  title        = {{A simpler and parallelizable $O(\sqrt{\log n})$-approximation algorithm for SPARSEST CUT}},
  doi          = {10.1145/3748723},
  volume       = {21},
  year         = {2025},
}

@article{21143,
  abstract     = {The Lovász Local Lemma (LLL) is a powerful tool in probabilistic
combinatorics which can be used to establish the existence of objects with certain
properties. The breakthrough paper by Moser & Tardos (STOC’09 and JACM 2010)
and follow-up work revealed that the LLL has intimate connections with a class of
stochastic local search algorithms for finding such desirable objects.
Besides conditions for convergence, many other natural questions can be asked
about algorithms; for instance, “are they parallelizable?”, “how many solutions can
they output?”, “what is the expected ‘weight’ of a solution?”. These questions and
more have been answered for a class of LLL-inspired algorithms called commutative. In
this paper we introduce a new, very natural and more general notion of commutativity
(essentially matrix commutativity) which allows us to show a number of new refined
properties of LLL-inspired local search algorithms with significantly simpler proofs.},
  author       = {Harris, David G. and Iliopoulos, Fotios and Kolmogorov, Vladimir},
  issn         = {1557-2862},
  journal      = {Theory of Computing},
  number       = {5},
  pages        = {1--34},
  publisher    = {University of Chicago Press},
  title        = {{A new notion of commutativity for the algorithmic Lovász Local Lemma}},
  doi          = {10.4086/toc.2025.v021a005},
  volume       = {21},
  year         = {2025},
}

@article{21144,
  abstract     = {This paper deals with the algorithmic aspects of solving feasibility problems of semidefinite programming (SDP), aka linear matrix inequalities (LMIs). Since in some SDP instances all feasible solutions have irrational entries, numerical solvers that work with rational numbers can only find an approximate solution. We study the following question: Is it possible to certify feasibility of a given SDP using an approximate solution that is sufficiently close to some exact solution? Existing approaches make the assumption that there exist rational feasible solutions (and use techniques such as rounding and lattice reduction algorithms). We propose an alternative approach that does not need this assumption. More specifically, we show how to construct a system of polynomial equations whose set of real solutions is guaranteed to have an isolated correct solution (assuming that the target exact solution is maximum-rank). This allows, in particular, for us to use algorithms from real algebraic geometry for solving systems of polynomial equations, yielding a hybrid (or symbolic-numerical) method for SDPs. We experimentally compare it with a pure symbolic method in [D. Henrion, S. Naldi, and M. Safey El Din, SIAM J. Optim., 26 (2016), pp. 2512–2539]; the hybrid method was able to certify feasibility of many SDP instances on which the aforementioned paper failed. Our approach may have further applications, such as refining an approximate solution using methods of numerical algebraic geometry for systems of polynomial equations.},
  author       = {Kolmogorov, Vladimir and Naldi, Simone and Zapata, Jeferson},
  issn         = {1095-7189},
  journal      = {SIAM Journal on Optimization},
  number       = {3},
  pages        = {1630--1654},
  publisher    = {Society for Industrial and Applied Mathematics},
  title        = {{Certifying solutions of degenerate semidefinite programs}},
  doi          = {10.1137/24m1664691},
  volume       = {35},
  year         = {2025},
}

@unpublished{21399,
  abstract     = {We report on the Equational Theories Project (ETP), an online collaborative pilot project to explore new ways to collaborate in mathematics with machine assistance. The project successfully determined all 22 028 942 edges of the implication graph between the 4694 simplest equational laws on magmas, by a combination of human-generated and automated proofs, all validated by the formal proof assistant language Lean. As a result of this project, several new constructions of magmas satisfying specific laws were discovered, and several auxiliary questions were also addressed, such as the effect of restricting attention to finite magmas.},
  author       = {Bolan, Matthew and Breitner, Joachim and Brox, Jose and Carlini, Nicholas and Carneiro, Mario and van Doorn, Floris and Dvorak, Martin and Goens, Andrés and Hill, Aaron and Husum, Harald and Mejia, Hernán Ibarra and Kocsis, Zoltan A. and Le Floch, Bruno and Bar-on, Amir and Luccioli, Lorenzo and McNeil, Douglas and Meiburg, Alex and Monticone, Pietro and Nielsen, Pace P. and Osazuwa, Emmanuel Osalotioman and Paolini, Giovanni and Petracci, Marco and Reinke, Bernhard and Renshaw, David and Rossel, Marcus and Roux, Cody and Scanvic, Jérémy and Srinivas, Shreyas and Tadipatri, Anand Rao and Tao, Terence and Tsyrklevich, Vlad and Vaquerizo-Villar, Fernando and Weber, Daniel and Zheng, Fan},
  eprint       = {2512.07087},
  eprinttype   = {arXiv},
  note         = {arXiv preprint},
  title        = {{The equational theories project: Advancing collaborative mathematical research at scale}},
  doi          = {10.48550/arXiv.2512.07087},
  year         = {2025},
}

@unpublished{21398,
  abstract     = {Seymour's decomposition theorem is a hallmark result in matroid theory presenting a structural characterization of the class of regular matroids. Formalization of matroid theory faces many challenges, most importantly that only a limited number of notions and results have been implemented so far. In this work, we formalize the proof of the forward (composition) direction of Seymour's theorem for regular matroids. To this end, we develop a library in Lean 4 that implements definitions and results about totally unimodular matrices, vector matroids, their standard representations, regular matroids, and 1-, 2-, and 3-sums of matrices and binary matroids given by their standard representations. Using this framework, we formally state Seymour's decomposition theorem and implement a formally verified proof of the composition direction in the setting where the matroids have finite rank and may have infinite ground sets.},
  author       = {Dvorak, Martin and Figueroa-Reid, Tristan and Hamadani, Rida and Hwang, Byung-Hak and Karunus, Evgenia and Kolmogorov, Vladimir and Meiburg, Alexander and Nelson, Alexander and Nelson, Peter and Sandey, Mark and Sergeev, Ivan},
  eprint       = {2509.20539},
  eprinttype   = {arXiv},
  note         = {arXiv preprint},
  pages        = {21},
  title        = {{Composition direction of Seymour's theorem for regular matroids — Formally verified}},
  doi          = {10.48550/arXiv.2509.20539},
  year         = {2025},
}

@inproceedings{17236,
  abstract     = {Currently, the best known tradeoff between approximation ratio and complexity for the Sparsest Cut problem is achieved by the algorithm in [Sherman, FOCS 2009]: it computes O(√(log n)/ε)-approximation using O(nε logO(1) n) maxflows for any ε∈[Θ(1/log n),Θ(1)]. It works by solving the SDP relaxation of [Arora-Rao-Vazirani, STOC 2004] using the Multiplicative Weights Update algorithm (MW) of [Arora-Kale, JACM 2016]. To implement one MW step, Sherman approximately solves a multicommodity flow problem using another application of MW. Nested MW steps are solved via a certain "chaining" algorithm that combines results of multiple calls to the maxflow algorithm.
We present an alternative approach that avoids solving the multicommodity flow problem and instead computes "violating paths". This simplifies Sherman's algorithm by removing a need for a nested application of MW, and also allows parallelization: we show how to compute O(√(log n)/ε)-approximation via O(logO(1) n) maxflows using O(nε) processors.
We also revisit Sherman's chaining algorithm, and present a simpler version together with a new analysis.},
  author       = {Kolmogorov, Vladimir},
  booktitle    = {Proceedings of the 36th ACM Symposium on Parallelism in Algorithms and Architectures},
  isbn         = {9798400704161},
  issn         = {1548-6109},
  location     = {Nantes, France},
  pages        = {403--414},
  publisher    = {Association for Computing Machinery},
  title        = {{A simpler and parallelizable $O(\sqrt{\log n})$-approximation algorithm for sparsest cut}},
  doi          = {10.1145/3626183.3659969},
  year         = {2024},
}

@unpublished{20071,
  abstract     = {Farkas established that a system of linear inequalities has a solution if and only if we cannot obtain a contradiction by taking a linear combination of the inequalities. We state and formally prove several Farkas-like theorems over linearly ordered fields in Lean 4. Furthermore, we extend duality theory to the case when some coefficients are allowed to take "infinite values".},
  author       = {Dvorak, Martin and Kolmogorov, Vladimir},
  eprint       = {2409.08119},
  eprinttype   = {arXiv},
  note         = {arXiv preprint},
  keywords     = {Farkas lemma, linear programming, extended reals, calculus of inductive constructions},
  title        = {{Duality theory in linear optimization and its extensions -- formally verified}},
  doi          = {10.48550/arXiv.2409.08119},
  year         = {2024},
}

@inproceedings{14084,
  abstract     = {A central problem in computational statistics is to convert a procedure for sampling combinatorial objects into a procedure for counting those objects, and vice versa. We will consider sampling problems which come from Gibbs distributions, which are families of probability distributions over a discrete space Ω with probability mass function of the form μ^Ω_β(ω) ∝ e^{β H(ω)} for β in an interval [β_min, β_max] and H(ω) ∈ {0} ∪ [1, n].
The partition function is the normalization factor Z(β) = ∑_{ω ∈ Ω} e^{β H(ω)}, and the log partition ratio is defined as q = log(Z(β_max)/Z(β_min)).
We develop a number of algorithms to estimate the counts c_x using roughly Õ(q/ε²) samples for general Gibbs distributions and Õ(n²/ε²) samples for integer-valued distributions (ignoring some second-order terms and parameters). We show this is optimal up to logarithmic factors. We illustrate with improved algorithms for counting connected subgraphs and perfect matchings in a graph.},
  author       = {Harris, David G. and Kolmogorov, Vladimir},
  booktitle    = {50th International Colloquium on Automata, Languages, and Programming},
  isbn         = {9783959772785},
  issn         = {1868-8969},
  location     = {Paderborn, Germany},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Parameter estimation for Gibbs distributions}},
  doi          = {10.4230/LIPIcs.ICALP.2023.72},
  volume       = {261},
  year         = {2023},
}

@inproceedings{14448,
  abstract     = {We consider the problem of solving LP relaxations of MAP-MRF inference problems, and in particular the method proposed recently in [16], [35]. As a key computational subroutine, it uses a variant of the Frank-Wolfe (FW) method to minimize a smooth convex function over a combinatorial polytope. We propose an efficient implementation of this subroutine based on in-face Frank-Wolfe directions, introduced in [4] in a different context. More generally, we define an abstract data structure for a combinatorial subproblem that enables in-face FW directions, and describe its specialization for tree-structured MAP-MRF inference subproblems. Experimental results indicate that the resulting method is the current state-of-art LP solver for some classes of problems. Our code is available at pub.ist.ac.at/~vnk/papers/IN-FACE-FW.html.},
  author       = {Kolmogorov, Vladimir},
  booktitle    = {Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition},
  isbn         = {9798350301298},
  issn         = {1063-6919},
  location     = {Vancouver, Canada},
  pages        = {11980--11989},
  publisher    = {IEEE},
  title        = {{Solving relaxations of MAP-MRF problems: Combinatorial in-face Frank-Wolfe directions}},
  doi          = {10.1109/CVPR52729.2023.01153},
  volume       = {2023},
  year         = {2023},
}

@inproceedings{13120,
  abstract     = {We formalized general (i.e., type-0) grammars using the Lean 3 proof assistant. We defined basic notions of rewrite rules and of words derived by a grammar, and used grammars to show closure of the class of type-0 languages under four operations: union, reversal, concatenation, and the Kleene star. The literature mostly focuses on Turing machine arguments, which are possibly more difficult to formalize. For the Kleene star, we could not follow the literature and came up with our own grammar-based construction.},
  author       = {Dvorak, Martin and Blanchette, Jasmin},
  booktitle    = {14th International Conference on Interactive Theorem Proving},
  isbn         = {9783959772846},
  issn         = {1868-8969},
  location     = {Bialystok, Poland},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Closure properties of general grammars - formally verified}},
  doi          = {10.4230/LIPIcs.ITP.2023.15},
  volume       = {268},
  year         = {2023},
}

@article{10737,
  abstract     = {We consider two models for the sequence labeling (tagging) problem. The first one is a Pattern-Based Conditional Random Field (PB), in which the energy of a string (chain labeling) x=x1⁢…⁢xn∈Dn is a sum of terms over intervals [i,j] where each term is non-zero only if the substring xi⁢…⁢xj equals a prespecified word w∈Λ. The second model is a Weighted Context-Free Grammar (WCFG) frequently used for natural language processing. PB and WCFG encode local and non-local interactions respectively, and thus can be viewed as complementary. We propose a Grammatical Pattern-Based CRF model (GPB) that combines the two in a natural way. We argue that it has certain advantages over existing approaches such as the Hybrid model of Benedí and Sanchez that combines N-grams and WCFGs. The focus of this paper is to analyze the complexity of inference tasks in a GPB such as computing MAP. We present a polynomial-time algorithm for general GPBs and a faster version for a special case that we call Interaction Grammars.},
  author       = {Takhanov, Rustem and Kolmogorov, Vladimir},
  issn         = {1571-4128},
  journal      = {Intelligent Data Analysis},
  number       = {1},
  pages        = {257--272},
  publisher    = {IOS Press},
  title        = {{Combining pattern-based CRFs and weighted context-free grammars}},
  doi          = {10.3233/IDA-205623},
  volume       = {26},
  year         = {2022},
}

@article{9365,
  abstract     = {In this paper, we propose a new iterative method with alternated inertial step for solving split common null point problem in real Hilbert spaces. We obtain weak convergence of the proposed iterative algorithm. Furthermore, we introduce the notion of bounded linear regularity property for the split common null point problem and obtain the linear convergence property for the new algorithm under some mild assumptions. Finally, we provide some numerical examples to demonstrate the performance and efficiency of the proposed method.},
  author       = {Ogbuisi, Ferdinard U. and Shehu, Yekini and Yao, Jen Chih},
  issn         = {1029-4945},
  journal      = {Optimization},
  number       = {13},
  pages        = {3767--3795},
  publisher    = {Taylor and Francis},
  title        = {{Convergence analysis of new inertial method for the split common null point problem}},
  doi          = {10.1080/02331934.2021.1914035},
  volume       = {71},
  year         = {2022},
}

@article{9469,
  abstract     = {In this paper, we consider reflected three-operator splitting methods for monotone inclusion problems in real Hilbert spaces. To do this, we first obtain weak convergence analysis and nonasymptotic O(1/n) convergence rate of the reflected Krasnosel'skiĭ-Mann iteration for finding a fixed point of nonexpansive mapping in real Hilbert spaces under some seemingly easy to implement conditions on the iterative parameters. We then apply our results to three-operator splitting for the monotone inclusion problem and consequently obtain the corresponding convergence analysis. Furthermore, we derive reflected primal-dual algorithms for highly structured monotone inclusion problems. Some numerical implementations are drawn from splitting methods to support the theoretical analysis.},
  author       = {Iyiola, Olaniyi S. and Enyi, Cyril D. and Shehu, Yekini},
  issn         = {1029-4937},
  journal      = {Optimization Methods and Software},
  number       = {4},
  pages        = {1527--1565},
  publisher    = {Taylor and Francis},
  title        = {{Reflected three-operator splitting method for monotone inclusion problem}},
  doi          = {10.1080/10556788.2021.1924715},
  volume       = {37},
  year         = {2022},
}

@article{7577,
  abstract     = {Weak convergence of inertial iterative method for solving variational inequalities is the focus of this paper. The cost function is assumed to be non-Lipschitz and monotone. We propose a projection-type method with inertial terms and give weak convergence analysis under appropriate conditions. Some test results are performed and compared with relevant methods in the literature to show the efficiency and advantages given by our proposed methods.},
  author       = {Shehu, Yekini and Iyiola, Olaniyi S.},
  issn         = {1563-504X},
  journal      = {Applicable Analysis},
  number       = {1},
  pages        = {192--216},
  publisher    = {Taylor and Francis},
  title        = {{Weak convergence for variational inequalities with inertial-type method}},
  doi          = {10.1080/00036811.2020.1736287},
  volume       = {101},
  year         = {2022},
}

@inproceedings{10552,
  abstract     = {We study a class of convex-concave saddle-point problems of the form minxmaxy⟨Kx,y⟩+fP(x)−h∗(y) where K is a linear operator, fP is the sum of a convex function f with a Lipschitz-continuous gradient and the indicator function of a bounded convex polytope P, and h∗ is a convex (possibly nonsmooth) function. Such problem arises, for example, as a Lagrangian relaxation of various discrete optimization problems. Our main assumptions are the existence of an efficient linear minimization oracle (lmo) for fP and an efficient proximal map for h∗ which motivate the solution via a blend of proximal primal-dual algorithms and Frank-Wolfe algorithms. In case h∗ is the indicator function of a linear constraint and function f is quadratic, we show a O(1/n2) convergence rate on the dual objective, requiring O(nlogn) calls of lmo. If the problem comes from the constrained optimization problem minx∈Rd{fP(x)|Ax−b=0} then we additionally get bound O(1/n2) both on the primal gap and on the infeasibility gap. In the most general case, we show a O(1/n) convergence rate of the primal-dual gap again requiring O(nlogn) calls of lmo. To the best of our knowledge, this improves on the known convergence rates for the considered class of saddle-point problems. We show applications to labeling problems frequently appearing in machine learning and computer vision.},
  author       = {Kolmogorov, Vladimir and Pock, Thomas},
  booktitle    = {38th International Conference on Machine Learning},
  location     = {Virtual},
  title        = {{One-sided Frank-Wolfe algorithms for saddle problems}},
  year         = {2021},
}

