@article{21661,
  abstract     = {Model checking undiscounted reachability and expected-reward properties on Markov decision processes (MDPs) are key for the verification of systems that act under uncertainty. Popular algorithms are policy iteration and variants of value iteration; in tool competitions, most participants rely on the latter. These algorithms generally need worst-case exponential time. However, the problem can equally be formulated as a linear programme, solvable in polynomial time. In this paper, we give a detailed overview of today’s state-of-the-art algorithms for MDP model checking with a focus on performance and correctness. We highlight their fundamental differences, and describe various optimizations and implementation variants. We experimentally compare floating-point and exact-arithmetic implementations of all algorithms on three benchmark sets using two probabilistic model checkers. Our results show that (optimistic) value iteration is a sensible default, but other algorithms are preferable in specific settings. This paper thereby provides a guide for MDP verification practitioners—tool builders and users alike.},
  author       = {Hartmanns, Arnd and Junges, Sebastian and Quatmann, Tim and Weininger, Maximilian},
  issn         = {1433-2787},
  journal      = {International Journal on Software Tools for Technology Transfer},
  keywords     = {Quantitative model checking, Markov decision process, Linear programming, Value iteration, Policy iteration},
  publisher    = {Springer Nature},
  title        = {The Revised Practitioner’s Guide to {MDP} Model Checking Algorithms},
  doi          = {10.1007/s10009-026-00848-y},
  year         = {2026},
}

