@article{21661,
  abstract     = {Model checking undiscounted reachability and expected-reward properties on Markov decision processes (MDPs) are key for the verification of systems that act under uncertainty. Popular algorithms are policy iteration and variants of value iteration; in tool competitions, most participants rely on the latter. These algorithms generally need worst-case exponential time. However, the problem can equally be formulated as a linear programme, solvable in polynomial time. In this paper, we give a detailed overview of today's state-of-the-art algorithms for MDP model checking with a focus on performance and correctness. We highlight their fundamental differences, and describe various optimizations and implementation variants. We experimentally compare floating-point and exact-arithmetic implementations of all algorithms on three benchmark sets using two probabilistic model checkers. Our results show that (optimistic) value iteration is a sensible default, but other algorithms are preferable in specific settings. This paper thereby provides a guide for MDP verification practitioners---tool builders and users alike.},
  author       = {Hartmanns, Arnd and Junges, Sebastian and Quatmann, Tim and Weininger, Maximilian},
  issn         = {1433-2787},
  journal      = {International Journal on Software Tools for Technology Transfer},
  keywords     = {Quantitative model checking, Markov decision process, Linear programming, Value iteration, Policy iteration},
  publisher    = {Springer Nature},
  title        = {The Revised Practitioner's Guide to {MDP} Model Checking Algorithms},
  doi          = {10.1007/s10009-026-00848-y},
  year         = {2026},
}

@misc{5549,
  abstract     = {This repository contains the experimental part of the CAV 2015 publication Counterexample Explanation by Learning Small Strategies in Markov Decision Processes.
We extended the probabilistic model checker PRISM to represent strategies of Markov Decision Processes as Decision Trees.
The archive contains a java executable version of the extended tool (prism\_dectree.jar) together with a few examples of the PRISM benchmark library.
To execute the program, please have a look at the README.txt, which provides instructions and further information on the archive.
The archive contains scripts that (if run often enough) reproduces the data presented in the publication.},
  author       = {Fellner, Andreas},
  keywords     = {Markov Decision Process, Decision Tree, Probabilistic Verification, Counterexample Explanation},
  publisher    = {Institute of Science and Technology Austria},
  title        = {Experimental Part of {CAV} 2015 Publication: Counterexample Explanation by Learning Small Strategies in {Markov} Decision Processes},
  doi          = {10.15479/AT:ISTA:28},
  year         = {2015},
}

