@inproceedings{18159,
  abstract     = {Markov Decision Processes (MDPs) are a classical model for decision making in the presence of uncertainty. Often they are viewed as state transformers with planning objectives defined with respect to paths over MDP states. An increasingly popular alternative is to view them as distribution transformers, giving rise to a sequence of probability distributions over MDP states. For instance, reachability and safety properties in modeling robot swarms or chemical reaction networks are naturally defined in terms of probability distributions over states. Verifying such distributional properties is known to be hard and often beyond the reach of classical state-based verification techniques. In this work, we consider the problems of certified policy (i.e. controller) verification and synthesis in MDPs under distributional reach-avoidance specifications. By certified we mean that, along with a policy, we also aim to synthesize a (checkable) certificate ensuring that the MDP indeed satisfies the property. Thus, given the target set of distributions and an unsafe set of distributions over MDP states, our goal is to either synthesize a certificate for a given policy or synthesize a policy along with a certificate, proving that the target distribution can be reached while avoiding unsafe distributions. To solve this problem, we introduce the novel notion of distributional reach-avoid certificates and present automated procedures for (1) synthesizing a certificate for a given policy, and (2) synthesizing a policy together with the certificate, both providing formal guarantees on certificate correctness. Our experimental evaluation demonstrates the ability of our method to solve several non-trivial examples, including a multi-agent robot-swarm model, to synthesize certified policies and to certify existing policies.},
  author       = {Akshay, S. and Chatterjee, Krishnendu and Meggendorfer, Tobias and Žikelić, Đorđe},
  booktitle    = {Proceedings of the Thirty-Third International Joint Conference on Artificial Intelligence},
  isbn         = {9781956792041},
  issn         = {1045-0823},
  location     = {Jeju, South Korea},
  pages        = {3--12},
  publisher    = {International Joint Conferences on Artificial Intelligence},
  title        = {{Certified policy verification and synthesis for MDPs under distributional reach-avoidance properties}},
  doi          = {10.24963/ijcai.2024/1},
  year         = {2024},
}

@inproceedings{18160,
  abstract     = {Markov decision processes (MDPs) provide a standard framework for sequential decision making under uncertainty. However, MDPs do not take uncertainty in transition probabilities into account. Robust Markov decision processes (RMDPs) address this shortcoming of MDPs by assigning to each transition an uncertainty set rather than a single probability value. In this work, we consider polytopic RMDPs in which all uncertainty sets are polytopes and study the problem of solving long-run average reward polytopic RMDPs. We present a novel perspective on this problem and show that it can be reduced to solving long-run average reward turn-based stochastic games with finite state and action spaces. This reduction allows us to derive several important consequences that were hitherto not known to hold for polytopic RMDPs. First, we derive new computational complexity bounds for solving long-run average reward polytopic RMDPs, showing for the first time that the threshold decision problem for them is in NP ∩ coNP and that they admit a randomized algorithm with sub-exponential expected runtime. Second, we present Robust Polytopic Policy Iteration (RPPI), a novel policy iteration algorithm for solving long-run average reward polytopic RMDPs. Our experimental evaluation shows that RPPI is much more efficient in solving long-run average reward polytopic RMDPs compared to state-of-the-art methods based on value iteration.},
  author       = {Chatterjee, Krishnendu and Kafshdar Goharshadi, Ehsan and Karrabi, Mehrdad and Novotný, Petr and Žikelić, Đorđe},
  booktitle    = {Proceedings of the Thirty-Third International Joint Conference on Artificial Intelligence},
  isbn         = {9781956792041},
  issn         = {1045-0823},
  location     = {Jeju, South Korea},
  pages        = {6707--6715},
  publisher    = {International Joint Conferences on Artificial Intelligence},
  title        = {{Solving long-run average reward robust MDPs via stochastic games}},
  doi          = {10.24963/ijcai.2024/741},
  year         = {2024},
}
