[{"department":[{"_id":"ToHe"}],"acknowledgement":"This work is partly supported by the European Research Council under Grant No.: ERC-2020-AdG 101020093. It is also partially supported by the State Government of Styria, Austria – Department Zukunftsfonds Steiermark.","publication_identifier":{"eissn":["2374-3468"],"issn":["2159-5399"]},"doi":"10.1609/aaai.v39i15.33719","oa_version":"Preprint","scopus_import":"1","month":"04","external_id":{"arxiv":["2412.11994"]},"conference":{"start_date":"2025-02-25","end_date":"2025-03-04","name":"AAAI: Conference on Artificial Intelligence","location":"Philadelphia, PA, United States"},"language":[{"iso":"eng"}],"project":[{"call_identifier":"H2020","_id":"62781420-2b32-11ec-9570-8d9b63373d4d","name":"Vigilant Algorithmic Monitoring of Software","grant_number":"101020093"}],"date_updated":"2026-02-16T12:24:30Z","OA_place":"repository","author":[{"last_name":"Cano Cordoba","full_name":"Cano Cordoba, Filip","id":"708cad98-e86a-11ef-8098-bdae2d7c6af1","first_name":"Filip","orcid":"0000-0002-0783-904X"},{"orcid":"0000-0002-2985-7724","first_name":"Thomas A","id":"40876CD8-F248-11E8-B48F-1D18A9856A87","last_name":"Henzinger","full_name":"Henzinger, Thomas A"},{"full_name":"Könighofer, Bettina","last_name":"Könighofer","first_name":"Bettina"},{"orcid":"0000-0001-8974-2542","first_name":"Konstantin","full_name":"Kueffner, Konstantin","last_name":"Kueffner","id":"8121a2d0-dc85-11ea-9058-af578f3b4515"},{"first_name":"Kaushik","orcid":"0000-0001-9864-7475","last_name":"Mallik","full_name":"Mallik, Kaushik","id":"0834ff3c-6d72-11ec-94e0-b5b0a4fb8598"}],"date_published":"2025-04-11T00:00:00Z","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","title":"Fairness shields: Safeguarding against biased decision makers","ec_funded":1,"citation":{"ama":"Cano Cordoba F, Henzinger TA, Könighofer B, Kueffner K, Mallik K. Fairness shields: Safeguarding against biased decision makers. 
In: <i>Proceedings of the 39th AAAI Conference on Artificial Intelligence</i>. Vol 39. Association for the Advancement of Artificial Intelligence; 2025:15659-15668. doi:<a href=\"https://doi.org/10.1609/aaai.v39i15.33719\">10.1609/aaai.v39i15.33719</a>","ista":"Cano Cordoba F, Henzinger TA, Könighofer B, Kueffner K, Mallik K. 2025. Fairness shields: Safeguarding against biased decision makers. Proceedings of the 39th AAAI Conference on Artificial Intelligence. AAAI: Conference on Artificial Intelligence vol. 39, 15659–15668.","apa":"Cano Cordoba, F., Henzinger, T. A., Könighofer, B., Kueffner, K., &#38; Mallik, K. (2025). Fairness shields: Safeguarding against biased decision makers. In <i>Proceedings of the 39th AAAI Conference on Artificial Intelligence</i> (Vol. 39, pp. 15659–15668). Philadelphia, PA, United States: Association for the Advancement of Artificial Intelligence. <a href=\"https://doi.org/10.1609/aaai.v39i15.33719\">https://doi.org/10.1609/aaai.v39i15.33719</a>","chicago":"Cano Cordoba, Filip, Thomas A Henzinger, Bettina Könighofer, Konstantin Kueffner, and Kaushik Mallik. “Fairness Shields: Safeguarding against Biased Decision Makers.” In <i>Proceedings of the 39th AAAI Conference on Artificial Intelligence</i>, 39:15659–68. Association for the Advancement of Artificial Intelligence, 2025. <a href=\"https://doi.org/10.1609/aaai.v39i15.33719\">https://doi.org/10.1609/aaai.v39i15.33719</a>.","short":"F. Cano Cordoba, T.A. Henzinger, B. Könighofer, K. Kueffner, K. Mallik, in:, Proceedings of the 39th AAAI Conference on Artificial Intelligence, Association for the Advancement of Artificial Intelligence, 2025, pp. 15659–15668.","ieee":"F. Cano Cordoba, T. A. Henzinger, B. Könighofer, K. Kueffner, and K. Mallik, “Fairness shields: Safeguarding against biased decision makers,” in <i>Proceedings of the 39th AAAI Conference on Artificial Intelligence</i>, Philadelphia, PA, United States, 2025, vol. 39, no. 15, pp. 
15659–15668.","mla":"Cano Cordoba, Filip, et al. “Fairness Shields: Safeguarding against Biased Decision Makers.” <i>Proceedings of the 39th AAAI Conference on Artificial Intelligence</i>, vol. 39, no. 15, Association for the Advancement of Artificial Intelligence, 2025, pp. 15659–68, doi:<a href=\"https://doi.org/10.1609/aaai.v39i15.33719\">10.1609/aaai.v39i15.33719</a>."},"oa":1,"corr_author":"1","quality_controlled":"1","year":"2025","date_created":"2025-05-11T22:02:39Z","page":"15659-15668","day":"11","volume":39,"type":"conference","article_processing_charge":"No","OA_type":"green","publisher":"Association for the Advancement of Artificial Intelligence","issue":"15","publication":"Proceedings of the 39th AAAI Conference on Artificial Intelligence","main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2412.11994","open_access":"1"}],"status":"public","intvolume":"        39","_id":"19665","abstract":[{"text":"As AI-based decision-makers increasingly influence human lives, it is a growing concern that their decisions may be unfair or biased with respect to people's protected attributes, such as gender and race. Most existing bias prevention measures provide probabilistic fairness guarantees in the long run, and it is possible that the decisions are biased on any decision sequence of fixed length. We introduce *fairness shielding*, where a symbolic decision-maker---the fairness shield---continuously monitors the sequence of decisions of another deployed black-box decision-maker, and makes interventions so that a given fairness criterion is met while the total intervention costs are minimized. We present four different algorithms for computing fairness shields, among which one guarantees fairness over fixed horizons, and three guarantee fairness periodically after fixed intervals. 
Given a distribution over future decisions and their intervention costs, our algorithms solve different instances of bounded-horizon optimal control problems with different levels of computational costs and optimality guarantees. Our empirical evaluation demonstrates the effectiveness of these shields in ensuring fairness while maintaining cost efficiency across various scenarios.","lang":"eng"}],"publication_status":"published","arxiv":1},{"quality_controlled":"1","date_created":"2025-05-11T22:02:39Z","year":"2025","citation":{"short":"T. Meggendorfer, M. Weininger, P. Wienhöft, in:, Proceedings of the 39th AAAI Conference on Artificial Intelligence, Association for the Advancement of Artificial Intelligence, 2025, pp. 26631–26641.","ieee":"T. Meggendorfer, M. Weininger, and P. Wienhöft, “Solving robust Markov decision processes: Generic, reliable, efficient,” in <i>Proceedings of the 39th AAAI Conference on Artificial Intelligence</i>, Philadelphia, PA, United States, 2025, vol. 39, no. 25, pp. 26631–26641.","mla":"Meggendorfer, Tobias, et al. “Solving Robust Markov Decision Processes: Generic, Reliable, Efficient.” <i>Proceedings of the 39th AAAI Conference on Artificial Intelligence</i>, vol. 39, no. 25, Association for the Advancement of Artificial Intelligence, 2025, pp. 26631–41, doi:<a href=\"https://doi.org/10.1609/aaai.v39i25.34865\">10.1609/aaai.v39i25.34865</a>.","ama":"Meggendorfer T, Weininger M, Wienhöft P. Solving robust Markov decision processes: Generic, reliable, efficient. In: <i>Proceedings of the 39th AAAI Conference on Artificial Intelligence</i>. Vol 39. Association for the Advancement of Artificial Intelligence; 2025:26631-26641. doi:<a href=\"https://doi.org/10.1609/aaai.v39i25.34865\">10.1609/aaai.v39i25.34865</a>","ista":"Meggendorfer T, Weininger M, Wienhöft P. 2025. Solving robust Markov decision processes: Generic, reliable, efficient. Proceedings of the 39th AAAI Conference on Artificial Intelligence. 
AAAI: Conference on Artificial Intelligence vol. 39, 26631–26641.","apa":"Meggendorfer, T., Weininger, M., &#38; Wienhöft, P. (2025). Solving robust Markov decision processes: Generic, reliable, efficient. In <i>Proceedings of the 39th AAAI Conference on Artificial Intelligence</i> (Vol. 39, pp. 26631–26641). Philadelphia, PA, United States: Association for the Advancement of Artificial Intelligence. <a href=\"https://doi.org/10.1609/aaai.v39i25.34865\">https://doi.org/10.1609/aaai.v39i25.34865</a>","chicago":"Meggendorfer, Tobias, Maximilian Weininger, and Patrick Wienhöft. “Solving Robust Markov Decision Processes: Generic, Reliable, Efficient.” In <i>Proceedings of the 39th AAAI Conference on Artificial Intelligence</i>, 39:26631–41. Association for the Advancement of Artificial Intelligence, 2025. <a href=\"https://doi.org/10.1609/aaai.v39i25.34865\">https://doi.org/10.1609/aaai.v39i25.34865</a>."},"oa":1,"related_material":{"link":[{"relation":"software","url":"https://doi.org/10.5281/zenodo.14385449"}]},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","title":"Solving robust Markov decision processes: Generic, reliable, efficient","ec_funded":1,"publication_status":"published","arxiv":1,"main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2412.10185","open_access":"1"}],"intvolume":"        39","_id":"19666","abstract":[{"text":"Markov decision processes (MDP) are a well-established model for sequential decision-making in the presence of probabilities. In *robust* MDP (RMDP), every action is associated with an *uncertainty set* of probability distributions, modelling that transition probabilities are not known precisely. Based on the known theoretical connection to stochastic games, we provide a framework for solving RMDPs that is generic, reliable, and efficient. 
It is *generic* both with respect to the model, allowing for a wide range of uncertainty sets, including but not limited to intervals, L1- or L2-balls, and polytopes; and with respect to the objective, including long-run average reward, undiscounted total reward, and stochastic shortest path. It is *reliable*, as our approach not only converges in the limit, but provides precision guarantees at any time during the computation. It is *efficient* because -- in contrast to state-of-the-art approaches -- it avoids explicitly constructing the underlying stochastic game. Consequently, our prototype implementation outperforms existing tools by several orders of magnitude and can solve RMDPs with a million states in under a minute.","lang":"eng"}],"status":"public","OA_type":"green","article_processing_charge":"No","publication":"Proceedings of the 39th AAAI Conference on Artificial Intelligence","issue":"25","publisher":"Association for the Advancement of Artificial Intelligence","volume":39,"page":"26631-26641","day":"11","type":"conference","doi":"10.1609/aaai.v39i25.34865","acknowledgement":"This project has received funding from the European Union’s Horizon 2020 research and innovation programme under the Marie Sklodowska-Curie grant agreement No. 
101034413,\r\nthe ERC CoG 863818 (ForM-SMArt), and the DFG through the Cluster of Excellence EXC 2050/1 (CeTI, project ID 390696704, as part of Germany’s Excellence Strategy) and the TRR 248 (see https://perspicuous-computing.science, project ID 389792660).","publication_identifier":{"eissn":["2374-3468"],"issn":["2159-5399"]},"department":[{"_id":"KrCh"}],"author":[{"id":"b21b0c15-30a2-11eb-80dc-f13ca25802e1","full_name":"Meggendorfer, Tobias","last_name":"Meggendorfer","first_name":"Tobias","orcid":"0000-0002-1712-2165"},{"last_name":"Weininger","id":"02ab0197-cc70-11ed-ab61-918e71f56881","full_name":"Weininger, Maximilian","orcid":"0000-0002-0163-2152","first_name":"Maximilian"},{"last_name":"Wienhöft","full_name":"Wienhöft, Patrick","first_name":"Patrick"}],"date_published":"2025-04-11T00:00:00Z","OA_place":"repository","date_updated":"2026-02-16T12:25:05Z","project":[{"grant_number":"863818","name":"Formal Methods for Stochastic Models: Algorithms and Applications","call_identifier":"H2020","_id":"0599E47C-7A3F-11EA-A408-12923DDC885E"}],"external_id":{"arxiv":["2412.10185"]},"language":[{"iso":"eng"}],"conference":{"location":"Philadelphia, PA, United States","name":"AAAI: Conference on Artificial Intelligence","end_date":"2025-03-04","start_date":"2025-02-25"},"oa_version":"Preprint","scopus_import":"1","month":"04"},{"date_published":"2025-04-11T00:00:00Z","author":[{"full_name":"Chatterjee, Krishnendu","last_name":"Chatterjee","id":"2E5DCA20-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0002-4561-241X","first_name":"Krishnendu"},{"orcid":"0000-0002-8595-0587","first_name":"Ehsan","last_name":"Kafshdar Goharshadi","id":"103b4fa0-896a-11ed-bdf8-87b697bef40d","full_name":"Kafshdar Goharshadi, Ehsan"},{"last_name":"Karrabi","full_name":"Karrabi, Mehrdad","id":"67638922-f394-11eb-9cf6-f20423e08757","orcid":"0009-0007-5253-9170","first_name":"Mehrdad"},{"first_name":"Harshit J.","last_name":"Motwani","full_name":"Motwani, Harshit J."},{"last_name":"Seeliger","full_name":"Seeliger, Maximilian","first_name":"Maximilian"},{"last_name":"Zikelic","id":"294AA7A6-F248-11E8-B48F-1D18A9856A87","full_name":"Zikelic, Dorde","first_name":"Dorde","orcid":"0000-0002-4681-1699"}],"project":[{"grant_number":"863818","name":"Formal Methods for Stochastic Models: Algorithms and Applications","_id":"0599E47C-7A3F-11EA-A408-12923DDC885E","call_identifier":"H2020"}],"date_updated":"2026-02-16T12:24:47Z","OA_place":"repository","conference":{"location":"Philadelphia, PA, United States","name":"AAAI: Conference on Artificial Intelligence","end_date":"2025-03-04","start_date":"2025-02-25"},"language":[{"iso":"eng"}],"external_id":{"arxiv":["2412.16226"]},"month":"04","oa_version":"Preprint","scopus_import":"1","doi":"10.1609/aaai.v39i11.33213","publication_identifier":{"issn":["2159-5399"],"eissn":["2374-3468"]},"acknowledgement":"This work was partially funded by ERC CoG 863818 (ForM-SMArt) and Austrian Science Fund (FWF) 10.55776/COE12.","department":[{"_id":"KrCh"}],"arxiv":1,"publication_status":"published","status":"public","abstract":[{"text":"The problem of checking satisfiability of linear real arithmetic (LRA) and non-linear real arithmetic (NRA) formulas has broad applications, in particular, they are at the heart of logic-related applications such as logic for artificial intelligence, program analysis, etc. While there has been much work on checking satisfiability of unquantified LRA and NRA formulas, the problem of checking satisfiability of quantified LRA and NRA formulas remains a significant challenge. The main bottleneck in the existing methods is a computationally expensive quantifier elimination step. In this work, we propose a novel method for efficient quantifier elimination in quantified LRA and NRA formulas. We propose a template-based Skolemization approach, where we automatically synthesize linear/polynomial Skolem functions in order to eliminate quantifiers in the formula. 
The key technical ingredient in our approach are Positivstellensätze theorems from algebraic geometry, which allow for an efficient manipulation of polynomial inequalities. Our method offers a range of appealing theoretical properties combined with a strong practical performance. On the theory side, our method is sound, semi-complete, and runs in subexponential time and polynomial space, as opposed to existing sound and complete quantifier elimination methods that run in doubly-exponential time and at least exponential space. On the practical side, our experiments show superior performance compared to state of the art SMT solvers in terms of the number of solved instances and runtime, both on LRA and on NRA benchmarks.","lang":"eng"}],"_id":"19667","intvolume":"        39","main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2412.16226"}],"publisher":"Association for the Advancement of Artificial Intelligence","publication":"Proceedings of the 39th AAAI Conference on Artificial Intelligence","issue":"11","article_processing_charge":"No","OA_type":"green","type":"conference","day":"11","page":"11158-11166","volume":39,"year":"2025","date_created":"2025-05-11T22:02:39Z","corr_author":"1","quality_controlled":"1","oa":1,"citation":{"ieee":"K. Chatterjee, E. Goharshady, M. Karrabi, H. J. Motwani, M. Seeliger, and D. Zikelic, “Quantified linear and polynomial arithmetic satisfiability via template-based skolemization,” in <i>Proceedings of the 39th AAAI Conference on Artificial Intelligence</i>, Philadelphia, PA, United States, 2025, vol. 39, no. 11, pp. 11158–11166.","mla":"Chatterjee, Krishnendu, et al. “Quantified Linear and Polynomial Arithmetic Satisfiability via Template-Based Skolemization.” <i>Proceedings of the 39th AAAI Conference on Artificial Intelligence</i>, vol. 39, no. 11, Association for the Advancement of Artificial Intelligence, 2025, pp. 
11158–66, doi:<a href=\"https://doi.org/10.1609/aaai.v39i11.33213\">10.1609/aaai.v39i11.33213</a>.","short":"K. Chatterjee, E. Goharshady, M. Karrabi, H.J. Motwani, M. Seeliger, D. Zikelic, in:, Proceedings of the 39th AAAI Conference on Artificial Intelligence, Association for the Advancement of Artificial Intelligence, 2025, pp. 11158–11166.","ama":"Chatterjee K, Goharshady E, Karrabi M, Motwani HJ, Seeliger M, Zikelic D. Quantified linear and polynomial arithmetic satisfiability via template-based skolemization. In: <i>Proceedings of the 39th AAAI Conference on Artificial Intelligence</i>. Vol 39. Association for the Advancement of Artificial Intelligence; 2025:11158-11166. doi:<a href=\"https://doi.org/10.1609/aaai.v39i11.33213\">10.1609/aaai.v39i11.33213</a>","ista":"Chatterjee K, Goharshady E, Karrabi M, Motwani HJ, Seeliger M, Zikelic D. 2025. Quantified linear and polynomial arithmetic satisfiability via template-based skolemization. Proceedings of the 39th AAAI Conference on Artificial Intelligence. AAAI: Conference on Artificial Intelligence vol. 39, 11158–11166.","apa":"Chatterjee, K., Goharshady, E., Karrabi, M., Motwani, H. J., Seeliger, M., &#38; Zikelic, D. (2025). Quantified linear and polynomial arithmetic satisfiability via template-based skolemization. In <i>Proceedings of the 39th AAAI Conference on Artificial Intelligence</i> (Vol. 39, pp. 11158–11166). Philadelphia, PA, United States: Association for the Advancement of Artificial Intelligence. <a href=\"https://doi.org/10.1609/aaai.v39i11.33213\">https://doi.org/10.1609/aaai.v39i11.33213</a>","chicago":"Chatterjee, Krishnendu, Ehsan Goharshady, Mehrdad Karrabi, Harshit J. Motwani, Maximilian Seeliger, and Dorde Zikelic. “Quantified Linear and Polynomial Arithmetic Satisfiability via Template-Based Skolemization.” In <i>Proceedings of the 39th AAAI Conference on Artificial Intelligence</i>, 39:11158–66. Association for the Advancement of Artificial Intelligence, 2025. 
<a href=\"https://doi.org/10.1609/aaai.v39i11.33213\">https://doi.org/10.1609/aaai.v39i11.33213</a>."},"ec_funded":1,"title":"Quantified linear and polynomial arithmetic satisfiability via template-based skolemization","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87"},{"OA_place":"repository","project":[{"grant_number":"101020093","name":"Vigilant Algorithmic Monitoring of Software","call_identifier":"H2020","_id":"62781420-2b32-11ec-9570-8d9b63373d4d"}],"date_updated":"2025-05-12T09:49:25Z","author":[{"id":"20aa2ae8-f2f1-11ed-bbfa-8205053f1342","full_name":"Yu, Zhengqi","last_name":"Yu","first_name":"Zhengqi"},{"orcid":"0000-0002-4681-1699","first_name":"Dorde","last_name":"Zikelic","id":"294AA7A6-F248-11E8-B48F-1D18A9856A87","full_name":"Zikelic, Dorde"},{"last_name":"Henzinger","full_name":"Henzinger, Thomas A","id":"40876CD8-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0002-2985-7724","first_name":"Thomas A"}],"date_published":"2025-04-11T00:00:00Z","scopus_import":"1","oa_version":"Preprint","month":"04","external_id":{"arxiv":["2412.12996"]},"language":[{"iso":"eng"}],"conference":{"location":"Philadelphia, PA, United States","name":"AAAI: Conference on Artificial Intelligence","end_date":"2025-03-04","start_date":"2025-02-25"},"acknowledgement":"This work was supported in part by the ERC project ERC2020-AdG 101020093","publication_identifier":{"eissn":["2374-3468"],"issn":["2159-5399"]},"doi":"10.1609/aaai.v39i25.34840","department":[{"_id":"ToHe"}],"main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2412.12996","open_access":"1"}],"abstract":[{"lang":"eng","text":"Learning-based methods provide a promising approach to solving highly non-linear control tasks that are often challenging for classical control methods. To ensure the satisfaction of a safety property, learning-based methods jointly learn a control policy together with a certificate function for the property. 
Popular examples include barrier functions for safety and Lyapunov functions for asymptotic stability. While there has been significant progress on learning-based control with certificate functions in the white-box setting, where the correctness of the certificate function can be formally verified, there has been little work on ensuring their reliability in the black-box setting where the system dynamics are unknown. In this work, we consider the problems of certifying and repairing neural network control policies and certificate functions in the black-box setting. We propose a novel framework that utilizes runtime monitoring to detect system behaviors that violate the property of interest under some initially trained neural network policy and certificate. These violating behaviors are used to extract new training data, that is used to re-train the neural network policy and the certificate function and to ultimately repair them. We demonstrate the effectiveness of our approach empirically by using it to repair and to boost the safety rate of neural network policies learned by a state-of-the-art method for learning-based control on two autonomous system control tasks."}],"_id":"19668","intvolume":"        39","status":"public","publication_status":"published","arxiv":1,"volume":39,"page":"26409-26417","day":"11","type":"conference","OA_type":"green","article_processing_charge":"No","issue":"25","publication":"Proceedings of the 39th AAAI Conference on Artificial Intelligence","publisher":"Association for the Advancement of Artificial Intelligence","citation":{"short":"E. Yu, D. Zikelic, T.A. Henzinger, in:, Proceedings of the 39th AAAI Conference on Artificial Intelligence, Association for the Advancement of Artificial Intelligence, 2025, pp. 26409–26417.","mla":"Yu, Emily, et al. “Neural Control and Certificate Repair via Runtime Monitoring.” <i>Proceedings of the 39th AAAI Conference on Artificial Intelligence</i>, vol. 39, no. 
25, Association for the Advancement of Artificial Intelligence, 2025, pp. 26409–17, doi:<a href=\"https://doi.org/10.1609/aaai.v39i25.34840\">10.1609/aaai.v39i25.34840</a>.","ieee":"E. Yu, D. Zikelic, and T. A. Henzinger, “Neural control and certificate repair via runtime monitoring,” in <i>Proceedings of the 39th AAAI Conference on Artificial Intelligence</i>, Philadelphia, PA, United States, 2025, vol. 39, no. 25, pp. 26409–26417.","chicago":"Yu, Emily, Dorde Zikelic, and Thomas A Henzinger. “Neural Control and Certificate Repair via Runtime Monitoring.” In <i>Proceedings of the 39th AAAI Conference on Artificial Intelligence</i>, 39:26409–17. Association for the Advancement of Artificial Intelligence, 2025. <a href=\"https://doi.org/10.1609/aaai.v39i25.34840\">https://doi.org/10.1609/aaai.v39i25.34840</a>.","apa":"Yu, E., Zikelic, D., &#38; Henzinger, T. A. (2025). Neural control and certificate repair via runtime monitoring. In <i>Proceedings of the 39th AAAI Conference on Artificial Intelligence</i> (Vol. 39, pp. 26409–26417). Philadelphia, PA, United States: Association for the Advancement of Artificial Intelligence. <a href=\"https://doi.org/10.1609/aaai.v39i25.34840\">https://doi.org/10.1609/aaai.v39i25.34840</a>","ista":"Yu E, Zikelic D, Henzinger TA. 2025. Neural control and certificate repair via runtime monitoring. Proceedings of the 39th AAAI Conference on Artificial Intelligence. AAAI: Conference on Artificial Intelligence vol. 39, 26409–26417.","ama":"Yu E, Zikelic D, Henzinger TA. Neural control and certificate repair via runtime monitoring. In: <i>Proceedings of the 39th AAAI Conference on Artificial Intelligence</i>. Vol 39. Association for the Advancement of Artificial Intelligence; 2025:26409-26417. 
doi:<a href=\"https://doi.org/10.1609/aaai.v39i25.34840\">10.1609/aaai.v39i25.34840</a>"},"oa":1,"corr_author":"1","quality_controlled":"1","date_created":"2025-05-11T22:02:40Z","year":"2025","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","title":"Neural control and certificate repair via runtime monitoring","ec_funded":1},{"OA_place":"repository","date_updated":"2025-05-12T09:42:09Z","project":[{"grant_number":"863818","name":"Formal Methods for Stochastic Models: Algorithms and Applications","call_identifier":"H2020","_id":"0599E47C-7A3F-11EA-A408-12923DDC885E"}],"author":[{"id":"2E5DCA20-F248-11E8-B48F-1D18A9856A87","last_name":"Chatterjee","full_name":"Chatterjee, Krishnendu","first_name":"Krishnendu","orcid":"0000-0002-4561-241X"},{"first_name":"Ruichen","last_name":"Luo","id":"b391db08-1ffe-11ee-8b67-d18ddcfb5a14","full_name":"Luo, Ruichen"},{"orcid":"0000-0001-5103-038X","first_name":"Raimundo J","full_name":"Saona Urmeneta, Raimundo J","id":"BD1DF4C4-D767-11E9-B658-BC13E6697425","last_name":"Saona Urmeneta"},{"last_name":"Svoboda","full_name":"Svoboda, Jakub","id":"130759D2-D7DD-11E9-87D2-DE0DE6697425","first_name":"Jakub","orcid":"0000-0002-1419-3267"}],"date_published":"2025-04-11T00:00:00Z","oa_version":"Preprint","scopus_import":"1","month":"04","external_id":{"arxiv":["2412.12228"]},"language":[{"iso":"eng"}],"conference":{"location":"Philadelphia, PA, United States","name":"AAAI: Conference on Artificial Intelligence","end_date":"2025-03-04","start_date":"2025-02-25"},"acknowledgement":"This research was partially supported by the ERC CoG 863818 (ForM-SMArt) grant and the Austrian Science Fund (FWF) 10.55776/COE12 grant.","publication_identifier":{"issn":["2159-5399"],"eissn":["2374-3468"]},"doi":"10.1609/aaai.v39i11.33212","department":[{"_id":"KrCh"}],"main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2412.12228","open_access":"1"}],"intvolume":"        39","_id":"19669","abstract":[{"text":"We consider a class of optimization problems 
defined by a system of linear equations with min and max operators. This class of optimization problems has been studied under restrictive conditions, such as, (C1) the halting or stability condition; (C2) the non-negative coefficients condition; (C3) the sum upto 1 condition; and (C4) the only min or only max operator condition. Several seminal results in the literature focus on special cases. For example, turn-based stochastic games correspond to conditions C2 and C3; and Markov decision process to conditions C2, C3, and C4. However, the systematic computational complexity study of all the cases has not been explored, which we address in this work. Some highlights of our results are: with conditions C2 and C4, and with conditions C3 and C4, the problem is NP-complete, whereas with condition C1 only, the problem is in UP intersects coUP. Finally, we establish the computational complexity of the decision problem of checking the respective conditions.","lang":"eng"}],"status":"public","publication_status":"published","arxiv":1,"volume":39,"day":"11","page":"11150-11157","type":"conference","OA_type":"green","article_processing_charge":"No","issue":"11","publication":"Proceedings of the 39th AAAI Conference on Artificial Intelligence","publisher":"Association for the Advancement of Artificial Intelligence","citation":{"ieee":"K. Chatterjee, R. Luo, R. J. Saona Urmeneta, and J. Svoboda, “Linear equations with min and max operators: Computational complexity,” in <i>Proceedings of the 39th AAAI Conference on Artificial Intelligence</i>, Philadelphia, PA, United States, 2025, vol. 39, no. 11, pp. 11150–11157.","mla":"Chatterjee, Krishnendu, et al. “Linear Equations with Min and Max Operators: Computational Complexity.” <i>Proceedings of the 39th AAAI Conference on Artificial Intelligence</i>, vol. 39, no. 11, Association for the Advancement of Artificial Intelligence, 2025, pp. 
11150–57, doi:<a href=\"https://doi.org/10.1609/aaai.v39i11.33212\">10.1609/aaai.v39i11.33212</a>.","short":"K. Chatterjee, R. Luo, R.J. Saona Urmeneta, J. Svoboda, in:, Proceedings of the 39th AAAI Conference on Artificial Intelligence, Association for the Advancement of Artificial Intelligence, 2025, pp. 11150–11157.","apa":"Chatterjee, K., Luo, R., Saona Urmeneta, R. J., &#38; Svoboda, J. (2025). Linear equations with min and max operators: Computational complexity. In <i>Proceedings of the 39th AAAI Conference on Artificial Intelligence</i> (Vol. 39, pp. 11150–11157). Philadelphia, PA, United States: Association for the Advancement of Artificial Intelligence. <a href=\"https://doi.org/10.1609/aaai.v39i11.33212\">https://doi.org/10.1609/aaai.v39i11.33212</a>","chicago":"Chatterjee, Krishnendu, Ruichen Luo, Raimundo J Saona Urmeneta, and Jakub Svoboda. “Linear Equations with Min and Max Operators: Computational Complexity.” In <i>Proceedings of the 39th AAAI Conference on Artificial Intelligence</i>, 39:11150–57. Association for the Advancement of Artificial Intelligence, 2025. <a href=\"https://doi.org/10.1609/aaai.v39i11.33212\">https://doi.org/10.1609/aaai.v39i11.33212</a>.","ama":"Chatterjee K, Luo R, Saona Urmeneta RJ, Svoboda J. Linear equations with min and max operators: Computational complexity. In: <i>Proceedings of the 39th AAAI Conference on Artificial Intelligence</i>. Vol 39. Association for the Advancement of Artificial Intelligence; 2025:11150-11157. doi:<a href=\"https://doi.org/10.1609/aaai.v39i11.33212\">10.1609/aaai.v39i11.33212</a>","ista":"Chatterjee K, Luo R, Saona Urmeneta RJ, Svoboda J. 2025. Linear equations with min and max operators: Computational complexity. Proceedings of the 39th AAAI Conference on Artificial Intelligence. AAAI: Conference on Artificial Intelligence vol. 39, 11150–11157."},"oa":1,"corr_author":"1","quality_controlled":"1","date_created":"2025-05-11T22:02:40Z","year":"2025","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","ec_funded":1,"title":"Linear equations with min and max operators: Computational complexity"},{"intvolume":"39","_id":"19713","abstract":[{"text":"Distributed optimization is the standard way of speeding up machine learning training, and most of the research in the area focuses on distributed first-order, gradient-based methods. Yet, there are settings where some computationally-bounded nodes may not be able to implement first-order, gradient-based optimization, while they could still contribute to joint optimization tasks. In this paper, we initiate the study of hybrid decentralized optimization, studying settings where nodes with zeroth-order and first-order optimization capabilities co-exist in a distributed system, and attempt to jointly solve an optimization task over some data distribution. We essentially show that, under reasonable parameter settings, such a system can not only withstand noisier zeroth-order agents but can even benefit from integrating such agents into the optimization process, rather than ignoring their information. At the core of our approach is a new analysis of distributed optimization with noisy and possibly-biased gradient estimators, which may be of independent interest. Our results hold for both convex and non-convex objectives. 
Experimental results on standard optimization tasks confirm our analysis, showing that hybrid first-zeroth order optimization can be practical, even when training deep neural networks.","lang":"eng"}],"status":"public","main_file_link":[{"url":"https://doi.org/10.1609/aaai.v39i19.34290","open_access":"1"}],"arxiv":1,"publication_status":"published","type":"journal_article","volume":39,"day":"11","page":"20778-20786","publication":"Proceedings of the 39th AAAI Conference on Artificial Intelligence","issue":"19","publisher":"Association for the Advancement of Artificial Intelligence","OA_type":"free access","article_processing_charge":"No","oa":1,"citation":{"ieee":"S. Talaei, M. Ansaripour, G. Nadiradze, and D.-A. Alistarh, “Hybrid decentralized optimization: Leveraging both first- and zeroth-order optimizers for faster convergence,” <i>Proceedings of the 39th AAAI Conference on Artificial Intelligence</i>, vol. 39, no. 19. Association for the Advancement of Artificial Intelligence, pp. 20778–20786, 2025.","short":"S. Talaei, M. Ansaripour, G. Nadiradze, D.-A. Alistarh, Proceedings of the 39th AAAI Conference on Artificial Intelligence 39 (2025) 20778–20786.","mla":"Talaei, Shayan, et al. “Hybrid Decentralized Optimization: Leveraging Both First- and Zeroth-Order Optimizers for Faster Convergence.” <i>Proceedings of the 39th AAAI Conference on Artificial Intelligence</i>, vol. 39, no. 19, Association for the Advancement of Artificial Intelligence, 2025, pp. 20778–86, doi:<a href=\"https://doi.org/10.1609/aaai.v39i19.34290\">10.1609/aaai.v39i19.34290</a>.","ista":"Talaei S, Ansaripour M, Nadiradze G, Alistarh D-A. 2025. Hybrid decentralized optimization: Leveraging both first- and zeroth-order optimizers for faster convergence. Proceedings of the 39th AAAI Conference on Artificial Intelligence. 39(19), 20778–20786.","ama":"Talaei S, Ansaripour M, Nadiradze G, Alistarh D-A. 
Hybrid decentralized optimization: Leveraging both first- and zeroth-order optimizers for faster convergence. <i>Proceedings of the 39th AAAI Conference on Artificial Intelligence</i>. 2025;39(19):20778-20786. doi:<a href=\"https://doi.org/10.1609/aaai.v39i19.34290\">10.1609/aaai.v39i19.34290</a>","chicago":"Talaei, Shayan, Matin Ansaripour, Giorgi Nadiradze, and Dan-Adrian Alistarh. “Hybrid Decentralized Optimization: Leveraging Both First- and Zeroth-Order Optimizers for Faster Convergence.” <i>Proceedings of the 39th AAAI Conference on Artificial Intelligence</i>. Association for the Advancement of Artificial Intelligence, 2025. <a href=\"https://doi.org/10.1609/aaai.v39i19.34290\">https://doi.org/10.1609/aaai.v39i19.34290</a>.","apa":"Talaei, S., Ansaripour, M., Nadiradze, G., &#38; Alistarh, D.-A. (2025). Hybrid decentralized optimization: Leveraging both first- and zeroth-order optimizers for faster convergence. <i>Proceedings of the 39th AAAI Conference on Artificial Intelligence</i>. Association for the Advancement of Artificial Intelligence. 
<a href=\"https://doi.org/10.1609/aaai.v39i19.34290\">https://doi.org/10.1609/aaai.v39i19.34290</a>"},"date_created":"2025-05-19T14:15:35Z","year":"2025","quality_controlled":"1","corr_author":"1","article_type":"original","title":"Hybrid decentralized optimization: Leveraging both first- and zeroth-order optimizers for faster convergence","ec_funded":1,"related_material":{"link":[{"url":"https://github.com/ShayanTalaei/HDO","relation":"software"}]},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","OA_place":"publisher","project":[{"grant_number":"805223","_id":"268A44D6-B435-11E9-9278-68D0E5697425","call_identifier":"H2020","name":"Elastic Coordination for Scalable Machine Learning"}],"date_updated":"2026-02-16T12:34:44Z","date_published":"2025-04-11T00:00:00Z","author":[{"first_name":"Shayan","last_name":"Talaei","full_name":"Talaei, Shayan"},{"first_name":"Matin","full_name":"Ansaripour, Matin","last_name":"Ansaripour"},{"first_name":"Giorgi","orcid":"0000-0001-5634-0731","full_name":"Nadiradze, Giorgi","id":"3279A00C-F248-11E8-B48F-1D18A9856A87","last_name":"Nadiradze"},{"full_name":"Alistarh, Dan-Adrian","last_name":"Alistarh","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0003-3650-940X","first_name":"Dan-Adrian"}],"month":"04","oa_version":"Preprint","scopus_import":"1","language":[{"iso":"eng"}],"external_id":{"arxiv":["2210.07703"]},"publication_identifier":{"eissn":["2374-3468"],"issn":["2159-5399"]},"acknowledgement":"This project has received funding from the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement\r\nNo 805223 ScaleML). 
The authors would like to acknowledge Eugenia Iofinova for useful discussions during the inception of this project.","doi":"10.1609/aaai.v39i19.34290","department":[{"_id":"DaAl"}]},{"department":[{"_id":"ToHe"}],"acknowledgement":"This work was supported by Institut Carnot STAR, Marseille, France and by the European Union’s Horizon 2020 research and innovation programme under the Marie Skłodowska-Curie Grant Agreement No. 101034413.","publication_identifier":{"issn":["2159-5399"],"isbn":["1577358872"],"eissn":["2374-3468"]},"doi":"10.1609/aaai.v38i9.28943","scopus_import":"1","oa_version":"Published Version","month":"03","language":[{"iso":"eng"}],"project":[{"grant_number":"101034413","_id":"fc2ed2f7-9c52-11eb-aca3-c01059dda49c","call_identifier":"H2020","name":"IST-BRIDGE: International postdoctoral program"}],"date_updated":"2025-04-14T07:54:55Z","author":[{"last_name":"Trinh","full_name":"Trinh, Giang","first_name":"Giang"},{"last_name":"Benhamou","full_name":"Benhamou, Belaid","first_name":"Belaid"},{"full_name":"Pastva, Samuel","last_name":"Pastva","id":"07c5ea74-f61c-11ec-a664-aa7c5d957b2b","first_name":"Samuel","orcid":"0000-0003-1993-0331"},{"first_name":"Sylvain","full_name":"Soliman, Sylvain","last_name":"Soliman"}],"date_published":"2024-03-25T00:00:00Z","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","ec_funded":1,"title":"Scalable enumeration of trap spaces in boolean networks via answer set programming","citation":{"apa":"Trinh, G., Benhamou, B., Pastva, S., &#38; Soliman, S. (2024). Scalable enumeration of trap spaces in boolean networks via answer set programming. In <i>Proceedings of the 38th AAAI Conference on Artificial Intelligence</i> (Vol. 38, pp. 10714–10722). Association for the Advancement of Artificial Intelligence. <a href=\"https://doi.org/10.1609/aaai.v38i9.28943\">https://doi.org/10.1609/aaai.v38i9.28943</a>","chicago":"Trinh, Giang, Belaid Benhamou, Samuel Pastva, and Sylvain Soliman. 
“Scalable Enumeration of Trap Spaces in Boolean Networks via Answer Set Programming.” In <i>Proceedings of the 38th AAAI Conference on Artificial Intelligence</i>, 38:10714–22. Association for the Advancement of Artificial Intelligence, 2024. <a href=\"https://doi.org/10.1609/aaai.v38i9.28943\">https://doi.org/10.1609/aaai.v38i9.28943</a>.","ama":"Trinh G, Benhamou B, Pastva S, Soliman S. Scalable enumeration of trap spaces in boolean networks via answer set programming. In: <i>Proceedings of the 38th AAAI Conference on Artificial Intelligence</i>. Vol 38. Association for the Advancement of Artificial Intelligence; 2024:10714-10722. doi:<a href=\"https://doi.org/10.1609/aaai.v38i9.28943\">10.1609/aaai.v38i9.28943</a>","ista":"Trinh G, Benhamou B, Pastva S, Soliman S. 2024. Scalable enumeration of trap spaces in boolean networks via answer set programming. Proceedings of the 38th AAAI Conference on Artificial Intelligence. vol. 38, 10714–10722.","ieee":"G. Trinh, B. Benhamou, S. Pastva, and S. Soliman, “Scalable enumeration of trap spaces in boolean networks via answer set programming,” in <i>Proceedings of the 38th AAAI Conference on Artificial Intelligence</i>, 2024, vol. 38, no. 9, pp. 10714–10722.","mla":"Trinh, Giang, et al. “Scalable Enumeration of Trap Spaces in Boolean Networks via Answer Set Programming.” <i>Proceedings of the 38th AAAI Conference on Artificial Intelligence</i>, vol. 38, no. 9, Association for the Advancement of Artificial Intelligence, 2024, pp. 10714–22, doi:<a href=\"https://doi.org/10.1609/aaai.v38i9.28943\">10.1609/aaai.v38i9.28943</a>.","short":"G. Trinh, B. Benhamou, S. Pastva, S. Soliman, in:, Proceedings of the 38th AAAI Conference on Artificial Intelligence, Association for the Advancement of Artificial Intelligence, 2024, pp. 
10714–10722."},"oa":1,"quality_controlled":"1","year":"2024","date_created":"2024-04-14T22:01:02Z","day":"25","page":"10714-10722","volume":38,"type":"conference","article_processing_charge":"No","publisher":"Association for the Advancement of Artificial Intelligence","publication":"Proceedings of the 38th AAAI Conference on Artificial Intelligence","issue":"9","main_file_link":[{"open_access":"1","url":"https://amu.hal.science/hal-04523118/"}],"status":"public","_id":"15321","intvolume":"        38","abstract":[{"text":"Boolean Networks (BNs) are widely used as a modeling formalism in several domains, notably systems biology and computer science. A fundamental problem in BN analysis is the enumeration of trap spaces, which are hypercubes in the state space that cannot be escaped once entered. Several methods have been proposed for enumerating trap spaces, however they often suffer from scalability and efficiency issues, particularly for large and complex models. To our knowledge, the most efficient and recent methods for the trap space enumeration all rely on Answer Set Programming (ASP), which has been widely applied to the analysis of BNs. Motivated by these considerations, our work proposes a new method for enumerating trap spaces in BNs using ASP. We evaluate the method on a mix of 250+ real-world and 400+ randomly generated BNs, showing that it enables analysis of models beyond the capabilities of existing tools (namely pyboolnet, mpbn, trappist, and trapmvn).","lang":"eng"}],"publication_status":"published"},{"date_created":"2024-01-18T07:44:31Z","year":"2023","quality_controlled":"1","corr_author":"1","oa":1,"citation":{"ieee":"D. Zikelic, M. Lechner, T. A. Henzinger, and K. Chatterjee, “Learning control policies for stochastic systems with reach-avoid guarantees,” in <i>Proceedings of the 37th AAAI Conference on Artificial Intelligence</i>, Washington, DC, United States, 2023, vol. 37, no. 10, pp. 11926–11935.","short":"D. Zikelic, M. Lechner, T.A. 
Henzinger, K. Chatterjee, in:, Proceedings of the 37th AAAI Conference on Artificial Intelligence, Association for the Advancement of Artificial Intelligence, 2023, pp. 11926–11935.","mla":"Zikelic, Dorde, et al. “Learning Control Policies for Stochastic Systems with Reach-Avoid Guarantees.” <i>Proceedings of the 37th AAAI Conference on Artificial Intelligence</i>, vol. 37, no. 10, Association for the Advancement of Artificial Intelligence, 2023, pp. 11926–35, doi:<a href=\"https://doi.org/10.1609/aaai.v37i10.26407\">10.1609/aaai.v37i10.26407</a>.","chicago":"Zikelic, Dorde, Mathias Lechner, Thomas A Henzinger, and Krishnendu Chatterjee. “Learning Control Policies for Stochastic Systems with Reach-Avoid Guarantees.” In <i>Proceedings of the 37th AAAI Conference on Artificial Intelligence</i>, 37:11926–35. Association for the Advancement of Artificial Intelligence, 2023. <a href=\"https://doi.org/10.1609/aaai.v37i10.26407\">https://doi.org/10.1609/aaai.v37i10.26407</a>.","apa":"Zikelic, D., Lechner, M., Henzinger, T. A., &#38; Chatterjee, K. (2023). Learning control policies for stochastic systems with reach-avoid guarantees. In <i>Proceedings of the 37th AAAI Conference on Artificial Intelligence</i> (Vol. 37, pp. 11926–11935). Washington, DC, United States: Association for the Advancement of Artificial Intelligence. <a href=\"https://doi.org/10.1609/aaai.v37i10.26407\">https://doi.org/10.1609/aaai.v37i10.26407</a>","ista":"Zikelic D, Lechner M, Henzinger TA, Chatterjee K. 2023. Learning control policies for stochastic systems with reach-avoid guarantees. Proceedings of the 37th AAAI Conference on Artificial Intelligence. AAAI: Conference on Artificial Intelligence vol. 37, 11926–11935.","ama":"Zikelic D, Lechner M, Henzinger TA, Chatterjee K. Learning control policies for stochastic systems with reach-avoid guarantees. In: <i>Proceedings of the 37th AAAI Conference on Artificial Intelligence</i>. Vol 37. 
Association for the Advancement of Artificial Intelligence; 2023:11926-11935. doi:<a href=\"https://doi.org/10.1609/aaai.v37i10.26407\">10.1609/aaai.v37i10.26407</a>"},"title":"Learning control policies for stochastic systems with reach-avoid guarantees","ec_funded":1,"related_material":{"record":[{"status":"public","id":"14600","relation":"earlier_version"}]},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","arxiv":1,"publication_status":"published","_id":"14830","abstract":[{"text":"We study the problem of learning controllers for discrete-time non-linear stochastic dynamical systems with formal reach-avoid guarantees. This work presents the first method for providing formal reach-avoid guarantees, which combine and generalize stability and safety guarantees, with a tolerable probability threshold p in [0,1] over the infinite time horizon. Our method leverages advances in machine learning literature and it represents formal certificates as neural networks. In particular, we learn a certificate in the form of a reach-avoid supermartingale (RASM), a novel notion that we introduce in this work. Our RASMs provide reachability and avoidance guarantees by imposing constraints on what can be viewed as a stochastic extension of level sets of Lyapunov functions for deterministic systems. Our approach solves several important problems -- it can be used to learn a control policy from scratch, to verify a reach-avoid specification for a fixed control policy, or to fine-tune a pre-trained policy if it does not satisfy the reach-avoid specification. 
We validate our approach on 3 stochastic non-linear reinforcement learning tasks.","lang":"eng"}],"intvolume":"        37","status":"public","main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/2210.05308"}],"issue":"10","publication":"Proceedings of the 37th AAAI Conference on Artificial Intelligence","publisher":"Association for the Advancement of Artificial Intelligence","OA_type":"green","article_processing_charge":"No","type":"conference","volume":37,"day":"26","page":"11926-11935","keyword":["General Medicine"],"doi":"10.1609/aaai.v37i10.26407","publication_identifier":{"eissn":["2374-3468"],"issn":["2159-5399"]},"acknowledgement":"This work was supported in part by the ERC-2020-AdG 101020093, ERC CoG 863818 (FoRM-SMArt) and the European Union’s Horizon 2020 research and innovation programme under the Marie Skłodowska-Curie Grant Agreement No. 665385.","department":[{"_id":"ToHe"},{"_id":"KrCh"}],"date_published":"2023-06-26T00:00:00Z","author":[{"orcid":"0000-0002-4681-1699","first_name":"Dorde","last_name":"Zikelic","id":"294AA7A6-F248-11E8-B48F-1D18A9856A87","full_name":"Zikelic, Dorde"},{"first_name":"Mathias","last_name":"Lechner","full_name":"Lechner, Mathias","id":"3DC22916-F248-11E8-B48F-1D18A9856A87"},{"first_name":"Thomas A","orcid":"0000-0002-2985-7724","full_name":"Henzinger, Thomas A","id":"40876CD8-F248-11E8-B48F-1D18A9856A87","last_name":"Henzinger"},{"first_name":"Krishnendu","orcid":"0000-0002-4561-241X","id":"2E5DCA20-F248-11E8-B48F-1D18A9856A87","last_name":"Chatterjee","full_name":"Chatterjee, Krishnendu"}],"OA_place":"repository","date_updated":"2025-07-03T11:38:12Z","project":[{"_id":"62781420-2b32-11ec-9570-8d9b63373d4d","call_identifier":"H2020","name":"Vigilant Algorithmic Monitoring of Software","grant_number":"101020093"},{"call_identifier":"H2020","_id":"0599E47C-7A3F-11EA-A408-12923DDC885E","name":"Formal Methods for Stochastic Models: Algorithms and 
Applications","grant_number":"863818"},{"_id":"2564DBCA-B435-11E9-9278-68D0E5697425","call_identifier":"H2020","name":"International IST Doctoral Program","grant_number":"665385"}],"language":[{"iso":"eng"}],"conference":{"end_date":"2023-02-14","start_date":"2023-02-07","location":"Washington, DC, United States","name":"AAAI: Conference on Artificial Intelligence"},"external_id":{"arxiv":["2210.05308"]},"month":"06","scopus_import":"1","oa_version":"Preprint"},{"article_processing_charge":"No","publisher":"Association for the Advancement of Artificial Intelligence","issue":"6","publication":"Proceedings of the AAAI Conference on Artificial Intelligence","day":"28","keyword":["General Medicine"],"page":"6755-6764","volume":36,"type":"journal_article","publication_status":"published","arxiv":1,"main_file_link":[{"url":"https://arxiv.org/abs/2107.08467","open_access":"1"}],"status":"public","intvolume":"        36","_id":"12510","abstract":[{"lang":"eng","text":"We introduce a new statistical verification algorithm that formally quantifies the behavioral robustness of any time-continuous process formulated as a continuous-depth model. Our algorithm solves a set of global optimization (Go) problems over a given time horizon to construct a tight enclosure (Tube) of the set of all process executions starting from a ball of initial states. We call our algorithm GoTube. Through its construction, GoTube ensures that the bounding tube is conservative up to a desired probability and up to a desired tightness.\r\n GoTube is implemented in JAX and optimized to scale to complex continuous-depth neural network models. Compared to advanced reachability analysis tools for time-continuous neural networks, GoTube does not accumulate overapproximation errors between time steps and avoids the infamous wrapping effect inherent in symbolic techniques. 
We show that GoTube substantially outperforms state-of-the-art verification tools in terms of the size of the initial ball, speed, time-horizon, task completion, and scalability on a large set of experiments.\r\n GoTube is stable and sets the state-of-the-art in terms of its ability to scale to time horizons well beyond what has been previously possible."}],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","ec_funded":1,"title":"GoTube: Scalable statistical verification of continuous-depth models","article_type":"original","quality_controlled":"1","year":"2022","date_created":"2023-02-05T17:27:42Z","citation":{"short":"S.A. Gruenbacher, M. Lechner, R. Hasani, D. Rus, T.A. Henzinger, S.A. Smolka, R. Grosu, Proceedings of the AAAI Conference on Artificial Intelligence 36 (2022) 6755–6764.","ieee":"S. A. Gruenbacher <i>et al.</i>, “GoTube: Scalable statistical verification of continuous-depth models,” <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>, vol. 36, no. 6. Association for the Advancement of Artificial Intelligence, pp. 6755–6764, 2022.","mla":"Gruenbacher, Sophie A., et al. “GoTube: Scalable Statistical Verification of Continuous-Depth Models.” <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>, vol. 36, no. 6, Association for the Advancement of Artificial Intelligence, 2022, pp. 6755–64, doi:<a href=\"https://doi.org/10.1609/aaai.v36i6.20631\">10.1609/aaai.v36i6.20631</a>.","chicago":"Gruenbacher, Sophie A., Mathias Lechner, Ramin Hasani, Daniela Rus, Thomas A Henzinger, Scott A. Smolka, and Radu Grosu. “GoTube: Scalable Statistical Verification of Continuous-Depth Models.” <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>. Association for the Advancement of Artificial Intelligence, 2022. <a href=\"https://doi.org/10.1609/aaai.v36i6.20631\">https://doi.org/10.1609/aaai.v36i6.20631</a>.","apa":"Gruenbacher, S. A., Lechner, M., Hasani, R., Rus, D., Henzinger, T. A., Smolka, S. A., &#38; Grosu, R. (2022). 
GoTube: Scalable statistical verification of continuous-depth models. <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>. Association for the Advancement of Artificial Intelligence. <a href=\"https://doi.org/10.1609/aaai.v36i6.20631\">https://doi.org/10.1609/aaai.v36i6.20631</a>","ista":"Gruenbacher SA, Lechner M, Hasani R, Rus D, Henzinger TA, Smolka SA, Grosu R. 2022. GoTube: Scalable statistical verification of continuous-depth models. Proceedings of the AAAI Conference on Artificial Intelligence. 36(6), 6755–6764.","ama":"Gruenbacher SA, Lechner M, Hasani R, et al. GoTube: Scalable statistical verification of continuous-depth models. <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>. 2022;36(6):6755-6764. doi:<a href=\"https://doi.org/10.1609/aaai.v36i6.20631\">10.1609/aaai.v36i6.20631</a>"},"oa":1,"external_id":{"arxiv":["2107.08467"]},"language":[{"iso":"eng"}],"oa_version":"Preprint","scopus_import":"1","month":"06","author":[{"full_name":"Gruenbacher, Sophie A.","last_name":"Gruenbacher","first_name":"Sophie A."},{"full_name":"Lechner, Mathias","last_name":"Lechner","id":"3DC22916-F248-11E8-B48F-1D18A9856A87","first_name":"Mathias"},{"full_name":"Hasani, Ramin","last_name":"Hasani","first_name":"Ramin"},{"last_name":"Rus","full_name":"Rus, Daniela","first_name":"Daniela"},{"first_name":"Thomas A","orcid":"0000-0002-2985-7724","id":"40876CD8-F248-11E8-B48F-1D18A9856A87","full_name":"Henzinger, Thomas A","last_name":"Henzinger"},{"first_name":"Scott A.","last_name":"Smolka","full_name":"Smolka, Scott A."},{"full_name":"Grosu, Radu","last_name":"Grosu","first_name":"Radu"}],"date_published":"2022-06-28T00:00:00Z","project":[{"name":"Formal methods for the design and analysis of complex systems","call_identifier":"FWF","_id":"25F42A32-B435-11E9-9278-68D0E5697425","grant_number":"Z211"},{"grant_number":"101020093","_id":"62781420-2b32-11ec-9570-8d9b63373d4d","call_identifier":"H2020","name":"Vigilant Algorithmic Monitoring of 
Software"}],"date_updated":"2025-04-15T06:26:14Z","department":[{"_id":"ToHe"}],"doi":"10.1609/aaai.v36i6.20631","acknowledgement":"SG is funded by the Austrian Science Fund (FWF) project number W1255-N23. ML and TH are supported in part by FWF under grant Z211-N23 (Wittgenstein Award) and the ERC-2020-AdG 101020093. SS is supported by NSF awards DCL-2040599, CCF-1918225, and CPS-1446832. RH and DR are partially supported by Boeing. RG is partially supported by Horizon-2020 ECSEL Project grant No. 783163 (iDev40).","publication_identifier":{"issn":["2159-5399"],"eissn":["2374-3468"],"isbn":["9781577358350"]}},{"publication_identifier":{"eissn":["2374-3468"],"isbn":["1577358767"]},"doi":"10.1609/aaai.v36i9.21222","department":[{"_id":"KrCh"}],"date_updated":"2024-10-09T21:04:32Z","author":[{"orcid":"0000-0002-1712-2165","first_name":"Tobias","last_name":"Meggendorfer","full_name":"Meggendorfer, Tobias","id":"b21b0c15-30a2-11eb-80dc-f13ca25802e1"}],"date_published":"2022-06-28T00:00:00Z","scopus_import":"1","oa_version":"Preprint","month":"06","external_id":{"arxiv":["2203.01640"]},"conference":{"start_date":"2022-02-22","end_date":"2022-03-01","name":"Conference on Artificial Intelligence","location":"Virtual"},"language":[{"iso":"eng"}],"citation":{"apa":"Meggendorfer, T. (2022). Risk-aware stochastic shortest path. In <i>Proceedings of the 36th AAAI Conference on Artificial Intelligence, AAAI 2022</i> (Vol. 36, pp. 9858–9867). Virtual: Association for the Advancement of Artificial Intelligence. <a href=\"https://doi.org/10.1609/aaai.v36i9.21222\">https://doi.org/10.1609/aaai.v36i9.21222</a>","chicago":"Meggendorfer, Tobias. “Risk-Aware Stochastic Shortest Path.” In <i>Proceedings of the 36th AAAI Conference on Artificial Intelligence, AAAI 2022</i>, 36:9858–67. Association for the Advancement of Artificial Intelligence, 2022. <a href=\"https://doi.org/10.1609/aaai.v36i9.21222\">https://doi.org/10.1609/aaai.v36i9.21222</a>.","ama":"Meggendorfer T. 
Risk-aware stochastic shortest path. In: <i>Proceedings of the 36th AAAI Conference on Artificial Intelligence, AAAI 2022</i>. Vol 36. Association for the Advancement of Artificial Intelligence; 2022:9858-9867. doi:<a href=\"https://doi.org/10.1609/aaai.v36i9.21222\">10.1609/aaai.v36i9.21222</a>","ista":"Meggendorfer T. 2022. Risk-aware stochastic shortest path. Proceedings of the 36th AAAI Conference on Artificial Intelligence, AAAI 2022. Conference on Artificial Intelligence vol. 36, 9858–9867.","mla":"Meggendorfer, Tobias. “Risk-Aware Stochastic Shortest Path.” <i>Proceedings of the 36th AAAI Conference on Artificial Intelligence, AAAI 2022</i>, vol. 36, no. 9, Association for the Advancement of Artificial Intelligence, 2022, pp. 9858–67, doi:<a href=\"https://doi.org/10.1609/aaai.v36i9.21222\">10.1609/aaai.v36i9.21222</a>.","ieee":"T. Meggendorfer, “Risk-aware stochastic shortest path,” in <i>Proceedings of the 36th AAAI Conference on Artificial Intelligence, AAAI 2022</i>, Virtual, 2022, vol. 36, no. 9, pp. 9858–9867.","short":"T. Meggendorfer, in:, Proceedings of the 36th AAAI Conference on Artificial Intelligence, AAAI 2022, Association for the Advancement of Artificial Intelligence, 2022, pp. 9858–9867."},"oa":1,"corr_author":"1","quality_controlled":"1","year":"2022","date_created":"2023-02-19T23:00:56Z","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","title":"Risk-aware stochastic shortest path","main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2203.01640"}],"status":"public","_id":"12568","intvolume":"        36","abstract":[{"lang":"eng","text":"We treat the problem of risk-aware control for stochastic shortest path (SSP) on Markov decision processes (MDP). Typically, expectation is considered for SSP, which however is oblivious to the incurred risk. We present an alternative view, instead optimizing conditional value-at-risk (CVaR), an established risk measure. 
We treat both Markov chains as well as MDP and introduce, through novel insights, two algorithms, based on linear programming and value iteration, respectively. Both algorithms offer precise and provably correct solutions. Evaluation of our prototype implementation shows that risk-aware control is feasible on several moderately sized models."}],"publication_status":"published","arxiv":1,"page":"9858-9867","day":"28","volume":36,"type":"conference","article_processing_charge":"No","publisher":"Association for the Advancement of Artificial Intelligence","issue":"9","publication":"Proceedings of the 36th AAAI Conference on Artificial Intelligence, AAAI 2022"},{"publication":"Proceedings of the AAAI Conference on Artificial Intelligence","issue":"7","publisher":"Association for the Advancement of Artificial Intelligence","article_processing_charge":"No","type":"journal_article","volume":36,"page":"7326-7336","day":"28","keyword":["General Medicine"],"arxiv":1,"publication_status":"published","abstract":[{"text":"We consider the problem of formally verifying almost-sure (a.s.) asymptotic stability in discrete-time nonlinear stochastic control systems. While verifying stability in deterministic control systems is extensively studied in the literature, verifying stability in stochastic control systems is an open problem. The few existing works on this topic either consider only specialized forms of stochasticity or make restrictive assumptions on the system, rendering them inapplicable to learning algorithms with neural network policies. \r\n In this work, we present an approach for general nonlinear stochastic control problems with two novel aspects: (a) instead of classical stochastic extensions of Lyapunov functions, we use ranking supermartingales (RSMs) to certify a.s. asymptotic stability, and (b) we present a method for learning neural network RSMs. \r\n We prove that our approach guarantees a.s. 
asymptotic stability of the system and\r\n provides the first method to obtain bounds on the stabilization time, which stochastic Lyapunov functions do not.\r\n Finally, we validate our approach experimentally on a set of nonlinear stochastic reinforcement learning environments with neural network policies.","lang":"eng"}],"_id":"12511","intvolume":"        36","status":"public","main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/2112.09495"}],"ec_funded":1,"title":"Stability verification in stochastic control systems via neural network supermartingales","related_material":{"record":[{"status":"public","relation":"dissertation_contains","id":"14539"}]},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","date_created":"2023-02-05T17:29:50Z","year":"2022","corr_author":"1","quality_controlled":"1","article_type":"original","oa":1,"citation":{"apa":"Lechner, M., Zikelic, D., Chatterjee, K., &#38; Henzinger, T. A. (2022). Stability verification in stochastic control systems via neural network supermartingales. <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>. Association for the Advancement of Artificial Intelligence. <a href=\"https://doi.org/10.1609/aaai.v36i7.20695\">https://doi.org/10.1609/aaai.v36i7.20695</a>","chicago":"Lechner, Mathias, Dorde Zikelic, Krishnendu Chatterjee, and Thomas A Henzinger. “Stability Verification in Stochastic Control Systems via Neural Network Supermartingales.” <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>. Association for the Advancement of Artificial Intelligence, 2022. <a href=\"https://doi.org/10.1609/aaai.v36i7.20695\">https://doi.org/10.1609/aaai.v36i7.20695</a>.","ama":"Lechner M, Zikelic D, Chatterjee K, Henzinger TA. Stability verification in stochastic control systems via neural network supermartingales. <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>. 2022;36(7):7326-7336. 
doi:<a href=\"https://doi.org/10.1609/aaai.v36i7.20695\">10.1609/aaai.v36i7.20695</a>","ista":"Lechner M, Zikelic D, Chatterjee K, Henzinger TA. 2022. Stability verification in stochastic control systems via neural network supermartingales. Proceedings of the AAAI Conference on Artificial Intelligence. 36(7), 7326–7336.","short":"M. Lechner, D. Zikelic, K. Chatterjee, T.A. Henzinger, Proceedings of the AAAI Conference on Artificial Intelligence 36 (2022) 7326–7336.","ieee":"M. Lechner, D. Zikelic, K. Chatterjee, and T. A. Henzinger, “Stability verification in stochastic control systems via neural network supermartingales,” <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>, vol. 36, no. 7. Association for the Advancement of Artificial Intelligence, pp. 7326–7336, 2022.","mla":"Lechner, Mathias, et al. “Stability Verification in Stochastic Control Systems via Neural Network Supermartingales.” <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>, vol. 36, no. 7, Association for the Advancement of Artificial Intelligence, 2022, pp. 
7326–36, doi:<a href=\"https://doi.org/10.1609/aaai.v36i7.20695\">10.1609/aaai.v36i7.20695</a>."},"language":[{"iso":"eng"}],"external_id":{"arxiv":["2112.09495"]},"month":"06","scopus_import":"1","oa_version":"Preprint","date_published":"2022-06-28T00:00:00Z","author":[{"id":"3DC22916-F248-11E8-B48F-1D18A9856A87","last_name":"Lechner","full_name":"Lechner, Mathias","first_name":"Mathias"},{"last_name":"Zikelic","id":"294AA7A6-F248-11E8-B48F-1D18A9856A87","full_name":"Zikelic, Dorde","first_name":"Dorde","orcid":"0000-0002-4681-1699"},{"last_name":"Chatterjee","full_name":"Chatterjee, Krishnendu","id":"2E5DCA20-F248-11E8-B48F-1D18A9856A87","first_name":"Krishnendu","orcid":"0000-0002-4561-241X"},{"first_name":"Thomas A","orcid":"0000-0002-2985-7724","full_name":"Henzinger, Thomas A","id":"40876CD8-F248-11E8-B48F-1D18A9856A87","last_name":"Henzinger"}],"date_updated":"2026-04-07T13:27:55Z","project":[{"name":"Vigilant Algorithmic Monitoring of Software","_id":"62781420-2b32-11ec-9570-8d9b63373d4d","call_identifier":"H2020","grant_number":"101020093"},{"grant_number":"863818","call_identifier":"H2020","_id":"0599E47C-7A3F-11EA-A408-12923DDC885E","name":"Formal Methods for Stochastic Models: Algorithms and Applications"},{"name":"International IST Doctoral Program","_id":"2564DBCA-B435-11E9-9278-68D0E5697425","call_identifier":"H2020","grant_number":"665385"}],"department":[{"_id":"ToHe"},{"_id":"KrCh"}],"doi":"10.1609/aaai.v36i7.20695","publication_identifier":{"isbn":["9781577358350"],"eissn":["2374-3468"],"issn":["2159-5399"]},"acknowledgement":"This work was supported in part by the ERC-2020-AdG 101020093, ERC CoG 863818 (FoRM-SMArt) and the European Union’s Horizon 2020 research and innovation programme\r\nunder the Marie Skłodowska-Curie Grant Agreement No. 
665385."},{"file_date_updated":"2022-01-26T07:38:08Z","title":"On the verification of neural ODEs with stochastic guarantees","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","date_created":"2022-01-25T15:47:20Z","year":"2021","corr_author":"1","quality_controlled":"1","oa":1,"citation":{"chicago":"Grunbacher, Sophie, Ramin Hasani, Mathias Lechner, Jacek Cyranka, Scott A Smolka, and Radu Grosu. “On the Verification of Neural ODEs with Stochastic Guarantees.” In <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>, 35:11525–35. AAAI Press, 2021.","apa":"Grunbacher, S., Hasani, R., Lechner, M., Cyranka, J., Smolka, S. A., &#38; Grosu, R. (2021). On the verification of neural ODEs with stochastic guarantees. In <i>Proceedings of the AAAI Conference on Artificial Intelligence</i> (Vol. 35, pp. 11525–11535). Virtual: AAAI Press.","ista":"Grunbacher S, Hasani R, Lechner M, Cyranka J, Smolka SA, Grosu R. 2021. On the verification of neural ODEs with stochastic guarantees. Proceedings of the AAAI Conference on Artificial Intelligence. AAAI: Association for the Advancement of Artificial Intelligence, Technical Tracks, vol. 35, 11525–11535.","ama":"Grunbacher S, Hasani R, Lechner M, Cyranka J, Smolka SA, Grosu R. On the verification of neural ODEs with stochastic guarantees. In: <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>. Vol 35. AAAI Press; 2021:11525-11535.","short":"S. Grunbacher, R. Hasani, M. Lechner, J. Cyranka, S.A. Smolka, R. Grosu, in:, Proceedings of the AAAI Conference on Artificial Intelligence, AAAI Press, 2021, pp. 11525–11535.","ieee":"S. Grunbacher, R. Hasani, M. Lechner, J. Cyranka, S. A. Smolka, and R. Grosu, “On the verification of neural ODEs with stochastic guarantees,” in <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>, Virtual, 2021, vol. 35, no. 13, pp. 11525–11535.","mla":"Grunbacher, Sophie, et al. 
“On the Verification of Neural ODEs with Stochastic Guarantees.” <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>, vol. 35, no. 13, AAAI Press, 2021, pp. 11525–35."},"issue":"13","publication":"Proceedings of the AAAI Conference on Artificial Intelligence","publisher":"AAAI Press","article_processing_charge":"No","type":"conference","volume":35,"page":"11525-11535","day":"28","arxiv":1,"publication_status":"published","file":[{"checksum":"468d07041e282a1d46ffdae92f709630","file_size":286906,"file_id":"10680","access_level":"open_access","content_type":"application/pdf","relation":"main_file","file_name":"17372-Article Text-20866-1-2-20210518.pdf","date_created":"2022-01-26T07:38:08Z","success":1,"creator":"mlechner","date_updated":"2022-01-26T07:38:08Z"}],"_id":"10669","intvolume":"        35","abstract":[{"text":"We show that Neural ODEs, an emerging class of time-continuous neural networks, can be verified by solving a set of global-optimization problems. For this purpose, we introduce Stochastic Lagrangian Reachability (SLR), an\r\nabstraction-based technique for constructing a tight Reachtube (an over-approximation of the set of reachable states\r\nover a given time-horizon), and provide stochastic guarantees in the form of confidence intervals for the Reachtube bounds. SLR inherently avoids the infamous wrapping effect (accumulation of over-approximation errors) by performing local optimization steps to expand safe regions instead of repeatedly forward-propagating them as is done by deterministic reachability methods. To enable fast local optimizations, we introduce a novel forward-mode adjoint sensitivity method to compute gradients without the need for backpropagation. 
Finally, we establish asymptotic and non-asymptotic convergence rates for SLR.","lang":"eng"}],"status":"public","main_file_link":[{"open_access":"1","url":"https://ojs.aaai.org/index.php/AAAI/article/view/17372"}],"department":[{"_id":"GradSch"},{"_id":"ToHe"}],"publication_identifier":{"eissn":["2374-3468"],"isbn":["978-1-57735-866-4"],"issn":["2159-5399"]},"acknowledgement":"The authors would like to thank the reviewers for their insightful comments. RH and RG were partially supported by\r\nHorizon-2020 ECSEL Project grant No. 783163 (iDev40). RH was partially supported by Boeing. ML was supported\r\nin part by the Austrian Science Fund (FWF) under grant Z211-N23 (Wittgenstein Award). SG was funded by FWF\r\nproject W1255-N23. JC was partially supported by NAWA Polish Returns grant PPN/PPO/2018/1/00029. SS was supported by NSF awards DCL-2040599, CCF-1918225, and CPS-1446832.\r\n","language":[{"iso":"eng"}],"conference":{"end_date":"2021-02-09","start_date":"2021-02-02","location":"Virtual","name":"AAAI: Association for the Advancement of Artificial Intelligence"},"external_id":{"arxiv":["2012.08863"]},"month":"05","oa_version":"Published Version","has_accepted_license":"1","date_published":"2021-05-28T00:00:00Z","ddc":["000"],"alternative_title":["Technical Tracks"],"author":[{"full_name":"Grunbacher, Sophie","last_name":"Grunbacher","first_name":"Sophie"},{"full_name":"Hasani, Ramin","last_name":"Hasani","first_name":"Ramin"},{"id":"3DC22916-F248-11E8-B48F-1D18A9856A87","full_name":"Lechner, Mathias","last_name":"Lechner","first_name":"Mathias"},{"first_name":"Jacek","full_name":"Cyranka, Jacek","last_name":"Cyranka"},{"first_name":"Scott A","last_name":"Smolka","full_name":"Smolka, Scott A"},{"full_name":"Grosu, Radu","last_name":"Grosu","first_name":"Radu"}],"project":[{"grant_number":"Z211","name":"Formal methods for the design and analysis of complex 
systems","call_identifier":"FWF","_id":"25F42A32-B435-11E9-9278-68D0E5697425"}],"date_updated":"2025-04-15T06:25:56Z"},{"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","file_date_updated":"2022-01-26T07:36:03Z","title":"Liquid time-constant networks","citation":{"mla":"Hasani, Ramin, et al. “Liquid Time-Constant Networks.” <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>, vol. 35, no. 9, AAAI Press, 2021, pp. 7657–66.","ieee":"R. Hasani, M. Lechner, A. Amini, D. Rus, and R. Grosu, “Liquid time-constant networks,” in <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>, Virtual, 2021, vol. 35, no. 9, pp. 7657–7666.","short":"R. Hasani, M. Lechner, A. Amini, D. Rus, R. Grosu, in:, Proceedings of the AAAI Conference on Artificial Intelligence, AAAI Press, 2021, pp. 7657–7666.","chicago":"Hasani, Ramin, Mathias Lechner, Alexander Amini, Daniela Rus, and Radu Grosu. “Liquid Time-Constant Networks.” In <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>, 35:7657–66. AAAI Press, 2021.","apa":"Hasani, R., Lechner, M., Amini, A., Rus, D., &#38; Grosu, R. (2021). Liquid time-constant networks. In <i>Proceedings of the AAAI Conference on Artificial Intelligence</i> (Vol. 35, pp. 7657–7666). Virtual: AAAI Press.","ista":"Hasani R, Lechner M, Amini A, Rus D, Grosu R. 2021. Liquid time-constant networks. Proceedings of the AAAI Conference on Artificial Intelligence. AAAI: Association for the Advancement of Artificial Intelligence, Technical Tracks, vol. 35, 7657–7666.","ama":"Hasani R, Lechner M, Amini A, Rus D, Grosu R. Liquid time-constant networks. In: <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>. Vol 35. 
AAAI Press; 2021:7657-7666."},"oa":1,"quality_controlled":"1","corr_author":"1","date_created":"2022-01-25T15:48:36Z","year":"2021","volume":35,"page":"7657-7666","day":"28","type":"conference","article_processing_charge":"No","issue":"9","publication":"Proceedings of the AAAI Conference on Artificial Intelligence","publisher":"AAAI Press","main_file_link":[{"open_access":"1","url":"https://ojs.aaai.org/index.php/AAAI/article/view/16936"}],"intvolume":"        35","_id":"10671","abstract":[{"text":"We introduce a new class of time-continuous recurrent neural network models. Instead of declaring a learning system’s dynamics by implicit nonlinearities, we construct networks of linear first-order dynamical systems modulated via nonlinear interlinked gates. The resulting models represent dynamical systems with varying (i.e., liquid) time-constants coupled to their hidden state, with outputs being computed by numerical differential equation solvers. These neural networks exhibit stable and bounded behavior, yield superior expressivity within the family of neural ordinary differential equations, and give rise to improved performance on time-series prediction tasks. To demonstrate these properties, we first take a theoretical approach to find bounds over their dynamics, and compute their expressive power by the trajectory length measure in a latent trajectory space. 
We then conduct a series of time-series prediction experiments to manifest the approximation capability of Liquid Time-Constant Networks (LTCs) compared to classical and modern RNNs.","lang":"eng"}],"status":"public","publication_status":"published","file":[{"file_size":4302669,"checksum":"0f06995fba06dbcfa7ed965fc66027ff","access_level":"open_access","file_id":"10678","file_name":"16936-Article Text-20430-1-2-20210518 (1).pdf","date_created":"2022-01-26T07:36:03Z","relation":"main_file","content_type":"application/pdf","date_updated":"2022-01-26T07:36:03Z","creator":"mlechner","success":1}],"arxiv":1,"department":[{"_id":"GradSch"},{"_id":"ToHe"}],"acknowledgement":"R.H. and D.R. are partially supported by Boeing. R.H. and R.G. were partially supported by the Horizon-2020 ECSEL\r\nProject grant No. 783163 (iDev40). M.L. was supported in part by the Austrian Science Fund (FWF) under grant Z211-N23 (Wittgenstein Award). A.A. is supported by the National Science Foundation (NSF) Graduate Research Fellowship Program. 
This research work is partially drawn from the PhD dissertation of R.H.","publication_identifier":{"isbn":["978-1-57735-866-4"],"eissn":["2374-3468"],"issn":["2159-5399"]},"oa_version":"Published Version","has_accepted_license":"1","month":"05","external_id":{"arxiv":["2006.04439"]},"language":[{"iso":"eng"}],"conference":{"location":"Virtual","name":"AAAI: Association for the Advancement of Artificial Intelligence","end_date":"2021-02-09","start_date":"2021-02-02"},"date_updated":"2025-04-15T06:25:56Z","project":[{"grant_number":"Z211","name":"Formal methods for the design and analysis of complex systems","call_identifier":"FWF","_id":"25F42A32-B435-11E9-9278-68D0E5697425"}],"author":[{"first_name":"Ramin","last_name":"Hasani","full_name":"Hasani, Ramin"},{"full_name":"Lechner, Mathias","last_name":"Lechner","id":"3DC22916-F248-11E8-B48F-1D18A9856A87","first_name":"Mathias"},{"first_name":"Alexander","last_name":"Amini","full_name":"Amini, Alexander"},{"full_name":"Rus, Daniela","last_name":"Rus","first_name":"Daniela"},{"first_name":"Radu","full_name":"Grosu, Radu","last_name":"Grosu"}],"date_published":"2021-05-28T00:00:00Z","ddc":["000"],"alternative_title":["Technical Tracks"]},{"acknowledgement":"Vyacheslav Kungurtsev was supported by the OP VVV project CZ.02.1.01/0.0/0.0/16 019/0000765 “Research Center for Informatics. Bapi Chatterjee was supported by the European Union’s Horizon 2020 research and innovation programme under the Marie Sklodowska-Curie grant agreement No. 754411 (ISTPlus). 
Dan Alistarh has received funding from the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 805223 ScaleML).","publication_identifier":{"issn":["2159-5399"],"eissn":["2374-3468"],"isbn":["9781713835974"]},"department":[{"_id":"DaAl"}],"date_updated":"2025-04-14T07:43:57Z","project":[{"_id":"260C2330-B435-11E9-9278-68D0E5697425","call_identifier":"H2020","name":"ISTplus - Postdoctoral Fellowships","grant_number":"754411"},{"name":"Elastic Coordination for Scalable Machine Learning","_id":"268A44D6-B435-11E9-9278-68D0E5697425","call_identifier":"H2020","grant_number":"805223"}],"author":[{"last_name":"Kungurtsev","full_name":"Kungurtsev, Vyacheslav","first_name":"Vyacheslav"},{"full_name":"Egan, Malcolm","last_name":"Egan","first_name":"Malcolm"},{"first_name":"Bapi","orcid":"0000-0002-2742-4028","full_name":"Chatterjee, Bapi","id":"3C41A08A-F248-11E8-B48F-1D18A9856A87","last_name":"Chatterjee"},{"last_name":"Alistarh","full_name":"Alistarh, Dan-Adrian","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0003-3650-940X","first_name":"Dan-Adrian"}],"date_published":"2021-05-18T00:00:00Z","oa_version":"Preprint","scopus_import":"1","month":"05","external_id":{"arxiv":["1905.11845"]},"language":[{"iso":"eng"}],"conference":{"name":"AAAI: Conference on Artificial Intelligence","location":"Virtual, Online","start_date":"2021-02-02","end_date":"2021-02-09"},"citation":{"ieee":"V. Kungurtsev, M. Egan, B. Chatterjee, and D.-A. Alistarh, “Asynchronous optimization methods for efficient training of deep neural networks with guarantees,” in <i>35th AAAI Conference on Artificial Intelligence, AAAI 2021</i>, Virtual, Online, 2021, vol. 35, no. 9B, pp. 8209–8216.","mla":"Kungurtsev, Vyacheslav, et al. “Asynchronous Optimization Methods for Efficient Training of Deep Neural Networks with Guarantees.” <i>35th AAAI Conference on Artificial Intelligence, AAAI 2021</i>, vol. 35, no. 
9B, AAAI Press, 2021, pp. 8209–16.","short":"V. Kungurtsev, M. Egan, B. Chatterjee, D.-A. Alistarh, in:, 35th AAAI Conference on Artificial Intelligence, AAAI 2021, AAAI Press, 2021, pp. 8209–8216.","ista":"Kungurtsev V, Egan M, Chatterjee B, Alistarh D-A. 2021. Asynchronous optimization methods for efficient training of deep neural networks with guarantees. 35th AAAI Conference on Artificial Intelligence, AAAI 2021. AAAI: Conference on Artificial Intelligence vol. 35, 8209–8216.","ama":"Kungurtsev V, Egan M, Chatterjee B, Alistarh D-A. Asynchronous optimization methods for efficient training of deep neural networks with guarantees. In: <i>35th AAAI Conference on Artificial Intelligence, AAAI 2021</i>. Vol 35. AAAI Press; 2021:8209-8216.","chicago":"Kungurtsev, Vyacheslav, Malcolm Egan, Bapi Chatterjee, and Dan-Adrian Alistarh. “Asynchronous Optimization Methods for Efficient Training of Deep Neural Networks with Guarantees.” In <i>35th AAAI Conference on Artificial Intelligence, AAAI 2021</i>, 35:8209–16. AAAI Press, 2021.","apa":"Kungurtsev, V., Egan, M., Chatterjee, B., &#38; Alistarh, D.-A. (2021). Asynchronous optimization methods for efficient training of deep neural networks with guarantees. In <i>35th AAAI Conference on Artificial Intelligence, AAAI 2021</i> (Vol. 35, pp. 8209–8216). Virtual, Online: AAAI Press."},"oa":1,"quality_controlled":"1","date_created":"2022-06-05T22:01:52Z","year":"2021","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","title":"Asynchronous optimization methods for efficient training of deep neural networks with guarantees","ec_funded":1,"main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.1905.11845"}],"_id":"11436","intvolume":"        35","abstract":[{"lang":"eng","text":"Asynchronous distributed algorithms are a popular way to reduce synchronization costs in large-scale optimization, and in particular for neural network training. 
However, for nonsmooth and nonconvex objectives, few convergence guarantees exist beyond cases where closed-form proximal operator solutions are available. As training most popular deep neural networks corresponds to optimizing nonsmooth and nonconvex objectives, there is a pressing need for such convergence guarantees. In this paper, we analyze for the first time the convergence of stochastic asynchronous optimization for this general class of objectives. In particular, we focus on stochastic subgradient methods allowing for block variable partitioning, where the shared model is asynchronously updated by concurrent processes. To this end, we use a probabilistic model which captures key features of real asynchronous scheduling between concurrent processes. Under this model, we establish convergence with probability one to an invariant set for stochastic subgradient methods with momentum. From a practical perspective, one issue with the family of algorithms that we consider is that they are not efficiently supported by machine learning frameworks, which mostly focus on distributed data-parallel strategies. To address this, we propose a new implementation strategy for shared-memory based training of deep neural networks for a partitioned but shared model in single- and multi-GPU settings. 
Based on this implementation, we achieve on average 1.2x speed-up in comparison to state-of-the-art training methods for popular image classification tasks, without compromising accuracy."}],"status":"public","publication_status":"published","arxiv":1,"volume":35,"page":"8209-8216","day":"18","type":"conference","article_processing_charge":"No","publication":"35th AAAI Conference on Artificial Intelligence, AAAI 2021","issue":"9B","publisher":"AAAI Press"},{"intvolume":"        35","_id":"10665","abstract":[{"text":"Formal verification of neural networks is an active topic of research, and recent advances have significantly increased the size of the networks that verification tools can handle. However, most methods are designed for verification of an idealized model of the actual network which works over real arithmetic and ignores rounding imprecisions. This idealization is in stark contrast to network quantization, which is a technique that trades numerical precision for computational efficiency and is, therefore, often applied in practice. Neglecting rounding errors of such low-bit quantized neural networks has been shown to lead to wrong conclusions about the network’s correctness. Thus, the desired approach for verifying quantized neural networks would be one that takes these rounding errors\r\ninto account. In this paper, we show that verifying the bit-exact implementation of quantized neural networks with bit-vector specifications is PSPACE-hard, even though verifying idealized real-valued networks and satisfiability of bit-vector specifications alone are each in NP. Furthermore, we explore several practical heuristics toward closing the complexity gap between idealized and bit-exact verification. In particular, we propose three techniques for making SMT-based verification of quantized neural networks more scalable. 
Our experiments demonstrate that our proposed methods allow a speedup of up to three orders of magnitude over existing approaches.","lang":"eng"}],"status":"public","main_file_link":[{"open_access":"1","url":"https://ojs.aaai.org/index.php/AAAI/article/view/16496"}],"arxiv":1,"publication_status":"published","file":[{"checksum":"2bc8155b2526a70fba5b7301bc89dbd1","file_size":137235,"file_id":"10684","access_level":"open_access","content_type":"application/pdf","relation":"main_file","file_name":"16496-Article Text-19990-1-2-20210518 (1).pdf","date_created":"2022-01-26T07:41:16Z","creator":"mlechner","success":1,"date_updated":"2022-01-26T07:41:16Z"}],"type":"conference","volume":35,"day":"28","page":"3787-3795","issue":"5A","publication":"Proceedings of the AAAI Conference on Artificial Intelligence","publisher":"AAAI Press","article_processing_charge":"No","oa":1,"citation":{"ama":"Henzinger TA, Lechner M, Zikelic D. Scalable verification of quantized neural networks. In: <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>. Vol 35. AAAI Press; 2021:3787-3795.","ista":"Henzinger TA, Lechner M, Zikelic D. 2021. Scalable verification of quantized neural networks. Proceedings of the AAAI Conference on Artificial Intelligence. AAAI: Association for the Advancement of Artificial Intelligence, Technical Tracks, vol. 35, 3787–3795.","apa":"Henzinger, T. A., Lechner, M., &#38; Zikelic, D. (2021). Scalable verification of quantized neural networks. In <i>Proceedings of the AAAI Conference on Artificial Intelligence</i> (Vol. 35, pp. 3787–3795). Virtual: AAAI Press.","chicago":"Henzinger, Thomas A, Mathias Lechner, and Dorde Zikelic. “Scalable Verification of Quantized Neural Networks.” In <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>, 35:3787–95. AAAI Press, 2021.","ieee":"T. A. Henzinger, M. Lechner, and D. 
Zikelic, “Scalable verification of quantized neural networks,” in <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>, Virtual, 2021, vol. 35, no. 5A, pp. 3787–3795.","short":"T.A. Henzinger, M. Lechner, D. Zikelic, in:, Proceedings of the AAAI Conference on Artificial Intelligence, AAAI Press, 2021, pp. 3787–3795.","mla":"Henzinger, Thomas A., et al. “Scalable Verification of Quantized Neural Networks.” <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>, vol. 35, no. 5A, AAAI Press, 2021, pp. 3787–95."},"date_created":"2022-01-25T15:15:02Z","year":"2021","corr_author":"1","quality_controlled":"1","file_date_updated":"2022-01-26T07:41:16Z","ec_funded":1,"title":"Scalable verification of quantized neural networks","related_material":{"record":[{"relation":"dissertation_contains","id":"11362","status":"public"}]},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","project":[{"name":"International IST Doctoral Program","_id":"2564DBCA-B435-11E9-9278-68D0E5697425","call_identifier":"H2020","grant_number":"665385"},{"grant_number":"Z211","name":"Formal methods for the design and analysis of complex systems","_id":"25F42A32-B435-11E9-9278-68D0E5697425","call_identifier":"FWF"},{"_id":"0599E47C-7A3F-11EA-A408-12923DDC885E","call_identifier":"H2020","name":"Formal Methods for Stochastic Models: Algorithms and Applications","grant_number":"863818"}],"date_updated":"2026-04-07T14:21:58Z","date_published":"2021-05-28T00:00:00Z","ddc":["000"],"alternative_title":["Technical Tracks"],"author":[{"last_name":"Henzinger","id":"40876CD8-F248-11E8-B48F-1D18A9856A87","full_name":"Henzinger, Thomas A","first_name":"Thomas A","orcid":"0000-0002-2985-7724"},{"id":"3DC22916-F248-11E8-B48F-1D18A9856A87","full_name":"Lechner, Mathias","last_name":"Lechner","first_name":"Mathias"},{"last_name":"Zikelic","id":"294AA7A6-F248-11E8-B48F-1D18A9856A87","full_name":"Zikelic, 
Dorde","orcid":"0000-0002-4681-1699","first_name":"Dorde"}],"month":"05","scopus_import":"1","oa_version":"Published Version","has_accepted_license":"1","language":[{"iso":"eng"}],"conference":{"start_date":"2021-02-02","end_date":"2021-02-09","name":"AAAI: Association for the Advancement of Artificial Intelligence","location":"Virtual"},"external_id":{"arxiv":["2012.08185"]},"publication_identifier":{"eissn":["2374-3468"],"isbn":["978-1-57735-866-4"],"issn":["2159-5399"]},"acknowledgement":"This research was supported in part by the Austrian Science Fund (FWF) under grant Z211-N23 (Wittgenstein\r\nAward), ERC CoG 863818 (FoRM-SMArt), and the European Union’s Horizon 2020 research and innovation programme under the Marie Skłodowska-Curie Grant Agreement No. 665385.\r\n","department":[{"_id":"GradSch"},{"_id":"ToHe"}]},{"quality_controlled":"1","date_created":"2023-08-22T14:07:26Z","year":"2020","citation":{"mla":"Locatello, Francesco, et al. “A Commentary on the Unsupervised Learning of Disentangled Representations.” <i>The 34th AAAI Conference on Artificial Intelligence</i>, vol. 34, no. 9, Association for the Advancement of Artificial Intelligence, 2020, pp. 13681–84, doi:<a href=\"https://doi.org/10.1609/aaai.v34i09.7120\">10.1609/aaai.v34i09.7120</a>.","ieee":"F. Locatello <i>et al.</i>, “A commentary on the unsupervised learning of disentangled representations,” in <i>The 34th AAAI Conference on Artificial Intelligence</i>, New York, NY, United States, 2020, vol. 34, no. 9, pp. 13681–13684.","short":"F. Locatello, S. Bauer, M. Lucic, G. Rätsch, S. Gelly, B. Schölkopf, O. Bachem, in:, The 34th AAAI Conference on Artificial Intelligence, Association for the Advancement of Artificial Intelligence, 2020, pp. 13681–13684.","ista":"Locatello F, Bauer S, Lucic M, Rätsch G, Gelly S, Schölkopf B, Bachem O. 2020. A commentary on the unsupervised learning of disentangled representations. The 34th AAAI Conference on Artificial Intelligence. 
AAAI: Conference on Artificial Intelligence vol. 34, 13681–13684.","ama":"Locatello F, Bauer S, Lucic M, et al. A commentary on the unsupervised learning of disentangled representations. In: <i>The 34th AAAI Conference on Artificial Intelligence</i>. Vol 34. Association for the Advancement of Artificial Intelligence; 2020:13681-13684. doi:<a href=\"https://doi.org/10.1609/aaai.v34i09.7120\">10.1609/aaai.v34i09.7120</a>","chicago":"Locatello, Francesco, Stefan Bauer, Mario Lucic, Gunnar Rätsch, Sylvain Gelly, Bernhard Schölkopf, and Olivier Bachem. “A Commentary on the Unsupervised Learning of Disentangled Representations.” In <i>The 34th AAAI Conference on Artificial Intelligence</i>, 34:13681–84. Association for the Advancement of Artificial Intelligence, 2020. <a href=\"https://doi.org/10.1609/aaai.v34i09.7120\">https://doi.org/10.1609/aaai.v34i09.7120</a>.","apa":"Locatello, F., Bauer, S., Lucic, M., Rätsch, G., Gelly, S., Schölkopf, B., &#38; Bachem, O. (2020). A commentary on the unsupervised learning of disentangled representations. In <i>The 34th AAAI Conference on Artificial Intelligence</i> (Vol. 34, pp. 13681–13684). New York, NY, United States: Association for the Advancement of Artificial Intelligence. <a href=\"https://doi.org/10.1609/aaai.v34i09.7120\">https://doi.org/10.1609/aaai.v34i09.7120</a>"},"oa":1,"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","title":"A commentary on the unsupervised learning of disentangled representations","publication_status":"published","arxiv":1,"main_file_link":[{"url":"https://arxiv.org/abs/2007.14184","open_access":"1"}],"abstract":[{"text":"The goal of the unsupervised learning of disentangled representations is to\r\nseparate the independent explanatory factors of variation in the data without\r\naccess to supervision. In this paper, we summarize the results of Locatello et\r\nal., 2019, and focus on their implications for practitioners. 
We discuss the\r\ntheoretical result showing that the unsupervised learning of disentangled\r\nrepresentations is fundamentally impossible without inductive biases and the\r\npractical challenges it entails. Finally, we comment on our experimental\r\nfindings, highlighting the limitations of state-of-the-art approaches and\r\ndirections for future research.","lang":"eng"}],"_id":"14186","intvolume":"        34","status":"public","article_processing_charge":"No","publication":"The 34th AAAI Conference on Artificial Intelligence","issue":"9","extern":"1","publisher":"Association for the Advancement of Artificial Intelligence","volume":34,"page":"13681-13684","day":"28","type":"conference","doi":"10.1609/aaai.v34i09.7120","publication_identifier":{"eissn":["2374-3468"],"isbn":["9781577358350"]},"department":[{"_id":"FrLo"}],"author":[{"orcid":"0000-0002-4850-0683","first_name":"Francesco","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","last_name":"Locatello","full_name":"Locatello, Francesco"},{"first_name":"Stefan","full_name":"Bauer, Stefan","last_name":"Bauer"},{"full_name":"Lucic, Mario","last_name":"Lucic","first_name":"Mario"},{"first_name":"Gunnar","last_name":"Rätsch","full_name":"Rätsch, Gunnar"},{"first_name":"Sylvain","last_name":"Gelly","full_name":"Gelly, Sylvain"},{"first_name":"Bernhard","full_name":"Schölkopf, Bernhard","last_name":"Schölkopf"},{"first_name":"Olivier","full_name":"Bachem, Olivier","last_name":"Bachem"}],"date_published":"2020-07-28T00:00:00Z","date_updated":"2023-09-12T07:44:48Z","external_id":{"arxiv":["2007.14184"]},"language":[{"iso":"eng"}],"conference":{"name":"AAAI: Conference on Artificial Intelligence","location":"New York, NY, United States","start_date":"2020-02-07","end_date":"2020-02-12"},"scopus_import":"1","oa_version":"Preprint","month":"07"},{"citation":{"ista":"Avni G, Ibsen-Jensen R, Tkadlec J. 2020. All-pay bidding games on graphs. Proceedings of the AAAI Conference on Artificial Intelligence. 
34(02), 1798–1805.","ama":"Avni G, Ibsen-Jensen R, Tkadlec J. All-pay bidding games on graphs. <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>. 2020;34(02):1798-1805. doi:<a href=\"https://doi.org/10.1609/aaai.v34i02.5546\">10.1609/aaai.v34i02.5546</a>","chicago":"Avni, Guy, Rasmus Ibsen-Jensen, and Josef Tkadlec. “All-Pay Bidding Games on Graphs.” <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>. Association for the Advancement of Artificial Intelligence, 2020. <a href=\"https://doi.org/10.1609/aaai.v34i02.5546\">https://doi.org/10.1609/aaai.v34i02.5546</a>.","apa":"Avni, G., Ibsen-Jensen, R., &#38; Tkadlec, J. (2020). All-pay bidding games on graphs. <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>. New York, NY, United States: Association for the Advancement of Artificial Intelligence. <a href=\"https://doi.org/10.1609/aaai.v34i02.5546\">https://doi.org/10.1609/aaai.v34i02.5546</a>","ieee":"G. Avni, R. Ibsen-Jensen, and J. Tkadlec, “All-pay bidding games on graphs,” <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>, vol. 34, no. 02. Association for the Advancement of Artificial Intelligence, pp. 1798–1805, 2020.","mla":"Avni, Guy, et al. “All-Pay Bidding Games on Graphs.” <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>, vol. 34, no. 02, Association for the Advancement of Artificial Intelligence, 2020, pp. 1798–805, doi:<a href=\"https://doi.org/10.1609/aaai.v34i02.5546\">10.1609/aaai.v34i02.5546</a>.","short":"G. Avni, R. Ibsen-Jensen, J. 
Tkadlec, Proceedings of the AAAI Conference on Artificial Intelligence 34 (2020) 1798–1805."},"oa":1,"quality_controlled":"1","article_type":"original","date_created":"2021-02-25T09:05:18Z","year":"2020","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","title":"All-pay bidding games on graphs","main_file_link":[{"url":"https://arxiv.org/abs/1911.08360","open_access":"1"}],"abstract":[{"lang":"eng","text":"In this paper we introduce and study all-pay bidding games, a class of two player, zero-sum games on graphs. The game proceeds as follows. We place a token on some vertex in the graph and assign budgets to the two players. Each turn, each player submits a sealed legal bid (non-negative and below their remaining budget), which is deducted from their budget and the highest bidder moves the token onto an adjacent vertex. The game ends once a sink is reached, and Player 1 pays Player 2 the outcome that is associated with the sink. The players attempt to maximize their expected outcome. Our games model settings where effort (of no inherent value) needs to be invested in an ongoing and stateful manner. On the negative side, we show that even in simple games on DAGs, optimal strategies may require a distribution over bids with infinite support. A central quantity in bidding games is the ratio of the players budgets. On the positive side, we show a simple FPTAS for DAGs, that, for each budget ratio, outputs an approximation for the optimal strategy for that ratio. We also implement it, show that it performs well, and suggests interesting properties of these games. Then, given an outcome c, we show an algorithm for finding the necessary and sufficient initial ratio for guaranteeing outcome c with probability 1 and a strategy ensuring such. 
Finally, while the general case has not previously been studied, solving the specific game in which Player 1 wins iff he wins the first two auctions, has been long stated as an open question, which we solve."}],"_id":"9197","intvolume":"        34","status":"public","publication_status":"published","arxiv":1,"volume":34,"page":"1798-1805","day":"03","type":"journal_article","OA_type":"green","article_processing_charge":"No","issue":"02","publication":"Proceedings of the AAAI Conference on Artificial Intelligence","publisher":"Association for the Advancement of Artificial Intelligence","acknowledgement":"This research was supported by the Austrian Science Fund (FWF) under grants S11402-N23 (RiSE/SHiNE), Z211-N23 (Wittgenstein Award), and M 2369-N33 (Meitner fellowship).","publication_identifier":{"eissn":["2374-3468"],"isbn":["9781577358350"],"issn":["2159-5399"]},"doi":"10.1609/aaai.v34i02.5546","department":[{"_id":"ToHe"},{"_id":"KrCh"}],"OA_place":"repository","project":[{"grant_number":"S11402-N23","_id":"25F2ACDE-B435-11E9-9278-68D0E5697425","call_identifier":"FWF","name":"Rigorous Systems Engineering"},{"grant_number":"Z211","call_identifier":"FWF","_id":"25F42A32-B435-11E9-9278-68D0E5697425","name":"Formal methods for the design and analysis of complex systems"},{"name":"Formal Methods meets Algorithmic Game Theory","call_identifier":"FWF","_id":"264B3912-B435-11E9-9278-68D0E5697425","grant_number":"M02369"}],"date_updated":"2025-07-03T11:44:58Z","author":[{"first_name":"Guy","orcid":"0000-0001-5588-8287","full_name":"Avni, Guy","last_name":"Avni","id":"463C8BC2-F248-11E8-B48F-1D18A9856A87"},{"first_name":"Rasmus","orcid":"0000-0003-4783-0389","id":"3B699956-F248-11E8-B48F-1D18A9856A87","full_name":"Ibsen-Jensen, Rasmus","last_name":"Ibsen-Jensen"},{"id":"3F24CCC8-F248-11E8-B48F-1D18A9856A87","last_name":"Tkadlec","full_name":"Tkadlec, 
Josef","orcid":"0000-0002-1097-9684","first_name":"Josef"}],"date_published":"2020-04-03T00:00:00Z","oa_version":"Preprint","scopus_import":"1","month":"04","external_id":{"arxiv":["1911.08360"]},"language":[{"iso":"eng"}],"conference":{"end_date":"2020-02-12","start_date":"2020-02-07","location":"New York, NY, United States","name":"AAAI: Conference on Artificial Intelligence"}}]
