@article{13234,
  abstract = {Neural-network classifiers achieve high accuracy when predicting the class of an input that they were trained to identify. Maintaining this accuracy in dynamic environments, where inputs frequently fall outside the fixed set of initially known classes, remains a challenge. We consider the problem of monitoring the classification decisions of neural networks in the presence of novel classes. For this purpose, we generalize our recently proposed abstraction-based monitor from binary output to real-valued quantitative output. This quantitative output enables new applications, two of which we investigate in the paper. As our first application, we introduce an algorithmic framework for active monitoring of a neural network, which allows us to learn new classes dynamically and yet maintain high monitoring performance. As our second application, we present an offline procedure to retrain the neural network to improve the monitor's detection performance without deteriorating the network's classification accuracy. Our experimental evaluation demonstrates both the benefits of our active monitoring framework in dynamic scenarios and the effectiveness of the retraining procedure.},
  author = {Kueffner, Konstantin and Lukina, Anna and Schilling, Christian and Henzinger, Thomas A},
  issn = {1433-2787},
  journal = {International Journal on Software Tools for Technology Transfer},
  pages = {575--592},
  publisher = {Springer Nature},
  title = {{Into the unknown: Active monitoring of neural networks (extended version)}},
  doi = {10.1007/s10009-023-00711-4},
  volume = {25},
  year = {2023},
}

@inproceedings{10206,
  abstract = {Neural-network classifiers achieve high accuracy when predicting the class of an input that they were trained to identify. Maintaining this accuracy in dynamic environments, where inputs frequently fall outside the fixed set of initially known classes, remains a challenge. The typical approach is to detect inputs from novel classes and retrain the classifier on an augmented dataset. However, not only the classifier but also the detection mechanism needs to adapt in order to distinguish between newly learned and yet unknown input classes. To address this challenge, we introduce an algorithmic framework for active monitoring of a neural network. A monitor wrapped in our framework operates in parallel with the neural network and interacts with a human user via a series of interpretable labeling queries for incremental adaptation. In addition, we propose an adaptive quantitative monitor to improve precision. An experimental evaluation on a diverse set of benchmarks with varying numbers of classes confirms the benefits of our active monitoring framework in dynamic scenarios.},
  author = {Lukina, Anna and Schilling, Christian and Henzinger, Thomas A},
  booktitle = {21st International Conference on Runtime Verification},
  isbn = {978-3-030-88493-2},
  issn = {1611-3349},
  keywords = {monitoring, neural networks, novelty detection},
  location = {Virtual},
  pages = {42--61},
  publisher = {Springer Nature},
  title = {{Into the unknown: Active monitoring of neural networks}},
  doi = {10.1007/978-3-030-88494-9_3},
  volume = {12974},
  year = {2021},
}

@inproceedings{9040,
  abstract = {Machine learning and formal methods have complementary benefits and drawbacks. In this work, we address the controller-design problem with a combination of techniques from both fields. The use of black-box neural networks in deep reinforcement learning (deep RL) poses a challenge for such a combination. Instead of reasoning formally about the output of deep RL, which we call the wizard, we extract from it a decision-tree-based model, which we refer to as the magic book. Using the extracted model as an intermediary, we are able to handle problems that are infeasible for either deep RL or formal methods by themselves. First, we suggest, for the first time, a synthesis procedure based on a magic book. We synthesize a stand-alone correct-by-design controller that enjoys the favorable performance of RL. Second, we incorporate a magic book in a bounded model checking (BMC) procedure. BMC allows us to find numerous traces of the plant under the control of the wizard, which a user can use to increase the trustworthiness of the wizard and direct further training.},
  author = {Alamdari, Par Alizadeh and Avni, Guy and Henzinger, Thomas A and Lukina, Anna},
  booktitle = {Proceedings of the 20th Conference on Formal Methods in Computer-Aided Design},
  isbn = {978-3-85448-042-6},
  issn = {2708-7824},
  location = {Online Conference},
  pages = {138--147},
  publisher = {TU Wien Academic Press},
  title = {{Formal methods with a touch of magic}},
  doi = {10.34727/2020/isbn.978-3-85448-042-6_21},
  year = {2020},
}

@inproceedings{7505,
  abstract = {Neural networks have demonstrated unmatched performance in a range of classification tasks. Despite numerous efforts of the research community, novelty detection remains one of the significant limitations of neural networks. The ability to identify previously unseen inputs as novel is crucial for our understanding of the decisions made by neural networks. At runtime, inputs not falling into any of the categories learned during training cannot be classified correctly by the neural network. Existing approaches treat the neural network as a black box and try to detect novel inputs based on the confidence of the output predictions. However, neural networks are not trained to reduce their confidence for novel inputs, which limits the effectiveness of these approaches. We propose a framework to monitor a neural network by observing its hidden layers. We employ a common abstraction from program analysis, boxes, to identify novel behaviors in the monitored layers, i.e., inputs that cause behaviors outside the box. For each neuron, the boxes range over the values seen in training. The framework is efficient and flexible, allowing a desired trade-off between raising false warnings and detecting novel inputs. We illustrate the performance and the robustness to variability in the unknown classes on popular image-classification benchmarks.},
  author = {Henzinger, Thomas A and Lukina, Anna and Schilling, Christian},
  booktitle = {24th European Conference on Artificial Intelligence},
  location = {Santiago de Compostela, Spain},
  pages = {2433--2440},
  publisher = {IOS Press},
  title = {{Outside the box: Abstraction-based monitoring of neural networks}},
  doi = {10.3233/FAIA200375},
  volume = {325},
  year = {2020},
}