├── .gitattributes ├── .github └── workflows │ ├── codeql-analysis.yml │ └── python-app.yml ├── .gitignore ├── Benchmarking ├── Benchmark_ErrorStop.py ├── CompleteStochasticBenchmarking.py ├── StochasticAlgComparison.py ├── StochasticBenchmarkingWPrism.py ├── StopWithErorrRate.py ├── all_results.pickle ├── benchmark.py ├── benchmark_alphabet_increase.py ├── benchmark_size_increase.py ├── cex_processing_benchmark.py ├── compare_lstar_and_kv.py ├── deterministic_evaluation.PNG ├── error_benchmark_statistics.py ├── evaluate_l_star_configurations.py ├── fm_benchmark.py ├── fm_plots.py ├── generate_plots.py ├── json_lbt.py ├── papni_sequances.pickle ├── passive_mdp_vs_smm.py ├── passive_vpa_vs_rpni.py ├── prism_eval_props │ ├── bluetooth.props │ ├── emqtt_two_client.props │ ├── first_eval.props │ ├── second_eval.props │ ├── shared_coin_eval.props │ ├── slot_machine_eval.props │ └── tcp_eval.props ├── rpni_papni_memory_footrpint.py ├── stochastic_benchmarking │ ├── Benchmark_ErrorStop.py │ ├── CompleteStochasticBenchmarking.py │ ├── StochasticBenchmarkingWPrism.py │ ├── passive_mdp_vs_smm.py │ ├── plot_error_steps.py │ ├── stochastic_benchmark_random_automata.py │ ├── strategy_comp.py │ └── unamb_error_plot.py ├── stochastic_evaluation.PNG ├── unamb_error_plot.py └── vpa_benchmarking │ └── benchmark_vpa.py ├── DotModels ├── Angluin_Mealy.dot ├── Angluin_Moore.dot ├── Bluetooth │ ├── CC2640R2-no-feature-req.dot │ ├── CC2640R2-no-feature-req_stochastic.dot │ ├── CC2640R2-no-pairing-req.dot │ ├── CC2650.dot │ ├── CYBLE-416045-02.dot │ ├── CYBLE-416045-02_Crash_No_Response_stochastic.dot │ ├── CYW43455.dot │ ├── CYW43455_stochastic.dot │ ├── bluetooth_model.dot │ ├── bluetooth_reduced.dot │ ├── cc2652r1.dot │ ├── convert_to_stochastic.py │ └── nRF52832.dot ├── MDPs │ ├── bluetooth.dot │ ├── faulty_car_alarm.dot │ ├── first_grid.dot │ ├── mqtt.dot │ ├── second_grid.dot │ ├── shared_coin.dot │ ├── slot_machine.dot │ └── tcp.dot ├── MQTT │ ├── 
ActiveMQ__two_client_will_retain.dot │ ├── VerneMQ__two_client_will_retain.dot │ ├── emqtt__two_client_will_retain.dot │ ├── hbmqtt__two_client_will_retain.dot │ └── mosquitto__two_client_will_retain.dot ├── TCP │ ├── TCP_Linux_Client.dot │ ├── tcp_server_bsd_trans.dot │ ├── tcp_server_ubuntu_trans.dot │ └── tcp_server_windows_trans.dot ├── TLS │ ├── JSSE_1.8.0_25_server_regular.dot │ ├── NSS_3.17.4_server_regular.dot │ ├── OpenSSL_1.0.2_server_regular.dot │ ├── RSA_BSAFE_C_4.0.4_server_regular.dot │ └── miTLS_0.1.3_server_regular.dot ├── arithmetics.dot ├── car_alarm.dot ├── coffee_mealy.dot ├── coffee_moore.dot ├── five_clients_mqtt_abstracted_onfsm.dot ├── mooreModel.dot ├── onfsm_0.dot ├── onfsm_1.dot ├── onfsm_2.dot ├── onfsm_3.dot ├── onfsm_4.dot ├── onfsm_5.dot └── tomitaGrammars │ ├── tomita_1.dot │ ├── tomita_2.dot │ ├── tomita_3.dot │ ├── tomita_4.dot │ ├── tomita_5.dot │ ├── tomita_6.dot │ └── tomita_7.dot ├── Examples.py ├── LICENCE.txt ├── README.md ├── aalpy ├── SULs │ ├── AutomataSUL.py │ ├── PyMethodSUL.py │ ├── RegexSUL.py │ ├── TomitaSUL.py │ └── __init__.py ├── __init__.py ├── automata │ ├── Dfa.py │ ├── MarkovChain.py │ ├── Mdp.py │ ├── MealyMachine.py │ ├── MooreMachine.py │ ├── NonDeterministicMooreMachine.py │ ├── Onfsm.py │ ├── Sevpa.py │ ├── StochasticMealyMachine.py │ ├── Vpa.py │ └── __init__.py ├── base │ ├── Automaton.py │ ├── CacheTree.py │ ├── Oracle.py │ ├── SUL.py │ └── __init__.py ├── learning_algs │ ├── __init__.py │ ├── adaptive │ │ ├── AdaptiveLSharp.py │ │ ├── AdaptiveObservationTree.py │ │ ├── StateMatching.py │ │ └── __init__.py │ ├── deterministic │ │ ├── ADS.py │ │ ├── Apartness.py │ │ ├── ClassificationTree.py │ │ ├── CounterExampleProcessing.py │ │ ├── KV.py │ │ ├── LSharp.py │ │ ├── LStar.py │ │ ├── ObservationTable.py │ │ ├── ObservationTree.py │ │ └── __init__.py │ ├── deterministic_passive │ │ ├── ClassicRPNI.py │ │ ├── GsmRPNI.py │ │ ├── RPNI.py │ │ ├── __init__.py │ │ ├── active_RPNI.py │ │ └── 
rpni_helper_functions.py │ ├── general_passive │ │ ├── GeneralizedStateMerging.py │ │ ├── GsmAlgorithms.py │ │ ├── GsmNode.py │ │ ├── Instrumentation.py │ │ ├── ScoreFunctionsGSM.py │ │ └── __init__.py │ ├── non_deterministic │ │ ├── AbstractedOnfsmLstar.py │ │ ├── AbstractedOnfsmObservationTable.py │ │ ├── NonDeterministicSULWrapper.py │ │ ├── OnfsmLstar.py │ │ ├── OnfsmObservationTable.py │ │ ├── TraceTree.py │ │ └── __init__.py │ ├── stochastic │ │ ├── DifferenceChecker.py │ │ ├── SamplingBasedObservationTable.py │ │ ├── StochasticCexProcessing.py │ │ ├── StochasticLStar.py │ │ ├── StochasticTeacher.py │ │ └── __init__.py │ └── stochastic_passive │ │ ├── ActiveAleriga.py │ │ ├── Alergia.py │ │ ├── CompatibilityChecker.py │ │ ├── FPTA.py │ │ └── __init__.py ├── oracles │ ├── BreadthFirstExplorationEqOracle.py │ ├── CacheBasedEqOracle.py │ ├── PacOracle.py │ ├── PerfectKnowledgeEqOracle.py │ ├── ProvidedSequencesOracleWrapper.py │ ├── RandomWalkEqOracle.py │ ├── RandomWordEqOracle.py │ ├── StatePrefixEqOracle.py │ ├── TransitionFocusOracle.py │ ├── UserInputEqOracle.py │ ├── WMethodEqOracle.py │ ├── WpMethodEqOracle.py │ ├── __init__.py │ ├── kWayStateCoverageEqOracle.py │ └── kWayTransitionCoverageEqOracle.py ├── paths.py └── utils │ ├── AutomatonGenerators.py │ ├── BenchmarkSULs.py │ ├── BenchmarkSevpaModels.py │ ├── BenchmarkVpaModels.py │ ├── DataHandler.py │ ├── FileHandler.py │ ├── HelperFunctions.py │ ├── ModelChecking.py │ ├── Sampling.py │ └── __init__.py ├── docs ├── README.md ├── _config.yml ├── arithmeticSevpa.PNG ├── google306875680a34d740.html ├── instructions.txt ├── logo_dark.png ├── logo_dark_cent.png ├── logo_light.png ├── logo_light_cent.png ├── mqtt_example.PNG ├── passiveLearning.png └── regex_example_wiki.png ├── jAlergia ├── alergia.jar └── exampleMdpData.txt ├── notebooks ├── Abstracted_Non-Det_FSM.ipynb ├── AngluinExample.ipynb ├── MDP_Example.ipynb ├── MDP_and_SMM_Example.ipynb ├── ONFSM_Example.ipynb ├── RandomMealyExample.ipynb ├── 
RegexExample.ipynb └── Stochstic_Examples.ipynb ├── setup.py └── tests ├── oracles ├── test_baseOracle.py └── test_kWayTransitionCoverageEqOracle.py ├── test_charSet.py ├── test_deterministic.py ├── test_file_operations.py ├── test_non_deterministic.py ├── test_rwpmethod_oracle.py ├── test_stochastic.py ├── test_wmethod_oracle.py ├── test_wpmethod_oracle.py └── tests_imports.py /.gitattributes: -------------------------------------------------------------------------------- 1 | * linguist-vendored 2 | *.py linguist-vendored=false -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 
11 | # 12 | name: "CodeQL" 13 | 14 | on: 15 | push: 16 | branches: [ master ] 17 | pull_request: 18 | # The branches below must be a subset of the branches above 19 | branches: [ master ] 20 | schedule: 21 | - cron: '17 10 * * 6' 22 | 23 | jobs: 24 | analyze: 25 | name: Analyze 26 | runs-on: ubuntu-latest 27 | permissions: 28 | actions: read 29 | contents: read 30 | security-events: write 31 | 32 | strategy: 33 | fail-fast: false 34 | matrix: 35 | language: [ 'python' ] 36 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] 37 | # Learn more: 38 | # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed 39 | 40 | steps: 41 | - name: Checkout repository 42 | uses: actions/checkout@v2 43 | 44 | # Initializes the CodeQL tools for scanning. 45 | - name: Initialize CodeQL 46 | uses: github/codeql-action/init@v1 47 | with: 48 | languages: ${{ matrix.language }} 49 | # If you wish to specify custom queries, you can do so here or in a config file. 50 | # By default, queries listed here will override any specified in a config file. 51 | # Prefix the list here with "+" to use these queries and those in the config file. 52 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 53 | 54 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 55 | # If this step fails, then you should remove it and run the build manually (see below) 56 | - name: Autobuild 57 | uses: github/codeql-action/autobuild@v1 58 | 59 | # ℹ️ Command-line programs to run using the OS shell. 
60 | # 📚 https://git.io/JvXDl 61 | 62 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 63 | # and modify them (or add more) to build your code if your project 64 | # uses a compiled language 65 | 66 | #- run: | 67 | # make bootstrap 68 | # make release 69 | 70 | - name: Perform CodeQL Analysis 71 | uses: github/codeql-action/analyze@v1 72 | -------------------------------------------------------------------------------- /.github/workflows/python-app.yml: -------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies, run tests and lint with a single version of Python 2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions 3 | 4 | name: Python application 5 | 6 | on: 7 | push: 8 | branches: [ master ] 9 | pull_request: 10 | branches: [ master ] 11 | 12 | jobs: 13 | build: 14 | 15 | runs-on: ubuntu-latest 16 | 17 | steps: 18 | - uses: actions/checkout@v2 19 | - name: Set up Python 3.9 20 | uses: actions/setup-python@v2 21 | with: 22 | python-version: 3.9 23 | - name: Install dependencies 24 | run: | 25 | python -m pip install --upgrade pip 26 | pip install flake8 pytest 27 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi 28 | - name: Lint with flake8 29 | run: | 30 | # stop the build if there are Python syntax errors or undefined names 31 | flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics 32 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide 33 | flake8 . 
--count --exit-zero --max-complexity=10 --max-line-length=127 --statistics 34 | 35 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | main.py 10 | LearnedModel.pdf 11 | 12 | # Distribution / packaging 13 | .Python 14 | build/ 15 | develop-eggs/ 16 | dist/ 17 | downloads/ 18 | eggs/ 19 | .eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | wheels/ 26 | pip-wheel-metadata/ 27 | share/python-wheels/ 28 | *.egg-info/ 29 | .installed.cfg 30 | *.egg 31 | MANIFEST 32 | 33 | # PyInstaller 34 | # Usually these files are written by a python script from a template 35 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 36 | *.manifest 37 | *.spec 38 | 39 | # Installer logs 40 | pip-log.txt 41 | pip-delete-this-directory.txt 42 | 43 | # Unit test / coverage reports 44 | htmlcov/ 45 | .tox/ 46 | .nox/ 47 | .coverage 48 | .coverage.* 49 | .cache 50 | nosetests.xml 51 | coverage.xml 52 | *.cover 53 | *.py,cover 54 | .hypothesis/ 55 | .pytest_cache/ 56 | 57 | # Translations 58 | *.mo 59 | *.pot 60 | 61 | # Django stuff: 62 | *.log 63 | local_settings.py 64 | db.sqlite3 65 | db.sqlite3-journal 66 | 67 | # Flask stuff: 68 | instance/ 69 | .webassets-cache 70 | 71 | # Scrapy stuff: 72 | .scrapy 73 | 74 | # Sphinx documentation 75 | docs/_build/ 76 | 77 | # PyBuilder 78 | target/ 79 | 80 | # Jupyter Notebook 81 | .ipynb_checkpoints 82 | 83 | # IPython 84 | profile_default/ 85 | ipython_config.py 86 | 87 | # pyenv 88 | .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 98 | __pypackages__/ 99 | 100 | # Celery stuff 101 | celerybeat-schedule 102 | celerybeat.pid 103 | 104 | # SageMath parsed files 105 | *.sage.py 106 | 107 | # Environments 108 | .env 109 | .venv 110 | env/ 111 | venv/ 112 | ENV/ 113 | env.bak/ 114 | venv.bak/ 115 | 116 | # Spyder project settings 117 | .spyderproject 118 | .spyproject 119 | 120 | # Rope project settings 121 | .ropeproject 122 | 123 | # mkdocs documentation 124 | /site 125 | 126 | # mypy 127 | .mypy_cache/ 128 | .dmypy.json 129 | dmypy.json 130 | 131 | # Pyre type checker 132 | .pyre/ 133 | .vscode/settings.json 134 | 135 | # PyCharm 136 | .idea/ -------------------------------------------------------------------------------- /Benchmarking/StochasticAlgComparison.py: -------------------------------------------------------------------------------- 1 | import random 2 | import time 3 | from statistics import mean 4 | 5 | import aalpy.paths 6 | 7 | from aalpy.SULs import AutomatonSUL 8 | from aalpy.learning_algs import run_stochastic_Lstar, run_Alergia 9 | from aalpy.oracles.RandomWordEqOracle import RandomWordEqOracle 10 | from aalpy.utils import load_automaton_from_file, get_properties_file, get_correct_prop_values 11 | from aalpy.utils import model_check_experiment 12 | from aalpy.automata.StochasticMealyMachine import smm_to_mdp_conversion 13 | 14 | path_to_dir = '../DotModels/MDPs/' 15 | files = ['first_grid.dot', 'second_grid.dot', 'slot_machine.dot', 'mqtt.dot', 'tcp.dot', 'bluetooth.dot'] # 16 | 17 | prop_folder = 'prism_eval_props/' 18 | 19 | aalpy.paths.path_to_prism = "C:/Program Files/prism-4.6/bin/prism.bat" 20 | aalpy.paths.path_to_properties = "prism_eval_props/" 21 | 22 
| model_dict = {m.split('.')[0]: load_automaton_from_file(path_to_dir + m, automaton_type='mdp') for m in files} 23 | 24 | for file in files: 25 | print(file) 26 | 27 | exp_name = file.split('.')[0] 28 | 29 | print('--------------------------------------------------') 30 | print('Experiment:', exp_name) 31 | 32 | original_mdp = model_dict[exp_name] 33 | input_alphabet = original_mdp.get_input_alphabet() 34 | 35 | mdp_sul = AutomatonSUL(original_mdp) 36 | 37 | eq_oracle = RandomWordEqOracle(input_alphabet, mdp_sul, num_walks=500, min_walk_len=5, 38 | max_walk_len=16, reset_after_cex=True) 39 | 40 | learned_classic_mdp, data_mdp = run_stochastic_Lstar(input_alphabet, mdp_sul, eq_oracle, automaton_type='mdp', 41 | min_rounds=10, strategy='classic', n_c=20, n_resample=2000, 42 | stopping_range_dict={}, 43 | max_rounds=200, return_data=True, target_unambiguity=0.98, 44 | print_level=1) 45 | 46 | del mdp_sul 47 | del eq_oracle 48 | 49 | mdp_sul = AutomatonSUL(original_mdp) 50 | 51 | eq_oracle = RandomWordEqOracle(input_alphabet, mdp_sul, num_walks=150, min_walk_len=5, 52 | max_walk_len=15, reset_after_cex=True) 53 | 54 | learned_smm, data_smm = run_stochastic_Lstar(input_alphabet, mdp_sul, eq_oracle, automaton_type='smm', 55 | min_rounds=10, strategy='normal', 56 | max_rounds=200, return_data=True, target_unambiguity=0.98, 57 | print_level=1) 58 | 59 | smm_2_mdp = smm_to_mdp_conversion(learned_smm) 60 | 61 | mdp_results, mdp_err = model_check_experiment(get_properties_file(exp_name), 62 | get_correct_prop_values(exp_name), learned_classic_mdp) 63 | smm_results, smm_err = model_check_experiment(get_properties_file(exp_name), 64 | get_correct_prop_values(exp_name), smm_2_mdp) 65 | 66 | num_alergia_samples = max([data_mdp["queries_learning"] + data_mdp["queries_eq_oracle"], 67 | data_smm["queries_learning"] + data_smm["queries_eq_oracle"]]) 68 | 69 | alergia_samples = [] 70 | for _ in range(num_alergia_samples): 71 | sample = [mdp_sul.pre()] 72 | for _ in 
range(random.randint(10, 30)): 73 | action = random.choice(input_alphabet) 74 | output = mdp_sul.step(action) 75 | sample.append((action, output)) 76 | alergia_samples.append(sample) 77 | 78 | alergia_model = run_Alergia(alergia_samples, automaton_type='mdp') 79 | 80 | alergia_results, alergia_error = model_check_experiment(get_properties_file(exp_name), 81 | get_correct_prop_values(exp_name), alergia_model) 82 | 83 | print('Classic MDP learning', mean(mdp_err.values()), mdp_err) 84 | print('SMM learning', mean(smm_err.values()), smm_err) 85 | print('Alergia learning', mean(alergia_error.values()), alergia_error) 86 | 87 | print('Classic MDP traces', data_mdp["queries_learning"] + data_mdp["queries_eq_oracle"]) 88 | print('SMM learning traces', data_smm["queries_learning"] + data_smm["queries_eq_oracle"]) -------------------------------------------------------------------------------- /Benchmarking/StopWithErorrRate.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | import random 3 | import time 4 | from collections import defaultdict 5 | from statistics import mean 6 | 7 | import aalpy.paths 8 | 9 | from aalpy.SULs import AutomatonSUL 10 | from aalpy.learning_algs import run_stochastic_Lstar, run_Alergia 11 | from aalpy.oracles.RandomWordEqOracle import RandomWordEqOracle 12 | from aalpy.utils import load_automaton_from_file, get_properties_file, get_correct_prop_values, model_check_properties 13 | from aalpy.utils import model_check_experiment 14 | from aalpy.automata.StochasticMealyMachine import smm_to_mdp_conversion 15 | 16 | path_to_dir = '../DotModels/MDPs/' 17 | files = ['slot_machine.dot', 'bluetooth.dot'] # 18 | files = ['first_grid.dot', 'second_grid.dot', 'tcp.dot', 'mqtt.dot', 'bluetooth.dot', 'slot_machine.dot'] 19 | 20 | prop_folder = 'prism_eval_props/' 21 | 22 | aalpy.paths.path_to_prism = "C:/Program Files/prism-4.6/bin/prism.bat" 23 | aalpy.paths.path_to_properties = "prism_eval_props/" 24 | 
25 | model_dict = {m.split('.')[0]: load_automaton_from_file(path_to_dir + m, automaton_type='mdp') for m in files} 26 | 27 | model_type = ['smm'] 28 | cex_processing = [None, 'longest_prefix', 'rs'] 29 | # model_type.reverse() 30 | 31 | res = defaultdict(list) 32 | 33 | # for file in files: 34 | # for mt in model_type: 35 | # for cp in cex_processing: 36 | # for _ in range(4): 37 | # 38 | # exp_name = file.split('.')[0] 39 | # 40 | # print('--------------------------------------------------') 41 | # print('Experiment:', exp_name, cp) 42 | # 43 | # original_mdp = model_dict[exp_name] 44 | # input_alphabet = original_mdp.get_input_alphabet() 45 | # 46 | # mdp_sul = AutomatonSUL(original_mdp) 47 | # 48 | # eq_oracle = RandomWordEqOracle(input_alphabet, mdp_sul, num_walks=500, min_walk_len=5, 49 | # max_walk_len=15, reset_after_cex=True) 50 | # 51 | # pbs = ((get_properties_file(exp_name), 52 | # get_correct_prop_values(exp_name), 0.02 if exp_name != 'bluetooth' else 0.03)) 53 | # learned_classic_mdp, data_mdp = run_stochastic_Lstar(input_alphabet, mdp_sul, eq_oracle, automaton_type=mt, 54 | # min_rounds=10, 55 | # #property_based_stopping=pbs, 56 | # cex_processing=cp, 57 | # samples_cex_strategy=None, 58 | # return_data=True, target_unambiguity=0.98, 59 | # print_level=1) 60 | # 61 | # res[exp_name].append((cp, data_mdp['queries_learning'] + data_mdp['queries_eq_oracle'])) 62 | 63 | # with open('cex_processing_res.pickle', 'wb') as handle: 64 | # pickle.dump(res, handle, protocol=pickle.HIGHEST_PROTOCOL) 65 | 66 | with open('cex_processing_res.pickle', 'rb') as handle: 67 | res = pickle.load(handle) 68 | 69 | for key, val in res.items(): 70 | print(key) 71 | sorted_by_cp = defaultdict(list) 72 | for cp, data in val: 73 | sorted_by_cp[cp].append(data) 74 | 75 | for cp_method, data in sorted_by_cp.items(): 76 | print(cp_method) 77 | print(mean(data), min(data), max(data)) -------------------------------------------------------------------------------- 
/Benchmarking/all_results.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DES-Lab/AALpy/177862fb505df3f92a945861ceb1977962327327/Benchmarking/all_results.pickle -------------------------------------------------------------------------------- /Benchmarking/benchmark.py: -------------------------------------------------------------------------------- 1 | import os 2 | from statistics import mean 3 | 4 | from aalpy.SULs import AutomatonSUL 5 | from aalpy.learning_algs import run_Lstar 6 | from aalpy.oracles import StatePrefixEqOracle 7 | from aalpy.utils import load_automaton_from_file 8 | 9 | dfa_1000_states_20_inputs = '../DotModels/DFA_1000_states_20_inp' 10 | dfa_2000_states_10_inputs = '../DotModels/DFA_2000_states_10_inp' 11 | moore_1000_states_20_inputs = '../DotModels/Moore_1000_states_20_inp_out' 12 | moore_2000_states_10_inputs = '../DotModels/Moore_2000_states_10_inp_out' 13 | run_times = [] 14 | 15 | # change on which folder to perform experiments 16 | exp = dfa_2000_states_10_inputs 17 | 18 | benchmarks = os.listdir(exp) 19 | benchmarks = benchmarks[:10] 20 | 21 | caching_opt = [True, False] 22 | closing_options = ['shortest_first', 'longest_first', 'single'] 23 | suffix_processing = ['all', 'single'] 24 | counter_example_processing = ['rs', 'longest_prefix', None] 25 | e_closedness = ['prefix', 'suffix'] 26 | 27 | for b in benchmarks: 28 | automaton = load_automaton_from_file(f'{exp}/{b}', automaton_type='dfa') 29 | input_al = automaton.get_input_alphabet() 30 | 31 | sul_dfa = AutomatonSUL(automaton) 32 | 33 | state_origin_eq_oracle = StatePrefixEqOracle(input_al, sul_dfa, walks_per_state=5, walk_len=25) 34 | 35 | learned_dfa, data = run_Lstar(input_al, sul_dfa, state_origin_eq_oracle, automaton_type='dfa', 36 | cache_and_non_det_check=False, cex_processing='rs', return_data=True, print_level=0) 37 | run_times.append(data['total_time']) 38 | 39 | print(run_times) 40 | 
print(mean(run_times)) 41 | -------------------------------------------------------------------------------- /Benchmarking/benchmark_alphabet_increase.py: -------------------------------------------------------------------------------- 1 | from statistics import mean 2 | import csv 3 | 4 | from aalpy.SULs import AutomatonSUL 5 | from aalpy.learning_algs import run_Lstar 6 | from aalpy.oracles import RandomWalkEqOracle 7 | from aalpy.utils import generate_random_dfa, generate_random_mealy_machine, generate_random_moore_machine 8 | 9 | num_states = 1000 10 | alph_size = 5 11 | 12 | repeat = 10 13 | num_increases = 20 14 | 15 | states = ['alph_size', alph_size] 16 | times_dfa = ['dfa_pypy_rs'] 17 | times_mealy = ['mealy_pypy_rs'] 18 | times_moore = ['moore_pypyrs'] 19 | 20 | cex_processing = 'rs' 21 | for i in range(num_increases): 22 | print(i) 23 | total_time_dfa = [] 24 | total_time_mealy = [] 25 | total_time_moore = [] 26 | 27 | for _ in range(repeat): 28 | alphabet = list(range(alph_size)) 29 | 30 | dfa = generate_random_dfa(num_states, alphabet=alphabet, num_accepting_states=num_states // 2) 31 | sul = AutomatonSUL(dfa) 32 | 33 | # eq_oracle = StatePrefixEqOracle(alphabet, sul, walks_per_state=5, walk_len=40) 34 | eq_oracle = RandomWalkEqOracle(alphabet, sul, num_steps=10000, reset_prob=0.09) 35 | 36 | _, data = run_Lstar(alphabet, sul, eq_oracle, cex_processing=cex_processing, cache_and_non_det_check=False, 37 | return_data=True, automaton_type='dfa') 38 | 39 | total_time_dfa.append(data['learning_time']) 40 | del dfa 41 | del sul 42 | del eq_oracle 43 | 44 | mealy = generate_random_mealy_machine(num_states, input_alphabet=alphabet, output_alphabet=alphabet) 45 | sul_mealy = AutomatonSUL(mealy) 46 | 47 | # eq_oracle = StatePrefixEqOracle(alphabet, sul_mealy, walks_per_state=5, walk_len=40) 48 | eq_oracle = RandomWalkEqOracle(alphabet, sul_mealy, num_steps=10000, reset_prob=0.09) 49 | 50 | _, data = run_Lstar(alphabet, sul_mealy, eq_oracle, 
cex_processing=cex_processing, 51 | cache_and_non_det_check=False, 52 | return_data=True, automaton_type='mealy') 53 | 54 | total_time_mealy.append(data['learning_time']) 55 | 56 | del mealy 57 | del sul_mealy 58 | del eq_oracle 59 | 60 | moore = generate_random_moore_machine(num_states, input_alphabet=alphabet, output_alphabet=alphabet) 61 | moore_sul = AutomatonSUL(moore) 62 | 63 | # eq_oracle = StatePrefixEqOracle(alphabet, moore_sul, walks_per_state=5, walk_len=40) 64 | eq_oracle = RandomWalkEqOracle(alphabet, moore_sul, num_steps=10000, reset_prob=0.09) 65 | 66 | _, data = run_Lstar(alphabet, moore_sul, eq_oracle, cex_processing=cex_processing, 67 | cache_and_non_det_check=False, 68 | return_data=True, automaton_type='moore') 69 | 70 | total_time_moore.append(data['learning_time']) 71 | 72 | alph_size += 5 73 | states.append(alph_size) 74 | 75 | # save data and keep averages 76 | times_dfa.append(round(mean(total_time_dfa), 4)) 77 | times_mealy.append(round(mean(total_time_mealy), 4)) 78 | times_moore.append(round(mean(total_time_moore), 4)) 79 | 80 | with open('increasing_alphabet_experiments.csv', 'w') as f: 81 | wr = csv.writer(f, dialect='excel') 82 | wr.writerow(states) 83 | wr.writerow(times_dfa) 84 | wr.writerow(times_mealy) 85 | wr.writerow(times_moore) 86 | -------------------------------------------------------------------------------- /Benchmarking/benchmark_size_increase.py: -------------------------------------------------------------------------------- 1 | from statistics import mean 2 | import csv 3 | 4 | from aalpy.SULs import AutomatonSUL 5 | from aalpy.learning_algs import run_Lstar 6 | from aalpy.oracles import RandomWalkEqOracle 7 | from aalpy.utils import generate_random_dfa, generate_random_mealy_machine, generate_random_moore_machine 8 | 9 | num_states = 100 10 | alphabet = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] 11 | 12 | repeat = 15 13 | num_increases = 50 14 | 15 | states = ['num_states'] 16 | times_dfa = ['dfa_pypy_rs'] 17 | times_mealy = 
['mealy_pypy_rs'] 18 | times_moore = ['moore_pypyrs'] 19 | total_dfa = ['dfa_total'] 20 | total_mealy = ['mealy_total'] 21 | total__moore = ['moore_total'] 22 | 23 | cex_processing = 'rs' 24 | for i in range(num_increases): 25 | print(i) 26 | learning_time_dfa = [] 27 | learning_time_mealy = [] 28 | learning_time_moore = [] 29 | 30 | total_time_dfa = [] 31 | total_time_mealy = [] 32 | total_time_moore = [] 33 | 34 | states.append(num_states) 35 | 36 | for _ in range(repeat): 37 | dfa = generate_random_dfa(num_states, alphabet=alphabet, num_accepting_states=num_states // 2) 38 | sul = AutomatonSUL(dfa) 39 | 40 | # eq_oracle = StatePrefixEqOracle(alphabet, sul, walks_per_state=5, walk_len=40) 41 | eq_oracle = RandomWalkEqOracle(alphabet, sul, num_steps=9000, reset_prob=0.09) 42 | 43 | _, data = run_Lstar(alphabet, sul, eq_oracle, cex_processing=cex_processing, cache_and_non_det_check=False, 44 | return_data=True, automaton_type='dfa') 45 | 46 | learning_time_dfa.append(data['learning_time']) 47 | total_time_dfa.append(data['total_time']) 48 | 49 | del sul 50 | del eq_oracle 51 | del dfa 52 | 53 | mealy = generate_random_mealy_machine(num_states, input_alphabet=alphabet, output_alphabet=alphabet) 54 | sul_mealy = AutomatonSUL(mealy) 55 | 56 | # eq_oracle = StatePrefixEqOracle(alphabet, sul_mealy, walks_per_state=5, walk_len=40) 57 | eq_oracle = RandomWalkEqOracle(alphabet, sul_mealy, num_steps=9000, reset_prob=0.09) 58 | 59 | _, data = run_Lstar(alphabet, sul_mealy, eq_oracle, cex_processing=cex_processing, 60 | cache_and_non_det_check=False, 61 | return_data=True, automaton_type='mealy') 62 | 63 | learning_time_mealy.append(data['learning_time']) 64 | total_time_mealy.append(data['total_time']) 65 | 66 | del mealy 67 | del sul_mealy 68 | del eq_oracle 69 | 70 | moore = generate_random_moore_machine(num_states, input_alphabet=alphabet, output_alphabet=alphabet) 71 | moore_sul = AutomatonSUL(moore) 72 | 73 | # eq_oracle = StatePrefixEqOracle(alphabet, moore_sul, 
walks_per_state=5, walk_len=40) 74 | eq_oracle = RandomWalkEqOracle(alphabet, moore_sul, num_steps=9000, reset_prob=0.09) 75 | 76 | _, data = run_Lstar(alphabet, moore_sul, eq_oracle, cex_processing=cex_processing, 77 | cache_and_non_det_check=False, 78 | return_data=True, automaton_type='moore') 79 | 80 | learning_time_moore.append(data['learning_time']) 81 | total_time_moore.append(data['total_time']) 82 | 83 | # save data and keep averages 84 | times_dfa.append(round(mean(learning_time_dfa), 4)) 85 | times_mealy.append(round(mean(learning_time_mealy), 4)) 86 | times_moore.append(round(mean(learning_time_moore), 4)) 87 | 88 | total_dfa.append(round(mean(total_time_dfa), 4)) 89 | total_mealy.append(round(mean(total_time_mealy), 4)) 90 | total__moore.append(round(mean(total_time_moore), 4)) 91 | 92 | num_states += 100 93 | 94 | with open('increasing_size_experiments.csv', 'w') as f: 95 | wr = csv.writer(f, dialect='excel') 96 | wr.writerow(states) 97 | wr.writerow(times_dfa) 98 | wr.writerow(times_mealy) 99 | wr.writerow(times_moore) 100 | wr.writerow(total_dfa) 101 | wr.writerow(total_mealy) 102 | wr.writerow(times_moore) 103 | -------------------------------------------------------------------------------- /Benchmarking/cex_processing_benchmark.py: -------------------------------------------------------------------------------- 1 | from collections import defaultdict 2 | from statistics import mean, stdev 3 | 4 | from aalpy.learning_algs import run_KV, run_Lstar 5 | 6 | from aalpy.SULs import AutomatonSUL 7 | from aalpy.oracles import RandomWalkEqOracle 8 | from aalpy.utils import generate_random_deterministic_automata, bisimilar 9 | 10 | counterexample_processing_strategy = ['rs', 'linear_fwd', 'linear_bwd', 'exponential_fwd', 'exponential_bwd'] 11 | algorithms = ['l_star', 'kv'] 12 | model_sizes = [500] 13 | model_type = ['mealy', 'moore'] 14 | # alphabet_sizes = [(3,2), (3, 5), (3, 10), (5, 2), (5, 5), (5, 20)] 15 | alphabet_sizes = [(5, 3)] 16 | 17 | 
num_repetitions = 5 18 | 19 | for learning_alg in algorithms: 20 | results = defaultdict(list) 21 | for model in model_type: 22 | for model_size in model_sizes: 23 | for input_size, output_size in alphabet_sizes: 24 | for cex_processing in counterexample_processing_strategy: 25 | 26 | for _ in range(num_repetitions): 27 | random_model = generate_random_deterministic_automata(model, num_states=model_size, 28 | input_alphabet_size=input_size, 29 | output_alphabet_size=output_size) 30 | sul = AutomatonSUL(random_model) 31 | input_al = random_model.get_input_alphabet() 32 | eq_oracle = RandomWalkEqOracle(input_al, sul, num_steps=20000, reset_prob=0.09) 33 | 34 | if learning_alg == 'kv': 35 | learned_model, info = run_KV(input_al, sul, eq_oracle, 36 | automaton_type=model, cex_processing=cex_processing, 37 | return_data=True, print_level=0) 38 | else: 39 | learned_model, info = run_Lstar(input_al, sul, eq_oracle, 40 | automaton_type=model, cex_processing=cex_processing, 41 | return_data=True, print_level=0) 42 | results[cex_processing].append(info['steps_learning']) 43 | 44 | if not bisimilar(learned_model, random_model): 45 | print(learning_alg, cex_processing, 'mismatch') 46 | 47 | print(learning_alg) 48 | for k, v in results.items(): 49 | print(k, mean(v), stdev(v), min(v), max(v)) 50 | -------------------------------------------------------------------------------- /Benchmarking/compare_lstar_and_kv.py: -------------------------------------------------------------------------------- 1 | from aalpy.SULs import AutomatonSUL 2 | from aalpy.learning_algs import run_Lstar, run_KV 3 | from aalpy.oracles import RandomWordEqOracle 4 | from aalpy.utils import generate_random_deterministic_automata 5 | 6 | automata_type = ['dfa', 'mealy', 'moore'] 7 | automata_size = [10, 100, 500, 1000,] 8 | input_sizes = [2, 3] 9 | output_sizes = [2, 3, 5, 10] 10 | 11 | test_models = [] 12 | for model_type in automata_type: 13 | for size in automata_size: 14 | for i in input_sizes: 15 | for 
o in output_sizes: 16 | random_model = generate_random_deterministic_automata(model_type, size, i, o, num_accepting_states=size//8) 17 | input_al = random_model.get_input_alphabet() 18 | 19 | print('------------------------------------------') 20 | if model_type != 'dfa': 21 | print(f'Type: {model_type}, size: {size}, # inputs: {i}, # outputs: {o}') 22 | else: 23 | print(f'Type: {model_type}, size: {size}, # inputs: {i}, # accepting: {size//8}') 24 | 25 | # Lstar 26 | sul = AutomatonSUL(random_model) 27 | eq_oracle = RandomWordEqOracle(input_al, sul, num_walks=5000, min_walk_len=10, max_walk_len=40) 28 | l_star_model, l_star_info = run_Lstar(input_al, sul, eq_oracle, model_type, print_level=0, return_data=True) 29 | 30 | l_star_steps, l_star_queries = l_star_info['steps_learning'], l_star_info['queries_learning'] 31 | 32 | # KV 33 | sul = AutomatonSUL(random_model) 34 | eq_oracle = RandomWordEqOracle(input_al, sul, num_walks=5000, min_walk_len=10, max_walk_len=40) 35 | kv_model, kv_info = run_KV(input_al, sul, eq_oracle, model_type, print_level=0, return_data=True) 36 | 37 | kv_steps, kv_queries = kv_info['steps_learning'], kv_info['queries_learning'] 38 | 39 | if l_star_model.size != random_model.size: 40 | print('L* did not learn correctly.') 41 | if kv_model.size != random_model.size: 42 | print('KV did not learn correctly.') 43 | 44 | print(f'L* steps: {l_star_steps}') 45 | print(f'KV steps: {kv_steps}') 46 | if kv_steps < l_star_steps: 47 | print(f'KV is {round((l_star_steps / kv_steps) * 100 - 100, 2)}% more step efficient') 48 | else: 49 | print(f'L* is {round((kv_steps / l_star_steps) * 100 - 100, 2)}% more step efficient') 50 | 51 | 52 | 53 | 54 | -------------------------------------------------------------------------------- /Benchmarking/deterministic_evaluation.PNG: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/DES-Lab/AALpy/177862fb505df3f92a945861ceb1977962327327/Benchmarking/deterministic_evaluation.PNG -------------------------------------------------------------------------------- /Benchmarking/error_benchmark_statistics.py: -------------------------------------------------------------------------------- 1 | import csv 2 | import os 3 | from collections import defaultdict 4 | from statistics import mean 5 | 6 | directory = 'FM_mdp_smm_error_based_stop/benchmark_no_cq_bfs_longest_prefix/' 7 | 8 | benchmarks = os.listdir(directory)[:-1] 9 | 10 | benchmarks.remove('exp_14.csv') 11 | 12 | values = dict() 13 | 14 | for file in benchmarks: 15 | with open(directory + file, 'r') as f: 16 | reader = csv.reader(f) 17 | data = list(reader) 18 | 19 | for i in range(0, len(data), 3): 20 | header = data[i] 21 | mdp, smm = data[i + 1], data[i + 2] 22 | 23 | for formalism in [mdp, smm]: 24 | for i, val in enumerate(formalism[1:]): 25 | if formalism[0] not in values.keys(): 26 | values[formalism[0]] = defaultdict(list) 27 | values[formalism[0]][header[i + 1]].append(round(float(val), 2)) 28 | 29 | min_values_dict = dict() 30 | max_values_dict = dict() 31 | avr_values_dict = dict() 32 | 33 | for exp in values: 34 | exp_name = exp[12:] 35 | formalism = 'smm' if 'smm' in exp else 'mdp' 36 | 37 | name = f'{exp_name}_{formalism}' 38 | min_values_dict[name] = dict() 39 | max_values_dict[name] = dict() 40 | avr_values_dict[name] = dict() 41 | 42 | for category, value in values[exp].items(): 43 | min_values_dict[name][category] = min(value) 44 | max_values_dict[name][category] = max(value) 45 | avr_values_dict[name][category] = round(mean(value), 2) 46 | 47 | interesting_fields = [' Learning time', ' Learning Rounds', ' #MQ Learning', ' # Steps Learning'] 48 | 49 | print('ALL ERRORS ARE LESS THAN 2%. 
THAT WAS USED AS STOPPING CRITERION') 50 | experiments = list(min_values_dict.keys()) 51 | for e_index in range(0, len(experiments), 2): 52 | for i in interesting_fields: 53 | print(f'{experiments[e_index]} vs {experiments[e_index + 1]} = > {i}') 54 | min_eff = round(min_values_dict[experiments[e_index]][i] / min_values_dict[experiments[e_index + 1]][i]*100 , 2) 55 | print(f'Min : {min_values_dict[experiments[e_index]][i]} vs {min_values_dict[experiments[e_index + 1]][i]} | SMM efficiency : {min_eff}') 56 | max_eff = round(max_values_dict[experiments[e_index]][i] / max_values_dict[experiments[e_index + 1]][i]*100 , 2) 57 | print(f'Max : {max_values_dict[experiments[e_index]][i]} vs {max_values_dict[experiments[e_index + 1]][i]} | SMM efficiency : {max_eff}') 58 | avr_eff = round(avr_values_dict[experiments[e_index]][i] / avr_values_dict[experiments[e_index + 1]][i]*100 , 2) 59 | print(f'Avr : {avr_values_dict[experiments[e_index]][i]} vs {avr_values_dict[experiments[e_index + 1]][i]}| SMM efficiency : {avr_eff}') 60 | 61 | print('-------------------------------------------------') 62 | 63 | with open('error_benchmark.csv', 'w',newline='') as file: 64 | writer = csv.writer(file) 65 | 66 | experiments = list(min_values_dict.keys()) 67 | for e_index in range(0, len(experiments), 2): 68 | writer.writerow([experiments[e_index][:-4], 'mdp', 'smm', 'smm compared to mdp efficiency %']) 69 | for i in interesting_fields: 70 | print(f'{experiments[e_index]} vs {experiments[e_index + 1]} = > {i}') 71 | min_eff = round(min_values_dict[experiments[e_index]][i] / min_values_dict[experiments[e_index + 1]][i]*100 , 2) 72 | writer.writerow([i + '_min', min_values_dict[experiments[e_index]][i], min_values_dict[experiments[e_index + 1]][i], min_eff]) 73 | max_eff = round(max_values_dict[experiments[e_index]][i] / max_values_dict[experiments[e_index + 1]][i]*100 , 2) 74 | writer.writerow([i + '_max', max_values_dict[experiments[e_index]][i], max_values_dict[experiments[e_index + 
1]][i], max_eff]) 75 | avr_eff = round(avr_values_dict[experiments[e_index]][i] / avr_values_dict[experiments[e_index + 1]][i]*100 , 2) 76 | writer.writerow([i + '_avr', avr_values_dict[experiments[e_index]][i], avr_values_dict[experiments[e_index + 1]][i], avr_eff]) 77 | writer.writerow([]) 78 | 79 | print('-------------------------------------------------') 80 | 81 | -------------------------------------------------------------------------------- /Benchmarking/fm_plots.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | def plot_error(): 4 | import matplotlib.pyplot as plt 5 | import matplotlib 6 | 7 | matplotlib.use("pgf") 8 | matplotlib.rcParams.update({ 9 | "pgf.texsystem": "pdflatex", 10 | 'font.family': 'serif', 11 | 'text.usetex': True, 12 | 'pgf.rcfonts': False, 13 | }) 14 | 15 | # MDP then SMM 16 | learning_time_data = [ 17 | [68.19, 140.31, 154.35, 116.8], 18 | [27.5, 98.31, 30.87, 68] 19 | ] 20 | 21 | num_mq_data = [ 22 | [81803.23, 153758.15, 560705.92, 248552.62, ], 23 | [36937.54, 91309.08, 51791.54, 92607] 24 | ] 25 | 26 | import numpy as np 27 | 28 | N = 4 29 | 30 | ind = np.arange(N) # the x locations for the groups 31 | width = 0.25 # the width of the bars 32 | 33 | # fig = plt.figure() 34 | fig, (ax_time, ax_mq) = plt.subplots(1, 2, figsize=(10, 3)) 35 | 36 | ax_time.bar(ind, learning_time_data[0], width, label='MDP') 37 | ax_time.bar(ind + width, learning_time_data[1], width, label='SMM') 38 | 39 | # add some 40 | ax_time.set_ylabel('Learning Time (s)') 41 | 42 | ax_time.set_xticks(ind + width / 2) 43 | ax_time.set_xticklabels(('35 State\nGridworld', '72 State\nGridworld', 'MQTT', 'TCP',)) 44 | 45 | ax_time.grid(axis='y') 46 | ax_time.legend(loc='upper left') 47 | 48 | ax_mq.bar(ind, num_mq_data[0], width, label='MDP') 49 | ax_mq.bar(ind + width, num_mq_data[1], width, label='SMM') 50 | 51 | # add some 52 | ax_mq.set_ylabel('\# Membership Queries') 53 | ax_mq.ticklabel_format(axis='y', style='sci', 
scilimits=(1, 4)) 54 | ax_mq.set_xticks(ind + width / 2) 55 | ax_mq.set_xticklabels(('35 State\nGridworld', '72 State\nGridworld', 'MQTT', 'TCP',)) 56 | ax_mq.legend(loc='upper left') 57 | 58 | 59 | ax_mq.grid(axis='y') 60 | fig.tight_layout() 61 | 62 | # plt.show() 63 | 64 | plt.savefig("error_bench.pgf", bbox_inches='tight') 65 | 66 | import tikzplotlib 67 | 68 | tikzplotlib.save("error_bench.tex") 69 | 70 | def plot_benchmarks(): 71 | import matplotlib.pyplot as plt 72 | import matplotlib 73 | 74 | matplotlib.use("pgf") 75 | matplotlib.rcParams.update({ 76 | "pgf.texsystem": "pdflatex", 77 | 'font.family': 'serif', 78 | 'text.usetex': True, 79 | 'pgf.rcfonts': False, 80 | }) 81 | 82 | # MDP then SMM 83 | 84 | num_mq_data = [ 85 | [81803.23, 153758.15, 560705.92, 248552.62, ], 86 | [36937.54, 91309.08, 51791.54, 92607] 87 | ] 88 | 89 | # TODO 90 | avr_cum_err = [ 91 | 92 | ] 93 | 94 | import numpy as np 95 | 96 | N = 4 97 | 98 | ind = np.arange(N) # the x locations for the groups 99 | width = 0.25 # the width of the bars 100 | 101 | # fig = plt.figure() 102 | fig, (ax_time, ax_mq) = plt.subplots(1, 2, figsize=(10, 3)) 103 | 104 | ax_time.bar(ind, avr_cum_err[0], width, label='MDP') 105 | ax_time.bar(ind + width, avr_cum_err[1], width, label='SMM') 106 | 107 | # add some 108 | ax_time.set_ylabel('Learning Time (s)') 109 | 110 | ax_time.set_xticks(ind + width / 2) 111 | ax_time.set_xticklabels(('35 State\nGridworld', '72 State\nGridworld', 'MQTT', 'TCP',)) 112 | 113 | ax_time.grid(axis='y') 114 | ax_time.legend(loc='upper left') 115 | 116 | ax_mq.bar(ind, num_mq_data[0], width, label='MDP') 117 | ax_mq.bar(ind + width, num_mq_data[1], width, label='SMM') 118 | 119 | # add some 120 | ax_mq.set_ylabel('\# Membership Queries') 121 | ax_mq.ticklabel_format(axis='y', style='sci', scilimits=(1, 4)) 122 | ax_mq.set_xticks(ind + width / 2) 123 | ax_mq.set_xticklabels(('35 State\nGridworld', '72 State\nGridworld', 'MQTT', 'TCP',)) 124 | 125 | ax_mq.legend(loc='upper left') 
126 | 127 | ax_mq.grid(axis='y') 128 | fig.tight_layout() 129 | 130 | # plt.show() 131 | 132 | plt.savefig("benchmarking.pgf", bbox_inches='tight') 133 | 134 | import tikzplotlib 135 | 136 | tikzplotlib.save("benchmarking.tex") 137 | 138 | if __name__ == '__main__': 139 | plot_error() -------------------------------------------------------------------------------- /Benchmarking/papni_sequances.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DES-Lab/AALpy/177862fb505df3f92a945861ceb1977962327327/Benchmarking/papni_sequances.pickle -------------------------------------------------------------------------------- /Benchmarking/passive_mdp_vs_smm.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | import aalpy.paths 4 | from aalpy.SULs import AutomatonSUL 5 | from aalpy.automata.StochasticMealyMachine import smm_to_mdp_conversion 6 | from aalpy.learning_algs import run_Alergia 7 | from aalpy.utils import load_automaton_from_file, get_correct_prop_values, get_properties_file 8 | from aalpy.utils import model_check_experiment 9 | 10 | path_to_dir = '../DotModels/MDPs/' 11 | files = ['first_grid.dot', 'second_grid.dot', 12 | 'slot_machine.dot', 'mqtt.dot', 'tcp.dot'] # 'shared_coin.dot' 13 | 14 | aalpy.paths.path_to_prism = "C:/Program Files/prism-4.7/bin/prism.bat" 15 | aalpy.paths.path_to_properties = "prism_eval_props/" 16 | 17 | 18 | def writeSamplesToFile(samples, path="alergiaSamples.txt"): 19 | isSMM = False 20 | if isinstance(samples[0][0], tuple): 21 | isSMM = True 22 | with open(path, 'a') as f: 23 | for sample in samples: 24 | s = "" if isSMM else f'{str(sample.pop(0))}' 25 | for i, o in sample: 26 | s += f',{i},{o}' 27 | f.write(s + '\n') 28 | 29 | f.close() 30 | # samples.clear() 31 | 32 | 33 | def deleteSampleFile(path="alergiaSamples.txt"): 34 | import os 35 | if os.path.exists(path): 36 | os.remove(path) 37 | 38 | 39 | 
num_traces = 100000 40 | 41 | for file in ['first_grid.dot']: 42 | 43 | exp_name = file.split('.')[0] 44 | 45 | original_mdp = load_automaton_from_file(path_to_dir + file, automaton_type='mdp') 46 | input_alphabet = original_mdp.get_input_alphabet() 47 | 48 | mdp_sul = AutomatonSUL(original_mdp) 49 | 50 | for _ in range(1): 51 | 52 | data = [] 53 | for _ in range(num_traces): 54 | sample = [mdp_sul.pre()] 55 | for _ in range(random.randint(10, 50)): 56 | i = random.choice(input_alphabet) 57 | o = mdp_sul.step(i) 58 | sample.append((i, o)) 59 | data.append(sample) 60 | mdp_sul.post() 61 | 62 | learned_mdp = run_Alergia(data, automaton_type='mdp') 63 | 64 | for s in data: 65 | s.pop(0) 66 | 67 | learned_smm = run_Alergia(data, automaton_type='smm') 68 | 69 | smm_2_mdp = smm_to_mdp_conversion(learned_smm) 70 | 71 | mdp_results, mdp_err = model_check_experiment(get_properties_file(exp_name), 72 | get_correct_prop_values(exp_name), learned_mdp) 73 | smm_results, smm_err = model_check_experiment(get_properties_file(exp_name), 74 | get_correct_prop_values(exp_name), smm_2_mdp) 75 | 76 | print(learned_mdp.size, learned_smm.size, smm_2_mdp.size) 77 | print(f'-------{exp_name}---------') 78 | print(f'MDP Error: {mdp_err}') 79 | print(f'SMM Error: {smm_err}') 80 | smm_diff = {} 81 | for key, val in mdp_err.items(): 82 | if key not in smm_err.keys() or smm_err[key] == 0: 83 | continue 84 | smm_diff[key] = round(smm_err[key] - val, 2) 85 | print(f'SMM differance: {smm_diff}') 86 | -------------------------------------------------------------------------------- /Benchmarking/prism_eval_props/bluetooth.props: -------------------------------------------------------------------------------- 1 | Pmax=? [ F<5 ("crash") ] 2 | 3 | Pmax=? [ F<8 ("crash") ] 4 | 5 | Pmax=? [ F<11 ("crash") ] 6 | 7 | Pmax=? [ F<14 ("crash") ] 8 | 9 | Pmax=? [ F<17 ("crash") ] 10 | 11 | Pmax=? [ F<20 ("crash") ] 12 | 13 | Pmax=? [ F<3 ("no_response") ] 14 | 15 | Pmax=? 
[ F<5 ("no_response") ] 16 | 17 | Pmax=? [ F<8 ("no_response") ] 18 | 19 | Pmax=? [ F<11 ("no_response") ] 20 | 21 | Pmax=? [ F<14 ("no_response") ] 22 | 23 | Pmax=? [ F<17 ("no_response") ] 24 | 25 | Pmax=? [ F<20 ("no_response") ] 26 | 27 | -------------------------------------------------------------------------------- /Benchmarking/prism_eval_props/emqtt_two_client.props: -------------------------------------------------------------------------------- 1 | Pmax=? [ F<5 ("c2_Pub_c2_my_topic_bye") ] 2 | 3 | Pmax=? [ F<5 ("c1_crash") ] 4 | 5 | Pmax=? [ F<11 ("c1_crash") ] 6 | 7 | Pmax=? [ F<17 ("c1_crash") ] 8 | 9 | Pmax=? [(!("c2_crash")) U<12 ("c2_Pub_c2_my_topic_messageQos1") ] 10 | 11 | -------------------------------------------------------------------------------- /Benchmarking/prism_eval_props/first_eval.props: -------------------------------------------------------------------------------- 1 | Pmax=?[F<12("goal")] 2 | 3 | Pmax=? [ !("grass") U<=14 ("goal") ] 4 | 5 | Pmax=? [ !("sand") U<=16 ("goal") ] 6 | 7 | -------------------------------------------------------------------------------- /Benchmarking/prism_eval_props/second_eval.props: -------------------------------------------------------------------------------- 1 | Pmax=?[F<15("goal")] 2 | 3 | Pmax=?[F<13("goal")] 4 | 5 | Pmax=? [ !("mud") U<=18 ("goal") ] 6 | 7 | Pmax=? 
[ !("sand") U<=20 ("goal") ] 8 | 9 | -------------------------------------------------------------------------------- /Benchmarking/prism_eval_props/shared_coin_eval.props: -------------------------------------------------------------------------------- 1 | Pmax=?[F ("finished" & "c1_heads" & "c2_tails")] 2 | 3 | Pmax=?[F ("finished" & "c1_tails" & "c2_tails")] 4 | 5 | Pmax=?[!"five" U "finished"] 6 | 7 | Pmax=?[!"four" U "finished"] 8 | 9 | Pmax=?[F<40 ("finished" & "c1_heads" & "c2_tails")] 10 | 11 | Pmax=?[F<40 ("finished" & "c1_tails" & "c2_tails")] 12 | 13 | Pmax=?[!"five" U<40 "finished"] 14 | 15 | Pmax=?[!"four" U<40 "finished"] 16 | 17 | -------------------------------------------------------------------------------- /Benchmarking/prism_eval_props/slot_machine_eval.props: -------------------------------------------------------------------------------- 1 | Pmax=? [ F ("Pr10") ] 2 | 3 | Pmax=? [ F ("Pr2") ] 4 | 5 | Pmax=? [ F ("Pr0") ] 6 | 7 | Pmax=? [ X (X ("r220")) ] 8 | 9 | Pmax=? [ X(X (X ("r122"))) ] 10 | 11 | Pmax=? [ !(F<10 ("end")) ] 12 | 13 | Pmax=? [ X (X (X ("r111")))&(F ("Pr0")) ] 14 | 15 | -------------------------------------------------------------------------------- /Benchmarking/prism_eval_props/tcp_eval.props: -------------------------------------------------------------------------------- 1 | 2 | Pmax=? [ F<5 ("crash") ] 3 | 4 | Pmax=? [ F<11 ("crash") ] 5 | 6 | Pmax=? [ F<17 ("crash") ] 7 | 8 | Pmax=? 
[ F<23 ("crash") ] 9 | -------------------------------------------------------------------------------- /Benchmarking/rpni_papni_memory_footrpint.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from random import randint, random 3 | import matplotlib.pyplot as plt 4 | 5 | # Data 6 | import tikzplotlib 7 | 8 | from Benchmarking.visualize_papni_rpni import tikzplotlib_fix_ncols 9 | from aalpy import load_automaton_from_file, run_PAPNI, run_RPNI 10 | from aalpy.utils import generate_input_output_data_from_vpa 11 | from aalpy.utils.BenchmarkVpaModels import get_all_VPAs 12 | from random import seed 13 | 14 | # def get_total_size(obj, seen=None): 15 | # """Recursively find the size of an object and all its referenced objects.""" 16 | # if seen is None: 17 | # seen = set() 18 | # 19 | # obj_id = id(obj) 20 | # if obj_id in seen: # Avoid processing the same object multiple times 21 | # return 0 22 | # 23 | # seen.add(obj_id) 24 | # size = sys.getsizeof(obj) 25 | # 26 | # if isinstance(obj, dict): 27 | # size += sum(get_total_size(k, seen) + get_total_size(v, seen) for k, v in obj.items()) 28 | # elif isinstance(obj, (list, tuple, set, frozenset)): 29 | # size += sum(get_total_size(i, seen) for i in obj) 30 | # elif hasattr(obj, '__dict__'): # For objects with __dict__ attribute 31 | # size += get_total_size(vars(obj), seen) 32 | # elif hasattr(obj, '__slots__'): # For objects with __slots__ 33 | # size += sum(get_total_size(getattr(obj, s), seen) for s in obj.__slots__ if hasattr(obj, s)) 34 | # 35 | # return size 36 | # 37 | # 38 | # def size_in_mb(obj): 39 | # size_bytes = get_total_size(obj) 40 | # return size_bytes / (1024 ** 2) 41 | # 42 | # 43 | # #gt = load_automaton_from_file('../DotModels/arithmetics.dot', 'vpa') 44 | # gt = get_all_VPAs()[9] 45 | # vpa_alphabet = gt.get_input_alphabet() 46 | # 47 | # rpni_size = [] 48 | # papni_size = [] 49 | # for size in range(5000, 50001, 5000): 50 | # print(size) 51 | # 
data = generate_input_output_data_from_vpa(gt, 52 | # num_sequances=size, 53 | # max_seq_len=randint(6, 30)) 54 | # 55 | # y = run_RPNI(data, automaton_type='dfa', print_info=False) 56 | # x = run_PAPNI(data, vpa_alphabet, print_info=False) 57 | # 58 | # rpni_size.append(y) 59 | # papni_size.append(x) 60 | # 61 | # print(rpni_size) 62 | # print(papni_size) 63 | 64 | 65 | # runtime (pta, alg) papni, rpni 66 | rpni_runtime = [(0.02, 0.04), (0.06, 0.11), (0.11, 0.14), (0.11, 0.22), (0.14, 0.24), (0.12, 0.26), (0.15, 0.31), (0.26, 0.28), (0.21, 0.4), (0.25, 0.43)] 67 | papni_runtime = [(0.0, 0.01), (0.01, 0.04), (0.02, 0.04), (0.02, 0.05), (0.02, 0.06), (0.04, 0.07), (0.02, 0.06), (0.03, 0.06), (0.06, 0.1), (0.03, 0.09)] 68 | 69 | # size rpni papni in Mb 70 | rpni_size = [1.8873348236083984, 3.9477672576904297, 5.673147201538086, 7.70704460144043, 9.281957626342773, 12.503767013549805, 14.622617721557617, 15.591878890991211, 18.589590072631836, 20.439626693725586] 71 | papni_size = [0.0034532546997070312, 0.0034532546997070312, 0.0034532546997070312, 0.0034532546997070312, 0.0034532546997070312, 0.0034532546997070312, 0.0034532546997070312, 0.0034532546997070312, 0.0034532546997070312, 0.0034532546997070312] 72 | 73 | papni_size = [papni_size[0]] 74 | for i in range(len(rpni_runtime) - 1): 75 | papni_size.append(papni_size[-1] * (rpni_size[i+1]/rpni_size[i] )) 76 | 77 | print(papni_size) 78 | 79 | # Create subplots 80 | fig, axes = plt.subplots(1, 2, figsize=(12, 5)) 81 | 82 | ticks = range(5000, 50001, 5000) 83 | 84 | 85 | # Runtime plot 86 | axes[0].plot(ticks, [x + y for x,y in rpni_runtime], label="RPNI", marker='o') 87 | axes[0].plot(ticks, [x + y for x,y in papni_runtime], label="PAPNI", marker='s') 88 | axes[0].set_xlabel("Input Size") 89 | axes[0].set_ylabel("Runtime (s)") 90 | axes[0].set_title("Runtime Comparison") 91 | axes[0].legend() 92 | axes[0].grid(True) 93 | 94 | # Size plot 95 | axes[1].plot(ticks, rpni_size, label="RPNI", marker='o') 96 | 
axes[1].plot(ticks, papni_size, label="PAPNI", marker='s') 97 | axes[1].set_xlabel("Input Size") 98 | axes[1].set_ylabel("Size (MB)") 99 | axes[1].set_title("Size Comparison") 100 | axes[1].legend() 101 | axes[1].grid(True) 102 | 103 | # Layout adjustment 104 | plt.tight_layout() 105 | # plt.show() 106 | 107 | tikzplotlib_fix_ncols(fig) 108 | # plt.show() 109 | tikzplotlib.save("runtime_and_size_comparison.tex") 110 | -------------------------------------------------------------------------------- /Benchmarking/stochastic_benchmarking/passive_mdp_vs_smm.py: -------------------------------------------------------------------------------- 1 | import random 2 | import os 3 | import aalpy.paths 4 | from aalpy.SULs import AutomatonSUL 5 | from aalpy.automata.StochasticMealyMachine import smm_to_mdp_conversion 6 | from aalpy.learning_algs import run_Alergia, run_JAlergia 7 | from aalpy.utils import load_automaton_from_file, get_correct_prop_values, get_properties_file, visualize_automaton 8 | from aalpy.utils import model_check_experiment 9 | 10 | path_to_dir = '../DotModels/MDPs/' 11 | files = ['first_grid.dot', 'second_grid.dot', 12 | 'slot_machine.dot', 'mqtt.dot', 'tcp.dot'] # 'shared_coin.dot' 13 | 14 | aalpy.paths.path_to_prism = "C:/Program Files/prism-4.7/bin/prism.bat" 15 | aalpy.paths.path_to_properties = "../prism_eval_props/" 16 | 17 | 18 | def writeSamplesToFile(samples, path="alergiaSamples.txt"): 19 | isSMM = False 20 | if isinstance(samples[0][0], tuple): 21 | isSMM = True 22 | with open(path, 'a') as f: 23 | for sample in samples: 24 | s = "" if isSMM else f'{str(sample.pop(0))}' 25 | for i, o in sample: 26 | s += f',{i},{o}' 27 | f.write(s + '\n') 28 | 29 | f.close() 30 | # samples.clear() 31 | 32 | 33 | def deleteSampleFile(path="alergiaSamples.txt"): 34 | import os 35 | if os.path.exists(path): 36 | os.remove(path) 37 | 38 | 39 | num_traces = 100000 40 | 41 | for file in ['first_grid.dot']: 42 | 43 | exp_name = file.split('.')[0] 44 | 45 | 
original_mdp = load_automaton_from_file(path_to_dir + file, automaton_type='mdp') 46 | input_alphabet = original_mdp.get_input_alphabet() 47 | 48 | mdp_sul = AutomatonSUL(original_mdp) 49 | 50 | for _ in range(1): 51 | 52 | data = [] 53 | for _ in range(num_traces): 54 | sample = [mdp_sul.pre()] 55 | for _ in range(random.randint(10, 50)): 56 | i = random.choice(input_alphabet) 57 | o = mdp_sul.step(i) 58 | sample.append((i, o)) 59 | data.append(sample) 60 | mdp_sul.post() 61 | 62 | learned_mdp = run_Alergia(data, automaton_type='mdp') 63 | 64 | for s in data: 65 | s.pop(0) 66 | 67 | learned_smm = run_Alergia(data, automaton_type='smm') 68 | 69 | smm_2_mdp = smm_to_mdp_conversion(learned_smm) 70 | 71 | mdp_results, mdp_err = model_check_experiment(get_properties_file(exp_name), 72 | get_correct_prop_values(exp_name), learned_mdp) 73 | smm_results, smm_err = model_check_experiment(get_properties_file(exp_name), 74 | get_correct_prop_values(exp_name), smm_2_mdp) 75 | 76 | print(learned_mdp.size, learned_smm.size, smm_2_mdp.size) 77 | print(f'-------{exp_name}---------') 78 | print(f'MDP Error: {mdp_err}') 79 | print(f'SMM Error: {smm_err}') 80 | smm_diff = {} 81 | for key, val in mdp_err.items(): 82 | if key not in smm_err.keys() or smm_err[key] == 0: 83 | continue 84 | smm_diff[key] = round(smm_err[key] - val, 2) 85 | print(f'SMM differance: {smm_diff}') 86 | -------------------------------------------------------------------------------- /Benchmarking/stochastic_benchmarking/stochastic_benchmark_random_automata.py: -------------------------------------------------------------------------------- 1 | from itertools import product 2 | 3 | from aalpy.SULs import AutomatonSUL 4 | from aalpy.learning_algs import run_stochastic_Lstar 5 | from aalpy.oracles import RandomWordEqOracle 6 | from aalpy.utils import generate_random_mdp, generate_random_smm 7 | 8 | automata_size = [5, 10, 15, 20, 30, 50, ] 9 | inputs_size = [2, 3, 5, 7, 9] 10 | outputs_size = [2, 5, 10, 15, 20] 
11 | inputs_size = [7] 12 | outputs_size = [15] 13 | 14 | 15 | def learn(mdp, type): 16 | input_al = mdp.get_input_alphabet() 17 | sul = AutomatonSUL(mdp) 18 | eq_oracle = RandomWordEqOracle(input_al, sul, num_walks=1000, min_walk_len=4, max_walk_len=20) 19 | return run_stochastic_Lstar(input_al, sul, eq_oracle, automaton_type=type, cex_processing=None, print_level=0, 20 | return_data=True) 21 | 22 | 23 | num_queries_mdp = [] 24 | num_queries_smm = [] 25 | 26 | # i = 0 27 | # for p in product(automata_size, inputs_size, outputs_size): 28 | # num_states, num_inputs, num_outputs = p 29 | # if num_inputs > num_outputs: 30 | # continue 31 | # 32 | # print(i) 33 | # i += 1 34 | # 35 | # # random_mdp = generate_random_mdp(num_states=num_states, input_size=num_inputs, output_size=num_outputs) 36 | # random_smm = generate_random_mdp(num_states=num_states, input_size=num_inputs, output_size=num_outputs) 37 | # # random_smm = random_smm.to_mdp() 38 | # 39 | # _, mdp_data = learn(random_smm, 'mdp') 40 | # _, smm_data = learn(random_smm, 'smm') 41 | # 42 | # num_queries_mdp.append(mdp_data['queries_learning'] + mdp_data['queries_eq_oracle']) 43 | # num_queries_smm.append(smm_data['queries_learning'] + smm_data['queries_eq_oracle']) 44 | 45 | print(num_queries_mdp) 46 | print(num_queries_smm) 47 | 48 | num_queries_mdp_3_7 = [77115, 85440, 36326, 132485, 250055, 343526] 49 | num_queries_smm_3_7 = [23511, 14287, 17106, 55482, 50935, 99730] 50 | 51 | num_queries_mdp_4_10 = [54654, 265240, 245245, 238944, 320026, 1170086] 52 | num_queries_smm_4_10 = [7122, 42637, 32431, 51821, 75703, 204150] 53 | 54 | num_queries_mdp_7_15 = [237731, 397386, 924637, 2066456, 4117725, 4774201] 55 | num_queries_smm_7_15 = [15733, 19148, 52214, 106436, 157414, 605491] 56 | 57 | # mdp was used for data gen 58 | 59 | mdp_base_num_queries_mdp_3_7 = [6515, 13659, 42904, 31798, 129641, 128275] 60 | mdp_base_num_queries_smm_3_7 = [7383, 16985, 55428, 78679, 230936, 479493] 61 | 62 | 
mdp_base_num_queries_mdp_4_10 = [10110, 14032, 61815, 35108, 61489, 115270] 63 | mdp_base_num_queries_smm_4_10 = [8284, 11257, 38399, 49637, 74183, 145063] 64 | 65 | mdp_base_num_queries_mdp_7_15 = [7611, 16438, 12564, 33355, 76704, 348364] 66 | mdp_base_num_queries_smm_7_15 = [12132, 29568, 36804, 60763, 95613, 348675] 67 | 68 | pairs_smm_base = [(num_queries_mdp_3_7, num_queries_smm_3_7), (num_queries_mdp_4_10, num_queries_smm_4_10), 69 | (num_queries_mdp_7_15, num_queries_smm_7_15)] 70 | 71 | pairs_mdp_base = [(mdp_base_num_queries_mdp_3_7, mdp_base_num_queries_smm_3_7), 72 | (mdp_base_num_queries_mdp_4_10, mdp_base_num_queries_smm_4_10), 73 | (mdp_base_num_queries_mdp_7_15, mdp_base_num_queries_smm_7_15)] 74 | # 75 | for mdp, smm in pairs_mdp_base: 76 | save = [] 77 | for m, s in zip(mdp, smm): 78 | save.append(100 - round(s / m * 100, 2)) 79 | print(save) 80 | 81 | smm_save_3_7 = [69.51, 83.28, 52.91, 58.12, 79.63, 70.97] 82 | smm_save_4_10 = [86.97, 83.93, 86.78, 78.31, 76.34, 82.55] 83 | smm_save_7_15 = [93.38, 95.18, 94.35, 94.85, 96.18, 87.32] 84 | 85 | # 86 | # def plot_queries_smm_as_base(): 87 | # import matplotlib.pyplot as plt 88 | # 89 | # plt.plot(automata_size, smm_save_3_7, label='I:3,O: 7)') 90 | # plt.plot(automata_size, smm_save_4_10, label='I:4,O: 10') 91 | # plt.plot(automata_size, smm_save_7_15, label='I:7,O: 15') 92 | # 93 | # plt.xticks(automata_size) 94 | # 95 | # plt.grid() 96 | # plt.legend() 97 | # plt.show() 98 | 99 | # plot_queries_smm_as_base() 100 | -------------------------------------------------------------------------------- /Benchmarking/stochastic_evaluation.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DES-Lab/AALpy/177862fb505df3f92a945861ceb1977962327327/Benchmarking/stochastic_evaluation.PNG -------------------------------------------------------------------------------- /DotModels/Angluin_Mealy.dot: 
-------------------------------------------------------------------------------- 1 | digraph Angluin_Mealy { 2 | s0 [label="s0"]; 3 | s1 [label="s1"]; 4 | s2 [label="s2"]; 5 | s3 [label="s3"]; 6 | s0 -> s2 [label="a/0"]; 7 | s0 -> s1 [label="b/0"]; 8 | s1 -> s3 [label="a/0"]; 9 | s1 -> s0 [label="b/1"]; 10 | s2 -> s0 [label="a/1"]; 11 | s2 -> s3 [label="b/0"]; 12 | s3 -> s1 [label="a/0"]; 13 | s3 -> s2 [label="b/0"]; 14 | __start0 [label="", shape=none]; 15 | __start0 -> s0 [label=""]; 16 | } 17 | -------------------------------------------------------------------------------- /DotModels/Angluin_Moore.dot: -------------------------------------------------------------------------------- 1 | digraph Angluin_Moore { 2 | s0 [label="s0|1", shape=record, style=rounded]; 3 | s1 [label="s1|0", shape=record, style=rounded]; 4 | s2 [label="s2|0", shape=record, style=rounded]; 5 | s3 [label="s3|0", shape=record, style=rounded]; 6 | s0 -> s2 [label=a]; 7 | s0 -> s1 [label=b]; 8 | s1 -> s3 [label=a]; 9 | s1 -> s0 [label=b]; 10 | s2 -> s0 [label=a]; 11 | s2 -> s3 [label=b]; 12 | s3 -> s1 [label=a]; 13 | s3 -> s2 [label=b]; 14 | __start0 [label="", shape=none]; 15 | __start0 -> s0 [label=""]; 16 | } 17 | -------------------------------------------------------------------------------- /DotModels/Bluetooth/CC2640R2-no-pairing-req.dot: -------------------------------------------------------------------------------- 1 | digraph CC2640R2 { 2 | s0 [label=s0]; 3 | s1 [label=s1]; 4 | s2 [label=s2]; 5 | s3 [label=s3]; 6 | s4 [label=s4]; 7 | s5 [label=s5]; 8 | s0 -> s0 [label="scan_req/Adv"]; 9 | s0 -> s1 [label="connection_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_LENGTH_REQ"]; 10 | s0 -> s0 [label="length_req/Empty"]; 11 | s0 -> s0 [label="length_rsp/Empty"]; 12 | s0 -> s0 [label="feature_rsp/Empty"]; 13 | s0 -> s0 [label="feature_req/Empty"]; 14 | s0 -> s0 [label="version_req/Empty"]; 15 | s0 -> s0 [label="mtu_req/Empty"]; 16 | s1 -> s0 [label="scan_req/Adv"]; 17 | s1 -> s1 
[label="connection_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_LENGTH_REQ"]; 18 | s1 -> s1 [label="length_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_LENGTH_RSP"]; 19 | s1 -> s3 [label="length_rsp/BTLE|BTLE_DATA"]; 20 | s1 -> s1 [label="feature_rsp/BTLE|BTLE_DATA"]; 21 | s1 -> s1 [label="feature_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_FEATURE_RSP"]; 22 | s1 -> s2 [label="version_req/BTLE|BTLE_DATA"]; 23 | s1 -> s1 [label="mtu_req/ATT_Exchange_MTU_Response|ATT_Hdr|BTLE|BTLE_DATA|L2CAP_Hdr"]; 24 | s2 -> s0 [label="scan_req/Adv"]; 25 | s2 -> s1 [label="connection_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_LENGTH_REQ"]; 26 | s2 -> s2 [label="length_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_LENGTH_RSP"]; 27 | s2 -> s5 [label="length_rsp/BTLE|BTLE_CTRL|BTLE_DATA|LL_VERSION_IND"]; 28 | s2 -> s2 [label="feature_rsp/BTLE|BTLE_DATA"]; 29 | s2 -> s2 [label="feature_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_FEATURE_RSP"]; 30 | s2 -> s4 [label="version_req/BTLE|BTLE_DATA"]; 31 | s2 -> s2 [label="mtu_req/ATT_Exchange_MTU_Response|ATT_Hdr|BTLE|BTLE_DATA|L2CAP_Hdr"]; 32 | s3 -> s0 [label="scan_req/Adv"]; 33 | s3 -> s1 [label="connection_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_LENGTH_REQ"]; 34 | s3 -> s3 [label="length_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_LENGTH_RSP"]; 35 | s3 -> s3 [label="length_rsp/BTLE|BTLE_DATA"]; 36 | s3 -> s3 [label="feature_rsp/BTLE|BTLE_DATA"]; 37 | s3 -> s3 [label="feature_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_FEATURE_RSP"]; 38 | s3 -> s5 [label="version_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_VERSION_IND"]; 39 | s3 -> s3 [label="mtu_req/ATT_Exchange_MTU_Response|ATT_Hdr|BTLE|BTLE_DATA|L2CAP_Hdr"]; 40 | s4 -> s0 [label="scan_req/Adv"]; 41 | s4 -> s1 [label="connection_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_LENGTH_REQ"]; 42 | s4 -> s4 [label="length_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_LENGTH_RSP"]; 43 | s4 -> s5 [label="length_rsp/BTLE|BTLE_CTRL|BTLE_DATA|LL_UNKNOWN_RSP|LL_VERSION_IND"]; 44 | s4 -> s4 [label="feature_rsp/BTLE|BTLE_DATA"]; 45 | s4 -> s4 [label="feature_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_FEATURE_RSP"]; 46 | s4 -> s4 
[label="version_req/BTLE|BTLE_DATA"]; 47 | s4 -> s4 [label="mtu_req/ATT_Exchange_MTU_Response|ATT_Hdr|BTLE|BTLE_DATA|L2CAP_Hdr"]; 48 | s5 -> s0 [label="scan_req/Adv"]; 49 | s5 -> s1 [label="connection_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_LENGTH_REQ"]; 50 | s5 -> s5 [label="length_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_LENGTH_RSP"]; 51 | s5 -> s5 [label="length_rsp/BTLE|BTLE_DATA"]; 52 | s5 -> s5 [label="feature_rsp/BTLE|BTLE_DATA"]; 53 | s5 -> s5 [label="feature_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_FEATURE_RSP"]; 54 | s5 -> s5 [label="version_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_UNKNOWN_RSP"]; 55 | s5 -> s5 [label="mtu_req/ATT_Exchange_MTU_Response|ATT_Hdr|BTLE|BTLE_DATA|L2CAP_Hdr"]; 56 | __start0 [label="", shape=none]; 57 | __start0 -> s0 [label=""]; 58 | } 59 | -------------------------------------------------------------------------------- /DotModels/Bluetooth/CC2650.dot: -------------------------------------------------------------------------------- 1 | digraph "cc2650" { 2 | s0 [label=s0]; 3 | s1 [label=s1]; 4 | s2 [label=s2]; 5 | s3 [label=s3]; 6 | s4 [label=s4]; 7 | s0 -> s0 [label="scan_req/Adv"]; 8 | s0 -> s1 [label="connection_req/BTLE|BTLE_DATA"]; 9 | s0 -> s0 [label="length_req/Empty"]; 10 | s0 -> s0 [label="length_rsp/Empty"]; 11 | s0 -> s0 [label="feature_rsp/Empty"]; 12 | s0 -> s0 [label="feature_req/Empty"]; 13 | s0 -> s0 [label="version_req/Empty"]; 14 | s0 -> s0 [label="mtu_req/Empty"]; 15 | s0 -> s0 [label="pairing_req/Empty"]; 16 | s1 -> s0 [label="scan_req/Adv"]; 17 | s1 -> s1 [label="connection_req/BTLE|BTLE_DATA"]; 18 | s1 -> s1 [label="length_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_UNKNOWN_RSP"]; 19 | s1 -> s1 [label="length_rsp/BTLE|BTLE_CTRL|BTLE_DATA|LL_UNKNOWN_RSP"]; 20 | s1 -> s1 [label="feature_rsp/BTLE|BTLE_DATA"]; 21 | s1 -> s1 [label="feature_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_FEATURE_RSP"]; 22 | s1 -> s3 [label="version_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_VERSION_IND"]; 23 | s1 -> s1 [label="mtu_req/ATT_Exchange_MTU_Response|ATT_Hdr|BTLE|BTLE_DATA|L2CAP_Hdr"]; 24 | 
s1 -> s2 [label="pairing_req/BTLE|BTLE_DATA|L2CAP_Hdr|SM_Hdr|SM_Pairing_Response"]; 25 | s2 -> s0 [label="scan_req/Adv"]; 26 | s2 -> s1 [label="connection_req/BTLE|BTLE_DATA"]; 27 | s2 -> s2 [label="length_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_UNKNOWN_RSP"]; 28 | s2 -> s2 [label="length_rsp/BTLE|BTLE_CTRL|BTLE_DATA|LL_UNKNOWN_RSP"]; 29 | s2 -> s2 [label="feature_rsp/BTLE|BTLE_DATA"]; 30 | s2 -> s2 [label="feature_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_FEATURE_RSP"]; 31 | s2 -> s4 [label="version_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_VERSION_IND"]; 32 | s2 -> s2 [label="mtu_req/ATT_Exchange_MTU_Response|ATT_Hdr|BTLE|BTLE_DATA|L2CAP_Hdr"]; 33 | s2 -> s1 [label="pairing_req/BTLE|BTLE_DATA|L2CAP_Hdr|SM_Failed|SM_Hdr"]; 34 | s3 -> s0 [label="scan_req/Adv"]; 35 | s3 -> s1 [label="connection_req/BTLE|BTLE_DATA"]; 36 | s3 -> s3 [label="length_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_UNKNOWN_RSP"]; 37 | s3 -> s3 [label="length_rsp/BTLE|BTLE_CTRL|BTLE_DATA|LL_UNKNOWN_RSP"]; 38 | s3 -> s3 [label="feature_rsp/BTLE|BTLE_DATA"]; 39 | s3 -> s3 [label="feature_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_FEATURE_RSP"]; 40 | s3 -> s3 [label="version_req/BTLE|BTLE_DATA"]; 41 | s3 -> s3 [label="mtu_req/ATT_Exchange_MTU_Response|ATT_Hdr|BTLE|BTLE_DATA|L2CAP_Hdr"]; 42 | s3 -> s4 [label="pairing_req/BTLE|BTLE_DATA|L2CAP_Hdr|SM_Hdr|SM_Pairing_Response"]; 43 | s4 -> s0 [label="scan_req/Adv"]; 44 | s4 -> s1 [label="connection_req/BTLE|BTLE_DATA"]; 45 | s4 -> s4 [label="length_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_UNKNOWN_RSP"]; 46 | s4 -> s4 [label="length_rsp/BTLE|BTLE_CTRL|BTLE_DATA|LL_UNKNOWN_RSP"]; 47 | s4 -> s4 [label="feature_rsp/BTLE|BTLE_DATA"]; 48 | s4 -> s4 [label="feature_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_FEATURE_RSP"]; 49 | s4 -> s4 [label="version_req/BTLE|BTLE_DATA"]; 50 | s4 -> s4 [label="mtu_req/ATT_Exchange_MTU_Response|ATT_Hdr|BTLE|BTLE_DATA|L2CAP_Hdr"]; 51 | s4 -> s3 [label="pairing_req/BTLE|BTLE_DATA|L2CAP_Hdr|SM_Failed|SM_Hdr"]; 52 | __start0 [label="", shape=none]; 53 | __start0 -> s0 [label=""]; 54 | } 55 | 
-------------------------------------------------------------------------------- /DotModels/Bluetooth/CYBLE-416045-02.dot: -------------------------------------------------------------------------------- 1 | digraph "cyble-416045-02" { 2 | s0 [label=s0]; 3 | s1 [label=s1]; 4 | s2 [label=s2]; 5 | s0 -> s0 [label="scan_req/Adv"]; 6 | s0 -> s1 [label="connection_req/BTLE|BTLE_DATA"]; 7 | s0 -> s0 [label="length_req/Empty"]; 8 | s0 -> s0 [label="length_rsp/Empty"]; 9 | s0 -> s0 [label="feature_rsp/Empty"]; 10 | s0 -> s0 [label="feature_req/Empty"]; 11 | s0 -> s0 [label="version_req/Empty"]; 12 | s0 -> s0 [label="mtu_req/Empty"]; 13 | s0 -> s0 [label="pairing_req/Empty"]; 14 | s1 -> s0 [label="scan_req/Adv"]; 15 | s1 -> s1 [label="connection_req/BTLE|BTLE_DATA"]; 16 | s1 -> s1 [label="length_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_UNKNOWN_RSP"]; 17 | s1 -> s1 [label="length_rsp/BTLE|BTLE_CTRL|BTLE_DATA|LL_UNKNOWN_RSP"]; 18 | s1 -> s1 [label="feature_rsp/BTLE|BTLE_CTRL|BTLE_DATA|LL_REJECT_IND"]; 19 | s1 -> s1 [label="feature_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_FEATURE_RSP"]; 20 | s1 -> s2 [label="version_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_VERSION_IND"]; 21 | s1 -> s1 [label="mtu_req/ATT_Exchange_MTU_Response|ATT_Hdr|BTLE|BTLE_DATA|L2CAP_Hdr"]; 22 | s1 -> s1 [label="pairing_req/BTLE|BTLE_DATA"]; 23 | s2 -> s0 [label="scan_req/Adv"]; 24 | s2 -> s1 [label="connection_req/BTLE|BTLE_DATA"]; 25 | s2 -> s2 [label="length_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_UNKNOWN_RSP"]; 26 | s2 -> s2 [label="length_rsp/BTLE|BTLE_CTRL|BTLE_DATA|LL_UNKNOWN_RSP"]; 27 | s2 -> s2 [label="feature_rsp/BTLE|BTLE_CTRL|BTLE_DATA|LL_REJECT_IND"]; 28 | s2 -> s2 [label="feature_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_FEATURE_RSP"]; 29 | s2 -> s2 [label="version_req/BTLE|BTLE_DATA"]; 30 | s2 -> s2 [label="mtu_req/ATT_Exchange_MTU_Response|ATT_Hdr|BTLE|BTLE_DATA|L2CAP_Hdr"]; 31 | s2 -> s2 [label="pairing_req/BTLE|BTLE_DATA"]; 32 | __start0 [label="", shape=none]; 33 | __start0 -> s0 [label=""]; 34 | } 35 | 
-------------------------------------------------------------------------------- /DotModels/Bluetooth/bluetooth_model.dot: -------------------------------------------------------------------------------- 1 | digraph LearnedModel { 2 | s0 [label=s0]; 3 | s1 [label=s1]; 4 | s2 [label=s2]; 5 | s0 -> s0 [label="i0/o0"]; 6 | s0 -> s1 [label="i1/o1"]; 7 | s0 -> s0 [label="i2/o2"]; 8 | s0 -> s0 [label="i3/o2"]; 9 | s0 -> s0 [label="i4/o2"]; 10 | s0 -> s0 [label="i5/o2"]; 11 | s0 -> s0 [label="i6/o2"]; 12 | s0 -> s0 [label="i7/o2"]; 13 | s0 -> s0 [label="i8/o2"]; 14 | s1 -> s0 [label="i0/o0"]; 15 | s1 -> s1 [label="i1/o1"]; 16 | s1 -> s1 [label="i2/o3"]; 17 | s1 -> s1 [label="i3/o3"]; 18 | s1 -> s1 [label="i4/o4"]; 19 | s1 -> s1 [label="i5/o5"]; 20 | s1 -> s2 [label="i6/o6"]; 21 | s1 -> s1 [label="i7/o7"]; 22 | s1 -> s1 [label="i8/o8"]; 23 | s2 -> s0 [label="i0/o0"]; 24 | s2 -> s1 [label="i1/o1"]; 25 | s2 -> s2 [label="i2/o3"]; 26 | s2 -> s2 [label="i3/o3"]; 27 | s2 -> s2 [label="i4/o4"]; 28 | s2 -> s2 [label="i5/o5"]; 29 | s2 -> s2 [label="i6/o1"]; 30 | s2 -> s2 [label="i7/o7"]; 31 | s2 -> s2 [label="i8/o8"]; 32 | __start0 [label="", shape=none]; 33 | __start0 -> s0 [label=""]; 34 | } -------------------------------------------------------------------------------- /DotModels/Bluetooth/bluetooth_reduced.dot: -------------------------------------------------------------------------------- 1 | digraph LearnedModel { 2 | s0 [label=s0]; 3 | s1 [label=s1]; 4 | s2 [label=s2]; 5 | s0 -> s0 [label="scan_req/Adv"]; 6 | s0 -> s1 [label="connection_req/BTLE|BTLE_DATA"]; 7 | s0 -> s0 [label="length_req/Empty"]; 8 | s0 -> s0 [label="length_rsp/Empty"]; 9 | s0 -> s0 [label="feature_rsp/Empty"]; 10 | s0 -> s0 [label="feature_req/Empty"]; 11 | s0 -> s0 [label="version_req/Empty"]; 12 | s0 -> s0 [label="mtu_req/Empty"]; 13 | s0 -> s0 [label="pairing_req/Empty"]; 14 | s1 -> s0 [label="scan_req/Adv"]; 15 | s1 -> s1 [label="connection_req/BTLE|BTLE_DATA"]; 16 | s1 -> s1 
[label="length_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_UNKNOWN_RSP"]; 17 | s1 -> s1 [label="length_rsp/BTLE|BTLE_CTRL|BTLE_DATA|LL_UNKNOWN_RSP"]; 18 | s1 -> s1 [label="feature_rsp/BTLE|BTLE_CTRL|BTLE_DATA|LL_REJECT_IND"]; 19 | s1 -> s1 [label="feature_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_FEATURE_RSP"]; 20 | s1 -> s2 [label="version_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_VERSION_IND"]; 21 | s1 -> s1 [label="mtu_req/ATT_Exchange_MTU_Response|ATT_Hdr|BTLE|BTLE_DATA|L2CAP_Hdr"]; 22 | s1 -> s1 [label="pairing_req/BTLE|BTLE_DATA|L2CAP_Hdr|SM_Failed|SM_Hdr"]; 23 | s2 -> s0 [label="scan_req/Adv"]; 24 | s2 -> s1 [label="connection_req/BTLE|BTLE_DATA"]; 25 | s2 -> s2 [label="length_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_UNKNOWN_RSP"]; 26 | s2 -> s2 [label="length_rsp/BTLE|BTLE_CTRL|BTLE_DATA|LL_UNKNOWN_RSP"]; 27 | s2 -> s2 [label="feature_rsp/BTLE|BTLE_CTRL|BTLE_DATA|LL_REJECT_IND"]; 28 | s2 -> s2 [label="feature_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_FEATURE_RSP"]; 29 | s2 -> s2 [label="version_req/BTLE|BTLE_DATA"]; 30 | s2 -> s2 [label="mtu_req/ATT_Exchange_MTU_Response|ATT_Hdr|BTLE|BTLE_DATA|L2CAP_Hdr"]; 31 | s2 -> s2 [label="pairing_req/BTLE|BTLE_DATA|L2CAP_Hdr|SM_Failed|SM_Hdr"]; 32 | __start0 [label="", shape=none]; 33 | __start0 -> s0 [label=""]; 34 | } -------------------------------------------------------------------------------- /DotModels/Bluetooth/cc2652r1.dot: -------------------------------------------------------------------------------- 1 | digraph "CC2652R1" { 2 | s0 [label=s0]; 3 | s1 [label=s1]; 4 | s2 [label=s2]; 5 | s3 [label=s3]; 6 | s0 -> s0 [label="length_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_LENGTH_RSP"]; 7 | s0 -> s0 [label="length_rsp/BTLE|BTLE_DATA"]; 8 | s0 -> s2 [label="feature_rsp/BTLE|BTLE_CTRL|BTLE_DATA|LL_LENGTH_REQ"]; 9 | s0 -> s0 [label="feature_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_FEATURE_RSP"]; 10 | s0 -> s0 [label="version_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_VERSION_IND"]; 11 | s0 -> s0 [label="mtu_req/ATT_Exchange_MTU_Response|ATT_Hdr|BTLE|BTLE_DATA|L2CAP_Hdr"]; 12 | s0 -> s1 
[label="pairing_req/BTLE|BTLE_DATA|L2CAP_Hdr|SM_Hdr|SM_Pairing_Response"]; 13 | s1 -> s1 [label="length_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_LENGTH_RSP"]; 14 | s1 -> s1 [label="length_rsp/BTLE|BTLE_DATA"]; 15 | s1 -> s3 [label="feature_rsp/BTLE|BTLE_CTRL|BTLE_DATA|LL_LENGTH_REQ"]; 16 | s1 -> s1 [label="feature_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_FEATURE_RSP"]; 17 | s1 -> s1 [label="version_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_VERSION_IND"]; 18 | s1 -> s1 [label="mtu_req/ATT_Exchange_MTU_Response|ATT_Hdr|BTLE|BTLE_DATA|L2CAP_Hdr"]; 19 | s1 -> s0 [label="pairing_req/BTLE|BTLE_DATA|L2CAP_Hdr|SM_Failed|SM_Hdr"]; 20 | s2 -> s2 [label="length_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_LENGTH_RSP"]; 21 | s2 -> s0 [label="length_rsp/BTLE|BTLE_DATA"]; 22 | s2 -> s2 [label="feature_rsp/BTLE|BTLE_DATA"]; 23 | s2 -> s2 [label="feature_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_FEATURE_RSP"]; 24 | s2 -> s2 [label="version_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_VERSION_IND"]; 25 | s2 -> s2 [label="mtu_req/ATT_Exchange_MTU_Response|ATT_Hdr|BTLE|BTLE_DATA|L2CAP_Hdr"]; 26 | s2 -> s3 [label="pairing_req/BTLE|BTLE_DATA|L2CAP_Hdr|SM_Hdr|SM_Pairing_Response"]; 27 | s3 -> s3 [label="length_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_LENGTH_RSP"]; 28 | s3 -> s1 [label="length_rsp/BTLE|BTLE_DATA"]; 29 | s3 -> s3 [label="feature_rsp/BTLE|BTLE_DATA"]; 30 | s3 -> s3 [label="feature_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_FEATURE_RSP"]; 31 | s3 -> s3 [label="version_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_VERSION_IND"]; 32 | s3 -> s3 [label="mtu_req/ATT_Exchange_MTU_Response|ATT_Hdr|BTLE|BTLE_DATA|L2CAP_Hdr"]; 33 | s3 -> s2 [label="pairing_req/BTLE|BTLE_DATA|L2CAP_Hdr|SM_Failed|SM_Hdr"]; 34 | __start0 [label="", shape=none]; 35 | __start0 -> s0 [label=""]; 36 | } 37 | -------------------------------------------------------------------------------- /DotModels/Bluetooth/nRF52832.dot: -------------------------------------------------------------------------------- 1 | digraph "nRF52832" { 2 | s0 [label=s0]; 3 | s1 [label=s1]; 4 | s2 [label=s2]; 5 | s3 [label=s3]; 6 
| s4 [label=s4]; 7 | s0 -> s0 [label="scan_req/Adv"]; 8 | s0 -> s1 [label="connection_req/BTLE|BTLE_DATA|L2CAP_Hdr|Raw|SM_Hdr"]; 9 | s0 -> s0 [label="length_req/Empty"]; 10 | s0 -> s0 [label="length_rsp/Empty"]; 11 | s0 -> s0 [label="feature_rsp/Empty"]; 12 | s0 -> s0 [label="feature_req/Empty"]; 13 | s0 -> s0 [label="version_req/Empty"]; 14 | s0 -> s0 [label="mtu_req/Empty"]; 15 | s0 -> s0 [label="pairing_req/Empty"]; 16 | s1 -> s0 [label="scan_req/Adv"]; 17 | s1 -> s1 [label="connection_req/BTLE|BTLE_DATA|L2CAP_Hdr|Raw|SM_Hdr"]; 18 | s1 -> s1 [label="length_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_LENGTH_RSP"]; 19 | s1 -> s0 [label="length_rsp/BTLE|BTLE_DATA"]; 20 | s1 -> s1 [label="feature_rsp/BTLE|BTLE_CTRL|BTLE_DATA|LL_UNKNOWN_RSP"]; 21 | s1 -> s1 [label="feature_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_FEATURE_RSP"]; 22 | s1 -> s3 [label="version_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_VERSION_IND"]; 23 | s1 -> s2 [label="mtu_req/ATT_Exchange_MTU_Response|ATT_Hdr|BTLE|BTLE_DATA|L2CAP_Hdr"]; 24 | s1 -> s1 [label="pairing_req/BTLE|BTLE_DATA|L2CAP_Hdr|SM_Hdr|SM_Pairing_Response"]; 25 | s2 -> s0 [label="scan_req/Adv"]; 26 | s2 -> s1 [label="connection_req/BTLE|BTLE_DATA|L2CAP_Hdr|Raw|SM_Hdr"]; 27 | s2 -> s2 [label="length_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_LENGTH_RSP"]; 28 | s2 -> s0 [label="length_rsp/BTLE|BTLE_DATA"]; 29 | s2 -> s2 [label="feature_rsp/BTLE|BTLE_CTRL|BTLE_DATA|LL_UNKNOWN_RSP"]; 30 | s2 -> s2 [label="feature_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_FEATURE_RSP"]; 31 | s2 -> s4 [label="version_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_VERSION_IND"]; 32 | s2 -> s2 [label="mtu_req/ATT_Error_Response|ATT_Hdr|BTLE|BTLE_DATA|L2CAP_Hdr"]; 33 | s2 -> s2 [label="pairing_req/BTLE|BTLE_DATA|L2CAP_Hdr|SM_Hdr|SM_Pairing_Response"]; 34 | s3 -> s0 [label="scan_req/Adv"]; 35 | s3 -> s1 [label="connection_req/BTLE|BTLE_DATA|L2CAP_Hdr|Raw|SM_Hdr"]; 36 | s3 -> s3 [label="length_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_LENGTH_RSP"]; 37 | s3 -> s0 [label="length_rsp/BTLE|BTLE_DATA"]; 38 | s3 -> s3 
[label="feature_rsp/BTLE|BTLE_CTRL|BTLE_DATA|LL_UNKNOWN_RSP"]; 39 | s3 -> s3 [label="feature_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_FEATURE_RSP"]; 40 | s3 -> s3 [label="version_req/BTLE|BTLE_DATA"]; 41 | s3 -> s4 [label="mtu_req/ATT_Exchange_MTU_Response|ATT_Hdr|BTLE|BTLE_DATA|L2CAP_Hdr"]; 42 | s3 -> s3 [label="pairing_req/BTLE|BTLE_DATA|L2CAP_Hdr|SM_Hdr|SM_Pairing_Response"]; 43 | s4 -> s0 [label="scan_req/Adv"]; 44 | s4 -> s1 [label="connection_req/BTLE|BTLE_DATA|L2CAP_Hdr|Raw|SM_Hdr"]; 45 | s4 -> s4 [label="length_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_LENGTH_RSP"]; 46 | s4 -> s0 [label="length_rsp/BTLE|BTLE_DATA"]; 47 | s4 -> s4 [label="feature_rsp/BTLE|BTLE_CTRL|BTLE_DATA|LL_UNKNOWN_RSP"]; 48 | s4 -> s4 [label="feature_req/BTLE|BTLE_CTRL|BTLE_DATA|LL_FEATURE_RSP"]; 49 | s4 -> s4 [label="version_req/BTLE|BTLE_DATA"]; 50 | s4 -> s4 [label="mtu_req/ATT_Error_Response|ATT_Hdr|BTLE|BTLE_DATA|L2CAP_Hdr"]; 51 | s4 -> s4 [label="pairing_req/BTLE|BTLE_DATA|L2CAP_Hdr|SM_Hdr|SM_Pairing_Response"]; 52 | __start0 [label="", shape=none]; 53 | __start0 -> s0 [label=""]; 54 | } 55 | -------------------------------------------------------------------------------- /DotModels/MDPs/faulty_car_alarm.dot: -------------------------------------------------------------------------------- 1 | digraph faulty_car_alarm { 2 | q1_locked_closed [label="N"]; 3 | q2_locked_open [label="A"]; 4 | q3_locked_closed [label="A"]; 5 | q5_unlocked_closed [label="N"]; 6 | q6_unlocked_open [label="N"]; 7 | q7_locked_open [label="N"]; 8 | q4_faulty [label="N"]; 9 | q1_locked_closed -> q2_locked_open [label="d:1"]; 10 | q1_locked_closed -> q5_unlocked_closed [label="l:1"]; 11 | q2_locked_open -> q3_locked_closed [label="d:1"]; 12 | q2_locked_open -> q6_unlocked_open [label="l:1"]; 13 | q3_locked_closed -> q2_locked_open [label="d:1"]; 14 | q3_locked_closed -> q5_unlocked_closed [label="l:1"]; 15 | q5_unlocked_closed -> q6_unlocked_open [label="d:1"]; 16 | q5_unlocked_closed -> q1_locked_closed [label="l:1"]; 17 | 
q6_unlocked_open -> q5_unlocked_closed [label="d:1"]; 18 | q6_unlocked_open -> q7_locked_open [label="l:1"]; 19 | q7_locked_open -> q4_faulty [label="d:1"]; 20 | q7_locked_open -> q6_unlocked_open [label="l:1"]; 21 | q4_faulty -> q2_locked_open [label="d:0.9"]; 22 | q4_faulty -> q7_locked_open [label="d:0.1"]; 23 | q4_faulty -> q5_unlocked_closed [label="l:1"]; 24 | __start0 [label="", shape=none]; 25 | __start0 -> q1_locked_closed [label=""]; 26 | } 27 | -------------------------------------------------------------------------------- /DotModels/TLS/JSSE_1.8.0_25_server_regular.dot: -------------------------------------------------------------------------------- 1 | digraph g { 2 | __start0 [shape="none", label=""]; 3 | s0 [shape="circle", label="0"]; 4 | s1 [shape="circle", label="1"]; 5 | s2 [shape="circle", label="2"]; 6 | s3 [shape="circle", label="3"]; 7 | s4 [shape="circle", label="4"]; 8 | s5 [shape="circle", label="5"]; 9 | s6 [shape="circle", label="6"]; 10 | s7 [shape="circle", label="7"]; 11 | s8 [shape="circle", label="8"]; 12 | s8 -> s3 [label=Empty>]; 13 | s8 -> s2 [label=Alert Fatal (Unexpected message) / ConnectionClosed>]; 14 | s8 -> s2 [label=Alert Fatal (Handshake failure) / ConnectionClosed>]; 15 | s8 -> s8 [label=ServerHello / Certificate / ServerHelloDone>]; 16 | s8 -> s8 [label=Empty>]; 17 | s3 -> s3 [label=Empty>]; 18 | s3 -> s2 [label=Alert Fatal (Internal error) / ConnectionClosed>]; 19 | s3 -> s2 [label=Alert Fatal (Unexpected message) / ConnectionClosed>]; 20 | s3 -> s6 [label=ChangeCipherSpec / Finished>]; 21 | s3 -> s5 [label=Empty>]; 22 | s3 -> s4 [label=ServerHello / Certificate / ServerHelloDone>]; 23 | s2 -> s2 [label=ConnectionClosed>]; 24 | s1 -> s3 [label=Empty>]; 25 | s1 -> s2 [label=ChangeCipherSpecDecryption failed>]; 26 | s1 -> s2 [label=Alert Fatal (Unexpected message) / ConnectionClosed>]; 27 | s1 -> s1 [label=Empty>]; 28 | s0 -> s2 [label=Alert Fatal (Internal error) / ConnectionClosed>]; 29 | s0 -> s2 [label=Alert Fatal 
(Unexpected message) / ConnectionClosed>]; 30 | s0 -> s1 [label=ServerHello / Certificate / ServerHelloDone>]; 31 | s7 -> s2 [label=Alert Fatal (Bad record MAC) / ConnectionClosed>]; 32 | s7 -> s2 [label=Alert Fatal (Handshake failure) / ConnectionClosed>]; 33 | s6 -> s2 [label=Alert Fatal (Internal error) / ConnectionClosed>]; 34 | s6 -> s2 [label=Alert Fatal (Handshake failure) / ConnectionClosed>]; 35 | s6 -> s2 [label=Alert Fatal (Unexpected message) / ConnectionClosed>]; 36 | s6 -> s6 [label=ApplicationData>]; 37 | s6 -> s6 [label=Empty>]; 38 | s5 -> s2 [label=Alert Fatal (Internal error) / ConnectionClosed>]; 39 | s5 -> s2 [label=Alert Fatal (Unexpected message) / ConnectionClosed>]; 40 | s5 -> s8 [label=ServerHello / Certificate / ServerHelloDone>]; 41 | s5 -> s6 [label=ChangeCipherSpec / Finished>]; 42 | s5 -> s5 [label=Empty>]; 43 | s4 -> s3 [label=Empty>]; 44 | s4 -> s2 [label=Alert Fatal (Unexpected message) / ConnectionClosed>]; 45 | s4 -> s2 [label=Alert Fatal (Handshake failure) / ConnectionClosed>]; 46 | s4 -> s7 [label=Empty>]; 47 | s4 -> s4 [label=ServerHello / Certificate / ServerHelloDone>]; 48 | s4 -> s4 [label=Empty>]; 49 | __start0 -> s0 [label=Empty>]; 50 | } 51 | -------------------------------------------------------------------------------- /DotModels/TLS/OpenSSL_1.0.2_server_regular.dot: -------------------------------------------------------------------------------- 1 | digraph { 2 | 6 [label="s6"] 3 | 0 [label="s0"] 4 | 1 [label="s1"] 5 | 2 [label="s2"] 6 | 3 [label="s3"] 7 | 4 [label="s4"] 8 | 5 [label="s5"] 9 | 6 -> 4 [label="ApplicationData/ConnectionClosed"] 10 | 6 -> 5 [label="ApplicationDataEmpty/Empty"] 11 | 6 -> 4 [label="ChangeCipherSpec/Empty"] 12 | 6 -> 1 [label="ClientHelloRSA/ServerHello & Certificate & ServerHelloDone"] 13 | 6 -> 4 [label="ClientKeyExchange/ConnectionClosed"] 14 | 6 -> 4 [label="EmptyCertificate/ConnectionClosed"] 15 | 6 -> 4 [label="Finished/ConnectionClosed"] 16 | 0 -> 4 [label="ApplicationData/Alert 
Fatal (Unexpected message) & ConnectionClosed"] 17 | 0 -> 0 [label="ApplicationDataEmpty/Empty"] 18 | 0 -> 4 [label="ChangeCipherSpec/Alert Fatal (Unexpected message) & ConnectionClosed"] 19 | 0 -> 4 [label="ClientHelloRSA/Alert Fatal (Unexpected message) & ConnectionClosed"] 20 | 0 -> 4 [label="ClientKeyExchange/Alert Fatal (Unexpected message) & ConnectionClosed"] 21 | 0 -> 4 [label="EmptyCertificate/Alert Fatal (Unexpected message) & ConnectionClosed"] 22 | 0 -> 3 [label="Finished/ChangeCipherSpec & Finished"] 23 | 1 -> 4 [label="ApplicationData/Alert Fatal (Unexpected message) & ConnectionClosed"] 24 | 1 -> 1 [label="ApplicationDataEmpty/Empty"] 25 | 1 -> 4 [label="ChangeCipherSpec/Alert Fatal (Unexpected message) & ConnectionClosed"] 26 | 1 -> 4 [label="ClientHelloRSA/Alert Fatal (Unexpected message) & ConnectionClosed"] 27 | 1 -> 2 [label="ClientKeyExchange/Empty"] 28 | 1 -> 4 [label="EmptyCertificate/Alert Fatal (Unexpected message) & ConnectionClosed"] 29 | 1 -> 4 [label="Finished/Alert Fatal (Unexpected message) & ConnectionClosed"] 30 | 2 -> 4 [label="ApplicationData/Alert Fatal (Unexpected message) & ConnectionClosed"] 31 | 2 -> 2 [label="ApplicationDataEmpty/Empty"] 32 | 2 -> 0 [label="ChangeCipherSpec/Empty"] 33 | 2 -> 4 [label="ClientHelloRSA/Alert Fatal (Unexpected message) & ConnectionClosed"] 34 | 2 -> 4 [label="ClientKeyExchange/Alert Fatal (Unexpected message) & ConnectionClosed"] 35 | 2 -> 4 [label="EmptyCertificate/Alert Fatal (Unexpected message) & ConnectionClosed"] 36 | 2 -> 4 [label="Finished/Alert Fatal (Unexpected message) & ConnectionClosed"] 37 | 3 -> 4 [label="ApplicationData/ApplicationData & ConnectionClosed"] 38 | 3 -> 3 [label="ApplicationDataEmpty/Empty"] 39 | 3 -> 4 [label="ChangeCipherSpec/Alert Fatal (Unexpected message) & ConnectionClosed"] 40 | 3 -> 4 [label="ClientHelloRSA/Alert Fatal (Handshake failure) & ConnectionClosed"] 41 | 3 -> 4 [label="ClientKeyExchange/Alert Fatal (Unexpected message) & ConnectionClosed"] 42 | 3 -> 
4 [label="EmptyCertificate/Alert Fatal (Unexpected message) & ConnectionClosed"] 43 | 3 -> 4 [label="Finished/Alert Fatal (Unexpected message) & ConnectionClosed"] 44 | 4 -> 4 [label="ApplicationData/ConnectionClosed"] 45 | 4 -> 4 [label="ApplicationDataEmpty/ConnectionClosed"] 46 | 4 -> 4 [label="ChangeCipherSpec/ConnectionClosed"] 47 | 4 -> 4 [label="ClientHelloRSA/ConnectionClosed"] 48 | 4 -> 4 [label="ClientKeyExchange/ConnectionClosed"] 49 | 4 -> 4 [label="EmptyCertificate/ConnectionClosed"] 50 | 4 -> 4 [label="Finished/ConnectionClosed"] 51 | 5 -> 4 [label="ApplicationData/ConnectionClosed"] 52 | 5 -> 4 [label="ApplicationDataEmpty/Empty"] 53 | 5 -> 4 [label="ChangeCipherSpec/ConnectionClosed"] 54 | 5 -> 4 [label="ClientHelloRSA/ConnectionClosed"] 55 | 5 -> 4 [label="ClientKeyExchange/ConnectionClosed"] 56 | 5 -> 4 [label="EmptyCertificate/ConnectionClosed"] 57 | 5 -> 4 [label="Finished/ConnectionClosed"] 58 | __start0 [label="", shape=none]; 59 | __start0 -> 6 [label=""]; 60 | } 61 | -------------------------------------------------------------------------------- /DotModels/TLS/miTLS_0.1.3_server_regular.dot: -------------------------------------------------------------------------------- 1 | digraph { 2 | 2 [label="s2"] 3 | 0 [label="s0"] 4 | 1 [label="s1"] 5 | 3 [label="s3"] 6 | 4 [label="s4"] 7 | 5 [label="s5"] 8 | 2 -> 5 [label="ApplicationData/ConnectionClosed"] 9 | 2 -> 5 [label="ApplicationDataEmpty/Alert Fatal (Illegal parameter) & ConnectionClosed"] 10 | 2 -> 5 [label="ChangeCipherSpec/ConnectionClosed"] 11 | 2 -> 1 [label="ClientHelloRSA/ServerHello & Certificate & ServerHelloDone"] 12 | 2 -> 5 [label="ClientKeyExchange/Alert Fatal (Unexpected message) & ConnectionClosed"] 13 | 2 -> 5 [label="EmptyCertificate/Alert Fatal (Unexpected message) & ConnectionClosed"] 14 | 2 -> 5 [label="Finished/Alert Fatal (Unexpected message) & ConnectionClosed"] 15 | 2 -> 5 [label="HeartbeatRequest/ConnectionClosed"] 16 | 0 -> 5 
[label="ApplicationData/ApplicationDataApplicationDataApplicationDataApplicationDataApplicationDataApplicationData & ConnectionClosed"] 17 | 0 -> 0 [label="ApplicationDataEmpty/Empty"] 18 | 0 -> 5 [label="ChangeCipherSpec/Alert Fatal (Unexpected message) & ConnectionClosed"] 19 | 0 -> 5 [label="ClientHelloRSA/Alert Fatal (Handshake failure) & ConnectionClosed"] 20 | 0 -> 5 [label="ClientKeyExchange/Alert Fatal (Unexpected message) & ConnectionClosed"] 21 | 0 -> 5 [label="EmptyCertificate/Alert Fatal (Unexpected message) & ConnectionClosed"] 22 | 0 -> 5 [label="Finished/Alert Fatal (Unexpected message) & ConnectionClosed"] 23 | 0 -> 5 [label="HeartbeatRequest/ConnectionClosed"] 24 | 1 -> 5 [label="ApplicationData/ConnectionClosed"] 25 | 1 -> 5 [label="ApplicationDataEmpty/Alert Fatal (Illegal parameter) & ConnectionClosed"] 26 | 1 -> 5 [label="ChangeCipherSpec/Alert Fatal (Unexpected message) & ConnectionClosed"] 27 | 1 -> 5 [label="ClientHelloRSA/Alert Fatal (Unexpected message) & ConnectionClosed"] 28 | 1 -> 3 [label="ClientKeyExchange/Empty"] 29 | 1 -> 5 [label="EmptyCertificate/Alert Fatal (Unexpected message) & ConnectionClosed"] 30 | 1 -> 5 [label="Finished/Alert Fatal (Unexpected message) & ConnectionClosed"] 31 | 1 -> 5 [label="HeartbeatRequest/ConnectionClosed"] 32 | 3 -> 5 [label="ApplicationData/ConnectionClosed"] 33 | 3 -> 5 [label="ApplicationDataEmpty/Alert Fatal (Illegal parameter) & ConnectionClosed"] 34 | 3 -> 4 [label="ChangeCipherSpec/Empty"] 35 | 3 -> 5 [label="ClientHelloRSA/Alert Fatal (Unexpected message) & ConnectionClosed"] 36 | 3 -> 5 [label="ClientKeyExchange/Alert Fatal (Unexpected message) & ConnectionClosed"] 37 | 3 -> 5 [label="EmptyCertificate/Alert Fatal (Unexpected message) & ConnectionClosed"] 38 | 3 -> 5 [label="Finished/Alert Fatal (Unexpected message) & ConnectionClosed"] 39 | 3 -> 5 [label="HeartbeatRequest/ConnectionClosed"] 40 | 4 -> 5 [label="ApplicationData/ConnectionClosed"] 41 | 4 -> 5 
[label="ApplicationDataEmpty/ConnectionClosed"] 42 | 4 -> 5 [label="ChangeCipherSpec/ConnectionClosed"] 43 | 4 -> 5 [label="ClientHelloRSA/Alert Fatal (Unexpected message) & ConnectionClosed"] 44 | 4 -> 5 [label="ClientKeyExchange/Alert Fatal (Unexpected message) & ConnectionClosed"] 45 | 4 -> 5 [label="EmptyCertificate/Alert Fatal (Unexpected message) & ConnectionClosed"] 46 | 4 -> 0 [label="Finished/ChangeCipherSpec & Finished"] 47 | 4 -> 5 [label="HeartbeatRequest/ConnectionClosed"] 48 | 5 -> 5 [label="ApplicationData/ConnectionClosed"] 49 | 5 -> 5 [label="ApplicationDataEmpty/ConnectionClosed"] 50 | 5 -> 5 [label="ChangeCipherSpec/ConnectionClosed"] 51 | 5 -> 5 [label="ClientHelloRSA/ConnectionClosed"] 52 | 5 -> 5 [label="ClientKeyExchange/ConnectionClosed"] 53 | 5 -> 5 [label="EmptyCertificate/ConnectionClosed"] 54 | 5 -> 5 [label="Finished/ConnectionClosed"] 55 | 5 -> 5 [label="HeartbeatRequest/ConnectionClosed"] 56 | __start0 [label="", shape=none]; 57 | __start0 -> 2 [label=""]; 58 | } 59 | -------------------------------------------------------------------------------- /DotModels/arithmetics.dot: -------------------------------------------------------------------------------- 1 | digraph learnedModel { 2 | s0 [label="s0", shape=circle]; 3 | s1 [label="s1", shape=doublecircle]; 4 | s0 -> s1 [label="1"]; 5 | s0 -> s0 [label="( / push(()"]; 6 | s1 -> s0 [label="+"]; 7 | s1 -> s1 [label=") / pop(()"]; 8 | __start0 [label="", shape=none]; 9 | __start0 -> s0 [label=""]; 10 | } -------------------------------------------------------------------------------- /DotModels/car_alarm.dot: -------------------------------------------------------------------------------- 1 | digraph car_alarm { 2 | q1_locked_closed [label="N"]; 3 | q2_locked_open [label="A"]; 4 | q3_locked_closed [label="A"]; 5 | q5_unlocked_closed [label="N"]; 6 | q6_unlocked_open [label="N"]; 7 | q7_locked_open [label="N"]; 8 | q1_locked_closed -> q2_locked_open [label="d"]; 9 | q1_locked_closed -> 
q5_unlocked_closed [label="l"]; 10 | q2_locked_open -> q3_locked_closed [label="d"]; 11 | q2_locked_open -> q6_unlocked_open [label="l"]; 12 | q3_locked_closed -> q2_locked_open [label="d"]; 13 | q3_locked_closed -> q5_unlocked_closed [label="l"]; 14 | q5_unlocked_closed -> q6_unlocked_open [label="d"]; 15 | q5_unlocked_closed -> q1_locked_closed [label="l"]; 16 | q6_unlocked_open -> q5_unlocked_closed [label="d"]; 17 | q6_unlocked_open -> q7_locked_open [label="l"]; 18 | q7_locked_open -> q1_locked_closed [label="d"]; 19 | q7_locked_open -> q6_unlocked_open [label="l"]; 20 | __start0 [label="", shape=none]; 21 | __start0 -> q1_locked_closed [label=""]; 22 | } 23 | -------------------------------------------------------------------------------- /DotModels/coffee_mealy.dot: -------------------------------------------------------------------------------- 1 | digraph coffee_mealy { 2 | s0 [label="s0"]; 3 | s1 [label="s1"]; 4 | s0 -> s1 [label="coin/ beep"]; 5 | s0 -> s0 [label="button/ init"]; 6 | s1 -> s1 [label="coin/ beep"]; 7 | s1 -> s0 [label="button/ coffee"]; 8 | __start0 [label="", shape=none]; 9 | __start0 -> s0 [label=""]; 10 | } 11 | -------------------------------------------------------------------------------- /DotModels/coffee_moore.dot: -------------------------------------------------------------------------------- 1 | digraph g { 2 | __start0 [label="" shape="none"]; 3 | __start0 -> A; 4 | A [shape="record", style="rounded", label="{ A | init }"]; 5 | B [shape="record", style="rounded", label="{ B | beep }"]; 6 | C [shape="doublecircle", style="rounded", label="{ C | coffee }"]; 7 | A -> B [label="coin"]; 8 | A -> A [label="button"]; 9 | B -> C [label="button"]; 10 | B -> B [label="coin"]; 11 | C -> B [label="coin"]; 12 | C -> A [label="button"]; 13 | } 14 | -------------------------------------------------------------------------------- /DotModels/mooreModel.dot: -------------------------------------------------------------------------------- 1 | 
digraph g { 2 | __start0 [label="" shape="none"]; 3 | __start0 -> A; 4 | 5 | A [shape="record", style="rounded", label="{ A | 0 }"]; 6 | B [shape="record", style="rounded", label="{ B | 0 }"]; 7 | C [shape="record", style="rounded", label="{ C | 0 }"]; 8 | D [shape="record", style="rounded", label="{ D | 0 }"]; 9 | E [shape="record", style="rounded", label="{ E | 0 }"]; 10 | F [shape="record", style="rounded", label="{ F | 0 }"]; 11 | G [shape="record", style="rounded", label="{ G | 0 }"]; 12 | H [shape="record", style="rounded", label="{ H | 0 }"]; 13 | I [shape="record", style="rounded", label="{ I | 1 }"]; 14 | 15 | 16 | A -> D [label="0"]; 17 | A -> B [label="1"]; 18 | B -> E [label="0"]; 19 | B -> C [label="1"]; 20 | C -> F [label="0"]; 21 | C -> C [label="1"]; 22 | D -> G [label="0"]; 23 | D -> E [label="1"]; 24 | E -> H [label="0"]; 25 | E -> F [label="1"]; 26 | F -> I [label="0"]; 27 | F -> F [label="1"]; 28 | G -> G [label="0"]; 29 | G -> H [label="1"]; 30 | H -> H [label="0"]; 31 | H -> I [label="1"]; 32 | I -> I [label="0"]; 33 | I -> I [label="1"]; 34 | 35 | } 36 | -------------------------------------------------------------------------------- /DotModels/onfsm_0.dot: -------------------------------------------------------------------------------- 1 | digraph g { 2 | __start0 [label="" shape="none"]; 3 | q0 [shape="circle" margin=0 label="q0"]; 4 | q1 [shape="circle" margin=0 label="q1"]; 5 | q0 -> q0 [label="a/1"]; 6 | q0 -> q1 [label="b/0"]; 7 | q1 -> q1 [label="b/O"]; 8 | q1 -> q1 [label="a/1"]; 9 | __start0 -> q0; 10 | } -------------------------------------------------------------------------------- /DotModels/onfsm_1.dot: -------------------------------------------------------------------------------- 1 | digraph g { 2 | __start0 [label="" shape="none"]; 3 | q0 [shape="circle" margin=0 label="q0"]; 4 | q2 [shape="circle" margin=0 label="q2"]; 5 | q1 [shape="circle" margin=0 label="q1"]; 6 | q0 -> q2 [label="b/1"]; 7 | q0 -> q0 [label="a/0"]; 8 | 
q0 -> q0 [label="b/2"]; 9 | q0 -> q2 [label="a/1"]; 10 | q2 -> q1 [label="b/0"]; 11 | q2 -> q0 [label="a/2"]; 12 | q1 -> q1 [label="a/2"]; 13 | q1 -> q2 [label="b/0"]; 14 | __start0 -> q1; 15 | } -------------------------------------------------------------------------------- /DotModels/onfsm_2.dot: -------------------------------------------------------------------------------- 1 | digraph g { 2 | __start0 [label="" shape="none"]; 3 | q1 [shape="circle" margin=0 label="q1"]; 4 | q0 [shape="circle" margin=0 label="q0"]; 5 | q2 [shape="circle" margin=0 label="q2"]; 6 | q2 -> q0 [label="b/O"]; 7 | q0 -> q1 [label="a/2"]; 8 | q2 -> q1 [label="b/0"]; 9 | q2 -> q2 [label="a/1"]; 10 | q1 -> q2 [label="b/0"]; 11 | q1 -> q0 [label="a/2"]; 12 | q0 -> q0 [label="b/0"]; 13 | __start0 -> q0; 14 | } -------------------------------------------------------------------------------- /DotModels/onfsm_3.dot: -------------------------------------------------------------------------------- 1 | digraph g { 2 | __start0 [label="" shape="none"]; 3 | q5 [shape="circle" margin=0 label="q5"]; 4 | q2 [shape="circle" margin=0 label="q2"]; 5 | q7 [shape="circle" margin=0 label="q7"]; 6 | q8 [shape="circle" margin=0 label="q8"]; 7 | q4 [shape="circle" margin=0 label="q4"]; 8 | q6 [shape="circle" margin=0 label="q6"]; 9 | q0 [shape="circle" margin=0 label="q0"]; 10 | q1 [shape="circle" margin=0 label="q1"]; 11 | q3 [shape="circle" margin=0 label="q3"]; 12 | q8 -> q6 [label="b/0"]; 13 | q5 -> q1 [label="b/O"]; 14 | q1 -> q3 [label="a/2"]; 15 | q7 -> q2 [label="b/0"]; 16 | q2 -> q8 [label="b/0"]; 17 | q8 -> q8 [label="a/1"]; 18 | q6 -> q5 [label="b/0"]; 19 | q2 -> q4 [label="a/2"]; 20 | q4 -> q4 [label="b/0"]; 21 | q1 -> q1 [label="b/0"]; 22 | q5 -> q5 [label="a/1"]; 23 | q3 -> q7 [label="b/0"]; 24 | q6 -> q0 [label="a/2"]; 25 | q0 -> q6 [label="a/2"]; 26 | q4 -> q2 [label="a/2"]; 27 | q3 -> q1 [label="a/2"]; 28 | q0 -> q0 [label="b/0"]; 29 | q7 -> q7 [label="a/1"]; 30 | __start0 -> q1; 31 | } 
-------------------------------------------------------------------------------- /DotModels/onfsm_4.dot: -------------------------------------------------------------------------------- 1 | digraph Angluin_Mealy { 2 | s0 [label=s0]; 3 | s1 [label=s1]; 4 | s2 [label=s2]; 5 | s3 [label=s3]; 6 | s0 -> s1 [label="a/x"]; 7 | s0 -> s2 [label="a/y"]; 8 | s0 -> s3 [label="a/z"]; 9 | 10 | s1 -> s1 [label="a/0"]; 11 | s2 -> s2 [label="a/0"]; 12 | s3 -> s3 [label="a/0"]; 13 | __start0 [label="", shape=none]; 14 | __start0 -> s0 [label=""]; 15 | } -------------------------------------------------------------------------------- /DotModels/onfsm_5.dot: -------------------------------------------------------------------------------- 1 | digraph Angluin_Mealy { 2 | s0 [label=s0]; 3 | s1 [label=s1]; 4 | s2 [label=s2]; 5 | s3 [label=s3]; 6 | s4 [label=s4]; 7 | s0 -> s1 [label="a/X"]; 8 | s0 -> s2 [label="a/Y"]; 9 | s0 -> s0 [label="b/Z"]; 10 | s1 -> s1 [label="a/X"]; 11 | s1 -> s3 [label="b/Z"]; 12 | s2 -> s2 [label="a/Y"]; 13 | s2 -> s4 [label="b/W"]; 14 | s3 -> s0 [label="a/V"]; 15 | s3 -> s3 [label="b/Z"]; 16 | s4 -> s0 [label="a/V"]; 17 | s4 -> s4 [label="b/W"]; 18 | __start0 [label="", shape=none]; 19 | __start0 -> s0 [label=""]; 20 | } -------------------------------------------------------------------------------- /DotModels/tomitaGrammars/tomita_1.dot: -------------------------------------------------------------------------------- 1 | digraph "tomita_1" { 2 | s0 [label=s0, shape=doublecircle]; 3 | s1 [label=s1]; 4 | s0 -> s1 [label=0]; 5 | s0 -> s0 [label=1]; 6 | s1 -> s1 [label=0]; 7 | s1 -> s1 [label=1]; 8 | __start0 [label="", shape=none]; 9 | __start0 -> s0 [label=""]; 10 | } 11 | -------------------------------------------------------------------------------- /DotModels/tomitaGrammars/tomita_2.dot: -------------------------------------------------------------------------------- 1 | digraph "tomita_2" { 2 | s0 [label=s0]; 3 | s1 [label=s1]; 4 | s2 [label=s2, 
shape=doublecircle]; 5 | s3 [label=s3]; 6 | s0 -> s3 [label=0]; 7 | s0 -> s1 [label=1]; 8 | s1 -> s2 [label=0]; 9 | s1 -> s3 [label=1]; 10 | s2 -> s3 [label=0]; 11 | s2 -> s1 [label=1]; 12 | s3 -> s3 [label=0]; 13 | s3 -> s3 [label=1]; 14 | __start0 [label="", shape=none]; 15 | __start0 -> s0 [label=""]; 16 | } 17 | -------------------------------------------------------------------------------- /DotModels/tomitaGrammars/tomita_3.dot: -------------------------------------------------------------------------------- 1 | digraph "tomita_3" { 2 | s0 [label=s0, shape=doublecircle]; 3 | s1 [label=s1, shape=doublecircle]; 4 | s2 [label=s2]; 5 | s3 [label=s3]; 6 | s4 [label=s4, shape=doublecircle]; 7 | s0 -> s0 [label=0]; 8 | s0 -> s1 [label=1]; 9 | s1 -> s2 [label=0]; 10 | s1 -> s0 [label=1]; 11 | s2 -> s4 [label=0]; 12 | s2 -> s3 [label=1]; 13 | s3 -> s3 [label=0]; 14 | s3 -> s3 [label=1]; 15 | s4 -> s2 [label=0]; 16 | s4 -> s4 [label=1]; 17 | __start0 [label="", shape=none]; 18 | __start0 -> s0 [label=""]; 19 | } 20 | -------------------------------------------------------------------------------- /DotModels/tomitaGrammars/tomita_4.dot: -------------------------------------------------------------------------------- 1 | digraph "tomita_4" { 2 | s0 [label=s0, shape=doublecircle]; 3 | s1 [label=s1, shape=doublecircle]; 4 | s2 [label=s2, shape=doublecircle]; 5 | s3 [label=s3]; 6 | s0 -> s1 [label=0]; 7 | s0 -> s0 [label=1]; 8 | s1 -> s2 [label=0]; 9 | s1 -> s0 [label=1]; 10 | s2 -> s3 [label=0]; 11 | s2 -> s0 [label=1]; 12 | s3 -> s3 [label=0]; 13 | s3 -> s3 [label=1]; 14 | __start0 [label="", shape=none]; 15 | __start0 -> s0 [label=""]; 16 | } 17 | -------------------------------------------------------------------------------- /DotModels/tomitaGrammars/tomita_5.dot: -------------------------------------------------------------------------------- 1 | digraph "tomita_5" { 2 | s0 [label=s0, shape=doublecircle]; 3 | s1 [label=s1]; 4 | s2 [label=s2]; 5 | s3 [label=s3]; 6 | s0 
-> s1 [label=0]; 7 | s0 -> s2 [label=1]; 8 | s1 -> s0 [label=0]; 9 | s1 -> s3 [label=1]; 10 | s2 -> s3 [label=0]; 11 | s2 -> s0 [label=1]; 12 | s3 -> s2 [label=0]; 13 | s3 -> s1 [label=1]; 14 | __start0 [label="", shape=none]; 15 | __start0 -> s0 [label=""]; 16 | } 17 | -------------------------------------------------------------------------------- /DotModels/tomitaGrammars/tomita_6.dot: -------------------------------------------------------------------------------- 1 | digraph "tomita_6" { 2 | s0 [label=s0, shape=doublecircle]; 3 | s1 [label=s1]; 4 | s2 [label=s2]; 5 | s0 -> s1 [label=0]; 6 | s0 -> s2 [label=1]; 7 | s1 -> s2 [label=0]; 8 | s1 -> s0 [label=1]; 9 | s2 -> s0 [label=0]; 10 | s2 -> s1 [label=1]; 11 | __start0 [label="", shape=none]; 12 | __start0 -> s0 [label=""]; 13 | } 14 | -------------------------------------------------------------------------------- /DotModels/tomitaGrammars/tomita_7.dot: -------------------------------------------------------------------------------- 1 | digraph "tomita_7" { 2 | s0 [label=s0, shape=doublecircle]; 3 | s1 [label=s1, shape=doublecircle]; 4 | s2 [label=s2, shape=doublecircle]; 5 | s3 [label=s3, shape=doublecircle]; 6 | s4 [label=s4]; 7 | s0 -> s0 [label=0]; 8 | s0 -> s1 [label=1]; 9 | s1 -> s2 [label=0]; 10 | s1 -> s1 [label=1]; 11 | s2 -> s2 [label=0]; 12 | s2 -> s3 [label=1]; 13 | s3 -> s4 [label=0]; 14 | s3 -> s3 [label=1]; 15 | s4 -> s4 [label=0]; 16 | s4 -> s4 [label=1]; 17 | __start0 [label="", shape=none]; 18 | __start0 -> s0 [label=""]; 19 | } 20 | -------------------------------------------------------------------------------- /LICENCE.txt: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 TU Graz - SAL Dependable Embedded Systems Lab (DES Lab), 4 | Edi Muskardin 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | 
in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. -------------------------------------------------------------------------------- /aalpy/SULs/AutomataSUL.py: -------------------------------------------------------------------------------- 1 | from aalpy.base import Automaton 2 | from aalpy.base import SUL 3 | 4 | 5 | class AutomatonSUL(SUL): 6 | def __init__(self, automaton: Automaton): 7 | super().__init__() 8 | self.automaton: Automaton = automaton 9 | 10 | def pre(self): 11 | self.automaton.reset_to_initial() 12 | 13 | def step(self, letter=None): 14 | return self.automaton.step(letter) 15 | 16 | def post(self): 17 | pass 18 | 19 | 20 | MealySUL = OnfsmSUL = StochasticMealySUL = DfaSUL = MooreSUL = MdpSUL = McSUL = SevpaSUL = AutomatonSUL 21 | -------------------------------------------------------------------------------- /aalpy/SULs/PyMethodSUL.py: -------------------------------------------------------------------------------- 1 | from aalpy.base import SUL 2 | 3 | 4 | class FunctionDecorator: 5 | """ 6 | Decorator of methods found in the SUL class. 
7 | """ 8 | 9 | def __init__(self, function, args=None): 10 | """ 11 | Args: 12 | 13 | function: function of the class to be learned 14 | 15 | args: arguments to be passed to the function. Either a single argument, or a list of arguments if 16 | function has more than one parameter. 17 | """ 18 | 19 | self.function = function 20 | self.args = None 21 | if args: 22 | self.args = [args] if not isinstance(args, (list, tuple)) else args 23 | 24 | def __repr__(self): 25 | if self.args: 26 | return f'{self.function.__name__}{self.args}' 27 | return self.function.__name__ 28 | 29 | 30 | class PyClassSUL(SUL): 31 | """ 32 | System under learning for inferring python classes. 33 | """ 34 | def __init__(self, python_class): 35 | """ 36 | Args: 37 | 38 | python_class: class to be learned 39 | """ 40 | super().__init__() 41 | self._class = python_class 42 | self.sul: object = None 43 | 44 | def pre(self): 45 | """ 46 | Do the reset by initializing the class again or call reset method of the class 47 | """ 48 | self.sul = self._class() 49 | 50 | def post(self): 51 | pass 52 | 53 | def step(self, letter): 54 | """ 55 | Executes the function(with arguments) found in letter against the SUL 56 | 57 | Args: 58 | 59 | letter: single input of type FunctionDecorator 60 | 61 | Returns: 62 | 63 | output of the function 64 | 65 | """ 66 | if letter.args: 67 | return getattr(self.sul, letter.function.__name__, letter)(*letter.args) 68 | return getattr(self.sul, letter.function.__name__, letter)() 69 | -------------------------------------------------------------------------------- /aalpy/SULs/RegexSUL.py: -------------------------------------------------------------------------------- 1 | from aalpy.base import SUL 2 | import re 3 | 4 | 5 | class RegexSUL(SUL): 6 | """ 7 | An example implementation of a system under learning that can be used to learn any regex expression. 8 | Note that the $ is added to the expression as in this SUL only exact matches are learned. 
9 | """ 10 | def __init__(self, regex: str): 11 | super().__init__() 12 | self.regex = regex if regex[-1] == '$' else regex + '$' 13 | self.string = "" 14 | 15 | def pre(self): 16 | self.string = "" 17 | pass 18 | 19 | def post(self): 20 | self.string = "" 21 | pass 22 | 23 | def step(self, letter): 24 | """ 25 | 26 | Args: 27 | 28 | letter: single element of the input alphabet 29 | 30 | Returns: 31 | 32 | Whether the current string (previous string + letter) is accepted 33 | 34 | """ 35 | if letter is not None: 36 | self.string += str(letter) 37 | return True if re.match(self.regex, self.string) else False 38 | -------------------------------------------------------------------------------- /aalpy/SULs/TomitaSUL.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | from aalpy.base import SUL 4 | 5 | 6 | class TomitaSUL(SUL): 7 | """ 8 | Tomita grammars are often used as a benchmark for automata-related challenges. Simple SUL that implements all 7 9 | Tomita grammars and enables their learning. 
10 | """ 11 | 12 | def __init__(self, tomita_level_fun): 13 | super().__init__() 14 | num_fun_map = {1: tomita_1, 2: tomita_2, 3: tomita_3, 4: tomita_4, 5: tomita_5, 6: tomita_6, 7: tomita_7, 15 | -3: not_tomita_3} 16 | assert tomita_level_fun in num_fun_map.keys() 17 | self.string = "" 18 | self.tomita_level = num_fun_map[tomita_level_fun] 19 | 20 | def pre(self): 21 | self.string = "" 22 | pass 23 | 24 | def post(self): 25 | self.string = "" 26 | pass 27 | 28 | def step(self, letter): 29 | if input: 30 | self.string += str(letter) 31 | return self.tomita_level(self.string) 32 | 33 | 34 | _not_tomita_3 = re.compile("((0|1)*0)*1(11)*(0(0|1)*1)*0(00)*(1(0|1)*)*$") 35 | 36 | 37 | def tomita_1(word): 38 | return "0" not in word 39 | 40 | 41 | def tomita_2(word): 42 | return word == "10" * (int(len(word) / 2)) 43 | 44 | 45 | def tomita_3(word): 46 | if not _not_tomita_3.match(word): 47 | return True 48 | return False 49 | 50 | 51 | def not_tomita_3(word): 52 | return not tomita_3(word) 53 | 54 | 55 | def tomita_4(word): 56 | return "000" not in word 57 | 58 | 59 | def tomita_5(word): 60 | return (word.count("0") % 2 == 0) and (word.count("1") % 2 == 0) 61 | 62 | 63 | def tomita_6(word): 64 | return ((word.count("0") - word.count("1")) % 3) == 0 65 | 66 | 67 | def tomita_7(word): 68 | return word.count("10") <= 1 69 | -------------------------------------------------------------------------------- /aalpy/SULs/__init__.py: -------------------------------------------------------------------------------- 1 | from .AutomataSUL import * 2 | from .PyMethodSUL import FunctionDecorator, PyClassSUL 3 | from .RegexSUL import RegexSUL 4 | from .TomitaSUL import TomitaSUL 5 | -------------------------------------------------------------------------------- /aalpy/__init__.py: -------------------------------------------------------------------------------- 1 | from .automata import ( 2 | Dfa, 3 | DfaState, 4 | MarkovChain, 5 | McState, 6 | Mdp, 7 | MdpState, 8 | MealyMachine, 9 | 
MealyState, 10 | MooreMachine, 11 | MooreState, 12 | NDMooreMachine, 13 | NDMooreState, 14 | Onfsm, 15 | OnfsmState, 16 | Sevpa, 17 | SevpaAlphabet, 18 | SevpaState, 19 | SevpaTransition, 20 | StochasticMealyMachine, 21 | StochasticMealyState, 22 | ) 23 | from .base import ( 24 | SUL, 25 | Automaton, 26 | AutomatonState, 27 | CacheTree, 28 | DeterministicAutomaton, 29 | Oracle, 30 | ) 31 | from .learning_algs import ( 32 | run_abstracted_ONFSM_Lstar, 33 | run_active_Alergia, 34 | run_active_RPNI, 35 | run_Alergia, 36 | run_JAlergia, 37 | run_KV, 38 | run_Lstar, 39 | run_non_det_Lstar, 40 | run_RPNI, 41 | run_stochastic_Lstar, 42 | run_GSM, 43 | run_PAPNI 44 | ) 45 | from .oracles import ( 46 | BreadthFirstExplorationEqOracle, 47 | CacheBasedEqOracle, 48 | KWayStateCoverageEqOracle, 49 | KWayTransitionCoverageEqOracle, 50 | PacOracle, 51 | PerfectKnowledgeEqOracle, 52 | ProvidedSequencesOracleWrapper, 53 | RandomWalkEqOracle, 54 | RandomWMethodEqOracle, 55 | RandomWordEqOracle, 56 | StatePrefixEqOracle, 57 | TransitionFocusOracle, 58 | UserInputEqOracle, 59 | WMethodEqOracle, 60 | kWayStateCoverageEqOracle, 61 | kWayTransitionCoverageEqOracle, 62 | ) 63 | from .SULs import ( 64 | AutomatonSUL, 65 | FunctionDecorator, 66 | PyClassSUL, 67 | RegexSUL, 68 | TomitaSUL 69 | ) 70 | from .utils import ( 71 | CharacterTokenizer, 72 | DataHandler, 73 | DelimiterTokenizer, 74 | IODelimiterTokenizer, 75 | bisimilar, 76 | compare_automata, 77 | convert_i_o_traces_for_RPNI, 78 | generate_random_deterministic_automata, 79 | generate_random_dfa, 80 | generate_random_markov_chain, 81 | generate_random_mdp, 82 | generate_random_mealy_machine, 83 | generate_random_moore_machine, 84 | generate_random_ONFSM, 85 | generate_random_sevpa, 86 | generate_random_smm, 87 | generate_test_cases, 88 | get_correct_prop_values, 89 | get_properties_file, 90 | load_automaton_from_file, 91 | make_input_complete, 92 | mdp_2_prism_format, 93 | model_check_experiment, 94 | model_check_properties, 95 | 
save_automaton_to_file, 96 | statistical_model_checking, 97 | visualize_automaton, 98 | ) 99 | -------------------------------------------------------------------------------- /aalpy/automata/Dfa.py: -------------------------------------------------------------------------------- 1 | from typing import Generic, Dict 2 | 3 | from aalpy.base import AutomatonState, DeterministicAutomaton 4 | from aalpy.base.Automaton import InputType 5 | 6 | 7 | class DfaState(AutomatonState, Generic[InputType]): 8 | """ 9 | Single state of a deterministic finite automaton. 10 | """ 11 | 12 | def __init__(self, state_id, is_accepting=False): 13 | super().__init__(state_id) 14 | self.transitions : Dict[InputType, DfaState] = dict() 15 | self.is_accepting = is_accepting 16 | 17 | @property 18 | def output(self): 19 | return self.is_accepting 20 | 21 | class Dfa(DeterministicAutomaton[DfaState[InputType]]): 22 | """ 23 | Deterministic finite automaton. 24 | """ 25 | 26 | def __init__(self, initial_state: DfaState, states): 27 | super().__init__(initial_state, states) 28 | 29 | def step(self, letter): 30 | """ 31 | Args: 32 | 33 | letter: single input that is looked up in the transition table of the DfaState 34 | 35 | Returns: 36 | 37 | True if the reached state is an accepting state, False otherwise 38 | """ 39 | if letter is not None: 40 | self.current_state = self.current_state.transitions[letter] 41 | return self.current_state.is_accepting 42 | 43 | def compute_characterization_set(self, char_set_init=None, online_suffix_closure=True, split_all_blocks=True, 44 | return_same_states=False, raise_warning=True): 45 | return super(Dfa, self).compute_characterization_set(char_set_init if char_set_init else [()], 46 | online_suffix_closure, split_all_blocks, 47 | return_same_states, raise_warning) 48 | 49 | def compute_output_seq(self, state, sequence): 50 | if not sequence: 51 | return [state.is_accepting] 52 | return super(Dfa, self).compute_output_seq(state, sequence) 53 | 54 | def 
execute_sequence(self, origin_state, seq): 55 | if not seq: 56 | return origin_state.output 57 | return super(Dfa, self).execute_sequence(origin_state, seq) 58 | 59 | 60 | def to_state_setup(self): 61 | state_setup_dict = {} 62 | 63 | # ensure prefixes are computed 64 | self.compute_prefixes() 65 | 66 | sorted_states = sorted(self.states, key=lambda x: len(x.prefix) if x.prefix is not None else len(self.states)) 67 | for s in sorted_states: 68 | state_setup_dict[s.state_id] = (s.is_accepting, {k: v.state_id for k, v in s.transitions.items()}) 69 | 70 | return state_setup_dict 71 | 72 | @staticmethod 73 | def from_state_setup(state_setup : dict, **kwargs): 74 | """ 75 | First state in the state setup is the initial state. 76 | Example state setup: 77 | state_setup = { 78 | "a": (True, {"x": "b1", "y": "a"}), 79 | "b1": (False, {"x": "b2", "y": "a"}), 80 | "b2": (True, {"x": "b3", "y": "a"}), 81 | "b3": (False, {"x": "b4", "y": "a"}), 82 | "b4": (False, {"x": "c", "y": "a"}), 83 | "c": (True, {"x": "a", "y": "a"}), 84 | } 85 | 86 | Args: 87 | 88 | state_setup: map from state_id to tuple(output and transitions_dict) 89 | 90 | Returns: 91 | 92 | DFA 93 | """ 94 | # state_setup should map from state_id to tuple(is_accepting and transitions_dict) 95 | 96 | # build states with state_id and output 97 | states = {key: DfaState(key, val[0]) for key, val in state_setup.items()} 98 | 99 | # add transitions to states 100 | for state_id, state in states.items(): 101 | for _input, target_state_id in state_setup[state_id][1].items(): 102 | state.transitions[_input] = states[target_state_id] 103 | 104 | # states to list 105 | states = [state for state in states.values()] 106 | 107 | # build moore machine with first state as starting state 108 | dfa = Dfa(states[0], states) 109 | 110 | for state in states: 111 | state.prefix = dfa.get_shortest_path(dfa.initial_state, state) 112 | 113 | return dfa -------------------------------------------------------------------------------- 
/aalpy/automata/MarkovChain.py: -------------------------------------------------------------------------------- 1 | import random 2 | from typing import Generic, Tuple, List 3 | 4 | from aalpy.base import Automaton, AutomatonState 5 | from aalpy.base.Automaton import OutputType 6 | 7 | 8 | class McState(AutomatonState, Generic[OutputType]): 9 | def __init__(self, state_id, output): 10 | super().__init__(state_id) 11 | self.output: OutputType = output 12 | # transitions is a list of tuples (Node(output), probability) 13 | self.transitions: List[Tuple[McState, float]] = list() 14 | 15 | 16 | class MarkovChain(Automaton[McState[OutputType]]): 17 | """Markov chain (input-less stochastic automaton).""" 18 | 19 | def __init__(self, initial_state, states: list): 20 | super().__init__(initial_state, states) 21 | 22 | def reset_to_initial(self): 23 | self.current_state = self.initial_state 24 | 25 | def step(self, letter=None): 26 | """Next step is determined based on transition probabilities of the current state. 27 | 28 | Args: 29 | 30 | letter: input 31 | 32 | Returns: 33 | 34 | output of the current state 35 | """ 36 | 37 | if not self.current_state.transitions: 38 | return self.current_state.output 39 | 40 | probability_distributions = [i[1] for i in self.current_state.transitions] 41 | states = [i[0] for i in self.current_state.transitions] 42 | 43 | new_state = random.choices(states, probability_distributions, k=1)[0] 44 | 45 | self.current_state = new_state 46 | return self.current_state.output 47 | 48 | def step_to(self, input): 49 | """Performs a step on the automaton to the successor state whose output matches `input`.
50 | 51 | Args: 52 | 53 | input: input 54 | 55 | Returns: 56 | 57 | output of the reached state, None otherwise 58 | """ 59 | for s in self.current_state.transitions: 60 | if s[0].output == input: 61 | self.current_state = s[0] 62 | return self.current_state.output 63 | return None 64 | 65 | @staticmethod 66 | def from_state_setup(state_setup: dict, **kwargs): 67 | raise NotImplementedError() # TODO implement 68 | 69 | def to_state_setup(self): 70 | raise NotImplementedError() # TODO implement 71 | -------------------------------------------------------------------------------- /aalpy/automata/Mdp.py: -------------------------------------------------------------------------------- 1 | import random 2 | from collections import defaultdict 3 | from typing import Dict, Generic, List, Tuple 4 | 5 | from aalpy.base import Automaton, AutomatonState 6 | from aalpy.base.Automaton import OutputType, InputType 7 | 8 | 9 | class MdpState(AutomatonState, Generic[InputType, OutputType]): 10 | """ 11 | For transitions, each transition is a tuple (Node(output), probability) 12 | """ 13 | def __init__(self, state_id, output=None): 14 | super().__init__(state_id) 15 | self.output: OutputType = output 16 | # each transition is a tuple (Node(output), probability) 17 | self.transitions: Dict[InputType, List[Tuple[MdpState, float]]] = defaultdict(list) 18 | 19 | 20 | class Mdp(Automaton[MdpState[InputType, OutputType]]): 21 | """Markov Decision Process.""" 22 | 23 | def __init__(self, initial_state: MdpState, states: list): 24 | super().__init__(initial_state, states) 25 | 26 | def reset_to_initial(self): 27 | self.current_state = self.initial_state 28 | 29 | def step(self, letter): 30 | """Next step is determined based on transition probabilities of the current state. 
31 | 32 | Args: 33 | 34 | letter: input 35 | 36 | Returns: 37 | 38 | output of the current state 39 | """ 40 | if letter is None: 41 | return self.current_state.output 42 | 43 | probability_distributions = [i[1] for i in self.current_state.transitions[letter]] 44 | states = [i[0] for i in self.current_state.transitions[letter]] 45 | 46 | new_state = random.choices(states, probability_distributions, k=1)[0] 47 | 48 | self.current_state = new_state 49 | return self.current_state.output 50 | 51 | def step_to(self, inp, out): 52 | """Performs a step on the automaton based on the input `inp` and output `out`. 53 | 54 | Args: 55 | 56 | inp: input 57 | out: output 58 | 59 | Returns: 60 | 61 | output of the reached state, None otherwise 62 | """ 63 | for new_state in self.current_state.transitions[inp]: 64 | if new_state[0].output == out: 65 | self.current_state = new_state[0] 66 | return out 67 | return None 68 | 69 | def to_state_setup(self): 70 | state_setup_dict = {} 71 | 72 | # ensure initial state is first in the list 73 | if self.states[0] != self.initial_state: 74 | self.states.remove(self.initial_state) 75 | self.states.insert(0, self.initial_state) 76 | 77 | for s in self.states: 78 | state_setup_dict[s.state_id] = (s.output, {k: [(node.state_id, prob) for node, prob in v] 79 | for k, v in s.transitions.items()}) 80 | 81 | return state_setup_dict 82 | 83 | @staticmethod 84 | def from_state_setup(state_setup: dict, **kwargs): 85 | states_map = {key: MdpState(key, output=value[0]) for key, value in state_setup.items()} 86 | 87 | for key, values in state_setup.items(): 88 | source = states_map[key] 89 | for i, transitions in values[1].items(): 90 | for node, prob in transitions: 91 | source.transitions[i].append((states_map[node], prob)) 92 | 93 | initial_state = states_map[list(state_setup.keys())[0]] 94 | return Mdp(initial_state, list(states_map.values())) 95 | -------------------------------------------------------------------------------- 
/aalpy/automata/MealyMachine.py: -------------------------------------------------------------------------------- 1 | from typing import Generic, Dict 2 | 3 | from aalpy.base import AutomatonState, DeterministicAutomaton 4 | from aalpy.base.Automaton import OutputType, InputType 5 | 6 | 7 | class MealyState(AutomatonState, Generic[InputType, OutputType]): 8 | """ 9 | Single state of a Mealy machine. Each state has an output_fun dictionary that maps inputs to outputs. 10 | """ 11 | 12 | def __init__(self, state_id): 13 | super().__init__(state_id) 14 | self.transitions : Dict[InputType, MealyState] = dict() 15 | self.output_fun : Dict[InputType, OutputType] = dict() 16 | 17 | 18 | class MealyMachine(DeterministicAutomaton[MealyState[InputType, OutputType]]): 19 | 20 | def __init__(self, initial_state: MealyState, states): 21 | super().__init__(initial_state, states) 22 | 23 | def step(self, letter): 24 | """ 25 | In Mealy machines, outputs depend on the input and the current state. 26 | 27 | Args: 28 | 29 | letter: single input that is looked up in the transition and output functions 30 | 31 | Returns: 32 | 33 | output corresponding to the input from the current state 34 | """ 35 | output = self.current_state.output_fun[letter] 36 | self.current_state = self.current_state.transitions[letter] 37 | return output 38 | 39 | def to_state_setup(self): 40 | state_setup_dict = {} 41 | 42 | # ensure prefixes are computed 43 | self.compute_prefixes() 44 | 45 | sorted_states = sorted(self.states, key=lambda x: len(x.prefix) if x.prefix is not None else len(self.states)) 46 | for s in sorted_states: 47 | state_setup_dict[s.state_id] = {k: (s.output_fun[k], v.state_id) for k, v in s.transitions.items()} 48 | 49 | return state_setup_dict 50 | 51 | @staticmethod 52 | def from_state_setup(state_setup : dict, **kwargs): 53 | """ 54 | First state in the state setup is the initial state. 
55 | state_setup = { 56 | "a": {"x": ("o1", "b1"), "y": ("o2", "a")}, 57 | "b1": {"x": ("o3", "b2"), "y": ("o1", "a")}, 58 | "b2": {"x": ("o1", "b3"), "y": ("o2", "a")}, 59 | "b3": {"x": ("o3", "b4"), "y": ("o1", "a")}, 60 | "b4": {"x": ("o1", "c"), "y": ("o4", "a")}, 61 | "c": {"x": ("o3", "a"), "y": ("o5", "a")}, 62 | } 63 | 64 | 65 | Args: 66 | 67 | state_setup: 68 | state_setup should map from state_id to tuple(transitions_dict). 69 | 70 | Returns: 71 | 72 | Mealy Machine 73 | """ 74 | # state_setup should map from state_id to tuple(transitions_dict). 75 | # Each entry in transition dict is : 76 | 77 | # build states with state_id and output 78 | states = {key: MealyState(key) for key, _ in state_setup.items()} 79 | 80 | # add transitions to states 81 | for state_id, state in states.items(): 82 | for _input, (output, new_state) in state_setup[state_id].items(): 83 | state.transitions[_input] = states[new_state] 84 | state.output_fun[_input] = output 85 | 86 | # states to list 87 | states = [state for state in states.values()] 88 | 89 | # build moore machine with first state as starting state 90 | mm = MealyMachine(states[0], states) 91 | 92 | for state in states: 93 | state.prefix = mm.get_shortest_path(mm.initial_state, state) 94 | 95 | return mm -------------------------------------------------------------------------------- /aalpy/automata/MooreMachine.py: -------------------------------------------------------------------------------- 1 | from typing import Generic, Dict 2 | 3 | from aalpy.base import AutomatonState, DeterministicAutomaton 4 | from aalpy.base.Automaton import InputType, OutputType 5 | 6 | 7 | class MooreState(AutomatonState, Generic[InputType,OutputType]): 8 | """ 9 | Single state of a Moore machine. Each state has an output value. 
10 | """ 11 | 12 | def __init__(self, state_id, output=None): 13 | super().__init__(state_id) 14 | self.output : OutputType = output 15 | self.transitions : Dict[InputType, MooreState] = dict() 16 | 17 | 18 | class MooreMachine(DeterministicAutomaton[MooreState[InputType, OutputType]]): 19 | 20 | def __init__(self, initial_state: AutomatonState, states: list): 21 | super().__init__(initial_state, states) 22 | 23 | def step(self, letter): 24 | """ 25 | In Moore machines outputs depend on the current state. 26 | 27 | Args: 28 | 29 | letter: single input that is looked up in the transition function leading to a new state 30 | 31 | Returns: 32 | 33 | the output of the reached state 34 | 35 | """ 36 | if letter is not None: 37 | self.current_state = self.current_state.transitions[letter] 38 | return self.current_state.output 39 | 40 | def compute_characterization_set(self, char_set_init=None, online_suffix_closure=True, split_all_blocks=True, 41 | return_same_states=False, raise_warning=True): 42 | return super(MooreMachine, self).compute_characterization_set(char_set_init if char_set_init else [()], 43 | online_suffix_closure, split_all_blocks, 44 | return_same_states, raise_warning) 45 | 46 | def compute_output_seq(self, state, sequence): 47 | if not sequence: 48 | return [state.output] 49 | return super(MooreMachine, self).compute_output_seq(state, sequence) 50 | 51 | def execute_sequence(self, origin_state, seq): 52 | if not seq: 53 | return origin_state.output 54 | return super(MooreMachine, self).execute_sequence(origin_state, seq) 55 | 56 | def to_state_setup(self): 57 | state_setup_dict = {} 58 | 59 | # ensure prefixes are computed 60 | self.compute_prefixes() 61 | 62 | sorted_states = sorted(self.states, key=lambda x: len(x.prefix) if x.prefix is not None else len(self.states)) 63 | for s in sorted_states: 64 | state_setup_dict[s.state_id] = (s.output, {k: v.state_id for k, v in s.transitions.items()}) 65 | 66 | return state_setup_dict 67 | 68 | @staticmethod 
69 | def from_state_setup(state_setup : dict, **kwargs): 70 | """ 71 | First state in the state setup is the initial state. 72 | Example state setup: 73 | state_setup = { 74 | "a": ("a", {"x": "b1", "y": "a"}), 75 | "b1": ("b", {"x": "b2", "y": "a"}), 76 | "b2": ("b", {"x": "b3", "y": "a"}), 77 | "b3": ("b", {"x": "b4", "y": "a"}), 78 | "b4": ("b", {"x": "c", "y": "a"}), 79 | "c": ("c", {"x": "a", "y": "a"}), 80 | } 81 | 82 | Args: 83 | 84 | state_setup: map from state_id to tuple(output and transitions_dict) 85 | 86 | Returns: 87 | 88 | Moore machine 89 | """ 90 | 91 | # build states with state_id and output 92 | states = {key: MooreState(key, val[0]) for key, val in state_setup.items()} 93 | 94 | # add transitions to states 95 | for state_id, state in states.items(): 96 | for _input, target_state_id in state_setup[state_id][1].items(): 97 | state.transitions[_input] = states[target_state_id] 98 | 99 | # states to list 100 | states = [state for state in states.values()] 101 | 102 | # build moore machine with first state as starting state 103 | mm = MooreMachine(states[0], states) 104 | 105 | for state in states: 106 | state.prefix = mm.get_shortest_path(mm.initial_state, state) 107 | 108 | return mm -------------------------------------------------------------------------------- /aalpy/automata/NonDeterministicMooreMachine.py: -------------------------------------------------------------------------------- 1 | import random 2 | from collections import defaultdict 3 | from typing import List, Dict, Generic 4 | 5 | from aalpy.base import AutomatonState, Automaton 6 | from aalpy.base.Automaton import OutputType, InputType 7 | 8 | 9 | class NDMooreState(AutomatonState, Generic[InputType, OutputType]): 10 | """ 11 | Single state of a non-deterministic Moore machine. Each state has an output value. 
12 | """ 13 | 14 | def __init__(self, state_id, output=None): 15 | super().__init__(state_id) 16 | self.transitions: Dict[InputType, List['NDMooreState']] = defaultdict(lambda: list()) 17 | self.output: OutputType = output 18 | 19 | 20 | class NDMooreMachine(Automaton[NDMooreState[InputType, OutputType]]): 21 | 22 | def to_state_setup(self): 23 | state_setup = dict() 24 | 25 | def set_dict_entry(state: NDMooreState): 26 | state_setup[state.state_id] = (state.output, 27 | {in_sym: [target.state_id for target in trans] for in_sym, trans in 28 | state.transitions.items()}) 29 | 30 | set_dict_entry(self.initial_state) 31 | for state in self.states: 32 | if state is self.initial_state: 33 | continue 34 | set_dict_entry(state) 35 | 36 | @staticmethod 37 | def from_state_setup(state_setup: dict, **kwargs) -> 'NDMooreMachine': 38 | states_map = {key: NDMooreState(key, output=value[0]) for key, value in state_setup.items()} 39 | 40 | for key, values in state_setup.items(): 41 | source = states_map[key] 42 | for i, transitions in values[1].items(): 43 | for node in transitions: 44 | source.transitions[i].append(states_map[node]) 45 | 46 | initial_state = states_map[list(state_setup.keys())[0]] 47 | return NDMooreMachine(initial_state, list(states_map.values())) 48 | 49 | def __init__(self, initial_state: AutomatonState, states: list): 50 | super().__init__(initial_state, states) 51 | 52 | def step(self, letter): 53 | """ 54 | In Moore machines outputs depend on the current state. 
class OnfsmState(AutomatonState, Generic[InputType, OutputType]):
    """Single state of an observable non-deterministic finite state machine."""
    def __init__(self, state_id):
        super().__init__(state_id)
        # TODO this order is inconsistent with probabilistic models
        # key/input maps to the list of tuples of possible output/new state [(output1, state1), (output2, state2)]
        self.transitions: Dict[InputType, List[Tuple[OutputType, 'OnfsmState']]] = defaultdict(list)

    def add_transition(self, inp, out, new_state):
        """
        Add a transition: on input `inp` the machine may emit `out` and move to `new_state`.

        Args:
            inp: input symbol
            out: output symbol
            new_state: successor OnfsmState
        """
        self.transitions[inp].append((out, new_state))

    def get_transition(self, input, output=None):
        """
        Look up transitions for `input`.

        Args:
            input: input symbol
            output: (Default value = None) if given, select the matching transition

        Returns:
            the (output, state) pair matching `output` (or None if absent);
            if `output` is None, the list of all (output, state) pairs
        """
        possible_transitions = self.transitions[input]
        # BUG FIX: use an explicit None check so falsy outputs (0, False, '')
        # are matched instead of falling through to "return all transitions"
        if output is not None:
            return next((t for t in possible_transitions if t[0] == output), None)
        else:
            return possible_transitions
50 | """ 51 | def __init__(self, initial_state: OnfsmState, states: list): 52 | super().__init__(initial_state, states) 53 | 54 | def step(self, letter): 55 | """Next step is determined based on a uniform distribution over all transitions with the input 'letter'. 56 | 57 | Args: 58 | 59 | letter: input 60 | 61 | Returns: 62 | 63 | output of the probabilistically chosen transition 64 | 65 | """ 66 | transition = choice(self.current_state.transitions[letter]) 67 | output = transition[0] 68 | self.current_state = transition[1] 69 | return output 70 | 71 | def outputs_on_input(self, letter): 72 | """All possible observable outputs after executing the current input 'letter'. 73 | 74 | Args: 75 | 76 | letter: input 77 | 78 | Returns: 79 | 80 | list of observable outputs 81 | 82 | """ 83 | return [trans[0] for trans in self.current_state.transitions[letter]] 84 | 85 | def step_to(self, inp, out): 86 | """Performs a step on the automaton based on the input `inp` and output `out`. 87 | 88 | Args: 89 | 90 | inp: input 91 | out: output 92 | 93 | Returns: 94 | 95 | output of the reached state, None otherwise 96 | 97 | """ 98 | for new_state in self.current_state.transitions[inp]: 99 | if new_state[0] == out: 100 | self.current_state = new_state[1] 101 | return out 102 | return None 103 | 104 | @staticmethod 105 | def from_state_setup(state_setup : dict, **kwargs): 106 | raise NotImplementedError() # TODO implement 107 | 108 | def to_state_setup(self): 109 | raise NotImplementedError # TODO implement -------------------------------------------------------------------------------- /aalpy/automata/__init__.py: -------------------------------------------------------------------------------- 1 | from .Dfa import Dfa, DfaState 2 | from .Mdp import Mdp, MdpState 3 | from .MealyMachine import MealyMachine, MealyState 4 | from .MooreMachine import MooreMachine, MooreState 5 | from .Onfsm import Onfsm, OnfsmState 6 | from .StochasticMealyMachine import StochasticMealyMachine, 
class Oracle(ABC):
    """Base class implemented by every equivalence oracle."""

    def __init__(self, alphabet: list, sul: SUL):
        """
        Default constructor for all equivalence oracles.

        Args:

            alphabet: input alphabet
            sul: system under learning
        """
        self.alphabet = alphabet
        self.sul = sul
        # statistics accumulated across all equivalence queries
        self.num_queries = 0
        self.num_steps = 0

    @abstractmethod
    def find_cex(self, hypothesis):
        """
        Search for a counterexample (inputs) on which the system under learning
        and the current hypothesis disagree.

        Args:

            hypothesis: current hypothesis

        Returns:

            tuple or list containing counterexample inputs, None if no counterexample is found
        """
        pass

    def reset_hyp_and_sul(self, hypothesis):
        """
        Reset SUL and hypothesis to initial state.

        Args:

            hypothesis: current hypothesis

        """
        hypothesis.reset_to_initial()
        # post() followed by pre() performs a full SUL reset
        self.sul.post()
        self.sul.pre()
        self.num_queries += 1
-------------------------------------------------------------------------------- /aalpy/learning_algs/deterministic/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DES-Lab/AALpy/177862fb505df3f92a945861ceb1977962327327/aalpy/learning_algs/deterministic/__init__.py -------------------------------------------------------------------------------- /aalpy/learning_algs/deterministic_passive/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DES-Lab/AALpy/177862fb505df3f92a945861ceb1977962327327/aalpy/learning_algs/deterministic_passive/__init__.py -------------------------------------------------------------------------------- /aalpy/learning_algs/deterministic_passive/active_RPNI.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from random import randint, choice 3 | 4 | from aalpy.learning_algs import run_RPNI 5 | from aalpy.utils import convert_i_o_traces_for_RPNI 6 | 7 | 8 | class RpniActiveSampler(ABC): 9 | """ 10 | Abstract class whose implementations are used to provide samples for active passive learning. 11 | """ 12 | 13 | @abstractmethod 14 | def sample(self, sul, model): 15 | """ 16 | Abstract method implementing sampling strategy. 17 | 18 | Args: 19 | 20 | sul: system under learning 21 | model: current learned model 22 | 23 | Returns: 24 | 25 | Data to be added to the data set for the passive RPNI learning in its data-format. 
def run_active_RPNI(data, sul, sampler, n_iter, automaton_type, print_info=True):
    """
    Iteratively learn a model with RPNI, enriching the data set between
    iterations with samples drawn by `sampler` from the SUL.

    Args:

        data: initial learning data in RPNI format
        sul: system under learning used for sampling
        sampler: instance of RpniActiveSampler
        n_iter: number of active learning iterations
        automaton_type: passed through to run_RPNI
        print_info: print progress of each iteration

    Returns:

        model learned in the final iteration
    """
    model = None
    for i in range(n_iter):
        if print_info:
            print(f'-------------Active RPNI Iteration: {i}-------------')
        model = run_RPNI(data, automaton_type=automaton_type, print_info=print_info)
        # grow the data set with fresh samples based on the current model
        data.extend(sampler.sample(sul, model))

    return model
def stochastic_longest_prefix(cex, prefixes):
    """
    Counterexample processing based on Shabaz-Groz cex processing.

    Args:

        cex: counterexample
        prefixes: all prefixes in the observation table

    Returns:

        list of suffixes (tuples) of the trimmed counterexample, or () if a
        prefix covers the entire counterexample
    """
    # strip the longest prefix whose inputs (odd positions) match the cex
    remainder = None
    for candidate in sorted(prefixes, key=len, reverse=True):
        if candidate[1::2] == cex[:len(candidate)][1::2]:
            remainder = list(cex[len(candidate):])
            break

    if remainder is None:
        # no prefix matched; keep the whole counterexample
        remainder = list(cex)

    if not remainder:
        return ()

    # collect every suffix that starts on an input/output boundary
    n = len(remainder)
    return [tuple(remainder[n - k - 1:]) for k in range(0, n, 2)]
hypothesis.reset_to_initial() 92 | sul.post() 93 | sul.pre() 94 | 95 | repeat = False 96 | for inp in prefix_inputs: 97 | o_sul = sul.step(inp) 98 | o_hyp = hypothesis.step_to(inp, o_sul) 99 | 100 | if o_hyp is None: 101 | repeat = True 102 | break 103 | 104 | prefix_reached = not repeat 105 | 106 | for inp in inputs[mid:]: 107 | 108 | o_sul = sul.step(inp) 109 | o_hyp = hypothesis.step_to(inp, o_sul) 110 | 111 | if o_hyp is None: 112 | not_same = True 113 | break 114 | 115 | if not not_same: 116 | lower = mid + 1 117 | if upper < lower: 118 | suffix = cex[(mid + 1) * 2:] 119 | break 120 | else: 121 | upper = mid - 1 122 | if upper < lower: 123 | suffix = cex[mid * 2:] 124 | break 125 | 126 | suffixes = [tuple(suffix[len(suffix) - i - 1:]) for i in range(0, len(suffix), 2)] 127 | 128 | # suffixes = [suffixes[-1]] 129 | # print(len(cex), len(suffixes[-1])) 130 | return suffixes 131 | -------------------------------------------------------------------------------- /aalpy/learning_algs/stochastic/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DES-Lab/AALpy/177862fb505df3f92a945861ceb1977962327327/aalpy/learning_algs/stochastic/__init__.py -------------------------------------------------------------------------------- /aalpy/learning_algs/stochastic_passive/ActiveAleriga.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from random import randint, choice 3 | 4 | from aalpy.learning_algs import run_Alergia 5 | 6 | 7 | class Sampler(ABC): 8 | """ 9 | Abstract class whose implementations are used to provide samples for active passive learning. 10 | """ 11 | 12 | @abstractmethod 13 | def sample(self, sul, model): 14 | """ 15 | Abstract method implementing sampling strategy. 
def run_active_Alergia(data, sul, sampler, n_iter, eps=0.05, compatibility_checker=None, automaton_type='mdp',
                       print_info=True):
    """
    Active version of IOAlergia algorithm. Based on intermediate hypothesis sampling on the system is performed.
    Sampled data is added to the learning data and more accurate model is learned.
    Proposed in "Aichernig and Tappler, Probabilistic Black-Box Reachability Checking"

    Args:

        data: initial learning data, in form [[O, (I,O), (I,O)...] ,...] where O is outputs and I input.
        sul: system under learning which is basis for sampling
        sampler: instance of Sampler class
        n_iter: number of iterations of active learning
        eps: epsilon value if the default checker is used. Look in run_Alergia for description
        compatibility_checker: passed to run_Alergia, check there for description
        automaton_type: either 'mdp' or 'smm' (Markov decision process or Stochastic Mealy Machine)
        print_info: print current learning iteration

    Returns:

        learned model (MDP or SMM, depending on automaton_type)

    """
    model = None
    for i in range(n_iter):
        if print_info:
            print(f'Active Alergia Iteration: {i}')
        # BUG FIX: automaton_type was hard-coded to 'mdp', silently ignoring
        # the caller's choice of 'smm'
        model = run_Alergia(data, automaton_type=automaton_type, eps=eps,
                            compatibility_checker=compatibility_checker)

        new_samples = sampler.sample(sul, model)
        data.extend(new_samples)

    return model
@total_ordering
class AlergiaPtaNode:
    """Node of the frequency prefix tree acceptor (FPTA) used by Alergia."""

    __slots__ = ['prefix', 'output', 'input_frequency', 'children', 'original_input_frequency',
                 'original_children', 'state_id', 'children_prob']

    def __init__(self, output, prefix):
        self.prefix = prefix
        self.output = output
        # mutable values, updated while merging
        self.input_frequency = dict()
        self.children = dict()
        # immutable values used for statistical computability check
        self.original_input_frequency = dict()
        self.original_children = dict()
        # # for visualization
        self.state_id = None
        self.children_prob = None

    def successors(self):
        """All direct child nodes."""
        return list(self.children.values())

    def get_inputs(self):
        """Distinct inputs occurring in the mutable frequency map."""
        return {inp for inp, _ in self.input_frequency.keys()}

    def get_input_frequency(self, target_input):
        """Total observed frequency of `target_input`, summed over outputs."""
        total = 0
        for (inp, _), freq in self.input_frequency.items():
            if inp == target_input:
                total += freq
        return total

    def get_output_frequencies(self, target_input):
        """Map output -> frequency for `target_input` (mutable counts)."""
        return {out: freq for (inp, out), freq in self.input_frequency.items() if inp == target_input}

    def get_immutable_inputs(self):
        """Distinct inputs occurring in the original (pre-merge) children map."""
        return {inp for inp, _ in self.original_children.keys()}

    def get_immutable_input_frequency(self, target_input):
        """Total original frequency of `target_input`, summed over outputs."""
        total = 0
        for (inp, _), freq in self.original_input_frequency.items():
            if inp == target_input:
                total += freq
        return total

    def get_original_output_frequencies(self, target_input):
        """Map output -> original frequency for `target_input`."""
        return {out: freq for (inp, out), freq in self.original_input_frequency.items() if inp == target_input}

    # ordering: shorter prefixes first, ties broken by the prefix itself
    def __lt__(self, other):
        return (len(self.prefix), self.prefix) < (len(other.prefix), other.prefix)

    def __le__(self, other):
        return self < other or self == other

    # equality is defined on the access prefix only
    def __eq__(self, other):
        return self.prefix == other.prefix
class BreadthFirstExplorationEqOracle(Oracle):
    """
    Exhaustive exploration of every input combination of a fixed depth.
    Extremely inefficient equivalence oracle and should only be used for demonstrations.
    """

    def __init__(self, alphabet, sul: SUL, depth=5):
        """
        Args:

            alphabet: input alphabet

            sul: system under learning

            depth: depth of the tree
        """

        super().__init__(alphabet, sul)
        self.depth = depth
        # pre-generate every test case of length `depth`, in random order
        self.queue = list(product(self.alphabet, repeat=self.depth))
        shuffle(self.queue)

    def find_cex(self, hypothesis):
        """Pop test cases until one exposes a mismatch; return its inputs or None."""
        while self.queue:
            test_case = self.queue.pop()
            self.reset_hyp_and_sul(hypothesis)

            for idx, symbol in enumerate(test_case):
                out_hyp = hypothesis.step(symbol)
                out_sul = self.sul.step(symbol)
                self.num_steps += 1

                if out_hyp != out_sul:
                    self.sul.post()
                    # return only the executed portion of the test case
                    return test_case[:idx + 1]

        return None
case selection is based on the multiset of all traces observed during learning and 10 | conformance checking. Firstly all leaves of the tree are gathered and then random leaves are extended with a suffix 11 | of length (max_tree_depth + 'depth_increase') - len(prefix), where prefix is a path to the leaf. 12 | """ 13 | 14 | def __init__(self, alphabet: list, sul: SUL, num_walks=100, depth_increase=5, reset_after_cex=True): 15 | """ 16 | 17 | Args: 18 | 19 | alphabet: input alphabet 20 | 21 | sul: system under learning 22 | 23 | num_walks: number of random walks to perform 24 | 25 | depth_increase: length of random walk that exceeds the maximum depth of the tree 26 | 27 | reset_after_cex: if False, total number of queries will equal num_walks, if True, in each execution of 28 | find_cex method at most num_walks will be executed 29 | """ 30 | 31 | super().__init__(alphabet, sul) 32 | self.cache_tree = None 33 | self.num_walks = num_walks 34 | self.depth_increase = depth_increase 35 | self.reset_after_cex = reset_after_cex 36 | self.num_walks_done = 0 37 | 38 | def find_cex(self, hypothesis): 39 | 40 | assert isinstance(self.sul, CacheSUL) 41 | self.cache_tree = self.sul.cache 42 | 43 | paths_to_leaves = self.get_paths(self.cache_tree.root_node) 44 | max_tree_depth = len(max(paths_to_leaves, key=len)) 45 | 46 | while self.num_walks_done < self.num_walks: 47 | self.num_walks_done += 1 48 | self.reset_hyp_and_sul(hypothesis) 49 | 50 | prefix = choice(paths_to_leaves) 51 | walk_len = (max_tree_depth + self.depth_increase) - len(prefix) 52 | inputs = [] 53 | inputs.extend(prefix) 54 | 55 | for p in prefix: 56 | hypothesis.step(p) 57 | self.sul.step(p) 58 | self.num_steps += 1 59 | 60 | for _ in range(walk_len): 61 | inputs.append(choice(self.alphabet)) 62 | 63 | out_sul = self.sul.step(inputs[-1]) 64 | out_hyp = hypothesis.step(inputs[-1]) 65 | self.num_steps += 1 66 | 67 | if out_sul != out_hyp: 68 | if self.reset_after_cex: 69 | self.num_walks_done = 0 70 | 
self.sul.post() 71 | return inputs 72 | 73 | return None 74 | 75 | def get_paths(self, t, paths=None, current_path=None): 76 | """ 77 | 78 | Args: 79 | t: 80 | paths: (Default value = None) 81 | current_path: (Default value = None) 82 | 83 | Returns: 84 | 85 | """ 86 | if paths is None: 87 | paths = [] 88 | if current_path is None: 89 | current_path = [] 90 | 91 | if len(t.children) == 0: 92 | paths.append(current_path) 93 | else: 94 | for inp, child in t.children.items(): 95 | current_path.append(inp) 96 | self.get_paths(child, paths, list(current_path)) 97 | return paths 98 | -------------------------------------------------------------------------------- /aalpy/oracles/PacOracle.py: -------------------------------------------------------------------------------- 1 | from math import ceil, log 2 | from random import choice, randint 3 | 4 | from aalpy.base import Oracle, SUL 5 | 6 | 7 | class PacOracle(Oracle): 8 | """ 9 | Probably approximately correct oracle. Number of queries is defined by the following equation: 10 | 1 / self.epsilon * (log(1 / self.delta) + self.round * log(2)), where epsilon is the generalization error and delta 11 | the confidence. Thus, returned hypothesis is the epsilon-approximation of the correct hypothesis with the probability 12 | 1 - delta (Mohri, M et al.: Foundations of Machine Learning). 13 | Queries are of random length in a predefined range. 
14 | """ 15 | 16 | def __init__(self, alphabet: list, sul: SUL, epsilon=0.01, delta=0.01, min_walk_len=10, max_walk_len=25): 17 | 18 | super().__init__(alphabet, sul) 19 | self.min_walk_len = min_walk_len 20 | self.max_walk_len = max_walk_len 21 | self.epsilon = epsilon 22 | self.delta = delta 23 | self.round = 0 24 | 25 | def find_cex(self, hypothesis): 26 | self.round += 1 27 | num_test_cases = 1 / self.epsilon * (log(1 / self.delta) + self.round * log(2)) 28 | 29 | for i in range(ceil(num_test_cases)): 30 | inputs = [] 31 | self.reset_hyp_and_sul(hypothesis) 32 | 33 | num_steps = randint(self.min_walk_len, self.max_walk_len) 34 | 35 | for _ in range(num_steps): 36 | inputs.append(choice(self.alphabet)) 37 | 38 | out_sul = self.sul.step(inputs[-1]) 39 | out_hyp = hypothesis.step(inputs[-1]) 40 | self.num_steps += 1 41 | 42 | if out_sul != out_hyp: 43 | self.sul.post() 44 | return inputs 45 | 46 | return None 47 | -------------------------------------------------------------------------------- /aalpy/oracles/PerfectKnowledgeEqOracle.py: -------------------------------------------------------------------------------- 1 | from aalpy.base import Oracle, SUL, DeterministicAutomaton 2 | from aalpy.utils import bisimilar 3 | 4 | 5 | class PerfectKnowledgeEqOracle(Oracle): 6 | """ 7 | Oracle that can be used when developing and testing deterministic learning algorithms, 8 | so that the focus is put off equivalence query. 
9 | """ 10 | def __init__(self, alphabet: list, sul: SUL, model_under_learning: DeterministicAutomaton): 11 | super().__init__(alphabet, sul, ) 12 | self.model_under_learning = model_under_learning 13 | 14 | def find_cex(self, hypothesis): 15 | return bisimilar(hypothesis, self.model_under_learning, return_cex=True) 16 | -------------------------------------------------------------------------------- /aalpy/oracles/ProvidedSequencesOracleWrapper.py: -------------------------------------------------------------------------------- 1 | from aalpy.base import Oracle, SUL 2 | 3 | 4 | class ProvidedSequencesOracleWrapper(Oracle): 5 | def __init__(self, alphabet: list, sul: SUL, oracle: Oracle, provided_counterexamples: list): 6 | """ 7 | Oracle wrapper which first executes provided sequences (possible counterexamples) and then switches to another 8 | oracle instance. 9 | 10 | Args: 11 | alphabet: input alphabet 12 | sul: system under learning 13 | oracle: oracle which will be used once all provided counterexamples are used 14 | provided_counterexamples: list of input sequance lists. eg [[1,2,3], [2,3,1], ...] 
where 1,2,3 are elements 15 | of input alphabet 16 | """ 17 | super().__init__(alphabet, sul) 18 | self.provided_counterexamples = provided_counterexamples 19 | self.oracle = oracle 20 | 21 | def find_cex(self, hypothesis): 22 | for provided_cex in self.provided_counterexamples.copy(): 23 | inputs = [] 24 | self.reset_hyp_and_sul(hypothesis) 25 | 26 | for i in provided_cex: 27 | inputs.append(i) 28 | out_sul = self.sul.step(i) 29 | out_hyp = hypothesis.step(i) 30 | self.num_steps += 1 31 | 32 | if out_sul != out_hyp: 33 | self.sul.post() 34 | return tuple(inputs) 35 | 36 | self.provided_counterexamples.remove(provided_cex) 37 | 38 | cex = self.oracle.find_cex(hypothesis) 39 | 40 | # to account for steps statistics from actual oracle 41 | if cex is None: 42 | self.num_queries += self.oracle.num_queries 43 | self.num_steps += self.oracle.num_steps 44 | 45 | return cex 46 | 47 | -------------------------------------------------------------------------------- /aalpy/oracles/RandomWalkEqOracle.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | from aalpy.automata import Onfsm, Mdp, StochasticMealyMachine 4 | from aalpy.base import Oracle, SUL 5 | 6 | automaton_dict = {Onfsm: 'onfsm', Mdp: 'mdp', StochasticMealyMachine: 'smm'} 7 | 8 | 9 | class RandomWalkEqOracle(Oracle): 10 | """ 11 | Equivalence oracle where queries contain random inputs. After every step, 'reset_prob' determines the probability 12 | that the system will reset and a new query asked. 
13 | """ 14 | 15 | def __init__(self, alphabet: list, sul: SUL, num_steps=5000, reset_after_cex=True, reset_prob=0.09): 16 | """ 17 | 18 | Args: 19 | alphabet: input alphabet 20 | 21 | sul: system under learning 22 | 23 | num_steps: number of steps to be preformed 24 | 25 | reset_after_cex: if true, num_steps will be preformed after every counter example, else the total number 26 | or steps will equal to num_steps 27 | 28 | reset_prob: probability that the new query will be asked 29 | """ 30 | 31 | super().__init__(alphabet, sul) 32 | self.step_limit = num_steps 33 | self.reset_after_cex = reset_after_cex 34 | self.reset_prob = reset_prob 35 | self.random_steps_done = 0 36 | self.automata_type = None 37 | 38 | def find_cex(self, hypothesis): 39 | if not self.automata_type: 40 | self.automata_type = automaton_dict.get(type(hypothesis), 'det') 41 | 42 | inputs = [] 43 | outputs = [] 44 | self.reset_hyp_and_sul(hypothesis) 45 | 46 | while self.random_steps_done < self.step_limit: 47 | self.num_steps += 1 48 | self.random_steps_done += 1 49 | 50 | if random.random() <= self.reset_prob: 51 | self.reset_hyp_and_sul(hypothesis) 52 | inputs.clear() 53 | outputs.clear() 54 | 55 | inputs.append(random.choice(self.alphabet)) 56 | 57 | out_sul = self.sul.step(inputs[-1]) 58 | outputs.append(out_sul) 59 | 60 | if self.automata_type == 'det': 61 | out_hyp = hypothesis.step(inputs[-1]) 62 | else: 63 | out_hyp = hypothesis.step_to(inputs[-1], out_sul) 64 | 65 | if self.automata_type == 'det' and out_sul != out_hyp: 66 | if self.reset_after_cex: 67 | self.random_steps_done = 0 68 | 69 | self.sul.post() 70 | return inputs 71 | elif out_hyp is None and self.automata_type != 'det': 72 | if self.reset_after_cex: 73 | self.random_steps_done = 0 74 | self.sul.post() 75 | 76 | if self.automata_type == 'onfsm': 77 | return inputs, outputs 78 | else: 79 | # hypothesis is MDP or SMM 80 | cex = [hypothesis.initial_state.output] if self.automata_type == 'mdp' else [] 81 | for i, o in 
class RandomWordEqOracle(Oracle):
    """
    Equivalence oracle whose queries are random words with lengths drawn uniformly from a predefined range.
    """

    def __init__(self, alphabet: list, sul: SUL, num_walks=500, min_walk_len=10, max_walk_len=30,
                 reset_after_cex=True):
        """
        Args:

            alphabet: input alphabet

            sul: system under learning

            num_walks: number of walks to perform during search for cex

            min_walk_len: minimum length of each walk

            max_walk_len: maximum length of each walk

            reset_after_cex: if True, num_walks will be preformed after every counter example, else the total number
                or walks will equal to num_walks
        """
        super().__init__(alphabet, sul)
        self.num_walks = num_walks
        self.min_walk_len = min_walk_len
        self.max_walk_len = max_walk_len
        self.reset_after_cex = reset_after_cex
        self.num_walks_done = 0
        self.automata_type = None

        # all walk lengths are pre-drawn; find_cex consumes them one by one
        self.walk_lengths = [randint(min_walk_len, max_walk_len) for _ in range(num_walks)]

    def _replenish_walks(self):
        """Restore the full walk budget (used after a counterexample when reset_after_cex is set)."""
        self.walk_lengths = [randint(self.min_walk_len, self.max_walk_len) for _ in range(self.num_walks)]
        self.num_walks_done = 0

    def find_cex(self, hypothesis):
        # lazily classify the hypothesis (onfsm/mdp/smm; anything else is treated as deterministic)
        if not self.automata_type:
            self.automata_type = automaton_dict.get(type(hypothesis), 'det')

        while self.num_walks_done < self.num_walks:
            input_seq, output_seq = [], []
            self.reset_hyp_and_sul(hypothesis)
            self.num_walks_done += 1

            for _ in range(self.walk_lengths.pop(0)):
                letter = choice(self.alphabet)
                input_seq.append(letter)

                sul_output = self.sul.step(letter)
                if self.automata_type == 'det':
                    hyp_output = hypothesis.step(letter)
                else:
                    hyp_output = hypothesis.step_to(letter, sul_output)
                output_seq.append(sul_output)

                self.num_steps += 1

                if self.automata_type == 'det' and sul_output != hyp_output:
                    if self.reset_after_cex:
                        self._replenish_walks()
                    self.sul.post()
                    return input_seq

                if hyp_output is None and self.automata_type != 'det':
                    # observed an output the hypothesis cannot reproduce
                    self.sul.post()
                    if self.reset_after_cex:
                        self._replenish_walks()

                    if self.automata_type == 'onfsm':
                        return input_seq, output_seq
                    # hypothesis is MDP or SMM: interleave inputs and outputs
                    prefix = [hypothesis.initial_state.output] if self.automata_type == 'mdp' else []
                    return prefix + [x for pair in zip(input_seq, output_seq) for x in pair]

        return None

    def reset_counter(self):
        if self.reset_after_cex:
            self.num_walks_done = 0
class StatePrefixEqOracle(Oracle):
    """
    Equivalence oracle that achieves guided exploration by starting random walks from each state walks_per_state
    times. Starting the random walks from the states ensures that all states are reached at least walks_per_state
    times and that their surrounding is randomly explored. Each state serves as the root of a random exploration
    of maximum length walk_len exactly walks_per_state times over the whole learning process, so excessive
    re-testing of early states is avoided.
    """
    def __init__(self, alphabet: list, sul: SUL, walks_per_state=25, walk_len=12, max_tests=None, depth_first=True):
        """
        Args:

            alphabet: input alphabet

            sul: system under learning

            walks_per_state: individual walks per state of the automaton over the whole learning process

            walk_len: length of random walk

            max_tests: number of maximum tests. If set to None, this parameter will be ignored.

            depth_first: first explore the newest states
        """
        super().__init__(alphabet, sul)
        self.walks_per_state = walks_per_state
        self.steps_per_walk = walk_len
        self.depth_first = depth_first
        self.max_tests = max_tests

        # access sequence (state prefix) -> number of walks already rooted at that state
        self.freq_dict = dict()

    def find_cex(self, hypothesis):
        # collect each state once per walk it is still owed
        states_to_cover = []
        for state in hypothesis.states:
            if state.prefix is None:
                state.prefix = hypothesis.get_shortest_path(hypothesis.initial_state, state)
            if state.prefix not in self.freq_dict:
                self.freq_dict[state.prefix] = 0

            states_to_cover.extend([state] * (self.walks_per_state - self.freq_dict[state.prefix]))

        if self.depth_first:
            # reverse sort the states by length of their access sequences;
            # first do the random walk on the state with the longest access sequence
            states_to_cover.sort(key=lambda x: len(x.prefix), reverse=True)
        else:
            random.shuffle(states_to_cover)

        for state in states_to_cover:
            self.freq_dict[state.prefix] += 1

            self.reset_hyp_and_sul(hypothesis)

            # Fix: use >= instead of ==. With == the limit is silently missed whenever
            # num_queries ever jumps past max_tests without landing exactly on it.
            if self.max_tests and self.num_queries >= self.max_tests:
                return None

            # replay the access sequence to put both hypothesis and SUL in 'state'
            prefix = state.prefix
            for p in prefix:
                hypothesis.step(p)
                self.sul.step(p)
                self.num_steps += 1

            # random exploration of the state's surroundings
            suffix = ()
            for _ in range(self.steps_per_walk):
                suffix += (random.choice(self.alphabet),)

                out_sul = self.sul.step(suffix[-1])
                out_hyp = hypothesis.step(suffix[-1])
                self.num_steps += 1

                if out_sul != out_hyp:
                    self.sul.post()
                    return prefix + suffix

        return None
class TransitionFocusOracle(Oracle):
    """
    Equivalence oracle that biases its random walks towards either same-state transitions or transitions leading
    to different states. It is meant for systems such as balanced-parentheses grammars, where the interesting
    behavior occurs on the transitions between states and potential bugs can only be found by focusing on them.
    """
    def __init__(self, alphabet, sul: SUL, num_random_walks=500, walk_len=20, same_state_prob=0.2):
        """
        Args:

            alphabet: input alphabet

            sul: system under learning

            num_random_walks: number of walks

            walk_len: length of each walk

            same_state_prob: probability that the next input will lead to same state transition
        """
        super().__init__(alphabet, sul)
        self.num_walks = num_random_walks
        self.steps_per_walk = walk_len
        self.same_state_prob = same_state_prob

    def find_cex(self, hypothesis):

        for _ in range(self.num_walks):
            self.reset_hyp_and_sul(hypothesis)

            curr_state = hypothesis.initial_state
            inputs = []
            for _ in range(self.steps_per_walk):
                if random.random() <= self.same_state_prob:
                    possible_inputs = curr_state.get_same_state_transitions()
                else:
                    possible_inputs = curr_state.get_diff_state_transitions()

                # fall back to a uniformly random input if the wanted transition kind does not exist
                act = random.choice(possible_inputs) if possible_inputs else random.choice(self.alphabet)
                inputs.append(act)

                out_sul = self.sul.step(inputs[-1])
                out_hyp = hypothesis.step(inputs[-1])
                self.num_steps += 1

                # Fix: advance curr_state along the walk. Previously curr_state was never
                # reassigned, so the same-/diff-state transition choice was always made from
                # the transitions of the initial state instead of the walk's current state.
                # (hypothesis.step updates hypothesis.current_state — confirm against aalpy.base.Automaton.)
                curr_state = hypothesis.current_state

                if out_sul != out_hyp:
                    self.sul.post()
                    return inputs

        return None
class UserInputEqOracle(Oracle):
    """
    Interactive equivalence oracle. For every equivalence query the current hypothesis is visualized and the user
    builds a potential counterexample step by step by typing elements of the input alphabet or commands.
    Typing an input-alphabet element performs that step on both the hypothesis and the SUL and prints the outputs.

    Commands offered to the user:

        print alphabet - prints the input alphabet

        current inputs - inputs entered so far

        cex - returns inputs entered so far as the counterexample

        end - no counterexample exists

        reset - resets the current state of the hypothesis and clears inputs
    """
    def __init__(self, alphabet: list, sul: SUL):
        super().__init__(alphabet, sul)
        self.curr_hypothesis = 0

    def find_cex(self, hypothesis):

        self.reset_hyp_and_sul(hypothesis)

        self.curr_hypothesis += 1
        inputs = []
        visualize_automaton(hypothesis, path=f'Hypothesis_{self.curr_hypothesis}')
        while True:
            inp = input('Please provide an input: ')
            if inp == 'help':
                print('Use one of following commands [print alphabet, current inputs, cex, end, reset] '
                      'or provide an input')
            elif inp == 'print alphabet':
                print(self.alphabet)
            elif inp == 'current inputs':
                print(inputs)
            elif inp == 'cex' and inputs:
                # only a non-empty sequence counts as a counterexample; an empty 'cex'
                # falls through to the not-in-alphabet message below, as before
                self.sul.post()
                return inputs
            elif inp == 'end':
                return None
            elif inp == 'reset':
                inputs.clear()
                self.reset_hyp_and_sul(hypothesis)
                print('You are back in the initial state. Please provide an input: ')
            elif inp not in self.alphabet:
                print("Provided input is not in the input alphabet.")
            else:
                inputs.append(inp)
                self.num_steps += 1
                out_hyp = hypothesis.step(inp)
                out_sul = self.sul.step(inp)
                print('Hypothesis Output :', out_hyp)
                print('SUL Output :', out_sul)
                if out_hyp != out_sul:
                    print('Counterexample found.\nIf you want to return it, type \'cex\'.')
class KWayStateCoverageEqOracle(Oracle):
    """
    For every k-combination or k-permutation of hypothesis states a test case is computed that visits those
    states in order and ends with an additional random walk.
    """

    def __init__(self, alphabet: list, sul: SUL, k=2, random_walk_len=20, method='permutations'):
        """

        Args:

            alphabet: input alphabet

            sul: system under learning

            k: k value used for k-wise combinations/permutations of states

            random_walk_len: length of random walk performed at the end of each combination/permutation

            method: either 'combinations' or 'permutations'
        """
        super().__init__(alphabet, sul)
        assert k > 1 and method in ['combinations', 'permutations']
        self.k = k
        # frozensets of state prefixes already covered, so no state tuple is tested twice
        self.cache = set()
        self.fun = combinations if method == 'combinations' else permutations
        self.random_walk_len = random_walk_len

    def find_cex(self, hypothesis):

        # single-state hypothesis: no k-tuples of states exist, fall back to plain random walks
        if len(hypothesis.states) == 1:
            for _ in range(self.random_walk_len):
                path = choices(self.alphabet, k=self.random_walk_len)
                hypothesis.reset_to_initial()
                self.sul.post()
                self.sul.pre()
                for i, p in enumerate(path):
                    out_sul = self.sul.step(p)
                    out_hyp = hypothesis.step(p)
                    self.num_steps += 1

                    if out_sul != out_hyp:
                        self.sul.post()
                        return path[:i + 1]

        # shuffle a copy so the iteration order is randomized without mutating hypothesis.states
        states = list(hypothesis.states)
        shuffle(states)

        for comb in self.fun(states, self.k):
            prefixes = frozenset(c.prefix for c in comb)
            if prefixes in self.cache:
                continue
            self.cache.add(prefixes)

            index = 0
            path = comb[0].prefix

            # in case of non-strongly connected automata a path between two consecutive
            # states might not exist, making the test case impossible
            possible_test_case = True
            while index < len(comb) - 1:
                path_between_states = hypothesis.get_shortest_path(comb[index], comb[index + 1])
                index += 1

                if not path_between_states:
                    possible_test_case = False
                    break

                path += path_between_states

            # Fix: was `if possible_test_case is None`, which can never be true since
            # possible_test_case is always a bool — incomplete paths were still executed.
            if not possible_test_case:
                continue

            path += tuple(choices(self.alphabet, k=self.random_walk_len))

            self.reset_hyp_and_sul(hypothesis)

            for i, p in enumerate(path):
                out_sul = self.sul.step(p)
                out_hyp = hypothesis.step(p)
                self.num_steps += 1

                if out_sul != out_hyp:
                    self.sul.post()
                    return path[:i + 1]

        return None
from abc import ABC, abstractmethod


class DataHandler(ABC):
    """
    Abstract class used for data loading for the Alergia algorithm. Usage of the class is not needed, but
    recommended for consistency.
    """

    @abstractmethod
    def tokenize_data(self, path):
        pass


class CharacterTokenizer(DataHandler):
    """
    Used for Markov Chain data parsing.
    Processes data where each input is a single character.
    Each input sequence is in a separate line.
    """

    def tokenize_data(self, path):
        """Return a list of character lists, one per line of the file at `path`."""
        # `with` ensures the file handle is closed (the previous version leaked it)
        with open(path) as f:
            return [list(line) for line in f.read().splitlines()]


class DelimiterTokenizer(DataHandler):
    """
    Used for Markov Chain data parsing.
    Processes data where each input is separated by the delimiter.
    Each input sequence is in a separate line.
    """

    def tokenize_data(self, path, delimiter=','):
        """Return a list of token lists, splitting each line of the file on `delimiter`."""
        with open(path) as f:
            return [line.split(delimiter) for line in f.read().splitlines()]


class IODelimiterTokenizer(DataHandler):
    """
    Used for Markov Decision Process data parsing.
    Processes data where each input/output pair is separated by `io_delimiter`, and pairs are separated
    by `word_delimiter`. Each [output, tuple(input, output)*] sequence is in a separate line.
    """

    def tokenize_data(self, path, io_delimiter='/', word_delimiter=','):
        """
        Return a list of sequences [initial_output, (input, output), ...] parsed from the file at `path`.
        Exits the process if a word does not contain exactly one io_delimiter.
        """
        data = []
        with open(path) as f:
            lines = f.read().splitlines()
        for line in lines:
            words = line.split(word_delimiter)
            seq = [words[0]]
            for w in words[1:]:
                i_o = w.split(io_delimiter)
                if len(i_o) != 2:
                    # fixed previously garbled error message
                    print(f'Data formatting error: each word must contain exactly one '
                          f'io_delimiter ({io_delimiter!r}) separating input and output.')
                    # equivalent to exit(-1), but does not rely on the site module
                    raise SystemExit(-1)
                seq.append(tuple([try_int(i_o[0]), try_int(i_o[1])]))
            data.append(seq)
        return data


def try_int(x):
    """Return int(x) if x is a string of digits, otherwise return x unchanged."""
    if str.isdigit(x):
        return int(x)
    return x
def support_automaton_arg(require_transform):
    """
    Decorator that lets sampling functions accept either an input alphabet or an Automaton as their first
    argument. When an automaton is passed and require_transform is True, it is replaced by its input alphabet
    before calling the wrapped function. With include_outputs=True the sampled input traces are additionally
    executed on the automaton and returned as input/output traces (requires an automaton argument).
    """
    def decorator(f):
        @wraps(f)
        def inner(alphabet, *args, include_outputs=False, **kwargs):
            automaton = alphabet if isinstance(alphabet, Automaton) else None
            if automaton is not None and require_transform:
                alphabet = automaton.get_input_alphabet()
            traces = f(alphabet, *args, **kwargs)
            if include_outputs:
                if automaton is None:
                    raise ValueError("automaton must be provided")
                traces = get_io_traces(automaton, traces)
            return traces
        return inner
    return decorator


@support_automaton_arg(True)
def sample_with_length_limits(alphabet, nr_samples, min_len, max_len):
    """Draw nr_samples random words over the alphabet, each of uniform random length in [min_len, max_len]."""
    samples = []
    for _ in range(nr_samples):
        word_len = randint(min_len, max_len)
        samples.append(choices(alphabet, k=word_len))
    return samples


@support_automaton_arg(True)
def sample_with_term_prob(alphabet, nr_samples, term_prob):
    """Draw nr_samples random words whose length is geometrically distributed: after each symbol the word
    terminates with probability term_prob."""
    samples = []
    for _ in range(nr_samples):
        word_len = 0
        while random() > term_prob:
            word_len += 1
        samples.append(choices(alphabet, k=word_len))
    return samples


@support_automaton_arg(False)
def get_complete_sample(automaton: DeterministicAutomaton):
    """Build a characterization sample: every state prefix combined with every single-letter (or empty) infix
    and every suffix from the automaton's characterization set."""
    alphabet = automaton.get_input_alphabet()
    automaton.compute_prefixes()
    char_set = automaton.compute_characterization_set()
    infixes = [(x,) for x in alphabet] + [tuple()]
    sample = []
    for state in automaton.states:
        for suffix in char_set:
            for infix in infixes:
                sample.append(state.prefix + infix + suffix)
    return sample
generate_random_dfa, 3 | generate_random_mealy_machine, 4 | generate_random_smm, 5 | generate_random_moore_machine, 6 | generate_random_markov_chain, 7 | generate_random_deterministic_automata, 8 | ) 9 | from .AutomatonGenerators import ( 10 | generate_random_mdp, 11 | generate_random_ONFSM, 12 | generate_random_sevpa, 13 | ) 14 | from .BenchmarkSULs import * 15 | from .DataHandler import ( 16 | DataHandler, 17 | CharacterTokenizer, 18 | DelimiterTokenizer, 19 | IODelimiterTokenizer, 20 | ) 21 | from .FileHandler import ( 22 | save_automaton_to_file, 23 | load_automaton_from_file, 24 | visualize_automaton, 25 | ) 26 | from .ModelChecking import ( 27 | model_check_experiment, 28 | mdp_2_prism_format, 29 | model_check_properties, 30 | get_properties_file, 31 | get_correct_prop_values, 32 | compare_automata, 33 | generate_test_cases, 34 | statistical_model_checking, 35 | bisimilar, 36 | ) 37 | from .HelperFunctions import ( 38 | make_input_complete, 39 | convert_i_o_traces_for_RPNI, 40 | is_balanced, 41 | generate_input_output_data_from_automata, 42 | generate_input_output_data_from_vpa, 43 | ) 44 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | AALpy is a light-weight automata learning library written in Python. 2 | You can start learning models of black-box systems with a few lines of code. 3 | 4 | AALpy supports both **active** and **passive** automata learning algorithms that can be used to learn a variety of modeling formalisms, including 5 | **deterministic**, **non-deterministic**, and **stochastic automata**, as well as **deterministic context-free grammars/pushdown automata**. 
6 | 7 | ## Installation 8 | 9 | Use the package manager [pip](https://pip.pypa.io/en/stable/) to install the latest release of AALpy: 10 | ```bash 11 | pip install aalpy 12 | ``` 13 | To install current version of the master branch (it might contain bugfixes and added functionalities between releases): 14 | ```bash 15 | pip install https://github.com/DES-Lab/AALpy/archive/master.zip 16 | ``` 17 | The minimum required version of Python is 3.6. 18 | Ensure that you have [Graphviz](https://graphviz.org/) installed and added to your path if you want to visualize models. 19 | 20 | For manual installation, clone the repo and install `pydot` (the only dependency). 21 | 22 | ## Documentation and Wiki 23 | 24 | If you are interested in automata learning or would like to understand the automata learning process in more detail, 25 | please check out our **Wiki**. On Wiki, you will find more detailed examples on how to use AALpy. 26 | - 27 | 28 | ***[Examples.py](https://github.com/DES-Lab/AALpy/blob/master/Examples.py)*** contains examples covering almost the whole of AALpy's functionality and its a great starting point. 29 | 30 | ### Usage 31 | 32 | All active automata learning procedures follow this high-level approach: 33 | - [Define the input alphabet and system under learning (SUL)](https://github.com/DES-Lab/AALpy/wiki/SUL-Interface,-or-How-to-Learn-Your-Systems) 34 | - [Choose the equivalence oracle](https://github.com/DES-Lab/AALpy/wiki/Equivalence-Oracles) 35 | - [Run the learning algorithm](https://github.com/DES-Lab/AALpy/wiki/Setting-Up-Learning) 36 | 37 | Passive learning algorithm simply require you to provide data in the appropriate format (check Wiki and Examples) and run the learning function. 
38 | 39 | ## Selected Applications 40 | AALpy has been used to: 41 | - [Learn Models of Bluetooth Low-Energy](https://github.com/apferscher/ble-learning) 42 | - [Find bugs in VIM text editor](https://github.com/DES-Lab/AALpy/discussions/13) 43 | - [Learn Input-Output Behavior of RNNs](https://github.com/DES-Lab/Extracting-FSM-From-RNNs) 44 | - [Learn Models of GIT](https://github.com/taburg/git-learning) 45 | - [Solve RL Problems](https://github.com/DES-Lab/Learning-Environment-Models-with-Continuous-Stochastic-Dynamics) 46 | 47 | ## Cite AALpy and Research Contact 48 | If you use AALpy in your research, please cite us with of the following: 49 | - [Extended version (preferred)](https://www.researchgate.net/publication/359517046_AALpy_an_active_automata_learning_library/citation/download) 50 | - [Tool paper](https://dblp.org/rec/conf/atva/MuskardinAPPT21.html?view=bibtex) 51 | 52 | If you have research suggestions or you need specific help concerning your research, feel free to start a [discussion](https://github.com/DES-Lab/AALpy/discussions) or contact [edi.muskardin@silicon-austria.com](mailto:edi.muskardin@silicon-austria.com). 53 | We are happy to help you and consult you in applying automata learning in various domains. 54 | 55 | ## Contributing 56 | Pull requests are welcome. For significant changes, please open an issue first to discuss what you would like to change. 57 | In case of any questions or possible bugs, please open issues. 
58 | -------------------------------------------------------------------------------- /docs/_config.yml: -------------------------------------------------------------------------------- 1 | title : AALpy 2 | author: Edi Muskardin 3 | description: An Active Automata Learning Library 4 | rss-description: An Active Automata Learning Library 5 | theme: jekyll-theme-slate 6 | google-site-verification: google306875680a34d740.html -------------------------------------------------------------------------------- /docs/arithmeticSevpa.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DES-Lab/AALpy/177862fb505df3f92a945861ceb1977962327327/docs/arithmeticSevpa.PNG -------------------------------------------------------------------------------- /docs/google306875680a34d740.html: -------------------------------------------------------------------------------- 1 | google-site-verification: google306875680a34d740.html -------------------------------------------------------------------------------- /docs/instructions.txt: -------------------------------------------------------------------------------- 1 | 1. For commenting use Google style with one additional space for between Args: and arguments, and one space between 2 | Returns and return values. 3 | 2. Install pdoc with pip install pdoc 4 | 3. 
pdoc -o docs/documentation ./aalpy 5 | -------------------------------------------------------------------------------- /docs/logo_dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DES-Lab/AALpy/177862fb505df3f92a945861ceb1977962327327/docs/logo_dark.png -------------------------------------------------------------------------------- /docs/logo_dark_cent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DES-Lab/AALpy/177862fb505df3f92a945861ceb1977962327327/docs/logo_dark_cent.png -------------------------------------------------------------------------------- /docs/logo_light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DES-Lab/AALpy/177862fb505df3f92a945861ceb1977962327327/docs/logo_light.png -------------------------------------------------------------------------------- /docs/logo_light_cent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DES-Lab/AALpy/177862fb505df3f92a945861ceb1977962327327/docs/logo_light_cent.png -------------------------------------------------------------------------------- /docs/mqtt_example.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DES-Lab/AALpy/177862fb505df3f92a945861ceb1977962327327/docs/mqtt_example.PNG -------------------------------------------------------------------------------- /docs/passiveLearning.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DES-Lab/AALpy/177862fb505df3f92a945861ceb1977962327327/docs/passiveLearning.png -------------------------------------------------------------------------------- /docs/regex_example_wiki.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/DES-Lab/AALpy/177862fb505df3f92a945861ceb1977962327327/docs/regex_example_wiki.png -------------------------------------------------------------------------------- /jAlergia/alergia.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DES-Lab/AALpy/177862fb505df3f92a945861ceb1977962327327/jAlergia/alergia.jar -------------------------------------------------------------------------------- /notebooks/ONFSM_Example.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "metadata": { 5 | "trusted": true 6 | }, 7 | "id": "strange-memory", 8 | "cell_type": "code", 9 | "source": "#Learning a ONFSM presented in 'Learning Finite State Models of Observable Nondeterministic Systems in a Testing Context'", 10 | "execution_count": 13, 11 | "outputs": [] 12 | }, 13 | { 14 | "metadata": { 15 | "trusted": true 16 | }, 17 | "id": "prompt-sample", 18 | "cell_type": "code", 19 | "source": "from aalpy.utils import get_benchmark_ONFSM\n\n# get the automaton \n\nonfsm = get_benchmark_ONFSM()\n\n# get its alphabet\n\nalphabet = onfsm.get_input_alphabet()", 20 | "execution_count": 18, 21 | "outputs": [] 22 | }, 23 | { 24 | "metadata": { 25 | "trusted": true 26 | }, 27 | "id": "threaded-mongolia", 28 | "cell_type": "code", 29 | "source": "from aalpy.SULs import OnfsmSUL\n\n# warp the benchmark in the SUL\n\nsul = OnfsmSUL(onfsm)", 30 | "execution_count": 19, 31 | "outputs": [] 32 | }, 33 | { 34 | "metadata": { 35 | "trusted": true 36 | }, 37 | "id": "brown-generic", 38 | "cell_type": "code", 39 | "source": "from aalpy.oracles import RandomWalkEqOracle\n\n# define the eq. 
oracle\n\neq_oracle = RandomWalkEqOracle(alphabet, sul, num_steps=5000, reset_prob=0.09, reset_after_cex=True)", 40 | "execution_count": 20, 41 | "outputs": [] 42 | }, 43 | { 44 | "metadata": { 45 | "trusted": true 46 | }, 47 | "id": "foster-background", 48 | "cell_type": "code", 49 | "source": "from aalpy.learning_algs import run_Lstar_ONFSM\n\n# start learning and print detailed output\n\nlearned_onfsm = run_Lstar_ONFSM(alphabet, sul, eq_oracle, n_sampling=200, print_level=2)\n", 50 | "execution_count": 21, 51 | "outputs": [ 52 | { 53 | "output_type": "stream", 54 | "text": "Starting learning with an all-weather assumption.\nSee run_Lstar_ONFSM documentation for more details about possible non-convergence.\nHypothesis 1: 3 states.\nHypothesis 2: 4 states.\n-----------------------------------\nLearning Finished.\nLearning Rounds: 2\nNumber of states: 4\nTime (in seconds)\n Total : 0.56\n Learning algorithm : 0.52\n Conformance checking : 0.04\nLearning Algorithm\n # Membership Queries : 22527\n # Steps : 114735\nEquivalence Query\n # Membership Queries : 432\n # Steps : 5027\n-----------------------------------\n", 55 | "name": "stdout" 56 | } 57 | ] 58 | }, 59 | { 60 | "metadata": { 61 | "trusted": true 62 | }, 63 | "id": "after-vacation", 64 | "cell_type": "code", 65 | "source": "# print the result\n\nprint(learned_onfsm)", 66 | "execution_count": 22, 67 | "outputs": [ 68 | { 69 | "output_type": "stream", 70 | "text": "digraph learnedModel {\ns0 [label=s0];\ns1 [label=s1];\ns2 [label=s2];\ns3 [label=s3];\ns0 -> s1 [label=\"a/0\"];\ns0 -> s2 [label=\"b/0\"];\ns0 -> s0 [label=\"b/2\"];\ns1 -> s0 [label=\"a/2\"];\ns1 -> s1 [label=\"b/3\"];\ns2 -> s3 [label=\"a/2\"];\ns2 -> s2 [label=\"b/0\"];\ns2 -> s2 [label=\"b/3\"];\ns3 -> s1 [label=\"a/2\"];\ns3 -> s3 [label=\"b/3\"];\n__start0 [label=\"\", shape=none];\n__start0 -> s0 [label=\"\"];\n}\n\n", 71 | "name": "stdout" 72 | } 73 | ] 74 | }, 75 | { 76 | "metadata": { 77 | "trusted": true 78 | }, 79 | "cell_type": 
"code", 80 | "source": "", 81 | "execution_count": null, 82 | "outputs": [] 83 | } 84 | ], 85 | "metadata": { 86 | "kernelspec": { 87 | "name": "python3", 88 | "display_name": "Python 3", 89 | "language": "python" 90 | }, 91 | "language_info": { 92 | "name": "python", 93 | "version": "3.7.10", 94 | "mimetype": "text/x-python", 95 | "codemirror_mode": { 96 | "name": "ipython", 97 | "version": 3 98 | }, 99 | "pygments_lexer": "ipython3", 100 | "nbconvert_exporter": "python", 101 | "file_extension": ".py" 102 | } 103 | }, 104 | "nbformat": 4, 105 | "nbformat_minor": 5 106 | } -------------------------------------------------------------------------------- /notebooks/RegexExample.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "metadata": { 5 | "trusted": true 6 | }, 7 | "id": "charming-absence", 8 | "cell_type": "code", 9 | "source": "from aalpy.base import SUL\nimport re\n\n# Create a system under learning that can learn any regular experssion\n\nclass RegexSUL(SUL):\n \"\"\"\n An example implementation of a system under learning that can be used to learn any regex expression.\n Note that the $ is added to the expression as in this SUL only exact matches are learned.\n \"\"\"\n def __init__(self, regex: str):\n super().__init__()\n self.regex = regex if regex[-1] == '$' else regex + '$'\n self.string = \"\"\n\n def pre(self):\n self.string = \"\"\n pass\n\n def post(self):\n self.string = \"\"\n pass\n\n def step(self, letter):\n \"\"\"\n\n Args:\n\n letter: single element of the input alphabet\n\n Returns:\n\n Whether the current string (previous string + letter) is accepted\n\n \"\"\"\n if letter is not None:\n self.string += str(letter)\n return True if re.match(self.regex, self.string) else False", 10 | "execution_count": 1, 11 | "outputs": [] 12 | }, 13 | { 14 | "metadata": { 15 | "trusted": true 16 | }, 17 | "id": "norman-bolivia", 18 | "cell_type": "code", 19 | "source": "# define a regex 
and its alphabet\nregex = '((0|1)*0)*1(11)*(0(0|1)*1)*0(00)*(1(0|1)*)*'\nalphabet = [0,1]", 20 | "execution_count": 2, 21 | "outputs": [] 22 | }, 23 | { 24 | "metadata": { 25 | "trusted": true 26 | }, 27 | "id": "industrial-sociology", 28 | "cell_type": "code", 29 | "source": "# pass the regex to the RegexSUL\n\nregex_sul = RegexSUL(regex)\n\nfrom aalpy.oracles import StatePrefixEqOracle\n\n# create the oracle\n\neq_oracle = StatePrefixEqOracle(alphabet, regex_sul, walks_per_state=15,\n walk_len=10)\n\nfrom aalpy.learning_algs import run_Lstar\n\n# start learning with no counterexample processing\n\nlearned_regex = run_Lstar(alphabet, regex_sul, eq_oracle, automaton_type='dfa', cex_processing=None)\n\nprint(learned_regex)", 30 | "execution_count": 3, 31 | "outputs": [ 32 | { 33 | "output_type": "stream", 34 | "text": "Hypothesis 1: 1 states.\nHypothesis 2: 4 states.\nHypothesis 3: 5 states.\n-----------------------------------\nLearning Finished.\nLearning Rounds: 3\nNumber of states: 5\nTime (in seconds)\n Total : 0.02\n Learning algorithm : 0.0\n Conformance checking : 0.02\nLearning Algorithm\n # Membership Queries : 37\n # MQ Saved by Caching : 37\n # Steps : 271\nEquivalence Query\n # Membership Queries : 75\n # Steps : 935\n-----------------------------------\ndigraph learnedModel {\ns0 [label=s0];\ns1 [label=s1];\ns2 [label=s2, shape=doublecircle];\ns3 [label=s3, shape=doublecircle];\ns4 [label=s4];\ns0 -> s0 [label=0];\ns0 -> s1 [label=1];\ns1 -> s2 [label=0];\ns1 -> s0 [label=1];\ns2 -> s4 [label=0];\ns2 -> s3 [label=1];\ns3 -> s3 [label=0];\ns3 -> s3 [label=1];\ns4 -> s2 [label=0];\ns4 -> s4 [label=1];\n__start0 [label=\"\", shape=none];\n__start0 -> s0 [label=\"\"];\n}\n\n", 35 | "name": "stdout" 36 | } 37 | ] 38 | } 39 | ], 40 | "metadata": { 41 | "kernelspec": { 42 | "name": "python3", 43 | "display_name": "Python 3", 44 | "language": "python" 45 | }, 46 | "language_info": { 47 | "name": "python", 48 | "version": "3.7.10", 49 | "mimetype": 
"text/x-python", 50 | "codemirror_mode": { 51 | "name": "ipython", 52 | "version": 3 53 | }, 54 | "pygments_lexer": "ipython3", 55 | "nbconvert_exporter": "python", 56 | "file_extension": ".py" 57 | } 58 | }, 59 | "nbformat": 4, 60 | "nbformat_minor": 5 61 | } -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | from os import path 3 | 4 | this_directory = path.abspath(path.dirname(__file__)) 5 | with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f: 6 | long_description = f.read() 7 | 8 | setup( 9 | name='aalpy', 10 | version='1.5.1', 11 | packages=['aalpy', 'aalpy.base', 'aalpy.SULs', 'aalpy.utils', 'aalpy.oracles', 'aalpy.automata', 12 | 'aalpy.learning_algs', 'aalpy.learning_algs.stochastic', 'aalpy.learning_algs.deterministic', 13 | 'aalpy.learning_algs.non_deterministic', 'aalpy.learning_algs.general_passive', 'aalpy.learning_algs.adaptive', 14 | 'aalpy.learning_algs.stochastic_passive', 'aalpy.learning_algs.deterministic_passive'], 15 | url='https://github.com/DES-Lab/AALpy', 16 | license='MIT', 17 | license_files=('LICENSE.txt',), 18 | long_description=long_description, 19 | long_description_content_type='text/markdown', 20 | author='Edi Muskardin', 21 | author_email='edi.muskardin@silicon-austria.com', 22 | description='An active automata learning library', 23 | classifiers=[ 24 | "Programming Language :: Python :: 3", 25 | "License :: OSI Approved :: MIT License", 26 | "Operating System :: OS Independent" 27 | ], 28 | install_requires=['pydot'], 29 | python_requires=">=3.6", 30 | ) 31 | 32 | # python setup.py sdist 33 | # pip wheel . 
-w dist 34 | # twine upload dist/* 35 | 36 | # for test pypi 37 | # twine upload --repository testpypi dist/* 38 | -------------------------------------------------------------------------------- /tests/oracles/test_baseOracle.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from aalpy.SULs import AutomatonSUL 4 | from aalpy.learning_algs import run_Lstar 5 | from aalpy.oracles import WMethodEqOracle 6 | from aalpy.utils import generate_random_dfa 7 | 8 | 9 | class BaseOracleTests(unittest.TestCase): 10 | """ 11 | Abstract class for testing oracles. 12 | """ 13 | 14 | def generate_dfa_suls(self, number_of_states=10, alphabet_size=10, num_accepting_states=5): 15 | """ 16 | Creates a random DFA and creates a learning and validation SUL, both SUL are identical. 17 | 18 | Args: 19 | number_of_states: number of states (Default value = 10) 20 | alphabet_size: size of alphabet (Default value = 10) 21 | num_accepting_states: number of accepting states (Default value = 5) 22 | 23 | Returns: learning_sul, validation_sul, alphabet 24 | 25 | """ 26 | alphabet = [*range(0, alphabet_size)] 27 | 28 | dfa = generate_random_dfa(number_of_states, alphabet, num_accepting_states) 29 | 30 | learning_sul = AutomatonSUL(dfa) 31 | validation_sul = AutomatonSUL(dfa) 32 | 33 | return learning_sul, validation_sul, alphabet 34 | 35 | def test_validate_eq_oracle(self, alphabet, eq_oracle, learning_sul, validation_sul): 36 | """ 37 | Validates the correctness of the given eq_oracle via WMethodEqOracle. 
38 | 39 | Args: 40 | alphabet: input alphabet 41 | eq_oracle: oracle to be validated 42 | learning_sul:the SUL form the eq_oracle 43 | validation_sul: identical SUL that was not used to learn the oracle 44 | 45 | Returns: 46 | 47 | """ 48 | learned_model = run_Lstar( 49 | alphabet, learning_sul, eq_oracle, 'dfa', print_level=2) 50 | 51 | validation_eq_oracle = WMethodEqOracle( 52 | alphabet, validation_sul, max_number_of_states=len(learned_model.states) + 2) 53 | self.assertIsNone(validation_eq_oracle.find_cex( 54 | learned_model), "Counterexample found by WMethodEqOracle") 55 | -------------------------------------------------------------------------------- /tests/oracles/test_kWayTransitionCoverageEqOracle.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from aalpy.oracles import KWayTransitionCoverageEqOracle 4 | from tests.oracles.test_baseOracle import BaseOracleTests 5 | 6 | 7 | class KWayTransitionCoverageEqOracleTests(BaseOracleTests): 8 | 9 | def test_default(self): 10 | learning_sul, validation_sul, alphabet = self.generate_dfa_suls() 11 | 12 | eq_oracle = KWayTransitionCoverageEqOracle(alphabet, learning_sul) 13 | self.test_validate_eq_oracle(alphabet, eq_oracle, learning_sul, validation_sul) 14 | 15 | def test_k_4(self): 16 | learning_sul, validation_sul, alphabet = self.generate_dfa_suls(5, 5, 2) 17 | 18 | eq_oracle = KWayTransitionCoverageEqOracle( 19 | alphabet, learning_sul, k=4) 20 | self.test_validate_eq_oracle(alphabet, eq_oracle, learning_sul, validation_sul) 21 | 22 | def test_method_prefix(self): 23 | learning_sul, validation_sul, alphabet = self.generate_dfa_suls() 24 | 25 | eq_oracle = KWayTransitionCoverageEqOracle( 26 | alphabet, learning_sul, method='prefix') 27 | self.test_validate_eq_oracle(alphabet, eq_oracle, learning_sul, validation_sul) 28 | 29 | @unittest.expectedFailure 30 | def test_max_number_of_steps_10(self): 31 | learning_sul, validation_sul, alphabet = 
self.generate_dfa_suls(50, 4, 4) 32 | 33 | eq_oracle = KWayTransitionCoverageEqOracle( 34 | alphabet, learning_sul, max_number_of_steps=10, max_path_len=10) 35 | self.test_validate_eq_oracle(alphabet, eq_oracle, learning_sul, validation_sul) 36 | 37 | def test_default_large_dfa(self): 38 | learning_sul, validation_sul, alphabet = self.generate_dfa_suls(50, 10, 10) 39 | 40 | eq_oracle = KWayTransitionCoverageEqOracle(alphabet, learning_sul) 41 | self.test_validate_eq_oracle(alphabet, eq_oracle, learning_sul, validation_sul) 42 | -------------------------------------------------------------------------------- /tests/test_charSet.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from aalpy.utils import get_Angluin_dfa, load_automaton_from_file 4 | from aalpy.utils.HelperFunctions import all_suffixes 5 | 6 | 7 | class TestCharSet(unittest.TestCase): 8 | 9 | def get_test_automata(self): 10 | return {"angluin_dfa": get_Angluin_dfa(), 11 | "angluin_mealy": load_automaton_from_file('../DotModels/Angluin_Mealy.dot', automaton_type='mealy'), 12 | "angluin_moore": load_automaton_from_file('../DotModels/Angluin_Moore.dot', automaton_type='moore'), 13 | "mqtt": load_automaton_from_file('../DotModels/MQTT/emqtt__two_client_will_retain.dot', 14 | automaton_type='mealy'), 15 | "openssl": load_automaton_from_file('../DotModels/TLS/OpenSSL_1.0.2_server_regular.dot', 16 | automaton_type='mealy'), 17 | "tcp_server": load_automaton_from_file('../DotModels/TCP/TCP_Linux_Client.dot', 18 | automaton_type='mealy')} 19 | 20 | def test_can_differentiate(self): 21 | automata = self.get_test_automata() 22 | for init_with_alphabet in [True, False]: 23 | for (online_suffix_closure, split_all_blocks) in [(False, False), (False, True), (True, False), 24 | (True, True)]: 25 | for test_aut_name in automata: 26 | print(f"Testing with {test_aut_name}") 27 | test_aut = automata[test_aut_name] 28 | char_set_init = list(map(lambda input: 
tuple([input]), test_aut.get_input_alphabet())) \ 29 | if init_with_alphabet else None 30 | if "dfa" in test_aut_name or "moore" in test_aut_name: 31 | char_set_init = [] if char_set_init is None else char_set_init 32 | char_set_init.append(()) 33 | char_set = test_aut.compute_characterization_set(char_set_init=char_set_init, 34 | online_suffix_closure=online_suffix_closure, 35 | split_all_blocks=split_all_blocks) 36 | print(f"Char. set {char_set}") 37 | all_responses = set() 38 | for s in test_aut.states: 39 | responses_from_s = [] 40 | for c in char_set: 41 | responses_from_s.append(tuple(test_aut.compute_output_seq(s, c))) 42 | all_responses.add(tuple(responses_from_s)) 43 | 44 | # every state must have a unique response to the whole characterization set 45 | assert len(all_responses) == len(test_aut.states) 46 | 47 | def test_suffix_closed(self): 48 | automata = self.get_test_automata() 49 | for init_with_alphabet in [True, False]: 50 | online_suffix_closure = True 51 | for split_all_blocks in [True, False]: 52 | for test_aut_name in automata: 53 | print(f"Testing with {test_aut_name}") 54 | test_aut = automata[test_aut_name] 55 | char_set_init = list(map(lambda input: tuple([input]), test_aut.get_input_alphabet())) \ 56 | if init_with_alphabet else None 57 | if "dfa" in test_aut_name or "moore" in test_aut_name: 58 | char_set_init = [] if char_set_init is None else char_set_init 59 | char_set_init.append(()) 60 | char_set = test_aut.compute_characterization_set(char_set_init=char_set_init, 61 | online_suffix_closure=online_suffix_closure, 62 | split_all_blocks=split_all_blocks) 63 | print(f"Char. 
set {char_set}") 64 | for s in char_set: 65 | for suffix in all_suffixes(s): 66 | if suffix not in char_set: 67 | print(suffix) 68 | assert suffix in char_set 69 | -------------------------------------------------------------------------------- /tests/test_file_operations.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from aalpy.utils import generate_random_markov_chain, load_automaton_from_file 4 | from aalpy.utils.BenchmarkSULs import * 5 | 6 | 7 | class TestFileHandler(unittest.TestCase): 8 | 9 | def test_saving_loading(self): 10 | try: 11 | type_model_pairs = [ 12 | ("dfa", get_Angluin_dfa()), 13 | ("mealy", load_automaton_from_file('../DotModels/Angluin_Mealy.dot', automaton_type='mealy')), 14 | ("moore", load_automaton_from_file('../DotModels/Angluin_Moore.dot', automaton_type='moore')), 15 | ("onfsm", get_benchmark_ONFSM()), 16 | ("mdp", get_small_pomdp()), 17 | ("mdp", load_automaton_from_file('../DotModels/MDPs/first_grid.dot', automaton_type='mdp')), 18 | ("smm", get_faulty_coffee_machine_SMM()), 19 | ("mc", generate_random_markov_chain(num_states=10)), 20 | ] 21 | 22 | for type, model in type_model_pairs: 23 | model.save() 24 | print(model) 25 | loaded_model = load_automaton_from_file('LearnedModel.dot', type) 26 | loaded_model.save() 27 | loaded_model2 = load_automaton_from_file('LearnedModel.dot', type) 28 | 29 | if type != 'mc': 30 | assert set(model.get_input_alphabet()) == set(loaded_model.get_input_alphabet()) 31 | assert set(model.get_input_alphabet()) == set(loaded_model2.get_input_alphabet()) 32 | 33 | if type in {'dfa', 'moore', 'mealy'}: 34 | assert model.compute_characterization_set() == loaded_model2.compute_characterization_set() 35 | 36 | assert True 37 | except: 38 | assert False 39 | -------------------------------------------------------------------------------- /tests/test_non_deterministic.py: 
-------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | 4 | class NonDeterministicTest(unittest.TestCase): 5 | 6 | def test_non_det(self): 7 | 8 | from aalpy.SULs import AutomatonSUL 9 | from aalpy.oracles import RandomWordEqOracle, RandomWalkEqOracle 10 | from aalpy.learning_algs import run_non_det_Lstar 11 | from aalpy.utils import get_benchmark_ONFSM 12 | 13 | onfsm = get_benchmark_ONFSM() 14 | alphabet = onfsm.get_input_alphabet() 15 | 16 | for _ in range(100): 17 | sul = AutomatonSUL(onfsm) 18 | 19 | oracle = RandomWordEqOracle(alphabet, sul, num_walks=500, min_walk_len=2, max_walk_len=5) 20 | 21 | learned_onfsm = run_non_det_Lstar(alphabet, sul, oracle, n_sampling=50, print_level=0) 22 | 23 | eq_oracle = RandomWalkEqOracle(alphabet, sul, num_steps=10000, reset_prob=0.09, 24 | reset_after_cex=True) 25 | 26 | cex = eq_oracle.find_cex(learned_onfsm) 27 | 28 | if cex or len(learned_onfsm.states) != len(onfsm.states): 29 | assert False 30 | assert True 31 | -------------------------------------------------------------------------------- /tests/test_stochastic.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import aalpy.paths 4 | from aalpy.SULs import AutomatonSUL 5 | from aalpy.learning_algs import run_stochastic_Lstar 6 | from aalpy.oracles import RandomWalkEqOracle 7 | from aalpy.utils import load_automaton_from_file 8 | 9 | 10 | class StochasticTest(unittest.TestCase): 11 | 12 | def test_learning_based_on_accuracy_based_stopping(self): 13 | 14 | example = 'first_grid' 15 | mdp = load_automaton_from_file(f'../DotModels/MDPs/{example}.dot', automaton_type='mdp') 16 | 17 | min_rounds = 10 18 | max_rounds = 500 19 | 20 | from aalpy.automata import StochasticMealyMachine 21 | from aalpy.utils import model_check_experiment, get_properties_file, \ 22 | get_correct_prop_values 23 | from aalpy.automata.StochasticMealyMachine import smm_to_mdp_conversion 
24 | 25 | aalpy.paths.path_to_prism = "C:/Program Files/prism-4.6/bin/prism.bat" 26 | aalpy.paths.path_to_properties = "../Benchmarking/prism_eval_props/" 27 | 28 | stopping_based_on_prop = (get_properties_file(example), get_correct_prop_values(example), 0.02) 29 | 30 | input_alphabet = mdp.get_input_alphabet() 31 | 32 | automaton_type = ['mdp', 'smm'] 33 | similarity_strategy = ['classic', 'normal', 'chi2'] 34 | cex_processing = [None, 'longest_prefix'] 35 | samples_cex_strategy = [None, 'bfs', 'random:200:0.3'] 36 | 37 | for aut_type in automaton_type: 38 | for strategy in similarity_strategy: 39 | for cex in cex_processing: 40 | for sample_cex in samples_cex_strategy: 41 | 42 | sul = AutomatonSUL(mdp) 43 | 44 | eq_oracle = RandomWalkEqOracle(input_alphabet, sul=sul, num_steps=200, 45 | reset_prob=0.25, 46 | reset_after_cex=True) 47 | 48 | learned_model = run_stochastic_Lstar(input_alphabet=input_alphabet, eq_oracle=eq_oracle, 49 | sul=sul, n_c=20, 50 | n_resample=1000, min_rounds=min_rounds, 51 | max_rounds=max_rounds, 52 | automaton_type=aut_type, strategy=strategy, 53 | cex_processing=cex, 54 | samples_cex_strategy=sample_cex, target_unambiguity=0.99, 55 | property_based_stopping=stopping_based_on_prop, 56 | print_level=0) 57 | 58 | if isinstance(learned_model, StochasticMealyMachine): 59 | mdp = smm_to_mdp_conversion(learned_model) 60 | else: 61 | mdp = learned_model 62 | 63 | results, diff = model_check_experiment(get_properties_file(example), 64 | get_correct_prop_values(example), mdp) 65 | 66 | for d in diff.values(): 67 | if d > stopping_based_on_prop[2]: 68 | assert False 69 | 70 | assert True 71 | -------------------------------------------------------------------------------- /tests/tests_imports.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | 4 | class ImportTest(unittest.TestCase): 5 | 6 | def test_imports(self): 7 | try: 8 | import aalpy.utils 9 | import aalpy.oracles 10 | import 
aalpy.utils 11 | import aalpy.SULs 12 | import aalpy.learning_algs 13 | import aalpy.base 14 | import aalpy.base.Automaton 15 | import aalpy.utils.HelperFunctions 16 | import aalpy.utils.DataHandler 17 | import aalpy.utils.AutomatonGenerators 18 | import aalpy.utils.ModelChecking 19 | import aalpy.utils.FileHandler 20 | except: 21 | assert False 22 | assert True 23 | --------------------------------------------------------------------------------