├── .flake8 ├── .gitattributes ├── .github └── workflows │ └── python.yml ├── .gitignore ├── .pre-commit-config.yaml ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── README.rst ├── large ├── apa_0_23_0.jar ├── tlc_2_17.jar └── tlc_2_18.jar ├── modelator_py ├── __init__.py ├── apalache │ ├── __init__.py │ ├── apalache_help_0.23.0.txt │ ├── args.py │ ├── cli.py │ ├── pure.py │ └── raw.py ├── cli.py ├── helper.py ├── tlc │ ├── __init__.py │ ├── args.py │ ├── cli.py │ ├── pure.py │ ├── raw.py │ └── tlc_help_2.18.txt └── util │ ├── __init__.py │ ├── cli.py │ ├── informal_trace_format.py │ ├── tla │ ├── LICENSE │ ├── README.md │ ├── README_license.md │ ├── __init__.py │ ├── _combinators.py │ ├── _error.py │ ├── _expr_parser.py │ ├── _intf.py │ ├── _location.py │ ├── _module_parser.py │ ├── _optable.py │ ├── _proof_parser.py │ ├── _tla_combinators.py │ ├── ast.py │ ├── examples │ │ ├── Counter.tla │ │ ├── README.md │ │ ├── __init__.py │ │ ├── parsing_tla_expressions.py │ │ ├── parsing_tla_modules.py │ │ └── syntax_tree_visitor.py │ ├── iter.py │ ├── lex.py │ ├── parser.py │ ├── to_str.py │ ├── tokens.py │ └── visit.py │ └── tlc │ ├── __init__.py │ ├── cli.py │ ├── itf.py │ ├── state_to_informal_trace_format.py │ └── stdout_to_informal_trace_format.py ├── pylama.ini ├── pyproject.toml ├── samples ├── .gitignore ├── Hello.cfg ├── Hello.tla ├── README.md ├── TlcTraces.out ├── cli_input_apalache_pure.json ├── cli_input_apalache_raw.json ├── cli_input_gen.py ├── cli_input_tlc_itf.json ├── cli_input_tlc_pure.json ├── cli_input_tlc_raw.json ├── helper.py ├── library_usage.py ├── requirements.txt └── usage.md └── tests ├── __init__.py ├── apalache ├── __init__.py └── test_apalache.py ├── helper.py ├── resource ├── .gitignore ├── HelloWorld.cfg ├── HelloWorld.tla ├── HelloWorldTyped.cfg ├── HelloWorldTyped.tla ├── HelloWorld_util_tlc_itf.json ├── TlcLassoTraceParse.cfg ├── TlcLassoTraceParse.tla ├── TlcLassoTraceParse.txt ├── TlcMultipleTraceParse.cfg ├── TlcMultipleTraceParse.tla ├── TlcMultipleTraceParse.txt ├── TlcMultipleTraceParseCutoff0.txt ├── TlcMultipleTraceParseCutoff1.txt ├── TlcMultipleTraceParseSimulationMode.txt ├── TlcMultipleTraceParse_RealWorld0.txt ├── TlcMultipleTraceParse_RealWorld1.txt ├── TlcStateExpressionExample0.txt ├── TlcStateExpressionExample1.txt ├── TlcStateExpressionExample2.txt ├── TlcStateExpressionExample3.txt ├── TlcStateExpressionExample4.txt ├── TlcStateExpressionExample5.txt ├── TlcStateExpressionExample6.txt ├── TlcStateExpressionExample7.txt ├── TlcStateExpressionExample8.txt ├── TlcTraceAbsenceParse.txt ├── TlcTraceParse.cfg ├── TlcTraceParse.tla ├── TlcTraceParse.txt ├── TlcTraceParseInitState.txt ├── TlcTraceParseInitStateContinue.txt └── TlcTraceParseSimulationMode.txt ├── tlc ├── __init__.py └── test_tlc.py └── util ├── __init__.py ├── tla ├── LICENSE ├── README.md ├── README_license.md ├── lexer_test.py ├── parser_test.py └── tlaps_lib_test.py └── tlc ├── __init__.py ├── state_to_informal_trace_format_test.py └── stdout_to_informal_trace_format_test.py /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | exclude = modelator_py/util/tla/**,tests/util/tla/**,samples/**,**/__init__.py 3 | max-line-length = 88 4 | extend-ignore = E203,E501,W605 5 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | .jar filter=lfs diff=lfs merge=lfs -text 2 | 
-------------------------------------------------------------------------------- /.github/workflows/python.yml: -------------------------------------------------------------------------------- 1 | name: Python 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | paths: 7 | - .github/workflows/python.yml 8 | - modelator_py/** 9 | - tests/** 10 | - pyproject.toml 11 | - poetry.lock 12 | 13 | jobs: 14 | linting: 15 | strategy: 16 | matrix: 17 | os: ["ubuntu-latest"] 18 | python-version: ["3.8", "3.10"] 19 | runs-on: ${{ matrix.os }} 20 | steps: 21 | - uses: actions/checkout@v3 22 | - uses: actions/setup-python@v4 23 | with: 24 | python-version: ${{ matrix.python-version }} 25 | - uses: actions/cache@v3 26 | id: pip-cache 27 | with: 28 | path: ~/.cache/pip 29 | key: ${{ runner.os }}-pip 30 | - name: Install dependencies 31 | run: python -m pip install pyflakes==2.4.0 black pylama[all] 32 | - name: Run linters 33 | run: | 34 | black . --check 35 | pylama -l pyflakes,pycodestyle,isort 36 | 37 | compile: 38 | strategy: 39 | matrix: 40 | os: ["ubuntu-latest"] 41 | python-version: ["3.8", "3.10"] 42 | runs-on: ${{ matrix.os }} 43 | steps: 44 | - name: Check out repository 45 | uses: actions/checkout@v3 46 | - name: Set up python ${{ matrix.python-version }} 47 | id: setup-python 48 | uses: actions/setup-python@v4 49 | with: 50 | python-version: ${{ matrix.python-version }} 51 | - name: Compile python source code 52 | run: python -m compileall modelator_py tests 53 | 54 | test: 55 | strategy: 56 | fail-fast: true 57 | matrix: 58 | os: ["ubuntu-latest", "macos-latest"] 59 | python-version: ["3.8", "3.10"] 60 | runs-on: ${{ matrix.os }} 61 | steps: 62 | - name: Check out repository 63 | uses: actions/checkout@v3 64 | - name: Set up python ${{ matrix.python-version }} 65 | id: setup-python 66 | uses: actions/setup-python@v4 67 | with: 68 | python-version: ${{ matrix.python-version }} 69 | - name: Install Poetry 70 | uses: snok/install-poetry@v1 71 | with: 72 | virtualenvs-create: true 73 | virtualenvs-in-project: true 74 | - name: Load cached venv 75 | id: cached-poetry-dependencies 76 | uses: actions/cache@v3 77 | with: 78 | path: .venv 79 | key: venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('**/poetry.lock') }} 80 | - name: Install dependencies 81 | if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true' 82 | run: poetry install --no-interaction --no-root 83 | - name: Run tests 84 | run: | 85 | source .venv/bin/activate 86 | pytest 87 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | # See https://pre-commit.com for more information 2 | # See https://pre-commit.com/hooks.html for more hooks 3 | repos: 4 | - repo: https://github.com/pre-commit/pre-commit-hooks 5 | rev: v4.3.0 6 | hooks: 7 | - id: trailing-whitespace 8 | - id: end-of-file-fixer 9 | - id: check-yaml 10 | - id: check-added-large-files 11 | - repo: https://github.com/psf/black 12 | rev: 22.10.0 13 | hooks: 14 | - id: black 15 | - repo: https://github.com/pre-commit/mirrors-prettier 16 | rev: v2.7.1 17 | hooks: 18 | - id: prettier 19 | - repo: https://github.com/rnbguy/pylama-pre-commit 20 | rev: 0.2.0 21 | hooks: 22 | - id: pylama 23 | args: ["-l", "pyflakes,pycodestyle,isort"] 24 | pass_filenames: false 25 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: 
-------------------------------------------------------------------------------- 1 | 2 | # Contributor Covenant Code of Conduct 3 | 4 | ## Our Pledge 5 | 6 | We as members, contributors, and leaders pledge to make participation in our 7 | community a harassment-free experience for everyone, regardless of age, body 8 | size, visible or invisible disability, ethnicity, sex characteristics, gender 9 | identity and expression, level of experience, education, socio-economic status, 10 | nationality, personal appearance, race, caste, color, religion, or sexual 11 | identity and orientation. 12 | 13 | We pledge to act and interact in ways that contribute to an open, welcoming, 14 | diverse, inclusive, and healthy community. 15 | 16 | ## Our Standards 17 | 18 | Examples of behavior that contributes to a positive environment for our 19 | community include: 20 | 21 | * Demonstrating empathy and kindness toward other people 22 | * Being respectful of differing opinions, viewpoints, and experiences 23 | * Giving and gracefully accepting constructive feedback 24 | * Accepting responsibility and apologizing to those affected by our mistakes, 25 | and learning from the experience 26 | * Focusing on what is best not just for us as individuals, but for the overall 27 | community 28 | 29 | Examples of unacceptable behavior include: 30 | 31 | * The use of sexualized language or imagery, and sexual attention or advances of 32 | any kind 33 | * Trolling, insulting or derogatory comments, and personal or political attacks 34 | * Public or private harassment 35 | * Publishing others' private information, such as a physical or email address, 36 | without their explicit permission 37 | * Other conduct which could reasonably be considered inappropriate in a 38 | professional setting 39 | 40 | ## Enforcement Responsibilities 41 | 42 | Community leaders are responsible for clarifying and enforcing our standards of 43 | acceptable behavior and will take appropriate and fair corrective action in 44 | response to any behavior that they deem inappropriate, threatening, offensive, 45 | or harmful. 46 | 47 | Community leaders have the right and responsibility to remove, edit, or reject 48 | comments, commits, code, wiki edits, issues, and other contributions that are 49 | not aligned to this Code of Conduct, and will communicate reasons for moderation 50 | decisions when appropriate. 51 | 52 | ## Scope 53 | 54 | This Code of Conduct applies within all community spaces, and also applies when 55 | an individual is officially representing the community in public spaces. 56 | Examples of representing our community include using an official e-mail address, 57 | posting via an official social media account, or acting as an appointed 58 | representative at an online or offline event. 59 | 60 | ## Enforcement Guidelines 61 | 62 | Community leaders will follow these Community Impact Guidelines in determining 63 | the consequences for any action they deem in violation of this Code of Conduct: 64 | 65 | ### 1. Correction 66 | 67 | **Community Impact**: Use of inappropriate language or other behavior deemed 68 | unprofessional or unwelcome in the community. 69 | 70 | **Consequence**: A private, written warning from community leaders, providing 71 | clarity around the nature of the violation and an explanation of why the 72 | behavior was inappropriate. A public apology may be requested. 73 | 74 | ### 2. Warning 75 | 76 | **Community Impact**: A violation through a single incident or series of 77 | actions. 
78 | 79 | **Consequence**: A warning with consequences for continued behavior. No 80 | interaction with the people involved, including unsolicited interaction with 81 | those enforcing the Code of Conduct, for a specified period of time. This 82 | includes avoiding interactions in community spaces as well as external channels 83 | like social media. Violating these terms may lead to a temporary or permanent 84 | ban. 85 | 86 | ### 3. Temporary Ban 87 | 88 | **Community Impact**: A serious violation of community standards, including 89 | sustained inappropriate behavior. 90 | 91 | **Consequence**: A temporary ban from any sort of interaction or public 92 | communication with the community for a specified period of time. No public or 93 | private interaction with the people involved, including unsolicited interaction 94 | with those enforcing the Code of Conduct, is allowed during this period. 95 | Violating these terms may lead to a permanent ban. 96 | 97 | ### 4. Permanent Ban 98 | 99 | **Community Impact**: Demonstrating a pattern of violation of community 100 | standards, including sustained inappropriate behavior, harassment of an 101 | individual, or aggression toward or disparagement of classes of individuals. 102 | 103 | **Consequence**: A permanent ban from any sort of public interaction within the 104 | community. 105 | 106 | ## Attribution 107 | 108 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 109 | version 2.1, available at 110 | [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. 111 | 112 | Community Impact Guidelines were inspired by 113 | [Mozilla's code of conduct enforcement ladder][Mozilla CoC]. 114 | 115 | For answers to common questions about this code of conduct, see the FAQ at 116 | [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at 117 | [https://www.contributor-covenant.org/translations][translations]. 118 | 119 | [homepage]: https://www.contributor-covenant.org 120 | [v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html 121 | [Mozilla CoC]: https://github.com/mozilla/diversity 122 | [FAQ]: https://www.contributor-covenant.org/faq 123 | [translations]: https://www.contributor-covenant.org/translations 124 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | Thank you for your interest in contributing to modelator-py! 4 | 5 | This document gives best practices for contributing: 6 | 7 | - [Development](#development) - how to develop 8 | - [Architecture](#architecture-summary) - project architecture 9 | - [Proposing Changes](#proposing-changes) - process for agreeing to changes 10 | - [Forking](#forking) - fork the repo to make pull requests 11 | - [Pull Requests](#pull-requests) - what makes a good pull request 12 | 13 | ## Development 14 | 15 | ### Dependencies 16 | 17 | - [pyenv](https://github.com/pyenv/pyenv) should be used to manage local Python versions. It can be installed with e.g. `brew install pyenv` (Linux and Windows users should check instructions). 18 | - [Poetry](https://github.com/python-poetry/poetry) is used to manage dependencies and packaging. See the [github](https://github.com/python-poetry/poetry) page for instructions. 19 | - [Git Large File Storage](https://git-lfs.github.com/) is used to efficiently store large files at Github. The model checker `.jars` are stored this way. 
20 | 21 | ### Python version 22 | 23 | The Python version used is `3.9.9`. 24 | 25 | ### Packaging 26 | 27 | To publish a new release, increment the version in `pyproject.toml`. 28 | 29 | Then 30 | 31 | ```bash 32 | poetry build; 33 | poetry publish; # requires credentials 34 | ``` 35 | 36 | ### Usage 37 | 38 | Please see [usage](./samples/usage.md). 39 | 40 | ### Setting up a workstation 41 | 42 | 1. Install `pyenv` 43 | 2. Install `poetry` 44 | 3. Clone this repo 45 | 4. `cd ` 46 | 5. `pyenv install 3.9.9` 47 | 6. `poetry env use python` 48 | 7. `poetry shell` 49 | 8. `poetry install` 50 | 9. `code .` to open a VSCode instance with a Python 3.9.9 interpreter (assuming VSCode) 51 | 52 | ### Useful commands 53 | 54 | With Poetry installed run `poetry install`. Then 55 | 56 | - Run the cli program: `poetry run modelator ` (entrypoint is `modelator_py.cli:cli`) 57 | - Tests: `poetry run pytest` or use your code editor. VSCode has built-in support. 58 | - Tests with coverage: `poetry run pytest --cov=modelator_py tests/` 59 | - Tests with logging output to terminal: `poetry run pytest --log-cli-level=debug` 60 | - Specific test: `poetry run pytest tests//.py -k 'test_'` 61 | - Specific test and display stdout: `poetry run pytest tests//.py -s -k 'test_'` 62 | - Run the pre-commit hooks: `pre-commit run --all-files` 63 | - Run linter manually: `flake8 .` 64 | - Run formatter manually: `black .` 65 | - Run static type checker: `mypy .` 66 | - Sort imports: `isort .` 67 | 68 | ### VSCode Tips 69 | 70 | VSCode has solid support for Python development using Poetry. If VSCode does not pick up on Poetry, try navigating to this directory and executing 71 | 72 | ```bash 73 | poetry shell; 74 | code .; 75 | ``` 76 | 77 | Ensure that the bottom left of your VSCode window shows that you are using the correct Python environment. 78 | 79 | The branch [vscode-configuration-template](https://github.com/informalsystems/mbt-python/tree/vscode-configuration-template) contains a .vscode directory which can be used as a starting point for configuring your dev environment. 80 | 81 | ### Troubleshooting 82 | 83 | This project has been set up following the guidelines at [this](https://mitelman.engineering/blog/python-best-practice/automating-python-best-practices-for-a-new-project/) blog post. The page contains useful context for troubleshooting. 84 | 85 | If you have difficulty installing Poetry with `curl -sSL https://install.python-poetry.org | python3 -` on macOS, try adding `eval "$(pyenv init --path)";` to your .bashrc or .zshrc file (given that pyenv is installed). 86 | 87 | ## Architecture summary 88 | 89 | The project is set up as both a cli program and a collection of pure functions. The cli is an interface to the pure functions, but the pure functions can also be used directly by including modelator-py as a dependency and importing it into your Python program (a short sketch is given at the end of this section). The launch point of the cli is in `modelator_py/cli.py::cli`. This is specified in `pyproject.toml::tool.poetry.scripts`. 90 | 91 | The cli uses the [python-fire](https://github.com/google/python-fire) library. Documentation for cli commands is inferred [[1](https://github.com/google/python-fire/blob/master/fire/docstrings.py),[2]()] from [python docstrings](https://peps.python.org/pep-0257/). [Here's](https://github.com/informalsystems/modelator-py/blob/c87d0985d9b40d2d2980216eadbbf2b3ca2e8998/modelator/cli.py#L15-L26) an example.
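For example, here is a minimal sketch of driving TLC through the pure library function (the jar path is a placeholder and must point at a local copy of `tla2tools.jar`; see `samples/library_usage.py` and `samples/usage.md` for complete, working examples):

```python
from modelator_py.tlc import tlc_pure

# A tiny bounded model and TLC configuration, passed as in-memory strings.
TLA = """
---- MODULE Hello ----
EXTENDS Naturals
VARIABLE x
Init == x = 0
Next == x' = (x + 1) % 5
====
"""

CFG = """
INIT Init
NEXT Next
"""

cmd = {
    "jar": "/path/to/tla2tools.jar",  # placeholder: absolute path to a TLC jar
    "args": {"file": "Hello.tla", "config": "Hello.cfg"},
    "files": {"Hello.tla": TLA, "Hello.cfg": CFG},  # file name -> file content
}

# Runs TLC in a temporary directory and returns a dict with "stdout",
# "stderr", "return_code", "shell_cmd" and any newly created "files".
result = tlc_pure(json=cmd)
print(result["return_code"])
print(result["stdout"])
```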
92 | 93 | ## Proposing Changes 94 | 95 | When contributing to the project, following the guidelines will increase the likelihood of changes being accepted quickly. 96 | 97 | ### Create/locate an issue 98 | 99 | 1. A good place to start is to search through the [existing 100 | issues](https://github.com/informalsystems/modelator-py/issues) for the 101 | problem you're encountering. 102 | 2. If no issues exist, submit one describing the _problem_ you're 103 | facing, as well as a _definition of done_. A definition of done, which tells 104 | us how to know when the issue can be closed, helps us to scope the problem 105 | and give it definite boundaries. Without a definition of done, issues can 106 | become vague. 107 | 108 | ## Forking 109 | 110 | If you do not have write access to the repository, your contribution should be 111 | made through a fork on GitHub. Fork the repository, contribute to your fork, and 112 | make a pull request back upstream. 113 | 114 | When forking, add your fork's URL as a new git remote in your local copy of the 115 | repo. For instance, to create a fork and work on a branch of it: 116 | 117 | - Create the fork on GitHub, using the fork button. 118 | - `cd` to the original clone of the repo on your machine 119 | - `git remote rename origin upstream` 120 | - `git remote add origin git@github.com:` 121 | 122 | Now `origin` refers to your fork and `upstream` refers to this version. 123 | 124 | `git push -u origin master` to update the fork, and make pull requests against 125 | this repo. 126 | 127 | To pull in updates from the origin repo, run 128 | 129 | - `git fetch upstream` 130 | - `git rebase upstream/master` (or whatever branch you want) 131 | 132 | ## Pull Requests 133 | 134 | PRs should: 135 | 136 | - make reference to an issue outlining the context. 137 | - update any relevant documentation and include tests. 138 | 139 | Commits should be concise but informative, and moderately clean. Commits will be 140 | squashed into a single commit for the PR with all the commit messages. 141 | 142 | ### Draft PRs 143 | 144 | When the problem as well as proposed solution are well understood, changes 145 | should start with a [draft pull request](https://github.blog/2019-02-14-introducing-draft-pull-requests/) 146 | against master. The draft signals that work is underway. When the work is ready 147 | for feedback, hitting "Ready for Review" will signal to the maintainers to take 148 | a look. Maintainers will not review draft PRs. 149 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # modelator-py 2 | 3 | |⚠️ The tools in this repo are unstable and may be subject to major changes ⚠️| 4 | |-| 5 | 6 | [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE) 7 | [![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-2.1-4baaaa.svg)](CODE_OF_CONDUCT.md) 8 | [![PyPI](https://img.shields.io/pypi/v/modelator-py?label=pypi%20package)](https://pypi.python.org/pypi/modelator-py/) 9 | [![Downloads](https://pepy.tech/badge/modelator-py/month)](https://pepy.tech/project/modelator-py) 10 | 11 | _**Lightweight utilities to assist model writing and model-based testing activities using the TLA+ ecosystem.**_ 12 | 13 | ## What is this project? 14 | 15 | A collection of cli utilities and library functions to reduce leg-work when developing TLA+ models, running model checkers, and doing model-based testing. 
The utilities are also intended to act as building blocks for tool development in the TLA+ ecosystem. 16 | 17 | ### What can it do right now? 18 | 19 | Currently, the package provides a cli and library functions that implement the following utilities: 20 | 21 | - [x] Run the [TLC](https://github.com/tlaplus/tlaplus) model checker without side effects (runs in a temporary directory which is cleaned up afterwards) 22 | - [x] Run the [TLC](https://github.com/tlaplus/tlaplus) model checker programmatically (reads and returns json data) 23 | - [x] Run the [Apalache](https://github.com/informalsystems/apalache) model checker without side effects (runs in a temporary directory which is cleaned up afterwards) 24 | - [x] Run the [Apalache](https://github.com/informalsystems/apalache) model checker programmatically (reads and returns json data) 25 | - [x] Extract traces from TLC output in the [Informal Trace Format](https://apalache.informal.systems/docs/adr/015adr-trace.html?highlight=trace%20format#the-itf-format) (a concise and machine-readable counterexample representation) 26 | 27 | Together these allow clean programmatic access to the model checkers and other utilities. 28 | 29 | ### What will it do in the future? 30 | 31 | The model-based testing capabilities developed at Informal are currently in the [modelator](https://github.com/informalsystems/modelator) tool and are being migrated to a multi-language architecture. Please expect more utilities and more tooling soon. 32 | 33 | ## Usage 34 | 35 | Please see [usage](./samples/usage.md). 36 | 37 | ## Running the code in this repository 38 | 39 | Please see [contributing](./CONTRIBUTING.md). 40 | 41 | ## Contributing 42 | 43 | Please see [contributing](./CONTRIBUTING.md). 44 | 45 | ## License 46 | 47 | Copyright © 2021 Informal Systems Inc. and modelator authors. 48 | 49 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use the files in this repository except in compliance with the License.
You may obtain a copy of the License at 50 | 51 | https://www.apache.org/licenses/LICENSE-2.0 52 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/informalsystems/modelator-py/98c6f3356b39e653ade2624a07540c174e97af9f/README.rst -------------------------------------------------------------------------------- /large/apa_0_23_0.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/informalsystems/modelator-py/98c6f3356b39e653ade2624a07540c174e97af9f/large/apa_0_23_0.jar -------------------------------------------------------------------------------- /large/tlc_2_17.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/informalsystems/modelator-py/98c6f3356b39e653ade2624a07540c174e97af9f/large/tlc_2_17.jar -------------------------------------------------------------------------------- /large/tlc_2_18.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/informalsystems/modelator-py/98c6f3356b39e653ade2624a07540c174e97af9f/large/tlc_2_18.jar -------------------------------------------------------------------------------- /modelator_py/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = "0.2.6" 2 | -------------------------------------------------------------------------------- /modelator_py/apalache/__init__.py: -------------------------------------------------------------------------------- 1 | from .args import ApalacheArgs 2 | from .pure import PureCmd as ApalachePureCmd 3 | from .pure import apalache_pure 4 | from .raw import RawCmd as ApalacheRawCmd 5 | from .raw import apalache_raw 6 | 7 | __all__ = [ 8 | "ApalacheArgs", 9 | "ApalachePureCmd", 10 | "ApalacheRawCmd", 11 | "apalache_pure", 12 | "apalache_raw", 13 | ] 14 | -------------------------------------------------------------------------------- /modelator_py/apalache/apalache_help_0.23.0.txt: -------------------------------------------------------------------------------- 1 | Usage 2 | 3 | apalache-mc [options] command [command options] 4 | 5 | Options 6 | 7 | --config-file : configuration to read from (JSON and HOCON formats supported). Overrides any local .aplache.cfg files. 
(overrides envvar CONFIG_FILE) 8 | --debug : extensive logging in detailed.log and log.smt, default: false 9 | --out-dir : where all output files will be written, default: ./_apalache-out (overrides envvar OUT_DIR) 10 | --profiling : write general profiling data to profile-rules.txt in the run directory, default: false (overrides envvar PROFILING) 11 | --run-dir : additional directory wherein output files for this run will be written directly, default: none (overrides envvar RUN_DIR) 12 | --smtprof : profile SMT constraints in log.smt, default: false 13 | --write-intermediate : write intermediate output files to `out-dir`, default: false (overrides envvar WRITE_INTERMEDIATE) 14 | 15 | Commands 16 | 17 | check [command options] : Check a TLA+ specification 18 | --algo=STRING : the search algorithm: offline, incremental, parallel (soon), default: incremental 19 | --cinit=STRING : the name of an operator that initializes CONSTANTS, 20 | default: None 21 | --config=STRING : configuration file in TLC format, 22 | default: .cfg, or none if .cfg not present 23 | --discard-disabled : pre-check, whether a transition is disabled, and discard it, to make SMT queries smaller, default: true 24 | --init=STRING : the name of an operator that initializes VARIABLES, 25 | default: Init 26 | --inv=STRING : the name of an invariant operator, e.g., Inv 27 | --length=NUM : maximal number of Next steps, default: 10 28 | --max-error=NUM : do not stop on first error, but produce up to a given number of counterexamples (fine tune with --view), default: 1 29 | --next=STRING : the name of a transition operator, default: Next 30 | --no-deadlock : do not check for deadlocks, default: true 31 | --nworkers=NUM : the number of workers for the parallel checker (soon), default: 1 32 | --smt-encoding : the SMT encoding: oopsla19, arrays (experimental), default: oopsla19 (overrides envvar SMT_ENCODING) 33 | --tuning=STRING : filename of the tuning options, see docs/tuning.md 34 | --tuning-options=STRING : tuning options as arguments in the format key1=val1:key2=val2:key3=val3 (priority over --tuning) 35 | --view=STRING : the state view to use with --max-error=n, default: transition index 36 | : a file containing a TLA+ specification (.tla or .json) 37 | 38 | config [command options] : Configure Apalache options 39 | --enable-stats : Let Apalache submit usage statistics to tlapl.us 40 | (shared with TLC and TLA+ Toolbox) 41 | See: https://apalache.informal.systems/docs/apalache/statistics.html 42 | 43 | parse [command options] : Parse a TLA+ specification and quit 44 | --output : file to which the parsed source is written (.tla or .json), default: None 45 | : a file containing a TLA+ specification (.tla or .json) 46 | 47 | server : Run apalache in server mode (not yet supported) 48 | 49 | test [command options] : Quickly test a TLA+ specification 50 | --cinit=STRING : the name of an operator that initializes CONSTANTS, 51 | default: None 52 | : a file containing a TLA+ specification (.tla or .json) 53 | : the name of an operator to prepare the test, similar to Init 54 | : the name of an action to execute, similar to Next 55 | : the name of an operator that should evaluate to true after executing `action` 56 | 57 | transpile [command options] : Transpile and quit 58 | --cinit=STRING : the name of an operator that initializes CONSTANTS, 59 | default: None 60 | --config=STRING : configuration file in TLC format, 61 | default: .cfg, or none if .cfg not present 62 | --init=STRING : the name of an operator that initializes 
VARIABLES, 63 | default: Init 64 | --inv=STRING : the name of an invariant operator, e.g., Inv 65 | --length=NUM : maximal number of Next steps, default: 10 66 | --next=STRING : the name of a transition operator, default: Next 67 | : a file containing a TLA+ specification (.tla or .json) 68 | 69 | typecheck [command options] : Check types in a TLA+ specification 70 | --infer-poly : allow the type checker to infer polymorphic types, default: true 71 | --output : file to which the typechecked source is written (.tla or .json), default: None 72 | : a TLA+ specification (.tla or .json) 73 | -------------------------------------------------------------------------------- /modelator_py/apalache/args.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import Optional 3 | 4 | 5 | @dataclass 6 | class ApalacheArgs: 7 | # The Apalache to run. (check | config | parse | test | transpile | typecheck | noop) 8 | cmd: Optional[str] = None 9 | file: Optional[str] = None # A file containing a TLA+ specification (.tla or .json) 10 | # configuration to read from (JSON and HOCON formats supported). Overrides any local .aplache.cfg files. (overrides envvar CONFIG_FILE) 11 | config_file: Optional[str] = None 12 | debug: Optional[ 13 | str 14 | ] = None # extensive logging in detailed.log and log.smt, default: false 15 | # where all output files will be written, default: ./_apalache-out (overrides envvar OUT_DIR) 16 | out_dir: Optional[str] = None 17 | # write general profiling data to profile-rules.txt in the run directory, default: false (overrides envvar PROFILING) 18 | profiling: Optional[str] = None 19 | # additional directory wherein output files for this run will be written directly, default: none (overrides envvar RUN_DIR) 20 | run_dir: Optional[str] = None 21 | smtprof: Optional[str] = None # profile SMT constraints in log.smt, default: false 22 | # write intermediate output files to `out-dir`, default: false (overrides envvar WRITE_INTERMEDIATE) 23 | write_intermediate: Optional[str] = None 24 | # the search algorithm: offline, incremental, parallel (soon), default: incremental 25 | algo: Optional[str] = None 26 | cinit: Optional[ 27 | str 28 | ] = None # the name of an operator that initializes CONSTANTS, default: None 29 | config: Optional[ 30 | str 31 | ] = None # configuration file in TLC format, default: .cfg, or none if .cfg not present 32 | discard_disabled: Optional[ 33 | str 34 | ] = None # pre-check, whether a transition is disabled, and discard it, to make SMT queries smaller, default: true 35 | init: Optional[ 36 | str 37 | ] = None # the name of an operator that initializes VARIABLES, default: Init 38 | inv: Optional[str] = None # the name of an invariant operator, e.g., Inv 39 | length: Optional[str] = None # maximal number of Next steps, default: 10 40 | # do not stop on first error, but produce up to a given number of counterexamples (fine tune with --view), default: 1 41 | max_error: Optional[str] = None 42 | # do not stop after a first simulation run, but produce up to a given number of runs (unless reached --max-error), default: 100 43 | max_run: Optional[str] = None 44 | no_deadlock: Optional[str] = None # do not check for deadlocks, default: true 45 | nworkers: Optional[ 46 | str 47 | ] = None # the number of workers for the parallel checker (soon), default: 1 48 | # the SMT encoding: oopsla19, arrays (experimental), default: oopsla19 (overrides envvar SMT_ENCODING) 49 | smt_encoding: 
Optional[str] = None 50 | tuning: Optional[str] = None # filename of the tuning options, see docs/tuning.md 51 | # tuning options as arguments in the format key1=val1:key2=val2:key3=val3 (priority over --tuning) 52 | tuning_options: Optional[str] = None 53 | view: Optional[ 54 | str 55 | ] = None # the state view to use with --max-error=n, default: transition index 56 | # Let Apalache submit usage statistics to tlapl.us (shared with TLC and TLA+ Toolbox) See: https://apalache.informal.systems/docs/apalache/statistics.html 57 | enable_stats: Optional[str] = None 58 | before: Optional[ 59 | str 60 | ] = None # the name of an operator to prepare the test, similar to Init 61 | action: Optional[str] = None # the name of an action to execute, similar to Next 62 | assertion: Optional[ 63 | str 64 | ] = None # the name of an operator that should evaluate to true after executing `action` 65 | # the name of a transition operator, default: Next : a file containing a TLA+ specification (.tla or .json) 66 | next: Optional[str] = None 67 | infer_poly: Optional[ 68 | str 69 | ] = None # allow the type checker to infer polymorphic types, default: true 70 | # file to which the typechecked or parsed source is written (.tla or .json), default: None 71 | output: Optional[str] = None 72 | features: Optional[ 73 | str 74 | ] = None # a comma-separated list of experimental features, default: None 75 | output_traces: Optional[ 76 | str 77 | ] = None # save an example trace for each symbolic run, default: false 78 | -------------------------------------------------------------------------------- /modelator_py/apalache/cli.py: -------------------------------------------------------------------------------- 1 | import json as stdjson 2 | 3 | from .pure import apalache_pure 4 | from .raw import ApalacheArgs, RawCmd, apalache_raw 5 | 6 | 7 | class Apalache: 8 | def __init__(self, stdin): 9 | self._stdin = stdin 10 | 11 | def pure(self): 12 | """ 13 | Run Apalache without side effects using json input data. 14 | 15 | Runs Apalache in a temporary directory, writing all necessary data into the 16 | directory before calling Apalache, and reading back all necessary results 17 | back into memory. 18 | 19 | Writes the result to stdout in json. 20 | 21 | Requires json input data on stdin (` < data.json`). 22 | """ 23 | assert ( 24 | self._stdin is not None 25 | ), "The pure interface requires json input in stdin" 26 | json_dict = stdjson.loads(self._stdin.read()) 27 | 28 | result = apalache_pure(json=json_dict) 29 | to_print = stdjson.dumps(result, indent=4, sort_keys=True) 30 | print(to_print) 31 | 32 | def raw( 33 | self, 34 | *, 35 | json=None, 36 | cwd=None, 37 | jar=None, 38 | cmd=None, 39 | file=None, 40 | config_file=None, 41 | debug=None, 42 | out_dir=None, 43 | profiling=None, 44 | run_dir=None, 45 | smtprof=None, 46 | write_intermediate=None, 47 | algo=None, 48 | cinit=None, 49 | config=None, 50 | discard_disabled=None, 51 | init=None, 52 | inv=None, 53 | length=None, 54 | max_error=None, 55 | no_deadlock=None, 56 | nworkers=None, 57 | smt_encoding=None, 58 | tuning=None, 59 | tuning_options=None, 60 | view=None, 61 | enable_stats=None, 62 | before=None, 63 | action=None, 64 | assertion=None, 65 | next=None, 66 | infer_poly=None, 67 | output=None, 68 | features=None, 69 | ): 70 | """ 71 | Run Apalache without removing side effects (for debugging). 72 | 73 | Run Apalache directly without creating a temporary directory. This is mainly 74 | useful for debugging. 
Arguments can be provided on command line or by 75 | specifying the --json flag and providing json on stdin (` < data.json`). 76 | 77 | Arguments: 78 | json : Read arguments from json instead of cli? 79 | cwd : Full path to directory to run Apalache from. 80 | jar : Full path to Apalache version 0.23.0 jar (other versions may work). 81 | cmd : Apalache argument, see ` --help`. 82 | file : Apalache argument, see ` --help`. 83 | config_file : Apalache argument, see ` --help`. 84 | debug : Apalache argument, see ` --help`. 85 | out_dir : Apalache argument, see ` --help`. 86 | profiling : Apalache argument, see ` --help`. 87 | run_dir : Apalache argument, see ` --help`. 88 | smtprof : Apalache argument, see ` --help`. 89 | write_intermediate : Apalache argument, see ` --help`. 90 | algo : Apalache argument, see ` --help`. 91 | cinit : Apalache argument, see ` --help`. 92 | config : Apalache argument, see ` --help`. 93 | discard_disabled : Apalache argument, see ` --help`. 94 | init : Apalache argument, see ` --help`. 95 | inv : Apalache argument, see ` --help`. 96 | length : Apalache argument, see ` --help`. 97 | max_error : Apalache argument, see ` --help`. 98 | no_deadlock : Apalache argument, see ` --help`. 99 | nworkers : Apalache argument, see ` --help`. 100 | smt_encoding : Apalache argument, see ` --help`. 101 | tuning : Apalache argument, see ` --help`. 102 | tuning_options : Apalache argument, see ` --help`. 103 | view : Apalache argument, see ` --help`. 104 | enable_stats : Apalache argument, see ` --help`. 105 | before : Apalache argument, see ` --help`. 106 | action : Apalache argument, see ` --help`. 107 | assertion : Apalache argument, see ` --help`. 108 | next : Apalache argument, see ` --help`. 109 | infer_poly : Apalache argument, see ` --help`. 110 | output : Apalache argument, see ` --help`. 111 | features : Apalache argument, see ` --help`. 
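Example invocation (assuming the `modelator` entry point; the paths and operator names are hypothetical): `modelator apalache raw --jar=/path/to/apalache.jar --cwd=/path/to/spec/dir --cmd=check --file=Spec.tla --inv=Inv --length=10`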
112 | """ 113 | result = None 114 | if json: 115 | json_dict = stdjson.loads(self._stdin.read()) 116 | result = apalache_raw(json=json_dict) 117 | else: 118 | raw_cmd = RawCmd() 119 | raw_cmd.cwd = cwd 120 | raw_cmd.jar = jar 121 | raw_cmd.args = ApalacheArgs( 122 | cmd, 123 | file, 124 | config_file, 125 | debug, 126 | out_dir, 127 | profiling, 128 | run_dir, 129 | smtprof, 130 | write_intermediate, 131 | algo, 132 | cinit, 133 | config, 134 | discard_disabled, 135 | init, 136 | inv, 137 | length, 138 | max_error, 139 | no_deadlock, 140 | nworkers, 141 | smt_encoding, 142 | tuning, 143 | tuning_options, 144 | view, 145 | enable_stats, 146 | before, 147 | action, 148 | assertion, 149 | next, 150 | infer_poly, 151 | output, 152 | features, 153 | ) 154 | 155 | result = apalache_raw(cmd=raw_cmd) 156 | 157 | stdout_pretty = result.stdout.decode() 158 | stderr_pretty = result.stderr.decode() 159 | 160 | obj_to_print = {} 161 | obj_to_print["shell_cmd"] = result.args 162 | obj_to_print["return_code"] = result.returncode 163 | obj_to_print["stdout"] = stdout_pretty 164 | obj_to_print["stderr"] = stderr_pretty 165 | 166 | to_print = stdjson.dumps(obj_to_print, indent=4, sort_keys=True) 167 | print(to_print) 168 | -------------------------------------------------------------------------------- /modelator_py/apalache/pure.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import tempfile 4 | from dataclasses import dataclass 5 | from typing import Optional 6 | 7 | from ..helper import get_dirnames_in_dir, read_entire_dir_contents 8 | from .args import ApalacheArgs 9 | from .raw import RawCmd, apalache_raw 10 | 11 | LOG = logging.getLogger(__name__) 12 | 13 | # mypy: ignore-errors 14 | 15 | 16 | @dataclass 17 | class PureCmd: 18 | jar: Optional[ 19 | str 20 | ] = None # Location of Apalache jar (full path with suffix like apalache.jar) 21 | args: Optional[ApalacheArgs] = None # Apalache args 22 | files: Optional[str] = None # Current working directory for child shell process 23 | 24 | 25 | # Used to overwrite Apalache's "--out-dir" flag 26 | APALACHE_OUT_DIR_NAME = "out" 27 | 28 | 29 | def json_to_cmd(json) -> PureCmd: 30 | json = { 31 | **{ 32 | "files": None, 33 | "jar": None, 34 | "args": None, 35 | }, 36 | **json, 37 | } 38 | cmd = PureCmd() 39 | cmd.jar = json["jar"] 40 | cmd.args = ApalacheArgs(**json["args"]) 41 | cmd.files = json["files"] 42 | return cmd 43 | 44 | 45 | def read_apalache_output_into_memory(full_dirname): 46 | """ 47 | Read files output by Apalache into a dictionary 48 | 49 | Apalache writes output of a command with argument --out-dir= to a 50 | directory inside //, (3 levels deep). 
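Concretely, this function descends into the 'out' directory (APALACHE_OUT_DIR_NAME), expects exactly one subdirectory at each of the next two levels (typically a directory named after the spec containing a timestamped run directory), reads every file found there into memory, and also collects files from an optional 'intermediate' subdirectory.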
51 | """ 52 | 53 | subdirs = get_dirnames_in_dir(full_dirname) 54 | 55 | assert ( 56 | APALACHE_OUT_DIR_NAME in os.path.dirname(sd) for sd in subdirs 57 | ), f"[Apalache output directory has unexpected structure, contains subdirs] [{subdirs=}]: " 58 | 59 | full_dirname = os.path.join(full_dirname, APALACHE_OUT_DIR_NAME) 60 | 61 | # Traverse two directories deep 62 | 63 | subdirs = get_dirnames_in_dir(full_dirname) 64 | assert ( 65 | len(subdirs) == 1 66 | ), f"[Apalache output directory has unexpected structure, contains subdirs] [{subdirs=}]: " 67 | full_dirname = subdirs[0] 68 | 69 | subdirs = get_dirnames_in_dir(full_dirname) 70 | assert ( 71 | len(subdirs) == 1 72 | ), f"[Apalache output subdirectory has unexpected structure, contains subdirs] [{subdirs=}]" 73 | full_dirname = subdirs[0] 74 | 75 | LOG.debug(f"{full_dirname=}") 76 | 77 | all_files = read_entire_dir_contents(full_dirname) 78 | all_files = {os.path.basename(fn): content for fn, content in all_files.items()} 79 | 80 | subdirs = get_dirnames_in_dir(full_dirname) 81 | 82 | INTERMEDIATE_DIR = "intermediate" 83 | if os.path.join(full_dirname, INTERMEDIATE_DIR) in subdirs: 84 | intermediate_files = read_entire_dir_contents( 85 | os.path.join(full_dirname, INTERMEDIATE_DIR) 86 | ) 87 | 88 | def filename(full_filename): 89 | base = os.path.basename(full_filename) 90 | return os.path.join(INTERMEDIATE_DIR, base) 91 | 92 | all_files = { 93 | **all_files, 94 | **{filename(fn): content for fn, content in intermediate_files.items()}, 95 | } 96 | 97 | return all_files 98 | 99 | 100 | def apalache_pure(*, cmd: PureCmd = None, json=None): # type: ignore 101 | """ 102 | Run a Apalache command using either a PureCmd object, or build the PureCmd from json. 103 | 104 | Run Apalache without side effects in a temporary directory. 105 | 106 | Returns an ExecutionResult with .process and .files properties. Contains the 107 | subprocess result, and the list of filesystem files (and contents). 108 | """ 109 | 110 | assert not (cmd is not None and json is not None) 111 | assert (cmd is not None) or (json is not None) 112 | if json is not None: 113 | cmd = json_to_cmd(json) 114 | 115 | raw_cmd = RawCmd() 116 | raw_cmd.args = cmd.args 117 | raw_cmd.jar = cmd.jar 118 | 119 | if raw_cmd.args.out_dir is not None: 120 | raise Exception( 121 | "--out-dir flag value is not None but Apalache pure command overwrites\ 122 | this flag. Do not include a value for this flag." 
123 | ) 124 | raw_cmd.args.out_dir = "out" 125 | 126 | ret = {} 127 | 128 | result = None 129 | 130 | with tempfile.TemporaryDirectory( 131 | prefix="modelator-py-apalache-temp-dir-" 132 | ) as dirname: 133 | raw_cmd.cwd = dirname 134 | for filename, file_content_str in cmd.files.items(): 135 | full_path = os.path.join(dirname, filename) 136 | with open(full_path, "w") as fd: 137 | fd.write(file_content_str) 138 | 139 | result = apalache_raw(cmd=raw_cmd) 140 | 141 | try: 142 | ret["files"] = read_apalache_output_into_memory(dirname) 143 | except FileNotFoundError: 144 | ret["files"] = dict() 145 | 146 | # Throw out the files that the user originally gave as input 147 | ret["files"] = { 148 | fn: content for fn, content in ret["files"].items() if fn not in cmd.files 149 | } 150 | 151 | stdout_pretty = result.stdout.decode() 152 | stderr_pretty = result.stderr.decode() 153 | 154 | ret["shell_cmd"] = result.args 155 | ret["return_code"] = result.returncode 156 | ret["stdout"] = stdout_pretty 157 | ret["stderr"] = stderr_pretty 158 | 159 | return ret 160 | -------------------------------------------------------------------------------- /modelator_py/apalache/raw.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | import tempfile 4 | from dataclasses import asdict, dataclass 5 | from typing import Optional 6 | 7 | from .args import ApalacheArgs 8 | 9 | # mypy: ignore-errors 10 | 11 | 12 | @dataclass 13 | class RawCmd: 14 | cwd: Optional[str] = None # Current working directory for child shell process 15 | jar: Optional[ 16 | str 17 | ] = None # Location of Apalache jar (full path with suffix like apalache.jar) 18 | args: Optional[ApalacheArgs] = None # Apalache args 19 | 20 | 21 | def stringify_raw_cmd(cmd: RawCmd, java_temp_dir: str = None): 22 | 23 | jar = cmd.jar 24 | args = cmd.args 25 | 26 | if java_temp_dir is None: 27 | tmpdir_setup = "" 28 | else: 29 | tmpdir_setup = " -Djava.io.tmpdir={}".format(java_temp_dir) 30 | 31 | def stringify(value): 32 | # Apalache will not accept capitalized bools 33 | if isinstance(value, bool): 34 | return str(value).lower() 35 | return value 36 | 37 | args = ApalacheArgs(**{k: stringify(v) for k, v in asdict(args).items()}) 38 | 39 | cmd_str = f"""java{tmpdir_setup}\ 40 | -jar "{jar}"\ 41 | {f" --config-file={args.config_file}" if args.config_file is not None else ""}\ 42 | {f" --debug={args.debug}" if args.debug is not None else ""}\ 43 | {f" --out-dir={args.out_dir}" if args.out_dir is not None else ""}\ 44 | {f" --profiling={args.profiling}" if args.profiling is not None else ""}\ 45 | {f" --run-dir={args.run_dir}" if args.run_dir is not None else ""}\ 46 | {f" --smtprof={args.smtprof}" if args.smtprof is not None else ""}\ 47 | {f" --write-intermediate={args.write_intermediate}" if args.write_intermediate is not None else ""}\ 48 | {args.cmd}\ 49 | {f" --algo={args.algo}" if args.algo is not None else ""}\ 50 | {f" --cinit={args.cinit}" if args.cinit is not None else ""}\ 51 | {f" --config={args.config}" if args.config is not None else ""}\ 52 | {f" --discard-disabled={args.discard_disabled}" if args.discard_disabled is not None else ""}\ 53 | {f" --init={args.init}" if args.init is not None else ""}\ 54 | {f" --inv={args.inv}" if args.inv is not None else ""}\ 55 | {f" --length={args.length}" if args.length is not None else ""}\ 56 | {f" --max-error={args.max_error}" if args.max_error is not None else ""}\ 57 | {f" --max-run={args.max_run}" if args.max_run is not None else ""}\ 58 
| {f" --no-deadlock={args.no_deadlock}" if args.no_deadlock is not None else ""}\ 59 | {f" --output-traces={args.output_traces}" if args.output_traces is not None else ""}\ 60 | {f" --nworkers={args.nworkers}" if args.nworkers is not None else ""}\ 61 | {f" --smt-encoding={args.smt_encoding}" if args.smt_encoding is not None else ""}\ 62 | {f" --tuning={args.tuning}" if args.tuning is not None else ""}\ 63 | {f" --tuning-options={args.tuning_options}" if args.tuning_options is not None else ""}\ 64 | {f" --view={args.view}" if args.view is not None else ""}\ 65 | {f" --enable-stats={args.enable_stats}" if args.enable_stats is not None else ""}\ 66 | {f" --before={args.before}" if args.before is not None else ""}\ 67 | {f" --action={args.action}" if args.action is not None else ""}\ 68 | {f" --assertion={args.assertion}" if args.assertion is not None else ""}\ 69 | {f" --next={args.next}" if args.next is not None else ""}\ 70 | {f" --infer-poly={args.infer_poly}" if args.infer_poly is not None else ""}\ 71 | {f" --output={args.output}" if args.output is not None else ""}\ 72 | {f" --features={args.features}" if args.features is not None else ""}\ 73 | {f" {args.file}" if args.file is not None else ""}\ 74 | {f" {args.before}" if args.before is not None else ""}\ 75 | {f" {args.action}" if args.action is not None else ""}\ 76 | {f" {args.assertion}" if args.assertion is not None else ""}\ 77 | """ 78 | 79 | return cmd_str 80 | 81 | 82 | def json_to_cmd(json) -> RawCmd: 83 | json = { 84 | **{ 85 | "cwd": None, 86 | "jar": None, 87 | "args": None, 88 | }, 89 | **json, 90 | } 91 | cmd = RawCmd() 92 | cmd.cwd = json["cwd"] 93 | cmd.jar = json["jar"] 94 | cmd.args = ApalacheArgs(**json["args"]) 95 | return cmd 96 | 97 | 98 | def apalache_raw(*, cmd: RawCmd = None, json=None): 99 | """ 100 | Run an Apalache command using either a RawCmd object, or build the RawCmd from json. 101 | 102 | Run Apalache with side effects without creating a temporary directory. 103 | 104 | Returns a subprocess call result object. 
105 | """ 106 | assert cmd is not None or json is not None 107 | assert not (cmd is not None and json is not None) 108 | 109 | if json is not None: 110 | cmd = json_to_cmd(json) 111 | 112 | if cmd.cwd is not None: 113 | cmd.cwd = os.path.expanduser(cmd.cwd) 114 | if not os.path.isabs(cmd.cwd): 115 | raise Exception("cwd must be absolute (after expanding user)") 116 | if cmd.cwd is None: 117 | raise Exception("cwd must be absolute (after expanding user)") 118 | if cmd.jar is not None: 119 | cmd.jar = os.path.expanduser(cmd.jar) 120 | if not os.path.isabs(cmd.jar): 121 | raise Exception("Apalache jar path must be absolute (after expanding user)") 122 | if cmd.jar is None: 123 | raise Exception("Apalache jar path must be absolute (after expanding user)") 124 | 125 | with tempfile.TemporaryDirectory( 126 | prefix="modelator-py-apalache-java-temp-dir-" 127 | ) as java_temp: 128 | cmd_str = stringify_raw_cmd(cmd, java_temp_dir=java_temp) 129 | 130 | # Semantics a bit complex here - see https://stackoverflow.com/a/15109975/8346628 131 | return subprocess.run(cmd_str, shell=True, capture_output=True, cwd=cmd.cwd) 132 | -------------------------------------------------------------------------------- /modelator_py/cli.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | import fire 4 | 5 | from .apalache.cli import Apalache 6 | from .tlc.cli import Tlc 7 | from .util.cli import Util 8 | 9 | 10 | class App: 11 | def __init__(self, stdin): 12 | self._stdin = stdin 13 | self.tlc = Tlc(stdin) 14 | self.apalache = Apalache(stdin) 15 | self.util = Util(stdin) 16 | 17 | def easter(self, fizz, *, foo=True, bar=None, wiz): 18 | """ 19 | This is an easter egg function designed as an example. 20 | 21 | You can read this documentation with ` easter --help`. 22 | 23 | Arguments: 24 | fizz : Crackle, pop! 25 | foo : Is it a bird, is it a plane? 26 | bar : How much wood would a woodchuck chuck? 27 | wiz : If Peter Piper picked a peck of pickled peppers... 
28 | """ 29 | print(f"Warning: this is just an example command: {foo=} {bar=} {wiz=}") 30 | 31 | 32 | def cli(): 33 | """ 34 | Entrypoint for the cli 35 | """ 36 | if len(sys.argv) == 1: 37 | raise Exception( 38 | "Providing only stdin input is not yet supported (at least one argument must be given)" 39 | ) 40 | else: 41 | app = App(sys.stdin) 42 | fire.Fire(app) 43 | 44 | 45 | if __name__ == "__main__": 46 | cli() 47 | -------------------------------------------------------------------------------- /modelator_py/helper.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import shutil 4 | import typing 5 | 6 | import pathos.multiprocessing as multiprocessing 7 | 8 | LOG = logging.getLogger(__name__) 9 | 10 | 11 | def get_filenames_in_dir(path): 12 | bases = os.listdir(path) 13 | full = [os.path.join(path, f) for f in bases] 14 | return [f for f in full if os.path.isfile(f)] 15 | 16 | 17 | def get_dirnames_in_dir(path): 18 | bases = os.listdir(path) 19 | full = [os.path.join(path, f) for f in bases] 20 | return [f for f in full if os.path.isdir(f)] 21 | 22 | 23 | def read_entire_dir_contents(path): 24 | """ 25 | Read contents of directory into a dictionary 26 | 27 | Non recursive 28 | """ 29 | if not os.path.isabs(path): 30 | raise Exception(f"Cannot read directory {path=} as it is not absolute") 31 | files = get_filenames_in_dir(path) 32 | ret = {} 33 | for f in files: 34 | with open(f, "r") as fd: 35 | ret[f] = fd.read() 36 | return ret 37 | 38 | 39 | def delete_dir(path): 40 | if not os.path.isabs(path): 41 | raise Exception(f"Cannot delete directory {path=} as it is not absolute") 42 | LOG.debug(f"Exec shutil.rmtree({path})") 43 | shutil.rmtree(path) 44 | 45 | 46 | def parallel_map(function, data: typing.List): 47 | cores = multiprocessing.cpu_count() 48 | 49 | # Make chunk size smaller to fill up gaps 50 | # if processing time for different chunks differ 51 | HEURISTIC_PARAM = 2 52 | chunksize = len(data) // (cores * HEURISTIC_PARAM) 53 | 54 | with multiprocessing.ProcessPool(cores) as p: 55 | return p.map(function, data, chunksize=chunksize) 56 | -------------------------------------------------------------------------------- /modelator_py/tlc/__init__.py: -------------------------------------------------------------------------------- 1 | from .args import TlcArgs 2 | from .pure import PureCmd as TlcPureCmd 3 | from .pure import tlc_pure 4 | from .raw import RawCmd as TlcRawCmd 5 | from .raw import tlc_raw 6 | 7 | __all__ = ["TlcArgs", "TlcPureCmd", "TlcRawCmd", "tlc_pure", "tlc_raw"] 8 | -------------------------------------------------------------------------------- /modelator_py/tlc/args.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import Optional 3 | 4 | # mypy: ignore-errors 5 | # flake8: noqa 6 | 7 | 8 | @dataclass 9 | class TlcArgs: 10 | aril: Optional[str] = None 11 | checkpoint: Optional[str] = None 12 | cleanup: Optional[str] = None 13 | config: Optional[str] = None 14 | cont: Optional[str] = None 15 | coverage: Optional[str] = None 16 | deadlock: Optional[str] = None 17 | debug: Optional[str] = None 18 | depth: Optional[str] = None 19 | dfid: Optional[str] = None 20 | difftrace: Optional[str] = None 21 | dump: Optional[str] = None 22 | fp: Optional[str] = None 23 | fpbits: Optional[str] = None 24 | fpmem: Optional[str] = None 25 | generate_spec_te: Optional[str] = None 26 | gzip: Optional[str] = None 27 | h: 
Optional[str] = None 28 | max_set_size: Optional[str] = None 29 | metadir: Optional[str] = None 30 | nowarning: Optional[str] = None 31 | recover: Optional[str] = None 32 | seed: Optional[str] = None 33 | simulate: Optional[str] = None 34 | terse: Optional[str] = None 35 | tool: Optional[str] = None 36 | userfile: Optional[str] = None 37 | view: Optional[str] = None 38 | workers: Optional[str] = None 39 | file: Optional[str] = None 40 | -------------------------------------------------------------------------------- /modelator_py/tlc/cli.py: -------------------------------------------------------------------------------- 1 | import json as stdjson 2 | 3 | from .pure import tlc_pure 4 | from .raw import RawCmd, TlcArgs, tlc_raw 5 | 6 | 7 | class Tlc: 8 | def __init__(self, stdin): 9 | self._stdin = stdin 10 | 11 | def pure(self): 12 | """ 13 | Run TLC without side effects using json input data. 14 | 15 | Runs TLC in a temporary directory, writing all necessary data into the 16 | directory before calling TLC, and reading back all necessary results 17 | back into memory. 18 | 19 | Writes the result to stdout in json. 20 | 21 | Requires json input data on stdin (` < data.json`). 22 | 23 | WARNING: does not support all CLI arguments in TLC 2.18 24 | """ 25 | assert ( 26 | self._stdin is not None 27 | ), "The pure interface requires json input in stdin" 28 | json_dict = stdjson.loads(self._stdin.read()) 29 | 30 | result = tlc_pure(json=json_dict) 31 | to_print = stdjson.dumps(result, indent=4, sort_keys=True) 32 | print(to_print) 33 | 34 | def raw( 35 | self, 36 | *, 37 | json=False, 38 | cwd=None, 39 | jar=None, 40 | aril=None, 41 | checkpoint=None, 42 | cleanup=None, 43 | config=None, 44 | cont=None, 45 | coverage=None, 46 | deadlock=None, 47 | debug=None, 48 | depth=None, 49 | dfid=None, 50 | difftrace=None, 51 | dump=None, 52 | fp=None, 53 | fpbits=None, 54 | fpmem=None, 55 | generate_spec_te=None, 56 | gzip=None, 57 | h=None, 58 | max_set_size=None, 59 | metadir=None, 60 | nowarning=None, 61 | recover=None, 62 | seed=None, 63 | simulate=None, 64 | terse=None, 65 | tool=None, 66 | userfile=None, 67 | view=None, 68 | workers=None, 69 | file=None, 70 | ): 71 | """ 72 | Run TLC without removing side effects (for debugging). 73 | 74 | Run TLC directly without creating a temporary directory. This is mainly 75 | useful for debugging. Arguments can be provided on command line or by 76 | specifying the --json flag and providing json on stdin (` < data.json`). 77 | 78 | WARNING: does not support all CLI arguments in TLC 2.18 79 | 80 | Arguments: 81 | json : Read arguments from json instead of cli? 82 | cwd : Full path to directory to run TLC from. 83 | jar : Full path to TLC version 2.18 jar (other versions may work). 84 | aril : TLC argument, see ` --help`. 85 | checkpoint : TLC argument, see ` --help`. 86 | cleanup : TLC argument, see ` --help`. 87 | config : TLC argument, see ` --help`. 88 | cont : TLC argument, see ` --help`. 89 | coverage : TLC argument, see ` --help`. 90 | deadlock : TLC argument, see ` --help`. 91 | debug : TLC argument, see ` --help`. 92 | depth : TLC argument, see ` --help`. 93 | dfid : TLC argument, see ` --help`. 94 | difftrace : TLC argument, see ` --help`. 95 | dump : TLC argument, see ` --help`. 96 | fp : TLC argument, see ` --help`. 97 | fpbits : TLC argument, see ` --help`. 98 | fpmem : TLC argument, see ` --help`. 99 | generate_spec_te : TLC argument, see ` --help`. 100 | gzip : TLC argument, see ` --help`. 101 | h : TLC argument, see ` --help`. 
102 | max_set_size : TLC argument, see ` --help`. 103 | metadir : TLC argument, see ` --help`. 104 | nowarning : TLC argument, see ` --help`. 105 | recover : TLC argument, see ` --help`. 106 | seed : TLC argument, see ` --help`. 107 | simulate : TLC argument, see ` --help`. 108 | terse : TLC argument, see ` --help`. 109 | tool : TLC argument, see ` --help`. 110 | userfile : TLC argument, see ` --help`. 111 | view : TLC argument, see ` --help`. 112 | workers : TLC argument, see ` --help`. 113 | file : TLC argument, see ` --help`. 114 | """ 115 | result = None 116 | if json: 117 | """Read instructions from json""" 118 | json_dict = stdjson.loads(self._stdin.read()) 119 | result = tlc_raw(json=json_dict) 120 | else: 121 | """Read instructions from cli flags and arguments""" 122 | cmd = RawCmd() 123 | cmd.cwd = cwd 124 | cmd.jar = jar 125 | cmd.args = TlcArgs( 126 | aril, 127 | checkpoint, 128 | cleanup, 129 | config, 130 | cont, 131 | coverage, 132 | deadlock, 133 | debug, 134 | depth, 135 | dfid, 136 | difftrace, 137 | dump, 138 | fp, 139 | fpbits, 140 | fpmem, 141 | generate_spec_te, 142 | gzip, 143 | h, 144 | max_set_size, 145 | metadir, 146 | nowarning, 147 | recover, 148 | seed, 149 | simulate, 150 | terse, 151 | tool, 152 | userfile, 153 | view, 154 | workers, 155 | file, 156 | ) 157 | result = tlc_raw(cmd=cmd) 158 | 159 | stdout_pretty = result.stdout.decode() 160 | stderr_pretty = result.stderr.decode() 161 | 162 | obj_to_print = {} 163 | obj_to_print["shell_cmd"] = result.args 164 | obj_to_print["return_code"] = result.returncode 165 | obj_to_print["stdout"] = stdout_pretty 166 | obj_to_print["stderr"] = stderr_pretty 167 | 168 | to_print = stdjson.dumps(obj_to_print, indent=4, sort_keys=True) 169 | print(to_print) 170 | -------------------------------------------------------------------------------- /modelator_py/tlc/pure.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tempfile 3 | from dataclasses import dataclass 4 | from typing import Optional 5 | 6 | from modelator_py.tlc.args import TlcArgs 7 | 8 | from ..helper import read_entire_dir_contents 9 | from .raw import RawCmd, tlc_raw 10 | 11 | # mypy: ignore-errors 12 | 13 | 14 | @dataclass 15 | class PureCmd: 16 | files: Optional[str] = None # dict : file name -> content 17 | jar: Optional[ 18 | str 19 | ] = None # Location of TLC jar (e.g. full path with suffix like tla2tools.jar) 20 | args: Optional[TlcArgs] = None # TLC args 21 | 22 | 23 | def json_to_cmd(json) -> PureCmd: 24 | json = { 25 | **{ 26 | "files": None, 27 | "jar": None, 28 | "args": None, 29 | }, 30 | **json, 31 | } 32 | cmd = PureCmd() 33 | cmd.jar = json["jar"] 34 | cmd.args = TlcArgs(**json["args"]) 35 | cmd.files = json["files"] 36 | return cmd 37 | 38 | 39 | def tlc_pure(*, cmd: PureCmd = None, json=None): # type: ignore 40 | """ 41 | Run a TLC command using either a PureCmd object, or build the PureCmd from json. 42 | 43 | Run TLC without side effects in a temporary directory. 44 | 45 | Returns an ExecutionResult with .process and .files properties. Contains the 46 | subprocess result, and the list of filesystem files (and contents). 
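For example, a minimal input sketch (the jar path and file contents are placeholders; `Hello.tla`/`Hello.cfg` mirror the samples directory); the returned mapping carries the shell command, return code, TLC's stdout/stderr, and any files TLC wrote:

```python
from modelator_py.tlc.pure import tlc_pure

json_input = {
    "jar": "/full/path/to/tla2tools.jar",  # placeholder path
    "args": {"config": "Hello.cfg", "file": "Hello.tla"},
    "files": {
        "Hello.tla": "<TLA+ module content>",
        "Hello.cfg": "<TLC config content>",
    },
}
result = tlc_pure(json=json_input)
print(result["return_code"])
print(result["stdout"])
```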
47 | """ 48 | assert not (cmd is not None and json is not None) 49 | assert (cmd is not None) or (json is not None) 50 | 51 | if json is not None: 52 | cmd = json_to_cmd(json) 53 | 54 | raw_cmd = RawCmd() 55 | raw_cmd.args = cmd.args 56 | # Always specify tlc '-cleanup' 57 | raw_cmd.args.cleanup = True 58 | raw_cmd.jar = cmd.jar 59 | 60 | ret = {} 61 | 62 | result = None 63 | 64 | with tempfile.TemporaryDirectory(prefix="modelator-py-tlc-temp-dir-") as dirname: 65 | raw_cmd.cwd = dirname 66 | for filename, file_content_str in cmd.files.items(): 67 | full_path = os.path.join(dirname, filename) 68 | with open(full_path, "w") as fd: 69 | fd.write(file_content_str) 70 | 71 | result = tlc_raw(cmd=raw_cmd) 72 | 73 | # Read dir contents (not recursively) 74 | all_files = read_entire_dir_contents(dirname) 75 | all_files = {os.path.basename(fn): content for fn, content in all_files.items()} 76 | # Throw out the files that the user gave as input 77 | ret["files"] = { 78 | fn: content for fn, content in all_files.items() if fn not in cmd.files 79 | } 80 | 81 | stdout_pretty = result.stdout.decode() 82 | stderr_pretty = result.stderr.decode() 83 | 84 | ret["shell_cmd"] = result.args 85 | ret["return_code"] = result.returncode 86 | ret["stdout"] = stdout_pretty 87 | ret["stderr"] = stderr_pretty 88 | 89 | return ret 90 | -------------------------------------------------------------------------------- /modelator_py/tlc/raw.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | import tempfile 4 | from dataclasses import asdict, dataclass 5 | from typing import Optional 6 | 7 | from .args import TlcArgs 8 | 9 | # mypy: ignore-errors 10 | 11 | 12 | @dataclass 13 | class RawCmd: 14 | cwd: Optional[str] = None # Current working directory for child shell process 15 | jar: Optional[ 16 | str 17 | ] = None # Location of TLC jar (full path with suffix like tla2tools.jar) 18 | args: Optional[TlcArgs] = None # TLC args 19 | 20 | 21 | def stringify_raw_cmd(cmd: RawCmd, java_temp_dir: str = None) -> str: 22 | """ 23 | Returns a string which can be passed to a shell to run TLC. 
24 | """ 25 | 26 | jar = cmd.jar 27 | args = cmd.args 28 | 29 | if java_temp_dir is None: 30 | tmpdir_setup = "" 31 | else: 32 | tmpdir_setup = " -Djava.io.tmpdir={}".format(java_temp_dir) 33 | 34 | def stringify(value): 35 | # Tlc will not accept capitals 36 | if isinstance(value, bool): 37 | return str(value).lower() 38 | return value 39 | 40 | args = TlcArgs(**{k: stringify(v) for k, v in asdict(args).items()}) 41 | 42 | cmd_str = f"""java{tmpdir_setup}\ 43 | -cp "{jar}"\ 44 | tlc2.TLC\ 45 | {f" -aril {args.aril}" if args.aril is not None else ""}\ 46 | {f" -checkpoint {args.checkpoint}" if args.checkpoint is not None else ""}\ 47 | {f" -cleanup" if args.cleanup is not None else ""}\ 48 | {f" -config {args.config}" if args.config is not None else ""}\ 49 | {f" -continue" if args.cont is not None else ""}\ 50 | {f" -coverage {args.coverage}" if args.coverage is not None else ""}\ 51 | {f" -deadlock" if args.deadlock is not None else ""}\ 52 | {f" -debug" if args.debug is not None else ""}\ 53 | {f" -depth {args.depth}" if args.depth is not None else ""}\ 54 | {f" -dfid {args.dfid}" if args.dfid is not None else ""}\ 55 | {f" -difftrace" if args.difftrace is not None else ""}\ 56 | {f" -dump {args.dump}" if args.dump is not None else ""}\ 57 | {f" -fp {args.fp}" if args.fp is not None else ""}\ 58 | {f" -fpbits {args.fpbits}" if args.fpbits is not None else ""}\ 59 | {f" -fpmem {args.fpmem}" if args.fpmem is not None else ""}\ 60 | {f" -generateSpecTE" if args.generate_spec_te is not None else ""}\ 61 | {f" -gzip" if args.gzip is not None else ""}\ 62 | {f" -h" if args.h is not None else ""}\ 63 | {f" -maxSetSize {args.max_set_size}" if args.max_set_size is not None else ""}\ 64 | {f" -metadir {args.metadir}" if args.metadir is not None else ""}\ 65 | {f" -nowarning" if args.nowarning is not args.nowarning else ""}\ 66 | {f" -recover {args.recover}" if args.recover is not None else ""}\ 67 | {f" -seed {args.seed}" if args.seed is not None else ""}\ 68 | {f" -simulate" if args.simulate is not None else ""}\ 69 | {f" -terse" if args.terse is not None else ""}\ 70 | {f" -tool" if args.tool is not None else ""}\ 71 | {f" -userFile {args.userfile}" if args.userfile is not None else ""}\ 72 | {f" -view" if args.view is not None else ""}\ 73 | {f" -workers {args.workers}" if args.workers is not None else ""}\ 74 | {f" {args.file}" if args.file is not None else ""}\ 75 | """ 76 | 77 | return cmd_str 78 | 79 | 80 | def json_to_cmd(json) -> RawCmd: 81 | json = { 82 | **{ 83 | "cwd": None, 84 | "jar": None, 85 | "args": None, 86 | }, 87 | **json, 88 | } 89 | cmd = RawCmd() 90 | cmd.cwd = json["cwd"] 91 | cmd.jar = json["jar"] 92 | cmd.args = TlcArgs(**json["args"]) 93 | return cmd 94 | 95 | 96 | def tlc_raw(*, cmd: RawCmd = None, json=None): 97 | """ 98 | Run a TLC command using either a RawCmd object, or build the RawCmd from json. 99 | 100 | Run TLC with side effects without creating a temporary directory. 101 | 102 | Returns a subprocess call result object. 
103 | """ 104 | assert cmd is not None or json is not None 105 | assert not (cmd is not None and json is not None) 106 | 107 | if json is not None: 108 | cmd = json_to_cmd(json) 109 | 110 | if cmd.cwd is not None: 111 | cmd.cwd = os.path.expanduser(cmd.cwd) 112 | if not os.path.isabs(cmd.cwd): 113 | raise Exception("cwd must be absolute (after expanding user)") 114 | if cmd.cwd is None: 115 | raise Exception("cwd must be absolute (after expanding user)") 116 | if cmd.jar is not None: 117 | cmd.jar = os.path.expanduser(cmd.jar) 118 | if not os.path.isabs(cmd.jar): 119 | raise Exception("TLC jar path must be absolute (after expanding user)") 120 | if cmd.jar is None: 121 | raise Exception("TLC jar path must be absolute (after expanding user)") 122 | 123 | with tempfile.TemporaryDirectory( 124 | prefix="modelator-py-tlc-java-temp-dir-" 125 | ) as java_temp: 126 | cmd_str = stringify_raw_cmd(cmd, java_temp_dir=java_temp) 127 | 128 | # Semantics a bit complex here - see https://stackoverflow.com/a/15109975/8346628 129 | return subprocess.run(cmd_str, shell=True, capture_output=True, cwd=cmd.cwd) 130 | -------------------------------------------------------------------------------- /modelator_py/tlc/tlc_help_2.18.txt: -------------------------------------------------------------------------------- 1 | 2 | NAME 3 | 4 | TLC - provides model checking and simulation of TLA+ specifications - Version 2.18 of Day Month 20?? 5 | 6 | 7 | SYNOPSIS 8 | 9 | TLC [-h] [-cleanup] [-continue] [-deadlock] [-debug] [-difftrace] [-gzip] [-noGenerateSpecTE] [-nowarning] [-terse] [-tool] [-view] [-checkpoint minutes] [-config file] [-coverage minutes] [-dfid num] [-dump [dot actionlabels,colorize,snapshot] file] [-dumpTrace format file] [-fp N] [-fpbits num] [-fpmem num] [-maxSetSize num] [-metadir path] [-postCondition mod!oper] [-recover id] [-teSpecOutDir some-dir-name] [-userFile file] [-workers num] -debugger nosuspend SPEC 10 | TLC [-h] [-cleanup] [-continue] [-deadlock] [-debug] [-difftrace] [-gzip] [-noGenerateSpecTE] [-nowarning] [-terse] [-tool] [-aril num] [-checkpoint minutes] [-config file] [-coverage minutes] [-depth num] [-dump [dot actionlabels,colorize,snapshot] file] [-dumpTrace format file] [-fp N] [-fpbits num] [-fpmem num] [-maxSetSize num] [-metadir path] [-postCondition mod!oper] [-recover id] [-seed num] [-teSpecOutDir some-dir-name] [-userFile file] [-workers num] -debugger nosuspend -simulate [file=X,num=Y] SPEC 11 | 12 | DESCRIPTION 13 | 14 | The model checker (TLC) provides the functionalities of model checking 15 | or simulation of TLA+ specifications. It may be invoked from the command 16 | line, or via the model checking functionality of the Toolbox. 17 | 18 | By default, TLC starts in the model checking mode using breadth-first 19 | approach for the state space exploration. 
20 | 21 | A pretty-printed and in-depth description of TLC can be found at: 22 | 23 | https://lamport.azurewebsites.net/tla/current-tools.pdf 24 | 25 | OPTIONS 26 | 27 | -aril num 28 | adjust the seed for random simulation; defaults to 0 29 | -checkpoint minutes 30 | interval between check point; defaults to 30 31 | -cleanup 32 | clean up the states directory 33 | -config file 34 | provide the configuration file; defaults to SPEC.cfg 35 | -continue 36 | continue running even when an invariant is violated; default 37 | behavior is to halt on first violation 38 | -coverage minutes 39 | interval between the collection of coverage information; 40 | if not specified, no coverage will be collected 41 | -deadlock 42 | if specified DO NOT CHECK FOR DEADLOCK. Setting the flag is 43 | the same as setting CHECK_DEADLOCK to FALSE in config 44 | file. When -deadlock is specified, config entry is 45 | ignored; default behavior is to check for deadlocks 46 | -debug 47 | print various debugging information - not for production use 48 | 49 | -debugger nosuspend 50 | run simulation or model-checking in debug mode such that TLC's 51 | state-space exploration can be temporarily halted and variables 52 | be inspected. The only debug front-end so far is the TLA+ 53 | VSCode extension, which has to be downloaded and configured 54 | separately, though other front-ends could be implemeted via the 55 | debug-adapter-protocol. 56 | Specifying the optional parameter 'nosuspend' causes 57 | TLC to start state-space exploration without waiting for a 58 | debugger front-end to connect. Without 'nosuspend', TLC 59 | suspends state-space exploration before the first ASSUME is 60 | evaluated (but after constants are processed). With 'nohalt', 61 | TLC does not halt state-space exploration when an evaluation 62 | or runtime error is caught. Without 'nohalt', evaluation or 63 | runtime errors can be inspected in the debugger before TLC 64 | terminates. The optional parameter 'port=1274' makes the 65 | debugger listen on port 1274 instead of on the standard 66 | port 4712, and 'port=0' lets the debugger choose a port. 67 | Multiple optional parameters must be comma-separated. 68 | Specifying '-debugger' implies '-workers 1'. 69 | -depth num 70 | specifies the depth of random simulation; defaults to 100 71 | -dfid num 72 | run the model check in depth-first iterative deepening 73 | starting with an initial depth of 'num' 74 | -difftrace 75 | show only the differences between successive states when 76 | printing trace information; defaults to printing 77 | full state descriptions 78 | -dump file 79 | dump all states into the specified file; this parameter takes 80 | optional parameters for dot graph generation. Specifying 81 | 'dot' allows further options, comma delimited, of zero 82 | or more of 'actionlabels', 'colorize', 'snapshot' to be 83 | specified before the '.dot'-suffixed filename 84 | -dumpTrace format file 85 | in case of a property violation, formats the TLA+ error trace 86 | as the given format and dumps the output to the specified 87 | file. The file is relative to the same directory as the 88 | main spec. At the time of writing, TLC supports the "tla" 89 | and the "json" formats. To dump to multiple formats, the 90 | -dumpTrace parameter may appear multiple times. 91 | The git commits 1eb815620 and 386eaa19f show that adding new 92 | formats is easy. 
93 | 94 | -fp N 95 | use the Nth irreducible polynomial from the list stored 96 | in the class FP64 97 | -fpbits num 98 | the number of MSB used by MultiFPSet to create nested 99 | FPSets; defaults to 1 100 | -fpmem num 101 | a value in (0.0,1.0) representing the ratio of total 102 | physical memory to devote to storing the fingerprints 103 | of found states; defaults to 0.25 104 | -gzip 105 | control if gzip is applied to value input/output streams; 106 | defaults to 'off' 107 | -h 108 | display these help instructions 109 | -maxSetSize num 110 | the size of the largest set which TLC will enumerate; defaults 111 | to 1000000 (10^6) 112 | -metadir path 113 | specify the directory in which to store metadata; defaults to 114 | SPEC-directory/states if not specified 115 | -noGenerateSpecTE 116 | Whether to skip generating a trace exploration (TE) spec in 117 | the event of TLC finding a state or behavior that does 118 | not satisfy the invariants; TLC's default behavior is to 119 | generate this spec. 120 | -nowarning 121 | disable all warnings; defaults to reporting warnings 122 | -postCondition mod!oper 123 | evaluate the given (constant-level) operator oper in the TLA+ 124 | module mod at the end of model-checking. 125 | -recover id 126 | recover from the checkpoint with the specified id 127 | -seed num 128 | provide the seed for random simulation; defaults to a 129 | random long pulled from a pseudo-RNG 130 | -simulate 131 | run in simulation mode; optional parameters may be specified 132 | comma delimited: 'num=X' where X is the maximum number of 133 | total traces to generate and/or 'file=Y' where Y is the 134 | absolute-pathed prefix for trace file modules to be written 135 | by the simulation workers; for example Y='/a/b/c/tr' would 136 | produce, e.g, '/a/b/c/tr_1_15' 137 | -teSpecOutDir some-dir-name 138 | Directory to which to output the TE spec if TLC generates 139 | an error trace. Can be a relative (to root spec dir) 140 | or absolute path. By default the TE spec is output 141 | to the same directory as the main spec. 142 | -terse 143 | do not expand values in Print statements; defaults to 144 | expanding values 145 | -tool 146 | run in 'tool' mode, surrounding output with message codes; 147 | if '-generateSpecTE' is specified, this is enabled 148 | automatically 149 | -userFile file 150 | an absolute path to a file in which to log user output (for 151 | example, that which is produced by Print) 152 | -view 153 | apply VIEW (if provided) when printing out states 154 | -workers num 155 | the number of TLC worker threads; defaults to 1. Use 'auto' 156 | to automatically select the number of threads based on the 157 | number of available cores. 158 | 159 | TIPS 160 | 161 | When using the '-generateSpecTE' you can version the generated specification by doing: 162 | ./tla2tools.jar -generateSpecTE MySpec.tla && NAME="SpecTE-$(date +%s)" && sed -e "s/MODULE SpecTE/MODULE $NAME/g" SpecTE.tla > $NAME.tla 163 | 164 | If, while checking a SpecTE created via '-generateSpecTE', you get an error message concerning 165 | CONSTANT declaration and you've previous used 'integers' as model values, rename your 166 | model values to start with a non-numeral and rerun the model check to generate a new SpecTE. 167 | 168 | If, while checking a SpecTE created via '-generateSpecTE', you get a warning concerning 169 | duplicate operator definitions, this is likely due to the 'monolith' specification 170 | creation. 
Try re-running TLC adding the 'nomonolith' option to the '-generateSpecTE' 171 | parameter. 172 | -------------------------------------------------------------------------------- /modelator_py/util/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/informalsystems/modelator-py/98c6f3356b39e653ade2624a07540c174e97af9f/modelator_py/util/__init__.py -------------------------------------------------------------------------------- /modelator_py/util/cli.py: -------------------------------------------------------------------------------- 1 | from .tlc.cli import Tlc 2 | 3 | 4 | class Util: 5 | def __init__(self, stdin): 6 | self._stdin = stdin 7 | self.tlc = Tlc(stdin) 8 | -------------------------------------------------------------------------------- /modelator_py/util/informal_trace_format.py: -------------------------------------------------------------------------------- 1 | from abc import ABCMeta 2 | 3 | """ 4 | In memory tree representing Informal Trace Format trace. 5 | """ 6 | 7 | 8 | class ITFNode(object): 9 | __metaclass__ = ABCMeta 10 | 11 | def __repr__(self): 12 | assert False, """Not implemented as uses visitor pattern 13 | and I don't want to think about circular reference right now.""" 14 | 15 | 16 | class ITFRecord(ITFNode): 17 | """{ "field1": , ..., "fieldN": }""" 18 | 19 | def __init__(self, elements): 20 | self.elements = elements # dict 21 | 22 | def __eq__(self, other): 23 | """Overrides the default implementation""" 24 | if isinstance(other, ITFRecord): 25 | return self.elements == other.elements 26 | return False 27 | 28 | 29 | class ITFList(ITFNode): 30 | """[ , ..., ]""" 31 | 32 | def __init__(self, elements): 33 | self.elements = elements 34 | 35 | def __eq__(self, other): 36 | """Overrides the default implementation""" 37 | if isinstance(other, ITFList): 38 | return self.elements == other.elements 39 | return False 40 | 41 | 42 | class ITFSet(ITFNode): 43 | """{ "#set": [ , ..., ] }""" 44 | 45 | def __init__(self, elements): 46 | self.elements = elements 47 | 48 | def __eq__(self, other): 49 | """Overrides the default implementation""" 50 | if isinstance(other, ITFSet): 51 | return self.elements == other.elements 52 | return False 53 | 54 | 55 | class ITFMap(ITFNode): 56 | """{ "#map": [ [ , ], ..., [ , ] ] }""" 57 | 58 | def __init__(self, elements): 59 | self.elements = elements 60 | 61 | def __eq__(self, other): 62 | """Overrides the default implementation""" 63 | if isinstance(other, ITFMap): 64 | return self.elements == other.elements 65 | return False 66 | 67 | 68 | class ITFState(ITFNode): 69 | """ 70 | { 71 | "#meta": , 72 | "": , 73 | ... 
74 | "": 75 | } 76 | """ 77 | 78 | def __init__(self, var_value_map): 79 | self.var_value_map = var_value_map 80 | 81 | def __eq__(self, other): 82 | """Overrides the default implementation""" 83 | if isinstance(other, ITFState): 84 | return self.var_value_map == other.var_value_map 85 | return False 86 | 87 | 88 | class ITFTrace(ITFNode): 89 | """ 90 | { 91 | "#meta": , 92 | "params": , 93 | "vars": , 94 | "states": , 95 | "loop": 96 | } 97 | """ 98 | 99 | def __init__(self, vars_, states, meta=None): 100 | self.meta = meta 101 | self.vars = vars_ 102 | self.states = states 103 | 104 | def __eq__(self, other): 105 | """Overrides the default implementation""" 106 | if isinstance(other, ITFState): 107 | return ( 108 | self.meta == other.meta 109 | and self.vars == other.vars 110 | and self.states == other.states 111 | ) 112 | return False 113 | 114 | 115 | class Visitor: 116 | def visit(self, node, *arg, **kw): 117 | # Only visit ITFNode objects. 118 | # It is sufficient to take face-value for python built-ins. 119 | if not isinstance(node, ITFNode): 120 | return node 121 | method_name = f"visit_{type(node).__name__}" 122 | method = getattr(self, method_name) 123 | return method(node, *arg, **kw) 124 | 125 | def visit_ITFRecord(self, node, *arg, **kw): 126 | elements = {k: self.visit(v) for k, v in node.elements.items()} 127 | return ITFRecord(elements) 128 | 129 | def visit_ITFList(self, node, *arg, **kw): 130 | elements = [self.visit(e) for e in node.elements] 131 | return ITFList(elements) 132 | 133 | def visit_ITFSet(self, node, *arg, **kw): 134 | elements = [self.visit(e) for e in node.elements] 135 | return ITFSet(elements) 136 | 137 | def visit_ITFMap(self, node, *arg, **kw): 138 | elements = [[self.visit(e) for e in p] for p in node.elements] 139 | return ITFMap(elements) 140 | 141 | def visit_ITFState(self, node, *arg, **kw): 142 | var_value_map = {k: self.visit(v) for k, v in node.var_value_map.items()} 143 | return ITFState(var_value_map) 144 | 145 | def visit_ITFTrace(self, node, *arg, **kw): 146 | states = [self.visit(e) for e in node.states] 147 | return ITFTrace(node.vars, states, node.meta) 148 | 149 | 150 | class JsonSerializer(Visitor): 151 | def visit_ITFRecord(self, node, *arg, **kw): 152 | elements = {k: self.visit(v) for k, v in node.elements.items()} 153 | return elements 154 | 155 | def visit_ITFList(self, node, *arg, **kw): 156 | elements = [self.visit(e) for e in node.elements] 157 | return elements 158 | 159 | def visit_ITFSet(self, node, *arg, **kw): 160 | elements = [self.visit(e) for e in node.elements] 161 | return {"#set": elements} 162 | 163 | def visit_ITFMap(self, node, *arg, **kw): 164 | elements = [[self.visit(e) for e in p] for p in node.elements] 165 | return {"#map": elements} 166 | 167 | def visit_ITFState(self, node, *arg, **kw): 168 | var_value_map = {k: self.visit(v) for k, v in node.var_value_map.items()} 169 | return var_value_map 170 | 171 | def visit_ITFTrace(self, node, *arg, **kw): 172 | states = [self.visit(e) for e in node.states] 173 | return {"#meta": node.meta, "vars": node.vars, "states": states} 174 | 175 | 176 | class Listifier(Visitor): 177 | def visit_ITFMap(self, node, *arg, **kw): 178 | keys = [p[0] for p in node.elements] 179 | # Is this map has only integer keys and they are from a domain 1..n 180 | if all(type(k) == int for k in keys): 181 | keys.sort() 182 | if keys == list(range(1, len(keys) + 1)): 183 | elements = [self.visit(p[1]) for p in node.elements] 184 | return ITFList(elements) 185 | elements = [[self.visit(e) for e 
in p] for p in node.elements] 186 | return ITFMap(elements) 187 | 188 | 189 | class Recordifier(Visitor): 190 | def visit_ITFMap(self, node, *arg, **kw): 191 | keys = [p[0] for p in node.elements] 192 | # Is this map has only integer keys and they are from a domain 1..n 193 | if all(type(k) == str for k in keys): 194 | elements = {p[0]: self.visit(p[1]) for p in node.elements} 195 | return ITFRecord(elements) 196 | elements = [[self.visit(e) for e in p] for p in node.elements] 197 | return ITFMap(elements) 198 | 199 | 200 | def with_lists(trace: ITFTrace) -> ITFTrace: 201 | """ 202 | Create a copy of the trace where lists take the place 203 | of 1-indexed maps. 204 | 205 | In TLA+ sequences (lists) are precisely functions with domain 206 | 1..n for some n. This function transforms maps with domain 207 | 1..n into ITF lists. 208 | 209 | Warning: may mangle input object. 210 | """ 211 | visitor = Listifier() 212 | return visitor.visit(trace) 213 | 214 | 215 | def with_records(trace: ITFTrace) -> ITFTrace: 216 | """ 217 | Create a copy of the trace where sequences take the place 218 | of string-indexed maps. 219 | 220 | In TLA+ records are precisely functions with domain entirely 221 | of strings. This function transforms maps with domain of 222 | strings into ITF records. 223 | 224 | Warning: may mangle input object. 225 | """ 226 | visitor = Recordifier() 227 | return visitor.visit(trace) 228 | -------------------------------------------------------------------------------- /modelator_py/util/tla/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2016-2020 by California Institute of Technology 2 | Copyright (c) 2008-2013 INRIA and Microsoft Corporation 3 | Copyright (c) 2016-2020 by Ioannis Filippidis 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions 8 | are met: 9 | 10 | 1. Redistributions of source code must retain the above copyright 11 | notice, this list of conditions and the following disclaimer. 12 | 13 | 2. Redistributions in binary form must reproduce the above copyright 14 | notice, this list of conditions and the following disclaimer in the 15 | documentation and/or other materials provided with the distribution. 16 | 17 | 3. Neither the name of the California Institute of Technology nor 18 | the names of its contributors may be used to endorse or promote 19 | products derived from this software without specific prior 20 | written permission. 21 | 22 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 25 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 26 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 27 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 28 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 32 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
33 | -------------------------------------------------------------------------------- /modelator_py/util/tla/README.md: -------------------------------------------------------------------------------- 1 | This directory contains the source code of the [tla_python](https://github.com/tlaplus/tla_python) parser project. 2 | -------------------------------------------------------------------------------- /modelator_py/util/tla/README_license.md: -------------------------------------------------------------------------------- 1 | This directory contains code from [tla_python](https://github.com/tlaplus/tla_python) and is licensed as such. 2 | -------------------------------------------------------------------------------- /modelator_py/util/tla/__init__.py: -------------------------------------------------------------------------------- 1 | """TLA+ parser and syntax tree.""" 2 | from .parser import parse, parse_expr 3 | 4 | __all__ = ["parse", "parse_expr"] 5 | -------------------------------------------------------------------------------- /modelator_py/util/tla/_error.py: -------------------------------------------------------------------------------- 1 | """Parser error messages.""" 2 | # Copyright 2020 by California Institute of Technology 3 | # Copyright (c) 2008-2013 INRIA and Microsoft Corporation 4 | # All rights reserved. Licensed under 3-clause BSD. 5 | # 6 | # This module is based on the file: 7 | # 8 | # 9 | # 10 | # 11 | # open Ext 12 | 13 | 14 | def setdefault(value, default): 15 | if value is None: 16 | value = default 17 | return value 18 | 19 | 20 | # type error_ = 21 | # { err_unex : string option ; 22 | # err_exps : string list ; 23 | # err_msgs : string list ; 24 | # err_ints : string list } 25 | class Error_: 26 | def __init__(self, err_unex=None, err_exps=None, err_msgs=None, err_ints=None): 27 | self.err_unex = err_unex 28 | self.err_exps = setdefault(err_exps, list()) 29 | self.err_msgs = setdefault(err_msgs, list()) 30 | self.err_ints = setdefault(err_ints, list()) 31 | 32 | 33 | # type error = Error of error_ * Loc.locus 34 | class Error: 35 | def __init__(self, error_, locus): 36 | self.error_ = error_ 37 | self.locus = locus 38 | 39 | 40 | # type t = error 41 | class T(Error): 42 | pass 43 | 44 | 45 | # (* FIXME make this return a string *) 46 | # let print_error ?(verbose = false) ouch (Error (err, locus)) = 47 | # let unexp = 48 | # match err.err_unex with 49 | # | None -> "" 50 | # | Some s -> "Unexpected " ^ s ^ "\n" 51 | # in 52 | # let exps = 53 | # match List.unique (err.err_exps) with 54 | # | [] -> "" 55 | # | exps -> 56 | # "Expecting one of {" ^ String.concat ", " exps ^ "}\n" 57 | # in 58 | # let ints = 59 | # if verbose then 60 | # String.concat "" (List.map 61 | # (fun i -> "[Internal] " ^ i ^ "\n") 62 | # (List.unique err.err_ints)) 63 | # else "" 64 | # in 65 | # let msgs = 66 | # String.concat "" (List.map 67 | # (fun i -> i ^ "\n") 68 | # (List.unique (err.err_msgs))) 69 | # in 70 | # let loc = Printf.sprintf "%s\n" (Loc.string_of_locus locus) in 71 | # output_string ouch loc; 72 | # output_string ouch unexp ; 73 | # output_string ouch exps ; 74 | # output_string ouch msgs ; 75 | # output_string ouch ints ; 76 | # flush ouch; 77 | # 78 | # if !Params.toolbox 79 | # then Toolbox_msg.print_warning (loc ^ unexp ^ exps ^ msgs ^ ints); 80 | # ;; 81 | def print_error(verbose, ouch, error): 82 | if error.error_.err_unex is None: 83 | unexp = "" 84 | else: 85 | unexp = f"Unexpected {error.error_.err_unex}\n" 86 | if not error.error_.err_exps: 87 | exps = 
"" 88 | else: 89 | exps = "Expecting one of: {s}\n".format(s="\n".join(error.error_.err_exps)) 90 | if verbose: 91 | ints = "".join(f"[Internal] {i}\n" for i in error.error_.err_ints) 92 | else: 93 | ints = "" 94 | msgs = "".join(i + "\n" for i in error.error_.err_msgs) 95 | print(error.locus) 96 | print(unexp) 97 | print(exps) 98 | print(msgs) 99 | print(ints) 100 | 101 | 102 | # let error locus = 103 | # Error ({ err_unex = None ; 104 | # err_exps = [] ; 105 | # err_ints = [] ; 106 | # err_msgs = [] }, locus) 107 | def error(locus): 108 | return Error(Error_(), locus) 109 | 110 | 111 | # let err_combine (Error (a, alocus)) (Error (b, blocus)) = 112 | # let combo a b = 113 | # { err_unex = None ; 114 | # err_exps = a.err_exps @ b.err_exps ; 115 | # err_ints = a.err_ints @ b.err_ints ; 116 | # err_msgs = a.err_msgs @ b.err_msgs ; 117 | # } 118 | # in 119 | # Error (combo a b, blocus) 120 | def err_combine(a, b): 121 | error_ = Error_( 122 | err_unex=None, 123 | err_exps=a.error_.err_exps + b.error_.err_exps, 124 | err_ints=a.error_.err_ints + b.error_.err_ints, 125 | err_msgs=a.error_.err_msgs + b.error_.err_msgs, 126 | ) 127 | return Error(error_, b.locus) 128 | 129 | 130 | # let err_add_message msg (Error (e, elocus)) = 131 | # Error ({ e with err_msgs = msg :: e.err_msgs }, elocus) 132 | def err_add_message(msg, error): 133 | e = error.error_ 134 | elocus = error.locus 135 | error_ = Error_( 136 | err_unex=e.err_unex, 137 | err_exps=e.err_exps, 138 | err_msgs=[msg] + e.err_msgs, 139 | err_ints=e.err_ints, 140 | ) 141 | return Error(error_, elocus) 142 | 143 | 144 | # let err_add_internal i (Error (e, elocus)) = 145 | # Error ({ e with err_ints = i :: e.err_ints }, elocus) 146 | def err_add_internal(i, error): 147 | e = error.error_ 148 | elocus = error.locus 149 | error_ = Error_( 150 | err_unex=e.err_unex, 151 | err_exps=e.err_exps, 152 | err_msgs=e.err_msgs, 153 | err_ints=[i] + e.err_ints, 154 | ) 155 | return Error(error_, elocus) 156 | 157 | 158 | # let err_add_expecting x (Error (e, elocus)) = 159 | # Error ({ e with err_exps = x :: e.err_exps }, elocus) 160 | def err_add_expecting(x, error): 161 | e = error.error_ 162 | elocus = error.locus 163 | error_ = Error_( 164 | err_unex=e.err_unex, 165 | err_exps=[x] + e.err_exps, 166 | err_msgs=e.err_msgs, 167 | err_ints=e.err_ints, 168 | ) 169 | return Error(error_, elocus) 170 | 171 | 172 | # let err_set_unexpected u (Error (e, elocus)) = 173 | # Error ({ e with err_unex = Some u }, elocus) 174 | def err_set_unexpected(u, error): 175 | e = error.error_ 176 | elocus = error.locus 177 | error_ = Error_( 178 | err_unex=u, err_exps=e.err_exps, err_msgs=e.err_msgs, err_ints=e.err_ints 179 | ) 180 | return Error(error_, elocus) 181 | -------------------------------------------------------------------------------- /modelator_py/util/tla/_intf.py: -------------------------------------------------------------------------------- 1 | """This module describes an abstract interface. 2 | 3 | The implementation is in the module `tla.tokens`. 4 | """ 5 | # Copyright 2020 by California Institute of Technology 6 | # Copyright (C) 2008-2010 INRIA and Microsoft Corporation 7 | # All rights reserved. Licensed under 3-clause BSD. 
8 | # 9 | # This module is based on the file: 10 | # 11 | # 12 | 13 | 14 | # # (** Tokens *) 15 | # # module type Tok = sig 16 | # # type token 17 | # # (** Type of tokens *) 18 | 19 | 20 | # class Token: 21 | # """Type of tokens.""" 22 | 23 | # pass 24 | 25 | 26 | # # val bof : Loc.locus -> token (* beginning of file *) 27 | # # (** token representing start of file *) 28 | 29 | 30 | # def bof(locus): 31 | # """Token representing beginning of file.""" 32 | # return token 33 | 34 | 35 | # # val rep : token -> string 36 | # # (** String representation of tokens *) 37 | 38 | 39 | # def rep(token): 40 | # """String representation of token.""" 41 | # return string 42 | 43 | 44 | # # val locus : token -> Loc.locus 45 | # # (** Origin of the token *) 46 | 47 | 48 | # def locus(token): 49 | # """Location of the token in text.""" 50 | # return locus 51 | 52 | 53 | # # val eq : token -> token -> bool 54 | # # (** Are the tokens equivalent? *) 55 | 56 | 57 | # def eq(token, other_token): 58 | # """Whether tokens are equivalent.""" 59 | # return boolean 60 | 61 | 62 | # # val pp_print_token : Format.formatter -> token -> unit 63 | # # (** For use in format strings *) 64 | 65 | 66 | # def pp_print_token(formatter, token): 67 | # """For use in format strings.""" 68 | # pass 69 | 70 | 71 | # # end 72 | 73 | # # (** Precedence *) 74 | # # module type Prec = sig 75 | # # type prec 76 | # # (** Abstract type of precedence *) 77 | 78 | 79 | # class Prec: 80 | # """Abstract type of operator precedence.""" 81 | 82 | # pass 83 | 84 | 85 | # # val below : prec -> prec -> bool 86 | # # (** {!below} [p q] means that [p] is entirely below [q] *) 87 | 88 | 89 | # def below(prec, other_prec): 90 | # """Whether `prec` is entirely below `other_prec`.""" 91 | # return boolean 92 | 93 | 94 | # # val conflict : prec -> prec -> bool 95 | # # (** {!conflict} [p q] means that an unbracketed expression with 96 | # # two operators of precedence [p] and [q] respectively would be 97 | # # ambiguous. *) 98 | 99 | 100 | # def conflict(prec, other_prec): 101 | # """Whether `prec` and `other_prec` have overlapping precedence ranges.""" 102 | # return boolean 103 | 104 | 105 | # # end 106 | -------------------------------------------------------------------------------- /modelator_py/util/tla/_location.py: -------------------------------------------------------------------------------- 1 | """Source locations.""" 2 | # Copyright 2020 by California Institute of Technology 3 | # Copyright (c) 2008-2013 INRIA and Microsoft Corporation 4 | # All rights reserved. Licensed under 3-clause BSD. 5 | # 6 | # This module is based on the file: 7 | # 8 | # 9 | 10 | 11 | # type pt_ = { line : int ; 12 | # bol : int ; 13 | # col : int ; 14 | # } 15 | class Pt: 16 | """Point in string. 17 | 18 | The string is typically the contents of 19 | a source file. 
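A small sketch of the encoding (values chosen for illustration): the 5th character of a line that starts at byte offset 10 of the file is

```python
pt = Pt(line=2, bol=10, col=5)
pt.offset  # 15, i.e. bol + col
```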
20 | """ 21 | 22 | def __init__(self, line, bol, col): 23 | self.line = line 24 | self.bol = bol # beginning of line (offset 25 | # from beginning of file) 26 | self.col = col # column number from beginning of line 27 | 28 | # let string_of_pt ?(file="") l = 29 | # string_of_locus { start = l ; stop = l ; file = file } 30 | def __str__(self): 31 | return f"line {self.line}, character {self.col}" 32 | 33 | def __eq__(self, other): 34 | if other is None: 35 | return False 36 | return ( 37 | self.line == other.line and self.bol == other.bol and self.col == other.col 38 | ) 39 | 40 | def __hash__(self): 41 | return hash(self.offset) 42 | 43 | # let column = function 44 | # | Actual l -> l.col 45 | # | Dummy -> failwith "Loc.column" 46 | @property 47 | def column(self): 48 | return self.col 49 | 50 | # let offset = function 51 | # | Actual l -> l.bol + l.col 52 | # | Dummy -> failwith "Loc.offset" 53 | @property 54 | def offset(self): 55 | if self.bol is None or self.col is None: 56 | raise ValueError("unknown beginning of line or column") 57 | return self.bol + self.col 58 | 59 | 60 | # type pt = Actual of pt_ | Dummy 61 | 62 | # let dummy = Dummy 63 | # None represents Dummy 64 | 65 | 66 | # type locus = { start : pt ; 67 | # stop : pt ; 68 | # file : string ; 69 | # } 70 | class Locus: 71 | """Location in file.""" 72 | 73 | def __init__(self, start, stop, filename): 74 | self.start = start # Pt | None 75 | self.stop = stop # Pt | None 76 | self.file = filename 77 | 78 | def __repr__(self): 79 | return ( 80 | f"{self.file}: " 81 | f"line {self.start.line}, column {self.start.column} to " 82 | f"line {self.stop.line}, column {self.stop.column}" 83 | ) 84 | 85 | def __copy__(self): 86 | return Locus(self.start, self.stop, self.file) 87 | 88 | def __eq__(self, other): 89 | return ( 90 | self.start == other.start 91 | and self.stop == other.stop 92 | and self.file == other.file 93 | ) 94 | 95 | def __hash__(self): 96 | t = (self.start, self.stop, self.file) 97 | return hash(t) 98 | 99 | # let left_of l = { l with stop = l.start } 100 | def left_of(self): 101 | return Locus(self.start, self.start, self.filename) 102 | 103 | # let right_of l = { l with start = l.stop } 104 | def right_of(self): 105 | return Locus(self.stop, self.stop, self.filename) 106 | 107 | # let merge r1 r2 = 108 | # if r1.file <> r2.file then 109 | # failwith ("Loc.merge: " ^ r1.file ^ " <> " ^ r2.file) 110 | # else 111 | # try { 112 | # start = if offset r1.start <= offset r2.start then 113 | # r1.start else r2.start ; 114 | # stop = if offset r1.stop >= offset r2.stop then 115 | # r1.stop else r2.stop ; 116 | # file = r1.file 117 | # } with _ -> unknown 118 | def merge(self, other): 119 | if self.file != other.file: 120 | raise ValueError(f"different files: {self.file}, {other.file}") 121 | if self.start.offset <= other.start.offset: 122 | start = self.start 123 | else: 124 | start = other.start 125 | if self.stop.offset >= other.stop.offset: 126 | stop = self.stop 127 | else: 128 | stop = other.stop 129 | return Locus(start, stop, self.file) 130 | 131 | 132 | # let unknown = { 133 | # start = Dummy ; 134 | # stop = Dummy ; 135 | # file = "" ; 136 | # } 137 | 138 | unknown = Locus(None, None, "") 139 | 140 | 141 | # let column = function 142 | # | Actual l -> l.col 143 | # | Dummy -> failwith "Loc.column" 144 | # NOTE: `column` is implemented above as `Pt.column` 145 | 146 | 147 | # let line = function 148 | # | Actual l -> l.line 149 | # | Dummy -> failwith "Loc.line" 150 | # NOTE: `line` is implemented above as `Pt.line` 
151 | 152 | 153 | # let offset = function 154 | # | Actual l -> l.bol + l.col 155 | # | Dummy -> failwith "Loc.offset" 156 | # NOTE: `offset` is implemented above as `Pt.offset` 157 | 158 | 159 | # let locus_of_position lp = 160 | # let pt = { line = lp.Lexing.pos_lnum ; 161 | # bol = lp.Lexing.pos_bol ; 162 | # col = lp.Lexing.pos_cnum - lp.Lexing.pos_bol + 1 ; 163 | # } 164 | # in { start = Actual pt ; 165 | # stop = Actual pt ; 166 | # file = lp.Lexing.pos_fname ; 167 | # } 168 | def locus_of_position(filename, lineno, bol, cnum): 169 | column = cnum - bol + 1 # lp.pos_cnum - lp.pos_bol + 1 170 | pt = Pt(line=lineno, bol=bol, col=column) # lp.pos_lnum # lp.pos_bol, 171 | return Locus(pt, pt, filename) # lp.pos_fname 172 | 173 | 174 | # let merge r1 r2 = 175 | # if r1.file <> r2.file then 176 | # failwith ("Loc.merge: " ^ r1.file ^ " <> " ^ r2.file) 177 | # else 178 | # try { 179 | # start = if offset r1.start <= offset r2.start then r1.start else r2.start ; 180 | # stop = if offset r1.stop >= offset r2.stop then r1.stop else r2.stop ; 181 | # file = r1.file 182 | # } with _ -> unknown 183 | # NOTE: `merge` is implemented above as `Locus.merge` 184 | 185 | 186 | # let string_of_locus ?(cap = true) r = 187 | # let ftok = if cap then "File" else "file" in 188 | # match r.start, r.stop with 189 | # | Actual start, Actual stop -> 190 | # if start.line = stop.line && start.col >= stop.col - 1 then 191 | # Printf.sprintf "%s %S, line %d, character %d" 192 | # ftok r.file start.line start.col 193 | # else 194 | # (* || start.line <> stop.line 195 | # * || start.bol <> stop.bol 196 | # *) 197 | # if start.line = stop.line then 198 | # Printf.sprintf "%s %S, line %d, characters %d-%d" 199 | # ftok r.file start.line start.col (stop.col - 1) 200 | # else 201 | # (* start.line <> stop.line *) 202 | # Printf.sprintf "%s %S, line %d, character %d to line %d, character %d" 203 | # ftok r.file 204 | # start.line start.col 205 | # stop.line (stop.col - 1) 206 | # | _ -> 207 | # Printf.sprintf "%s %S" ftok r.file 208 | 209 | 210 | # let string_of_locus_nofile r = 211 | # match r.start, r.stop with 212 | # | Actual start, Actual stop -> 213 | # if start.line = stop.line && start.col >= stop.col - 1 then 214 | # Printf.sprintf "line %d, character %d" 215 | # start.line start.col 216 | # else 217 | # (* || start.line <> stop.line 218 | # * || start.bol <> stop.bol 219 | # *) 220 | # if start.line = stop.line then 221 | # Printf.sprintf "line %d, characters %d-%d" 222 | # start.line start.col (stop.col - 1) 223 | # else 224 | # (* start.line <> stop.line *) 225 | # Printf.sprintf "line %d, character %d to line %d, character %d" 226 | # start.line start.col 227 | # stop.line (stop.col - 1) 228 | # | _ -> "" 229 | 230 | 231 | # let string_of_pt ?(file="") l = 232 | # string_of_locus { start = l ; stop = l ; file = file } 233 | # NOTE: string_of_pt is implemented above as `Pt.__str__` 234 | 235 | 236 | # let compare r s = 237 | # match Pervasives.compare (line r.start) (line s.start) with 238 | # | 0 -> 239 | # Pervasives.compare (column r.start) (column s.start) 240 | # | c -> c 241 | -------------------------------------------------------------------------------- /modelator_py/util/tla/_tla_combinators.py: -------------------------------------------------------------------------------- 1 | """Parser combinators and state for TLA+.""" 2 | # Copyright 2020 by California Institute of Technology 3 | # Copyright (c) 2008-2013 INRIA and Microsoft Corporation 4 | # All rights reserved. Licensed under 3-clause BSD. 
5 | # 6 | # This module is based on the file: 7 | # 8 | # 9 | # 10 | from . import _combinators as pco 11 | from . import _optable, tokens 12 | 13 | 14 | # (** The [pcx] is the state carried by the parsers. The [ledge] field 15 | # contains the left edge of the active rectangle of input. *) 16 | # type pcx = { 17 | # ledge : int ; 18 | # clean : bool ; 19 | # } 20 | class Pcx: 21 | """State carried by the parsers.""" 22 | 23 | def __init__(self, ledge, clean): 24 | self.ledge = ledge 25 | self.clean = clean 26 | 27 | def __repr__(self): 28 | return f"Pcx({self.ledge}, {self.clean})" 29 | 30 | 31 | init = Pcx(-1, True) 32 | 33 | 34 | class WrappedStr(str): 35 | pass 36 | 37 | 38 | class WrappedTuple(tuple): 39 | pass 40 | 41 | 42 | class WrappedList(list): 43 | pass 44 | 45 | 46 | builtins = (bool, str, int, float, tuple, list, dict) 47 | 48 | # let locate p = 49 | # withloc p <$> begin 50 | # fun (a, loc) -> Util.set_locus (Property.noprops a) loc 51 | # end 52 | 53 | 54 | def locate(p): 55 | def apply_location(a_loc): 56 | a, loc = a_loc 57 | if isinstance(a, str): 58 | a = WrappedStr(a) 59 | elif isinstance(a, tuple): 60 | a = WrappedTuple(a) 61 | elif isinstance(a, list): 62 | a = WrappedList(a) 63 | else: 64 | assert type(a) not in builtins 65 | a.loc = loc # Util.set_locus ... loc 66 | return a 67 | 68 | return pco.withloc(p) << pco.apply >> apply_location 69 | 70 | 71 | # let scan ts = 72 | # get >>= fun px -> 73 | # P.scan begin 74 | # fun t -> 75 | # if px.ledge <= Loc.column t.loc.start then ts t.form 76 | # else None 77 | # end 78 | def scan(ts): 79 | return ( 80 | pco.get() 81 | << pco.shift_eq 82 | >> ( 83 | lambda px: pco.scan( 84 | lambda t: ts(t.form) if px.ledge <= t.loc.start.column else None 85 | ) 86 | ) 87 | ) 88 | 89 | 90 | # open Token 91 | 92 | # let punct p = scan begin 93 | # function 94 | # | PUNCT q when q = p -> Some p 95 | # | _ -> None 96 | # end 97 | def punct(p): 98 | def f(form): 99 | # print('punct', p) 100 | if isinstance(form, tokens.PUNCT) and form.string == p: 101 | return p 102 | else: 103 | return None 104 | 105 | return scan(f) 106 | 107 | 108 | # let kwd k = scan begin 109 | # fun tok -> 110 | # match tok with 111 | # | KWD j when j = k -> Some k 112 | # | _ -> None 113 | # end 114 | def kwd(k): 115 | def f(form): 116 | # print('kwd', k) 117 | if isinstance(form, tokens.KWD) and form.string == k: 118 | return k 119 | else: 120 | return None 121 | 122 | return scan(f) 123 | 124 | 125 | # module Op = Optable 126 | 127 | 128 | # let anyinfix = scan begin 129 | # function 130 | # | OP p -> 131 | # let rec loop = function 132 | # | [] -> None 133 | # | ({ Op.fix = Op.Infix _ } as top) :: _ -> Some (top.name) 134 | # | _ :: tops -> loop tops 135 | # in loop (Hashtbl.find_all Op.optable p) 136 | # | _ -> None 137 | # end 138 | def anyinfix(): 139 | def f(form): 140 | if not isinstance(form, tokens.OP): 141 | return None 142 | name = form.string 143 | ops = _optable.optable[name] 144 | fixities = [op.name for op in ops if isinstance(op.fix, _optable.Infix)] 145 | if fixities: 146 | (name,) = fixities 147 | return name 148 | return None 149 | 150 | return scan(f) 151 | 152 | 153 | # let infix o = anyinfix (fun p -> o = p) 154 | def infix(op): 155 | return anyinfix() << pco.question >> (lambda p: op == p) 156 | 157 | 158 | # let anyprefix = scan begin 159 | # function 160 | # | OP p -> 161 | # let rec loop = function 162 | # | [] -> None 163 | # | ({ Op.fix = Op.Prefix } as top) :: _ -> Some (top.name) 164 | # | _ :: tops -> loop tops 165 | # in loop 
(Hashtbl.find_all Op.optable p) 166 | # | _ -> None 167 | # end 168 | def anyprefix(): 169 | def f(form): 170 | if not isinstance(form, tokens.OP): 171 | return None 172 | name = form.string 173 | ops = _optable.optable[name] 174 | fixities = [op.name for op in ops if isinstance(op.fix, _optable.Prefix)] 175 | if fixities: 176 | (name,) = fixities 177 | return name 178 | return None 179 | 180 | return scan(f) 181 | 182 | 183 | # let prefix o = anyprefix (fun p -> o = p) 184 | def prefix(op): 185 | return anyprefix() << pco.question >> (lambda p: op == p) 186 | 187 | 188 | # let anypostfix = scan begin 189 | # function 190 | # | OP p -> 191 | # let rec loop = function 192 | # | [] -> None 193 | # | ({ Op.fix = Op.Postfix } as top) :: _ -> Some (top.name) 194 | # | _ :: tops -> loop tops 195 | # in loop (Hashtbl.find_all Op.optable p) 196 | # | _ -> None 197 | # end 198 | def anypostfix(): 199 | def f(form): 200 | if not isinstance(form, tokens.OP): 201 | return None 202 | name = form.string 203 | ops = _optable.optable[name] 204 | fixities = [op.name for op in ops if isinstance(op.fix, _optable.Postfix)] 205 | if fixities: 206 | (name,) = fixities 207 | return name 208 | return None 209 | 210 | return scan(f) 211 | 212 | 213 | # let anyop = scan begin 214 | # function 215 | # | OP o -> 216 | # let op = Hashtbl.find Optable.optable o in 217 | # Some op.Optable.name 218 | # | _ -> None 219 | # end 220 | def anyop(): 221 | def f(form): 222 | if isinstance(form, tokens.OP): 223 | name = form.string 224 | *_, op = _optable.optable[name] 225 | return op.name 226 | else: 227 | return None 228 | 229 | return scan(f) 230 | 231 | 232 | # let anyident = scan begin 233 | # function 234 | # | ID i -> Some i 235 | # | _ -> None 236 | # end 237 | def anyident(): 238 | def f(form): 239 | if isinstance(form, tokens.ID): 240 | return form.string 241 | else: 242 | return None 243 | 244 | return scan(f) 245 | 246 | 247 | # let ident i = anyident (fun j -> i = j) 248 | def ident(i): 249 | return anyident() << pco.question >> (lambda j: i == j) 250 | 251 | 252 | # let anyname = scan begin 253 | # function 254 | # | ID nm | KWD nm -> Some nm 255 | # | _ -> None 256 | # end 257 | def anyname(): 258 | def f(form): 259 | if isinstance(form, tokens.ID) or isinstance(form, tokens.KWD): 260 | return form.string 261 | else: 262 | return None 263 | 264 | return scan(f) 265 | 266 | 267 | # let number = scan begin 268 | # function 269 | # | NUM (m, n) -> Some (m, n) 270 | # | _ -> None 271 | # end 272 | def number(): 273 | def f(form): 274 | if isinstance(form, tokens.NUM): 275 | return (form.string1, form.string2) 276 | else: 277 | return None 278 | 279 | return scan(f) 280 | 281 | 282 | # let nat = scan begin 283 | # function 284 | # | NUM (m, "") -> Some (int_of_string m) 285 | # | _ -> None 286 | # end 287 | def nat(): 288 | def f(form): 289 | if isinstance(form, tokens.NUM) and form.string2 is None: 290 | return int(form.string1) 291 | else: 292 | return None 293 | 294 | return scan(f) 295 | 296 | 297 | # let str = scan begin 298 | # function 299 | # | STR (s) -> Some (s) 300 | # | _ -> None 301 | # end 302 | def str_(): 303 | def f(form): 304 | if isinstance(form, tokens.STR): 305 | return form.string 306 | else: 307 | return None 308 | 309 | return scan(f) 310 | 311 | 312 | # let pragma p = punct "(*{" >>> p <<< punct "}*)" 313 | def pragma(p): 314 | return punct("(*{") << pco.second >> p << pco.first >> punct("}*)") 315 | -------------------------------------------------------------------------------- 
/modelator_py/util/tla/examples/Counter.tla: -------------------------------------------------------------------------------- 1 | ---- MODULE Counter ---- 2 | (* A state machine that indefinitely increments a variable. *) 3 | VARIABLE x 4 | 5 | 6 | Init == x = 0 7 | Next == x' = x + 1 8 | Spec == Init /\ [][Next]_x /\ WF_x(Next) 9 | ======================== 10 | -------------------------------------------------------------------------------- /modelator_py/util/tla/examples/README.md: -------------------------------------------------------------------------------- 1 | This directory contains the following example scripts: 2 | 3 | - `parsing_tla_modules.py`: how to parse a TLA+ module 4 | - `parsing_tla_expressions.py`: how to parse a TLA+ expression 5 | - `syntax_tree_visitor.py`: how to implement the visitor pattern using the 6 | TLA+ abstract syntax tree 7 | -------------------------------------------------------------------------------- /modelator_py/util/tla/examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/informalsystems/modelator-py/98c6f3356b39e653ade2624a07540c174e97af9f/modelator_py/util/tla/examples/__init__.py -------------------------------------------------------------------------------- /modelator_py/util/tla/examples/parsing_tla_expressions.py: -------------------------------------------------------------------------------- 1 | """How to parse a TLA+ expression.""" 2 | from modelator_py.util.tla import parser 3 | from modelator_py.util.tla.to_str import Nodes 4 | 5 | expr = r""" 6 | \/ /\ x = 1 7 | /\ x' = 2 8 | 9 | \/ /\ x = 2 10 | /\ x' = 1 11 | """ 12 | 13 | 14 | def parse_expr(): 15 | """Parse a TLA+ expression.""" 16 | tree = parser.parse_expr(expr) 17 | print(tree) 18 | 19 | 20 | def parse_expr_and_pretty_print(): 21 | """Parse and print a TLA+ expression.""" 22 | tree = parser.parse_expr(expr, nodes=Nodes) 23 | s = tree.to_str(width=80) 24 | print(s) 25 | 26 | 27 | if __name__ == "__main__": 28 | parse_expr() 29 | parse_expr_and_pretty_print() 30 | -------------------------------------------------------------------------------- /modelator_py/util/tla/examples/parsing_tla_modules.py: -------------------------------------------------------------------------------- 1 | """How to parse a TLA+ module.""" 2 | from modelator_py.util.tla import parser 3 | from modelator_py.util.tla.to_str import Nodes 4 | 5 | TLA_FILE_PATH = "Counter.tla" 6 | 7 | 8 | def parse_module(): 9 | """Parse a TLA+ module.""" 10 | tla_spec = _load_tla_module() 11 | tree = parser.parse(tla_spec) 12 | print(tree) 13 | 14 | 15 | def parse_module_and_pretty_print(): 16 | """Parse and print a TLA+ module.""" 17 | tla_spec = _load_tla_module() 18 | tree = parser.parse(tla_spec, nodes=Nodes) 19 | s = tree.to_str(width=80) 20 | print(s) 21 | 22 | 23 | def _load_tla_module(): 24 | """Return contents of TLA+ file.""" 25 | tla_file_path = TLA_FILE_PATH 26 | with open(tla_file_path, "r") as f: 27 | tla_spec = f.read() 28 | return tla_spec 29 | 30 | 31 | if __name__ == "__main__": 32 | parse_module() 33 | parse_module_and_pretty_print() 34 | -------------------------------------------------------------------------------- /modelator_py/util/tla/examples/syntax_tree_visitor.py: -------------------------------------------------------------------------------- 1 | """How to use the visitor pattern when traversing the syntax tree.""" 2 | from modelator_py.util.tla import parser, to_str, visit 3 | 4 | expr = r"x = 1 /\ y = 2" 5 | 6 | 7 
| class CollectIdentifiers(visit.NodeTransformer): 8 | """A visitor that collects identifiers.""" 9 | 10 | def visit_Opaque(self, node, *arg, **kw): 11 | name = node.name 12 | kw["identifiers"].add(name) 13 | return self.nodes.Opaque(name) 14 | 15 | 16 | def visit_tla_expr(): 17 | """Traverse the syntax tree to collect identifiers.""" 18 | tree = parser.parse_expr(expr, nodes=to_str.Nodes) 19 | identifiers = set() 20 | visitor = CollectIdentifiers() 21 | visitor.visit(tree, identifiers=identifiers) 22 | print(identifiers) 23 | 24 | 25 | if __name__ == "__main__": 26 | visit_tla_expr() 27 | -------------------------------------------------------------------------------- /modelator_py/util/tla/parser.py: -------------------------------------------------------------------------------- 1 | """TLA+ parser using combinators.""" 2 | # Copyright 2020 by California Institute of Technology 3 | # All rights reserved. Licensed under 3-clause BSD. 4 | # 5 | from . import _combinators as pco 6 | from . import _expr_parser as ep 7 | from . import _module_parser as mp 8 | from . import _optable 9 | from . import _proof_parser as pfp 10 | from . import _tla_combinators, lex 11 | 12 | 13 | def parse(module_text, nodes=None): 14 | """Return abstract syntax tree for `str`ing `module_text`. 15 | 16 | `module_text` is a module specification. 17 | Use the syntax tree classes from `nodes`. 18 | For example: 19 | 20 | ```python 21 | from . import parser 22 | from .to_str import Nodes 23 | 24 | module_text = r''' 25 | ---- MODULE Foo ---- 26 | A == 1 27 | ==================== 28 | ''' 29 | 30 | tree = parser.parse(module_text, nodes=Nodes) 31 | ``` 32 | """ 33 | memo = _save(nodes) 34 | parser = mp.parse() 35 | init = _tla_combinators.init 36 | tokens = lex.tokenize(module_text, omit_preamble=True) 37 | tree, pst = pco.run(parser, init=init, source=tokens) 38 | _restore(memo) 39 | return tree 40 | 41 | 42 | def parse_expr(expr, nodes=None): 43 | r"""Return abstract syntax tree for `str`ing `expr`. 44 | 45 | `expr` is an expression string. 46 | Use the syntax tree classes from `nodes`. 47 | For example: 48 | 49 | ```python 50 | from . import parser 51 | from .to_str import Nodes 52 | 53 | expr = r'x = 1 /\ y = 2' 54 | 55 | tree = parser.parse_expr(expr, nodes=Nodes) 56 | ``` 57 | """ 58 | memo = _save(nodes) 59 | parser = ep.expr(False) 60 | init = _tla_combinators.init 61 | tokens = lex.tokenize(expr, omit_preamble=False) 62 | tree, pst = pco.run(parser, init=init, source=tokens) 63 | _restore(memo) 64 | return tree 65 | 66 | 67 | def _save(nodes): 68 | """Store the `tla_ast` attribute of parser modules. 69 | 70 | Set the AST nodes that are used by parser modules 71 | to the classes of `nodes`. 
72 | """ 73 | if nodes is None: 74 | return None 75 | assert nodes is not None 76 | memo = (ep.tla_ast, mp.nodes, pfp.tla_ast, _optable.nodes) 77 | ep.tla_ast = nodes 78 | mp.nodes = nodes 79 | pfp.tla_ast = nodes 80 | _optable.nodes = nodes 81 | _optable.optable = _optable._generate_optable() 82 | ep.fixities = ep._generate_fixities() 83 | return memo 84 | 85 | 86 | def _restore(memo): 87 | """Set the `tla_ast` attribute of parser modules.""" 88 | if memo is None: 89 | return 90 | assert memo is not None 91 | (ep.tla_ast, mp.nodes, pfp.tla_ast, _optable.nodes) = memo 92 | _optable.optable = _optable._generate_optable() 93 | ep.fixities = ep._generate_fixities() 94 | -------------------------------------------------------------------------------- /modelator_py/util/tlc/__init__.py: -------------------------------------------------------------------------------- 1 | from .itf import TlcITFCmd, tlc_itf 2 | 3 | __all__ = ["TlcITFCmd", "tlc_itf"] 4 | -------------------------------------------------------------------------------- /modelator_py/util/tlc/cli.py: -------------------------------------------------------------------------------- 1 | import json as stdjson 2 | 3 | from .itf import TlcITFCmd, tlc_itf 4 | 5 | 6 | class Tlc: 7 | def __init__(self, stdin): 8 | self._stdin = stdin 9 | 10 | def itf( 11 | self, 12 | *, 13 | lists=True, 14 | records=True, 15 | json=False, # Read parameters from Json? 16 | ): 17 | """ 18 | Extract a list of Informal Trace Format traces from the stdout of TLC. 19 | 20 | Runs a parser over the contents of the stdout of a TLC execution to extract TLA+ traces 21 | and then converts them to Informal Trace Format. 22 | 23 | Arguments: 24 | lists : Convert 1-indexed functions (TLA+ sequences) to ITF lists? 25 | records : Convert string-indexed functions (TLA+ records) to ITF records? 26 | json : Read arguments from json instead of cli? 
27 | """ 28 | result = None 29 | if json: 30 | json_dict = stdjson.loads(self._stdin.read()) 31 | result = tlc_itf(json=json_dict) 32 | else: 33 | 34 | assert ( 35 | self._stdin is not None 36 | ), "TLC's stdout string should be passed on stdin if not passing json" 37 | 38 | cmd = TlcITFCmd() 39 | cmd.stdout = self._stdin.read() 40 | cmd.lists = lists 41 | cmd.records = records 42 | 43 | assert ( 44 | cmd.stdout is not None 45 | ), "TLC's stdout string should be passed on stdin if not passing json" 46 | 47 | result = tlc_itf(cmd=cmd) 48 | 49 | obj_to_print = {} 50 | obj_to_print["traces"] = result 51 | 52 | to_print = stdjson.dumps(obj_to_print, indent=4, sort_keys=True) 53 | print(to_print) 54 | -------------------------------------------------------------------------------- /modelator_py/util/tlc/itf.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import Optional 3 | 4 | from modelator_py.helper import parallel_map 5 | 6 | from ..informal_trace_format import JsonSerializer, with_lists, with_records 7 | from .stdout_to_informal_trace_format import ( 8 | extract_traces, 9 | tlc_trace_to_informal_trace_format_trace, 10 | ) 11 | 12 | # mypy: ignore-errors 13 | 14 | 15 | @dataclass 16 | class TlcITFCmd: 17 | stdout: Optional[str] = None # Captured stdout from TLC execution 18 | lists: Optional[str] = None # Transform 1-indexed TLA+ functions into lists 19 | records: Optional[str] = None # Transform string indexed functions into records 20 | 21 | 22 | def json_to_cmd(json) -> TlcITFCmd: 23 | json = {"stdout": None, "lists": True, "records": True} | json 24 | cmd = TlcITFCmd() 25 | cmd.stdout = json["stdout"] 26 | cmd.lists = json["lists"] 27 | cmd.records = json["records"] 28 | return cmd 29 | 30 | 31 | def tlc_itf(*, cmd=None, json=None): # types: ignore 32 | """ 33 | Extract a list of execution traces in the Informal Trace Format from the 34 | stdout of a TLC execution. 35 | 36 | Returns a list of ITFTrace objects. 37 | 38 | Benefits from multiple cpu cores as parallelizes TLA+ raw text to AST parsing. 39 | """ 40 | 41 | if json is not None: 42 | cmd = json_to_cmd(json) 43 | 44 | assert cmd.stdout is not None, "tlc_itf requires TLC's stdout as input data" 45 | 46 | tlc_traces = extract_traces(cmd.stdout) 47 | 48 | itf_traces = parallel_map(tlc_trace_to_informal_trace_format_trace, tlc_traces) 49 | 50 | if cmd.lists: 51 | itf_traces = parallel_map(with_lists, itf_traces) 52 | if cmd.records: 53 | itf_traces = parallel_map(with_records, itf_traces) 54 | 55 | itf_traces_objects = parallel_map(lambda t: JsonSerializer().visit(t), itf_traces) 56 | 57 | return itf_traces_objects 58 | -------------------------------------------------------------------------------- /modelator_py/util/tlc/state_to_informal_trace_format.py: -------------------------------------------------------------------------------- 1 | from modelator_py.util.informal_trace_format import ITFMap, ITFSet, ITFState 2 | from modelator_py.util.tla import parser, visit 3 | from modelator_py.util.tla.to_str import Nodes 4 | 5 | 6 | def merge_itf_maps(f, g): 7 | """ 8 | 9 | Computes the result of the @@ operator. 10 | 11 | f @@ g == [ 12 | x \\in (DOMAIN f) \\cup (DOMAIN g) |-> 13 | IF x \\in DOMAIN f THEN f[x] ELSE g[x] 14 | ] 15 | 16 | The output of TLC should never contain functions 17 | with overlapping domains so we can skip the overlap 18 | check that is present in a model checker. 
19 | """ 20 | assert isinstance(f, ITFMap) 21 | assert isinstance(g, ITFMap) 22 | elements = f.elements 23 | elements.extend(g.elements) 24 | return ITFMap(elements) 25 | 26 | 27 | class Visitor(visit.NodeTransformer): 28 | """ 29 | Translates a state expression from the stdout of TLC to 30 | a list of [, ] pairs. 31 | 32 | TLC states are given in a conjunction list. This visitor ONLY 33 | work on such input. 34 | """ 35 | 36 | def visit(self, node, *arg, **kw): 37 | """Call the implementation method for `node`. 38 | 39 | For each `node` of class named `ClsName`, 40 | there is a method named `visit_ClsName`. 41 | 42 | Override the `visit_*` methods to change 43 | the visitor's behavior, by subclassing it. 44 | """ 45 | method_name = f"visit_{type(node).__name__}" 46 | method = getattr(self, method_name) 47 | return method(node, *arg, **kw) 48 | 49 | def visit_Opaque(self, node, *arg, **kw): 50 | # .name 51 | return node.name 52 | 53 | def visit_List(self, node, *arg, **kw): 54 | """ 55 | For parsing TLC state the only List is at the top 56 | level (variable conjunction), therefore we do not 57 | need to return the op 58 | """ 59 | # .op 60 | # .exprs 61 | self.visit(node.op, *arg, **kw) 62 | variable_pairs = [] 63 | for expr in node.exprs: 64 | e = self.visit(expr, *arg, **kw) 65 | variable_pairs.append(e) 66 | return variable_pairs 67 | 68 | def visit_SetEnum(self, node, *arg, **kw): 69 | # .exprs 70 | elements = [] 71 | for expr in node.exprs: 72 | e = self.visit(expr, *arg, **kw) 73 | elements.append(e) 74 | return ITFSet(elements) 75 | 76 | def visit_Record(self, node, *arg, **kw): 77 | # .items 78 | pairs = [] 79 | for name, expr in node.items: 80 | e = self.visit(expr, *arg, **kw) 81 | pair = [name, e] 82 | pairs.append(pair) 83 | return ITFMap(pairs) 84 | 85 | def visit_String(self, node, *arg, **kw): 86 | # .value 87 | 88 | """.value is a string with quote characters""" 89 | return node.value[1:-1] 90 | 91 | def visit_Eq(self, node, *arg, **kw): 92 | pass 93 | 94 | def visit_Parens(self, node, *arg, **kw): 95 | # .expr 96 | # .pform 97 | expr = self.visit(node.expr, *arg, **kw) 98 | self.visit(node.pform, *arg, **kw) 99 | return expr 100 | 101 | def visit_Syntax(self, node, *arg, **kw): 102 | pass 103 | 104 | def visit_Tuple(self, node, *arg, **kw): 105 | # .exprs 106 | i = 1 107 | pairs = [] 108 | for expr in node.exprs: 109 | e = self.visit(expr, *arg, **kw) 110 | pair = [i, e] 111 | pairs.append(pair) 112 | i += 1 113 | return ITFMap(pairs) 114 | 115 | def visit_FALSE(self, node, *arg, **kw): 116 | return False 117 | 118 | def visit_TRUE(self, node, *arg, **kw): 119 | return True 120 | 121 | def visit_Apply(self, node, *arg, **kw): 122 | # .op 123 | # .operands 124 | 125 | assert type(node.op) in {Nodes.Eq, Nodes.Opaque} 126 | 127 | self.visit(node.op, *arg, **kw) 128 | 129 | if type(node.op) == Nodes.Eq: 130 | assert len(node.operands) == 2 131 | variable_name = node.operands[0].name 132 | variable_value = self.visit(node.operands[1], *arg, **kw) 133 | return [variable_name, variable_value] 134 | if type(node.op) == Nodes.Opaque: 135 | assert node.op.name in {":>", "@@", "-."} 136 | if node.op.name == ":>": 137 | assert len(node.operands) == 2 138 | key = self.visit(node.operands[0], *arg, **kw) 139 | value = self.visit(node.operands[1], *arg, **kw) 140 | return ITFMap([[key, value]]) 141 | if node.op.name == "@@": 142 | assert len(node.operands) == 2 143 | f = self.visit(node.operands[0], *arg, **kw) 144 | g = self.visit(node.operands[1], *arg, **kw) 145 | assert type(f) == 
ITFMap 146 | assert type(g) == ITFMap 147 | return merge_itf_maps(f, g) 148 | if node.op.name == "-.": 149 | assert len(node.operands) == 1 150 | return -self.visit(node.operands[0], *arg, **kw) 151 | 152 | def visit_Number(self, node, *arg, **kw): 153 | """WARNING: does not support floating point""" 154 | # .integer 155 | # .mantissa 156 | return int(node.integer) 157 | 158 | def visit_And(self, node, *arg, **kw): 159 | pass 160 | 161 | 162 | def state_to_informal_trace_format_state(state_expr_str: str): 163 | """ 164 | Converts a state expression string as found in the stdout of TLC 165 | into an in memory AST representation. 166 | 167 | Note: this is a slow operation. 168 | """ 169 | tree = parser.parse_expr(state_expr_str, nodes=Nodes) 170 | visitor = Visitor() 171 | var_value_pairs = visitor.visit(tree) 172 | var_value_map = {key: value for key, value in var_value_pairs} 173 | return ITFState(var_value_map) 174 | -------------------------------------------------------------------------------- /modelator_py/util/tlc/stdout_to_informal_trace_format.py: -------------------------------------------------------------------------------- 1 | import typing 2 | 3 | from modelator_py.util.informal_trace_format import ITFTrace 4 | 5 | from .state_to_informal_trace_format import state_to_informal_trace_format_state 6 | 7 | 8 | def trace_lines_model_checking_mode(stdout) -> typing.List[typing.List[str]]: 9 | """ 10 | Returns list of lists. Each sublist is a list of lines 11 | that make a trace. 12 | 13 | Args: 14 | stdout : stdout of TLC execution run in model checking mode 15 | """ 16 | ret = [] 17 | lines = stdout.split("\n") 18 | header_open = False 19 | 20 | def is_header(line): 21 | """One line before the beginning of the trace.""" 22 | single_state_trace_header = "is violated by the initial state" in line 23 | mult_state_trace_header = line == "Error: The behavior up to this point is:" 24 | return single_state_trace_header or mult_state_trace_header 25 | 26 | def is_start_of_new_trace(line): 27 | """When there are multiple traces, closes the previous trace""" 28 | 29 | # when there are multiple violations, a new trace report starts with: 30 | continue_case = line.startswith("Error: Invariant") 31 | 32 | # when the first violation was in the init state, the second one starts with: 33 | init_state_continue_case = line.startswith("Finished computing initial states") 34 | return continue_case or init_state_continue_case 35 | 36 | def is_footer(line): 37 | multi_state_footer = ( 38 | ("states generated" in line) 39 | and ("distinct states found" in line) 40 | and ("states left on queue" in line) 41 | and (not line.startswith("Progress")) 42 | ) or ("Model checking completed" in line) 43 | 44 | single_state_footer = "Finished in" in line 45 | 46 | return single_state_footer or multi_state_footer 47 | 48 | header_cnt = 0 49 | header_ix = -1 50 | for i, line in enumerate(lines): 51 | if is_start_of_new_trace(line): 52 | header_open = True 53 | if 0 < header_cnt: 54 | trace = lines[header_ix + 1 : i] 55 | ret.append(trace) 56 | 57 | if is_header(line): 58 | header_cnt += 1 59 | header_ix = i 60 | 61 | # we need boolean header_open because the footer the conditions for the footer 62 | # of a single state trace will be met also in the line after the footer of a multi-state trace 63 | if header_open and is_footer(line): 64 | header_open = False 65 | if 0 < header_cnt: 66 | trace = lines[header_ix + 1 : i] 67 | ret.append(trace) 68 | break 69 | 70 | return ret 71 | 72 | 73 | def 
trace_lines_simulation_mode(stdout) -> typing.List[typing.List[str]]: 74 | """ 75 | Returns list of lists. Each sublist is a list of lines 76 | that make a trace. 77 | 78 | Args: 79 | stdout : stdout of TLC execution run in simulation mode 80 | """ 81 | ret = [] 82 | lines = stdout.split("\n") 83 | 84 | def is_header(line): 85 | """Begins a trace and may also end a previous trace""" 86 | HEADER = "State 1:" 87 | return line.startswith(HEADER) 88 | 89 | def is_footer(line): 90 | """Ends the list of traces""" 91 | return line.startswith("Finished in") 92 | 93 | header_cnt = 0 94 | header_ix = -1 95 | for i, line in enumerate(lines): 96 | if is_header(line): 97 | if 0 < header_cnt: 98 | trace = lines[header_ix : i - 4] 99 | ret.append(trace) 100 | header_cnt += 1 101 | header_ix = i 102 | if is_footer(line) and 0 < header_cnt: 103 | ret.append(lines[header_ix : i - 4]) 104 | 105 | return ret 106 | 107 | 108 | def split_into_states(lines: typing.List[str]) -> typing.List[typing.List[str]]: 109 | """ 110 | Converts a TLA+/ASCII trace string expression into a list of TLA+ state 111 | string expressions. Requires removing non-TLA+ ascii from the trace string 112 | expression. 113 | 114 | A trace from TLC is a sequence of [header, content] pairs. 115 | The headers are not valid TLA+. 116 | This function returns a list where each item is valid TLA+ content. 117 | """ 118 | ret = [] 119 | HEADER = "State " 120 | header_cnt = 0 121 | header_ix = -1 122 | 123 | # this is for the case when the invariant is violated in the initial state 124 | # then, the counterexample is not prefixed with "State " 125 | if len(lines) > 0 and not lines[0].startswith(HEADER): 126 | lines = [HEADER] + lines 127 | for i, line in enumerate(lines): 128 | if line.startswith(HEADER): 129 | if 0 < header_cnt: 130 | ret.append(lines[header_ix + 1 : i]) 131 | header_ix = i 132 | header_cnt += 1 133 | if 0 < header_cnt: 134 | ret.append(lines[header_ix + 1 :]) 135 | 136 | return ret 137 | 138 | 139 | def extract_traces(stdout: str): 140 | """ 141 | Extract zero, one or more traces from the stdout of TLC. 142 | 143 | A trace returned by this function is a list of lists of substrings of stdout. 144 | Each sublist of substrings is a trace and each substring is a state. 145 | 146 | WARNING: Does not support lasso traces 147 | """ 148 | traces = None 149 | if "Running Random Simulation" in stdout: 150 | traces = trace_lines_simulation_mode(stdout) 151 | else: 152 | traces = trace_lines_model_checking_mode(stdout) 153 | traces = [split_into_states(t) for t in traces] # type:ignore 154 | traces = [["\n".join(lines) for lines in t] for t in traces] 155 | return traces 156 | 157 | 158 | def tlc_trace_to_informal_trace_format_trace(trace: typing.List[str]): 159 | """ 160 | Convert a tla trace from TLC stdout to the Informal Trace Format 161 | https://apalache.informal.systems/docs/adr/015adr-trace.html?highlight=trace%20format#adr-015-informal-trace-format-in-json 162 | 163 | Trace input is a list of states. Each state is a string. 
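    For example (hypothetical input), a two-state trace is a list of two
    strings, each of the shape:

        /\\ x = "hello"
        /\\ y = 42

    The returned ITFTrace then has vars ["x", "y"] and one ITFState per
    input string.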
164 | """ 165 | 166 | states = [state_to_informal_trace_format_state(state) for state in trace] 167 | vars = [] 168 | if 0 < len(states): 169 | vars = list(states[0].var_value_map.keys()) 170 | 171 | return ITFTrace(vars, states) 172 | -------------------------------------------------------------------------------- /pylama.ini: -------------------------------------------------------------------------------- 1 | [pylama] 2 | ignore = E501,C901,E203 3 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "modelator_py" 3 | version = "0.2.6" 4 | description = "Lightweight utilities to assist model writing and model-based testing activities using the TLA+ ecosystem" 5 | authors = ["Daniel Tisdall ", "Ivan Gavran "] 6 | readme = "README.md" 7 | homepage = "https://mbt.informal.systems/" 8 | repository = "https://github.com/informalsystems/modelator-py/" 9 | documentation = "https://github.com/informalsystems/modelator-py/" 10 | keywords = ["utility", "tla", "tlaplus", "tlc", "apalache"] 11 | 12 | [tool.poetry.dependencies] 13 | python = "^3.8" 14 | fire = "^0.4" 15 | infix = "^1.2" 16 | ply = "^3.11" 17 | pathos = "^0.3" 18 | 19 | [tool.poetry.dev-dependencies] 20 | pytest = "^7.2.0" 21 | black = {version = "^22.3.0", allow-prereleases = true} 22 | pytest-cov = "^3.0.0" 23 | pre-commit = "^2.15.0" 24 | flake8 = "^4.0.1" 25 | mypy = "^0.931" 26 | isort = "^5.9.3" 27 | 28 | [tool.poetry.scripts] 29 | modelator = "modelator_py.cli:cli" 30 | 31 | [build-system] 32 | requires = ["poetry-core>=1.0.0"] 33 | build-backend = "poetry.core.masonry.api" 34 | 35 | [tool.black] 36 | line-length = 88 37 | target-version = ['py38'] 38 | include = '\.pyi?$' 39 | exclude = ''' 40 | ( 41 | /( 42 | \.eggs # exclude a few common directories in the 43 | | \.git # root of the project 44 | | \.hg 45 | | \.mypy_cache 46 | | \.tox 47 | | \.venv 48 | | _build 49 | | buck-out 50 | | build 51 | | dist 52 | )/ 53 | | foo.py # also separately exclude a file named foo.py in 54 | # the root of the project 55 | ) 56 | ''' 57 | 58 | [tool.isort] 59 | multi_line_output = 3 60 | include_trailing_comma = true 61 | force_grid_wrap = 0 62 | use_parentheses = true 63 | line_length = 88 64 | profile = "black" 65 | -------------------------------------------------------------------------------- /samples/.gitignore: -------------------------------------------------------------------------------- 1 | ### Linux ### 2 | *~ 3 | 4 | # temporary files which can be created if a process still has a handle open of a deleted file 5 | .fuse_hidden* 6 | 7 | # KDE directory preferences 8 | .directory 9 | 10 | # Linux trash folder which might appear on any partition or disk 11 | .Trash-* 12 | 13 | # .nfs files are created when an open file is removed but is still being accessed 14 | .nfs* 15 | 16 | ### macOS ### 17 | # General 18 | .DS_Store 19 | .AppleDouble 20 | .LSOverride 21 | 22 | # Icon must end with two \r 23 | Icon 24 | 25 | 26 | # Thumbnails 27 | ._* 28 | 29 | # Files that might appear in the root of a volume 30 | .DocumentRevisions-V100 31 | .fseventsd 32 | .Spotlight-V100 33 | .TemporaryItems 34 | .Trashes 35 | .VolumeIcon.icns 36 | .com.apple.timemachine.donotpresent 37 | 38 | # Directories potentially created on remote AFP share 39 | .AppleDB 40 | .AppleDesktop 41 | Network Trash Folder 42 | Temporary Items 43 | .apdisk 44 | 45 | ### PyCharm ### 46 | # Covers JetBrains IDEs: IntelliJ, 
RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider 47 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 48 | 49 | # User-specific stuff 50 | .idea/**/workspace.xml 51 | .idea/**/tasks.xml 52 | .idea/**/usage.statistics.xml 53 | .idea/**/dictionaries 54 | .idea/**/shelf 55 | 56 | # AWS User-specific 57 | .idea/**/aws.xml 58 | 59 | # Generated files 60 | .idea/**/contentModel.xml 61 | 62 | # Sensitive or high-churn files 63 | .idea/**/dataSources/ 64 | .idea/**/dataSources.ids 65 | .idea/**/dataSources.local.xml 66 | .idea/**/sqlDataSources.xml 67 | .idea/**/dynamic.xml 68 | .idea/**/uiDesigner.xml 69 | .idea/**/dbnavigator.xml 70 | 71 | # Gradle 72 | .idea/**/gradle.xml 73 | .idea/**/libraries 74 | 75 | # Gradle and Maven with auto-import 76 | # When using Gradle or Maven with auto-import, you should exclude module files, 77 | # since they will be recreated, and may cause churn. Uncomment if using 78 | # auto-import. 79 | # .idea/artifacts 80 | # .idea/compiler.xml 81 | # .idea/jarRepositories.xml 82 | # .idea/modules.xml 83 | # .idea/*.iml 84 | # .idea/modules 85 | # *.iml 86 | # *.ipr 87 | 88 | # CMake 89 | cmake-build-*/ 90 | 91 | # Mongo Explorer plugin 92 | .idea/**/mongoSettings.xml 93 | 94 | # File-based project format 95 | *.iws 96 | 97 | # IntelliJ 98 | out/ 99 | 100 | # mpeltonen/sbt-idea plugin 101 | .idea_modules/ 102 | 103 | # JIRA plugin 104 | atlassian-ide-plugin.xml 105 | 106 | # Cursive Clojure plugin 107 | .idea/replstate.xml 108 | 109 | # SonarLint plugin 110 | .idea/sonarlint/ 111 | 112 | # Crashlytics plugin (for Android Studio and IntelliJ) 113 | com_crashlytics_export_strings.xml 114 | crashlytics.properties 115 | crashlytics-build.properties 116 | fabric.properties 117 | 118 | # Editor-based Rest Client 119 | .idea/httpRequests 120 | 121 | # Android studio 3.1+ serialized cache file 122 | .idea/caches/build_file_checksums.ser 123 | 124 | ### PyCharm Patch ### 125 | # Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 126 | 127 | # *.iml 128 | # modules.xml 129 | # .idea/misc.xml 130 | # *.ipr 131 | 132 | # Sonarlint plugin 133 | # https://plugins.jetbrains.com/plugin/7973-sonarlint 134 | .idea/**/sonarlint/ 135 | 136 | # SonarQube Plugin 137 | # https://plugins.jetbrains.com/plugin/7238-sonarqube-community-plugin 138 | .idea/**/sonarIssues.xml 139 | 140 | # Markdown Navigator plugin 141 | # https://plugins.jetbrains.com/plugin/7896-markdown-navigator-enhanced 142 | .idea/**/markdown-navigator.xml 143 | .idea/**/markdown-navigator-enh.xml 144 | .idea/**/markdown-navigator/ 145 | 146 | # Cache file creation bug 147 | # See https://youtrack.jetbrains.com/issue/JBR-2257 148 | .idea/$CACHE_FILE$ 149 | 150 | # CodeStream plugin 151 | # https://plugins.jetbrains.com/plugin/12206-codestream 152 | .idea/codestream.xml 153 | 154 | ### Python ### 155 | # Byte-compiled / optimized / DLL files 156 | __pycache__/ 157 | *.py[cod] 158 | *$py.class 159 | 160 | # C extensions 161 | *.so 162 | 163 | # Distribution / packaging 164 | .Python 165 | build/ 166 | develop-eggs/ 167 | dist/ 168 | downloads/ 169 | eggs/ 170 | .eggs/ 171 | lib/ 172 | lib64/ 173 | parts/ 174 | sdist/ 175 | var/ 176 | wheels/ 177 | share/python-wheels/ 178 | *.egg-info/ 179 | .installed.cfg 180 | *.egg 181 | MANIFEST 182 | 183 | # PyInstaller 184 | # Usually these files are written by a python script from a template 185 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
186 | *.manifest 187 | *.spec 188 | 189 | # Installer logs 190 | pip-log.txt 191 | pip-delete-this-directory.txt 192 | 193 | # Unit test / coverage reports 194 | htmlcov/ 195 | .tox/ 196 | .nox/ 197 | .coverage 198 | .coverage.* 199 | .cache 200 | nosetests.xml 201 | coverage.xml 202 | *.cover 203 | *.py,cover 204 | .hypothesis/ 205 | .pytest_cache/ 206 | cover/ 207 | 208 | # Translations 209 | *.mo 210 | *.pot 211 | 212 | # Django stuff: 213 | *.log 214 | local_settings.py 215 | db.sqlite3 216 | db.sqlite3-journal 217 | 218 | # Flask stuff: 219 | instance/ 220 | .webassets-cache 221 | 222 | # Scrapy stuff: 223 | .scrapy 224 | 225 | # Sphinx documentation 226 | docs/_build/ 227 | 228 | # PyBuilder 229 | .pybuilder/ 230 | target/ 231 | 232 | # Jupyter Notebook 233 | .ipynb_checkpoints 234 | 235 | # IPython 236 | profile_default/ 237 | ipython_config.py 238 | 239 | # pyenv 240 | # For a library or package, you might want to ignore these files since the code is 241 | # intended to run in multiple environments; otherwise, check them in: 242 | # .python-version 243 | 244 | # pipenv 245 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 246 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 247 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 248 | # install all needed dependencies. 249 | #Pipfile.lock 250 | 251 | # poetry 252 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 253 | # This is especially recommended for binary packages to ensure reproducibility, and is more 254 | # commonly ignored for libraries. 255 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 256 | #poetry.lock 257 | 258 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 259 | __pypackages__/ 260 | 261 | # Celery stuff 262 | celerybeat-schedule 263 | celerybeat.pid 264 | 265 | # SageMath parsed files 266 | *.sage.py 267 | 268 | # Environments 269 | .env 270 | .venv 271 | env/ 272 | venv/ 273 | ENV/ 274 | env.bak/ 275 | venv.bak/ 276 | 277 | # Spyder project settings 278 | .spyderproject 279 | .spyproject 280 | 281 | # Rope project settings 282 | .ropeproject 283 | 284 | # mkdocs documentation 285 | /site 286 | 287 | # mypy 288 | .mypy_cache/ 289 | .dmypy.json 290 | dmypy.json 291 | 292 | # Pyre type checker 293 | .pyre/ 294 | 295 | # pytype static type analyzer 296 | .pytype/ 297 | 298 | # Cython debug symbols 299 | cython_debug/ 300 | 301 | # PyCharm 302 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 303 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 304 | # and can be added to the global gitignore or merged into this file. For a more nuclear 305 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
306 | #.idea/ 307 | 308 | ### VisualStudioCode ### 309 | .vscode/* 310 | !.vscode/settings.json 311 | !.vscode/tasks.json 312 | !.vscode/launch.json 313 | !.vscode/extensions.json 314 | !.vscode/*.code-snippets 315 | 316 | # Local History for Visual Studio Code 317 | .history/ 318 | 319 | # Built Visual Studio Code Extensions 320 | *.vsix 321 | 322 | ### VisualStudioCode Patch ### 323 | # Ignore all local history of files 324 | .history 325 | .ionide 326 | 327 | # Support for Project snippet scope 328 | 329 | ### Windows ### 330 | # Windows thumbnail cache files 331 | Thumbs.db 332 | Thumbs.db:encryptable 333 | ehthumbs.db 334 | ehthumbs_vista.db 335 | 336 | # Dump file 337 | *.stackdump 338 | 339 | # Folder config file 340 | [Dd]esktop.ini 341 | 342 | # Recycle Bin used on file shares 343 | $RECYCLE.BIN/ 344 | 345 | # Windows Installer files 346 | *.cab 347 | *.msi 348 | *.msix 349 | *.msm 350 | *.msp 351 | 352 | # Windows shortcuts 353 | *.lnk 354 | -------------------------------------------------------------------------------- /samples/Hello.cfg: -------------------------------------------------------------------------------- 1 | INIT Init 2 | NEXT Next 3 | INVARIANT Inv 4 | -------------------------------------------------------------------------------- /samples/Hello.tla: -------------------------------------------------------------------------------- 1 | ------------ MODULE Hello ------------- 2 | 3 | EXTENDS Naturals, FiniteSets, Sequences 4 | 5 | VARIABLES 6 | \* @type: Str; 7 | x, 8 | \* @type: Int; 9 | y 10 | 11 | Init == 12 | /\ x = "hello" 13 | /\ y = 42 14 | 15 | Next == 16 | /\ x' = IF x = "hello" THEN "world" ELSE "hello" 17 | /\ y' = 42-y 18 | 19 | Inv == 20 | ~ 21 | ( 22 | /\ x = "world" 23 | /\ y = 0 24 | ) 25 | 26 | =========================================== 27 | -------------------------------------------------------------------------------- /samples/README.md: -------------------------------------------------------------------------------- 1 | # Samples 2 | 3 | Sample usage for library and cli 4 | 5 | ## Library 6 | 7 | ```bash 8 | pip install -r requirements.txt; 9 | ``` 10 | 11 | ## Cli 12 | 13 | ```bash 14 | pip install modelator-py; 15 | modelator 16 | ``` 17 | -------------------------------------------------------------------------------- /samples/cli_input_apalache_pure.json: -------------------------------------------------------------------------------- 1 | { 2 | "jar": "/Users/danwt/Documents/work/modelator-py/large/apa_0_23_0.jar", 3 | "args": { 4 | "cmd": "check", 5 | "nworkers": 4, 6 | "file": "Hello.tla", 7 | "config": "Hello.cfg" 8 | }, 9 | "files": { 10 | "Hello.tla": "------------ MODULE Hello -------------\n\nEXTENDS Naturals, FiniteSets, Sequences\n\nVARIABLES\n \\* @type: Str;\n x,\n \\* @type: Int;\n y\n\nInit ==\n /\\ x = \"hello\"\n /\\ y = 42\n\nNext ==\n /\\ x' = IF x = \"hello\" THEN \"world\" ELSE \"hello\"\n /\\ y' = 42-y\n\nInv ==\n ~\n (\n /\\ x = \"world\"\n /\\ y = 0\n )\n\n===========================================\n", 11 | "Hello.cfg": "INIT Init\nNEXT Next\nINVARIANT Inv\n" 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /samples/cli_input_apalache_raw.json: -------------------------------------------------------------------------------- 1 | { 2 | "jar": "/Users/danwt/Documents/work/modelator-py/large/apa_0_23_0.jar", 3 | "cwd": "/Users/danwt/Documents/work/modelator-py/samples", 4 | "args": { 5 | "cmd": "check", 6 | "nworkers": 4, 7 | "file": "Hello.tla", 8 | "config": 
"Hello.cfg" 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /samples/cli_input_gen.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | 4 | from helper import apa_path, read_file, tlc_path 5 | 6 | # mypy: ignore-errors 7 | 8 | """ 9 | Used to generate JSON input for CLI examples. 10 | """ 11 | 12 | 13 | def apalache_pure(): 14 | data = { 15 | "jar": apa_path(), 16 | "args": { 17 | "cmd": "check", 18 | "nworkers": 4, 19 | "file": "Hello.tla", 20 | "config": "Hello.cfg", 21 | }, 22 | "files": read_file("Hello.tla") | read_file("Hello.cfg"), 23 | } 24 | with open("cli_input_apalache_pure.json", "w") as fd: 25 | fd.write(json.dumps(data, indent=4)) 26 | 27 | 28 | def apalache_raw(): 29 | data = { 30 | "jar": apa_path(), 31 | "cwd": os.getcwd(), 32 | "args": { 33 | "cmd": "check", 34 | "nworkers": 4, 35 | "file": "Hello.tla", 36 | "config": "Hello.cfg", 37 | }, 38 | } 39 | with open("cli_input_apalache_raw.json", "w") as fd: 40 | fd.write(json.dumps(data, indent=4)) 41 | 42 | 43 | def tlc_pure(): 44 | 45 | data = { 46 | "jar": tlc_path(), 47 | "args": { 48 | "workers": "auto", 49 | "file": "Hello.tla", 50 | "config": "Hello.cfg", 51 | }, 52 | "files": read_file("Hello.tla") | read_file("Hello.cfg"), 53 | } 54 | with open("cli_input_tlc_pure.json", "w") as fd: 55 | fd.write(json.dumps(data, indent=4)) 56 | 57 | 58 | def tlc_raw(): 59 | data = { 60 | "jar": tlc_path(), 61 | "cwd": os.getcwd(), 62 | "args": { 63 | "workers": "auto", 64 | "file": "Hello.tla", 65 | "config": "Hello.cfg", 66 | }, 67 | } 68 | with open("cli_input_tlc_raw.json", "w") as fd: 69 | fd.write(json.dumps(data, indent=4)) 70 | 71 | 72 | if __name__ == "__main__": 73 | apalache_pure() 74 | apalache_raw() 75 | tlc_pure() 76 | tlc_raw() 77 | -------------------------------------------------------------------------------- /samples/cli_input_tlc_itf.json: -------------------------------------------------------------------------------- 1 | { 2 | "lists": true, 3 | "records": false, 4 | "stdout": "TLC2 Version 2.16 of Day Month 20?? 
(rev: 7d936f2)\nRunning breadth-first search Model-Checking with fp 67 and seed 5767608612270942875 with 8 workers on 8 cores with 10923MB heap and 64MB offheap memory [pid: 50075] (Mac OS X 11.4 aarch64, Homebrew 17.0.2 x86_64, MSBDiskFPSet, DiskStateQueue).\nParsing file /Users/danwt/Documents/work/mbt-python/tests/resource/TlcTraceParse.tla\nParsing file /private/var/folders/9l/fjtclx4d6txfdsj2ynqnrr5c0000gn/T/tlc-16867280054817799297/Naturals.tla (jar:file:/Users/danwt/Documents/model-checkers/tla2tools.jar!/tla2sany/StandardModules/Naturals.tla)\nParsing file /private/var/folders/9l/fjtclx4d6txfdsj2ynqnrr5c0000gn/T/tlc-16867280054817799297/FiniteSets.tla (jar:file:/Users/danwt/Documents/model-checkers/tla2tools.jar!/tla2sany/StandardModules/FiniteSets.tla)\nParsing file /private/var/folders/9l/fjtclx4d6txfdsj2ynqnrr5c0000gn/T/tlc-16867280054817799297/Sequences.tla (jar:file:/Users/danwt/Documents/model-checkers/tla2tools.jar!/tla2sany/StandardModules/Sequences.tla)\nParsing file /private/var/folders/9l/fjtclx4d6txfdsj2ynqnrr5c0000gn/T/tlc-16867280054817799297/TLC.tla (jar:file:/Users/danwt/Documents/model-checkers/tla2tools.jar!/tla2sany/StandardModules/TLC.tla)\nParsing file /private/var/folders/9l/fjtclx4d6txfdsj2ynqnrr5c0000gn/T/tlc-16867280054817799297/Reals.tla (jar:file:/Users/danwt/Documents/model-checkers/tla2tools.jar!/tla2sany/StandardModules/Reals.tla)\nParsing file /private/var/folders/9l/fjtclx4d6txfdsj2ynqnrr5c0000gn/T/tlc-16867280054817799297/Integers.tla (jar:file:/Users/danwt/Documents/model-checkers/tla2tools.jar!/tla2sany/StandardModules/Integers.tla)\nSemantic processing of module Naturals\nSemantic processing of module Sequences\nSemantic processing of module FiniteSets\nSemantic processing of module TLC\nSemantic processing of module Integers\nSemantic processing of module Reals\nSemantic processing of module TlcTraceParse\nStarting... 
(2022-02-09 08:59:37)\nComputing initial states...\nFinished computing initial states: 1 distinct state generated at 2022-02-09 08:59:37.\nError: Invariant Inv is violated.\nError: The behavior up to this point is:\nState 1: \n/\\ sequence_indexed_map = (<<\"one\", \"two\">> :> 42)\n/\\ one_indexed_sequential_map = <<42, 42, 42, 42, 42>>\n/\\ string_indexed_map = [two |-> 42, one |-> 42]\n/\\ negative_number = -123456\n/\\ record = [foo |-> 42, bar |-> 43]\n/\\ tuple = <<1, 2>>\n/\\ bool = FALSE\n/\\ map_indexed_map = ([foo |-> 42, bar |-> 42] :> 42)\n/\\ set_indexed_map = ({1, 2, 3} :> 42 @@ {4, 5, 6} :> 42)\n/\\ set = {1, 2, 3}\n/\\ list = <<1, \"two\">>\n/\\ map = ( 0 :> 42 @@\n 1 :> 42 @@\n 2 :> 42 @@\n 3 :> 42 @@\n 4 :> 42 @@\n 5 :> 42 @@\n 6 :> \"forty-two\" @@\n 8 :> \"forty-two\" @@\n 13 :> \"forty-two\" )\n/\\ json_int = 123\n/\\ string_literal = \"hello\"\n/\\ zero_indexed_sequential_map = (0 :> 42 @@ 1 :> 42 @@ 2 :> 42 @@ 3 :> 42 @@ 4 :> 42 @@ 5 :> 42)\n/\\ other_bool = TRUE\n\nState 2: \n/\\ sequence_indexed_map = (<<\"one\", \"two\">> :> 42)\n/\\ one_indexed_sequential_map = <<42, 42, 42, 42, 42>>\n/\\ string_indexed_map = [two |-> 42, one |-> 42]\n/\\ negative_number = -123456\n/\\ record = [foo |-> 42, bar |-> 43]\n/\\ tuple = <<1, 2>>\n/\\ bool = TRUE\n/\\ map_indexed_map = ([foo |-> 42, bar |-> 42] :> 42)\n/\\ set_indexed_map = ({1, 2, 3} :> 42 @@ {4, 5, 6} :> 42)\n/\\ set = {1, 2, 3}\n/\\ list = <<1, \"two\">>\n/\\ map = ( 0 :> 42 @@\n 1 :> 42 @@\n 2 :> 42 @@\n 3 :> 42 @@\n 4 :> 42 @@\n 5 :> 42 @@\n 6 :> \"forty-two\" @@\n 8 :> \"forty-two\" @@\n 13 :> \"forty-two\" )\n/\\ json_int = 123\n/\\ string_literal = \"hello\"\n/\\ zero_indexed_sequential_map = (0 :> 42 @@ 1 :> 42 @@ 2 :> 42 @@ 3 :> 42 @@ 4 :> 42 @@ 5 :> 42)\n/\\ other_bool = TRUE\n\n2 states generated, 2 distinct states found, 0 states left on queue.\nThe depth of the complete state graph search is 2.\nFinished in 00s at (2022-02-09 08:59:37)\nTrace exploration spec path: ./TlcTraceParse_TTrace_1644397177.tla\n" 5 | } 6 | -------------------------------------------------------------------------------- /samples/cli_input_tlc_pure.json: -------------------------------------------------------------------------------- 1 | { 2 | "jar": "/Users/danwt/Documents/work/modelator-py/large/tlc_2_18.jar", 3 | "args": { 4 | "workers": "auto", 5 | "file": "Hello.tla", 6 | "config": "Hello.cfg" 7 | }, 8 | "files": { 9 | "Hello.tla": "------------ MODULE Hello -------------\n\nEXTENDS Naturals, FiniteSets, Sequences\n\nVARIABLES\n \\* @type: Str;\n x,\n \\* @type: Int;\n y\n\nInit ==\n /\\ x = \"hello\"\n /\\ y = 42\n\nNext ==\n /\\ x' = IF x = \"hello\" THEN \"world\" ELSE \"hello\"\n /\\ y' = 42-y\n\nInv ==\n ~\n (\n /\\ x = \"world\"\n /\\ y = 0\n )\n\n===========================================\n", 10 | "Hello.cfg": "INIT Init\nNEXT Next\nINVARIANT Inv\n" 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /samples/cli_input_tlc_raw.json: -------------------------------------------------------------------------------- 1 | { 2 | "jar": "/Users/danwt/Documents/work/modelator-py/large/tlc_2_18.jar", 3 | "cwd": "/Users/danwt/Documents/work/modelator-py/samples", 4 | "args": { 5 | "workers": "auto", 6 | "file": "Hello.tla", 7 | "config": "Hello.cfg" 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /samples/helper.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | def 
tlc_path(): 5 | # For demo purposes, find the path to TLC in this repo, but you can use 6 | # a path to your own TLC jar. 7 | return os.path.abspath( 8 | os.path.join(os.path.dirname(__file__), "..", "large/tlc_2_17.jar") 9 | ) 10 | 11 | 12 | def apa_path(): 13 | # For demo purposes, find the path to Apalache in this repo, but you can use 14 | # a path to your own Apalache jar. 15 | return os.path.abspath( 16 | os.path.join(os.path.dirname(__file__), "..", "large/apa_0_23_0.jar") 17 | ) 18 | 19 | 20 | def read_file(fn): 21 | # Utility used in these demos 22 | with open(os.path.join(os.path.dirname(__file__), fn), "r") as fd: 23 | return {fn: fd.read()} 24 | -------------------------------------------------------------------------------- /samples/requirements.txt: -------------------------------------------------------------------------------- 1 | modelator-py==0.1.6 2 | -------------------------------------------------------------------------------- /samples/usage.md: -------------------------------------------------------------------------------- 1 | # Usage examples 2 | 3 | **_This document is a work in progress and not all features are documented yet_**. 4 | 5 | This document focuses on usage overview and using the cli executable tool. The functionality is all also useable via library functions. Please see [library_usage.py](./library_usage.py) for library function examples. 6 | 7 | ## Getting the cli tool 8 | 9 | To install the cli tool 10 | 11 | ```bash 12 | pip install modelator-py; 13 | ``` 14 | 15 | To call the cli program 16 | 17 | ```bash 18 | modelator 19 | ``` 20 | 21 | ## Feature: Extract traces from TLC output in [Informal Trace Format](https://apalache.informal.systems/docs/adr/015adr-trace.html?highlight=trace%20format#the-itf-format) format 22 | 23 | The TLC model checker will generate counterexamples written in TLA+ and embed them in stdout, interleaved with ASCII text. This output can contain 0, 1 or more traces, and will extract traces generated by TLCs model checking or simulator modes. Run the `modelator util tlc itf` tool to extract a list of traces in the Informal Trace Format. 24 | 25 | There is more than one way to run the tool. The tool always writes output as Json to `stdout`. 26 | 27 | ### Option 1: provide TLC's stdout data on stdin and flags as cli args 28 | 29 | ```bash 30 | modelator util tlc itf --help 31 | modelator util tlc itf < # Run without flags 32 | modelator util tlc itf --lists= --records= < # Run with flags 33 | ``` 34 | 35 | ### Option 2: provide TLC's stdout data and flags inside a json object 36 | 37 | ```bash 38 | modelator util tlc itf --json < 39 | ``` 40 | 41 | ### Flag explanation 42 | 43 | ```bash 44 | # In TLA+ there is no distinction between some functions and sequences. This means 45 | # that a sequence may be represented by function with domain 1..n for some n, 46 | # and vice versa. It is likely convenient to handle such functions as lists. 47 | # Default: True 48 | --lists 49 | # In TLA+ there is no distinction between some functions and records. This means 50 | # that a record may be represented by functions with domain all strings, and vice 51 | # versa. It is likely convenient to handle such functions as records. 
52 | # Default: True 53 | --records 54 | ``` 55 | 56 | ### Examples 57 | 58 | ```bash 59 | # Provide TLC's stdout data and other flags inside a json object 60 | modelator util tlc itf --json < cli_input_tlc_itf.json > traces.json 61 | # Provide TLC's stdout data on stdin and flags as cli args 62 | modelator util tlc itf --lists=True records=False < TlcTraces.out > traces.json 63 | ``` 64 | 65 | ### Nuance 66 | 67 | The tool should be able to handle partial TLC outputs (if you hit `ctrl+c` while it was running, for example). The tool cannot handle lasso shaped traces yet (for temporal property violations). The tool _can_ handle multiple traces (generated when using TLC's `continue` feature, for example). 68 | 69 | ### FAQ 70 | 71 | 1. You can dump various formats using TLC's `-dumpTrace` flag. Why does this exist?\ 72 | This was written before that flag existed. This tool also supports multiple traces 73 | 2. Is this fast?\ 74 | Not really: it will take a while to extract traces with hundreds of thousands of states. If you're doing that you're likely doing model-based testing. For regular users, the speed is fine. 75 | 3. Why don't you parse the `TTrace` files that TLC outputs instead of parsing stdout?\ 76 | It probably should be done that way! 77 | 78 | ## Feature: run [Apalache model checker](https://github.com/informalsystems/apalache) 79 | 80 | Run the Apalache model checker in a basic 'raw' mode or run it in a 'pure' mode. The pure mode encapsulates file system operations, removing side effects. The raw mode does not, but can be useful for debugging. 81 | 82 | ### Option 1: Run Apalache pure with all input data piped from json 83 | 84 | ```bash 85 | # See cli_input_apalache_pure.json 86 | modelator apalache pure < 87 | ``` 88 | 89 | ### Option 2: Run Apalache raw with all input data piped from json 90 | 91 | ```bash 92 | # See cli_input_apalache_raw.json 93 | modelator apalache raw --json < 94 | ``` 95 | 96 | ### Option 3: Run Apalache raw with all input data given in args 97 | 98 | ```bash 99 | modelator apalache raw\ 100 | --jar=\ # absolute paths! 101 | --cwd=\ # absolute paths! 102 | --cmd=\ # apalache check, typecheck ect 103 | --file=<.tla file>\ # target 104 | # other Apalache arguments ... 105 | ``` 106 | 107 | ### Argument explanation 108 | 109 | `apalache pure` has no arguments but `apalache raw` takes modelator 110 | and Apalache [arguments](https://github.com/informalsystems/modelator-py/blob/549d1927cbfeb25c1c20eda6451b0c476f63367a/modelator_py/apalache/args.py). Only some are shown here. 111 | 112 | ```bash 113 | # Read arguments from json piped to stdin instead of from sys.argv? 114 | --json 115 | # Full path to directory to run Apalache from. 116 | --cwd= 117 | # Full path to Apalache jar. 118 | --jar= 119 | # Apalache command: check, typecheck ect 120 | --cmd= 121 | # Apalache target tla file name 122 | --file= 123 | ``` 124 | 125 | ### Examples 126 | 127 | **_Please run `python3 cli_input_gen.py` to generate working example input data._** 128 | 129 | ```bash 130 | # Provide the inputs to Apalache pure mode with a json object 131 | modelator apalache pure < cli_input_apalache_pure.json > apalache_pure_result.json 132 | # Provide the inputs to Apalache raw mode with a json object 133 | modelator apalache raw --json < cli_input_apalache_raw.json > apalache_raw_result.json 134 | # Provide the inputs to Apalache raw mode with explicit values 135 | modelator apalache raw\ 136 | --jar=\ # absolute paths! 137 | --cwd=\ # absolute paths! 
138 | --cmd=check\ # apalache check 139 | --file=Hello.tla\ 140 | > apalache_raw_result.json 141 | ``` 142 | 143 | ### Nuance 144 | 145 | Pure mode does write to the filesystem, but inside a temporary directory. 146 | 147 | ## Feature: run [TLC model checker](https://github.com/tlaplus/tlaplus) 148 | 149 | Run the TLC model checker in a basic 'raw' mode or run it in a 'pure' mode. The pure mode encapsulates file system operations, removing side effects. The raw mode does not, but can be useful for debugging. 150 | 151 | ### Option 1: Run TLC pure with all input data piped from json 152 | 153 | ```bash 154 | # See cli_input_tlc_pure.json 155 | modelator tlc pure < 156 | ``` 157 | 158 | ### Option 2: Run TLC raw with all input data piped from json 159 | 160 | ```bash 161 | # See cli_input_tlc_raw.json 162 | modelator tlc raw --json < 163 | ``` 164 | 165 | ### Option 3: Run TLC raw with all input data given in args 166 | 167 | ```bash 168 | modelator tlc raw\ 169 | --jar=\ # absolute paths! 170 | --cwd=\ # absolute paths! 171 | --file=<.tla file>\ # tla target 172 | --config=<.cfg file>\ # TLC config file 173 | # other TLC arguments ... 174 | ``` 175 | 176 | ### Argument explanation 177 | 178 | `tlc pure` has no arguments but `tlc raw` takes modelator 179 | and TLC [arguments](https://github.com/informalsystems/modelator-py/blob/549d1927cbfeb25c1c20eda6451b0c476f63367a/modelator_py/apalache/args.py). Only some are shown here. 180 | 181 | ```bash 182 | # Read arguments from json piped to stdin instead of from sys.argv? 183 | --json 184 | # Full path to directory to run TLC from. 185 | --cwd= 186 | # Full path to TLC jar. 187 | --jar= 188 | # TLC target tla file name 189 | --file= 190 | # TLC target cfg file name 191 | --config= 192 | ``` 193 | 194 | ### Examples 195 | 196 | **_Please run `python3 cli_input_gen.py` to generate working example input data._** 197 | 198 | ```bash 199 | # Provide the inputs to TLC pure mode with a json object 200 | modelator tlc pure < cli_input_tlc_pure.json > tlc_pure_result.json 201 | # Provide the inputs to TLC raw mode with a json object 202 | modelator tlc raw --json < cli_input_tlc_raw.json > tlc_raw_result.json 203 | # Provide the inputs to TLC raw mode with explicit values 204 | modelator tlc raw\ 205 | --jar=\ # absolute paths! 206 | --cwd=\ # absolute paths! 207 | --file=Hello.tla\ 208 | --config=Hello.cfg\ 209 | > tlc_raw_result.json 210 | ``` 211 | 212 | ### Nuance 213 | 214 | Pure mode does write to the filesystem, but inside a temporary directory. 215 | 216 | ## How to get help 217 | 218 | Please try 219 | 220 | ```bash 221 | modelator tlc --help 222 | modelator tlc pure --help 223 | modelator tlc raw --help 224 | modelator apalache --help 225 | modelator apalache pure --help 226 | modelator apalache raw --help 227 | modelator util --help 228 | modelator util tlc --help 229 | modelator util tlc itf --help 230 | ``` 231 | 232 | ect. 233 | 234 | This CLI documentation will grow as the APIs stabilize. 
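
## Using the library instead of the CLI

Everything above is also available as plain Python functions (see [library_usage.py](./library_usage.py)). As a minimal sketch of the ITF extraction utility, assuming a captured TLC stdout file named `TlcTraces.out` in the current directory (as shipped with these samples):

```python
from modelator_py.util.tlc import tlc_itf

# Read the captured stdout of a TLC run.
with open("TlcTraces.out", "r") as fd:
    stdout = fd.read()

# Extract Informal Trace Format traces. "lists"/"records" control whether
# 1-indexed and string-indexed TLA+ functions become ITF lists/records.
traces = tlc_itf(json={"stdout": stdout, "lists": True, "records": True})
print(f"Extracted {len(traces)} trace(s)")
```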
235 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/informalsystems/modelator-py/98c6f3356b39e653ade2624a07540c174e97af9f/tests/__init__.py -------------------------------------------------------------------------------- /tests/apalache/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/informalsystems/modelator-py/98c6f3356b39e653ade2624a07540c174e97af9f/tests/apalache/__init__.py -------------------------------------------------------------------------------- /tests/apalache/test_apalache.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | import os 4 | import unittest.mock 5 | from contextlib import redirect_stdout 6 | from io import StringIO 7 | 8 | import pytest 9 | 10 | from modelator_py.apalache.cli import Apalache 11 | from modelator_py.apalache.raw import ApalacheArgs, RawCmd, stringify_raw_cmd 12 | 13 | from ..helper import get_apalache_path, get_resource_dir 14 | 15 | LOG = logging.getLogger(__name__) 16 | 17 | 18 | def test_stringify_raw_cmd(): 19 | """ 20 | Use for debugging - ensure that the shell command generated is sensible. 21 | """ 22 | cmd = RawCmd() 23 | cmd.jar = get_apalache_path() 24 | args = ApalacheArgs() 25 | args.cmd = "check" 26 | args.out_dir = "foo" 27 | args.nworkers = 8 28 | args.config = "HelloWorldTyped.cfg" 29 | args.file = "HelloWorldTyped.tla" 30 | cmd.args = args 31 | cmd_str = stringify_raw_cmd(cmd) 32 | assert cmd_str.endswith( 33 | "--out-dir=foo check --config=HelloWorldTyped.cfg --nworkers=8 HelloWorldTyped.tla" 34 | ) 35 | 36 | 37 | def test_pure_with_json_write_intermediate_false(): 38 | def get_files(): 39 | 40 | fns = [ 41 | "HelloWorldTyped.cfg", 42 | "HelloWorldTyped.tla", 43 | ] 44 | 45 | ret = {} 46 | 47 | for fn in fns: 48 | full_fn = os.path.join(get_resource_dir(), fn) 49 | with open(full_fn, "r") as fd: 50 | ret[fn] = fd.read() # type: ignore 51 | 52 | return ret 53 | 54 | data = { 55 | "jar": get_apalache_path(), 56 | "args": { 57 | "cmd": "check", 58 | "nworkers": 8, 59 | "config": "HelloWorldTyped.cfg", 60 | "file": "HelloWorldTyped.tla", 61 | "write_intermediate": False, 62 | }, 63 | "files": get_files(), 64 | } 65 | 66 | stdin = unittest.mock.Mock() 67 | stdin.read = lambda: json.dumps(data) 68 | 69 | app = Apalache(stdin) 70 | s = StringIO() 71 | with redirect_stdout(s): 72 | app.pure() 73 | # Check that Apalache finishes 74 | json_obj = json.loads(s.getvalue()) 75 | LOG.debug(json.dumps(json_obj, indent=4)) 76 | 77 | # HelloWorldTyped.tla should contain an error 78 | assert "Checker has found an error" in json_obj["stdout"] 79 | # There should be a counterexample 80 | assert any(fn.startswith("counterexample") for fn in json_obj["files"]) 81 | 82 | 83 | def test_pure_with_json_write_intermediate_true(): 84 | def get_files(): 85 | 86 | fns = [ 87 | "HelloWorldTyped.cfg", 88 | "HelloWorldTyped.tla", 89 | ] 90 | 91 | ret = {} 92 | 93 | for fn in fns: 94 | full_fn = os.path.join(get_resource_dir(), fn) 95 | with open(full_fn, "r") as fd: 96 | ret[fn] = fd.read() # type: ignore 97 | 98 | return ret 99 | 100 | data = { 101 | "jar": get_apalache_path(), 102 | "args": { 103 | "cmd": "check", 104 | "nworkers": 8, 105 | "config": "HelloWorldTyped.cfg", 106 | "file": "HelloWorldTyped.tla", 107 | "write_intermediate": True, 
108 | }, 109 | "files": get_files(), 110 | } 111 | 112 | stdin = unittest.mock.Mock() 113 | stdin.read = lambda: json.dumps(data) 114 | 115 | app = Apalache(stdin) 116 | s = StringIO() 117 | with redirect_stdout(s): 118 | app.pure() 119 | # Check that Apalache finishes 120 | json_obj = json.loads(s.getvalue()) 121 | LOG.debug(json.dumps(json_obj, indent=4)) 122 | 123 | # HelloWorldTyped.tla should contain an error 124 | assert "Checker has found an error" in json_obj["stdout"] 125 | # There should be a counterexample 126 | assert any(fn.startswith("counterexample") for fn in json_obj["files"]) 127 | # There should be some intermediate files 128 | assert any(fn.startswith("intermediate") for fn in json_obj["files"]) 129 | 130 | 131 | @pytest.mark.skip( 132 | reason="The 'apalache raw' command has side effects. E.g. polluting the filesystem" 133 | ) 134 | def test_raw_with_json(): 135 | """ 136 | Use for debugging - using the raw interface is not idempotent 137 | 138 | This is a convenient debugging test, and you could write your own assertions. 139 | """ 140 | 141 | data = { 142 | "jar": get_apalache_path(), 143 | "cwd": get_resource_dir(), 144 | "args": { 145 | "cmd": "check", 146 | "out_dir": "foo", 147 | "nworkers": 8, 148 | "config": "HelloWorldTyped.cfg", 149 | "file": "HelloWorldTyped.tla", 150 | }, 151 | } 152 | 153 | stdin = unittest.mock.Mock() 154 | stdin.read = lambda: json.dumps(data) 155 | app = Apalache(stdin) 156 | app.raw(json=True) 157 | -------------------------------------------------------------------------------- /tests/helper.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | 4 | TLC_PATH = "large/tlc_2_17.jar" 5 | APALACHE_PATH = "large/apa_0_23_0.jar" 6 | 7 | 8 | def get_tests_dir(): 9 | this_file_path = Path(__file__) 10 | return this_file_path.parent 11 | 12 | 13 | def get_project_dir(): 14 | tests_dir = get_tests_dir() 15 | return tests_dir.parent 16 | 17 | 18 | def get_resource_dir(): 19 | tests_dir = get_tests_dir() 20 | return os.path.join(tests_dir, "resource") 21 | 22 | 23 | def get_tlc_path(): 24 | return os.path.join(get_project_dir(), TLC_PATH) 25 | 26 | 27 | def get_apalache_path(): 28 | return os.path.join(get_project_dir(), APALACHE_PATH) 29 | -------------------------------------------------------------------------------- /tests/resource/.gitignore: -------------------------------------------------------------------------------- 1 | *.toolbox/*.launch 2 | *.toolbox/*.pmap 3 | *.toolbox/**/*.tla 4 | *.toolbox/**/*.out 5 | *.toolbox/**/*.cfg 6 | *.old 7 | *.toolbox/*aux 8 | *.toolbox/*.log 9 | *.toolbox/*.pdf 10 | *.toolbox/*.tex 11 | *aux 12 | *.log 13 | *.pdf 14 | *.tex 15 | states/ 16 | -------------------------------------------------------------------------------- /tests/resource/HelloWorld.cfg: -------------------------------------------------------------------------------- 1 | INIT Init 2 | NEXT Next 3 | INVARIANT Inv 4 | -------------------------------------------------------------------------------- /tests/resource/HelloWorld.tla: -------------------------------------------------------------------------------- 1 | ------------ MODULE HelloWorld ------------- 2 | 3 | EXTENDS Naturals, FiniteSets, Sequences 4 | 5 | VARIABLES 6 | x, 7 | y 8 | 9 | Init == 10 | /\ x = "hello" 11 | /\ y = 42 12 | 13 | Next == 14 | /\ x' = IF x = "hello" THEN "world" ELSE "hello" 15 | /\ y' = 42-y 16 | 17 | Inv == 18 | ~ 19 | ( 20 | /\ x = "world" 21 | /\ y = 0 22 | ) 23 | 24 | 25 | 
=========================================== 26 | -------------------------------------------------------------------------------- /tests/resource/HelloWorldTyped.cfg: -------------------------------------------------------------------------------- 1 | INIT Init 2 | NEXT Next 3 | INVARIANT Inv 4 | -------------------------------------------------------------------------------- /tests/resource/HelloWorldTyped.tla: -------------------------------------------------------------------------------- 1 | ------------ MODULE HelloWorldTyped ------------- 2 | 3 | EXTENDS Naturals, FiniteSets, Sequences 4 | 5 | VARIABLES 6 | \* @type: Str; 7 | x, 8 | \* @type: Int; 9 | y 10 | 11 | Init == 12 | /\ x = "hello" 13 | /\ y = 42 14 | 15 | Next == 16 | /\ x' = IF x = "hello" THEN "world" ELSE "hello" 17 | /\ y' = 42-y 18 | 19 | Inv == 20 | ~ 21 | ( 22 | /\ x = "world" 23 | /\ y = 0 24 | ) 25 | 26 | 27 | =========================================== 28 | -------------------------------------------------------------------------------- /tests/resource/HelloWorld_util_tlc_itf.json: -------------------------------------------------------------------------------- 1 | { 2 | "lists": true, 3 | "records": false, 4 | "stdout": "TLC2 Version 2.16 of Day Month 20?? (rev: 7d936f2)\nRunning breadth-first search Model-Checking with fp 67 and seed 5767608612270942875 with 8 workers on 8 cores with 10923MB heap and 64MB offheap memory [pid: 50075] (Mac OS X 11.4 aarch64, Homebrew 17.0.2 x86_64, MSBDiskFPSet, DiskStateQueue).\nParsing file /Users/danwt/Documents/work/mbt-python/tests/resource/TlcTraceParse.tla\nParsing file /private/var/folders/9l/fjtclx4d6txfdsj2ynqnrr5c0000gn/T/tlc-16867280054817799297/Naturals.tla (jar:file:/Users/danwt/Documents/model-checkers/tla2tools.jar!/tla2sany/StandardModules/Naturals.tla)\nParsing file /private/var/folders/9l/fjtclx4d6txfdsj2ynqnrr5c0000gn/T/tlc-16867280054817799297/FiniteSets.tla (jar:file:/Users/danwt/Documents/model-checkers/tla2tools.jar!/tla2sany/StandardModules/FiniteSets.tla)\nParsing file /private/var/folders/9l/fjtclx4d6txfdsj2ynqnrr5c0000gn/T/tlc-16867280054817799297/Sequences.tla (jar:file:/Users/danwt/Documents/model-checkers/tla2tools.jar!/tla2sany/StandardModules/Sequences.tla)\nParsing file /private/var/folders/9l/fjtclx4d6txfdsj2ynqnrr5c0000gn/T/tlc-16867280054817799297/TLC.tla (jar:file:/Users/danwt/Documents/model-checkers/tla2tools.jar!/tla2sany/StandardModules/TLC.tla)\nParsing file /private/var/folders/9l/fjtclx4d6txfdsj2ynqnrr5c0000gn/T/tlc-16867280054817799297/Reals.tla (jar:file:/Users/danwt/Documents/model-checkers/tla2tools.jar!/tla2sany/StandardModules/Reals.tla)\nParsing file /private/var/folders/9l/fjtclx4d6txfdsj2ynqnrr5c0000gn/T/tlc-16867280054817799297/Integers.tla (jar:file:/Users/danwt/Documents/model-checkers/tla2tools.jar!/tla2sany/StandardModules/Integers.tla)\nSemantic processing of module Naturals\nSemantic processing of module Sequences\nSemantic processing of module FiniteSets\nSemantic processing of module TLC\nSemantic processing of module Integers\nSemantic processing of module Reals\nSemantic processing of module TlcTraceParse\nStarting... 
(2022-02-09 08:59:37)\nComputing initial states...\nFinished computing initial states: 1 distinct state generated at 2022-02-09 08:59:37.\nError: Invariant Inv is violated.\nError: The behavior up to this point is:\nState 1: \n/\\ sequence_indexed_map = (<<\"one\", \"two\">> :> 42)\n/\\ one_indexed_sequential_map = <<42, 42, 42, 42, 42>>\n/\\ string_indexed_map = [two |-> 42, one |-> 42]\n/\\ negative_number = -123456\n/\\ record = [foo |-> 42, bar |-> 43]\n/\\ tuple = <<1, 2>>\n/\\ bool = FALSE\n/\\ map_indexed_map = ([foo |-> 42, bar |-> 42] :> 42)\n/\\ set_indexed_map = ({1, 2, 3} :> 42 @@ {4, 5, 6} :> 42)\n/\\ set = {1, 2, 3}\n/\\ list = <<1, \"two\">>\n/\\ map = ( 0 :> 42 @@\n 1 :> 42 @@\n 2 :> 42 @@\n 3 :> 42 @@\n 4 :> 42 @@\n 5 :> 42 @@\n 6 :> \"forty-two\" @@\n 8 :> \"forty-two\" @@\n 13 :> \"forty-two\" )\n/\\ json_int = 123\n/\\ string_literal = \"hello\"\n/\\ zero_indexed_sequential_map = (0 :> 42 @@ 1 :> 42 @@ 2 :> 42 @@ 3 :> 42 @@ 4 :> 42 @@ 5 :> 42)\n/\\ other_bool = TRUE\n\nState 2: \n/\\ sequence_indexed_map = (<<\"one\", \"two\">> :> 42)\n/\\ one_indexed_sequential_map = <<42, 42, 42, 42, 42>>\n/\\ string_indexed_map = [two |-> 42, one |-> 42]\n/\\ negative_number = -123456\n/\\ record = [foo |-> 42, bar |-> 43]\n/\\ tuple = <<1, 2>>\n/\\ bool = TRUE\n/\\ map_indexed_map = ([foo |-> 42, bar |-> 42] :> 42)\n/\\ set_indexed_map = ({1, 2, 3} :> 42 @@ {4, 5, 6} :> 42)\n/\\ set = {1, 2, 3}\n/\\ list = <<1, \"two\">>\n/\\ map = ( 0 :> 42 @@\n 1 :> 42 @@\n 2 :> 42 @@\n 3 :> 42 @@\n 4 :> 42 @@\n 5 :> 42 @@\n 6 :> \"forty-two\" @@\n 8 :> \"forty-two\" @@\n 13 :> \"forty-two\" )\n/\\ json_int = 123\n/\\ string_literal = \"hello\"\n/\\ zero_indexed_sequential_map = (0 :> 42 @@ 1 :> 42 @@ 2 :> 42 @@ 3 :> 42 @@ 4 :> 42 @@ 5 :> 42)\n/\\ other_bool = TRUE\n\n2 states generated, 2 distinct states found, 0 states left on queue.\nThe depth of the complete state graph search is 2.\nFinished in 00s at (2022-02-09 08:59:37)\nTrace exploration spec path: ./TlcTraceParse_TTrace_1644397177.tla\n" 5 | } 6 | -------------------------------------------------------------------------------- /tests/resource/TlcLassoTraceParse.cfg: -------------------------------------------------------------------------------- 1 | SPECIFICATION Spec 2 | PROPERTY Prop 3 | -------------------------------------------------------------------------------- /tests/resource/TlcLassoTraceParse.tla: -------------------------------------------------------------------------------- 1 | ------------ MODULE TlcLassoTraceParse ------------- 2 | 3 | EXTENDS Naturals, FiniteSets, Sequences 4 | 5 | VARIABLES 6 | x 7 | 8 | Init == x = 0 9 | 10 | 11 | Choices == 12 | CASE x = 0 -> {1, 3} 13 | [] x = 1 -> {2} 14 | [] x = 2 -> {1} 15 | [] x = 3 -> {4} 16 | [] x = 4 -> {3} 17 | 18 | Next == 19 | x' \in Choices 20 | 21 | Prop == []<>(x = 5) 22 | 23 | Spec == Init /\ [][Next]_x /\ WF_x(Next) 24 | 25 | =========================================== 26 | -------------------------------------------------------------------------------- /tests/resource/TlcLassoTraceParse.txt: -------------------------------------------------------------------------------- 1 | TLC2 Version 2.16 of Day Month 20?? (rev: 7d936f2) 2 | Running breadth-first search Model-Checking with fp 72 and seed -14328800372612691 with 8 workers on 8 cores with 10923MB heap and 64MB offheap memory [pid: 55105] (Mac OS X 11.4 aarch64, Homebrew 17.0.1 x86_64, MSBDiskFPSet, DiskStateQueue). 
3 | Parsing file /Users/danwt/Documents/work/mbt-python/tests/resource/TlcLassoTraceParse.tla 4 | Parsing file /private/var/folders/9l/fjtclx4d6txfdsj2ynqnrr5c0000gn/T/tlc-519656545032447101/Naturals.tla (jar:file:/Users/danwt/Documents/model-checkers/tla2tools.jar!/tla2sany/StandardModules/Naturals.tla) 5 | Parsing file /private/var/folders/9l/fjtclx4d6txfdsj2ynqnrr5c0000gn/T/tlc-519656545032447101/FiniteSets.tla (jar:file:/Users/danwt/Documents/model-checkers/tla2tools.jar!/tla2sany/StandardModules/FiniteSets.tla) 6 | Parsing file /private/var/folders/9l/fjtclx4d6txfdsj2ynqnrr5c0000gn/T/tlc-519656545032447101/Sequences.tla (jar:file:/Users/danwt/Documents/model-checkers/tla2tools.jar!/tla2sany/StandardModules/Sequences.tla) 7 | Semantic processing of module Naturals 8 | Semantic processing of module Sequences 9 | Semantic processing of module FiniteSets 10 | Semantic processing of module TlcLassoTraceParse 11 | Starting... (2022-01-27 12:40:57) 12 | Implied-temporal checking--satisfiability problem has 1 branches. 13 | Computing initial states... 14 | Finished computing initial states: 1 distinct state generated at 2022-01-27 12:40:57. 15 | Progress(3) at 2022-01-27 12:40:57: 7 states generated, 5 distinct states found, 0 states left on queue. 16 | Checking temporal properties for the complete state space with 5 total distinct states at (2022-01-27 12:40:57) 17 | Error: Temporal properties were violated. 18 | 19 | Error: The following behavior constitutes a counter-example: 20 | 21 | State 1: 22 | x = 0 23 | 24 | State 2: 25 | x = 3 26 | 27 | State 3: 28 | x = 4 29 | 30 | Back to state 2: 31 | 32 | Finished checking temporal properties in 00s at 2022-01-27 12:40:57 33 | 7 states generated, 5 distinct states found, 0 states left on queue. 34 | The depth of the complete state graph search is 3. 
35 | Finished in 00s at (2022-01-27 12:40:57) 36 | -------------------------------------------------------------------------------- /tests/resource/TlcMultipleTraceParse.cfg: -------------------------------------------------------------------------------- 1 | INIT Init 2 | NEXT Next 3 | INVARIANT Inv 4 | -------------------------------------------------------------------------------- /tests/resource/TlcMultipleTraceParse.tla: -------------------------------------------------------------------------------- 1 | ------------ MODULE TlcMultipleTraceParse ------------- 2 | 3 | EXTENDS Naturals, FiniteSets, Sequences 4 | 5 | VARIABLES 6 | x, 7 | y, 8 | z, 9 | steps 10 | 11 | Init == 12 | /\ x = "hello" 13 | /\ y = 42 14 | /\ z = [a : 1..2, b : {"foo", "bar"}, c : {<<1, "cat", [dog |-> 3]>>}] 15 | /\ steps = 0 16 | 17 | Next == 18 | /\ UNCHANGED x 19 | /\ UNCHANGED y 20 | /\ UNCHANGED z 21 | /\ steps' = IF steps = 8 THEN 0 ELSE steps + 1 22 | 23 | Inv == steps < 5 24 | 25 | =========================================== 26 | -------------------------------------------------------------------------------- /tests/resource/TlcStateExpressionExample0.txt: -------------------------------------------------------------------------------- 1 | /\ sequence_indexed_map = (<<"one", "two">> :> 42) 2 | /\ one_indexed_sequential_map = <<42, 42, 42, 42, 42>> 3 | /\ string_indexed_map = [two |-> 42, one |-> 42] 4 | /\ negative_number = -123456 5 | /\ record = [foo |-> 42, bar |-> 43] 6 | /\ tuple = <<1, 2>> 7 | /\ bool = FALSE 8 | /\ map_indexed_map = ([foo |-> 42, bar |-> 42] :> 42) 9 | /\ set_indexed_map = ({1, 2, 3} :> 42 @@ {4, 5, 6} :> 42) 10 | /\ set = {1, 2, 3} 11 | /\ list = <<1, "two">> 12 | /\ map = ( 0 :> 42 @@ 13 | 1 :> 42 @@ 14 | 2 :> 42 @@ 15 | 3 :> 42 @@ 16 | 4 :> 42 @@ 17 | 5 :> 42 @@ 18 | 6 :> "forty-two" @@ 19 | 8 :> "forty-two" @@ 20 | 13 :> "forty-two" ) 21 | /\ json_int = 123 22 | /\ string_literal = "hello" 23 | /\ zero_indexed_sequential_map = (0 :> 42 @@ 1 :> 42 @@ 2 :> 42 @@ 3 :> 42 @@ 4 :> 42 @@ 5 :> 42) 24 | /\ other_bool = TRUE 25 | -------------------------------------------------------------------------------- /tests/resource/TlcStateExpressionExample1.txt: -------------------------------------------------------------------------------- 1 | /\ actionOutcome = "None" 2 | /\ action = [type |-> "None"] 3 | /\ reserved = (0 :> 0 @@ 1 :> 0 @@ 2 :> 0 @@ 3 :> 0 @@ 4 :> 0) 4 | /\ indices = << [who |-> -1, deposit |-> 0, perm |-> FALSE], 5 | [who |-> -1, deposit |-> 0, perm |-> FALSE], 6 | [who |-> -1, deposit |-> 0, perm |-> FALSE], 7 | [who |-> -1, deposit |-> 0, perm |-> FALSE] >> 8 | -------------------------------------------------------------------------------- /tests/resource/TlcStateExpressionExample2.txt: -------------------------------------------------------------------------------- 1 | /\ wmem = (a1 :> v1) 2 | /\ memQ = <<>> 3 | /\ ctl = (p1 :> "rdy" @@ p2 :> "rdy") 4 | /\ buf = (p1 :> NoVal @@ p2 :> NoVal) 5 | /\ cache = (p1 :> (a1 :> NoVal) @@ p2 :> (a1 :> NoVal)) 6 | /\ memInt = <> 7 | -------------------------------------------------------------------------------- /tests/resource/TlcStateExpressionExample3.txt: -------------------------------------------------------------------------------- 1 | /\ steps = 9 2 | /\ blockTime = 8 3 | /\ lastValSet = {"v1"} 4 | /\ undelegationQ = {} 5 | /\ unbondingHeight = [v0 |-> 0, v1 |-> 0] 6 | /\ delegation = (<<"d0", "v0">> :> 1 @@ <<"d0", "v1">> :> 0) 7 | /\ blockHeight = 8 8 | /\ action = [ nature |-> "endBlock", 9 | timeDelta |-> 1, 10 | 
heightDelta |-> 1, 11 | delegator |-> "NullStr", 12 | validator |-> "NullStr", 13 | validatorSrc |-> "NullStr", 14 | validatorDst |-> "NullStr", 15 | amount |-> 0 ] 16 | /\ unbondingTime = [v0 |-> 0, v1 |-> 0] 17 | /\ jailed = [v0 |-> FALSE, v1 |-> FALSE] 18 | /\ redelegationQ = {} 19 | /\ tokens = [d0 |-> 0, v0 |-> 2, v1 |-> 5] 20 | /\ status = [v0 |-> "unbonded", v1 |-> "bonded"] 21 | /\ validatorQ = {} 22 | /\ success = TRUE 23 | -------------------------------------------------------------------------------- /tests/resource/TlcStateExpressionExample4.txt: -------------------------------------------------------------------------------- 1 | /\ actionTaken = "init" 2 | /\ lifetimeSyscalls = [v0 |-> 0, v1 |-> 0, v2 |-> 0] 3 | /\ pipeliningEnabled = [v0 |-> FALSE, v1 |-> FALSE, v2 |-> FALSE] 4 | /\ executor = "v0" 5 | /\ gcActions = {} 6 | /\ isReachable = ( <<"p0", "v0">> :> TRUE @@ 7 | <<"p0", "v1">> :> TRUE @@ 8 | <<"p0", "v2">> :> TRUE @@ 9 | <<"p2", "v1">> :> FALSE @@ 10 | <<"p2", "v2">> :> FALSE @@ 11 | <<"o0", "v1">> :> TRUE @@ 12 | <<"o0", "v2">> :> TRUE @@ 13 | <<"o2", "v0">> :> FALSE @@ 14 | <<"o2", "v1">> :> FALSE @@ 15 | <<"o2", "v2">> :> FALSE ) 16 | /\ executorPermittedOperations = "anySyscall" 17 | /\ things = [ p0 |-> 18 | [ nature |-> "promise", 19 | owner |-> "NullStr", 20 | q |-> <<>>, 21 | status |-> "unresolved", 22 | decider |-> "v0", 23 | subscribers |-> {"v0", "v1", "v2"}, 24 | refCnt |-> 3, 25 | reachableCnt |-> 0, 26 | recognizableCnt |-> 0, 27 | trueAllocator |-> "v0" ], 28 | p1 |-> 29 | [ nature |-> "promise", 30 | owner |-> "NullStr", 31 | q |-> <<>>, 32 | status |-> "NullStr", 33 | decider |-> "NullStr", 34 | subscribers |-> {}, 35 | refCnt |-> 0, 36 | reachableCnt |-> 0, 37 | recognizableCnt |-> 0, 38 | trueAllocator |-> "NullStr" ], 39 | p2 |-> 40 | [ nature |-> "promise", 41 | owner |-> "NullStr", 42 | q |-> <<>>, 43 | status |-> "NullStr", 44 | decider |-> "NullStr", 45 | subscribers |-> {}, 46 | refCnt |-> 0, 47 | reachableCnt |-> 0, 48 | recognizableCnt |-> 0, 49 | trueAllocator |-> "NullStr" ], 50 | o0 |-> 51 | [ nature |-> "object", 52 | owner |-> "v0", 53 | q |-> <<>>, 54 | status |-> "NullStr", 55 | decider |-> "NullStr", 56 | subscribers |-> {}, 57 | refCnt |-> 0, 58 | reachableCnt |-> 2, 59 | recognizableCnt |-> 2, 60 | trueAllocator |-> "v0" ], 61 | o1 |-> 62 | [ nature |-> "object", 63 | owner |-> "NullStr", 64 | q |-> <<>>, 65 | status |-> "NullStr", 66 | decider |-> "NullStr", 67 | subscribers |-> {}, 68 | refCnt |-> 0, 69 | reachableCnt |-> 0, 70 | recognizableCnt |-> 0, 71 | trueAllocator |-> "NullStr" ], 72 | o2 |-> 73 | [ nature |-> "object", 74 | owner |-> "NullStr", 75 | q |-> <<>>, 76 | status |-> "NullStr", 77 | decider |-> "NullStr", 78 | subscribers |-> {}, 79 | refCnt |-> 0, 80 | reachableCnt |-> 0, 81 | recognizableCnt |-> 0, 82 | trueAllocator |-> "NullStr" ] ] 83 | /\ maybeFree = {} 84 | /\ vatThingTrueState = ( <<"p0", "v0">> :> "truly_reachable" @@ 85 | <<"p0", "v1">> :> "truly_reachable" @@ 86 | <<"p0", "v2">> :> "truly_reachable" @@ 87 | <<"p1", "v0">> :> "truly_unknown" @@ 88 | <<"p1", "v1">> :> "truly_unknown" @@ 89 | <<"o0", "v0">> :> "truly_reachable" @@ 90 | <<"o0", "v1">> :> "truly_reachable" @@ 91 | <<"o2", "v1">> :> "truly_unknown" @@ 92 | <<"o2", "v2">> :> "truly_unknown" ) 93 | /\ validGcAction = [ nature |-> "NullStr", 94 | objId |-> "NullStr", 95 | targetVatId |-> "NullStr", 96 | groupedObjIds |-> {} ] 97 | /\ runQ = <<>> 98 | /\ clistExists = ( <<"p0", "v0">> :> TRUE @@ 99 | <<"p0", "v1">> :> TRUE @@ 100 | <<"p0", 
"v2">> :> TRUE @@ 101 | <<"p2", "v1">> :> FALSE @@ 102 | <<"p2", "v2">> :> FALSE @@ 103 | <<"o0", "v0">> :> TRUE @@ 104 | <<"o0", "v2">> :> TRUE @@ 105 | <<"o2", "v1">> :> FALSE @@ 106 | <<"o2", "v2">> :> FALSE ) 107 | -------------------------------------------------------------------------------- /tests/resource/TlcStateExpressionExample5.txt: -------------------------------------------------------------------------------- 1 | /\ outcome = [name |-> "Success"] 2 | /\ chains = [ cosmoshub |-> 3 | [ id |-> "cosmoshub", 4 | ports |-> {"transfer"}, 5 | channel |-> << >>, 6 | activeChannels |-> {}, 7 | bank |-> 8 | ( 0 :> [cosmoshub |-> 5] @@ 9 | 1 :> [cosmoshub |-> 0] @@ 10 | 2 :> [cosmoshub |-> 0] ), 11 | supply |-> [cosmoshub |-> 5], 12 | localPackets |-> 13 | [list |-> << >>, pending |-> {}, expired |-> {}, success |-> {}], 14 | remotePackets |-> << >>, 15 | ics20 |-> [portId |-> "transfer", channel |-> << >>, escrow |-> << >>], 16 | nextChannelId |-> 0, 17 | nextPacketId |-> 0, 18 | nextAccountId |-> 3 ], 19 | osmosis |-> 20 | [ id |-> "osmosis", 21 | ports |-> {"transfer"}, 22 | channel |-> << >>, 23 | activeChannels |-> {}, 24 | bank |-> 25 | ( 0 :> [osmosis |-> 5] @@ 26 | 1 :> [osmosis |-> 0] @@ 27 | 2 :> [osmosis |-> 0] ), 28 | supply |-> [osmosis |-> 5], 29 | localPackets |-> 30 | [list |-> << >>, pending |-> {}, expired |-> {}, success |-> {}], 31 | remotePackets |-> << >>, 32 | ics20 |-> [portId |-> "transfer", channel |-> << >>, escrow |-> << >>], 33 | nextChannelId |-> 0, 34 | nextPacketId |-> 0, 35 | nextAccountId |-> 3 ], 36 | ixo |-> 37 | [ id |-> "ixo", 38 | ports |-> {"transfer"}, 39 | channel |-> << >>, 40 | activeChannels |-> {}, 41 | bank |-> (0 :> [ixo |-> 5] @@ 1 :> [ixo |-> 0] @@ 2 :> [ixo |-> 0]), 42 | supply |-> [ixo |-> 5], 43 | localPackets |-> 44 | [list |-> << >>, pending |-> {}, expired |-> {}, success |-> {}], 45 | remotePackets |-> << >>, 46 | ics20 |-> [portId |-> "transfer", channel |-> << >>, escrow |-> << >>], 47 | nextChannelId |-> 0, 48 | nextPacketId |-> 0, 49 | nextAccountId |-> 3 ] ] 50 | /\ action = [name |-> "Null"] 51 | -------------------------------------------------------------------------------- /tests/resource/TlcStateExpressionExample6.txt: -------------------------------------------------------------------------------- 1 | /\ outcome_status = "" 2 | /\ num_execs = 0 3 | /\ action_taken = [grant_payload |-> [special_value |-> "", limit |-> 0, allow_list |-> {}, deny_list |-> {}, staking_type |-> ""], granter |-> "", authorization_type |-> "", action_type |-> "", grentee |-> "", exec_message |-> [message_type |-> "", amount |-> -1, validator |-> "", staking_action |-> ""]] 4 | /\ active_grants = << >> 5 | /\ num_grants = 0 6 | /\ expired_grants = {} 7 | -------------------------------------------------------------------------------- /tests/resource/TlcStateExpressionExample7.txt: -------------------------------------------------------------------------------- 1 | /\ steps = 3 2 | /\ blockTime = 2 3 | /\ lastValSet = {"v0"} 4 | /\ undelegationQ = { [ delegator |-> "d0", 5 | validator |-> "v1", 6 | completionTime |-> 2, 7 | balance |-> 5, 8 | creationHeight |-> 1 ] } 9 | /\ unbondingHeight = [v0 |-> 0, v1 |-> 1] 10 | /\ delegation = (<<"d0", "v0">> :> 0 @@ <<"d0", "v1">> :> 0) 11 | /\ blockHeight = 2 12 | /\ action = [ nature |-> "endBlock", 13 | timeDelta |-> 1, 14 | heightDelta |-> 1, 15 | delegator |-> "NullStr", 16 | validator |-> "NullStr", 17 | validatorSrc |-> "NullStr", 18 | validatorDst |-> "NullStr", 19 | amount |-> 0 ] 20 | /\ 
unbondingTime = [v0 |-> 0, v1 |-> 2] 21 | /\ jailed = [v0 |-> FALSE, v1 |-> FALSE] 22 | /\ redelegationQ = {} 23 | /\ tokens = [d0 |-> 1, v0 |-> 1, v1 |-> 0] 24 | /\ status = [v0 |-> "bonded", v1 |-> "unbonding"] 25 | /\ validatorQ = {"v1"} 26 | /\ success = TRUE 27 | -------------------------------------------------------------------------------- /tests/resource/TlcStateExpressionExample8.txt: -------------------------------------------------------------------------------- 1 | /\ sequence_indexed_map = (<<"one", "two">> :> 42) 2 | /\ one_indexed_sequential_map = <<42, 42, 42, 42, 42>> 3 | /\ string_indexed_map = [two |-> 42, one |-> 42] 4 | /\ negative_number = -123456 5 | /\ record = [foo |-> 42, bar |-> 43] 6 | /\ tuple = <<1, 2>> 7 | /\ bool = FALSE 8 | /\ map_indexed_map = ([foo |-> 42, bar |-> 42] :> 42) 9 | /\ set = {1, 2, 3} 10 | /\ list = <<1, "two">> 11 | /\ map = ( 0 :> 42 @@ 12 | 1 :> 42 @@ 13 | 2 :> 42 @@ 14 | 3 :> 42 @@ 15 | 4 :> 42 @@ 16 | 5 :> 42 @@ 17 | 6 :> "forty-two" @@ 18 | 8 :> "forty-two" @@ 19 | 13 :> "forty-two" ) 20 | /\ json_int = 123 21 | /\ string_literal = "hello" 22 | /\ zero_indexed_sequential_map = (0 :> 42 @@ 1 :> 42 @@ 2 :> 42 @@ 3 :> 42 @@ 4 :> 42 @@ 5 :> 42) 23 | /\ other_bool = TRUE 24 | -------------------------------------------------------------------------------- /tests/resource/TlcTraceAbsenceParse.txt: -------------------------------------------------------------------------------- 1 | TLC2 Version 2.16 of Day Month 20?? (rev: 7d936f2) 2 | Running breadth-first search Model-Checking with fp 84 and seed 3174664161629981490 with 8 workers on 8 cores with 10923MB heap and 64MB offheap memory [pid: 12388] (Mac OS X 11.4 aarch64, Homebrew 17.0.1 x86_64, MSBDiskFPSet, DiskStateQueue). 3 | Parsing file /Users/danwt/Documents/work/mbt-python/tests/resource/TlcMultipleTraceParse.tla 4 | Parsing file /private/var/folders/9l/fjtclx4d6txfdsj2ynqnrr5c0000gn/T/tlc-8383582653156030327/Naturals.tla (jar:file:/Users/danwt/Documents/model-checkers/tla2tools.jar!/tla2sany/StandardModules/Naturals.tla) 5 | Parsing file /private/var/folders/9l/fjtclx4d6txfdsj2ynqnrr5c0000gn/T/tlc-8383582653156030327/FiniteSets.tla (jar:file:/Users/danwt/Documents/model-checkers/tla2tools.jar!/tla2sany/StandardModules/FiniteSets.tla) 6 | Parsing file /private/var/folders/9l/fjtclx4d6txfdsj2ynqnrr5c0000gn/T/tlc-8383582653156030327/Sequences.tla (jar:file:/Users/danwt/Documents/model-checkers/tla2tools.jar!/tla2sany/StandardModules/Sequences.tla) 7 | Semantic processing of module Naturals 8 | Semantic processing of module Sequences 9 | Semantic processing of module FiniteSets 10 | Semantic processing of module TlcMultipleTraceParse 11 | Starting... (2022-01-21 12:38:44) 12 | Computing initial states... 13 | Finished computing initial states: 1 distinct state generated at 2022-01-21 12:38:45. 14 | Model checking completed. No error has been found. 15 | Estimates of the probability that TLC did not check all reachable states 16 | because two distinct states had the same fingerprint: 17 | calculated (optimistic): val = 4.9E-19 18 | 10 states generated, 9 distinct states found, 0 states left on queue. 19 | The depth of the complete state graph search is 9. 20 | The average outdegree of the complete state graph is 1 (minimum is 0, the maximum 1 and the 95th percentile is 1). 
21 | Finished in 00s at (2022-01-21 12:38:45) 22 | -------------------------------------------------------------------------------- /tests/resource/TlcTraceParse.cfg: -------------------------------------------------------------------------------- 1 | INIT Init 2 | NEXT Next 3 | INVARIANT Inv 4 | -------------------------------------------------------------------------------- /tests/resource/TlcTraceParse.tla: -------------------------------------------------------------------------------- 1 | ------------ MODULE TlcTraceParse ------------- 2 | 3 | EXTENDS Naturals, FiniteSets, Sequences, TLC, Reals 4 | 5 | VARIABLES 6 | bool, 7 | other_bool, 8 | string_literal, 9 | json_int, 10 | list, 11 | record, 12 | tuple, 13 | set, 14 | map, 15 | zero_indexed_sequential_map, 16 | one_indexed_sequential_map, 17 | string_indexed_map, 18 | sequence_indexed_map, 19 | map_indexed_map, 20 | set_indexed_map, 21 | negative_number 22 | 23 | Init == 24 | /\ bool = FALSE 25 | /\ other_bool = TRUE 26 | /\ string_literal = "hello" 27 | /\ json_int = 123 28 | /\ list = <<1,"two">> 29 | /\ record = [ foo |-> 42, bar |-> 43] 30 | /\ tuple = <<1,2>> 31 | /\ set = {1,2,3} 32 | /\ map = [ x \in 0..5 |-> 42 ] @@ [ x \in {6, 8, 13} |-> "forty-two" ] 33 | /\ zero_indexed_sequential_map = [ x \in 0..5 |-> 42 ] 34 | /\ one_indexed_sequential_map = [ x \in 1..5 |-> 42 ] 35 | /\ string_indexed_map = [ x \in {"one", "two"} |-> 42 ] 36 | /\ sequence_indexed_map = [ x \in {<<"one", "two">>} |-> 42 ] 37 | /\ map_indexed_map = [ x \in {[foo |-> 42, bar |-> 42]} |-> 42 ] 38 | /\ set_indexed_map = [ x \in {{1,2,3},{4,5,6}} |-> 42 ] 39 | /\ negative_number = -123456 40 | 41 | Next == 42 | /\ bool' = TRUE 43 | /\ UNCHANGED other_bool 44 | /\ UNCHANGED string_literal 45 | /\ UNCHANGED json_int 46 | /\ UNCHANGED list 47 | /\ UNCHANGED record 48 | /\ UNCHANGED tuple 49 | /\ UNCHANGED set 50 | /\ UNCHANGED map 51 | /\ UNCHANGED zero_indexed_sequential_map 52 | /\ UNCHANGED one_indexed_sequential_map 53 | /\ UNCHANGED string_indexed_map 54 | /\ UNCHANGED sequence_indexed_map 55 | /\ UNCHANGED map_indexed_map 56 | /\ UNCHANGED set_indexed_map 57 | /\ UNCHANGED negative_number 58 | 59 | Inv == bool = FALSE 60 | 61 | =========================================== 62 | -------------------------------------------------------------------------------- /tests/resource/TlcTraceParse.txt: -------------------------------------------------------------------------------- 1 | TLC2 Version 2.16 of Day Month 20?? (rev: 7d936f2) 2 | Running breadth-first search Model-Checking with fp 67 and seed 5767608612270942875 with 8 workers on 8 cores with 10923MB heap and 64MB offheap memory [pid: 50075] (Mac OS X 11.4 aarch64, Homebrew 17.0.2 x86_64, MSBDiskFPSet, DiskStateQueue). 
3 | Parsing file /Users/danwt/Documents/work/mbt-python/tests/resource/TlcTraceParse.tla 4 | Parsing file /private/var/folders/9l/fjtclx4d6txfdsj2ynqnrr5c0000gn/T/tlc-16867280054817799297/Naturals.tla (jar:file:/Users/danwt/Documents/model-checkers/tla2tools.jar!/tla2sany/StandardModules/Naturals.tla) 5 | Parsing file /private/var/folders/9l/fjtclx4d6txfdsj2ynqnrr5c0000gn/T/tlc-16867280054817799297/FiniteSets.tla (jar:file:/Users/danwt/Documents/model-checkers/tla2tools.jar!/tla2sany/StandardModules/FiniteSets.tla) 6 | Parsing file /private/var/folders/9l/fjtclx4d6txfdsj2ynqnrr5c0000gn/T/tlc-16867280054817799297/Sequences.tla (jar:file:/Users/danwt/Documents/model-checkers/tla2tools.jar!/tla2sany/StandardModules/Sequences.tla) 7 | Parsing file /private/var/folders/9l/fjtclx4d6txfdsj2ynqnrr5c0000gn/T/tlc-16867280054817799297/TLC.tla (jar:file:/Users/danwt/Documents/model-checkers/tla2tools.jar!/tla2sany/StandardModules/TLC.tla) 8 | Parsing file /private/var/folders/9l/fjtclx4d6txfdsj2ynqnrr5c0000gn/T/tlc-16867280054817799297/Reals.tla (jar:file:/Users/danwt/Documents/model-checkers/tla2tools.jar!/tla2sany/StandardModules/Reals.tla) 9 | Parsing file /private/var/folders/9l/fjtclx4d6txfdsj2ynqnrr5c0000gn/T/tlc-16867280054817799297/Integers.tla (jar:file:/Users/danwt/Documents/model-checkers/tla2tools.jar!/tla2sany/StandardModules/Integers.tla) 10 | Semantic processing of module Naturals 11 | Semantic processing of module Sequences 12 | Semantic processing of module FiniteSets 13 | Semantic processing of module TLC 14 | Semantic processing of module Integers 15 | Semantic processing of module Reals 16 | Semantic processing of module TlcTraceParse 17 | Starting... (2022-02-09 08:59:37) 18 | Computing initial states... 19 | Finished computing initial states: 1 distinct state generated at 2022-02-09 08:59:37. 20 | Error: Invariant Inv is violated. 
21 | Error: The behavior up to this point is: 22 | State 1: 23 | /\ sequence_indexed_map = (<<"one", "two">> :> 42) 24 | /\ one_indexed_sequential_map = <<42, 42, 42, 42, 42>> 25 | /\ string_indexed_map = [two |-> 42, one |-> 42] 26 | /\ negative_number = -123456 27 | /\ record = [foo |-> 42, bar |-> 43] 28 | /\ tuple = <<1, 2>> 29 | /\ bool = FALSE 30 | /\ map_indexed_map = ([foo |-> 42, bar |-> 42] :> 42) 31 | /\ set_indexed_map = ({1, 2, 3} :> 42 @@ {4, 5, 6} :> 42) 32 | /\ set = {1, 2, 3} 33 | /\ list = <<1, "two">> 34 | /\ map = ( 0 :> 42 @@ 35 | 1 :> 42 @@ 36 | 2 :> 42 @@ 37 | 3 :> 42 @@ 38 | 4 :> 42 @@ 39 | 5 :> 42 @@ 40 | 6 :> "forty-two" @@ 41 | 8 :> "forty-two" @@ 42 | 13 :> "forty-two" ) 43 | /\ json_int = 123 44 | /\ string_literal = "hello" 45 | /\ zero_indexed_sequential_map = (0 :> 42 @@ 1 :> 42 @@ 2 :> 42 @@ 3 :> 42 @@ 4 :> 42 @@ 5 :> 42) 46 | /\ other_bool = TRUE 47 | 48 | State 2: 49 | /\ sequence_indexed_map = (<<"one", "two">> :> 42) 50 | /\ one_indexed_sequential_map = <<42, 42, 42, 42, 42>> 51 | /\ string_indexed_map = [two |-> 42, one |-> 42] 52 | /\ negative_number = -123456 53 | /\ record = [foo |-> 42, bar |-> 43] 54 | /\ tuple = <<1, 2>> 55 | /\ bool = TRUE 56 | /\ map_indexed_map = ([foo |-> 42, bar |-> 42] :> 42) 57 | /\ set_indexed_map = ({1, 2, 3} :> 42 @@ {4, 5, 6} :> 42) 58 | /\ set = {1, 2, 3} 59 | /\ list = <<1, "two">> 60 | /\ map = ( 0 :> 42 @@ 61 | 1 :> 42 @@ 62 | 2 :> 42 @@ 63 | 3 :> 42 @@ 64 | 4 :> 42 @@ 65 | 5 :> 42 @@ 66 | 6 :> "forty-two" @@ 67 | 8 :> "forty-two" @@ 68 | 13 :> "forty-two" ) 69 | /\ json_int = 123 70 | /\ string_literal = "hello" 71 | /\ zero_indexed_sequential_map = (0 :> 42 @@ 1 :> 42 @@ 2 :> 42 @@ 3 :> 42 @@ 4 :> 42 @@ 5 :> 42) 72 | /\ other_bool = TRUE 73 | 74 | 2 states generated, 2 distinct states found, 0 states left on queue. 75 | The depth of the complete state graph search is 2. 76 | Finished in 00s at (2022-02-09 08:59:37) 77 | Trace exploration spec path: ./TlcTraceParse_TTrace_1644397177.tla 78 | -------------------------------------------------------------------------------- /tests/resource/TlcTraceParseInitState.txt: -------------------------------------------------------------------------------- 1 | TLC2 Version 2.15 of Day Month 20?? (rev: 920e6fa) 2 | Running breadth-first search Model-Checking with fp 6 and seed 3761596547624107330 with 8 workers on 8 cores with 7282MB heap and 64MB offheap memory (Mac OS X 12.0.1 x86_64, Oracle Corporation 1.8.0_333 x86_64, MSBDiskFPSet, DiskStateQueue). 
3 | Parsing file /private/var/folders/ws/8cq6ncrs0h91f1tx4pb7tzx80000gn/T/modelator-py-tlc-temp-dir-5ks5zoff/Hello.tla 4 | Parsing file /private/var/folders/ws/8cq6ncrs0h91f1tx4pb7tzx80000gn/T/Naturals.tla (jar:file:/Users/ivan/Documents/codebase/modelator/jars/tla2tools-v1.8.0.jar!/tla2sany/StandardModules/Naturals.tla) 5 | Parsing file /private/var/folders/ws/8cq6ncrs0h91f1tx4pb7tzx80000gn/T/FiniteSets.tla (jar:file:/Users/ivan/Documents/codebase/modelator/jars/tla2tools-v1.8.0.jar!/tla2sany/StandardModules/FiniteSets.tla) 6 | Parsing file /private/var/folders/ws/8cq6ncrs0h91f1tx4pb7tzx80000gn/T/Sequences.tla (jar:file:/Users/ivan/Documents/codebase/modelator/jars/tla2tools-v1.8.0.jar!/tla2sany/StandardModules/Sequences.tla) 7 | Parsing file /private/var/folders/ws/8cq6ncrs0h91f1tx4pb7tzx80000gn/T/modelator-py-tlc-temp-dir-5ks5zoff/HelloInv.tla 8 | Semantic processing of module Naturals 9 | Semantic processing of module Sequences 10 | Semantic processing of module FiniteSets 11 | Semantic processing of module HelloInv 12 | Semantic processing of module Hello 13 | Semantic errors: 14 | 15 | *** Warnings: 2 16 | 17 | line 7, col 5 to line 7, col 5 of module Hello 18 | 19 | Multiple declarations or definitions for symbol x. 20 | This duplicates the one at line 4, col 5 to line 4, col 5 of module HelloInv. 21 | 22 | 23 | line 9, col 5 to line 9, col 5 of module Hello 24 | 25 | Multiple declarations or definitions for symbol y. 26 | This duplicates the one at line 6, col 5 to line 6, col 5 of module HelloInv. 27 | 28 | 29 | 30 | Starting... (2022-05-18 11:34:17) 31 | Computing initial states... 32 | Error: Invariant Inv2 is violated by the initial state: 33 | /\ x = "hello" 34 | /\ y = 22 35 | 36 | Finished in 01s at (2022-05-18 11:34:17) 37 | -------------------------------------------------------------------------------- /tests/resource/TlcTraceParseInitStateContinue.txt: -------------------------------------------------------------------------------- 1 | TLC2 Version 2.16 of 31 December 2020 (rev: cdddf55) 2 | Running breadth-first search Model-Checking with fp 7 and seed 7495137621034827281 with 8 workers on 8 cores with 7282MB heap and 64MB offheap memory (Mac OS X 12.4 x86_64, Oracle Corporation 1.8.0_333 x86_64, MSBDiskFPSet, DiskStateQueue). 3 | Parsing file /Users/ivan/Documents/codebase/modelator/python/modelator/samples/Hello.tla 4 | Parsing file /private/var/folders/ws/8cq6ncrs0h91f1tx4pb7tzx80000gn/T/Naturals.tla 5 | Parsing file /private/var/folders/ws/8cq6ncrs0h91f1tx4pb7tzx80000gn/T/FiniteSets.tla 6 | Parsing file /private/var/folders/ws/8cq6ncrs0h91f1tx4pb7tzx80000gn/T/Sequences.tla 7 | Parsing file /Users/ivan/Documents/codebase/modelator/python/modelator/samples/HelloInv.tla 8 | Semantic processing of module Naturals 9 | Semantic processing of module Sequences 10 | Semantic processing of module FiniteSets 11 | Semantic processing of module HelloInv 12 | Semantic processing of module Hello 13 | Semantic errors: 14 | 15 | *** Warnings: 2 16 | 17 | line 7, col 5 to line 7, col 5 of module Hello 18 | 19 | Multiple declarations or definitions for symbol x. 20 | This duplicates the one at line 4, col 5 to line 4, col 5 of module HelloInv. 21 | 22 | 23 | line 9, col 5 to line 9, col 5 of module Hello 24 | 25 | Multiple declarations or definitions for symbol y. 26 | This duplicates the one at line 6, col 5 to line 6, col 5 of module HelloInv. 27 | 28 | 29 | 30 | Starting... (2022-05-23 12:51:55) 31 | Computing initial states... 
32 | Error: Invariant Inv2 is violated by the initial state: 33 | /\ x = "hello" 34 | /\ y = 22 35 | 36 | Finished computing initial states: 1 distinct state generated at 2022-05-23 12:52:00. 37 | Error: Invariant Inv2 is violated. 38 | Error: The behavior up to this point is: 39 | State 1: 40 | /\ x = "hello" 41 | /\ y = 22 42 | 43 | State 2: 44 | /\ x = "world" 45 | /\ y = 20 46 | 47 | Error: Invariant Inv2 is violated. 48 | Error: The behavior up to this point is: 49 | State 1: 50 | /\ x = "hello" 51 | /\ y = 22 52 | 53 | State 2: 54 | /\ x = "world" 55 | /\ y = 20 56 | 57 | State 3: 58 | /\ x = "hello" 59 | /\ y = 18 60 | -------------------------------------------------------------------------------- /tests/tlc/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/informalsystems/modelator-py/98c6f3356b39e653ade2624a07540c174e97af9f/tests/tlc/__init__.py -------------------------------------------------------------------------------- /tests/tlc/test_tlc.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | import os 4 | import unittest.mock 5 | from contextlib import redirect_stdout 6 | from io import StringIO 7 | 8 | import pytest 9 | 10 | from modelator_py.tlc.cli import Tlc 11 | from modelator_py.tlc.raw import RawCmd, TlcArgs, stringify_raw_cmd 12 | 13 | from ..helper import get_resource_dir, get_tlc_path 14 | 15 | LOG = logging.getLogger(__name__) 16 | 17 | 18 | def test_stringify_raw_cmd(): 19 | """ 20 | Use for debugging - ensure that the shell command generated is sensible. 21 | """ 22 | cmd = RawCmd() 23 | cmd.jar = get_tlc_path() 24 | args = TlcArgs() 25 | args.cleanup = True 26 | args.workers = "auto" 27 | args.config = "HelloWorld.cfg" 28 | args.file = "HelloWorld.tla" 29 | cmd.args = args 30 | cmd_str = stringify_raw_cmd(cmd) 31 | assert cmd_str.endswith( 32 | "tlc2.TLC -cleanup -config HelloWorld.cfg -workers auto HelloWorld.tla" 33 | ) 34 | 35 | 36 | def test_pure_with_json(): 37 | def get_files(): 38 | 39 | fns = [ 40 | "HelloWorld.cfg", 41 | "HelloWorld.tla", 42 | ] 43 | 44 | ret = {} 45 | 46 | for fn in fns: 47 | full_fn = os.path.join(get_resource_dir(), fn) 48 | with open(full_fn, "r") as fd: 49 | ret[fn] = fd.read() # type: ignore 50 | 51 | return ret 52 | 53 | data = { 54 | "jar": get_tlc_path(), 55 | "args": { 56 | "workers": "auto", 57 | "config": "HelloWorld.cfg", 58 | "file": "HelloWorld.tla", 59 | }, 60 | "files": get_files(), 61 | } 62 | 63 | stdin = unittest.mock.Mock() 64 | stdin.read = lambda: json.dumps(data) 65 | 66 | app = Tlc(stdin) 67 | s = StringIO() 68 | with redirect_stdout(s): 69 | app.pure() 70 | # Check that TLC finishes 71 | json_obj = json.loads(s.getvalue()) 72 | assert "Finished in" in json_obj["stdout"] 73 | 74 | 75 | @pytest.mark.skip( 76 | reason="The 'tlc raw' command has side effects. E.g. polluting the filesystem" 77 | ) 78 | def test_raw_with_json(): 79 | """ 80 | Use for debugging - using the raw interface is not idempotent 81 | 82 | This is a convenient debugging test, and you could write your own assertions. 
83 | """ 84 | 85 | data = { 86 | "jar": get_tlc_path(), 87 | "cwd": get_resource_dir(), 88 | "args": { 89 | "cleanup": False, 90 | "workers": "auto", 91 | "config": "HelloWorld.cfg", 92 | "file": "HelloWorld.tla", 93 | }, 94 | } 95 | 96 | stdin = unittest.mock.Mock() 97 | stdin.read = lambda: json.dumps(data) 98 | app = Tlc(stdin) 99 | app.raw(json=True) 100 | -------------------------------------------------------------------------------- /tests/util/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/informalsystems/modelator-py/98c6f3356b39e653ade2624a07540c174e97af9f/tests/util/__init__.py -------------------------------------------------------------------------------- /tests/util/tla/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2016-2020 by California Institute of Technology 2 | Copyright (c) 2008-2013 INRIA and Microsoft Corporation 3 | Copyright (c) 2016-2020 by Ioannis Filippidis 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions 8 | are met: 9 | 10 | 1. Redistributions of source code must retain the above copyright 11 | notice, this list of conditions and the following disclaimer. 12 | 13 | 2. Redistributions in binary form must reproduce the above copyright 14 | notice, this list of conditions and the following disclaimer in the 15 | documentation and/or other materials provided with the distribution. 16 | 17 | 3. Neither the name of the California Institute of Technology nor 18 | the names of its contributors may be used to endorse or promote 19 | products derived from this software without specific prior 20 | written permission. 21 | 22 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 25 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 26 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 27 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 28 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 32 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 | -------------------------------------------------------------------------------- /tests/util/tla/README.md: -------------------------------------------------------------------------------- 1 | This directory contains test scripts for the Python package `tla`. 2 | 3 | To run them using the package [`pytest`](https://pypi.org/project/pytest/): 4 | 5 | ```shell 6 | pytest -v --continue-on-collection-errors . 7 | ``` 8 | -------------------------------------------------------------------------------- /tests/util/tla/README_license.md: -------------------------------------------------------------------------------- 1 | This directory contains code from [tla_python](https://github.com/tlaplus/tla_python) and is licensed as such. 
2 | -------------------------------------------------------------------------------- /tests/util/tla/lexer_test.py: -------------------------------------------------------------------------------- 1 | """Tests of module `tla.lex`.""" 2 | import pprint 3 | 4 | import modelator_py.util.tla.lex as lex 5 | 6 | MODULE_FOO = r""" 7 | comments 8 | ---- MODULE Foo ---- 9 | VARIABLE x 10 | 11 | \* a one-line comment 12 | \* a nested \* one-line comment 13 | 14 | (* This is a multi-line comment. *) 15 | (* This is a multi-line 16 | comment. * ( 17 | *) 18 | (* A nested 19 | (* multi-line *) 20 | comment. *) 21 | a == 1.0 22 | b == a + 2 23 | 24 | A == x' = x + 1 25 | 26 | P == SF_x(A) 27 | 28 | THEOREM Thm == 29 | ASSUME x = 1 30 | PROVE x + 1 = 2 31 | PROOF 32 | <1>1. x = 1 33 | OBVIOUS 34 | <1>2. x + 1 = 1 + 1 35 | BY <1>1 36 | <1>3. 1 + 1 = 2 37 | OBVIOUS 38 | <1> QED 39 | BY <1>2, <1>3 40 | ==================== 41 | """ 42 | 43 | 44 | def test_lexer(): 45 | """Test lexing and conversions between tokens.""" 46 | data = MODULE_FOO 47 | data = lex._omit_preamble(data) 48 | lextokens = lex._lex(data) 49 | # Token_ instances 50 | tokens_ = [lex._map_to_token_(token) for token in lextokens] 51 | pprint.pprint(tokens_) 52 | for token_ in tokens_: 53 | print(token_) 54 | print(str(token_)) 55 | str_of_tokens_ = [str(token_) for token_ in tokens_] 56 | pprint.pprint(str_of_tokens_) 57 | # Token instances 58 | tokens = [lex._map_to_token(data, token) for token in lextokens] 59 | pprint.pprint(tokens) 60 | # join raw strings 61 | print("".join(str_of_tokens_)) 62 | # join with newlines in between 63 | s = lex._join_with_newlines(tokens) 64 | print(s) 65 | # check location information 66 | for token in tokens: 67 | print(token.loc) 68 | 69 | 70 | if __name__ == "__main__": 71 | test_lexer() 72 | -------------------------------------------------------------------------------- /tests/util/tla/tlaps_lib_test.py: -------------------------------------------------------------------------------- 1 | """Parse the files in the TLAPS library. 2 | 3 | This module requires an installation of TLAPS. 
4 | """ 5 | import os 6 | 7 | from modelator_py.util.tla import parser, to_str 8 | 9 | # change this variable to a path where 10 | # the TLAPS library is present 11 | TLAPS_LIB_PATH = "$HOME/lib/tlaps" 12 | 13 | 14 | def parse_tlaps_modules(): 15 | module_paths = _collect_tlaps_module_files() 16 | for module_path in module_paths: 17 | print(f"parsing module `{module_path}`") 18 | text = _read_file(module_path) 19 | _parse_and_format(text) 20 | 21 | 22 | def _collect_tlaps_module_files(): 23 | path = TLAPS_LIB_PATH 24 | path = os.path.expandvars(path) 25 | tlafiles = list() 26 | with os.scandir(path) as it: 27 | for entry in it: 28 | if entry.is_file() and entry.name.endswith(".tla"): 29 | tlafiles.append(entry) 30 | return [entry.path for entry in tlafiles] 31 | 32 | 33 | def _read_file(path): 34 | with open(path, "r") as f: 35 | text = f.read() 36 | return text 37 | 38 | 39 | def _parse_and_format(text): 40 | """Return parse tree from `tla.parser.parse`.""" 41 | r = parser.parse(text, nodes=to_str.Nodes) 42 | assert r is not None 43 | text = r.to_str() 44 | to_str._print_overwide_lines(text, to_str.LINE_WIDTH) 45 | return r 46 | 47 | 48 | if __name__ == "__main__": 49 | parse_tlaps_modules() 50 | -------------------------------------------------------------------------------- /tests/util/tlc/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/informalsystems/modelator-py/98c6f3356b39e653ade2624a07540c174e97af9f/tests/util/tlc/__init__.py -------------------------------------------------------------------------------- /tests/util/tlc/state_to_informal_trace_format_test.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from modelator_py.util.tla import parser, to_str 4 | from modelator_py.util.tlc.state_to_informal_trace_format import ( 5 | state_to_informal_trace_format_state, 6 | ) 7 | 8 | from ...helper import get_resource_dir 9 | 10 | 11 | def test_create_ast_from_tlc_state_expressions(): 12 | """ 13 | Test the tla_python library capabilities to parse TLA+ snippets 14 | included in counterexamples generated by TLC. 15 | """ 16 | 17 | fns = [ 18 | "TlcStateExpressionExample0.txt", 19 | "TlcStateExpressionExample1.txt", 20 | "TlcStateExpressionExample2.txt", 21 | "TlcStateExpressionExample3.txt", 22 | "TlcStateExpressionExample4.txt", 23 | "TlcStateExpressionExample5.txt", 24 | "TlcStateExpressionExample6.txt", 25 | "TlcStateExpressionExample7.txt", 26 | ] 27 | 28 | expressions = [] 29 | 30 | for fn in fns: 31 | path = os.path.join(get_resource_dir(), fn) 32 | with open(path, "r") as fd: 33 | content = fd.read() 34 | expressions.append(content) 35 | 36 | for expr in expressions: 37 | tree = parser.parse_expr(expr, nodes=to_str.Nodes) 38 | assert tree is not None 39 | 40 | 41 | def test_tla_state_expression_to_informal_trace_format_state(): 42 | 43 | """ 44 | json in the Informal Trace Format contains a state field mapping to a 45 | list of states. 46 | 47 | https://apalache.informal.systems/docs/adr/015adr-trace.html?highlight=trace%20format#the-itf-format 48 | 49 | Test the translation of a single state. 
50 | """ 51 | 52 | fns = [ 53 | "TlcStateExpressionExample0.txt", 54 | "TlcStateExpressionExample1.txt", 55 | "TlcStateExpressionExample2.txt", 56 | "TlcStateExpressionExample3.txt", 57 | "TlcStateExpressionExample4.txt", 58 | "TlcStateExpressionExample5.txt", 59 | "TlcStateExpressionExample6.txt", 60 | "TlcStateExpressionExample7.txt", 61 | "TlcStateExpressionExample8.txt", 62 | ] 63 | 64 | expressions = [] 65 | 66 | for fn in fns: 67 | path = os.path.join(get_resource_dir(), fn) 68 | with open(path, "r") as fd: 69 | content = fd.read() 70 | expressions.append(content) 71 | 72 | for s in expressions: 73 | res = state_to_informal_trace_format_state(s) 74 | assert res is not None 75 | -------------------------------------------------------------------------------- /tests/util/tlc/stdout_to_informal_trace_format_test.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from modelator_py.util.informal_trace_format import with_lists, with_records 4 | from modelator_py.util.tlc.itf import TlcITFCmd, tlc_itf 5 | from modelator_py.util.tlc.stdout_to_informal_trace_format import ( 6 | extract_traces, 7 | tlc_trace_to_informal_trace_format_trace, 8 | ) 9 | 10 | from ...helper import get_resource_dir 11 | 12 | 13 | def test_extract_no_trace_from_tlc(): 14 | fn = "TlcTraceAbsenceParse.txt" 15 | fn = os.path.join(get_resource_dir(), fn) 16 | content = None 17 | with open(fn, "r") as fd: 18 | content = fd.read() 19 | 20 | tlc_traces = extract_traces(content) 21 | assert len(tlc_traces) == 0 22 | 23 | 24 | def _extract_trace_from_tlc(fn): 25 | fn = os.path.join(get_resource_dir(), fn) 26 | content = None 27 | with open(fn, "r") as fd: 28 | content = fd.read() 29 | 30 | tlc_traces = extract_traces(content) 31 | return tlc_traces 32 | 33 | 34 | def test_extract_trace_from_tlc(): 35 | fn = "TlcTraceParse.txt" 36 | tlc_traces = _extract_trace_from_tlc(fn) 37 | assert len(tlc_traces) == 1 38 | 39 | 40 | def test_extract_trace_initState_from_tlc(): 41 | fn = "TlcTraceParseInitState.txt" 42 | tlc_traces = _extract_trace_from_tlc(fn) 43 | assert len(tlc_traces) == 1 44 | 45 | 46 | def test_extract_trace_initStateContinue_from_tlc(): 47 | fn = "TlcTraceParseInitStateContinue.txt" 48 | tlc_traces = _extract_trace_from_tlc(fn) 49 | assert len(tlc_traces) == 3 50 | 51 | 52 | def test_extract_trace_from_tlc_simulation_mode(): 53 | fn = "TlcTraceParseSimulationMode.txt" 54 | fn = os.path.join(get_resource_dir(), fn) 55 | content = None 56 | with open(fn, "r") as fd: 57 | content = fd.read() 58 | 59 | tlc_traces = extract_traces(content) 60 | assert len(tlc_traces) == 1 61 | 62 | 63 | def test_extract_multiple_traces_from_tlc_simulation_mode(): 64 | fn = "TlcMultipleTraceParseSimulationMode.txt" 65 | fn = os.path.join(get_resource_dir(), fn) 66 | content = None 67 | with open(fn, "r") as fd: 68 | content = fd.read() 69 | 70 | tlc_traces = extract_traces(content) 71 | assert len(tlc_traces) == 51 72 | 73 | 74 | def test_extract_multiple_traces_from_tlc(): 75 | 76 | fn = "TlcMultipleTraceParse.txt" 77 | fn = os.path.join(get_resource_dir(), fn) 78 | content = None 79 | with open(fn, "r") as fd: 80 | content = fd.read() 81 | 82 | traces = extract_traces(content) 83 | 84 | assert len(traces) == 4 85 | 86 | 87 | def test_extract_multiple_traces_from_tlc_cutoff(): 88 | 89 | # Some number of lines from stdout have been removed. 
90 | fns = [ 91 | "TlcMultipleTraceParseCutoff0.txt", 92 | "TlcMultipleTraceParseCutoff1.txt", 93 | ] 94 | 95 | contents = [] 96 | 97 | for fn in fns: 98 | path = os.path.join(get_resource_dir(), fn) 99 | with open(path, "r") as fd: 100 | content = fd.read() 101 | contents.append(content) 102 | 103 | traces = [extract_traces(content) for content in contents] 104 | assert all(len(r) == 3 for r in traces) 105 | 106 | 107 | def test_extract_informal_trace_format_trace_from_tlc_stress_example(): 108 | fn = "TlcTraceParse.txt" 109 | fn = os.path.join(get_resource_dir(), fn) 110 | content = None 111 | with open(fn, "r") as fd: 112 | content = fd.read() 113 | 114 | tlc_traces = extract_traces(content) 115 | assert len(tlc_traces) == 1 116 | tlc_trace = tlc_traces[0] 117 | tlc_trace_to_informal_trace_format_trace(tlc_trace) 118 | 119 | 120 | def test_extract_informal_trace_format_trace_from_tlc_stress_example_include_lists(): 121 | fn = "TlcTraceParse.txt" 122 | fn = os.path.join(get_resource_dir(), fn) 123 | content = None 124 | with open(fn, "r") as fd: 125 | content = fd.read() 126 | 127 | tlc_traces = extract_traces(content) 128 | assert len(tlc_traces) == 1 129 | tlc_trace = tlc_traces[0] 130 | itf_trace = tlc_trace_to_informal_trace_format_trace(tlc_trace) 131 | itf_trace = with_lists(itf_trace) 132 | 133 | 134 | def test_extract_informal_trace_format_trace_from_tlc_stress_example_include_records(): 135 | fn = "TlcTraceParse.txt" 136 | fn = os.path.join(get_resource_dir(), fn) 137 | content = None 138 | with open(fn, "r") as fd: 139 | content = fd.read() 140 | 141 | tlc_traces = extract_traces(content) 142 | assert len(tlc_traces) == 1 143 | tlc_trace = tlc_traces[0] 144 | itf_trace = tlc_trace_to_informal_trace_format_trace(tlc_trace) 145 | itf_trace = with_records(itf_trace) 146 | 147 | 148 | def test_extract_informal_trace_format_trace_from_tlc_stress_example_include_lists_and_records(): 149 | fn = "TlcTraceParse.txt" 150 | fn = os.path.join(get_resource_dir(), fn) 151 | content = None 152 | with open(fn, "r") as fd: 153 | content = fd.read() 154 | 155 | tlc_traces = extract_traces(content) 156 | assert len(tlc_traces) == 1 157 | tlc_trace = tlc_traces[0] 158 | itf_trace = tlc_trace_to_informal_trace_format_trace(tlc_trace) 159 | itf_trace = with_records(itf_trace) 160 | itf_trace = with_lists(itf_trace) 161 | 162 | 163 | def test_extract_informal_trace_format_traces_from_tlc_simple_example(): 164 | 165 | fn = "TlcMultipleTraceParse.txt" 166 | fn = os.path.join(get_resource_dir(), fn) 167 | content = None 168 | with open(fn, "r") as fd: 169 | content = fd.read() 170 | 171 | tlc_traces = extract_traces(content) 172 | itf_traces = [ 173 | tlc_trace_to_informal_trace_format_trace(trace) for trace in tlc_traces 174 | ] 175 | assert not any(e is None for e in itf_traces) 176 | 177 | 178 | def test_extract_informal_trace_format_traces_from_tlc_real_world_example(): 179 | 180 | fn = "TlcMultipleTraceParse_RealWorld0.txt" 181 | fn = os.path.join(get_resource_dir(), fn) 182 | content = None 183 | with open(fn, "r") as fd: 184 | content = fd.read() 185 | 186 | cmd = TlcITFCmd() 187 | cmd.stdout = content 188 | cmd.lists = True 189 | cmd.records = True 190 | tlc_itf(cmd=cmd) 191 | --------------------------------------------------------------------------------