├── .codecov.yml
├── .copier-answers.yml
├── .github
├── utils
│ └── release_tag_msg.txt
└── workflows
│ ├── release.yml
│ └── testing.yml
├── .gitignore
├── .pre-commit-config.yaml
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── INSTALL.md
├── LICENSE
├── README.md
├── doc
├── Makefile
├── img
│ ├── qtoolkit_icon.svg
│ ├── qtoolkit_logo.svg
│ └── qtoolkit_logo_horizontal.svg
└── source
│ ├── _static
│ ├── index-images
│ │ ├── api.svg
│ │ ├── contributor.svg
│ │ ├── getting_started.svg
│ │ ├── image_licences.txt
│ │ └── user_guide.svg
│ └── qtoolkit.css
│ ├── api
│ └── index.rst
│ ├── conf.py
│ ├── dev
│ └── .gitkeep
│ ├── glossary.rst
│ ├── index.rst
│ ├── license.rst
│ └── user
│ ├── index.rst
│ ├── quickstart.rst
│ └── whatisqtoolkit.rst
├── doc_requirements.txt
├── pyproject.toml
├── src
└── qtoolkit
│ ├── __init__.py
│ ├── _version.py
│ ├── core
│ ├── __init__.py
│ ├── base.py
│ ├── data_objects.py
│ └── exceptions.py
│ ├── host
│ ├── __init__.py
│ ├── base.py
│ ├── local.py
│ └── remote.py
│ ├── io
│ ├── __init__.py
│ ├── base.py
│ ├── pbs.py
│ ├── pbs_base.py
│ ├── sge.py
│ ├── shell.py
│ └── slurm.py
│ ├── manager.py
│ ├── py.typed
│ └── utils.py
└── tests
├── __init__.py
├── conftest.py
├── core
├── __init__.py
├── test_base.py
└── test_data_objects.py
├── io
├── __init__.py
├── test_base.py
├── test_pbs.py
├── test_sge.py
├── test_shell.py
└── test_slurm.py
├── test_data
└── io
│ ├── pbs
│ ├── create_parse_cancel_output_inout.py
│ ├── create_parse_job_output_inout.py
│ ├── create_parse_submit_output_inout.py
│ ├── parse_cancel_output_inout.yaml
│ ├── parse_job_output_inout.yaml
│ └── parse_submit_output_inout.yaml
│ ├── sge
│ ├── create_parse_cancel_output_inout.py
│ ├── create_parse_job_output_inout.py
│ ├── create_parse_submit_output_inout.py
│ ├── parse_cancel_output_inout.yaml
│ ├── parse_job_output_inout.yaml
│ └── parse_submit_output_inout.yaml
│ └── slurm
│ ├── create_parse_cancel_output_inout.py
│ ├── create_parse_job_output_inout.py
│ ├── create_parse_submit_output_inout.py
│ ├── parse_cancel_output_inout.yaml
│ ├── parse_job_output_inout.yaml
│ └── parse_submit_output_inout.yaml
├── test_qtoolkit.py
└── test_utils.py
/.codecov.yml:
--------------------------------------------------------------------------------
1 | comment:
2 | layout: "diff, files"
3 | behavior: default
4 | require_changes: false
5 | require_base: no
6 | require_head: yes
7 | coverage:
8 | status:
9 | project: off
10 | patch:
11 | default:
12 | only_pulls: true
13 |
--------------------------------------------------------------------------------
/.copier-answers.yml:
--------------------------------------------------------------------------------
1 | # Changes here will be overwritten by Copier; NEVER EDIT MANUALLY
2 | _commit: d0e6447
3 | _src_path: git@github.com:Matgenix/Matgenix-copier-template
4 | author_email: david.waroquiers@matgenix.com
5 | author_fullname: David Waroquiers
6 | author_username: davidwaroquiers
7 | conduct_email: conduct@matgenix.com
8 | copyright_date: '2023'
9 | copyright_holder: Matgenix SRL
10 | copyright_holder_email: software@matgenix.com
11 | package_name: qtoolkit
12 | project_long_name: Queue Tool Kit
13 | project_name: QToolKit
14 | repository_namespace: matgenix
15 | repository_provider: https://github.com
16 | short_description: QToolKit is a python wrapper interfacing with job queues (e.g.
17 | PBS, SLURM, ...).
18 |
--------------------------------------------------------------------------------
/.github/utils/release_tag_msg.txt:
--------------------------------------------------------------------------------
1 | TAG_NAME
2 |
3 | The full release changelog can be seen in the [online docs](https://matgenix.github.io/qtoolkit/changelog) and in the [repository source file](https://github.com/matgenix/qtoolkit/blob/TAG_NAME/CHANGELOG.md).
4 |
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: Publish and Deploy
2 |
3 | on:
4 | release:
5 | types:
6 | - published
7 |
8 | env:
9 | PUBLISH_UPDATE_BRANCH: develop
10 | GIT_USER_NAME: Matgenix
11 | GIT_USER_EMAIL: "dev@matgenix.com"
12 |
13 | jobs:
14 |
15 | publish:
16 | name: Publish package
17 | runs-on: ubuntu-latest
18 | if: github.repository == 'matgenix/qtoolkit' && startsWith(github.ref, 'refs/tags/v')
19 |
20 | steps:
21 | - name: Checkout repository
22 | uses: actions/checkout@v4
23 | with:
24 | submodules: true
25 | fetch-depth: 0
26 |
27 | - name: Set up Python 3.11
28 | uses: actions/setup-python@v4
29 | with:
30 | python-version: '3.11'
31 |
32 | - name: Install Python dependencies
33 | run: |
34 | python -m pip install -U pip
35 | pip install -U setuptools wheel
36 | pip install -e .[all]
37 |
38 | - name: Update changelog
39 | uses: CharMixer/auto-changelog-action@v1
40 | with:
41 | token: ${{ secrets.RELEASE_PAT_BOT }}
42 | release_branch: ${{ env.PUBLISH_UPDATE_BRANCH }}
43 | exclude_labels: "duplicate,question,invalid,wontfix,dependency_updates,skip_changelog"
44 |
45 | - name: Configure git and commit CHANGELOG
46 | run: |
47 | git config --global user.email "${GIT_USER_EMAIL}"
48 | git config --global user.name "${GIT_USER_NAME}"
49 | git commit CHANGELOG.md -m "Release ${GITHUB_REF#refs/tags/}"
50 |
51 | - name: Move tag to new HEAD
52 | run: |
53 | TAG_MSG=.github/utils/release_tag_msg.txt
54 | sed -i "s|TAG_NAME|${GITHUB_REF#refs/tags/}|" "${TAG_MSG}"
55 | git tag -af -F "${TAG_MSG}" ${GITHUB_REF#refs/tags/}
56 |
57 | - name: Update '${{ env.PUBLISH_UPDATE_BRANCH }}' with automated changes
58 | uses: CasperWA/push-protected@v2
59 | with:
60 | token: ${{ secrets.RELEASE_PAT_BOT }}
61 | branch: ${{ env.PUBLISH_UPDATE_BRANCH }}
62 | unprotect_reviews: true
63 | sleep: 15
64 | force: true
65 | tags: true
66 |
67 | - name: Get tagged versions
68 | run: echo "PREVIOUS_VERSION=$(git tag -l --sort -version:refname | sed -n 2p)" >> $GITHUB_ENV
69 |
70 | - name: Create release-specific changelog
71 | uses: CharMixer/auto-changelog-action@v1
72 | with:
73 | token: ${{ secrets.RELEASE_PAT_BOT }}
74 | release_branch: ${{ env.PUBLISH_UPDATE_BRANCH }}
75 | since_tag: "${{ env.PREVIOUS_VERSION }}"
76 | output: "release_changelog.md"
77 | exclude_labels: "duplicate,question,invalid,wontfix,dependency_updates,skip_changelog"
78 |
79 | - name: Append changelog to release body
80 | run: |
81 | gh api /repos/${{ github.repository }}/releases/${{ github.event.release.id }} --jq '.body' > release_body.md
82 | cat release_changelog.md >> release_body.md
83 | gh api /repos/${{ github.repository }}/releases/${{ github.event.release.id }} -X PATCH -F body='@release_body.md'
84 | env:
85 | GITHUB_TOKEN: ${{ secrets.RELEASE_PAT_BOT }}
86 |
87 | - name: Install docs dependencies
88 | run: |
89 | # Required to generate rst files from markdown
90 | sudo apt install pandoc
91 | pip install .[docs]
92 |
93 | - name: Build Sphinx docs
94 | working-directory: doc
95 | run: |
96 | # cannot use sphinx build directly as the makefile handles generation
97 | # of some rst files
98 | make html
99 |
100 | - name: Fix permissions # following https://github.com/actions/upload-pages-artifact?tab=readme-ov-file#file-permissions
101 | run: |
102 | chmod -c -R +rX "./doc/build" | while read line; do
103 | echo "::warning title=Invalid file permissions automatically fixed::$line"
104 | done
105 |
106 | - name: Upload docs artifact
107 | uses: actions/upload-pages-artifact@v2
108 | with:
109 | path: ./doc/build/html
110 |
111 | - name: Build source distribution
112 | run: |
113 | pip install -U build
114 | python -m build
115 |
116 | - name: Publish package to PyPI
117 | uses: pypa/gh-action-pypi-publish@release/v1
118 | with:
119 | user: __token__
120 | password: ${{ secrets.PYPI_PASSWORD }}
121 |
122 | deploy_docs:
123 | if: github.repository == 'matgenix/qtoolkit' && startsWith(github.ref, 'refs/tags/v')
124 | runs-on: ubuntu-latest
125 | permissions:
126 | pages: write # to deploy to Pages
127 | id-token: write # to verify the deployment originates from an appropriate source
128 | needs: publish
129 | environment:
130 | name: "Documentation"
131 | url: https://matgenix.github.io/qtoolkit
132 |
133 | steps:
134 | - name: Deploy docs
135 | uses: actions/deploy-pages@v2
136 |
--------------------------------------------------------------------------------
/.github/workflows/testing.yml:
--------------------------------------------------------------------------------
1 | name: testing
2 |
3 | on:
4 | push:
5 | branches:
6 | - develop
7 |
8 | pull_request:
9 |
10 | jobs:
11 | lint:
12 | runs-on: ubuntu-latest
13 | steps:
14 | - uses: actions/checkout@v3
15 |
16 | - uses: actions/setup-python@v4
17 | with:
18 | python-version: '3.10'
19 | cache: pip
20 | cache-dependency-path: pyproject.toml
21 |
22 | - name: Install dependencies
23 | run: |
24 | python -m pip install --upgrade pip
25 | pip install .[strict,tests,dev]
26 |
27 | - name: Lint
28 | run: pre-commit run --all-files --show-diff-on-failure
29 |
30 | test:
31 | runs-on: ubuntu-latest
32 | strategy:
33 | matrix:
34 | python-version: ['3.9', '3.10', '3.11']
35 |
36 | steps:
37 | - uses: actions/checkout@v3
38 |
39 | - uses: actions/setup-python@v4
40 | with:
41 | python-version: ${{ matrix.python-version }}
42 | cache: pip
43 | cache-dependency-path: pyproject.toml
44 |
45 | - name: Install dependencies
46 | run: |
47 | python -m pip install --upgrade pip
48 | pip install .[strict,tests,docs]
49 |
50 | - name: Test
51 | run: pytest --cov=qtoolkit --cov-report=xml
52 |
53 | - name: Upload coverage to Codecov
54 | if: matrix.python-version == '3.11'
55 | uses: codecov/codecov-action@v3
56 | with:
57 | token: ${{ secrets.CODECOV_TOKEN }}
58 | slug: matgenix/qtoolkit
59 |
60 | docs:
61 | runs-on: ubuntu-latest
62 |
63 | steps:
64 | - uses: actions/checkout@v3
65 |
66 | - uses: actions/setup-python@v4
67 | with:
68 | python-version: '3.11'
69 | cache: pip
70 | cache-dependency-path: pyproject.toml
71 |
72 | - name: Install dependencies
73 | run: |
74 | # Required to generate rst files from markdown
75 | sudo apt install pandoc
76 | python -m pip install --upgrade pip
77 | pip install .[docs]
78 |
79 | - name: Build Sphinx docs
80 | working-directory: doc
81 | run: |
82 | # cannot use sphinx build directly as the makefile handles generation
83 | # of some rst files
84 | make html
85 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | doc/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 |
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | # in version control.
109 | # https://pdm.fming.dev/#use-with-ide
110 | .pdm.toml
111 |
112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
113 | __pypackages__/
114 |
115 | # Celery stuff
116 | celerybeat-schedule
117 | celerybeat.pid
118 |
119 | # SageMath parsed files
120 | *.sage.py
121 |
122 | # Environments
123 | .env
124 | .venv
125 | env/
126 | venv/
127 | ENV/
128 | env.bak/
129 | venv.bak/
130 |
131 | # Spyder project settings
132 | .spyderproject
133 | .spyproject
134 |
135 | # Rope project settings
136 | .ropeproject
137 |
138 | # mkdocs documentation
139 | /site
140 |
141 | # mypy
142 | .mypy_cache/
143 | .dmypy.json
144 | dmypy.json
145 |
146 | # Pyre type checker
147 | .pyre/
148 |
149 | # pytype static type analyzer
150 | .pytype/
151 |
152 | # Cython debug symbols
153 | cython_debug/
154 |
155 | # PyCharm
156 | .idea/
157 |
158 | # macOS
159 | .DS_store
160 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | default_language_version:
2 | python: python3
3 | exclude: ^doc/
4 | repos:
5 | - repo: https://github.com/charliermarsh/ruff-pre-commit
6 | rev: v0.4.6
7 | hooks:
8 | - id: ruff
9 | args: [--fix]
10 | - id: ruff-format
11 | - repo: https://github.com/pre-commit/pre-commit-hooks
12 | rev: v4.6.0
13 | hooks:
14 | - id: check-yaml
15 | - id: fix-encoding-pragma
16 | args: [--remove]
17 | - id: end-of-file-fixer
18 | - id: trailing-whitespace
19 | - repo: https://github.com/asottile/blacken-docs
20 | rev: 1.16.0
21 | hooks:
22 | - id: blacken-docs
23 | additional_dependencies: [black]
24 | exclude: README.md
25 | - repo: https://github.com/pre-commit/pygrep-hooks
26 | rev: v1.10.0
27 | hooks:
28 | - id: python-use-type-annotations
29 | - id: rst-backticks
30 | - id: rst-directive-colons
31 | - id: rst-inline-touching-normal
32 | - repo: https://github.com/pre-commit/mirrors-mypy
33 | rev: v1.10.0
34 | hooks:
35 | - id: mypy
36 | files: ^src/
37 | additional_dependencies:
38 | - tokenize-rt==4.1.0
39 | - types-paramiko
40 | - repo: https://github.com/codespell-project/codespell
41 | rev: v2.3.0
42 | hooks:
43 | - id: codespell
44 | stages: [commit, commit-msg]
45 | args: [--ignore-words-list, "titel,statics,ba,nd,te"]
46 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 |
3 | ## [v0.1.6](https://github.com/Matgenix/qtoolkit/tree/v0.1.6) (2025-01-20)
4 |
5 | [Full Changelog](https://github.com/Matgenix/qtoolkit/compare/v0.1.5...v0.1.6)
6 |
7 | **Closed issues:**
8 |
9 | - Potential issue for jobs list in SGE [\#50](https://github.com/Matgenix/qtoolkit/issues/50)
10 | - Confusing SLURM template variables [\#48](https://github.com/Matgenix/qtoolkit/issues/48)
11 | - `process_placement` unused by `slurm.py`? [\#47](https://github.com/Matgenix/qtoolkit/issues/47)
12 |
13 | **Merged pull requests:**
14 |
15 | - Sanitization of job name and tests for PBS [\#53](https://github.com/Matgenix/qtoolkit/pull/53) ([gpetretto](https://github.com/gpetretto))
16 | - fix parse\_jobs\_list\_output parsing issues with SGE [\#52](https://github.com/Matgenix/qtoolkit/pull/52) ([QuantumChemist](https://github.com/QuantumChemist))
17 | - improve message for missing keys [\#49](https://github.com/Matgenix/qtoolkit/pull/49) ([gpetretto](https://github.com/gpetretto))
18 | - Implementation of SGE interface [\#43](https://github.com/Matgenix/qtoolkit/pull/43) ([QuantumChemist](https://github.com/QuantumChemist))
19 | - Same `ruff` linting as `jf-remote` [\#42](https://github.com/Matgenix/qtoolkit/pull/42) ([janosh](https://github.com/janosh))
20 |
21 | ## [v0.1.5](https://github.com/Matgenix/qtoolkit/tree/v0.1.5) (2024-08-09)
22 |
23 | [Full Changelog](https://github.com/Matgenix/qtoolkit/compare/v0.1.4...v0.1.5)
24 |
25 | **Merged pull requests:**
26 |
27 | - Fix codecov upload in CI [\#46](https://github.com/Matgenix/qtoolkit/pull/46) ([ml-evs](https://github.com/ml-evs))
28 | - Add simple test for QResources -\> slurm submission script [\#45](https://github.com/Matgenix/qtoolkit/pull/45) ([ml-evs](https://github.com/ml-evs))
29 | - Fix `mem_per_cpu` in `SlurmIO` not being passed as snake\_case [\#44](https://github.com/Matgenix/qtoolkit/pull/44) ([janosh](https://github.com/janosh))
30 | - Correctly handle `float` in `(Slurm|PBS)IO._convert_time_to_str` [\#41](https://github.com/Matgenix/qtoolkit/pull/41) ([janosh](https://github.com/janosh))
31 |
32 | ## [v0.1.4](https://github.com/Matgenix/qtoolkit/tree/v0.1.4) (2024-03-19)
33 |
34 | [Full Changelog](https://github.com/Matgenix/qtoolkit/compare/v0.1.3...v0.1.4)
35 |
36 | **Closed issues:**
37 |
38 | - Issue with SLURM RD state [\#37](https://github.com/Matgenix/qtoolkit/issues/37)
39 |
40 | **Merged pull requests:**
41 |
42 | - Slurm and QResources updates [\#38](https://github.com/Matgenix/qtoolkit/pull/38) ([gpetretto](https://github.com/gpetretto))
43 |
44 | ## [v0.1.3](https://github.com/Matgenix/qtoolkit/tree/v0.1.3) (2024-02-05)
45 |
46 | [Full Changelog](https://github.com/Matgenix/qtoolkit/compare/v0.1.2...v0.1.3)
47 |
48 | **Closed issues:**
49 |
50 | - Release workflow does not update changelog [\#35](https://github.com/Matgenix/qtoolkit/issues/35)
51 | - Missing bits of documentation [\#34](https://github.com/Matgenix/qtoolkit/issues/34)
52 |
53 | **Merged pull requests:**
54 |
55 | - Update docs and release workflow [\#36](https://github.com/Matgenix/qtoolkit/pull/36) ([ml-evs](https://github.com/ml-evs))
56 |
57 | ## [v0.1.2](https://github.com/Matgenix/qtoolkit/tree/v0.1.2) (2024-02-05)
58 |
59 | [Full Changelog](https://github.com/Matgenix/qtoolkit/compare/v0.1.1...v0.1.2)
60 |
61 | **Closed issues:**
62 |
63 | - Logo [\#21](https://github.com/Matgenix/qtoolkit/issues/21)
64 |
65 | **Merged pull requests:**
66 |
67 | - Add PyPI release workflow [\#33](https://github.com/Matgenix/qtoolkit/pull/33) ([ml-evs](https://github.com/ml-evs))
68 | - Fix bug due to poor naming of QResources attribute [\#32](https://github.com/Matgenix/qtoolkit/pull/32) ([gpetretto](https://github.com/gpetretto))
69 | - Fix README badge and warning [\#31](https://github.com/Matgenix/qtoolkit/pull/31) ([ml-evs](https://github.com/ml-evs))
70 | - Better deserialization handling for QTKEnum [\#30](https://github.com/Matgenix/qtoolkit/pull/30) ([gpetretto](https://github.com/gpetretto))
71 | - Unit tests [\#29](https://github.com/Matgenix/qtoolkit/pull/29) ([davidwaroquiers](https://github.com/davidwaroquiers))
72 | - Fix tests. Setup codecov [\#28](https://github.com/Matgenix/qtoolkit/pull/28) ([davidwaroquiers](https://github.com/davidwaroquiers))
73 |
74 | ## [v0.1.1](https://github.com/Matgenix/qtoolkit/tree/v0.1.1) (2023-10-09)
75 |
76 | [Full Changelog](https://github.com/Matgenix/qtoolkit/compare/v0.1.0...v0.1.1)
77 |
78 | **Closed issues:**
79 |
80 | - Automated releases through GH actions [\#17](https://github.com/Matgenix/qtoolkit/issues/17)
81 |
82 | **Merged pull requests:**
83 |
84 | - Add CI docs build and deploy [\#27](https://github.com/Matgenix/qtoolkit/pull/27) ([ml-evs](https://github.com/ml-evs))
85 | - Add logos [\#26](https://github.com/Matgenix/qtoolkit/pull/26) ([ml-evs](https://github.com/ml-evs))
86 |
87 | ## [v0.1.0](https://github.com/Matgenix/qtoolkit/tree/v0.1.0) (2023-10-06)
88 |
89 | [Full Changelog](https://github.com/Matgenix/qtoolkit/compare/3658f911689f65f7a0caf8728de48c3b1e2d1f90...v0.1.0)
90 |
91 | **Closed issues:**
92 |
93 | - Problem in parse\_jobs\_list\_output in class ShellIO [\#22](https://github.com/Matgenix/qtoolkit/issues/22)
94 | - `number_of_tasks` missing in slurm template [\#18](https://github.com/Matgenix/qtoolkit/issues/18)
95 |
96 | **Merged pull requests:**
97 |
98 | - Install build during build step [\#25](https://github.com/Matgenix/qtoolkit/pull/25) ([ml-evs](https://github.com/ml-evs))
99 | - Preparing public release [\#24](https://github.com/Matgenix/qtoolkit/pull/24) ([ml-evs](https://github.com/ml-evs))
100 | - Fix problem in parse\_jobs\_list\_output in class ShellIO [\#23](https://github.com/Matgenix/qtoolkit/pull/23) ([FabiPi3](https://github.com/FabiPi3))
101 | - Updates for SlurmIO and ShellIO [\#20](https://github.com/Matgenix/qtoolkit/pull/20) ([gpetretto](https://github.com/gpetretto))
102 | - Fix name for `ntasks` in slurm template [\#19](https://github.com/Matgenix/qtoolkit/pull/19) ([ml-evs](https://github.com/ml-evs))
103 | - Go Live version [\#16](https://github.com/Matgenix/qtoolkit/pull/16) ([davidwaroquiers](https://github.com/davidwaroquiers))
104 | - update shell io [\#15](https://github.com/Matgenix/qtoolkit/pull/15) ([gpetretto](https://github.com/gpetretto))
105 | - Modify slurm template [\#13](https://github.com/Matgenix/qtoolkit/pull/13) ([gpetretto](https://github.com/gpetretto))
106 | - Adding a shell io object [\#12](https://github.com/Matgenix/qtoolkit/pull/12) ([gpetretto](https://github.com/gpetretto))
107 | - Documentation layout [\#11](https://github.com/Matgenix/qtoolkit/pull/11) ([davidwaroquiers](https://github.com/davidwaroquiers))
108 | - split io and manager [\#7](https://github.com/Matgenix/qtoolkit/pull/7) ([gpetretto](https://github.com/gpetretto))
109 | - WIP Remote host [\#5](https://github.com/Matgenix/qtoolkit/pull/5) ([davidwaroquiers](https://github.com/davidwaroquiers))
110 | - typing [\#4](https://github.com/Matgenix/qtoolkit/pull/4) ([gpetretto](https://github.com/gpetretto))
111 | - Commented test on documentation in testing.yaml. [\#3](https://github.com/Matgenix/qtoolkit/pull/3) ([davidwaroquiers](https://github.com/davidwaroquiers))
112 | - Fixed state mapping from slurm COMPLETED to standard QState DONE. [\#2](https://github.com/Matgenix/qtoolkit/pull/2) ([davidwaroquiers](https://github.com/davidwaroquiers))
113 |
114 |
115 |
116 | \* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)*
117 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 |
2 | # Contributor Covenant Code of Conduct
3 |
4 | ## Our Pledge
5 |
6 | We as members, contributors, and leaders pledge to make participation in our
7 | community a harassment-free experience for everyone, regardless of age, body
8 | size, visible or invisible disability, ethnicity, sex characteristics, gender
9 | identity and expression, level of experience, education, socioeconomic status,
10 | nationality, personal appearance, race, caste, color, religion, or sexual
11 | identity and orientation.
12 |
13 | We pledge to act and interact in ways that contribute to an open, welcoming,
14 | diverse, inclusive, and healthy community.
15 |
16 | ## Our Standards
17 |
18 | Examples of behavior that contributes to a positive environment for our
19 | community include:
20 |
21 | * Demonstrating empathy and kindness toward other people
22 | * Being respectful of differing opinions, viewpoints, and experiences
23 | * Giving and gracefully accepting constructive feedback
24 | * Accepting responsibility and apologizing to those affected by our mistakes,
25 | and learning from the experience
26 | * Focusing on what is best not just for us as individuals, but for the overall
27 | community
28 |
29 | Examples of unacceptable behavior include:
30 |
31 | * The use of sexualized language or imagery, and sexual attention or advances of
32 | any kind
33 | * Trolling, insulting or derogatory comments, and personal or political attacks
34 | * Public or private harassment
35 | * Publishing others' private information, such as a physical or email address,
36 | without their explicit permission
37 | * Other conduct which could reasonably be considered inappropriate in a
38 | professional setting
39 |
40 | ## Enforcement Responsibilities
41 |
42 | Community leaders are responsible for clarifying and enforcing our standards of
43 | acceptable behavior and will take appropriate and fair corrective action in
44 | response to any behavior that they deem inappropriate, threatening, offensive,
45 | or harmful.
46 |
47 | Community leaders have the right and responsibility to remove, edit, or reject
48 | comments, commits, code, wiki edits, issues, and other contributions that are
49 | not aligned to this Code of Conduct, and will communicate reasons for moderation
50 | decisions when appropriate.
51 |
52 | ## Scope
53 |
54 | This Code of Conduct applies within all community spaces, and also applies when
55 | an individual is officially representing the community in public spaces.
56 | Examples of representing our community include using an official e-mail address,
57 | posting via an official social media account, or acting as an appointed
58 | representative at an online or offline event.
59 |
60 | ## Enforcement
61 |
62 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
63 | reported to the community leaders responsible for enforcement at
64 | conduct@matgenix.com.
65 | All complaints will be reviewed and investigated promptly and fairly.
66 |
67 | All community leaders are obligated to respect the privacy and security of the
68 | reporter of any incident.
69 |
70 | ## Enforcement Guidelines
71 |
72 | Community leaders will follow these Community Impact Guidelines in determining
73 | the consequences for any action they deem in violation of this Code of Conduct:
74 |
75 | ### 1. Correction
76 |
77 | **Community Impact**: Use of inappropriate language or other behavior deemed
78 | unprofessional or unwelcome in the community.
79 |
80 | **Consequence**: A private, written warning from community leaders, providing
81 | clarity around the nature of the violation and an explanation of why the
82 | behavior was inappropriate. A public apology may be requested.
83 |
84 | ### 2. Warning
85 |
86 | **Community Impact**: A violation through a single incident or series of
87 | actions.
88 |
89 | **Consequence**: A warning with consequences for continued behavior. No
90 | interaction with the people involved, including unsolicited interaction with
91 | those enforcing the Code of Conduct, for a specified period of time. This
92 | includes avoiding interactions in community spaces as well as external channels
93 | like social media. Violating these terms may lead to a temporary or permanent
94 | ban.
95 |
96 | ### 3. Temporary Ban
97 |
98 | **Community Impact**: A serious violation of community standards, including
99 | sustained inappropriate behavior.
100 |
101 | **Consequence**: A temporary ban from any sort of interaction or public
102 | communication with the community for a specified period of time. No public or
103 | private interaction with the people involved, including unsolicited interaction
104 | with those enforcing the Code of Conduct, is allowed during this period.
105 | Violating these terms may lead to a permanent ban.
106 |
107 | ### 4. Permanent Ban
108 |
109 | **Community Impact**: Demonstrating a pattern of violation of community
110 | standards, including sustained inappropriate behavior, harassment of an
111 | individual, or aggression toward or disparagement of classes of individuals.
112 |
113 | **Consequence**: A permanent ban from any sort of public interaction within the
114 | community.
115 |
116 | ## Attribution
117 |
118 | This Code of Conduct is adapted from the [Contributor Covenant][homepage],
119 | version 2.1, available at
120 | [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
121 |
122 | Community Impact Guidelines were inspired by
123 | [Mozilla's code of conduct enforcement ladder][Mozilla CoC].
124 |
125 | For answers to common questions about this code of conduct, see the FAQ at
126 | [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
127 | [https://www.contributor-covenant.org/translations][translations].
128 |
129 | [homepage]: https://www.contributor-covenant.org
130 | [v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
131 | [Mozilla CoC]: https://github.com/mozilla/diversity
132 | [FAQ]: https://www.contributor-covenant.org/faq
133 | [translations]: https://www.contributor-covenant.org/translations
134 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to QToolKit
2 |
3 | We love your input! We want to make contributing to QToolKit as easy and
4 | transparent as possible, whether it's:
5 |
6 | - Reporting a bug
7 | - Discussing the current state of the code
8 | - Submitting a fix
9 | - Proposing or implementing new features
10 | - Becoming a maintainer
11 |
12 | ## Reporting bugs, getting help, and discussion
13 |
14 | QToolKit is still in development, so at the moment we
15 | do not have a dedicated help forum. For the time being, please
16 | submit questions and bugs to the
17 | [GitHub issues page](https://github.com/matgenix/qtoolkit/issues).
18 |
19 | If you are making a bug report, incorporate as many elements of the
20 | following as possible to ensure a timely response and avoid the
21 | need for followups:
22 |
23 | - A quick summary and/or background.
24 | - Steps to reproduce - be specific! **Provide sample code.**
25 | - What you expected would happen, compared to what actually happens.
26 | - The full stack trace of any errors you encounter.
27 | - Notes (possibly including why you think this might be happening,
28 | or steps you tried that didn't work).
29 |
30 | We love thorough bug reports as this means the development team can
31 | make quick and meaningful fixes. When we confirm your bug report,
32 | we'll move it to the GitHub issues where its progress can be
33 | further tracked.
34 |
35 | ## Contributing code modifications or additions through Github
36 |
37 | We use GitHub to host code, to track issues and feature requests,
38 | as well as accept pull requests. We maintain a list of all
39 | contributors [here](https://matgenix.github.io/qtoolkit/contributors.html).
40 |
41 | Pull requests are the best way to propose changes to the codebase.
42 | Follow the [forking workflow](https://www.atlassian.com/git/tutorials/comparing-workflows/forking-workflow)
43 | for more information on this procedure.
44 |
45 | The basic procedure for making a PR is:
46 |
47 | - Fork the repo and create your branch from main.
48 | - Commit your improvements to your branch and push to your Github fork (repo).
49 | - When you're finished, go to your fork and make a Pull Request. It will
50 | automatically update if you need to make further changes.
51 |
52 | ## How to Make a Great Pull Request
53 |
54 | We have a few tips for writing good PRs that are accepted into the main repo:
55 |
56 | - Use the Numpy Code style for all of your code. Find an example [here](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_numpy.html#example-numpy).
57 | - Your code should have (4) spaces instead of tabs.
58 | - If needed, update the documentation.
59 | - **Write tests** for new features! Good tests are 100%, absolutely necessary
60 | for good code. We use the python `pytest` framework -- see some of the
61 | other tests in this repo for examples, or review the [Hitchhiker's guide
62 | to python](https://docs.python-guide.org/writing/tests) for some good
63 | resources on writing good tests.
64 | - Understand your contributions will fall under the same license as this repo.
65 | - This project uses `pre-commit` for uniform linting across many developers. You can install
66 | it through the extra dev dependencies with `pip install -e .[dev]` and then run `pre-commit install`
67 | to activate it for your local repository.
68 |
69 | When you submit your PR, our CI service will automatically run your tests.
70 | We welcome good discussion on the best ways to write your code, and the comments
71 | on your PR are an excellent area for discussion.
72 |
--------------------------------------------------------------------------------
/INSTALL.md:
--------------------------------------------------------------------------------
1 | # Installation
2 |
3 | QToolKit is available on [PyPI](https://pypi.org/project/qtoolkit) and can be installed with `pip`:
4 |
5 | ```shell
6 | pip install qtoolkit
7 | ```
8 |
9 | ## Development installation
10 |
11 | Clone this repository and then install with `pip` in the virtual environment of your choice.
12 |
13 | ```shell
14 | git clone git@github.com:matgenix/qtoolkit.git
15 | cd qtoolkit
16 | pip install -e .[dev,tests]
17 | ```
18 |
19 | This will perform an editable installation with additional development and test dependencies.
20 | You can then activate `pre-commit` in your local repository with `pre-commit install`.
21 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | QToolKit Copyright (c) 2023, Matgenix SRL.
2 | All rights reserved.
3 |
4 | Redistribution and use in source and binary forms, with or without
5 | modification, are permitted provided that the following conditions
6 | are met:
7 |
8 | (1) Redistributions of source code must retain the above copyright
9 | notice, this list of conditions and the following disclaimer.
10 |
11 | (2) Redistributions in binary form must reproduce the above
12 | copyright notice, this list of conditions and the following
13 | disclaimer in the documentation and/or other materials provided with
14 | the distribution.
15 |
16 | (3) Neither the name of Matgenix SRL nor the names of
17 | its contributors may be used to endorse or promote products derived
18 | from this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 | FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 | COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 | INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 | BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 | LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30 | ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 | POSSIBILITY OF SUCH DAMAGE.
32 |
33 | You are under no obligation whatsoever to provide any bug fixes,
34 | patches, or upgrades to the features, functionality or performance
35 | of the source code ("Enhancements") to anyone; however, if you
36 | choose to make your Enhancements available either publicly, or
37 | directly to Matgenix SRL or its
38 | contributors, without imposing a separate written license agreement
39 | for such Enhancements, then you hereby grant the following license:
40 | a non-exclusive, royalty-free perpetual license to install, use,
41 | modify, prepare derivative works, incorporate into other computer
42 | software, distribute, and sublicense such enhancements or derivative
43 | works thereof, in binary and source code form.
44 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | #
4 |
5 |
6 |
7 |
8 |
9 | [](https://github.com/matgenix/qtoolkit/actions/workflows/testing.yml)
10 | [](https://codecov.io/gh/matgenix/qtoolkit)
11 | [](https://pypi.org/project/qtoolkit)
12 | 
13 |
14 |
15 |
16 | **[Full Documentation][docs]**
17 |
18 | > [!WARNING]
19 | > 🚧 This repository is still under construction. 🚧
20 |
21 | ## Need help?
22 |
23 | Ask questions about QToolKit on the [QToolKit support forum][help-forum].
24 | If you've found an issue with QToolKit, please submit a bug report on [GitHub Issues][issues].
25 |
26 | ## What’s new?
27 |
28 | Track changes to qtoolkit through the [changelog][changelog].
29 |
30 | ## Contributing
31 |
32 | We greatly appreciate any contributions in the form of a pull request.
33 | Additional information on contributing to QToolKit can be found [here][contributing].
34 | We maintain a list of all contributors [here][contributors].
35 |
36 | ### Code of conduct
37 |
38 | Help us keep QToolKit open and inclusive.
39 | Please read and follow our [Code of Conduct][codeofconduct]
40 | [](CODE_OF_CONDUCT.md).
41 |
42 | ## License
43 |
44 | QToolKit is released under a modified BSD license; the full text can be found [here][license].
45 |
46 | ## Acknowledgements
47 |
48 | QToolKit is developed and maintained by Matgenix SRL.
49 |
50 | A full list of all contributors can be found [here][contributors].
51 |
52 | [help-forum]: https://github.com/matgenix/qtoolkit/issues
53 | [issues]: https://github.com/matgenix/qtoolkit/issues
54 | [installation]: https://github.com/matgenix/qtoolkit/blob/develop/INSTALL.md
55 | [contributing]: https://github.com/matgenix/qtoolkit/blob/develop/CONTRIBUTING.md
56 | [codeofconduct]: https://github.com/matgenix/qtoolkit/blob/develop/CODE_OF_CONDUCT.md
57 | [changelog]: https://github.com/matgenix/qtoolkit/blob/develop/CHANGELOG.md
58 | [contributors]: https://github.com/matgenix/qtoolkit/graphs/contributors
59 | [license]: https://raw.githubusercontent.com/matgenix/qtoolkit/develop/LICENSE
60 | [docs]: https://matgenix.github.io/qtoolkit/
61 |
--------------------------------------------------------------------------------
/doc/Makefile:
--------------------------------------------------------------------------------
1 | # Makefile for Sphinx documentation
2 | #
3 |
4 | # PYVER needs to be major.minor, just "3" doesn't work - it will result in
5 | # issues with the amendments to PYTHONPATH and install paths (see DIST_VARS).
6 |
7 | # Use explicit "version_info" indexing since make cannot handle colon characters, and
8 | # evaluate it now to allow easier debugging when printing the variable
9 |
10 | PYVER:=$(shell python3 -c 'from sys import version_info as v; print("{0}.{1}".format(v[0], v[1]))')
11 | PYTHON = python$(PYVER)
12 |
13 | # You can set these variables from the command line.
14 | SPHINXOPTS ?=
15 | SPHINXBUILD ?= LANG=C sphinx-build
16 | PAPER ?=
17 | # # For merging a documentation archive into a git checkout of numpy/doc
18 | # # Turn a tag like v1.18.0 into 1.18
19 | # # Use sed -n -e 's/patttern/match/p' to return a blank value if no match
20 | # TAG ?= $(shell git describe --tag | sed -n -e's,v\([1-9]\.[0-9]*\)\.[0-9].*,\1,p')
21 |
22 | FILES=
23 |
24 | # Internal variables.
25 | PAPEROPT_a4 = -D latex_paper_size=a4
26 | PAPEROPT_letter = -D latex_paper_size=letter
27 | ALLSPHINXOPTS = -WT --keep-going -d build/doctrees $(PAPEROPT_$(PAPER)) \
28 | $(SPHINXOPTS) source
29 |
30 | .PHONY: help clean html version-check html-build
31 |
32 | #------------------------------------------------------------------------------
33 |
34 | help:
35 | @echo "Please use \`make <target>' where <target> is one of"
36 | @echo " clean to remove generated doc files and start fresh"
37 | @echo " html to make standalone HTML files"
38 |
39 | clean:
40 | -rm -rf build/*
41 | find . -name generated -type d -prune -exec rm -rf "{}" ";"
42 |
43 |
44 | #------------------------------------------------------------------------------
45 | # Automated generation of all documents
46 | #------------------------------------------------------------------------------
47 |
48 | # Build the current QToolKit version, and extract docs from it.
49 | # We have to be careful of some issues:
50 | #
51 | # - Everything must be done using the same Python version
52 | #
53 |
54 | #SPHINXBUILD="LANG=C sphinx-build"
55 |
56 |
57 | #------------------------------------------------------------------------------
58 | # Basic Sphinx generation rules for different formats
59 | #------------------------------------------------------------------------------
60 | generate: build/generate-stamp
61 | build/generate-stamp: $(wildcard source/reference/*.rst)
62 | mkdir -p build
63 | touch build/generate-stamp
64 |
65 | html: api-doc html-build
66 | html-build: generate
67 | pandoc --from=markdown --to=rst --output=./source/dev/index.rst ../CONTRIBUTING.md
68 | pandoc --from=markdown --to=rst --output=./source/user/install.rst ../INSTALL.md
69 | pandoc --from=markdown --to=rst --output=./source/changelog.rst ../CHANGELOG.md
70 | # Re-add rst links from pydata template
71 | sed -i -e "1i .. _install:\n" ./source/user/install.rst
72 | sed -i -e "1i ..\n THIS FILE IS AUTOMATICALLY GENERATED FROM INSTALL.md\n" ./source/user/install.rst
73 | sed -i -e "1i .. _devindex:\n" ./source/dev/index.rst
74 | sed -i -e "1i ..\n THIS FILE IS AUTOMATICALLY GENERATED FROM CONTRIBUTING.md\n" ./source/dev/index.rst
75 | sed -i -e "1i .. _changelog:\n" ./source/changelog.rst
76 | sed -i -e "1i ..\n THIS FILE IS AUTOMATICALLY GENERATED FROM CHANGELOG.md\n" ./source/changelog.rst
77 | mkdir -p build/html build/doctrees
78 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) build/html $(FILES)
79 | @echo
80 | @echo "Build finished. The HTML pages are in build/html."
81 | api-doc:
82 | sphinx-apidoc -e -f -o source/api ../src/qtoolkit
83 |
--------------------------------------------------------------------------------
/doc/source/_static/index-images/api.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
7 |
8 |
9 |
22 |
35 |
48 |
49 |
50 |
51 |
--------------------------------------------------------------------------------
/doc/source/_static/index-images/contributor.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
7 |
8 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/doc/source/_static/index-images/getting_started.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/doc/source/_static/index-images/image_licences.txt:
--------------------------------------------------------------------------------
1 | getting_started.svg: https://www.svgrepo.com/svg/393367/rocket (PD Licence)
2 | user_guide.svg: https://www.svgrepo.com/svg/75531/user-guide (CC0 Licence)
3 | api.svg: https://www.svgrepo.com/svg/157898/gears-configuration-tool (CC0 Licence)
4 | contributor.svg: https://www.svgrepo.com/svg/57189/code-programing-symbol (CC0 Licence)
5 |
--------------------------------------------------------------------------------
/doc/source/_static/index-images/user_guide.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
7 |
8 |
9 |
10 |
16 |
44 |
45 |
46 |
47 |
48 |
--------------------------------------------------------------------------------
/doc/source/_static/qtoolkit.css:
--------------------------------------------------------------------------------
1 | @import url('https://fonts.googleapis.com/css2?family=Lato:ital,wght@0,400;0,700;0,900;1,400;1,700;1,900&family=Open+Sans:ital,wght@0,400;0,600;1,400;1,600&display=swap');
2 |
3 | :root {
4 | --matgenix-color: #46b3c1;
5 | --matgenix-dark-color: #338d99;
6 | }
7 |
8 | .navbar-brand {
9 | height: 75px;
10 | }
11 |
12 | body {
13 | font-family: 'Open Sans', sans-serif;
14 | }
15 |
16 | pre, code {
17 | font-size: 100%;
18 | line-height: 155%;
19 | }
20 |
21 | h1 {
22 | font-family: "Lato", sans-serif;
23 | color: #013243; /* warm black */
24 | }
25 |
26 | h2 {
27 | color: #4d77cf; /* han blue */
28 | letter-spacing: -.03em;
29 | }
30 |
31 | h3 {
32 | color: #013243; /* warm black */
33 | letter-spacing: -.03em;
34 | }
35 |
36 | /* Style the active version button.
37 |
38 | - dev: orange
39 | - stable: green
40 | - old, PR: red
41 |
42 | Colors from:
43 |
44 | Wong, B. Points of view: Color blindness.
45 | Nat Methods 8, 441 (2011). https://doi.org/10.1038/nmeth.1618
46 | */
47 |
48 | /* If the active version has the name "dev", style it orange */
49 | #version_switcher_button[data-active-version-name*="dev"] {
50 | background-color: #E69F00;
51 | border-color: #E69F00;
52 | color:#000000;
53 | }
54 |
55 | /* green for `stable` */
56 | #version_switcher_button[data-active-version-name*="stable"] {
57 | background-color: #009E73;
58 | border-color: #009E73;
59 | }
60 |
61 | /* red for `old` */
62 | #version_switcher_button:not([data-active-version-name*="stable"], [data-active-version-name*="dev"], [data-active-version-name=""]) {
63 | background-color: #980F0F;
64 | border-color: #980F0F;
65 | }
66 |
67 | /* Main page overview cards */
68 |
69 | .sd-card {
70 | background: #fff;
71 | border-radius: 0;
72 | padding: 30px 10px 20px 10px;
73 | margin: 10px 0px;
74 | }
75 |
76 | .sd-card .sd-card-header {
77 | text-align: center;
78 | }
79 |
80 | .sd-card .sd-card-header .sd-card-text {
81 | margin: 0px;
82 | }
83 |
84 | .sd-card .sd-card-img-top {
85 | height: 52px;
86 | width: 52px;
87 | margin-left: auto;
88 | margin-right: auto;
89 | }
90 |
91 | .sd-card .sd-card-header {
92 | border: none;
93 | background-color: white;
94 | color: #150458 !important;
95 | font-size: var(--pst-font-size-h5);
96 | font-weight: bold;
97 | padding: 2.5rem 0rem 0.5rem 0rem;
98 | }
99 |
100 | .sd-card .sd-card-footer {
101 | border: none;
102 | background-color: white;
103 | }
104 |
105 | .sd-card .sd-card-footer .sd-card-text {
106 | max-width: 220px;
107 | margin-left: auto;
108 | margin-right: auto;
109 | }
110 |
111 | /* Announcements */
112 | .bd-header-announcement {
113 | background-color: orange;
114 | }
115 |
116 | /* Dark theme tweaking */
117 | html[data-theme=dark] .sd-card img[src*='.svg'] {
118 | filter: invert(0.82) brightness(0.8) contrast(1.2);
119 | }
120 |
121 | /* Main index page overview cards */
122 | html[data-theme=dark] .sd-card {
123 | background-color:var(--pst-color-background);
124 | }
125 |
126 | html[data-theme=dark] .sd-shadow-sm {
127 | box-shadow: 0 .1rem 1rem rgba(250, 250, 250, .6) !important
128 | }
129 |
130 | html[data-theme=dark] .sd-card .sd-card-header {
131 | background-color:var(--pst-color-background);
132 | color: #150458 !important;
133 | }
134 |
135 | html[data-theme=dark] .sd-card .sd-card-footer {
136 | background-color:var(--pst-color-background);
137 | }
138 |
139 | html[data-theme=dark] .bd-header-announcement {
140 | background-color: red;
141 | }
142 |
143 | html[data-theme=dark] h1 {
144 | color: var(--pst-color-primary);
145 | }
146 |
147 | html[data-theme=dark] h3 {
148 | color: #0a6774;
149 | }
150 |
151 | .sd-btn-secondary {
152 | background-color: var(--matgenix-color) !important;
153 | border-color: var(--matgenix-color) !important;
154 | }
155 |
156 | .sd-btn-secondary:hover, .sd-btn-secondary:focus {
157 | background-color: var(--matgenix-dark-color) !important;
158 | border-color: var(--matgenix-dark-color) !important;
159 | }
160 |
--------------------------------------------------------------------------------
/doc/source/api/index.rst:
--------------------------------------------------------------------------------
1 | .. _api:
2 |
3 | #############
4 | API Reference
5 | #############
6 |
7 | This is the API reference.
8 |
9 | .. include:: modules.rst
10 |
--------------------------------------------------------------------------------
/doc/source/conf.py:
--------------------------------------------------------------------------------
1 | # noqa: INP001
2 | # Configuration file for the Sphinx documentation builder.
3 | #
4 | # This file does only contain a selection of the most common options. For a
5 | # full list see the documentation:
6 | # http://www.sphinx-doc.org/en/master/config
7 |
8 | # -- Path setup --------------------------------------------------------------
9 |
10 | # If extensions (or modules to document with autodoc) are in another directory,
11 | # add these directories to sys.path here. If the directory is relative to the
12 | # documentation root, use os.path.abspath to make it absolute, like shown here.
13 | #
14 | import os
15 | import sys
16 |
17 | # sys.path.insert(0, os.path.abspath('.'))
18 | sys.path.insert(
19 | 0, os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."))
20 | )
21 | import qtoolkit
22 |
23 | # -- Project information -----------------------------------------------------
24 |
25 | project = "QToolKit"
26 | copyright = "2023, Matgenix SRL" # noqa: A001
27 | author = "Guido Petretto, David Waroquiers"
28 |
29 |
30 | # The short X.Y version
31 | version = qtoolkit.__version__
32 | # The full version, including alpha/beta/rc tags
33 | release = qtoolkit.__version__
34 |
35 |
36 | # -- General configuration ---------------------------------------------------
37 |
38 | # If your documentation needs a minimal Sphinx version, state it here.
39 | #
40 | # needs_sphinx = '1.0'
41 |
42 | # Add any Sphinx extension module names here, as strings. They can be
43 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
44 | # ones.
45 | extensions = [
46 | "sphinx.ext.autodoc",
47 | "sphinx.ext.intersphinx",
48 | "sphinx.ext.todo",
49 | "sphinx.ext.viewcode",
50 | "sphinx.ext.napoleon", # For Google Python Style Guide
51 | "sphinx.ext.coverage",
52 | "sphinx.ext.doctest",
53 | "sphinx.ext.autosummary",
54 | "sphinx.ext.graphviz",
55 | "sphinx.ext.ifconfig",
56 | "matplotlib.sphinxext.plot_directive",
57 | "IPython.sphinxext.ipython_console_highlighting",
58 | "IPython.sphinxext.ipython_directive",
59 | "sphinx.ext.mathjax",
60 | "sphinx_design",
61 | ]
62 |
63 | # Add any paths that contain templates here, relative to this directory.
64 | templates_path = ["_templates"]
65 |
66 | # The suffix(es) of source filenames.
67 | # You can specify multiple suffix as a list of string:
68 | #
69 | # source_suffix = ['.rst', '.md']
70 | source_suffix = ".rst"
71 |
72 | # The master toctree document.
73 | master_doc = "index"
74 |
75 | # The language for content autogenerated by Sphinx. Refer to documentation
76 | # for a list of supported languages.
77 | #
78 | # This is also used if you do content translation via gettext catalogs.
79 | # Usually you set "language" from the command line for these cases.
80 | language = "en"
81 |
82 | # List of patterns, relative to source directory, that match files and
83 | # directories to ignore when looking for source files.
84 | # This pattern also affects html_static_path and html_extra_path .
85 | exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
86 |
87 | # The name of the Pygments (syntax highlighting) style to use.
88 | pygments_style = "sphinx"
89 |
90 |
91 | # -- Options for HTML output -------------------------------------------------
92 |
93 | # The theme to use for HTML and HTML Help pages. See the documentation for
94 | # a list of builtin themes.
95 | #
96 | # html_theme = 'sphinx_book_theme'
97 | html_theme = "pydata_sphinx_theme"
98 | html_favicon = "../img/qtoolkit_icon.svg"
99 |
100 | # Theme options are theme-specific and customize the look and feel of a theme
101 | # further. For a list of options available for each theme, see the
102 | # documentation.
103 | #
104 | html_theme_options = {
105 | "logo": {
106 | "image_light": "qtoolkit_logo_horizontal.svg",
107 | "image_dark": "qtoolkit_logo_horizontal.svg",
108 | "alt_text": "QToolKit",
109 | },
110 | "collapse_navigation": True,
111 | "announcement": (
112 | "QToolKit is still in beta phase. The API may change at any time."
113 | ),
114 | # "navbar_end": ["theme-switcher", "navbar-icon-links"],
115 | # "navbar_end": ["theme-switcher", "version-switcher", "navbar-icon-links"],
116 | }
117 |
118 | # Add any paths that contain custom static files (such as style sheets) here,
119 | # relative to this directory. They are copied after the builtin static files,
120 | # so a file named "default.css" will overwrite the builtin "default.css".
121 | html_static_path = ["_static", "../img"]
122 |
123 | # Custom sidebar templates, must be a dictionary that maps document names
124 | # to template names.
125 | #
126 | # The default sidebars (for documents that don't match any pattern) are
127 | # defined by theme itself. Builtin themes are using these templates by
128 | # default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
129 | # 'searchbox.html']``.
130 | #
131 | # html_sidebars = {}
132 |
133 |
134 | # -- Options for HTMLHelp output ---------------------------------------------
135 |
136 | html_css_files = ["qtoolkit.css"]
137 | html_title = f"{project} v{version} Manual"
138 | html_last_updated_fmt = "%b %d, %Y"
139 | # html_css_files = ["numpy.css"]
140 | html_context = {"default_mode": "light"}
141 | html_use_modindex = True
142 | html_copy_source = False
143 | html_domain_indices = False
144 | html_file_suffix = ".html"
145 |
146 | # Output file base name for HTML help builder.
147 | htmlhelp_basename = "qtoolkitdoc"
148 |
149 |
150 | # # -- Options for LaTeX output ------------------------------------------------
151 | #
152 | # latex_elements = {
153 | # # The paper size ('letterpaper' or 'a4paper').
154 | # #
155 | # # 'papersize': 'letterpaper',
156 | # # The font size ('10pt', '11pt' or '12pt').
157 | # #
158 | # # 'pointsize': '10pt',
159 | # # Additional stuff for the LaTeX preamble.
160 | # #
161 | # # 'preamble': '',
162 | # # Latex figure (float) alignment
163 | # #
164 | # # 'figure_align': 'htbp',
165 | # }
166 | #
167 | # # Grouping the document tree into LaTeX files. List of tuples
168 | # # (source start file, target name, title,
169 | # # author, documentclass [howto, manual, or own class]).
170 | # latex_documents = [
171 | # # (master_doc, "turbomoleio.tex", "turbomoleio Documentation", author, "manual"),
172 | # ]
173 |
174 |
175 | # # -- Options for manual page output ------------------------------------------
176 | #
177 | # # One entry per manual page. List of tuples
178 | # # (source start file, name, description, authors, manual section).
179 | # man_pages = [(master_doc, "turbomoleio", "turbomoleio Documentation", [author], 1)]
180 | #
181 | #
182 | # # -- Options for Texinfo output ----------------------------------------------
183 | #
184 | # # Grouping the document tree into Texinfo files. List of tuples
185 | # # (source start file, target name, title, author,
186 | # # dir menu entry, description, category)
187 | # texinfo_documents = [
188 | # (
189 | # master_doc,
190 | # "turbomoleio",
191 | # "turbomoleio Documentation",
192 | # author,
193 | # "turbomoleio",
194 | # "One line description of project.",
195 | # "Miscellaneous",
196 | # ),
197 | # ]
198 |
199 |
200 | # -- Extension configuration -------------------------------------------------
201 |
202 | # -- Options for intersphinx extension ---------------------------------------
203 |
204 | # Example configuration for intersphinx: refer to the Python standard library.
205 | # intersphinx_mapping = {"https://docs.python.org/": None}
206 |
207 | # -- Options for todo extension ----------------------------------------------
208 |
209 | # If true, `todo` and `todoList` produce output, else they produce nothing.
210 | todo_include_todos = True
211 |
212 | # To print the content of the docstring of the __init__ method as well.
213 | autoclass_content = "both"
214 |
--------------------------------------------------------------------------------
/doc/source/dev/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Matgenix/qtoolkit/bcb445b903f3cb78295aa7641944e0bade9a3fb8/doc/source/dev/.gitkeep
--------------------------------------------------------------------------------
/doc/source/glossary.rst:
--------------------------------------------------------------------------------
1 | ********
2 | Glossary
3 | ********
4 |
5 | .. glossary::
6 |
7 |
8 | QJob
9 | The representation of a job in the queue.
10 |
11 |
12 | QResources
13 | The description of the resources for a job.
14 |
--------------------------------------------------------------------------------
/doc/source/index.rst:
--------------------------------------------------------------------------------
1 | .. _qtoolkit_docs_mainpage:
2 |
3 | ######################
4 | QToolKit documentation
5 | ######################
6 |
7 | .. toctree::
8 | :maxdepth: 1
9 | :hidden:
10 |
11 | User Guide
12 | API reference
13 | Development
14 | Changelog
15 |
16 |
17 | **Version**: |version|
18 |
19 | QToolKit is an interface to Distributed Resource Management (DRM) systems, e.g. SLURM and PBS, with the aim to enable programmatic control of queuing systems.
20 |
21 |
22 | .. grid:: 1 2 2 2
23 |
24 | .. grid-item-card::
25 | :img-top: ../source/_static/index-images/getting_started.svg
26 |
27 | Getting Started
28 | ^^^^^^^^^^^^^^^
29 |
30 | If you want to get started quickly, check out our quickstart section.
31 | It contains an introduction to QToolKit's main concepts.
32 |
33 | +++
34 |
35 | .. button-ref:: user/quickstart
36 | :expand:
37 | :color: secondary
38 | :click-parent:
39 |
40 | Quickstart
41 |
42 | .. grid-item-card::
43 | :img-top: ../source/_static/index-images/user_guide.svg
44 |
45 | User Guide
46 | ^^^^^^^^^^
47 |
48 | The user guide provides in-depth information on the
49 | key concepts of QToolKit with useful background information and explanation.
50 |
51 | +++
52 |
53 | .. button-ref:: user_guide
54 | :expand:
55 | :color: secondary
56 | :click-parent:
57 |
58 | User Guide
59 |
60 | .. grid-item-card::
61 | :img-top: ../source/_static/index-images/api.svg
62 |
63 | API Reference
64 | ^^^^^^^^^^^^^
65 |
66 | The reference guide contains a detailed description of the functions,
67 | modules, and objects included in QToolKit. The reference describes how the
68 | methods work and which parameters can be used. It assumes that you have an
69 | understanding of the key concepts.
70 |
71 | +++
72 |
73 | .. button-ref:: api
74 | :expand:
75 | :color: secondary
76 | :click-parent:
77 |
78 | API Reference
79 |
80 | .. grid-item-card::
81 | :img-top: ../source/_static/index-images/contributor.svg
82 |
83 | Contributor's Guide
84 | ^^^^^^^^^^^^^^^^^^^
85 |
86 | Want to add to the codebase? Can help add support to an additional DRM system?
87 | The contributing guidelines will guide you through the
88 | process of improving QToolKit.
89 |
90 | +++
91 |
92 | .. button-ref:: devindex
93 | :expand:
94 | :color: secondary
95 | :click-parent:
96 |
97 | To the contributor's guide
98 |
99 | .. This is not really the index page, that is found in
100 | _templates/indexcontent.html The toctree content here will be added to the
101 | top of the template header
102 |
--------------------------------------------------------------------------------
/doc/source/license.rst:
--------------------------------------------------------------------------------
1 | ****************
2 | QToolKit license
3 | ****************
4 |
5 | .. include:: ../../LICENSE
6 | :literal:
7 |
--------------------------------------------------------------------------------
/doc/source/user/index.rst:
--------------------------------------------------------------------------------
1 | .. _user_guide:
2 |
3 | ###################
4 | QToolKit user guide
5 | ###################
6 |
7 | This guide is an overview and explains the important features;
8 | details are found in :ref:`api`.
9 |
10 | .. toctree::
11 | :caption: Getting started
12 | :maxdepth: 1
13 |
14 | whatisqtoolkit
15 | install
16 | quickstart
17 |
18 | .. toctree::
19 | :hidden:
20 | :caption: Extras
21 |
22 | ../glossary
23 | ../license
24 |
--------------------------------------------------------------------------------
/doc/source/user/quickstart.rst:
--------------------------------------------------------------------------------
1 | .. _quickstart:
2 |
3 | ===================
4 | QToolKit quickstart
5 | ===================
6 |
7 | To be added.
8 |
--------------------------------------------------------------------------------
/doc/source/user/whatisqtoolkit.rst:
--------------------------------------------------------------------------------
1 | .. _whatisqtoolkit:
2 |
3 | =================
4 | What is QToolKit?
5 | =================
6 |
7 | To be added.
8 |
--------------------------------------------------------------------------------
/doc_requirements.txt:
--------------------------------------------------------------------------------
1 | # doxygen required, use apt-get or dnf
2 | sphinx>=4.5.0
3 | numpydoc==1.4
4 | pydata-sphinx-theme==0.13.3
5 | sphinx-design
6 | sphinx-apidoc
7 | ipython!=8.1.0
8 | scipy
9 | matplotlib
10 | pandas
11 | breathe
12 |
13 | # needed to build release notes
14 | towncrier
15 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 |
2 | [project]
3 | name = "qtoolkit"
4 | description = "QToolKit is a python wrapper interfacing with job queues (e.g. PBS, SLURM, ...)."
5 | readme = "README.md"
6 | keywords = []
7 | license = { text = "modified BSD" }
8 | authors = [
9 | { name = "David Waroquiers", email = "david.waroquiers@matgenix.com" },
10 | ]
11 | dynamic = ["version"]
12 | classifiers = [
13 | "Development Status :: 5 - Production/Stable",
14 | "Intended Audience :: Information Technology",
15 | "Intended Audience :: Science/Research",
16 | "Intended Audience :: System Administrators",
17 | "Operating System :: OS Independent",
18 | "Programming Language :: Python :: 3",
19 | "Programming Language :: Python :: 3.10",
20 | "Programming Language :: Python :: 3.11",
21 | "Programming Language :: Python :: 3.12",
22 | "Programming Language :: Python :: 3.9",
23 | "Topic :: Other/Nonlisted Topic",
24 | "Topic :: Scientific/Engineering",
25 | ]
26 | requires-python = ">=3.9"
27 | dependencies = []
28 |
29 | [project.optional-dependencies]
30 | dev = ["pre-commit>=3.0.0"]
31 | tests = [
32 | "monty>=2022.9.9",
33 | "pytest-cov==4.0.0",
34 | "pytest-mock==3.10.0",
35 | "pytest==7.2.1",
36 | "ruamel.yaml",
37 | ]
38 | maintain = ["git-changelog>=0.6"]
39 | docs = [
40 | "ipython!=8.1.0",
41 | "matplotlib",
42 | "pydata-sphinx-theme",
43 | "qtoolkit[remote,msonable]",
44 | "sphinx",
45 | "sphinx_design",
46 | ]
47 | strict = []
48 | remote = ["fabric>=3.0.0"]
49 | msonable = ["monty>=2022.9.9"]
50 |
51 | [project.urls]
52 | homepage = "https://matgenix.github.io/qtoolkit/"
53 | repository = "https://github.com/matgenix/qtoolkit"
54 | documentation = "https://matgenix.github.io/qtoolkit/"
55 | changelog = "https://matgenix.github.io/qtoolkit/changelog"
56 |
57 | [tool.setuptools.package-data]
58 | qtoolkit = ["py.typed"]
59 |
60 | [build-system]
61 | requires = ["setuptools >= 42", "versioningit ~= 1.0", "wheel"]
62 | build-backend = "setuptools.build_meta"
63 |
64 | [tool.versioningit.vcs]
65 | method = "git"
66 | default-tag = "0.0.1"
67 |
68 | [tool.ruff]
69 | target-version = "py39"
70 |
71 | [tool.ruff.lint]
72 | select = ["ALL"]
73 | ignore = [
74 | "ANN", # TODO fix all ANN errors
75 | "ARG", # TODO fix unused method argument
76 | "BLE001",
77 | "C901", # function too complex
78 | "COM812", # trailing comma missing
79 | "D",
80 | "D205",
81 | "DTZ", # datetime-tz-now
82 | "E501", # TODO fix line too long
83 | "EM", # exception message must not use f-string literal
84 | "ERA001", # found commented out code
85 | "FA100", # TODO fix FA errors
86 | "FBT001",
87 | "FBT002",
88 | "FIX002",
89 | "G004", # logging uses fstring
90 | "ISC001",
91 | "N802", # TODO maybe fix these
92 | "PERF203", # try-except-in-loop
93 | "PGH003",
94 | "PLR0912", # too many branches
95 | "PLR0913", # too many arguments
96 | "PLR0915", # too many statements
97 | "PLR2004", # magic value used in comparison
98 | "PT004", # pytest-missing-fixture-name-underscore
99 | "PT006", # pytest-parametrize-names-wrong-type
100 | "PT013", # pytest-incorrect-pytest-import
101 | "PTH", # prefer Pathlib to os.path
102 | "RUF013", # implicit-optional
103 | "SIM105", # contextlib.suppress(Exception) instead of try-except
104 | "T201", # print statement
105 | "TD", # TODOs
106 | "TRY003", # long message outside exception class
107 | ]
108 | pydocstyle.convention = "numpy"
109 | isort.known-first-party = ["qtoolkit"]
110 | isort.split-on-trailing-comma = false
111 |
112 | [tool.ruff.format]
113 | docstring-code-format = true
114 |
115 | [tool.ruff.lint.per-file-ignores]
116 | "__init__.py" = ["F401"]
117 | "**/tests/*" = ["INP001", "S101", "SLF001"]
118 |
119 | [tool.mypy]
120 | ignore_missing_imports = true
121 | no_strict_optional = true
122 |
123 | [tool.pytest.ini_options]
124 | filterwarnings = [
125 | "ignore:.*POTCAR.*:UserWarning",
126 | "ignore:.*input structure.*:UserWarning",
127 | "ignore:.*is not gzipped.*:UserWarning",
128 | "ignore:.*magmom.*:UserWarning",
129 | "ignore::DeprecationWarning",
130 | ]
131 |
132 | [tool.coverage.run]
133 | include = ["src/*"]
134 | parallel = true
135 | branch = true
136 |
137 | [tool.coverage.paths]
138 | source = ["src/"]
139 |
140 | [tool.coverage.report]
141 | skip_covered = true
142 | show_missing = true
143 | exclude_lines = [
144 | '# pragma: no cover',
145 | '^\s*@overload( |$)',
146 | '^\s*assert False(,|$)',
147 | 'if typing.TYPE_CHECKING:',
148 | ]
149 |
--------------------------------------------------------------------------------
/src/qtoolkit/__init__.py:
--------------------------------------------------------------------------------
1 | from qtoolkit._version import __version__
2 | from qtoolkit.core.data_objects import QJob, QJobInfo, QResources, QState, QSubState
3 |
--------------------------------------------------------------------------------
/src/qtoolkit/_version.py:
--------------------------------------------------------------------------------
from importlib.metadata import version

# Single source of truth for the package version: read from the installed
# package metadata (populated at build time by versioningit from the git tag,
# see [tool.versioningit.vcs] in pyproject.toml).
__version__ = version("qtoolkit")
4 |
--------------------------------------------------------------------------------
/src/qtoolkit/core/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Matgenix/qtoolkit/bcb445b903f3cb78295aa7641944e0bade9a3fb8/src/qtoolkit/core/__init__.py
--------------------------------------------------------------------------------
/src/qtoolkit/core/base.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 |
# Optional monty integration: when monty is installed the qtoolkit base
# classes are MSONable (JSON-serializable); otherwise plain object/Enum
# bases are used. The names supercls/enum_superclses are consumed below
# and by QTKEnum.
try:
    from monty.json import MSONable
except ModuleNotFoundError:
    supercls = object
    enum_superclses = (Enum,)  # type: ignore
else:
    supercls = MSONable
    enum_superclses = (MSONable, Enum)


class QTKObject(supercls):  # type: ignore
    """Common base class for qtoolkit objects; MSONable when monty is available."""
16 |
17 |
class QTKEnum(*enum_superclses):  # type: ignore
    """Base Enum for qtoolkit; MSONable when monty is installed."""

    @classmethod
    def _validate_monty(cls, __input_value):
        """
        Override the original pydantic Validator for MSONable pattern.
        If not would not allow to deserialize as a standard Enum in pydantic,
        that just needs the value.
        """
        try:
            # Return the validated object: previously the result of the
            # parent validation was discarded, so a successful validation
            # yielded None instead of the deserialized value.
            return super()._validate_monty(__input_value)
        except ValueError as exc:
            # Fall back to building the member directly from its value,
            # as a plain Enum would.
            try:
                return cls(__input_value)
            except Exception:
                raise exc  # noqa: B904
33 |
--------------------------------------------------------------------------------
/src/qtoolkit/core/exceptions.py:
--------------------------------------------------------------------------------
class QTKError(Exception):
    """Root of the exception hierarchy for all errors raised by qtoolkit."""
3 |
4 |
class CommandFailedError(QTKError):
    """
    Raised when executing a command fails, typically signalled by a
    non-zero return code.
    """
10 |
11 |
class OutputParsingError(QTKError):
    """
    Raised when the output of a command cannot be parsed correctly.
    """
17 |
18 |
class UnsupportedResourcesError(QTKError):
    """
    Raised when the requested resources are not supported by qtoolkit
    for the selected scheduler.
    """
24 |
--------------------------------------------------------------------------------
/src/qtoolkit/host/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Matgenix/qtoolkit/bcb445b903f3cb78295aa7641944e0bade9a3fb8/src/qtoolkit/host/__init__.py
--------------------------------------------------------------------------------
/src/qtoolkit/host/base.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import abc
4 | from dataclasses import dataclass
5 | from typing import TYPE_CHECKING
6 |
7 | from qtoolkit.core.base import QTKObject
8 |
9 | if TYPE_CHECKING:
10 | from pathlib import Path
11 |
12 |
@dataclass
class HostConfig(QTKObject):
    """Configuration common to all hosts."""

    # Base directory on the host (str or Path). Presumably the directory
    # under which job files live — confirm against concrete host usage.
    root_dir: str | Path
16 |
17 |
class BaseHost(QTKObject):
    """Base Host class.

    A host abstracts where commands are executed and files are written
    (e.g. the local machine or a remote machine reached over SSH).
    """

    # def __init__(self, config, user):
    def __init__(self, config: HostConfig | None = None) -> None:
        self.config = config

    # self.user = user

    @abc.abstractmethod
    def execute(
        self,
        command: str | list[str],
        workdir: str | Path | None = None,
        # stdin=None,
        # stdout=None,
        # stderr=None,
    ):
        """Execute the given command on the host.

        Parameters
        ----------
        command: str or list of str
            Command to execute, as a str or list of str.
        workdir: str or None
            path where the command will be executed.

        Returns
        -------
        Implementation-defined. The concrete hosts in this package
        (e.g. LocalHost) return a ``(stdout, stderr, exit_code)`` tuple.
        """
        # TODO: define a common error that is raised or a returned in case the procedure
        # fails to avoid handling different kind of errors for the different hosts
        raise NotImplementedError

    @abc.abstractmethod
    def mkdir(self, directory, recursive: bool = True, exist_ok: bool = True) -> bool:
        """Create directory on the host.

        Parameters
        ----------
        directory
            Path of the directory to create.
        recursive
            If True, intermediate directories are created as needed.
        exist_ok
            If True, an already existing directory is not an error.

        Returns
        -------
        bool
            True if the directory was created (or already exists, with
            exist_ok), False otherwise.
        """
        # TODO: define a common error that is raised or a returned in case the procedure
        # fails to avoid handling different kind of errors for the different hosts
        raise NotImplementedError

    @abc.abstractmethod
    def write_text_file(self, filepath, content):
        """Write ``content`` to the file at ``filepath`` on the host."""
        # TODO: define a common error that is raised or a returned in case the procedure
        # fails to avoid handling different kind of errors for the different hosts
        raise NotImplementedError
74 |
--------------------------------------------------------------------------------
/src/qtoolkit/host/local.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import subprocess
4 | from pathlib import Path
5 |
6 | from qtoolkit.host.base import BaseHost
7 | from qtoolkit.utils import cd
8 |
9 |
class LocalHost(BaseHost):
    """Host that runs commands directly on the local machine."""

    def execute(self, command: str | list[str], workdir: str | Path | None = None):
        """Execute the given command on the host.

        Note that the command is executed with shell=True, so commands can
        be exposed to command injection. Consider whether to escape part of
        the input if it comes from external users.

        Parameters
        ----------
        command: str or list of str
            Command to execute, as a str or list of str

        Returns
        -------
        stdout : str
            Standard output of the command
        stderr : str
            Standard error of the command
        exit_code : int
            Exit code of the command.
        """
        cmd = " ".join(command) if isinstance(command, (list, tuple)) else command
        run_dir = str(workdir) if workdir else Path.cwd()
        with cd(run_dir):
            completed = subprocess.run(cmd, capture_output=True, shell=True, check=False)  # noqa: S602
            return completed.stdout.decode(), completed.stderr.decode(), completed.returncode

    def mkdir(self, directory, recursive=True, exist_ok=True) -> bool:
        """Create a local directory; return True on success, False on OSError."""
        target = Path(directory)
        try:
            target.mkdir(parents=recursive, exist_ok=exist_ok)
        except OSError:
            return False
        return True

    def write_text_file(self, filepath, content) -> None:
        """Write ``content`` to the local file ``filepath``."""
        destination = Path(filepath)
        destination.write_text(content)
50 |
--------------------------------------------------------------------------------
/src/qtoolkit/host/remote.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import io
4 | from dataclasses import dataclass, field
5 | from typing import TYPE_CHECKING
6 |
7 | import fabric
8 |
9 | from qtoolkit.host.base import BaseHost, HostConfig
10 |
11 | if TYPE_CHECKING:
12 | from pathlib import Path
13 |
14 | # from fabric import Connection, Config
15 |
16 |
@dataclass
class RemoteConfig(HostConfig):
    """Configuration of a fabric/SSH connection to a remote host."""

    # Fabric's Connection init args:
    host: str
    user: str | None = None
    port: int | None = None
    # Here we could just provide a config_filename
    config: fabric.Config = field(default_factory=fabric.Config)
    gateway: fabric.Connection | str | None = None
    forward_agent: bool | None = None
    connect_timeout: int | None = None
    # Extra keyword arguments forwarded to paramiko's SSHClient.connect
    # (see the reference list below).
    connect_kwargs: dict | None = None
    inline_ssh_env: bool = True
30 |
31 |
32 | # connect_kwargs in paramiko:
33 | # hostname,
34 | # port = SSH_PORT,
35 | # username = None,
36 | # password = None,
37 | # pkey = None,
38 | # key_filename = None,
39 | # timeout = None,
40 | # allow_agent = True,
41 | # look_for_keys = True,
42 | # compress = False,
43 | # sock = None,
44 | # gss_auth = False,
45 | # gss_kex = False,
46 | # gss_deleg_creds = True,
47 | # gss_host = None,
48 | # banner_timeout = None,
49 | # auth_timeout = None,
50 | # gss_trust_dns = True,
51 | # passphrase=None,
52 | # disabled_algorithms=None,
53 | """
54 | Connect to an SSH server and authenticate to it. The server's host key
55 | is checked against the system host keys (see `load_system_host_keys`)
56 | and any local host keys (`load_host_keys`). If the server's hostname
57 | is not found in either set of host keys, the missing host key policy
58 | is used (see `set_missing_host_key_policy`). The default policy is
59 | to reject the key and raise an `.SSHException`.
60 |
61 | Authentication is attempted in the following order of priority:
62 |
63 | - The ``pkey`` or ``key_filename`` passed in (if any)
64 |
65 | - ``key_filename`` may contain OpenSSH public certificate paths
66 | as well as regular private-key paths; when files ending in
67 | ``-cert.pub`` are found, they are assumed to match a private
68 | key, and both components will be loaded. (The private key
69 | itself does *not* need to be listed in ``key_filename`` for
70 | this to occur - *just* the certificate.)
71 |
72 | - Any key we can find through an SSH agent
73 | - Any "id_rsa", "id_dsa" or "id_ecdsa" key discoverable in
74 | ``~/.ssh/``
75 |
76 | - When OpenSSH-style public certificates exist that match an
77 | existing such private key (so e.g. one has ``id_rsa`` and
78 | ``id_rsa-cert.pub``) the certificate will be loaded alongside
79 | the private key and used for authentication.
80 |
81 | - Plain username/password auth, if a password was given
82 |
83 | If a private key requires a password to unlock it, and a password is
84 | passed in, that password will be used to attempt to unlock the key.
85 |
86 | :param str hostname: the server to connect to
87 | :param int port: the server port to connect to
88 | :param str username:
89 | the username to authenticate as (defaults to the current local
90 | username)
91 | :param str password:
92 | Used for password authentication; is also used for private key
93 | decryption if ``passphrase`` is not given.
94 | :param str passphrase:
95 | Used for decrypting private keys.
96 | :param .PKey pkey: an optional private key to use for authentication
97 | :param str key_filename:
98 | the filename, or list of filenames, of optional private key(s)
99 | and/or certs to try for authentication
100 | :param float timeout:
101 | an optional timeout (in seconds) for the TCP connect
102 | :param bool allow_agent:
103 | set to False to disable connecting to the SSH agent
104 | :param bool look_for_keys:
105 | set to False to disable searching for discoverable private key
106 | files in ``~/.ssh/``
107 | :param bool compress: set to True to turn on compression
108 | :param socket sock:
109 | an open socket or socket-like object (such as a `.Channel`) to use
110 | for communication to the target host
111 | :param bool gss_auth:
112 | ``True`` if you want to use GSS-API authentication
113 | :param bool gss_kex:
114 | Perform GSS-API Key Exchange and user authentication
115 | :param bool gss_deleg_creds: Delegate GSS-API client credentials or not
:param str gss_host:
    The target's name in the Kerberos database. Default: hostname
118 | :param bool gss_trust_dns:
119 | Indicates whether or not the DNS is trusted to securely
120 | canonicalize the name of the host being connected to (default
121 | ``True``).
122 | :param float banner_timeout: an optional timeout (in seconds) to wait
123 | for the SSH banner to be presented.
124 | :param float auth_timeout: an optional timeout (in seconds) to wait for
125 | an authentication response.
126 | :param dict disabled_algorithms:
127 | an optional dict passed directly to `.Transport` and its keyword
128 | argument of the same name."""
129 |
130 |
class RemoteHost(BaseHost):
    """
    Execute commands on a remote host.
    For some commands assumes the remote can run unix.
    """

    def __init__(self, config: RemoteConfig):
        self.config = config
        # NOTE(review): only host/port/user/connect_kwargs from RemoteConfig
        # are forwarded to fabric here; the other config fields are unused.
        self._connection = fabric.Connection(
            host=config.host,
            port=config.port,
            user=config.user,
            connect_kwargs=config.connect_kwargs,
        )

    @property
    def connection(self):
        """The underlying fabric Connection object."""
        return self._connection

    def execute(self, command: str | list[str], workdir: str | Path | None = None):
        """Execute the given command on the remote host.

        Parameters
        ----------
        command: str or list of str
            Command to execute, as a str or list of str.
        workdir: str or None
            path where the command will be executed.

        Returns
        -------
        stdout : str
            Standard output of the command
        stderr : str
            Standard error of the command
        exit_code : int
            Exit code of the command.
        """
        cmd = " ".join(command) if isinstance(command, (list, tuple)) else command

        # TODO: check here if we use the context manager. What happens if we provide the
        # connection from outside (not through a config) and we want to keep it alive ?

        # TODO: check if this works:
        remote_dir = str(workdir) if workdir else "."
        with self.connection.cd(remote_dir):
            result = self.connection.run(cmd, hide=True, warn=True)

        return result.stdout, result.stderr, result.exited

    def mkdir(self, directory, recursive: bool = True, exist_ok: bool = True) -> bool:
        """Create directory on the host; return True on success."""
        # exist_ok is not used here: "mkdir -p" already tolerates existing dirs.
        parts = ["mkdir"]
        if recursive:
            parts.append("-p")
        parts.append(str(directory))
        try:
            _stdout, _stderr, exit_code = self.execute(" ".join(parts))
        except Exception:
            return False
        return exit_code == 0

    def write_text_file(self, filepath, content):
        """Write content to a file on the host."""
        buffer = io.StringIO(content)
        self.connection.put(buffer, str(filepath))
200 |
--------------------------------------------------------------------------------
/src/qtoolkit/io/__init__.py:
--------------------------------------------------------------------------------
1 | from qtoolkit.io.base import BaseSchedulerIO
2 | from qtoolkit.io.pbs import PBSIO, PBSState
3 | from qtoolkit.io.sge import SGEIO, SGEState
4 | from qtoolkit.io.shell import ShellIO, ShellState
5 | from qtoolkit.io.slurm import SlurmIO, SlurmState
6 |
# Map scheduler identifier strings to their SchedulerIO implementations.
scheduler_mapping = {"slurm": SlurmIO, "pbs": PBSIO, "sge": SGEIO, "shell": ShellIO}
8 |
--------------------------------------------------------------------------------
/src/qtoolkit/io/base.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import abc
4 | import difflib
5 | import shlex
6 | from dataclasses import fields
7 | from string import Template
8 | from typing import TYPE_CHECKING
9 |
10 | from qtoolkit.core.base import QTKObject
11 | from qtoolkit.core.data_objects import CancelResult, QJob, QResources, SubmissionResult
12 | from qtoolkit.core.exceptions import UnsupportedResourcesError
13 |
14 | if TYPE_CHECKING:
15 | from pathlib import Path
16 |
17 |
class QTemplate(Template):
    """String template using ``$$`` as the variable delimiter."""

    delimiter = "$$"

    def get_identifiers(self) -> list:
        """
        Returns a list of the valid identifiers in the template,
        in the order they first appear, ignoring any invalid identifiers.
        Imported from implementation in python 3.11 for backward compatibility.
        """
        found: list = []
        for match in self.pattern.finditer(self.template):
            name = match.group("named") or match.group("braced")
            if name is not None:
                # Record each identifier only once, preserving first-seen order.
                if name not in found:
                    found.append(name)
                continue
            if (
                match.group("invalid") is None
                and match.group("escaped") is None
            ):  # pragma: no cover - no complex patterns, part of python stdlib 3.11
                # All capture groups empty: the pattern contains a group
                # this implementation does not know about.
                raise ValueError("Unrecognized named group in pattern", self.pattern)
        return found
42 |
43 |
class BaseSchedulerIO(QTKObject, abc.ABC):
    """Base class for job queues.

    Subclasses implement the scheduler-specific pieces: the header
    template, the submit/cancel/query commands and the parsing of the
    scheduler outputs.
    """

    # Template for the submission-script header; variables use the $$
    # delimiter (see QTemplate).
    header_template: str

    # Shell commands used to submit and cancel jobs (e.g. sbatch/scancel).
    SUBMIT_CMD: str | None
    CANCEL_CMD: str | None

    shebang: str = "#!/bin/bash"

    sanitize_job_name: bool = False

    def get_submission_script(
        self,
        commands: str | list[str],
        options: dict | QResources | None = None,
    ) -> str:
        """Get the submission script for the given commands and options."""
        script_blocks = [self.shebang]
        if header := self.generate_header(options):
            script_blocks.append(header)

        run_commands = self.generate_run_commands(commands)
        script_blocks.append(run_commands)

        if footer := self.generate_footer():
            script_blocks.append(footer)

        return "\n".join(script_blocks)

    def generate_header(self, options: dict | QResources | None) -> str:
        """Generate the submission-script header from the header template.

        Unknown keys in ``options`` raise a ValueError (suggesting close
        matches); template variables left unset are stripped from the
        final header.
        """
        # needs info from self.meta_info (email, job name [also execution])
        # queuing_options (priority, account, qos and submit as hold)
        # execution (rerunnable)
        # resources (nodes, cores, memory, time, [gpus])
        # default values for (almost) everything in the object ?

        options = options or {}

        if isinstance(options, QResources):
            options = self.check_convert_qresources(options)

        template = QTemplate(self.header_template)

        # check that all the options are present in the template
        keys = set(options.keys())
        all_identifiers = template.get_identifiers()
        extra = keys.difference(all_identifiers)
        if extra:
            close_matches = {}
            for extra_val in extra:
                m = difflib.get_close_matches(
                    extra_val, all_identifiers, n=3, cutoff=0.65
                )
                if m:
                    close_matches[extra_val] = m
            msg = (
                f"The following keys are not present in the template: {', '.join(sorted(extra))}. "
                f"Check the template in {type(self).__module__}.{type(self).__qualname__}.header_template."
            )
            if close_matches:
                msg += " Possible replacements:"
                for extra_val in sorted(close_matches):
                    replacements = " or ".join(
                        f"'{m}'" for m in close_matches[extra_val]
                    )
                    msg += f" {replacements} instead of '{extra_val}'."
            raise ValueError(msg)

        options = self.sanitize_options(options)
        unclean_header = template.safe_substitute(options)
        # Remove lines with leftover $$.
        clean_header = [line for line in unclean_header.split("\n") if "$$" not in line]

        return "\n".join(clean_header)

    def generate_run_commands(self, commands: list[str] | str) -> str:
        """Join the commands into the body of the submission script."""
        if isinstance(commands, list):
            commands = "\n".join(commands)

        return commands

    def generate_footer(self) -> str:
        """Footer of the submission script; empty by default."""
        return ""

    def generate_ids_list(
        self, jobs: list[QJob | int | str] | None
    ) -> list[str] | None:
        """Convert a list of jobs (QJob objects or bare ids) to string ids.

        Returns None when ``jobs`` is None (annotation fixed accordingly).
        """
        if jobs is None:
            return None
        return [str(j.job_id) if isinstance(j, QJob) else str(j) for j in jobs]

    def get_submit_cmd(self, script_file: str | Path | None = "submit.script") -> str:
        """
        Get the command used to submit a given script to the queue.

        Parameters
        ----------
        script_file: (str) path of the script file to use.
        """
        script_file = script_file or ""
        return f"{self.SUBMIT_CMD} {script_file}"

    @abc.abstractmethod
    def parse_submit_output(self, exit_code, stdout, stderr) -> SubmissionResult:
        """Parse the output of the submit command into a SubmissionResult."""

    def get_cancel_cmd(self, job: QJob | int | str) -> str:
        """
        Get the command used to cancel a given job.

        Parameters
        ----------
        job: (str) job to be cancelled.
        """
        job_id = job.job_id if isinstance(job, QJob) else job
        if job_id is None or job_id == "":
            received = None if job_id is None else "'' (empty string)"
            raise ValueError(
                f"The id of the job to be cancelled should be defined. Received: {received}"
            )
        return f"{self.CANCEL_CMD} {job_id}"

    @abc.abstractmethod
    def parse_cancel_output(self, exit_code, stdout, stderr) -> CancelResult:
        """Parse the output of the cancel command into a CancelResult."""

    def get_job_cmd(self, job: QJob | int | str) -> str:
        """Get the command used to query the info of a single job."""
        job_id = self.generate_ids_list([job])[0]
        # Quote the id so it is passed safely to the shell. The quoted value
        # was previously discarded (bare expression), leaving the id unquoted;
        # now assigned, consistent with get_jobs_list_cmd quoting the user.
        job_id = shlex.quote(job_id)
        return self._get_job_cmd(job_id)

    @abc.abstractmethod
    def _get_job_cmd(self, job_id: str) -> str:
        """Scheduler-specific command to query a single (quoted) job id."""

    @abc.abstractmethod
    def parse_job_output(self, exit_code, stdout, stderr) -> QJob | None:
        """Parse the output of the job query command into a QJob (or None)."""

    def check_convert_qresources(self, resources: QResources) -> dict:
        """
        Converts a Qresources instance to a dict that will be used to fill in the
        header of the submission script.
        Also checks that passed values are declared to be handled by the corresponding
        subclass.
        """
        not_empty = set()
        for resource_field in fields(resources):
            if getattr(resources, resource_field.name):
                not_empty.add(resource_field.name)

        unsupported_options = not_empty.difference(self.supported_qresources_keys)

        if unsupported_options:
            msg = f"Keys not supported: {', '.join(sorted(unsupported_options))}"
            raise UnsupportedResourcesError(msg)

        return self._convert_qresources(resources)

    @abc.abstractmethod
    def _convert_qresources(self, resources: QResources) -> dict:
        """
        Converts a QResources instance to a dict that will be used to fill in the
        header of the submission script.
        A subclass does not strictly need to support all the options available in
        QResources. For this reason a list of supported attributes should be
        maintained and the supported attributes in the implementation of this
        method should match the list of values defined in supported_qresources_keys.
        """

    @property
    def supported_qresources_keys(self) -> list:
        """
        List of attributes of QResources that are correctly handled by the
        _convert_qresources method. It is used to validate that the user
        does not pass an unsupported value, expecting to have an effect.
        """
        return []

    def get_jobs_list_cmd(
        self, jobs: list[QJob | int | str] | None, user: str | None
    ) -> str:
        """Get the command listing the given jobs and/or a user's jobs."""
        job_ids = self.generate_ids_list(jobs)
        if user:
            # Quote the user name so it is passed safely to the shell.
            user = shlex.quote(user)
        return self._get_jobs_list_cmd(job_ids, user)

    @abc.abstractmethod
    def _get_jobs_list_cmd(
        self, job_ids: list[str] | None = None, user: str | None = None
    ) -> str:
        """Scheduler-specific command listing jobs by ids and/or user."""

    @abc.abstractmethod
    def parse_jobs_list_output(self, exit_code, stdout, stderr) -> list[QJob]:
        """Parse the output of the jobs list command into a list of QJob."""

    def sanitize_options(self, options):
        """
        A function to sanitize the values in the options used to generate the
        header. Subclasses should implement their own sanitizations.
        """
        return options
253 |
--------------------------------------------------------------------------------
/src/qtoolkit/io/pbs.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import re
4 | from typing import ClassVar
5 |
6 | from qtoolkit.core.data_objects import QJob, QJobInfo, QState, QSubState
7 | from qtoolkit.core.exceptions import OutputParsingError
8 | from qtoolkit.io.pbs_base import PBSIOBase
9 |
10 | # States in PBS from qstat's man.
11 | # B Array job: at least one subjob has started.
12 | #
13 | # E Job is exiting after having run.
14 | #
15 | # F Job is finished.
16 | #
17 | # H Job is held.
18 | #
19 | # M Job was moved to another server.
20 | #
21 | # Q Job is queued.
22 | #
23 | # R Job is running.
24 | #
25 | # S Job is suspended.
26 | #
27 | # T Job is being moved to new location.
28 | #
29 | # U Cycle-harvesting job is suspended due to keyboard activity.
30 | #
31 | # W Job is waiting for its submitter-assigned start time to be reached.
32 | #
33 | # X Subjob has completed execution or has been deleted.
34 |
35 |
class PBSState(QSubState):
    """Job states reported by PBS ``qstat`` (single-letter codes, see the
    state list from qstat's man page reproduced above)."""

    ARRAY_RUNNING = "B"
    EXITING = "E"
    FINISHED = "F"
    HELD = "H"
    MOVED = "M"
    QUEUED = "Q"
    RUNNING = "R"
    SUSPENDED = "S"
    TRANSITING = "T"
    SUSPENDED_KEYBOARD = "U"
    WAITING = "W"
    ARRAY_FINISHED = "X"

    @property
    def qstate(self) -> QState:
        # Map this PBS-specific sub-state to the scheduler-agnostic QState.
        return _STATUS_MAPPING[self]  # type: ignore
53 |
54 |
# Mapping from each PBS sub-state to the generic QState used throughout
# qtoolkit (consumed by PBSState.qstate).
_STATUS_MAPPING = {
    PBSState.ARRAY_RUNNING: QState.RUNNING,
    PBSState.EXITING: QState.RUNNING,
    PBSState.FINISHED: QState.DONE,
    PBSState.HELD: QState.QUEUED_HELD,
    PBSState.MOVED: QState.REQUEUED,
    PBSState.QUEUED: QState.QUEUED,
    PBSState.RUNNING: QState.RUNNING,
    PBSState.SUSPENDED: QState.SUSPENDED,
    PBSState.TRANSITING: QState.REQUEUED,
    PBSState.SUSPENDED_KEYBOARD: QState.SUSPENDED,
    PBSState.WAITING: QState.QUEUED,
    PBSState.ARRAY_FINISHED: QState.DONE,
}
69 |
70 |
71 | class PBSIO(PBSIOBase):
72 | header_template: str = """
73 | #PBS -q $${queue}
74 | #PBS -N $${job_name}
75 | #PBS -A $${account}
76 | #PBS -l $${select}
77 | #PBS -l walltime=$${walltime}
78 | #PBS -l model=$${model}
79 | #PBS -l place=$${place}
80 | #PBS -W group_list=$${group_list}
81 | #PBS -M $${mail_user}
82 | #PBS -m $${mail_type}
83 | #PBS -o $${qout_path}
84 | #PBS -e $${qerr_path}
85 | #PBS -p $${priority}
86 | #PBS -r $${rerunnable}
87 | #PBS -J $${array}
88 | $${qverbatim}"""
89 |
90 | SUBMIT_CMD: str | None = "qsub"
91 | CANCEL_CMD: str | None = "qdel"
92 | system_name: str = "PBS"
93 | default_unit: str = "mb"
94 | power_labels: ClassVar[dict] = {"kb": 0, "mb": 1, "gb": 2, "tb": 3}
95 | _qresources_mapping: ClassVar[dict] = {
96 | "queue_name": "queue",
97 | "job_name": "job_name",
98 | "account": "account",
99 | "priority": "priority",
100 | "output_filepath": "qout_path",
101 | "error_filepath": "qerr_path",
102 | "project": "group_list",
103 | }
104 |
105 | def extract_job_id(self, stdout):
106 | return stdout.strip()
107 |
108 | def extract_job_id_from_cancel(self, stderr):
109 | # PBS doesn't return the job ID if successfully canceled, so return None
110 | return None
111 |
112 | def _get_jobs_list_cmd(
113 | self, job_ids: list[str] | None = None, user: str | None = None
114 | ) -> str:
115 | if user and job_ids:
116 | self._check_user_and_job_ids_conflict()
117 |
118 | command = self._get_qstat_base_command()
119 |
120 | if user:
121 | command.append(f"-u {user}")
122 |
123 | if job_ids:
124 | job_ids_str = " ".join(job_ids)
125 | command.append(self._get_job_ids_flag(job_ids_str))
126 |
127 | return " ".join(command)
128 |
129 | def parse_job_output(self, exit_code, stdout, stderr) -> QJob | None:
130 | out = self.parse_jobs_list_output(exit_code, stdout, stderr)
131 | if out:
132 | return out[0]
133 | return None
134 |
135 | def _get_qstat_base_command(self) -> list[str]:
136 | return ["qstat", "-f", "-w"]
137 |
138 | def _get_job_cmd(self, job_id: str):
139 | return f"{' '.join(self._get_qstat_base_command())} {job_id}"
140 |
    def _get_job_ids_flag(self, job_ids_str: str) -> str:
        # PBS qstat accepts job ids as plain positional arguments, so no
        # extra flag is prepended here.
        return job_ids_str
143 |
144 | def parse_jobs_list_output(self, exit_code, stdout, stderr) -> list[QJob]:
145 | if isinstance(stdout, bytes):
146 | stdout = stdout.decode()
147 | if isinstance(stderr, bytes):
148 | stderr = stderr.decode()
149 |
150 | # if some jobs of the list do not exist the exit code is not zero, but
151 | # the data for other jobs is still present. Some the exit code is ignored here
152 |
153 | # The error messages are included in the stderr and could be of the form:
154 | # qstat: Unknown Job Id 10000.c2cf5fbe1102
155 | # qstat: 1008.c2cf5fbe1102 Job has finished, use -x or -H to
156 | # obtain historical job information
157 | # TODO raise if these two kinds of error are not present and exit_code != 0?
158 |
159 | # Split by the beginning of "Job Id:" and iterate on the different chunks.
160 | # Matching the beginning of the line to avoid problems in case the "Job Id"
161 | # string is present elsewhere.
162 | jobs_chunks = re.split(r"^\s*Job Id: ", stdout, flags=re.MULTILINE)
163 |
164 | # regex to split the key-values pairs separated by " = "
165 | # Explanation:
166 | # - \s*([A-Za-z_.]+)\s+=\s+ matches the key in the key-value pair,
167 | # allowing for leading and trailing whitespace before and after the
168 | # equals sign, and allowing for a dot in the key.
169 | # - ([\s\S]*?) matches the value in the key-value pair, allowing for any
170 | # character including newlines.
171 | # - (?=\n\s*[A-Za-z_.]+\s+=|\Z) is a positive lookahead that matches a
172 | # newline followed by a key with optional leading and trailing
173 | # whitespace and an equals sign or the end of the string,
174 | # without including the lookahead match in the result.
175 | # The key_pattern is separated in case needs to be updated.
176 | key_pattern = r"[A-Za-z_.]+"
177 | values_regex = re.compile(
178 | rf"\s*({key_pattern})\s+=\s+([\s\S]*?)(?=\n\s*{key_pattern}\s+=|\Z)"
179 | )
180 |
181 | jobs_list = []
182 | for chunk in jobs_chunks:
183 | chunk = chunk.strip() # noqa: PLW2901
184 | if not chunk:
185 | continue
186 |
187 | # first line is the id:
188 | job_id, chunk_data = chunk.split("\n", 1)
189 | job_id = job_id.strip()
190 | results = values_regex.findall(chunk_data)
191 | if not results:
192 | continue
193 | data = dict(results)
194 |
195 | qjob = QJob()
196 | qjob.job_id = job_id
197 |
198 | job_state_string = data["job_state"]
199 |
200 | try:
201 | pbs_job_state = PBSState(job_state_string)
202 | except ValueError as exc:
203 | msg = f"Unknown job state {job_state_string} for job id {qjob.job_id}"
204 | raise OutputParsingError(msg) from exc
205 | qjob.sub_state = pbs_job_state
206 | qjob.state = pbs_job_state.qstate
207 |
208 | qjob.username = data["Job_Owner"]
209 |
210 | info = QJobInfo()
211 |
212 | try:
213 | info.nodes = int(data.get("Resource_List.nodect"))
214 | except ValueError:
215 | info.nodes = None
216 |
217 | try:
218 | info.cpus = int(data.get("Resource_List.ncpus"))
219 | except ValueError:
220 | info.cpus = None
221 |
222 | try:
223 | info.memory_per_cpu = self._convert_memory_str(
224 | data.get("Resource_List.mem")
225 | )
226 | except OutputParsingError:
227 | info.memory_per_cpu = None
228 |
229 | info.partition = data["queue"]
230 |
231 | # TODO here _convert_time_str can raise. If parsing errors are accepted
232 | # handle differently
233 | info.time_limit = self._convert_str_to_time(
234 | data.get("Resource_List.walltime")
235 | )
236 |
237 | try:
238 | runtime_str = data.get("resources_used.walltime")
239 | if runtime_str:
240 | qjob.runtime = self._convert_str_to_time(runtime_str)
241 | except OutputParsingError:
242 | qjob.runtime = None
243 |
244 | qjob.name = data.get("Job_Name")
245 | qjob.info = info
246 |
247 | # I append to the list of jobs to return
248 | jobs_list.append(qjob)
249 |
250 | return jobs_list
251 |
252 | @staticmethod
253 | def _convert_str_to_time(time_str: str | None):
254 | """
255 | Convert a string in the format used by PBS DD:HH:MM:SS to a number of seconds.
256 | It may contain only H:M:S, only M:S or only S.
257 | """
258 | if not time_str:
259 | return None
260 |
261 | time_split = time_str.split(":")
262 |
263 | # array containing seconds, minutes, hours and days
264 | time = [0] * 4
265 |
266 | try:
267 | for i, v in enumerate(reversed(time_split)):
268 | time[i] = int(v)
269 |
270 | except ValueError as exc:
271 | raise OutputParsingError from exc
272 |
273 | return time[3] * 86400 + time[2] * 3600 + time[1] * 60 + time[0]
274 |
275 | def sanitize_options(self, options):
276 | if "job_name" in options:
277 | options = dict(options)
278 | options["job_name"] = re.sub(r"[^a-zA-Z0-9_\-+.]", "_", options["job_name"])
279 | return options
280 |
--------------------------------------------------------------------------------
/src/qtoolkit/io/pbs_base.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import abc
4 | import re
5 | from abc import ABC
6 | from datetime import timedelta
7 | from typing import ClassVar
8 |
9 | from qtoolkit.core.data_objects import (
10 | CancelResult,
11 | CancelStatus,
12 | ProcessPlacement,
13 | QResources,
14 | SubmissionResult,
15 | SubmissionStatus,
16 | )
17 | from qtoolkit.core.exceptions import OutputParsingError, UnsupportedResourcesError
18 | from qtoolkit.io.base import BaseSchedulerIO
19 |
20 |
class PBSIOBase(BaseSchedulerIO, ABC):
    """Abstract class for PBS and SGE schedulers.

    Gathers the logic shared by PBS-like schedulers: parsing of submit and
    cancel command outputs, conversion of memory and time strings, and
    conversion of a QResources object into submission-header values.
    Scheduler-specific details are supplied by subclasses through the
    abstract methods and the class attributes below.
    """

    header_template: str

    SUBMIT_CMD: str | None = "qsub"
    CANCEL_CMD: str | None = "qdel"
    # mapping from QResources attribute names to scheduler header keywords
    _qresources_mapping: ClassVar[dict]
    # human-readable scheduler name used in error messages (e.g. "PBS")
    system_name: str
    # memory unit assumed when a memory string carries no unit suffix
    default_unit: str
    # mapping of memory unit -> exponent of 1024 used for conversion to bytes
    power_labels: ClassVar[dict]

    def parse_submit_output(self, exit_code, stdout, stderr) -> SubmissionResult:
        """Parse the output of the submit (qsub) command.

        A non-zero exit code yields a FAILED result; otherwise the job id is
        extracted from stdout by the subclass, and a missing id yields a
        JOB_ID_UNKNOWN status.
        """
        if isinstance(stdout, bytes):
            stdout = stdout.decode()
        if isinstance(stderr, bytes):
            stderr = stderr.decode()
        if exit_code != 0:
            return SubmissionResult(
                exit_code=exit_code,
                stdout=stdout,
                stderr=stderr,
                status=SubmissionStatus("FAILED"),
            )
        job_id = self.extract_job_id(stdout)
        status = (
            SubmissionStatus("SUCCESSFUL")
            if job_id
            else SubmissionStatus("JOB_ID_UNKNOWN")
        )
        return SubmissionResult(
            job_id=job_id,
            exit_code=exit_code,
            stdout=stdout,
            stderr=stderr,
            status=status,
        )

    @abc.abstractmethod
    def extract_job_id(self, stdout):
        """Extract the job id from the stdout of the submit command."""

    def parse_cancel_output(self, exit_code, stdout, stderr) -> CancelResult:
        """Parse the output of the qdel command."""
        if isinstance(stdout, bytes):
            stdout = stdout.decode()
        if isinstance(stderr, bytes):
            stderr = stderr.decode()
        if exit_code != 0:
            return CancelResult(
                exit_code=exit_code,
                stdout=stdout,
                stderr=stderr,
                status=CancelStatus("FAILED"),
            )

        # the cancelled job id is reported on stderr (subclass-specific format)
        job_id = self.extract_job_id_from_cancel(stderr)
        status = CancelStatus("SUCCESSFUL")
        return CancelResult(
            job_id=job_id,
            exit_code=exit_code,
            stdout=stdout,
            stderr=stderr,
            status=status,
        )

    @abc.abstractmethod
    def extract_job_id_from_cancel(self, stderr):
        """Extract the job id from the stderr of the cancel command."""

    @abc.abstractmethod
    def _get_jobs_list_cmd(
        self, job_ids: list[str] | None = None, user: str | None = None
    ) -> str:
        """Build the command listing jobs, filtered by ids and/or user."""

    def _check_user_and_job_ids_conflict(self):
        """Raise when a query requests both a user and explicit job ids."""
        # Use system_name for more informative error messages
        raise ValueError(f"Cannot query by user and job(s) in {self.system_name}")

    @abc.abstractmethod
    def _get_qstat_base_command(self) -> list[str]:
        """Return the base qstat command as a list of tokens."""

    @abc.abstractmethod
    def _get_job_ids_flag(self, job_ids_str: str) -> str:
        """Return the scheduler-specific way of passing job ids to qstat."""

    @abc.abstractmethod
    def _get_job_cmd(self, job_id: str) -> str:
        """Build the command querying a single job."""

    def _convert_memory_str(self, memory: str | None) -> int | None:
        """Convert a scheduler memory string (e.g. "2gb") to a number of bytes.

        Returns None for empty input.

        Raises
        ------
        OutputParsingError
            If no number can be parsed or the unit is unknown.
        """
        if not memory:
            return None

        match = re.match(r"([0-9]+)([a-zA-Z]*)", memory)
        if not match:
            raise OutputParsingError("No numbers and units parsed")
        memory, units = match.groups()

        # Now we call the methods specific to the child class (PBSIO or SGEIO)
        power_labels = self.power_labels

        if not units:
            units = self.default_unit
        elif units.lower() not in power_labels:
            raise OutputParsingError(f"Unknown units {units}")

        try:
            v = int(memory)
        except ValueError as exc:
            raise OutputParsingError from exc

        return v * (1024 ** power_labels[units.lower()])

    @staticmethod
    def _convert_time_to_str(time: int | float | timedelta) -> str:  # noqa: PYI041
        """Convert a duration in seconds (or a timedelta) to an H:M:S string."""
        if not isinstance(time, timedelta):
            time = timedelta(seconds=time)

        hours, remainder = divmod(int(time.total_seconds()), 3600)
        minutes, seconds = divmod(remainder, 60)

        return f"{hours}:{minutes}:{seconds}"

    def _convert_qresources(self, resources: QResources) -> dict:
        """Convert a QResources instance to the dict used to fill in the
        header of the submission script.

        Raises
        ------
        UnsupportedResourcesError
            For process placements that the scheduler cannot express.
        """
        header_dict = {}
        for qr_field, system_field in self._qresources_mapping.items():
            val = getattr(resources, qr_field)
            if val is not None:
                header_dict[system_field] = val

        if resources.njobs and resources.njobs > 1:
            header_dict["array"] = f"1-{resources.njobs}"

        if resources.time_limit:
            header_dict["walltime"] = self._convert_time_to_str(resources.time_limit)
            self._add_soft_walltime(header_dict, resources)

        if resources.rerunnable is not None:
            header_dict["rerunnable"] = "y" if resources.rerunnable else "n"

        # Build select clause logic directly within _convert_qresources
        nodes, processes, processes_per_node = resources.get_processes_distribution()
        select = None
        if resources.process_placement == ProcessPlacement.NO_CONSTRAINTS:
            select = f"select={processes}"
            if resources.threads_per_process:
                select += f":ncpus={resources.threads_per_process}"
                select += f":ompthreads={resources.threads_per_process}"
            if resources.memory_per_thread:
                threads_per_process = resources.threads_per_process or 1
                select += f":mem={threads_per_process * resources.memory_per_thread}mb"
        elif resources.process_placement in (
            ProcessPlacement.EVENLY_DISTRIBUTED,
            ProcessPlacement.SAME_NODE,
            ProcessPlacement.SCATTERED,
        ):
            select = f"select={nodes}"
            if resources.threads_per_process and resources.threads_per_process > 1:
                cpus = resources.threads_per_process * processes_per_node
                ompthreads = resources.threads_per_process
            else:
                cpus = processes_per_node
                ompthreads = None
            select += f":ncpus={cpus}"
            select += f":mpiprocs={processes_per_node}"
            if ompthreads:
                select += f":ompthreads={ompthreads}"
            if resources.memory_per_thread:
                mem = cpus * resources.memory_per_thread
                select += f":mem={mem}mb"

            # The "place" directive only applies to the distributed placements,
            # so this chain is nested in this branch: NO_CONSTRAINTS (handled
            # above) must not fall through to the UnsupportedResourcesError.
            if resources.process_placement in (
                ProcessPlacement.EVENLY_DISTRIBUTED,
                ProcessPlacement.SCATTERED,
            ):
                header_dict["place"] = "scatter"
            elif resources.process_placement == ProcessPlacement.SAME_NODE:
                header_dict["place"] = "pack"
        else:
            raise UnsupportedResourcesError(
                f"process placement {resources.process_placement} is not supported for {self.system_name}"
            )

        header_dict["select"] = select

        if resources.email_address:
            header_dict["mail_user"] = resources.email_address
            header_dict["mail_type"] = "abe"

        if resources.scheduler_kwargs:
            header_dict.update(resources.scheduler_kwargs)

        return header_dict

    def _add_soft_walltime(self, header_dict: dict, resources: QResources):
        """Add soft_walltime if required by child classes (e.g., SGE)."""

    @property
    def supported_qresources_keys(self) -> list:
        """QResources attributes correctly handled by _convert_qresources."""
        supported = list(self._qresources_mapping.keys())
        supported += [
            "njobs",
            "time_limit",
            "processes",
            "processes_per_node",
            "process_placement",
            "nodes",
            "threads_per_process",
            "memory_per_thread",
            "email_address",
            "scheduler_kwargs",
        ]
        return supported
237 |
--------------------------------------------------------------------------------
/src/qtoolkit/io/shell.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from typing import TYPE_CHECKING, ClassVar
4 |
5 | from qtoolkit.core.data_objects import (
6 | CancelResult,
7 | CancelStatus,
8 | QJob,
9 | QResources,
10 | QState,
11 | QSubState,
12 | SubmissionResult,
13 | SubmissionStatus,
14 | )
15 | from qtoolkit.core.exceptions import CommandFailedError, OutputParsingError
16 | from qtoolkit.io.base import BaseSchedulerIO
17 |
18 | if TYPE_CHECKING:
19 | from pathlib import Path
20 |
21 | # States in from ps command, extracted from man ps.
22 | # D uninterruptible sleep (usually IO)
23 | # R running or runnable (on run queue)
24 | # S interruptible sleep (waiting for an event to complete)
25 | # T stopped by job control signal
26 | # t stopped by debugger during the tracing
27 | # W paging (not valid since the 2.6.xx kernel)
28 | # X dead (should never be seen)
29 | # Z defunct ("zombie") process, terminated but not reaped by its parent
30 |
31 |
class ShellState(QSubState):
    """Single-character process states reported by ps (see the table from
    ``man ps`` in the comments above)."""

    UNINTERRUPTIBLE_SLEEP = "D"
    RUNNING = "R"
    INTERRUPTIBLE_SLEEP = "S"
    STOPPED = "T"
    STOPPED_DEBUGGER = "t"
    PAGING = "W"
    DEAD = "X"
    DEFUNCT = "Z"

    @property
    def qstate(self) -> QState:
        """Scheduler-agnostic QState corresponding to this ps state."""
        return _STATUS_MAPPING[self]  # type: ignore
45 |
46 |
# Mapping of each ShellState to the generic QState used across schedulers.
_STATUS_MAPPING = {
    ShellState.UNINTERRUPTIBLE_SLEEP: QState.RUNNING,
    ShellState.RUNNING: QState.RUNNING,
    ShellState.INTERRUPTIBLE_SLEEP: QState.RUNNING,
    ShellState.STOPPED: QState.SUSPENDED,
    ShellState.STOPPED_DEBUGGER: QState.SUSPENDED,
    ShellState.PAGING: QState.RUNNING,
    ShellState.DEAD: QState.DONE,
    ShellState.DEFUNCT: QState.DONE,  # TODO should be failed?
}
57 |
58 |
class ShellIO(BaseSchedulerIO):
    """Scheduler IO that runs jobs as plain local shell processes, without an
    actual queueing system: submission runs the script with bash (optionally
    in the background via nohup), cancellation kills the process, and job
    monitoring is done by parsing the output of ps."""

    # Header prepended to the submission script: redirects stdout/stderr and
    # echoes the job name.
    header_template: str = """
exec > $${qout_path}
exec 2> $${qerr_path}

echo $${job_name}
$${qverbatim}
"""

    CANCEL_CMD: str | None = "kill -9"

    def __init__(self, blocking=False, stdout_path="stdout", stderr_path="stderr"):
        """Construct the ShellIO object.

        Parameters
        ----------
        blocking: bool
            Whether the execution should be blocking.
        stdout_path: str or Path
            Path to the standard output file.
        stderr_path: str or Path
            Path to the standard error file.
        """
        self.blocking = blocking
        self.stdout_path = stdout_path
        self.stderr_path = stderr_path

    def get_submit_cmd(self, script_file: str | Path | None = "submit.script") -> str:
        """
        Get the command used to submit a given script to the queue.

        Parameters
        ----------
        script_file: str or Path
            Path of the script file to use.
        """
        script_file = script_file or ""

        # nohup and the redirection of the outputs is needed when running through fabric
        # see https://www.fabfile.org/faq.html#why-can-t-i-run-programs-in-the-background-with-it-makes-fabric-hang
        command = f"bash {script_file} > {self.stdout_path} 2> {self.stderr_path}"
        if not self.blocking:
            # "echo $!" prints the PID of the background process; it is used
            # downstream as the shell "job id" (see parse_submit_output).
            command = f"nohup {command} & echo $!"
        return command

    def parse_submit_output(self, exit_code, stdout, stderr) -> SubmissionResult:
        """Parse the output of the submission command.

        The job id is the PID echoed on stdout (see get_submit_cmd); an
        empty stdout yields a JOB_ID_UNKNOWN status.
        """
        if isinstance(stdout, bytes):
            stdout = stdout.decode()
        if isinstance(stderr, bytes):
            stderr = stderr.decode()
        if exit_code != 0:
            return SubmissionResult(
                exit_code=exit_code,
                stdout=stdout,
                stderr=stderr,
                status=SubmissionStatus("FAILED"),
            )
        job_id = stdout.strip() or None
        status = (
            SubmissionStatus("SUCCESSFUL")
            if job_id
            else SubmissionStatus("JOB_ID_UNKNOWN")
        )
        return SubmissionResult(
            job_id=job_id,
            exit_code=exit_code,
            stdout=stdout,
            stderr=stderr,
            status=status,
        )

    def parse_cancel_output(self, exit_code, stdout, stderr) -> CancelResult:
        """Parse the output of the kill command."""
        if isinstance(stdout, bytes):
            stdout = stdout.decode()
        if isinstance(stderr, bytes):
            stderr = stderr.decode()
        if exit_code != 0:
            return CancelResult(
                exit_code=exit_code,
                stdout=stdout,
                stderr=stderr,
                status=CancelStatus("FAILED"),
            )

        # kill does not report the PID back, so job_id is left as None.
        status = CancelStatus("SUCCESSFUL")
        return CancelResult(
            job_id=None,
            exit_code=exit_code,
            stdout=stdout,
            stderr=stderr,
            status=status,
        )

    def _get_job_cmd(self, job_id: str):
        # Querying a single job is just a jobs-list query on one pid.
        return self._get_jobs_list_cmd(job_ids=[job_id])

    def parse_job_output(self, exit_code, stdout, stderr) -> QJob | None:
        """Parse the output of the ps command and return the corresponding QJob object.

        If the ps command returns multiple shell jobs, only the first corresponding
        QJob is returned.
        #TODO: should we check that there is only one job here ?

        Parameters
        ----------
        exit_code : int
            Exit code of the ps command.
        stdout : str
            Standard output of the ps command.
        stderr : str
            Standard error of the ps command.
        """
        out = self.parse_jobs_list_output(exit_code, stdout, stderr)
        if out:
            return out[0]
        return None

    def _get_jobs_list_cmd(
        self, job_ids: list[str] | None = None, user: str | None = None
    ) -> str:
        """Build the ps command listing jobs by pid and/or user.

        Raises ValueError if both user and job_ids are given, since the
        user option would override the pid list.
        """
        if user and job_ids:
            msg = (
                "Cannot query by user and job(s) with ps, "
                "as the user option will override the ids list"
            )
            raise ValueError(msg)

        # use etime instead of etimes for compatibility
        command = [
            "ps",
            "-o pid,user,etime,state,comm",
        ]

        if user:
            command.append(f"-U {user}")

        if job_ids:
            command.append("-p " + ",".join(job_ids))

        return " ".join(command)

    def parse_jobs_list_output(self, exit_code, stdout, stderr) -> list[QJob]:
        """Parse the output of the ps command to list jobs.

        Parameters
        ----------
        exit_code : int
            Exit code of the ps command.
        stdout : str
            Standard output of the ps command.
        stderr : str
            Standard error of the ps command.
        """
        if isinstance(stdout, bytes):
            stdout = stdout.decode()
        if isinstance(stderr, bytes):
            stderr = stderr.decode()

        # if asking only for pid that are not running the exit code is != 0,
        # so check also on stderr for failing
        if exit_code != 0 and stderr.strip():
            msg = f"command ps failed: stdout: {stdout}. stderr: {stderr}"
            raise CommandFailedError(msg)

        jobs_list = []
        # skip the ps header row; columns are pid, user, etime, state, comm
        for row in stdout.splitlines()[1:]:
            if not row.strip():
                continue

            data = row.split()

            qjob = QJob()
            qjob.job_id = data[0]
            qjob.username = data[1]
            qjob.runtime = self._convert_str_to_time(data[2])
            qjob.name = data[4]

            try:
                # only the first character of the state field is mapped;
                # any extra characters in that field are ignored
                shell_job_state = ShellState(data[3][0])
            except ValueError as exc:
                msg = f"Unknown job state {data[3]} for job id {qjob.job_id}"
                raise OutputParsingError(msg) from exc
            qjob.sub_state = shell_job_state
            qjob.state = shell_job_state.qstate

            jobs_list.append(qjob)

        return jobs_list

    # helper attribute to match the values defined in QResources and
    # the dictionary that should be passed to the template
    _qresources_mapping: ClassVar = {
        "job_name": "job_name",
        "output_filepath": "qout_path",
        "error_filepath": "qerr_path",
    }

    def _convert_qresources(self, resources: QResources) -> dict:
        """
        Converts a QResources instance to a dict that will be used to fill in the
        header of the submission script.
        """
        header_dict = {}
        for qr_field, slurm_field in self._qresources_mapping.items():
            val = getattr(resources, qr_field)
            if val is not None:
                header_dict[slurm_field] = val

        return header_dict

    @property
    def supported_qresources_keys(self) -> list:
        """
        List of attributes of QResources that are correctly handled by the
        _convert_qresources method. It is used to validate that the user
        does not pass an unsupported value, expecting to have an effect.
        """
        return list(self._qresources_mapping)

    @staticmethod
    def _convert_str_to_time(time_str: str | None) -> int | None:
        """
        Convert a string in the format used in etime [[DD-]hh:]mm:ss to a
        number of seconds.
        """
        if not time_str:
            return None

        time_split = time_str.split(":")

        days = hours = 0

        try:
            # a "DD-" day prefix may be attached to the leading field
            if "-" in time_split[0]:
                split_day = time_split[0].split("-")
                days = int(split_day[0])
                time_split = [split_day[1]] + time_split[1:]

            if len(time_split) == 3:
                hours, minutes, seconds = (int(v) for v in time_split)
            elif len(time_split) == 2:
                minutes, seconds = (int(v) for v in time_split)
            else:
                raise OutputParsingError

        except ValueError as exc:
            raise OutputParsingError from exc

        return days * 86400 + hours * 3600 + minutes * 60 + seconds
309 |
--------------------------------------------------------------------------------
/src/qtoolkit/manager.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from pathlib import Path
4 | from typing import TYPE_CHECKING
5 |
6 | from qtoolkit.core.base import QTKObject
7 | from qtoolkit.host.local import LocalHost
8 |
9 | if TYPE_CHECKING:
10 | from qtoolkit.core.data_objects import (
11 | CancelResult,
12 | QJob,
13 | QResources,
14 | SubmissionResult,
15 | )
16 | from qtoolkit.host.base import BaseHost
17 | from qtoolkit.io.base import BaseSchedulerIO
18 |
19 |
class QueueManager(QTKObject):
    """Base class for job queues.

    Attributes
    ----------
    scheduler_io : str
        Name of the queue
    host : BaseHost
        Host where the command should be executed.
    """

    def __init__(self, scheduler_io: BaseSchedulerIO, host: BaseHost | None = None):
        # default to executing commands on the local machine
        self.scheduler_io = scheduler_io
        self.host = host or LocalHost()

    def execute_cmd(self, cmd: str, workdir: str | Path | None = None):
        """Execute a command.

        Parameters
        ----------
        cmd : str
            Command to be executed
        workdir: str or None
            path where the command will be executed.

        Returns
        -------
        stdout : str
        stderr : str
        exit_code : int
        """
        return self.host.execute(cmd, workdir)

    def get_submission_script(
        self,
        commands: str | list[str] | None,
        options: dict | QResources | None = None,
        work_dir: str | Path | None = None,
        pre_run: str | list[str] | None = None,
        post_run: str | list[str] | None = None,
        environment=None,
    ) -> str:
        """Assemble the full submission script: environment setup, change of
        directory, pre-run, main commands and post-run sections, in that
        order. Empty sections are skipped; the scheduler-specific header is
        added by the scheduler_io object."""
        commands_list = []
        if environment_setup := self.get_environment_setup(environment):
            commands_list.append(environment_setup)
        if change_dir := self.get_change_dir(work_dir):
            commands_list.append(change_dir)
        if pre_run := self.get_pre_run(pre_run):
            commands_list.append(pre_run)
        if run_commands := self.get_run_commands(commands):
            commands_list.append(run_commands)
        if post_run := self.get_post_run(post_run):
            commands_list.append(post_run)
        return self.scheduler_io.get_submission_script(commands_list, options)

    def get_environment_setup(self, env_config) -> str | None:
        """Build the shell lines setting up the job environment.

        env_config may contain "modules", "source_files", "conda_environment"
        and "environ" entries; returns None when env_config is falsy.
        """
        if env_config:
            env_setup = []
            if "modules" in env_config:
                env_setup.append("module purge")
                env_setup += [f"module load {mod}" for mod in env_config["modules"]]
            if "source_files" in env_config:
                env_setup += [
                    f"source {source_file}"
                    for source_file in env_config["source_files"]
                ]
            if "conda_environment" in env_config:
                env_setup.append(f'conda activate {env_config["conda_environment"]}')
            if "environ" in env_config:
                for var, value in env_config["environ"].items():
                    env_setup.append(f"export {var}={value}")
            return "\n".join(env_setup)
        # This is from aiida, maybe we need to think about this escape_for_bash ?
        # lines = ['# ENVIRONMENT VARIABLES BEGIN ###']
        # for key, value in template.job_environment.items():
        #     lines.append(f'export {key.strip()}={
        #         escape_for_bash(value,
        #                         template.environment_variables_double_quotes)
        #     }')
        # lines.append('# ENVIRONMENT VARIABLES END ###')
        return None

    def get_change_dir(self, dir_path: str | Path | None) -> str:
        # Empty string (skipped by get_submission_script) when no path given.
        if dir_path:
            return f"cd {dir_path}"
        return ""

    def get_pre_run(self, pre_run) -> str | None:
        # Hook for subclasses: no pre-run section is generated by default.
        pass

    def get_run_commands(self, commands) -> str:
        """Join the main commands into a single newline-separated string."""
        if isinstance(commands, str):
            return commands
        if isinstance(commands, list):
            return "\n".join(commands)
        raise ValueError("commands should be a str or a list of str.")

    def get_post_run(self, post_run) -> str | None:
        # Hook for subclasses: no post-run section is generated by default.
        pass

    def submit(
        self,
        commands: str | list[str] | None,
        options=None,
        work_dir=None,
        environment=None,
        script_fname="submit.script",
        create_submit_dir=False,
    ) -> SubmissionResult:
        """Write the submission script to work_dir (defaulting to the current
        directory) and submit it through the scheduler_io object."""
        script_str = self.get_submission_script(
            commands=commands,
            options=options,
            # TODO: Do we need the submit_dir here ?
            # Should we distinguish submit_dir and work_dir ?
            work_dir=work_dir,
            environment=environment,
        )
        # TODO: deal with remote directory directly on the host here.
        # Will currently only work on the localhost.
        work_dir = Path(work_dir) if work_dir is not None else Path.cwd()
        if create_submit_dir:
            created = self.host.mkdir(work_dir, recursive=True, exist_ok=True)
            if not created:
                raise RuntimeError("failed to create directory")
        script_fpath = Path(work_dir, script_fname)
        self.host.write_text_file(script_fpath, script_str)
        submit_cmd = self.scheduler_io.get_submit_cmd(script_fpath)
        stdout, stderr, returncode = self.execute_cmd(submit_cmd, work_dir)
        return self.scheduler_io.parse_submit_output(
            exit_code=returncode, stdout=stdout, stderr=stderr
        )

    def cancel(self, job: QJob | int | str) -> CancelResult:
        """Cancel a job and parse the result of the cancel command."""
        cancel_cmd = self.scheduler_io.get_cancel_cmd(job)
        stdout, stderr, returncode = self.execute_cmd(cancel_cmd)
        return self.scheduler_io.parse_cancel_output(
            exit_code=returncode, stdout=stdout, stderr=stderr
        )

    def get_job(self, job: QJob | int | str) -> QJob | None:
        """Query the scheduler for a single job; None if it cannot be parsed."""
        job_cmd = self.scheduler_io.get_job_cmd(job)
        stdout, stderr, returncode = self.execute_cmd(job_cmd)
        return self.scheduler_io.parse_job_output(
            exit_code=returncode, stdout=stdout, stderr=stderr
        )

    def get_jobs_list(
        self, jobs: list[QJob | int | str] | None = None, user: str | None = None
    ) -> list[QJob]:
        """Query the scheduler for a list of jobs, by ids and/or user."""
        job_cmd = self.scheduler_io.get_jobs_list_cmd(jobs, user)
        stdout, stderr, returncode = self.execute_cmd(job_cmd)
        return self.scheduler_io.parse_jobs_list_output(
            exit_code=returncode, stdout=stdout, stderr=stderr
        )
175 |
--------------------------------------------------------------------------------
/src/qtoolkit/py.typed:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Matgenix/qtoolkit/bcb445b903f3cb78295aa7641944e0bade9a3fb8/src/qtoolkit/py.typed
--------------------------------------------------------------------------------
/src/qtoolkit/utils.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import os
4 | from contextlib import contextmanager
5 | from pathlib import Path
6 |
7 |
@contextmanager
def cd(path: str | Path):
    """
    A Fabric-inspired cd context that temporarily changes directory for
    performing some tasks, and returns to the original working directory
    afterwards (even if an exception is raised). e.g.

        with cd("/my/path/"):
            do_something()

    Imported from monty to avoid importing it.
    TODO: check if can be directly used from monty.

    Args:
        path: Path to cd to.
    """
    previous_dir = Path.cwd()
    os.chdir(path)
    try:
        yield
    finally:
        # always restore the original working directory
        os.chdir(previous_dir)
30 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """Tests for QToolKit."""
2 |
3 | from pathlib import Path
4 |
5 | module_dir = Path(__file__).resolve().parent
6 | test_dir = module_dir / "test_data"
7 | TEST_DIR = test_dir.resolve()
8 |
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | from dataclasses import is_dataclass
2 | from enum import Enum
3 | from pathlib import Path
4 |
5 | import pytest
6 |
7 | module_dir = Path(__file__).resolve().parent
8 | test_dir = module_dir / "test_data"
9 | TEST_DIR = test_dir.resolve()
10 |
11 |
12 | @pytest.fixture(scope="session")
13 | def test_dir():
14 | return TEST_DIR
15 |
16 |
17 | @pytest.fixture(scope="session")
18 | def log_to_stdout():
19 | import logging
20 | import sys
21 |
22 | # Set Logging
23 | root = logging.getLogger()
24 | root.setLevel(logging.DEBUG)
25 | ch = logging.StreamHandler(sys.stdout)
26 | formatter = logging.Formatter(
27 | "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
28 | )
29 | ch.setFormatter(formatter)
30 | root.addHandler(ch)
31 |
32 |
33 | @pytest.fixture(scope="session")
34 | def clean_dir(debug_mode):
35 | import os
36 | import shutil
37 | import tempfile
38 |
39 | old_cwd = os.getcwd()
40 | newpath = tempfile.mkdtemp()
41 | os.chdir(newpath)
42 | yield
43 | if debug_mode:
44 | print(f"Tests ran in {newpath}")
45 | else:
46 | os.chdir(old_cwd)
47 | shutil.rmtree(newpath)
48 |
49 |
@pytest.fixture()
def tmp_dir():
    """Same as clean_dir but is fresh for every test."""
    import os
    import shutil
    import tempfile

    previous_cwd = os.getcwd()
    scratch_path = tempfile.mkdtemp()
    os.chdir(scratch_path)
    yield
    # restore the working directory and discard the scratch area
    os.chdir(previous_cwd)
    shutil.rmtree(scratch_path)
63 |
64 |
65 | @pytest.fixture(scope="session")
66 | def debug_mode():
67 | return False
68 |
69 |
def is_msonable(obj):
    """Check that obj is MSONable and survives an as_dict/from_dict round trip."""
    from monty.json import MSONable

    if not isinstance(obj, MSONable):
        return False
    obj_dict = obj.as_dict()
    return obj.__class__.from_dict(obj_dict).as_dict() == obj_dict
76 |
77 |
class TestUtils:
    """Serialization helpers shared by the tests (exposed through the
    test_utils fixture)."""

    import json

    from monty.json import MSONable
    from monty.serialization import MontyDecoder, MontyEncoder

    @classmethod
    def is_msonable(cls, obj, obj_cls=None):
        """Check that obj round-trips through as_dict/from_dict and through a
        full JSON encode/decode with the monty encoder/decoder.

        obj_cls is only needed when MontyDecoder cannot locate the class
        automatically (e.g. inner classes); see the note below.
        """
        if not isinstance(obj, cls.MSONable):
            return False
        obj_dict = obj.as_dict()
        if obj_dict != obj.__class__.from_dict(obj_dict).as_dict():
            return False
        json_string = cls.json.dumps(obj_dict, cls=cls.MontyEncoder)
        obj_from_json = cls.json.loads(json_string, cls=cls.MontyDecoder)
        # When the class is defined as an inner class, the MontyDecoder is unable
        # to find it automatically. This is only used in the core/test_base tests.
        # The next check on the type of the obj_from_json is of course not relevant
        # in that specific case.
        if obj_cls is not None:
            obj_from_json = obj_cls.from_dict(obj_from_json)
        if not isinstance(obj_from_json, obj.__class__):
            return False
        if is_dataclass(obj) or isinstance(obj, Enum):
            return obj_from_json == obj
        return obj_from_json.as_dict() == obj.as_dict()

    @classmethod
    def inkwargs_outref(cls, in_out_ref, inkey, outkey):
        """Decode the input kwargs and reference output stored as monty-JSON
        strings under inkey/outkey in a test-data mapping."""
        dec = cls.MontyDecoder()
        inkwargs_string = in_out_ref[inkey]
        inkwargs = dec.decode(inkwargs_string)
        outref_string = in_out_ref[outkey]
        outref = dec.decode(outref_string)
        return inkwargs, outref
113 |
114 |
115 | @pytest.fixture(scope="session")
116 | def test_utils():
117 | return TestUtils
118 |
119 |
@pytest.fixture()  # scope="session")
def maximalist_qresources():
    """A set of QResources options that try to make use of most features"""
    from qtoolkit.core.data_objects import QResources

    settings = {
        "queue_name": "test_queue",
        "job_name": "test_job",
        "memory_per_thread": 1000,
        "nodes": 1,
        "processes": 1,
        "processes_per_node": 1,
        "threads_per_process": 1,
        "gpus_per_job": 1,
        "time_limit": 100,
        "account": "test_account",
        "qos": "test_qos",
        "priority": 1,
        "output_filepath": "test_output_filepath",
        "error_filepath": "test_error_filepath",
        "process_placement": "no_constraints",
        "email_address": "test_email_address@email.address",
        "rerunnable": True,
        "project": "test_project",
        "njobs": 1,
    }
    return QResources(**settings)
146 |
--------------------------------------------------------------------------------
/tests/core/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Matgenix/qtoolkit/bcb445b903f3cb78295aa7641944e0bade9a3fb8/tests/core/__init__.py
--------------------------------------------------------------------------------
/tests/core/test_base.py:
--------------------------------------------------------------------------------
1 | """Unit tests for the core.base module of QToolKit."""
2 |
3 | import enum
4 | import importlib
5 | import sys
6 | from dataclasses import dataclass
7 |
8 | import pytest
9 |
10 | try:
11 | import monty
12 | except ModuleNotFoundError:
13 | monty = None
14 |
15 |
16 | @pytest.fixture()
17 | def qtk_core_base_mocked_monty_not_found(mocker):
18 | # Note:
19 | # Here we use importlib to dynamically import the qtoolkit.core.base module.
20 | # We want to test the QTKObject and QTKEnum super classes with monty present or not.
21 | # This is done by mocking the import. We then need to use importlib to reload
22 | # the qtoolkit.core.base module when we want to change the behaviour of the
23 | # the monty.json import inside the qtoolkit.core.base module (i.e. mocking the
24 | # import monty.json or doing the real import). This is due to "module caching"
25 | # in python which stores imported modules in sys.modules. Using importlib.reload
26 | # forces python to reevaluate the imported module instead of reusing the one
27 | # already imported and available in sys.modules.
28 | # Note that this is local to this test_base.py file as pytest
29 | orig_import = __import__
30 |
31 | def _import_mock(name, *args):
32 | if name == "monty.json":
33 | raise ModuleNotFoundError
34 | return orig_import(name, *args)
35 |
36 | mocker.patch("builtins.__import__", side_effect=_import_mock)
37 |
38 | if "qtoolkit.core.base" in sys.modules:
39 | yield importlib.reload(sys.modules["qtoolkit.core.base"])
40 | else:
41 | yield importlib.import_module("qtoolkit.core.base")
42 | del sys.modules["qtoolkit.core.base"]
43 |
44 |
45 | class TestQBase:
46 | @pytest.mark.skipif(monty is None, reason="monty is not installed")
47 | def test_msonable(self, test_utils):
48 | import qtoolkit.core.base as qbase
49 |
50 | @dataclass
51 | class QClass(qbase.QTKObject):
52 | name: str = "name"
53 |
54 | qc = QClass()
55 | assert test_utils.is_msonable(qc, obj_cls=QClass)
56 |
57 | def test_not_msonable(self, test_utils, qtk_core_base_mocked_monty_not_found):
58 | @dataclass
59 | class QClass(qtk_core_base_mocked_monty_not_found.QTKObject):
60 | name: str = "name"
61 |
62 | qc = QClass()
63 | assert not test_utils.is_msonable(qc)
64 |
65 |
66 | class TestQEnum:
67 | @pytest.mark.skipif(monty is None, reason="monty is not installed")
68 | def test_msonable(self, test_utils):
69 | import qtoolkit.core.base as qbase
70 |
71 | class SomeEnum(qbase.QTKEnum):
72 | VAL1 = "VAL1"
73 | VAL2 = "VAL2"
74 |
75 | se = SomeEnum("VAL1")
76 | assert test_utils.is_msonable(se, obj_cls=SomeEnum)
77 | assert isinstance(se, enum.Enum)
78 |
79 | se = SomeEnum.VAL2
80 | assert test_utils.is_msonable(se, obj_cls=SomeEnum)
81 | assert isinstance(se, enum.Enum)
82 |
83 | class SomeEnum(qbase.QTKEnum):
84 | VAL1 = 3
85 | VAL2 = 4
86 |
87 | se = SomeEnum(3)
88 | assert test_utils.is_msonable(se, obj_cls=SomeEnum)
89 | assert isinstance(se, enum.Enum)
90 |
91 | se = SomeEnum.VAL2
92 | assert test_utils.is_msonable(se, obj_cls=SomeEnum)
93 | assert isinstance(se, enum.Enum)
94 |
95 | def test_not_msonable(self, test_utils, qtk_core_base_mocked_monty_not_found):
96 | class SomeEnum(qtk_core_base_mocked_monty_not_found.QTKEnum):
97 | VAL1 = "VAL1"
98 | VAL2 = "VAL2"
99 |
100 | se = SomeEnum("VAL1")
101 | assert not test_utils.is_msonable(se)
102 | assert isinstance(se, enum.Enum)
103 |
104 | se = SomeEnum.VAL2
105 | assert not test_utils.is_msonable(se)
106 | assert isinstance(se, enum.Enum)
107 |
108 | class SomeEnum(qtk_core_base_mocked_monty_not_found.QTKEnum):
109 | VAL1 = 3
110 | VAL2 = 4
111 |
112 | se = SomeEnum(3)
113 | assert not test_utils.is_msonable(se)
114 | assert isinstance(se, enum.Enum)
115 |
116 | se = SomeEnum.VAL2
117 | assert not test_utils.is_msonable(se)
118 | assert isinstance(se, enum.Enum)
119 |
--------------------------------------------------------------------------------
/tests/io/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Matgenix/qtoolkit/bcb445b903f3cb78295aa7641944e0bade9a3fb8/tests/io/__init__.py
--------------------------------------------------------------------------------
/tests/io/test_base.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import pytest
4 |
5 | from qtoolkit.core.data_objects import CancelResult, QJob, QResources, SubmissionResult
6 | from qtoolkit.io.base import BaseSchedulerIO, QTemplate
7 |
8 |
9 | def test_qtemplate():
10 | template_str = """This is a template
11 | with some $$substitutions
12 | another $${tata}te"""
13 | template = QTemplate(template_str)
14 | identifiers = set(template.get_identifiers())
15 | assert identifiers == {"substitutions", "tata"}
16 | substituted_template = template.safe_substitute({"substitutions": "mysubstitution"})
17 | assert (
18 | substituted_template
19 | == """This is a template
20 | with some mysubstitution
21 | another $${tata}te"""
22 | )
23 | substituted_template = template.safe_substitute({})
24 | assert (
25 | substituted_template
26 | == """This is a template
27 | with some $$substitutions
28 | another $${tata}te"""
29 | )
30 | substituted_template = template.safe_substitute({"tata": "pata"})
31 | assert (
32 | substituted_template
33 | == """This is a template
34 | with some $$substitutions
35 | another patate"""
36 | )
37 |
38 | template_str = """Multi template $$subst
39 | $${subst1}$${subst2}
40 | $${subst3}$${subst3}"""
41 | template = QTemplate(template_str)
42 | identifiers = template.get_identifiers()
43 | assert len(identifiers) == 4
44 | assert set(identifiers) == {"subst", "subst1", "subst2", "subst3"}
45 | substituted_template = template.safe_substitute({"subst3": "to", "subst": "bla"})
46 | assert (
47 | substituted_template
48 | == """Multi template bla
49 | $${subst1}$${subst2}
50 | toto"""
51 | )
52 |
53 |
54 | class TestBaseScheduler:
55 | @pytest.fixture(scope="module")
56 | def scheduler(self):
57 | class MyScheduler(BaseSchedulerIO):
58 | header_template = """#SPECCMD --option1=$${option1}
59 | #SPECCMD --option2=$${option2}
60 | #SPECCMD --option3=$${option3}
61 | #SPECCMD --processes=$${processes}
62 | #SPECCMD --processes_per_node=$${processes_per_node}
63 | #SPECCMD --nodes=$${nodes}"""
64 |
65 | SUBMIT_CMD = "mysubmit"
66 | CANCEL_CMD = "mycancel"
67 |
68 | def parse_submit_output(
69 | self, exit_code, stdout, stderr
70 | ) -> SubmissionResult:
71 | pass
72 |
73 | def parse_cancel_output(self, exit_code, stdout, stderr) -> CancelResult:
74 | pass
75 |
76 | def _get_job_cmd(self, job_id: str) -> str:
77 | pass
78 |
79 | def parse_job_output(self, exit_code, stdout, stderr) -> QJob | None:
80 | pass
81 |
82 | def _convert_qresources(self, resources: QResources) -> dict:
83 | header_dict = {}
84 |
85 | (
86 | nodes,
87 | processes,
88 | processes_per_node,
89 | ) = resources.get_processes_distribution()
90 | if processes:
91 | header_dict["processes"] = processes
92 | if processes_per_node:
93 | header_dict["processes_per_node"] = processes_per_node
94 | if nodes:
95 | header_dict["nodes"] = nodes
96 |
97 | if resources.scheduler_kwargs:
98 | header_dict.update(resources.scheduler_kwargs)
99 |
100 | return header_dict
101 |
102 | @property
103 | def supported_qresources_keys(self) -> list:
104 | return [
105 | "scheduler_kwargs",
106 | "nodes",
107 | "processes_per_node",
108 | "process_placement",
109 | "processes",
110 | ]
111 |
112 | def _get_jobs_list_cmd(
113 | self, job_ids: list[str] | None = None, user: str | None = None
114 | ) -> str:
115 | pass
116 |
117 | def parse_jobs_list_output(self, exit_code, stdout, stderr) -> list[QJob]:
118 | pass
119 |
120 | return MyScheduler()
121 |
122 | def test_subclass_base_scheduler(self, scheduler):
123 | class MyScheduler(BaseSchedulerIO):
124 | pass
125 |
126 | with pytest.raises(TypeError):
127 | MyScheduler()
128 |
129 | def test_generate_header(self, scheduler):
130 | header = scheduler.generate_header({"option2": "value_option2"})
131 | assert header == """#SPECCMD --option2=value_option2"""
132 |
133 | res = QResources(processes=8)
134 | header = scheduler.generate_header(res)
135 | assert header == """#SPECCMD --processes=8"""
136 | res = QResources(
137 | nodes=4, processes_per_node=16, scheduler_kwargs={"option2": "myopt2"}
138 | )
139 | header = scheduler.generate_header(res)
140 | assert (
141 | header
142 | == """#SPECCMD --option2=myopt2
143 | #SPECCMD --processes_per_node=16
144 | #SPECCMD --nodes=4"""
145 | )
146 |
147 | # check that the error message contains the expected error, but should not match
148 | # the possible replacements, as they are too different
149 | res = QResources(
150 | nodes=4,
151 | processes_per_node=16,
152 | scheduler_kwargs={"tata": "tata", "titi": "titi"},
153 | )
154 | with pytest.raises(
155 | ValueError,
156 | match=r"The following keys are not present in the template: tata, titi. Check "
157 | r"the template in .*MyScheduler.header_template(?!.*instead of 'titi')",
158 | ):
159 | scheduler.generate_header(res)
160 |
161 | res = QResources(
162 | nodes=4,
163 | processes_per_node=16,
164 | scheduler_kwargs={"option32": "xxx", "processes-per-node": "yyy"},
165 | )
166 | with pytest.raises(
167 | ValueError,
168 | match=r"The following keys are not present in the template: option32, processes-per-node. "
169 | r"Check the template in .*MyScheduler.header_template.*'option3' or 'option2' or 'option1' "
170 | r"instead of 'option32'. 'processes_per_node' or 'processes' instead of 'processes-per-node'",
171 | ):
172 | scheduler.generate_header(res)
173 |
174 | def test_generate_ids_list(self, scheduler):
175 | ids_list = scheduler.generate_ids_list(
176 | [QJob(job_id=4), QJob(job_id="job_id_abc1"), 215, "job12345"]
177 | )
178 | assert ids_list == ["4", "job_id_abc1", "215", "job12345"]
179 |
180 | def test_get_submit_cmd(self, scheduler):
181 | submit_cmd = scheduler.get_submit_cmd()
182 | assert submit_cmd == "mysubmit submit.script"
183 | submit_cmd = scheduler.get_submit_cmd(script_file="sub.sh")
184 | assert submit_cmd == "mysubmit sub.sh"
185 |
186 | def test_get_cancel_cmd(self, scheduler):
187 | cancel_cmd = scheduler.get_cancel_cmd(QJob(job_id=5))
188 | assert cancel_cmd == "mycancel 5"
189 | cancel_cmd = scheduler.get_cancel_cmd(QJob(job_id="abc1"))
190 | assert cancel_cmd == "mycancel abc1"
191 | cancel_cmd = scheduler.get_cancel_cmd("jobid2")
192 | assert cancel_cmd == "mycancel jobid2"
193 | cancel_cmd = scheduler.get_cancel_cmd(632)
194 | assert cancel_cmd == "mycancel 632"
195 |
196 | with pytest.raises(
197 | ValueError,
198 | match=r"The id of the job to be cancelled should be defined. "
199 | r"Received: None",
200 | ):
201 | scheduler.get_cancel_cmd(job=None)
202 |
203 | with pytest.raises(
204 | ValueError,
205 | match=r"The id of the job to be cancelled should be defined. "
206 | r"Received: '' \(empty string\)",
207 | ):
208 | scheduler.get_cancel_cmd(job="")
209 |
--------------------------------------------------------------------------------
/tests/io/test_shell.py:
--------------------------------------------------------------------------------
1 | # ruff: noqa: SLF001
2 | import pytest
3 |
4 | try:
5 | import monty
6 | except ModuleNotFoundError:
7 | monty = None
8 |
9 | from qtoolkit.core.data_objects import (
10 | CancelResult,
11 | CancelStatus,
12 | QJob,
13 | QResources,
14 | QState,
15 | SubmissionResult,
16 | SubmissionStatus,
17 | )
18 | from qtoolkit.core.exceptions import (
19 | CommandFailedError,
20 | OutputParsingError,
21 | UnsupportedResourcesError,
22 | )
23 | from qtoolkit.io.shell import ShellIO, ShellState
24 |
25 |
26 | @pytest.fixture(scope="module")
27 | def shell_io():
28 | return ShellIO()
29 |
30 |
31 | class TestShellState:
32 | @pytest.mark.skipif(monty is None, reason="monty is not installed")
33 | def test_msonable(self, test_utils):
34 | shell_state = ShellState.DEFUNCT
35 | assert test_utils.is_msonable(shell_state)
36 |
37 | def test_states_list(self):
38 | all_states = [state.value for state in ShellState]
39 | assert set(all_states) == {"D", "R", "S", "T", "t", "W", "X", "Z"}
40 |
41 | def test_qstate(self):
42 | shell_state = ShellState.UNINTERRUPTIBLE_SLEEP
43 | assert shell_state.qstate == QState.RUNNING
44 | shell_state = ShellState.RUNNING
45 | assert shell_state.qstate == QState.RUNNING
46 | shell_state = ShellState.INTERRUPTIBLE_SLEEP
47 | assert shell_state.qstate == QState.RUNNING
48 | shell_state = ShellState.STOPPED
49 | assert shell_state.qstate == QState.SUSPENDED
50 | shell_state = ShellState.STOPPED_DEBUGGER
51 | assert shell_state.qstate == QState.SUSPENDED
52 | shell_state = ShellState.PAGING
53 | assert shell_state.qstate == QState.RUNNING
54 | shell_state = ShellState.DEAD
55 | assert shell_state.qstate == QState.DONE
56 | shell_state = ShellState.DEFUNCT
57 | assert shell_state.qstate == QState.DONE
58 |
59 |
60 | class TestShellIO:
61 | @pytest.mark.skipif(monty is None, reason="monty is not installed")
62 | def test_msonable(self, test_utils, shell_io):
63 | assert test_utils.is_msonable(shell_io)
64 |
65 | def test_get_submit_cmd(self):
66 | shell_io = ShellIO(blocking=True)
67 | submit_cmd = shell_io.get_submit_cmd(script_file="myscript.sh")
68 | assert submit_cmd == "bash myscript.sh > stdout 2> stderr"
69 | shell_io = ShellIO(blocking=False)
70 | submit_cmd = shell_io.get_submit_cmd(script_file="myscript.sh")
71 | assert submit_cmd == "nohup bash myscript.sh > stdout 2> stderr & echo $!"
72 |
73 | def test_parse_submit_output(self, shell_io):
74 | sr = shell_io.parse_submit_output(exit_code=0, stdout="13647\n", stderr="")
75 | assert sr == SubmissionResult(
76 | job_id="13647",
77 | step_id=None,
78 | exit_code=0,
79 | stdout="13647\n",
80 | stderr="",
81 | status=SubmissionStatus.SUCCESSFUL,
82 | )
83 | sr = shell_io.parse_submit_output(exit_code=0, stdout=b"13647\n", stderr=b"")
84 | assert sr == SubmissionResult(
85 | job_id="13647",
86 | step_id=None,
87 | exit_code=0,
88 | stdout="13647\n",
89 | stderr="",
90 | status=SubmissionStatus.SUCCESSFUL,
91 | )
92 | sr = shell_io.parse_submit_output(exit_code=104, stdout="tata", stderr=b"titi")
93 | assert sr == SubmissionResult(
94 | job_id=None,
95 | step_id=None,
96 | exit_code=104,
97 | stdout="tata",
98 | stderr="titi",
99 | status=SubmissionStatus.FAILED,
100 | )
101 | sr = shell_io.parse_submit_output(exit_code=0, stdout=b"\n", stderr="")
102 | assert sr == SubmissionResult(
103 | job_id=None,
104 | step_id=None,
105 | exit_code=0,
106 | stdout="\n",
107 | stderr="",
108 | status=SubmissionStatus.JOB_ID_UNKNOWN,
109 | )
110 |
111 | def test_parse_cancel_output(self, shell_io):
112 | cr = shell_io.parse_cancel_output(exit_code=0, stdout="", stderr="")
113 | assert cr == CancelResult(
114 | job_id=None,
115 | step_id=None,
116 | exit_code=0,
117 | stdout="",
118 | stderr="",
119 | status=CancelStatus.SUCCESSFUL,
120 | )
121 | cr = shell_io.parse_cancel_output(
122 | exit_code=1,
123 | stdout=b"",
124 | stderr=b"/bin/sh: line 1: kill: (14020) - No such process\n",
125 | )
126 | assert cr == CancelResult(
127 | job_id=None,
128 | step_id=None,
129 | exit_code=1,
130 | stdout="",
131 | stderr="/bin/sh: line 1: kill: (14020) - No such process\n",
132 | status=CancelStatus.FAILED,
133 | )
134 |
135 | def test_get_job_cmd(self, shell_io):
136 | get_job_cmd = shell_io.get_job_cmd(123)
137 | assert get_job_cmd == "ps -o pid,user,etime,state,comm -p 123"
138 | get_job_cmd = shell_io.get_job_cmd(456)
139 | assert get_job_cmd == "ps -o pid,user,etime,state,comm -p 456"
140 | get_job_cmd = shell_io.get_job_cmd(QJob(job_id="789"))
141 | assert get_job_cmd == "ps -o pid,user,etime,state,comm -p 789"
142 |
143 | def test_get_jobs_list_cmd(self, shell_io):
144 | get_jobs_list_cmd = shell_io.get_jobs_list_cmd(
145 | jobs=[QJob(job_id=125), 126, "127"], user=None
146 | )
147 | assert get_jobs_list_cmd == "ps -o pid,user,etime,state,comm -p 125,126,127"
148 | get_jobs_list_cmd = shell_io.get_jobs_list_cmd(jobs=None, user="johndoe")
149 | assert get_jobs_list_cmd == "ps -o pid,user,etime,state,comm -U johndoe"
150 | with pytest.raises(
151 | ValueError,
152 | match=r"Cannot query by user and job\(s\) with ps, "
153 | r"as the user option will override the ids list",
154 | ):
155 | shell_io.get_jobs_list_cmd(
156 | jobs=[QJob(job_id=125), 126, "127"], user="johndoe"
157 | )
158 |
159 | def test_parse_jobs_list_output(self, shell_io):
160 | joblist = shell_io.parse_jobs_list_output(
161 | exit_code=0,
162 | stdout=" PID USER ELAPSED S COMMAND\n 18092 davidwa+ 04:52 S bash\n 18112 davidwa+ 01:12 S bash\n",
163 | stderr="",
164 | )
165 | assert joblist == [
166 | QJob(
167 | job_id="18092",
168 | runtime=292,
169 | name="bash",
170 | state=QState.RUNNING,
171 | sub_state=ShellState.INTERRUPTIBLE_SLEEP,
172 | ),
173 | QJob(
174 | job_id="18112",
175 | runtime=72,
176 | name="bash",
177 | state=QState.RUNNING,
178 | sub_state=ShellState.INTERRUPTIBLE_SLEEP,
179 | ),
180 | ]
181 | with pytest.raises(
182 | CommandFailedError, match=r"command ps failed: .* string in stderr"
183 | ):
184 | shell_io.parse_jobs_list_output(
185 | exit_code=1,
186 | stdout=b"",
187 | stderr=b"string in stderr",
188 | )
189 | with pytest.raises(
190 | OutputParsingError, match=r"Unknown job state K for job id 18112"
191 | ):
192 | shell_io.parse_jobs_list_output(
193 | exit_code=0,
194 | stdout=b" PID USER ELAPSED S COMMAND\n 18092 davidwa+ 04:52 S bash\n 18112 davidwa+ 01:12 K bash\n",
195 | stderr=b"",
196 | )
197 | joblist = shell_io.parse_jobs_list_output(
198 | exit_code=0,
199 | stdout=b" PID USER ELAPSED S COMMAND\n\n",
200 | stderr=b"",
201 | )
202 | assert joblist == []
203 |
204 | def test_check_convert_qresources(self, shell_io):
205 | qr = QResources(processes=1)
206 | with pytest.raises(
207 | UnsupportedResourcesError,
208 | match=r"Keys not supported: process_placement, processes",
209 | ):
210 | shell_io.check_convert_qresources(qr)
211 |
212 | qr = QResources()
213 | assert shell_io.check_convert_qresources(qr) == {}
214 |
215 | qr = QResources(
216 | job_name="test", output_filepath="t.out", error_filepath="t.err"
217 | )
218 | assert shell_io.check_convert_qresources(qr) == {
219 | "job_name": "test",
220 | "qerr_path": "t.err",
221 | "qout_path": "t.out",
222 | }
223 |
224 | def test_header(self, shell_io):
225 | # check that the required elements are properly handled in header template
226 | options = {
227 | "qout_path": "/some/file",
228 | "qerr_path": "/some/file",
229 | "job_name": "NAME",
230 | }
231 | header = shell_io.generate_header(options)
232 | assert "exec > /some/file" in header
233 | assert "exec 2> /some/file" in header
234 | assert "NAME" in header
235 |
236 | def test_parse_job_output(self, shell_io):
237 | job = shell_io.parse_job_output(
238 | exit_code=0,
239 | stdout=" PID USER ELAPSED S COMMAND\n 18092 davidwa+ 04:52 S bash\n 18112 davidwa+ 01:12 S bash\n",
240 | stderr="",
241 | )
242 | assert isinstance(job, QJob)
243 | assert job.job_id == "18092"
244 | assert job.name == "bash"
245 | assert job.runtime == 292
246 | job = shell_io.parse_job_output(
247 | exit_code=0,
248 | stdout=" PID USER ELAPSED S COMMAND\n",
249 | stderr="",
250 | )
251 | assert job is None
252 |
253 | def test_convert_str_to_time(self, shell_io):
254 | assert shell_io._convert_str_to_time("") is None
255 | assert shell_io._convert_str_to_time(None) is None
256 | assert shell_io._convert_str_to_time("2-11:21:32") == 213692
257 | with pytest.raises(OutputParsingError):
258 | shell_io._convert_str_to_time("2-11:21:32:5")
259 | with pytest.raises(OutputParsingError):
260 | shell_io._convert_str_to_time("2-11:21:hello")
261 |
--------------------------------------------------------------------------------
/tests/test_data/io/pbs/create_parse_cancel_output_inout.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 | import yaml
4 |
5 | from qtoolkit.io.pbs import PBSIO
6 |
7 | pbs_io = PBSIO()
8 |
9 | mylist = []
10 |
11 | # First case: successful termination
12 | return_code = 0
13 | stdout = b""
14 | stderr = b""
15 |
16 | cr = pbs_io.parse_cancel_output(exit_code=return_code, stdout=stdout, stderr=stderr)
17 |
18 | a = {
19 | "parse_cancel_kwargs": json.dumps(
20 | {"exit_code": return_code, "stdout": stdout.decode(), "stderr": stderr.decode()}
21 | ),
22 | "cancel_result_ref": json.dumps(cr.as_dict()),
23 | }
24 | mylist.append(a)
25 |
26 | # Second case: no job identification provided
27 | return_code = 1
28 | stdout = b""
29 | stderr = b"""usage:
30 | qdel [-W force|suppress_email=X] [-x] job_identifier...
31 | qdel --version
32 | """
33 |
34 | cr = pbs_io.parse_cancel_output(exit_code=return_code, stdout=stdout, stderr=stderr)
35 |
36 | a = {
37 | "parse_cancel_kwargs": json.dumps(
38 | {"exit_code": return_code, "stdout": stdout.decode(), "stderr": stderr.decode()}
39 | ),
40 | "cancel_result_ref": json.dumps(cr.as_dict()),
41 | }
42 | mylist.append(a)
43 |
44 | # Third case: access/permission denied
45 | return_code = 210
46 | stdout = b""
47 | stderr = b"qdel: Unauthorized Request 210\n"
48 |
49 | cr = pbs_io.parse_cancel_output(exit_code=return_code, stdout=stdout, stderr=stderr)
50 |
51 | a = {
52 | "parse_cancel_kwargs": json.dumps(
53 | {"exit_code": return_code, "stdout": stdout.decode(), "stderr": stderr.decode()}
54 | ),
55 | "cancel_result_ref": json.dumps(cr.as_dict()),
56 | }
57 | mylist.append(a)
58 |
59 | # Fourth case: invalid job id
60 | return_code = 1
61 | stdout = b""
62 | stderr = b"qdel: illegally formed job identifier: a\n"
63 |
64 | cr = pbs_io.parse_cancel_output(exit_code=return_code, stdout=stdout, stderr=stderr)
65 |
66 | a = {
67 | "parse_cancel_kwargs": json.dumps(
68 | {"exit_code": return_code, "stdout": stdout.decode(), "stderr": stderr.decode()}
69 | ),
70 | "cancel_result_ref": json.dumps(cr.as_dict()),
71 | }
72 | mylist.append(a)
73 |
74 | # Fifth case: job already completed
75 | return_code = 1
76 | stdout = b""
77 | stderr = b"qdel: Job has finished 8\n"
78 |
79 | cr = pbs_io.parse_cancel_output(exit_code=return_code, stdout=stdout, stderr=stderr)
80 |
81 | a = {
82 | "parse_cancel_kwargs": json.dumps(
83 | {"exit_code": return_code, "stdout": stdout.decode(), "stderr": stderr.decode()}
84 | ),
85 | "cancel_result_ref": json.dumps(cr.as_dict()),
86 | }
87 | mylist.append(a)
88 |
89 | # Sixth case: unkwnown job id
90 | return_code = 1
91 | stdout = b""
92 | stderr = b"qdel: Unknown Job Id 120\n"
93 |
94 | cr = pbs_io.parse_cancel_output(exit_code=return_code, stdout=stdout, stderr=stderr)
95 |
96 | a = {
97 | "parse_cancel_kwargs": json.dumps(
98 | {"exit_code": return_code, "stdout": stdout.decode(), "stderr": stderr.decode()}
99 | ),
100 | "cancel_result_ref": json.dumps(cr.as_dict()),
101 | }
102 | mylist.append(a)
103 |
104 | with open("parse_cancel_output_inout.yaml", "w") as f:
105 | yaml.dump(mylist, f, sort_keys=False)
106 |
--------------------------------------------------------------------------------
/tests/test_data/io/pbs/create_parse_job_output_inout.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 | import yaml
4 |
5 | from qtoolkit.io.pbs import PBSIO
6 |
7 | pbs_io = PBSIO()
8 |
9 | mylist = []
10 |
11 | # First case: successful job parsing
12 | return_code = 0
13 | stdout = b"""Job Id: 14
14 | Job_Name = myscript_1
15 | Job_Owner = testu@f41a0fbae027
16 | resources_used.cpupercent = 0
17 | resources_used.cput = 00:00:00
18 | resources_used.mem = 0kb
19 | resources_used.ncpus = 1
20 | resources_used.vmem = 0kb
21 | resources_used.walltime = 00:00:00
22 | job_state = R
23 | queue = workq
24 | server = f41a0fbae027
25 | Checkpoint = u
26 | ctime = Sun Dec 29 20:13:12 2024
27 | Error_Path = f41a0fbae027:/home/testu/myscript_1.e14
28 | exec_host = f41a0fbae027/0
29 | exec_vnode = (f41a0fbae027:ncpus=1)
30 | Hold_Types = n
31 | Join_Path = n
32 | Keep_Files = n
33 | Mail_Points = a
34 | mtime = Sun Dec 29 20:13:14 2024
35 | Output_Path = f41a0fbae027:/home/testu/myscript_1.o14
36 | Priority = 0
37 | qtime = Sun Dec 29 20:13:12 2024
38 | Rerunable = True
39 | Resource_List.ncpus = 1
40 | Resource_List.nodect = 1
41 | Resource_List.nodes = 1:ppn=1
42 | Resource_List.place = scatter
43 | Resource_List.select = 1:ncpus=1
44 | Resource_List.walltime = 01:00:00
45 | stime = Sun Dec 29 20:13:12 2024
46 | session_id = 1534
47 | Shell_Path_List = /bin/bash
48 | jobdir = /home/testu
49 | substate = 42
50 | Variable_List = PBS_O_HOME=/home/testu,PBS_O_LANG=C.UTF-8,
51 | PBS_O_LOGNAME=testu,
52 | PBS_O_PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bi
53 | n:/usr/games:/usr/local/games:/snap/bin:/opt/pbs/bin,
54 | PBS_O_SHELL=/bin/bash,PBS_O_WORKDIR=/home/testu,PBS_O_SYSTEM=Linux,
55 | PBS_O_QUEUE=workq,PBS_O_HOST=f41a0fbae027
56 | comment = Job run at Sun Dec 29 at 20:13 on (f41a0fbae027:ncpus=1)
57 | etime = Sun Dec 29 20:13:12 2024
58 | run_count = 1
59 | Submit_arguments = test_submit.sh
60 | project = _pbs_project_default
61 | Submit_Host = f41a0fbae027
62 | """
63 | stderr = b""
64 | job = pbs_io.parse_job_output(exit_code=return_code, stdout=stdout, stderr=stderr)
65 | a = {
66 | "parse_job_kwargs": json.dumps(
67 | {"exit_code": return_code, "stdout": stdout.decode(), "stderr": stderr.decode()}
68 | ),
69 | "job_ref": json.dumps(job.as_dict()),
70 | }
71 | mylist.append(a)
72 |
73 |
74 | # Second case: empty stdout and stderr
75 | return_code = 0
76 | stdout = b""
77 | stderr = b""
78 | job = pbs_io.parse_job_output(exit_code=return_code, stdout=stdout, stderr=stderr)
79 | a = {
80 | "parse_job_kwargs": json.dumps(
81 | {"exit_code": return_code, "stdout": stdout.decode(), "stderr": stderr.decode()}
82 | ),
83 | "job_ref": json.dumps(job.as_dict() if job is not None else None),
84 | }
85 | mylist.append(a)
86 |
87 | with open("parse_job_output_inout.yaml", "w") as f:
88 | yaml.dump(mylist, f, sort_keys=False)
89 |
--------------------------------------------------------------------------------
/tests/test_data/io/pbs/create_parse_submit_output_inout.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 | import yaml
4 |
5 | from qtoolkit.io.pbs import PBSIO
6 |
7 | pbs_io = PBSIO()
8 |
9 | mylist = []
10 |
11 | # First case: invalid queue specified
12 | return_code = 1
13 | stdout = b""
14 | stderr = b"qsub: Unknown queue\n"
15 |
16 | sr = pbs_io.parse_submit_output(
17 | exit_code=return_code, stdout=stdout.decode(), stderr=stderr.decode()
18 | )
19 |
20 | a = {
21 | "parse_submit_kwargs": json.dumps(
22 | {"exit_code": return_code, "stdout": stdout.decode(), "stderr": stderr.decode()}
23 | ),
24 | "submission_result_ref": json.dumps(sr.as_dict()),
25 | }
26 | mylist.append(a)
27 |
28 | # Second case: successful submission
29 | return_code = 0
30 | stdout = b"24\n"
31 | stderr = b""
32 | sr = pbs_io.parse_submit_output(
33 | exit_code=return_code, stdout=stdout.decode(), stderr=stderr.decode()
34 | )
35 | a = {
36 | "parse_submit_kwargs": json.dumps(
37 | {"exit_code": return_code, "stdout": stdout.decode(), "stderr": stderr.decode()}
38 | ),
39 | "submission_result_ref": json.dumps(sr.as_dict()),
40 | }
41 | mylist.append(a)
42 |
43 |
44 | with open("parse_submit_output_inout.yaml", "w") as f:
45 | yaml.dump(mylist, f, sort_keys=False)
46 |
--------------------------------------------------------------------------------
/tests/test_data/io/pbs/parse_cancel_output_inout.yaml:
--------------------------------------------------------------------------------
1 | - parse_cancel_kwargs: '{"exit_code": 0, "stdout": "", "stderr": ""}'
2 | cancel_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "CancelResult",
3 | "@version": "0.1.5.post38+g62b683f.d20241229", "job_id": null, "step_id": null,
4 | "exit_code": 0, "stdout": "", "stderr": "", "status": {"@module": "qtoolkit.core.data_objects",
5 | "@class": "CancelStatus", "@version": "0.1.5.post38+g62b683f.d20241229", "value":
6 | "SUCCESSFUL"}}'
7 | - parse_cancel_kwargs: '{"exit_code": 1, "stdout": "", "stderr": "usage:\n qdel
8 | [-W force|suppress_email=X] [-x] job_identifier...\n qdel --version\n"}'
9 | cancel_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "CancelResult",
10 | "@version": "0.1.5.post38+g62b683f.d20241229", "job_id": null, "step_id": null,
11 | "exit_code": 1, "stdout": "", "stderr": "usage:\n qdel [-W force|suppress_email=X]
12 | [-x] job_identifier...\n qdel --version\n", "status": {"@module": "qtoolkit.core.data_objects",
13 | "@class": "CancelStatus", "@version": "0.1.5.post38+g62b683f.d20241229", "value":
14 | "FAILED"}}'
15 | - parse_cancel_kwargs: '{"exit_code": 210, "stdout": "", "stderr": "qdel: Unauthorized
16 | Request 210\n"}'
17 | cancel_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "CancelResult",
18 | "@version": "0.1.5.post38+g62b683f.d20241229", "job_id": null, "step_id": null,
19 | "exit_code": 210, "stdout": "", "stderr": "qdel: Unauthorized Request 210\n",
20 | "status": {"@module": "qtoolkit.core.data_objects", "@class": "CancelStatus",
21 | "@version": "0.1.5.post38+g62b683f.d20241229", "value": "FAILED"}}'
22 | - parse_cancel_kwargs: '{"exit_code": 1, "stdout": "", "stderr": "qdel: illegally
23 | formed job identifier: a\n"}'
24 | cancel_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "CancelResult",
25 | "@version": "0.1.5.post38+g62b683f.d20241229", "job_id": null, "step_id": null,
26 | "exit_code": 1, "stdout": "", "stderr": "qdel: illegally formed job identifier:
27 | a\n", "status": {"@module": "qtoolkit.core.data_objects", "@class": "CancelStatus",
28 | "@version": "0.1.5.post38+g62b683f.d20241229", "value": "FAILED"}}'
29 | - parse_cancel_kwargs: '{"exit_code": 1, "stdout": "", "stderr": "qdel: Job has finished
30 | 8\n"}'
31 | cancel_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "CancelResult",
32 | "@version": "0.1.5.post38+g62b683f.d20241229", "job_id": null, "step_id": null,
33 | "exit_code": 1, "stdout": "", "stderr": "qdel: Job has finished 8\n", "status":
34 | {"@module": "qtoolkit.core.data_objects", "@class": "CancelStatus", "@version":
35 | "0.1.5.post38+g62b683f.d20241229", "value": "FAILED"}}'
36 | - parse_cancel_kwargs: '{"exit_code": 1, "stdout": "", "stderr": "qdel: Unknown Job
37 | Id 120\n"}'
38 | cancel_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "CancelResult",
39 | "@version": "0.1.5.post38+g62b683f.d20241229", "job_id": null, "step_id": null,
40 | "exit_code": 1, "stdout": "", "stderr": "qdel: Unknown Job Id 120\n", "status":
41 | {"@module": "qtoolkit.core.data_objects", "@class": "CancelStatus", "@version":
42 | "0.1.5.post38+g62b683f.d20241229", "value": "FAILED"}}'
43 |
--------------------------------------------------------------------------------
/tests/test_data/io/pbs/parse_job_output_inout.yaml:
--------------------------------------------------------------------------------
1 | - parse_job_kwargs: '{"exit_code": 0, "stdout": "Job Id: 14\n Job_Name = myscript_1\n Job_Owner
2 | = testu@f41a0fbae027\n resources_used.cpupercent = 0\n resources_used.cput
3 | = 00:00:00\n resources_used.mem = 0kb\n resources_used.ncpus = 1\n resources_used.vmem
4 | = 0kb\n resources_used.walltime = 00:00:00\n job_state = R\n queue =
5 | workq\n server = f41a0fbae027\n Checkpoint = u\n ctime = Sun Dec 29 20:13:12
6 | 2024\n Error_Path = f41a0fbae027:/home/testu/myscript_1.e14\n exec_host
7 | = f41a0fbae027/0\n exec_vnode = (f41a0fbae027:ncpus=1)\n Hold_Types = n\n Join_Path
8 | = n\n Keep_Files = n\n Mail_Points = a\n mtime = Sun Dec 29 20:13:14
9 | 2024\n Output_Path = f41a0fbae027:/home/testu/myscript_1.o14\n Priority
10 | = 0\n qtime = Sun Dec 29 20:13:12 2024\n Rerunable = True\n Resource_List.ncpus
11 | = 1\n Resource_List.nodect = 1\n Resource_List.nodes = 1:ppn=1\n Resource_List.place
12 | = scatter\n Resource_List.select = 1:ncpus=1\n Resource_List.walltime =
13 | 01:00:00\n stime = Sun Dec 29 20:13:12 2024\n session_id = 1534\n Shell_Path_List
14 | = /bin/bash\n jobdir = /home/testu\n substate = 42\n Variable_List =
15 | PBS_O_HOME=/home/testu,PBS_O_LANG=C.UTF-8,\n PBS_O_LOGNAME=testu,\n PBS_O_PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bi\n n:/usr/games:/usr/local/games:/snap/bin:/opt/pbs/bin,\n PBS_O_SHELL=/bin/bash,PBS_O_WORKDIR=/home/testu,PBS_O_SYSTEM=Linux,\n PBS_O_QUEUE=workq,PBS_O_HOST=f41a0fbae027\n comment
16 | = Job run at Sun Dec 29 at 20:13 on (f41a0fbae027:ncpus=1)\n etime = Sun Dec
17 | 29 20:13:12 2024\n run_count = 1\n Submit_arguments = test_submit.sh\n project
18 | = _pbs_project_default\n Submit_Host = f41a0fbae027\n", "stderr": ""}'
19 | job_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "QJob", "@version":
20 | "0.1.5.post38+g62b683f.d20241229", "name": "myscript_1", "job_id": "14", "exit_status":
21 | null, "state": {"@module": "qtoolkit.core.data_objects", "@class": "QState", "@version":
22 | "0.1.5.post38+g62b683f.d20241229", "value": "RUNNING"}, "sub_state": {"@module":
23 | "qtoolkit.io.pbs", "@class": "PBSState", "@version": "0.1.5.post38+g62b683f.d20241229",
24 | "value": "R"}, "info": {"@module": "qtoolkit.core.data_objects", "@class": "QJobInfo",
25 | "@version": "0.1.5.post38+g62b683f.d20241229", "memory": null, "memory_per_cpu":
26 | null, "nodes": 1, "cpus": 1, "threads_per_process": null, "time_limit": 3600},
27 | "account": null, "runtime": 0, "queue_name": null}'
28 | - parse_job_kwargs: '{"exit_code": 0, "stdout": "", "stderr": ""}'
29 | job_ref: 'null'
30 |
--------------------------------------------------------------------------------
/tests/test_data/io/pbs/parse_submit_output_inout.yaml:
--------------------------------------------------------------------------------
1 | - parse_submit_kwargs: '{"exit_code": 1, "stdout": "", "stderr": "qsub: Unknown queue\n"}'
2 | submission_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "SubmissionResult",
3 | "@version": "0.1.5.post38+g62b683f.d20241229", "job_id": null, "step_id": null,
4 | "exit_code": 1, "stdout": "", "stderr": "qsub: Unknown queue\n", "status": {"@module":
5 | "qtoolkit.core.data_objects", "@class": "SubmissionStatus", "@version": "0.1.5.post38+g62b683f.d20241229",
6 | "value": "FAILED"}}'
7 | - parse_submit_kwargs: '{"exit_code": 0, "stdout": "24\n", "stderr": ""}'
8 | submission_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "SubmissionResult",
9 | "@version": "0.1.5.post38+g62b683f.d20241229", "job_id": "24", "step_id": null,
10 | "exit_code": 0, "stdout": "24\n", "stderr": "", "status": {"@module": "qtoolkit.core.data_objects",
11 | "@class": "SubmissionStatus", "@version": "0.1.5.post38+g62b683f.d20241229", "value":
12 | "SUCCESSFUL"}}'
13 |
--------------------------------------------------------------------------------
/tests/test_data/io/sge/create_parse_cancel_output_inout.py:
--------------------------------------------------------------------------------
"""Generate reference input/output pairs for SGEIO.parse_cancel_output.

Each case feeds an (exit_code, stdout, stderr) triple of ``qdel`` output
to the parser and records both the inputs and the JSON-serialized
CancelResult in parse_cancel_output_inout.yaml, which is consumed by the
SGE io tests.
"""
import json

import yaml

from qtoolkit.io.sge import SGEIO

sge_io = SGEIO()

# (description, exit_code, stdout, stderr) for each qdel scenario the
# parser must handle: success, missing/invalid job ids, permission
# errors, and jobs that already finished.
CASES = [
    ("successful termination", 0, b"", b"qdel: job 267 deleted\n"),
    ("no job identification provided", 1, b"", b"qdel: No job id specified\n"),
    ("access/permission denied", 210, b"", b"qdel: job 1 access denied\n"),
    ("invalid job id", 1, b"", b"qdel: Invalid job id a\n"),
    (
        "job already completed",
        0,
        b"",
        b"qdel: job 269 deleted\nqdel: job 269 already completed\n",
    ),
    (
        "invalid job id specified",
        0,
        b"",
        b"qdel: job 2675 deleted\nqdel: Invalid job id specified\n",
    ),
]

mylist = []
for _description, return_code, stdout, stderr in CASES:
    cr = sge_io.parse_cancel_output(
        exit_code=return_code, stdout=stdout, stderr=stderr
    )
    # Inputs are stored decoded so the YAML stays human-readable; key
    # order matters because the file is dumped with sort_keys=False.
    mylist.append(
        {
            "parse_cancel_kwargs": json.dumps(
                {
                    "exit_code": return_code,
                    "stdout": stdout.decode(),
                    "stderr": stderr.decode(),
                }
            ),
            "cancel_result_ref": json.dumps(cr.as_dict()),
        }
    )

with open("parse_cancel_output_inout.yaml", "w") as f:
    yaml.dump(mylist, f, sort_keys=False)
103 |
--------------------------------------------------------------------------------
/tests/test_data/io/sge/create_parse_job_output_inout.py:
--------------------------------------------------------------------------------
"""Generate reference input/output pairs for SGEIO.parse_job_output.

Writes parse_job_output_inout.yaml, pairing raw job-status output
(exit code, stdout, stderr) with the JSON-serialized QJob -- or the
recorded parsing error -- that SGEIO.parse_job_output produces.  The
YAML file is consumed by the SGE io tests.
"""
import json

import yaml

from qtoolkit.core.exceptions import OutputParsingError
from qtoolkit.io.sge import SGEIO

sge_io = SGEIO()

# Accumulates one {parse_job_kwargs, job_ref} record per case.
mylist = []

# First case: successful job parsing
# NOTE(review): the literal below looks like qstat output whose field
# markup may have been stripped (bare values, no field names) -- confirm
# against a real qstat capture before regenerating the reference data.
return_code = 0
stdout = b"""


270
submit.script
matgenix-dwa
matgenix-dwa
(null)
r
0
2023-10-11T11:08:17
main.q
1
1
00:05:00
96G


"""
stderr = b""
job = sge_io.parse_job_output(exit_code=return_code, stdout=stdout, stderr=stderr)
a = {
    "parse_job_kwargs": json.dumps(
        {"exit_code": return_code, "stdout": stdout.decode(), "stderr": stderr.decode()}
    ),
    "job_ref": json.dumps(job.as_dict()),
}
mylist.append(a)

# Second case: job parsing with invalid fields
# Two numeric fields are replaced by "a"; if the parser raises
# OutputParsingError, the error message is recorded instead of aborting.
return_code = 0
stdout = b"""


270
submit.script
matgenix-dwa
matgenix-dwa
(null)
r
0
2023-10-11T11:08:17
main.q
a
1
a
96G


"""
stderr = b""
try:
    job = sge_io.parse_job_output(exit_code=return_code, stdout=stdout, stderr=stderr)
    job_dict = job.as_dict()
except OutputParsingError as e:
    job_dict = {"error": str(e)}
a = {
    "parse_job_kwargs": json.dumps(
        {"exit_code": return_code, "stdout": stdout.decode(), "stderr": stderr.decode()}
    ),
    "job_ref": json.dumps(job_dict),
}
mylist.append(a)

# Third case: empty stdout and stderr
# With no job output the None guard below serializes the reference as
# null rather than calling as_dict() on a missing job.
return_code = 0
stdout = b""
stderr = b""
job = sge_io.parse_job_output(exit_code=return_code, stdout=stdout, stderr=stderr)
a = {
    "parse_job_kwargs": json.dumps(
        {"exit_code": return_code, "stdout": stdout.decode(), "stderr": stderr.decode()}
    ),
    "job_ref": json.dumps(job.as_dict() if job is not None else None),
}
mylist.append(a)

with open("parse_job_output_inout.yaml", "w") as f:
    yaml.dump(mylist, f, sort_keys=False)
93 |
--------------------------------------------------------------------------------
/tests/test_data/io/sge/create_parse_submit_output_inout.py:
--------------------------------------------------------------------------------
"""Generate reference input/output pairs for SGEIO.parse_submit_output.

Each case feeds an (exit_code, stdout, stderr) triple of ``qsub`` output
to the parser and records both the inputs and the JSON-serialized
SubmissionResult in parse_submit_output_inout.yaml, which is consumed by
the SGE io tests.
"""
import json

import yaml

from qtoolkit.io.sge import SGEIO

sge_io = SGEIO()

# (exit_code, stdout, stderr) triples covering one failed submission
# (invalid queue) and several successful submissions with distinct ids.
CASES = [
    (
        1,
        b"",
        b"qsub: Invalid queue specified: abcd\n"
        b"qsub: Job submission failed: Invalid queue name specified\n",
    ),
    (0, b'Your job 24 ("submit.script") has been submitted\n', b""),
    (0, b'Your job 15 ("submit.script") has been submitted\n', b""),
    (0, b'Your job 10 ("submit.script") has been submitted\n', b""),
    (0, b'Your job 124 ("submit.script") has been submitted\n', b""),
]

mylist = []
for return_code, stdout, stderr in CASES:
    # The parser is exercised with decoded strings here (unlike the
    # sibling cancel/job scripts, which pass bytes) -- kept as-is to
    # preserve the original reference-generation behavior.
    sr = sge_io.parse_submit_output(
        exit_code=return_code, stdout=stdout.decode(), stderr=stderr.decode()
    )
    mylist.append(
        {
            "parse_submit_kwargs": json.dumps(
                {
                    "exit_code": return_code,
                    "stdout": stdout.decode(),
                    "stderr": stderr.decode(),
                }
            ),
            "submission_result_ref": json.dumps(sr.as_dict()),
        }
    )

with open("parse_submit_output_inout.yaml", "w") as f:
    yaml.dump(mylist, f, sort_keys=False)
93 |
--------------------------------------------------------------------------------
/tests/test_data/io/sge/parse_cancel_output_inout.yaml:
--------------------------------------------------------------------------------
1 | - parse_cancel_kwargs: '{"exit_code": 0, "stdout": "", "stderr": "qdel: job 267 deleted\n"}'
2 | cancel_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "CancelResult", "@version": "0.1.1", "job_id": "267", "step_id": null, "exit_code": 0, "stdout": "", "stderr": "qdel: job 267 deleted\n", "status": {"@module": "qtoolkit.core.data_objects", "@class": "CancelStatus", "@version": "0.1.1", "value": "SUCCESSFUL"}}'
3 | - parse_cancel_kwargs: '{"exit_code": 1, "stdout": "", "stderr": "qdel: No job id specified\n"}'
4 | cancel_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "CancelResult", "@version": "0.1.1", "job_id": null, "step_id": null, "exit_code": 1, "stdout": "", "stderr": "qdel: No job id specified\n", "status": {"@module": "qtoolkit.core.data_objects", "@class": "CancelStatus", "@version": "0.1.1", "value": "FAILED"}}'
5 | - parse_cancel_kwargs: '{"exit_code": 210, "stdout": "", "stderr": "qdel: job 1 access denied\n"}'
6 | cancel_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "CancelResult", "@version": "0.1.1", "job_id": null, "step_id": null, "exit_code": 210, "stdout": "", "stderr": "qdel: job 1 access denied\n", "status": {"@module": "qtoolkit.core.data_objects", "@class": "CancelStatus", "@version": "0.1.1", "value": "FAILED"}}'
7 | - parse_cancel_kwargs: '{"exit_code": 1, "stdout": "", "stderr": "qdel: Invalid job id a\n"}'
8 | cancel_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "CancelResult", "@version": "0.1.1", "job_id": null, "step_id": null, "exit_code": 1, "stdout": "", "stderr": "qdel: Invalid job id a\n", "status": {"@module": "qtoolkit.core.data_objects", "@class": "CancelStatus", "@version": "0.1.1", "value": "FAILED"}}'
9 | - parse_cancel_kwargs: '{"exit_code": 0, "stdout": "", "stderr": "qdel: job 269 deleted\nqdel: job 269 already completed\n"}'
10 | cancel_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "CancelResult", "@version": "0.1.1", "job_id": "269", "step_id": null, "exit_code": 0, "stdout": "", "stderr": "qdel: job 269 deleted\nqdel: job 269 already completed\n", "status": {"@module": "qtoolkit.core.data_objects", "@class": "CancelStatus", "@version": "0.1.1", "value": "SUCCESSFUL"}}'
11 | - parse_cancel_kwargs: '{"exit_code": 0, "stdout": "", "stderr": "qdel: job 2675 deleted\nqdel: Invalid job id specified\n"}'
12 | cancel_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "CancelResult", "@version": "0.1.1", "job_id": "2675", "step_id": null, "exit_code": 0, "stdout": "", "stderr": "qdel: job 2675 deleted\nqdel: Invalid job id specified\n", "status": {"@module": "qtoolkit.core.data_objects", "@class": "CancelStatus", "@version": "0.1.1", "value": "SUCCESSFUL"}}'
13 |
--------------------------------------------------------------------------------
/tests/test_data/io/sge/parse_job_output_inout.yaml:
--------------------------------------------------------------------------------
1 | - parse_job_kwargs: '{"exit_code": 0, "stdout": "job_id: 270\njob_name: submit.script\nowner: matgenix-dwa\npriority: 4294901497\nstate: r\nstart_time: 2023-10-11T11:08:17\nend_time: 2023-10-11T11:13:17\nqueue_name: main\nslots: 1\nhard_wallclock: UNLIMITED\nstdout_path_list: /home/matgenix-dwa/software/qtoolkit/tests/test_data/io/sge/sge-270.out\nstderr_path_list: /home/matgenix-dwa/software/qtoolkit/tests/test_data/io/sge/sge-270.out\n"}'
2 | job_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "QJob", "@version": "0.1.1", "name": "submit.script", "job_id": "270", "exit_status": null, "state": {"@module": "qtoolkit.core.data_objects", "@class": "QState", "@version": "0.1.1", "value": "RUNNING"}, "sub_state": {"@module": "qtoolkit.io.sge", "@class": "SGEState", "@version": "0.1.1", "value": "r"}, "info": {"@module": "qtoolkit.core.data_objects", "@class": "QJobInfo", "@version": "0.1.1", "memory": null, "memory_per_cpu": null, "nodes": 1, "cpus": 1, "threads_per_process": 1, "time_limit": null}, "account": "matgenix-dwa", "runtime": null, "queue_name": "main"}'
3 | - parse_job_kwargs: '{"exit_code": 0, "stdout": "job_id: 270\njob_name: submit.script\nowner: matgenix-dwa\npriority: 4294901497\nstate: r\nstart_time: 2023-10-11T11:08:17\nend_time: 2023-10-11T11:13:17\nqueue_name: main\nslots: a\nhard_wallclock: a\nstdout_path_list: /home/matgenix-dwa/software/qtoolkit/tests/test_data/io/sge/sge-270.out\nstderr_path_list: /home/matgenix-dwa/software/qtoolkit/tests/test_data/io/sge/sge-270.out\n"}'
4 | job_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "QJob", "@version": "0.1.1", "name": "submit.script", "job_id": "270", "exit_status": null, "state": {"@module": "qtoolkit.core.data_objects", "@class": "QState", "@version": "0.1.1", "value": "RUNNING"}, "sub_state": {"@module": "qtoolkit.io.sge", "@class": "SGEState", "@version": "0.1.1", "value": "r"}, "info": {"@module": "qtoolkit.core.data_objects", "@class": "QJobInfo", "@version": "0.1.1", "memory": null, "memory_per_cpu": null, "nodes": null, "cpus": null, "threads_per_process": null, "time_limit": null}, "account": "matgenix-dwa", "runtime": null, "queue_name": "main"}'
5 | - parse_job_kwargs: '{"exit_code": 0, "stdout": "", "stderr": ""}'
6 | job_ref: 'null'
7 |
--------------------------------------------------------------------------------
/tests/test_data/io/sge/parse_submit_output_inout.yaml:
--------------------------------------------------------------------------------
1 | - parse_submit_kwargs: '{"exit_code": 1, "stdout": "", "stderr": "qsub: error: invalid queue specified: abcd\nqsub: error: Batch job submission failed: Invalid queue name specified\n"}'
2 | submission_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "SubmissionResult", "@version": "0.0.1+d20230127", "job_id": null, "step_id": null, "exit_code": 1, "stdout": "", "stderr": "qsub: error: invalid queue specified: abcd\nqsub: error: Batch job submission failed: Invalid queue name specified\n", "status": {"@module": "qtoolkit.core.data_objects", "@class": "SubmissionStatus", "@version": "0.0.1+d20230127", "value": "FAILED"}}'
3 | - parse_submit_kwargs: '{"exit_code": 0, "stdout": "Your job 24 (\"submit.script\") has been submitted\n", "stderr": ""}'
4 | submission_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "SubmissionResult", "@version": "0.0.1+d20230127", "job_id": "24", "step_id": null, "exit_code": 0, "stdout": "Your job 24 (\"submit.script\") has been submitted\n", "stderr": "", "status": {"@module": "qtoolkit.core.data_objects", "@class": "SubmissionStatus", "@version": "0.0.1+d20230127", "value": "SUCCESSFUL"}}'
5 | - parse_submit_kwargs: '{"exit_code": 0, "stdout": "Your job 15 (\"submit.script\") has been submitted\n", "stderr": ""}'
6 | submission_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "SubmissionResult", "@version": "0.0.1+d20230127", "job_id": "15", "step_id": null, "exit_code": 0, "stdout": "Your job 15 (\"submit.script\") has been submitted\n", "stderr": "", "status": {"@module": "qtoolkit.core.data_objects", "@class": "SubmissionStatus", "@version": "0.0.1+d20230127", "value": "SUCCESSFUL"}}'
7 | - parse_submit_kwargs: '{"exit_code": 0, "stdout": "Your job 10 (\"submit.script\") has been submitted\n", "stderr": ""}'
8 | submission_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "SubmissionResult", "@version": "0.0.1+d20230127", "job_id": "10", "step_id": null, "exit_code": 0, "stdout": "Your job 10 (\"submit.script\") has been submitted\n", "stderr": "", "status": {"@module": "qtoolkit.core.data_objects", "@class": "SubmissionStatus", "@version": "0.0.1+d20230127", "value": "SUCCESSFUL"}}'
9 | - parse_submit_kwargs: '{"exit_code": 0, "stdout": "Your job 124 (\"submit.script\") has been submitted\n", "stderr": ""}'
10 | submission_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "SubmissionResult", "@version": "0.0.1+d20230127", "job_id": "124", "step_id": null, "exit_code": 0, "stdout": "Your job 124 (\"submit.script\") has been submitted\n", "stderr": "", "status": {"@module": "qtoolkit.core.data_objects", "@class": "SubmissionStatus", "@version": "0.0.1+d20230127", "value": "SUCCESSFUL"}}'
11 | - parse_submit_kwargs: '{"exit_code": 0, "stdout": "Your job 24 (\"submit.script\") has been submitted\n", "stderr": ""}'
12 | submission_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "SubmissionResult", "@version": "0.0.1+d20230127", "job_id": "24", "step_id": null, "exit_code": 0, "stdout": "Your job 24 (\"submit.script\") has been submitted\n", "stderr": "", "status": {"@module": "qtoolkit.core.data_objects", "@class": "SubmissionStatus", "@version": "0.0.1+d20230127", "value": "SUCCESSFUL"}}'
13 | - parse_submit_kwargs: '{"exit_code": 0, "stdout": "Your job 15 (\"submit.script\") has been submitted\n", "stderr": ""}'
14 | submission_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "SubmissionResult", "@version": "0.0.1+d20230127", "job_id": "15", "step_id": null, "exit_code": 0, "stdout": "Your job 15 (\"submit.script\") has been submitted\n", "stderr": "", "status": {"@module": "qtoolkit.core.data_objects", "@class": "SubmissionStatus", "@version": "0.0.1+d20230127", "value": "SUCCESSFUL"}}'
15 | - parse_submit_kwargs: '{"exit_code": 0, "stdout": "Your job 10 (\"submit.script\") has been submitted\n", "stderr": ""}'
16 | submission_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "SubmissionResult", "@version": "0.0.1+d20230127", "job_id": "10", "step_id": null, "exit_code": 0, "stdout": "Your job 10 (\"submit.script\") has been submitted\n", "stderr": "", "status": {"@module": "qtoolkit.core.data_objects", "@class": "SubmissionStatus", "@version": "0.0.1+d20230127", "value": "SUCCESSFUL"}}'
17 | - parse_submit_kwargs: '{"exit_code": 0, "stdout": "Your job 124 (\"submit.script\") has been submitted\n", "stderr": ""}'
18 | submission_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "SubmissionResult", "@version": "0.0.1+d20230127", "job_id": "124", "step_id": null, "exit_code": 0, "stdout": "Your job 124 (\"submit.script\") has been submitted\n", "stderr": "", "status": {"@module": "qtoolkit.core.data_objects", "@class": "SubmissionStatus", "@version": "0.0.1+d20230127", "value": "SUCCESSFUL"}}'
19 |
--------------------------------------------------------------------------------
/tests/test_data/io/slurm/create_parse_cancel_output_inout.py:
--------------------------------------------------------------------------------
"""Generate reference input/output pairs for SlurmIO.parse_cancel_output.

Each case feeds an (exit_code, stdout, stderr) triple of ``scancel``
output to the parser and records both the inputs and the JSON-serialized
CancelResult in parse_cancel_output_inout.yaml, which is consumed by the
Slurm io tests.
"""
import json

import yaml

from qtoolkit.io.slurm import SlurmIO

slurm_io = SlurmIO()

# (exit_code, stdout, stderr) triples covering the scancel outcomes the
# parser must handle: success, missing/invalid job ids, permission
# errors, and jobs that are already completing or completed.
CASES = [
    (0, b"", b"scancel: Terminating job 267\n"),
    (1, b"", b"scancel: error: No job identification provided\n"),
    (
        210,
        b"",
        b"scancel: error: Kill job error on job id 1: Access/permission denied\n",
    ),
    (1, b"", b"scancel: error: Invalid job id a\n"),
    (
        0,
        b"",
        b"scancel: Terminating job 269\nscancel: error: Kill job error on job id 269: Job/step already completing or completed\n",
    ),
    (
        0,
        b"",
        b"scancel: Terminating job 2675\nscancel: error: Kill job error on job id 2675: Invalid job id specified\n",
    ),
]

mylist = []
for return_code, stdout, stderr in CASES:
    cr = slurm_io.parse_cancel_output(
        exit_code=return_code, stdout=stdout, stderr=stderr
    )
    # Inputs are stored decoded so the YAML stays human-readable; key
    # order matters because the file is dumped with sort_keys=False.
    mylist.append(
        {
            "parse_cancel_kwargs": json.dumps(
                {
                    "exit_code": return_code,
                    "stdout": stdout.decode(),
                    "stderr": stderr.decode(),
                }
            ),
            "cancel_result_ref": json.dumps(cr.as_dict()),
        }
    )

with open("parse_cancel_output_inout.yaml", "w") as f:
    yaml.dump(mylist, f, sort_keys=False)
103 |
--------------------------------------------------------------------------------
/tests/test_data/io/slurm/create_parse_job_output_inout.py:
--------------------------------------------------------------------------------
"""Generate reference input/output pairs for SlurmIO.parse_job_output.

Writes parse_job_output_inout.yaml, pairing raw ``scontrol show job``
style output (exit code, stdout, stderr) with the JSON-serialized QJob
that SlurmIO.parse_job_output produces.  The YAML file is consumed by
the Slurm io tests.
"""
import json

import yaml

from qtoolkit.io.slurm import SlurmIO

slurm_io = SlurmIO()

# Accumulates one {parse_job_kwargs, job_ref} record per case.
mylist = []

# Case 1: well-formed scontrol-style output for a completed job.
# NOTE(review): unlike case 3, there is no None guard on job here -- the
# parser is expected to return a QJob for this input.
return_code = 0
stdout = b"JobId=270 JobName=submit.script UserId=matgenix-dwa(1001) GroupId=matgenix-dwa(1002) MCS_label=N/A Priority=4294901497 Nice=0 Account=(null) QOS=normal JobState=COMPLETED Reason=None Dependency=(null) Requeue=1 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:0 RunTime=00:05:00 TimeLimit=UNLIMITED TimeMin=N/A SubmitTime=2023-10-11T11:08:17 EligibleTime=2023-10-11T11:08:17 AccrueTime=2023-10-11T11:08:17 StartTime=2023-10-11T11:08:17 EndTime=2023-10-11T11:13:17 Deadline=N/A SuspendTime=None SecsPreSuspend=0 LastSchedEval=2023-10-11T11:08:17 Scheduler=Main Partition=main AllocNode:Sid=matgenixdb:2556938 ReqNodeList=(null) ExcNodeList=(null) NodeList=matgenixdb BatchHost=matgenixdb NumNodes=1 NumCPUs=1 NumTasks=1 CPUs/Task=1 ReqB:S:C:T=0:0:*:* TRES=cpu=1,mem=96G,node=1,billing=1 Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=* MinCPUsNode=1 MinMemoryNode=0 MinTmpDiskNode=0 Features=(null) DelayBoot=00:00:00 OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null) Command=/home/matgenix-dwa/software/qtoolkit/tests/test_data/io/slurm/submit.script WorkDir=/home/matgenix-dwa/software/qtoolkit/tests/test_data/io/slurm StdErr=/home/matgenix-dwa/software/qtoolkit/tests/test_data/io/slurm/slurm-270.out StdIn=/dev/null StdOut=/home/matgenix-dwa/software/qtoolkit/tests/test_data/io/slurm/slurm-270.out Power= \n"
stderr = b""
job = slurm_io.parse_job_output(exit_code=return_code, stdout=stdout, stderr=stderr)
a = {
    "parse_job_kwargs": json.dumps(
        {"exit_code": return_code, "stdout": stdout.decode(), "stderr": stderr.decode()}
    ),
    "job_ref": json.dumps(job.as_dict()),
}
mylist.append(a)


# Case 2: same output, but TimeLimit, NumNodes, NumCPUs and CPUs/Task are
# replaced by the non-numeric "a" to exercise the parser on fields that
# cannot be converted.
return_code = 0
stdout = b"JobId=270 JobName=submit.script UserId=matgenix-dwa(1001) GroupId=matgenix-dwa(1002) MCS_label=N/A Priority=4294901497 Nice=0 Account=(null) QOS=normal JobState=COMPLETED Reason=None Dependency=(null) Requeue=1 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:0 RunTime=00:05:00 TimeLimit=a TimeMin=N/A SubmitTime=2023-10-11T11:08:17 EligibleTime=2023-10-11T11:08:17 AccrueTime=2023-10-11T11:08:17 StartTime=2023-10-11T11:08:17 EndTime=2023-10-11T11:13:17 Deadline=N/A SuspendTime=None SecsPreSuspend=0 LastSchedEval=2023-10-11T11:08:17 Scheduler=Main Partition=main AllocNode:Sid=matgenixdb:2556938 ReqNodeList=(null) ExcNodeList=(null) NodeList=matgenixdb BatchHost=matgenixdb NumNodes=a NumCPUs=a NumTasks=1 CPUs/Task=a ReqB:S:C:T=0:0:*:* TRES=cpu=1,mem=96G,node=1,billing=1 Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=* MinCPUsNode=1 MinMemoryNode=0 MinTmpDiskNode=0 Features=(null) DelayBoot=00:00:00 OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null) Command=/home/matgenix-dwa/software/qtoolkit/tests/test_data/io/slurm/submit.script WorkDir=/home/matgenix-dwa/software/qtoolkit/tests/test_data/io/slurm StdErr=/home/matgenix-dwa/software/qtoolkit/tests/test_data/io/slurm/slurm-270.out StdIn=/dev/null StdOut=/home/matgenix-dwa/software/qtoolkit/tests/test_data/io/slurm/slurm-270.out Power= \n"
stderr = b""
job = slurm_io.parse_job_output(exit_code=return_code, stdout=stdout, stderr=stderr)
a = {
    "parse_job_kwargs": json.dumps(
        {"exit_code": return_code, "stdout": stdout.decode(), "stderr": stderr.decode()}
    ),
    "job_ref": json.dumps(job.as_dict()),
}
mylist.append(a)


# Case 3: empty stdout/stderr -- the None guard below serializes the
# reference as null rather than calling as_dict() on a missing job.
return_code = 0
stdout = b""
stderr = b""
job = slurm_io.parse_job_output(exit_code=return_code, stdout=stdout, stderr=stderr)
a = {
    "parse_job_kwargs": json.dumps(
        {"exit_code": return_code, "stdout": stdout.decode(), "stderr": stderr.decode()}
    ),
    "job_ref": json.dumps(job.as_dict() if job is not None else None),
}
mylist.append(a)


with open("parse_job_output_inout.yaml", "w") as f:
    yaml.dump(mylist, f, sort_keys=False)
52 |
--------------------------------------------------------------------------------
/tests/test_data/io/slurm/create_parse_submit_output_inout.py:
--------------------------------------------------------------------------------
import json

import yaml

from qtoolkit.io.slurm import SlurmIO

# Generate the reference input/output pairs used by the SlurmIO
# parse_submit_output tests. Each case is a (return_code, stdout, stderr)
# triple fed to parse_submit_output; the parsed SubmissionResult is stored
# alongside the raw inputs in parse_submit_output_inout.yaml.
slurm_io = SlurmIO()

# Test cases, in the exact order they must appear in the YAML file:
# one failed submission, then every accepted sbatch/salloc success wording
# (capitalized and lowercase, with and without the command prefix).
cases = [
    (
        1,
        b"",
        b"sbatch: error: invalid partition specified: abcd\n"
        b"sbatch: error: Batch job submission failed: Invalid partition name specified\n",
    ),
    (0, b"Submitted batch job 24\n", b""),
    (0, b"submitted batch job 15\n", b""),
    (0, b"Granted job allocation 10\n", b""),
    (0, b"granted job allocation 124\n", b""),
    (0, b"sbatch: Submitted batch job 24\n", b""),
    (0, b"sbatch: submitted batch job 15\n", b""),
    (0, b"salloc: Granted job allocation 10\n", b""),
    (0, b"salloc: granted job allocation 124\n", b""),
]

mylist = []
for return_code, stdout, stderr in cases:
    sr = slurm_io.parse_submit_output(
        exit_code=return_code, stdout=stdout, stderr=stderr
    )
    # Inputs and reference output are both stored as JSON strings so the
    # fixture round-trips cleanly through YAML.
    mylist.append(
        {
            "parse_submit_kwargs": json.dumps(
                {
                    "exit_code": return_code,
                    "stdout": stdout.decode(),
                    "stderr": stderr.decode(),
                }
            ),
            "submission_result_ref": json.dumps(sr.as_dict()),
        }
    )


with open("parse_submit_output_inout.yaml", "w") as f:
    # sort_keys=False keeps the insertion order of the dict keys above.
    yaml.dump(mylist, f, sort_keys=False)
--------------------------------------------------------------------------------
/tests/test_data/io/slurm/parse_cancel_output_inout.yaml:
--------------------------------------------------------------------------------
1 | - parse_cancel_kwargs: '{"exit_code": 0, "stdout": "", "stderr": "scancel: Terminating
2 | job 267\n"}'
3 | cancel_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "CancelResult",
4 | "@version": "0.1.1", "job_id": "267", "step_id": null, "exit_code": 0, "stdout":
5 | "", "stderr": "scancel: Terminating job 267\n", "status": {"@module": "qtoolkit.core.data_objects",
6 | "@class": "CancelStatus", "@version": "0.1.1", "value": "SUCCESSFUL"}}'
7 | - parse_cancel_kwargs: '{"exit_code": 1, "stdout": "", "stderr": "scancel: error:
8 | No job identification provided\n"}'
9 | cancel_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "CancelResult",
10 | "@version": "0.1.1", "job_id": null, "step_id": null, "exit_code": 1, "stdout":
11 | "", "stderr": "scancel: error: No job identification provided\n", "status": {"@module":
12 | "qtoolkit.core.data_objects", "@class": "CancelStatus", "@version": "0.1.1", "value":
13 | "FAILED"}}'
14 | - parse_cancel_kwargs: '{"exit_code": 210, "stdout": "", "stderr": "scancel: error:
15 | Kill job error on job id 1: Access/permission denied\n"}'
16 | cancel_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "CancelResult",
17 | "@version": "0.1.1", "job_id": null, "step_id": null, "exit_code": 210, "stdout":
18 | "", "stderr": "scancel: error: Kill job error on job id 1: Access/permission denied\n",
19 | "status": {"@module": "qtoolkit.core.data_objects", "@class": "CancelStatus",
20 | "@version": "0.1.1", "value": "FAILED"}}'
21 | - parse_cancel_kwargs: '{"exit_code": 1, "stdout": "", "stderr": "scancel: error:
22 | Invalid job id a\n"}'
23 | cancel_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "CancelResult",
24 | "@version": "0.1.1", "job_id": null, "step_id": null, "exit_code": 1, "stdout":
25 | "", "stderr": "scancel: error: Invalid job id a\n", "status": {"@module": "qtoolkit.core.data_objects",
26 | "@class": "CancelStatus", "@version": "0.1.1", "value": "FAILED"}}'
27 | - parse_cancel_kwargs: '{"exit_code": 0, "stdout": "", "stderr": "scancel: Terminating
28 | job 269\nscancel: error: Kill job error on job id 269: Job/step already completing
29 | or completed\n"}'
30 | cancel_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "CancelResult",
31 | "@version": "0.1.1", "job_id": "269", "step_id": null, "exit_code": 0, "stdout":
32 | "", "stderr": "scancel: Terminating job 269\nscancel: error: Kill job error on
33 | job id 269: Job/step already completing or completed\n", "status": {"@module":
34 | "qtoolkit.core.data_objects", "@class": "CancelStatus", "@version": "0.1.1", "value":
35 | "SUCCESSFUL"}}'
36 | - parse_cancel_kwargs: '{"exit_code": 0, "stdout": "", "stderr": "scancel: Terminating
37 | job 2675\nscancel: error: Kill job error on job id 2675: Invalid job id specified\n"}'
38 | cancel_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "CancelResult",
39 | "@version": "0.1.1", "job_id": "2675", "step_id": null, "exit_code": 0, "stdout":
40 | "", "stderr": "scancel: Terminating job 2675\nscancel: error: Kill job error on
41 | job id 2675: Invalid job id specified\n", "status": {"@module": "qtoolkit.core.data_objects",
42 | "@class": "CancelStatus", "@version": "0.1.1", "value": "SUCCESSFUL"}}'
43 |
--------------------------------------------------------------------------------
/tests/test_data/io/slurm/parse_job_output_inout.yaml:
--------------------------------------------------------------------------------
1 | - parse_job_kwargs: '{"exit_code": 0, "stdout": "JobId=270 JobName=submit.script UserId=matgenix-dwa(1001)
2 | GroupId=matgenix-dwa(1002) MCS_label=N/A Priority=4294901497 Nice=0 Account=(null)
3 | QOS=normal JobState=COMPLETED Reason=None Dependency=(null) Requeue=1 Restarts=0
4 | BatchFlag=1 Reboot=0 ExitCode=0:0 RunTime=00:05:00 TimeLimit=UNLIMITED TimeMin=N/A
5 | SubmitTime=2023-10-11T11:08:17 EligibleTime=2023-10-11T11:08:17 AccrueTime=2023-10-11T11:08:17
6 | StartTime=2023-10-11T11:08:17 EndTime=2023-10-11T11:13:17 Deadline=N/A SuspendTime=None
7 | SecsPreSuspend=0 LastSchedEval=2023-10-11T11:08:17 Scheduler=Main Partition=main
8 | AllocNode:Sid=matgenixdb:2556938 ReqNodeList=(null) ExcNodeList=(null) NodeList=matgenixdb
9 | BatchHost=matgenixdb NumNodes=1 NumCPUs=1 NumTasks=1 CPUs/Task=1 ReqB:S:C:T=0:0:*:*
10 | TRES=cpu=1,mem=96G,node=1,billing=1 Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
11 | MinCPUsNode=1 MinMemoryNode=0 MinTmpDiskNode=0 Features=(null) DelayBoot=00:00:00
12 | OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null) Command=/home/matgenix-dwa/software/qtoolkit/tests/test_data/io/slurm/submit.script
13 | WorkDir=/home/matgenix-dwa/software/qtoolkit/tests/test_data/io/slurm StdErr=/home/matgenix-dwa/software/qtoolkit/tests/test_data/io/slurm/slurm-270.out
14 | StdIn=/dev/null StdOut=/home/matgenix-dwa/software/qtoolkit/tests/test_data/io/slurm/slurm-270.out
15 | Power= \n", "stderr": ""}'
16 | job_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "QJob", "@version":
17 | "0.1.1", "name": "submit.script", "job_id": "270", "exit_status": null, "state":
18 | {"@module": "qtoolkit.core.data_objects", "@class": "QState", "@version": "0.1.1",
19 | "value": "DONE"}, "sub_state": {"@module": "qtoolkit.io.slurm", "@class": "SlurmState",
20 | "@version": "0.1.1", "value": "COMPLETED"}, "info": {"@module": "qtoolkit.core.data_objects",
21 | "@class": "QJobInfo", "@version": "0.1.1", "memory": null, "memory_per_cpu": null,
22 | "nodes": 1, "cpus": 1, "threads_per_process": 1, "time_limit": null}, "account":
23 | "matgenix-dwa(1001)", "runtime": null, "queue_name": "main"}'
24 | - parse_job_kwargs: '{"exit_code": 0, "stdout": "JobId=270 JobName=submit.script UserId=matgenix-dwa(1001)
25 | GroupId=matgenix-dwa(1002) MCS_label=N/A Priority=4294901497 Nice=0 Account=(null)
26 | QOS=normal JobState=COMPLETED Reason=None Dependency=(null) Requeue=1 Restarts=0
27 | BatchFlag=1 Reboot=0 ExitCode=0:0 RunTime=00:05:00 TimeLimit=a TimeMin=N/A SubmitTime=2023-10-11T11:08:17
28 | EligibleTime=2023-10-11T11:08:17 AccrueTime=2023-10-11T11:08:17 StartTime=2023-10-11T11:08:17
29 | EndTime=2023-10-11T11:13:17 Deadline=N/A SuspendTime=None SecsPreSuspend=0 LastSchedEval=2023-10-11T11:08:17
30 | Scheduler=Main Partition=main AllocNode:Sid=matgenixdb:2556938 ReqNodeList=(null)
31 | ExcNodeList=(null) NodeList=matgenixdb BatchHost=matgenixdb NumNodes=a NumCPUs=a
32 | NumTasks=1 CPUs/Task=a ReqB:S:C:T=0:0:*:* TRES=cpu=1,mem=96G,node=1,billing=1
33 | Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=* MinCPUsNode=1 MinMemoryNode=0
34 | MinTmpDiskNode=0 Features=(null) DelayBoot=00:00:00 OverSubscribe=OK Contiguous=0
35 | Licenses=(null) Network=(null) Command=/home/matgenix-dwa/software/qtoolkit/tests/test_data/io/slurm/submit.script
36 | WorkDir=/home/matgenix-dwa/software/qtoolkit/tests/test_data/io/slurm StdErr=/home/matgenix-dwa/software/qtoolkit/tests/test_data/io/slurm/slurm-270.out
37 | StdIn=/dev/null StdOut=/home/matgenix-dwa/software/qtoolkit/tests/test_data/io/slurm/slurm-270.out
38 | Power= \n", "stderr": ""}'
39 | job_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "QJob", "@version":
40 | "0.1.1", "name": "submit.script", "job_id": "270", "exit_status": null, "state":
41 | {"@module": "qtoolkit.core.data_objects", "@class": "QState", "@version": "0.1.1",
42 | "value": "DONE"}, "sub_state": {"@module": "qtoolkit.io.slurm", "@class": "SlurmState",
43 | "@version": "0.1.1", "value": "COMPLETED"}, "info": {"@module": "qtoolkit.core.data_objects",
44 | "@class": "QJobInfo", "@version": "0.1.1", "memory": null, "memory_per_cpu": null,
45 | "nodes": null, "cpus": null, "threads_per_process": null, "time_limit": null},
46 | "account": "matgenix-dwa(1001)", "runtime": null, "queue_name": "main"}'
47 | - parse_job_kwargs: '{"exit_code": 0, "stdout": "", "stderr": ""}'
48 | job_ref: 'null'
49 |
--------------------------------------------------------------------------------
/tests/test_data/io/slurm/parse_submit_output_inout.yaml:
--------------------------------------------------------------------------------
1 | - parse_submit_kwargs: '{"exit_code": 1, "stdout": "", "stderr": "sbatch: error: invalid
2 | partition specified: abcd\nsbatch: error: Batch job submission failed: Invalid
3 | partition name specified\n"}'
4 | submission_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "SubmissionResult",
5 | "@version": "0.0.1+d20230127", "job_id": null, "step_id": null, "exit_code": 1,
6 | "stdout": "", "stderr": "sbatch: error: invalid partition specified: abcd\nsbatch:
7 | error: Batch job submission failed: Invalid partition name specified\n", "status":
8 | {"@module": "qtoolkit.core.data_objects", "@class": "SubmissionStatus", "@version":
9 | "0.0.1+d20230127", "value": "FAILED"}}'
10 | - parse_submit_kwargs: '{"exit_code": 0, "stdout": "Submitted batch job 24\n", "stderr":
11 | ""}'
12 | submission_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "SubmissionResult",
13 | "@version": "0.0.1+d20230127", "job_id": "24", "step_id": null, "exit_code": 0,
14 | "stdout": "Submitted batch job 24\n", "stderr": "", "status": {"@module": "qtoolkit.core.data_objects",
15 | "@class": "SubmissionStatus", "@version": "0.0.1+d20230127", "value": "SUCCESSFUL"}}'
16 | - parse_submit_kwargs: '{"exit_code": 0, "stdout": "submitted batch job 15\n", "stderr":
17 | ""}'
18 | submission_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "SubmissionResult",
19 | "@version": "0.0.1+d20230127", "job_id": "15", "step_id": null, "exit_code": 0,
20 | "stdout": "submitted batch job 15\n", "stderr": "", "status": {"@module": "qtoolkit.core.data_objects",
21 | "@class": "SubmissionStatus", "@version": "0.0.1+d20230127", "value": "SUCCESSFUL"}}'
22 | - parse_submit_kwargs: '{"exit_code": 0, "stdout": "Granted job allocation 10\n",
23 | "stderr": ""}'
24 | submission_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "SubmissionResult",
25 | "@version": "0.0.1+d20230127", "job_id": "10", "step_id": null, "exit_code": 0,
26 | "stdout": "Granted job allocation 10\n", "stderr": "", "status": {"@module": "qtoolkit.core.data_objects",
27 | "@class": "SubmissionStatus", "@version": "0.0.1+d20230127", "value": "SUCCESSFUL"}}'
28 | - parse_submit_kwargs: '{"exit_code": 0, "stdout": "granted job allocation 124\n",
29 | "stderr": ""}'
30 | submission_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "SubmissionResult",
31 | "@version": "0.0.1+d20230127", "job_id": "124", "step_id": null, "exit_code":
32 | 0, "stdout": "granted job allocation 124\n", "stderr": "", "status": {"@module":
33 | "qtoolkit.core.data_objects", "@class": "SubmissionStatus", "@version": "0.0.1+d20230127",
34 | "value": "SUCCESSFUL"}}'
35 | - parse_submit_kwargs: '{"exit_code": 0, "stdout": "sbatch: Submitted batch job 24\n",
36 | "stderr": ""}'
37 | submission_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "SubmissionResult",
38 | "@version": "0.0.1+d20230127", "job_id": "24", "step_id": null, "exit_code": 0,
39 | "stdout": "sbatch: Submitted batch job 24\n", "stderr": "", "status": {"@module":
40 | "qtoolkit.core.data_objects", "@class": "SubmissionStatus", "@version": "0.0.1+d20230127",
41 | "value": "SUCCESSFUL"}}'
42 | - parse_submit_kwargs: '{"exit_code": 0, "stdout": "sbatch: submitted batch job 15\n",
43 | "stderr": ""}'
44 | submission_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "SubmissionResult",
45 | "@version": "0.0.1+d20230127", "job_id": "15", "step_id": null, "exit_code": 0,
46 | "stdout": "sbatch: submitted batch job 15\n", "stderr": "", "status": {"@module":
47 | "qtoolkit.core.data_objects", "@class": "SubmissionStatus", "@version": "0.0.1+d20230127",
48 | "value": "SUCCESSFUL"}}'
49 | - parse_submit_kwargs: '{"exit_code": 0, "stdout": "salloc: Granted job allocation
50 | 10\n", "stderr": ""}'
51 | submission_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "SubmissionResult",
52 | "@version": "0.0.1+d20230127", "job_id": "10", "step_id": null, "exit_code": 0,
53 | "stdout": "salloc: Granted job allocation 10\n", "stderr": "", "status": {"@module":
54 | "qtoolkit.core.data_objects", "@class": "SubmissionStatus", "@version": "0.0.1+d20230127",
55 | "value": "SUCCESSFUL"}}'
56 | - parse_submit_kwargs: '{"exit_code": 0, "stdout": "salloc: granted job allocation
57 | 124\n", "stderr": ""}'
58 | submission_result_ref: '{"@module": "qtoolkit.core.data_objects", "@class": "SubmissionResult",
59 | "@version": "0.0.1+d20230127", "job_id": "124", "step_id": null, "exit_code":
60 | 0, "stdout": "salloc: granted job allocation 124\n", "stderr": "", "status": {"@module":
61 | "qtoolkit.core.data_objects", "@class": "SubmissionStatus", "@version": "0.0.1+d20230127",
62 | "value": "SUCCESSFUL"}}'
63 |
--------------------------------------------------------------------------------
/tests/test_qtoolkit.py:
--------------------------------------------------------------------------------
1 | # def test_version():
2 | # assert __version__ == '0.1.0'
3 |
--------------------------------------------------------------------------------
/tests/test_utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | from pathlib import Path
3 |
4 | from monty.tempfile import ScratchDir
5 |
6 | from qtoolkit.utils import cd
7 |
8 |
def test_cd():
    """Verify that cd() changes into a directory and restores the cwd on exit."""
    with ScratchDir("."):
        # Set up a subdirectory containing a single empty file.
        workdir = Path("mydir")
        workdir.mkdir()
        (workdir / "empty_file.txt").touch()

        with cd(workdir):
            # Inside the context, the file resolves relative to the cwd.
            assert os.path.exists("empty_file.txt")
        # After leaving the context the original cwd is restored, so the
        # same relative path no longer exists.
        assert not os.path.exists("empty_file.txt")
19 |
--------------------------------------------------------------------------------