├── .dockerignore
├── .github
│   └── workflows
│       ├── ebrains.yml
│       └── test_simqueue.yml
├── .gitignore
├── .gitlab-ci.yml
├── LICENSE
├── README.md
├── api
│   ├── README.md
│   ├── dashboard
│   │   ├── index.html
│   │   ├── js
│   │   │   ├── active-user-count.js
│   │   │   ├── cumulative-job-count.js
│   │   │   ├── cumulative-project-count.js
│   │   │   ├── cumulative-user-count.js
│   │   │   ├── job-count.js
│   │   │   ├── job-duration.js
│   │   │   ├── queue-length.js
│   │   │   └── quota-usage.js
│   │   └── lib
│   │       ├── c3
│   │       │   ├── c3.css
│   │       │   ├── c3.js
│   │       │   ├── c3.min.css
│   │       │   └── c3.min.js
│   │       ├── d3
│   │       │   ├── d3.js
│   │       │   └── d3.min.js
│   │       └── hbp-collaboratory-theme
│   │           └── dist
│   │               └── css
│   │                   └── bootstrap.css
│   ├── deployment
│   │   ├── Dockerfile.prod
│   │   ├── Dockerfile.staging
│   │   ├── docker-compose-template.yml
│   │   ├── nginx-app-prod.conf
│   │   ├── nginx-app-staging.conf
│   │   └── supervisor-app.conf
│   ├── pyproject.toml
│   ├── requirements.txt
│   ├── requirements.txt.lock
│   ├── requirements_testing.txt
│   ├── setup_test_db.py
│   └── simqueue
│       ├── __init__.py
│       ├── data_models.py
│       ├── data_repositories.py
│       ├── db.py
│       ├── globals.py
│       ├── main.py
│       ├── oauth.py
│       ├── resources
│       │   ├── __init__.py
│       │   ├── auth.py
│       │   ├── for_admins.py
│       │   ├── for_providers.py
│       │   ├── for_users.py
│       │   └── statistics.py
│       ├── settings.py
│       ├── tests
│       │   ├── __init__.py
│       │   ├── test_auth_router.py
│       │   ├── test_db_access.py
│       │   ├── test_integration.py
│       │   ├── test_oauth.py
│       │   ├── test_queue_router.py
│       │   ├── test_quotas_router.py
│       │   ├── test_repositories.py
│       │   ├── test_statistics_router.py
│       │   └── test_utility_functions.py
│       └── utils.py
├── documentation
│   ├── Dockerfile
│   ├── developer_guide
│   │   ├── Makefile
│   │   ├── _extra
│   │   │   └── extra.js
│   │   ├── _templates
│   │   │   └── layout.html
│   │   ├── architecture.rst
│   │   ├── conf.py
│   │   ├── deployment.rst
│   │   ├── development_environment.rst
│   │   ├── index.rst
│   │   ├── monitoring.rst
│   │   └── testing.rst
│   ├── issuetracker.html
│   ├── nginx_default
│   └── splash
│       ├── home.html
│       ├── img
│       │   ├── NeuromorphicSystem_BrainScaleS_blur.jpg
│       │   ├── P1000727lowres.jpg
│       │   ├── P1000939lowres.jpg
│       │   ├── SpiNN5blur.jpg
│       │   ├── banner.jpg
│       │   ├── banner2.jpeg
│       │   ├── common
│       │   │   ├── 18_FRAUNDHOFER.jpg
│       │   │   ├── 33_KTH.jpg
│       │   │   ├── 39_POLITO.jpg
│       │   │   ├── 46_SU.jpg
│       │   │   ├── 52_TUD.jpg
│       │   │   ├── Fzi_logo.518fad9e.png
│       │   │   ├── Logo_EPFL.bd241da6.png
│       │   │   ├── SSSA_logo_eng.e1670e5b.png
│       │   │   ├── Tum_logo.6de952ac.gif
│       │   │   ├── cnrs_logo_140.png
│       │   │   ├── fortiss-logo.e0554979.png
│       │   │   ├── hbp-logo.e99c1e9f.png
│       │   │   ├── logo-university-of-manchester.png
│       │   │   └── uhei.jpg
│       │   ├── from_your_laptop.jpeg
│       │   └── postprocessed_wafer.jpg
│       └── styles
│           ├── main.093d5d20.css
│           └── vendor.4f071187.css
└── get_build_info.sh
/.dockerignore:
--------------------------------------------------------------------------------
1 | .git
2 | backups
3 | misc
4 | django.log
5 |
--------------------------------------------------------------------------------
/.github/workflows/ebrains.yml:
--------------------------------------------------------------------------------
1 | name: Mirror to EBRAINS
2 |
3 | on:
4 | push:
5 | branches: [ master, staging, api-v2 ]
6 |
7 | jobs:
8 | to_ebrains:
9 | runs-on: ubuntu-latest
10 | steps:
11 | - name: syncmaster
12 | uses: wei/git-sync@v3
13 | with:
14 | source_repo: "HumanBrainProject/hbp_neuromorphic_platform"
15 | source_branch: "master"
16 | destination_repo: "https://ghpusher:${{ secrets.EBRAINS_GITLAB_ACCESS_TOKEN }}@gitlab.ebrains.eu/neuromorphic/job-queue-api.git"
17 | destination_branch: "main"
18 | - name: syncstaging
19 | uses: wei/git-sync@v3
20 | with:
21 | source_repo: "HumanBrainProject/hbp_neuromorphic_platform"
22 | source_branch: "staging"
23 | destination_repo: "https://ghpusher:${{ secrets.EBRAINS_GITLAB_ACCESS_TOKEN }}@gitlab.ebrains.eu/neuromorphic/job-queue-api.git"
24 | destination_branch: "staging"
25 | - name: sync-api-v3
26 | uses: wei/git-sync@v3
27 | with:
28 | source_repo: "HumanBrainProject/hbp_neuromorphic_platform"
29 | source_branch: "api-v2"
30 | destination_repo: "https://ghpusher:${{ secrets.EBRAINS_GITLAB_ACCESS_TOKEN }}@gitlab.ebrains.eu/neuromorphic/job-queue-api.git"
31 | destination_branch: "api-v2"
32 | - name: synctags
33 | uses: wei/git-sync@v3
34 | with:
35 | source_repo: "HumanBrainProject/hbp_neuromorphic_platform"
36 | source_branch: "refs/tags/*"
37 | destination_repo: "https://ghpusher:${{ secrets.EBRAINS_GITLAB_ACCESS_TOKEN }}@gitlab.ebrains.eu/neuromorphic/job-queue-api.git"
38 | destination_branch: "refs/tags/*"
39 |
--------------------------------------------------------------------------------
/.github/workflows/test_simqueue.yml:
--------------------------------------------------------------------------------
1 | name: Test simqueue API
2 |
3 | on:
4 | push:
5 | branches: [ "master", "staging" ]
6 | pull_request:
7 | branches: [ "master", "staging" ]
8 |
9 | permissions:
10 | contents: read
11 |
12 | jobs:
13 | test-stable:
14 |
15 | runs-on: ubuntu-latest
16 |
17 | steps:
18 | - uses: actions/checkout@v4
19 | - name: Set up Python 3.11
20 | uses: actions/setup-python@v5
21 | with:
22 | python-version: "3.11"
23 | - name: Install dependencies
24 | run: |
25 | python -m pip install --upgrade pip
26 | pip install -r api/requirements.txt.lock
27 | pip install -r api/requirements_testing.txt
28 | - name: Lint with flake8
29 | run: |
30 | # stop the build if there are Python syntax errors or undefined names
31 | flake8 api --count --select=E9,F63,F7,F82 --show-source --statistics
32 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
33 | flake8 api --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
34 | - name: Test with pytest
35 | run: |
36 | cd api
37 | pytest --cov=simqueue --cov-report=term
38 |
39 | test-latest:
40 |
41 | runs-on: ubuntu-latest
42 |
43 | steps:
44 | - uses: actions/checkout@v4
45 | - name: Set up Python 3.13
46 | uses: actions/setup-python@v5
47 | with:
48 | python-version: "3.13"
49 | - name: Install dependencies
50 | run: |
51 | python -m pip install --upgrade pip
52 | pip install -r api/requirements.txt
53 | pip install -r api/requirements_testing.txt
54 | - name: Lint with flake8
55 | run: |
56 | # stop the build if there are Python syntax errors or undefined names
57 | flake8 api --count --select=E9,F63,F7,F82 --show-source --statistics
58 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
59 | flake8 api --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
60 | - name: Test with pytest
61 | run: |
62 | cd api
63 | pytest --cov=simqueue --cov-report=term
64 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.log
2 | *.pyc
3 | *.orig
4 | .idea
5 | _build
6 | packages
7 | packages_
8 | *_old
9 | htmlcov
10 | coverage
11 | .venv
12 |
13 | # ensure these are not tracked by git
14 | *secrets.yml
15 | letsencrypt
16 | ssl
17 | env_local.sh
18 | docker-compose.yml
19 | docker-compose-staging.yml
20 |
--------------------------------------------------------------------------------
/.gitlab-ci.yml:
--------------------------------------------------------------------------------
1 | stages:
2 | - build
3 | - test
4 |
5 | build_job_queue_v2:
6 | stage: build
7 | only:
8 | variables:
9 | - $CI_COMMIT_BRANCH == "api-v2"
10 | script:
11 | - bash get_build_info.sh v2
12 | - docker build -f job_manager/Dockerfile.prod -t docker-registry.ebrains.eu/neuromorphic/nmpi_queue_server:v2 .
13 | - echo $DOCKER_REGISTRY_USER
14 | - docker login -u $DOCKER_REGISTRY_USER -p $DOCKER_REGISTRY_SECRET docker-registry.ebrains.eu
15 | - docker push docker-registry.ebrains.eu/neuromorphic/nmpi_queue_server:v2
16 | tags:
17 | - shell-runner
18 |
19 | build_quotas_v2:
20 | stage: build
21 | only:
22 | variables:
23 | - $CI_COMMIT_BRANCH == "api-v2"
24 | script:
25 | - bash get_build_info.sh v2
26 | - docker build -f resource_manager/Dockerfile.prod -t docker-registry.ebrains.eu/neuromorphic/nmpi_resource_manager:v2 .
27 | - echo $DOCKER_REGISTRY_USER
28 | - docker login -u $DOCKER_REGISTRY_USER -p $DOCKER_REGISTRY_SECRET docker-registry.ebrains.eu
29 | - docker push docker-registry.ebrains.eu/neuromorphic/nmpi_resource_manager:v2
30 | tags:
31 | - shell-runner
32 |
33 |
34 | build_job_queue_v3_staging:
35 | stage: build
36 | only:
37 | variables:
38 | - $CI_COMMIT_BRANCH == "staging"
39 | script:
40 | - bash get_build_info.sh staging
41 | - docker build -f api/deployment/Dockerfile.staging -t docker-registry.ebrains.eu/neuromorphic/nmpi_queue_server_v3:staging api
42 | - echo $DOCKER_REGISTRY_USER
43 | - docker login -u $DOCKER_REGISTRY_USER -p $DOCKER_REGISTRY_SECRET docker-registry.ebrains.eu
44 | - docker push docker-registry.ebrains.eu/neuromorphic/nmpi_queue_server_v3:staging
45 | tags:
46 | - shell-runner
47 |
48 |
49 | test_job_queue_v3_staging:
50 | stage: test
51 | only:
52 | variables:
53 | - $CI_COMMIT_BRANCH == "staging"
54 | services:
55 | - postgres:14
56 | variables:
57 | EBRAINS_IAM_SERVICE_URL: https://iam-int.ebrains.eu/auth/realms/hbp
58 | EBRAINS_COLLAB_SERVICE_URL: https://wiki-int.ebrains.eu/rest/v1/
59 | EBRAINS_DRIVE_SERVICE_URL: drive-int.ebrains.eu
60 | EBRAINS_BUCKET_SERVICE_URL: data-proxy-int.ebrains.eu
61 | NMPI_DATABASE_USER: test_user
62 | NMPI_DATABASE_PASSWORD: abc123
63 | NMPI_DATABASE_HOST: postgres
64 | NMPI_BASE_URL: http://localhost:8000
65 | POSTGRES_DB: postgres
66 | POSTGRES_USER: postgres
67 | POSTGRES_HOST_AUTH_METHOD: trust
68 | script:
69 | - export PGPASSWORD=$POSTGRES_PASSWORD
70 | - python3 -m pip install -r api/requirements.txt.lock
71 | - python3 -m pip install -r api/requirements_testing.txt
72 | - cd api
73 | - python3 setup_test_db.py
74 | - python3 -m pytest -v --cov=simqueue --cov-report=term
75 | tags:
76 | - docker-runner
77 | image: docker-registry.ebrains.eu/neuromorphic/python:3.10-slim
78 |
79 |
80 | build_job_queue_v3_production:
81 | stage: build
82 | only:
83 | variables:
84 | - $CI_COMMIT_BRANCH == "main"
85 | script:
86 | - bash get_build_info.sh production
87 | - docker build -f api/deployment/Dockerfile.prod -t docker-registry.ebrains.eu/neuromorphic/nmpi_queue_server_v3:prod api
88 | - echo $DOCKER_REGISTRY_USER
89 | - docker login -u $DOCKER_REGISTRY_USER -p $DOCKER_REGISTRY_SECRET docker-registry.ebrains.eu
90 | - docker push docker-registry.ebrains.eu/neuromorphic/nmpi_queue_server_v3:prod
91 | tags:
92 | - shell-runner
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright 2019 Centre National de la Recherche Scientifique
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # EBRAINS Neuromorphic Computing Job Queue and Quota API
2 |
3 | The EBRAINS neuromorphic computing remote access service allows users to run simulations/emulations
4 | on the [SpiNNaker](https://www.ebrains.eu/tools/spinnaker)
5 | and [BrainScaleS](https://www.ebrains.eu/tools/brainscales) systems
6 | by submitting a [PyNN](http://neuralensemble.org/docs/PyNN/) script
7 | and associated job configuration information to a central queue.
8 |
9 | The service consists of:
10 | - a [web API](https://nmpi-v3.hbpneuromorphic.eu/docs) (this repository)
11 | - a GUI client (the [Job Manager app](https://neuromorphic-job-manager.apps.ebrains.eu/); [code](https://github.com/HumanBrainProject/nmpi-job-manager-app))
12 | - a [Python/command-line client](https://github.com/HumanBrainProject/hbp-neuromorphic-client).
13 |
14 | Users can submit scripts stored locally on their own machine, in a public Git repository,
15 | in the [EBRAINS Knowledge Graph](https://search.kg.ebrains.eu/?category=Model),
16 | or in [EBRAINS Collaboratory](https://wiki.ebrains.eu/) storage (Drive/Bucket).
17 | Users can track the progress of their job, and view and/or download the results,
18 | log files, and provenance information.
19 |
20 | For more information, visit the [EBRAINS website](https://www.ebrains.eu/modelling-simulation-and-computing/simulation/neuromorphic-computing-3).
21 |
22 |
23 | All code is copyright 2015-2025 CNRS unless otherwise indicated.
24 |
25 | This repository previously contained code for all components of the service.
26 | Each of these is now developed in a separate repository.
27 |
28 |
29 |
30 | This open source software code was developed in part or in whole in the Human Brain Project,
31 | funded from the European Union's Horizon 2020 Framework Programme for Research and Innovation
32 | under Specific Grant Agreements No. 720270, No. 785907 and No. 945539 (Human Brain Project SGA1, SGA2 and SGA3), and in the EBRAINS research infrastructure,
33 | funded from the European Union's Horizon Europe funding programme under grant agreement No. 101147319 (EBRAINS-2.0).
--------------------------------------------------------------------------------
/api/README.md:
--------------------------------------------------------------------------------
1 | Version 3 of the HBP/EBRAINS Neuromorphic Computing Job Queue API, incorporating the Quotas API.
2 |
3 | For local development, set environment variables (see settings.py) then run:
4 |
5 | uvicorn simqueue.main:app --reload
6 |
7 | To run tests:
8 |
9 | pytest --cov=simqueue --cov-report=term --cov-report=html
10 |
11 | Certain tests require a valid EBRAINS IAM authorization token,
12 | provided via an environment variable `EBRAINS_AUTH_TOKEN`.
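13 | 
14 | For example, a minimal local configuration might look like the following.
15 | This is a sketch only: the variable names are taken from
16 | `deployment/docker-compose-template.yml` and the CI configuration, the values
17 | shown are placeholders, and the full list of settings lives in `settings.py`.
18 | 
19 |     export NMPI_DATABASE_HOST=localhost
20 |     export NMPI_DATABASE_PORT=5432
21 |     export NMPI_DATABASE_USER=test_user
22 |     export NMPI_DATABASE_PASSWORD=abc123
23 |     export NMPI_BASE_URL=http://localhost:8000
24 |     export NMPI_TMP_FILE_ROOT=tmp_download
25 |     export EBRAINS_IAM_CLIENT_ID=<client-id>
26 |     export EBRAINS_IAM_SECRET=<client-secret>
27 |     export SESSIONS_SECRET_KEY=<random-string>
28 | 
29 |     uvicorn simqueue.main:app --reload
30 | 
31 |     # for the tests that need an EBRAINS IAM authorization token
32 |     export EBRAINS_AUTH_TOKEN=<a-valid-token>
33 |     pytest --cov=simqueue --cov-report=term --cov-report=html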
--------------------------------------------------------------------------------
/api/dashboard/index.html:
--------------------------------------------------------------------------------
[HTML markup lost in extraction; only the page's visible text is recoverable.]
[Page title: "Dashboard : EBRAINS Neuromorphic Computing Platform". Chart headings: "Length of queues", "Completed jobs per week", "Completed jobs (cumulative)", "Platform users" (number of users who have submitted at least one job to any system, and separately to BrainScaleS, BrainScaleS-2, SpiNNaker, and Spikey), "Active users" (number of users who have submitted a job within the last 90 days), "Projects (cumulative)", "Quota usage (cumulative)", "Job duration".]
--------------------------------------------------------------------------------
/api/dashboard/js/active-user-count.js:
--------------------------------------------------------------------------------
1 | var now = new Date();
2 | var today = now.toISOString().substring(0, 10);
3 |
4 | d3.json(
5 | "/statistics/active-user-count?start=2015-06-22&end=" + today + "&interval=7",
6 | function (error, user_stats) {
7 | if (error) return console.warn(error);
8 |
9 | /* pull the user counts out of the user_stats dict so c3 can plot them */
10 | user_stats.forEach(function (entry) {
11 | for (var platform in entry.count) {
12 | entry[platform] = entry.count[platform];
13 | }
14 | });
15 |
16 | var chart = c3.generate({
17 | bindto: "#chart-active-user-count",
18 | data: {
19 | json: user_stats,
20 | keys: {
21 | x: "end",
22 | value: [
23 | "SpiNNaker",
24 | "BrainScaleS",
25 | "BrainScaleS-2",
26 | "BrainScaleS-ESS",
27 | "Spikey",
28 | ],
29 | },
30 | x: "end",
31 | xFormat: "%Y-%m-%d",
32 | type: "area",
33 | groups: [
34 | [
35 | "SpiNNaker",
36 | "BrainScaleS",
37 | "BrainScaleS-2",
38 | "BrainScaleS-ESS",
39 | "Spikey",
40 | ],
41 | ],
42 | },
43 | point: {
44 | show: false,
45 | },
46 | axis: {
47 | x: {
48 | type: "timeseries",
49 | tick: {
50 | format: "%Y-%m-%d",
51 | },
52 | },
53 | y: {
54 | label: {
55 | position: "outer-middle",
56 | text: "Number of active users (total over all systems)",
57 | },
58 | min: 0,
59 | padding: { top: 0, bottom: 0 },
60 | },
61 | },
62 | padding: {
63 | top: 20,
64 | right: 80,
65 | bottom: 20,
66 | left: 80,
67 | },
68 | });
69 | }
70 | );
71 |
--------------------------------------------------------------------------------
/api/dashboard/js/cumulative-job-count.js:
--------------------------------------------------------------------------------
1 | var now = new Date();
2 | var today = now.toISOString().substring(0, 10);
3 |
4 | d3.json(
5 | "/statistics/cumulative-job-count?start=2015-06-22&end=" +
6 | today +
7 | "&interval=7",
8 | function (error, job_stats) {
9 | if (error) return console.warn(error);
10 |
11 | /* pull the job counts out of the job_stats dict so c3 can plot them */
12 | job_stats.forEach(function (entry) {
13 | for (var platform in entry.count) {
14 | entry[platform] = entry.count[platform];
15 | }
16 | });
17 |
18 | var chart = c3.generate({
19 | bindto: "#chart-cumulative-job-count",
20 | data: {
21 | json: job_stats,
22 | keys: {
23 | x: "end",
24 | value: [
25 | "SpiNNaker",
26 | "BrainScaleS",
27 | "BrainScaleS-2",
28 | "BrainScaleS-ESS",
29 | "Spikey",
30 | ],
31 | },
32 | x: "end",
33 | xFormat: "%Y-%m-%d",
34 | type: "area",
35 | groups: [
36 | [
37 | "SpiNNaker",
38 | "BrainScaleS",
39 | "BrainScaleS-2",
40 | "BrainScaleS-ESS",
41 | "Spikey",
42 | ],
43 | ],
44 | },
45 | point: {
46 | show: false,
47 | },
48 | axis: {
49 | x: {
50 | type: "timeseries",
51 | tick: {
52 | format: "%Y-%m-%d",
53 | },
54 | },
55 | y: {
56 | label: {
57 | position: "outer-middle",
58 | text: "Number of completed jobs (cumulative)",
59 | },
60 | min: 0,
61 | padding: { top: 0, bottom: 0 },
62 | },
63 | },
64 | padding: {
65 | top: 20,
66 | right: 80,
67 | bottom: 20,
68 | left: 80,
69 | },
70 | });
71 | }
72 | );
73 |
--------------------------------------------------------------------------------
/api/dashboard/js/cumulative-project-count.js:
--------------------------------------------------------------------------------
1 | var user_stats;
2 |
3 | d3.json(
4 | "/statistics/cumulative-project-count?status=accepted",
5 | function (error, data) {
6 | if (error) return console.warn(error);
7 |
8 | var dates = data.dates;
9 | var values = data.values;
10 |
11 | values.unshift("accepted");
12 | dates.unshift("x");
13 |
14 | var chart = c3.generate({
15 | bindto: "#chart-cumulative-project-count",
16 | data: {
17 | x: "x",
18 | xFormat: "%Y-%m-%d",
19 | columns: [dates, values],
20 | type: "area-step",
21 | },
22 | axis: {
23 | x: {
24 | type: "timeseries",
25 | tick: {
26 | format: "%Y-%m-%d",
27 | },
28 | },
29 | y: {
30 | label: {
31 | position: "outer-middle",
32 | text: "Number of accepted projects",
33 | },
34 | min: 0,
35 | padding: { top: 0, bottom: 0 },
36 | },
37 | },
38 | line: {
39 | step: {
40 | type: "step-after",
41 | },
42 | },
43 | legend: {
44 | show: false,
45 | },
46 | padding: {
47 | top: 20,
48 | right: 80,
49 | bottom: 20,
50 | left: 80,
51 | },
52 | });
53 | }
54 | );
55 |
--------------------------------------------------------------------------------
/api/dashboard/js/cumulative-user-count.js:
--------------------------------------------------------------------------------
1 | var user_stats;
2 |
3 | d3.json("/statistics/cumulative-user-count", function (error, data) {
4 | if (error) return console.warn(error);
5 |
6 | var dates = data.dates;
7 | var values = data.values;
8 |
9 | values.unshift("users");
10 | dates.unshift("x");
11 |
12 | var chart = c3.generate({
13 | bindto: "#chart-cumulative-user-count",
14 | data: {
15 | x: "x",
16 | xFormat: "%Y-%m-%d",
17 | columns: [dates, values],
18 | type: "area-step",
19 | },
20 | axis: {
21 | x: {
22 | type: "timeseries",
23 | tick: {
24 | format: "%Y-%m-%d",
25 | },
26 | },
27 | y: {
28 | label: {
29 | position: "outer-middle",
30 | text: "Number of platform users",
31 | },
32 | min: 0,
33 | padding: { top: 0, bottom: 0 },
34 | },
35 | },
36 | line: {
37 | step: {
38 | type: "step-after",
39 | },
40 | },
41 | legend: {
42 | show: false,
43 | },
44 | padding: {
45 | top: 20,
46 | right: 80,
47 | bottom: 20,
48 | left: 80,
49 | },
50 | });
51 | });
52 |
53 | d3.json(
54 | "/statistics/cumulative-user-count?hardware_platform=BrainScaleS",
55 | function (error, data) {
56 | if (error) return console.warn(error);
57 |
58 | var dates = data.dates;
59 | var values = data.values;
60 |
61 | values.unshift("users");
62 | dates.unshift("x");
63 |
64 | var chart = c3.generate({
65 | bindto: "#chart-cumulative-user-count-BrainScaleS",
66 | data: {
67 | x: "x",
68 | xFormat: "%Y-%m-%d",
69 | columns: [dates, values],
70 | type: "area-step",
71 | },
72 | axis: {
73 | x: {
74 | type: "timeseries",
75 | tick: {
76 | format: "%Y-%m-%d",
77 | },
78 | },
79 | y: {
80 | label: {
81 | position: "outer-middle",
82 | text: "Number of BrainScaleS users",
83 | },
84 | min: 0,
85 | padding: { top: 0, bottom: 0 },
86 | },
87 | },
88 | line: {
89 | step: {
90 | type: "step-after",
91 | },
92 | },
93 | legend: {
94 | show: false,
95 | },
96 | padding: {
97 | top: 20,
98 | right: 80,
99 | bottom: 20,
100 | left: 80,
101 | },
102 | });
103 | }
104 | );
105 |
106 | d3.json(
107 | "/statistics/cumulative-user-count?hardware_platform=BrainScaleS-2",
108 | function (error, data) {
109 | if (error) return console.warn(error);
110 |
111 | var dates = data.dates;
112 | var values = data.values;
113 |
114 | values.unshift("users");
115 | dates.unshift("x");
116 |
117 | var chart = c3.generate({
118 | bindto: "#chart-cumulative-user-count-BrainScaleS-2",
119 | data: {
120 | x: "x",
121 | xFormat: "%Y-%m-%d",
122 | columns: [dates, values],
123 | type: "area-step",
124 | },
125 | axis: {
126 | x: {
127 | type: "timeseries",
128 | tick: {
129 | format: "%Y-%m-%d",
130 | },
131 | },
132 | y: {
133 | label: {
134 | position: "outer-middle",
135 | text: "Number of BrainScaleS-2 users",
136 | },
137 | min: 0,
138 | padding: { top: 0, bottom: 0 },
139 | },
140 | },
141 | line: {
142 | step: {
143 | type: "step-after",
144 | },
145 | },
146 | legend: {
147 | show: false,
148 | },
149 | padding: {
150 | top: 20,
151 | right: 80,
152 | bottom: 20,
153 | left: 80,
154 | },
155 | });
156 | }
157 | );
158 |
159 | d3.json(
160 | "/statistics/cumulative-user-count?hardware_platform=SpiNNaker",
161 | function (error, data) {
162 | if (error) return console.warn(error);
163 |
164 | var dates = data.dates;
165 | var values = data.values;
166 |
167 | values.unshift("users");
168 | dates.unshift("x");
169 |
170 | var chart = c3.generate({
171 | bindto: "#chart-cumulative-user-count-SpiNNaker",
172 | data: {
173 | x: "x",
174 | xFormat: "%Y-%m-%d",
175 | columns: [dates, values],
176 | type: "area-step",
177 | },
178 | axis: {
179 | x: {
180 | type: "timeseries",
181 | tick: {
182 | format: "%Y-%m-%d",
183 | },
184 | },
185 | y: {
186 | label: {
187 | position: "outer-middle",
188 | text: "Number of SpiNNaker users",
189 | },
190 | min: 0,
191 | padding: { top: 0, bottom: 0 },
192 | },
193 | },
194 | line: {
195 | step: {
196 | type: "step-after",
197 | },
198 | },
199 | legend: {
200 | show: false,
201 | },
202 | padding: {
203 | top: 20,
204 | right: 80,
205 | bottom: 20,
206 | left: 80,
207 | },
208 | });
209 | }
210 | );
211 |
212 | d3.json(
213 | "/statistics/cumulative-user-count?hardware_platform=Spikey",
214 | function (error, data) {
215 | if (error) return console.warn(error);
216 |
217 | var dates = data.dates;
218 | var values = data.values;
219 |
220 | values.unshift("users");
221 | dates.unshift("x");
222 |
223 | var chart = c3.generate({
224 | bindto: "#chart-cumulative-user-count-Spikey",
225 | data: {
226 | x: "x",
227 | xFormat: "%Y-%m-%d",
228 | columns: [dates, values],
229 | type: "area-step",
230 | },
231 | axis: {
232 | x: {
233 | type: "timeseries",
234 | tick: {
235 | format: "%Y-%m-%d",
236 | },
237 | },
238 | y: {
239 | label: {
240 | position: "outer-middle",
241 | text: "Number of Spikey users",
242 | },
243 | min: 0,
244 | padding: { top: 0, bottom: 0 },
245 | },
246 | },
247 | line: {
248 | step: {
249 | type: "step-after",
250 | },
251 | },
252 | legend: {
253 | show: false,
254 | },
255 | padding: {
256 | top: 20,
257 | right: 80,
258 | bottom: 20,
259 | left: 80,
260 | },
261 | });
262 | }
263 | );
264 |
--------------------------------------------------------------------------------
/api/dashboard/js/job-count.js:
--------------------------------------------------------------------------------
1 | var now = new Date();
2 | var today = now.toISOString().substring(0, 10);
3 |
4 | d3.json(
5 | "/statistics/job-count?start=2015-06-22&end=" + today + "&interval=7",
6 | function (error, job_stats) {
7 | if (error) return console.warn(error);
8 |
9 | /* pull the job counts out of the job_stats dict so c3 can plot them */
10 | job_stats.forEach(function (entry) {
11 | for (var platform in entry.count) {
12 | entry[platform] = entry.count[platform];
13 | }
14 | });
15 |
16 | var chart = c3.generate({
17 | bindto: "#chart-job-count",
18 | data: {
19 | json: job_stats,
20 | keys: {
21 | x: "end",
22 | value: [
23 | "SpiNNaker",
24 | "BrainScaleS",
25 | "BrainScaleS-2",
26 | "BrainScaleS-ESS",
27 | "Spikey",
28 | ],
29 | },
30 | x: "end",
31 | xFormat: "%Y-%m-%d",
32 | type: "area",
33 | groups: [
34 | [
35 | "SpiNNaker",
36 | "BrainScaleS",
37 | "BrainScaleS-2",
38 | "BrainScaleS-ESS",
39 | "Spikey",
40 | ],
41 | ],
42 | },
43 | point: {
44 | show: false,
45 | },
46 | axis: {
47 | x: {
48 | type: "timeseries",
49 | tick: {
50 | format: "%Y-%m-%d",
51 | },
52 | },
53 | y: {
54 | label: {
55 | position: "outer-middle",
56 | text: "Number of completed jobs",
57 | },
58 | min: 0,
59 | padding: { top: 0, bottom: 0 },
60 | },
61 | },
62 | padding: {
63 | top: 20,
64 | right: 80,
65 | bottom: 20,
66 | left: 80,
67 | },
68 | });
69 | }
70 | );
71 |
--------------------------------------------------------------------------------
/api/dashboard/js/job-duration.js:
--------------------------------------------------------------------------------
1 | d3.json("/statistics/job-duration?scale=log&bins=50", function (error, data) {
2 | if (error) return console.warn(error);
3 | var job_durations = {
4 | spinnaker_finished: data.filter(function (item) {
5 | return item.platform == "SpiNNaker" && item.status == "finished";
6 | })[0],
7 | brainscales_finished: data.filter(function (item) {
8 | return item.platform == "BrainScaleS" && item.status == "finished";
9 | })[0],
10 | brainscales2_finished: data.filter(function (item) {
11 | return item.platform == "BrainScaleS-2" && item.status == "finished";
12 | })[0],
13 | spikey_finished: data.filter(function (item) {
14 | return item.platform == "Spikey" && item.status == "finished";
15 | })[0],
16 | };
17 |
18 | job_durations.spinnaker_finished.bins.unshift("x");
19 | job_durations.spinnaker_finished.values.unshift("SpiNNaker (finished)");
20 | job_durations.brainscales_finished.bins.unshift("x");
21 | job_durations.brainscales_finished.values.unshift("BrainScaleS (finished)");
22 | job_durations.spikey_finished.bins.unshift("x");
23 | job_durations.spikey_finished.values.unshift("Spikey (finished)");
24 | job_durations.brainscales2_finished.bins.unshift("x");
25 | job_durations.brainscales2_finished.values.unshift(
26 | "BrainScaleS-2 (finished)"
27 | );
28 |
29 | var axis = {
30 | x: {
31 | label: {
32 | position: "outer-center",
33 | text: "Job duration (seconds)",
34 | },
35 | tick: {
36 | values: ["0", "1", "2", "3", "4", "5"],
37 | format: function (x) {
38 | return "10^" + x;
39 | },
40 | },
41 | },
42 | y: {
43 | label: {
44 | position: "outer-middle",
45 | text: "Number of jobs",
46 | },
47 | min: 0,
48 | padding: { top: 0, bottom: 0 },
49 | },
50 | };
51 | var padding = {
52 | top: 20,
53 | right: 20,
54 | bottom: 20,
55 | left: 60,
56 | };
57 |
58 | var chart1 = c3.generate({
59 | bindto: "#chart-job-duration-spinnaker",
60 | data: {
61 | x: "x",
62 | columns: [
63 | job_durations.spinnaker_finished.bins,
64 | job_durations.spinnaker_finished.values,
65 | ],
66 | type: "area-step",
67 | colors: {
68 | "SpiNNaker (finished)": "#1f77b4",
69 | },
70 | },
71 | line: {
72 | step: {
73 | type: "step-after",
74 | },
75 | },
76 | axis: axis,
77 | padding: padding,
78 | });
79 |
80 | var chart2 = c3.generate({
81 | bindto: "#chart-job-duration-brainscales",
82 | data: {
83 | x: "x",
84 | columns: [
85 | job_durations.brainscales_finished.bins,
86 | job_durations.brainscales_finished.values,
87 | ],
88 | type: "area-step",
89 | colors: {
90 | "BrainScaleS (finished)": "#ff7f0e",
91 | },
92 | },
93 | line: {
94 | step: {
95 | type: "step-after",
96 | },
97 | },
98 | axis: axis,
99 | padding: padding,
100 | });
101 |
102 | var chart3 = c3.generate({
103 | bindto: "#chart-job-duration-spikey",
104 | data: {
105 | x: "x",
106 | columns: [
107 | job_durations.spikey_finished.bins,
108 | job_durations.spikey_finished.values,
109 | ],
110 | type: "area-step",
111 | colors: {
112 | "Spikey (finished)": "#d62728",
113 | },
114 | },
115 | line: {
116 | step: {
117 | type: "step-after",
118 | },
119 | },
120 | axis: axis,
121 | padding: padding,
122 | });
123 |
124 | var chart4 = c3.generate({
125 | bindto: "#chart-job-duration-brainscales2",
126 | data: {
127 | x: "x",
128 | columns: [
129 | job_durations.brainscales2_finished.bins,
130 | job_durations.brainscales2_finished.values,
131 | ],
132 | type: "area-step",
133 | colors: {
134 | "BrainScaleS-2 (finished)": "#ee6f9e",
135 | },
136 | },
137 | line: {
138 | step: {
139 | type: "step-after",
140 | },
141 | },
142 | axis: axis,
143 | padding: padding,
144 | });
145 | });
146 |
--------------------------------------------------------------------------------
/api/dashboard/js/queue-length.js:
--------------------------------------------------------------------------------
1 | d3.json("/statistics/queue-length", function (error, json_data) {
2 | if (error) return console.warn(error);
3 |
4 | var chart = c3.generate({
5 | bindto: "#chart-queue-length",
6 | data: {
7 | json: json_data,
8 | keys: {
9 | x: "queue_name",
10 | value: ["running", "submitted"],
11 | },
12 | type: "bar",
13 | groups: [["running", "submitted"]],
14 | colors: {
15 | running: "#edce2f",
16 | submitted: "#4797ae",
17 | },
18 | },
19 | axis: {
20 | x: {
21 | type: "category",
22 | },
23 | rotated: true,
24 | },
25 | padding: {
26 | top: 20,
27 | right: 120,
28 | bottom: 20,
29 | left: 120,
30 | },
31 | });
32 | });
33 |
--------------------------------------------------------------------------------
/api/dashboard/js/quota-usage.js:
--------------------------------------------------------------------------------
1 | var now = new Date();
2 | var today = now.toISOString().substring(0, 10);
3 |
4 | d3.json(
5 | "/statistics/resource-usage?start=2015-06-22&end=" + today + "&interval=7",
6 | function (error, user_stats) {
7 | if (error) return console.warn(error);
8 |
9 | /* pull the values out of the user_stats dict so c3 can plot them */
10 | user_stats.forEach(function (entry) {
11 | for (var platform in entry.value) {
12 | entry[platform] = entry.value[platform];
13 | }
14 | });
15 |
16 | var chart = c3.generate({
17 | bindto: "#chart-resource-usage",
18 | data: {
19 | json: user_stats,
20 | keys: {
21 | x: "end",
22 | value: [
23 | "SpiNNaker",
24 | "BrainScaleS",
25 | "BrainScaleS-2",
26 | "BrainScaleS-ESS",
27 | "Spikey",
28 | ],
29 | },
30 | x: "end",
31 | xFormat: "%Y-%m-%d",
32 | type: "area",
33 | groups: [
34 | [
35 | "SpiNNaker",
36 | "BrainScaleS",
37 | "BrainScaleS-2",
38 | "BrainScaleS-ESS",
39 | "Spikey",
40 | ],
41 | ],
42 | },
43 | point: {
44 | show: false,
45 | },
46 | axis: {
47 | x: {
48 | type: "timeseries",
49 | tick: {
50 | format: "%Y-%m-%d",
51 | },
52 | },
53 | y: {
54 | label: {
55 | position: "outer-middle",
56 | text: "Resource usage",
57 | },
58 | min: 0,
59 | padding: { top: 0, bottom: 0 },
60 | },
61 | },
62 | padding: {
63 | top: 20,
64 | right: 80,
65 | bottom: 20,
66 | left: 80,
67 | },
68 | });
69 | }
70 | );
71 |
--------------------------------------------------------------------------------
/api/dashboard/lib/c3/c3.css:
--------------------------------------------------------------------------------
1 | /*-- Chart --*/
2 | .c3 svg {
3 | font: 10px sans-serif;
4 | -webkit-tap-highlight-color: transparent; }
5 |
6 | .c3 path, .c3 line {
7 | fill: none;
8 | stroke: #000; }
9 |
10 | .c3 text {
11 | -webkit-user-select: none;
12 | -moz-user-select: none;
13 | user-select: none; }
14 |
15 | .c3-legend-item-tile,
16 | .c3-xgrid-focus,
17 | .c3-ygrid,
18 | .c3-event-rect,
19 | .c3-bars path {
20 | shape-rendering: crispEdges; }
21 |
22 | .c3-chart-arc path {
23 | stroke: #fff; }
24 |
25 | .c3-chart-arc text {
26 | fill: #fff;
27 | font-size: 13px; }
28 |
29 | /*-- Axis --*/
30 | /*-- Grid --*/
31 | .c3-grid line {
32 | stroke: #aaa; }
33 |
34 | .c3-grid text {
35 | fill: #aaa; }
36 |
37 | .c3-xgrid, .c3-ygrid {
38 | stroke-dasharray: 3 3; }
39 |
40 | /*-- Text on Chart --*/
41 | .c3-text.c3-empty {
42 | fill: #808080;
43 | font-size: 2em; }
44 |
45 | /*-- Line --*/
46 | .c3-line {
47 | stroke-width: 1px; }
48 |
49 | /*-- Point --*/
50 | .c3-circle._expanded_ {
51 | stroke-width: 1px;
52 | stroke: white; }
53 |
54 | .c3-selected-circle {
55 | fill: white;
56 | stroke-width: 2px; }
57 |
58 | /*-- Bar --*/
59 | .c3-bar {
60 | stroke-width: 0; }
61 |
62 | .c3-bar._expanded_ {
63 | fill-opacity: 0.75; }
64 |
65 | /*-- Focus --*/
66 | .c3-target.c3-focused {
67 | opacity: 1; }
68 |
69 | .c3-target.c3-focused path.c3-line, .c3-target.c3-focused path.c3-step {
70 | stroke-width: 2px; }
71 |
72 | .c3-target.c3-defocused {
73 | opacity: 0.3 !important; }
74 |
75 | /*-- Region --*/
76 | .c3-region {
77 | fill: steelblue;
78 | fill-opacity: .1; }
79 |
80 | /*-- Brush --*/
81 | .c3-brush .extent {
82 | fill-opacity: .1; }
83 |
84 | /*-- Select - Drag --*/
85 | /*-- Legend --*/
86 | .c3-legend-item {
87 | font-size: 12px; }
88 |
89 | .c3-legend-item-hidden {
90 | opacity: 0.15; }
91 |
92 | .c3-legend-background {
93 | opacity: 0.75;
94 | fill: white;
95 | stroke: lightgray;
96 | stroke-width: 1; }
97 |
98 | /*-- Title --*/
99 | .c3-title {
100 | font: 14px sans-serif; }
101 |
102 | /*-- Tooltip --*/
103 | .c3-tooltip-container {
104 | z-index: 10; }
105 |
106 | .c3-tooltip {
107 | border-collapse: collapse;
108 | border-spacing: 0;
109 | background-color: #fff;
110 | empty-cells: show;
111 | -webkit-box-shadow: 7px 7px 12px -9px #777777;
112 | -moz-box-shadow: 7px 7px 12px -9px #777777;
113 | box-shadow: 7px 7px 12px -9px #777777;
114 | opacity: 0.9; }
115 |
116 | .c3-tooltip tr {
117 | border: 1px solid #CCC; }
118 |
119 | .c3-tooltip th {
120 | background-color: #aaa;
121 | font-size: 14px;
122 | padding: 2px 5px;
123 | text-align: left;
124 | color: #FFF; }
125 |
126 | .c3-tooltip td {
127 | font-size: 13px;
128 | padding: 3px 6px;
129 | background-color: #fff;
130 | border-left: 1px dotted #999; }
131 |
132 | .c3-tooltip td > span {
133 | display: inline-block;
134 | width: 10px;
135 | height: 10px;
136 | margin-right: 6px; }
137 |
138 | .c3-tooltip td.value {
139 | text-align: right; }
140 |
141 | /*-- Area --*/
142 | .c3-area {
143 | stroke-width: 0;
144 | opacity: 0.2; }
145 |
146 | /*-- Arc --*/
147 | .c3-chart-arcs-title {
148 | dominant-baseline: middle;
149 | font-size: 1.3em; }
150 |
151 | .c3-chart-arcs .c3-chart-arcs-background {
152 | fill: #e0e0e0;
153 | stroke: none; }
154 |
155 | .c3-chart-arcs .c3-chart-arcs-gauge-unit {
156 | fill: #000;
157 | font-size: 16px; }
158 |
159 | .c3-chart-arcs .c3-chart-arcs-gauge-max {
160 | fill: #777; }
161 |
162 | .c3-chart-arcs .c3-chart-arcs-gauge-min {
163 | fill: #777; }
164 |
165 | .c3-chart-arc .c3-gauge-value {
166 | fill: #000;
167 | /* font-size: 28px !important;*/ }
168 |
--------------------------------------------------------------------------------
/api/dashboard/lib/c3/c3.min.css:
--------------------------------------------------------------------------------
1 | .c3 svg{font:10px sans-serif;-webkit-tap-highlight-color:transparent}.c3 line,.c3 path{fill:none;stroke:#000}.c3 text{-webkit-user-select:none;-moz-user-select:none;user-select:none}.c3-bars path,.c3-event-rect,.c3-legend-item-tile,.c3-xgrid-focus,.c3-ygrid{shape-rendering:crispEdges}.c3-chart-arc path{stroke:#fff}.c3-chart-arc text{fill:#fff;font-size:13px}.c3-grid line{stroke:#aaa}.c3-grid text{fill:#aaa}.c3-xgrid,.c3-ygrid{stroke-dasharray:3 3}.c3-text.c3-empty{fill:gray;font-size:2em}.c3-line{stroke-width:1px}.c3-circle._expanded_{stroke-width:1px;stroke:#fff}.c3-selected-circle{fill:#fff;stroke-width:2px}.c3-bar{stroke-width:0}.c3-bar._expanded_{fill-opacity:.75}.c3-target.c3-focused{opacity:1}.c3-target.c3-focused path.c3-line,.c3-target.c3-focused path.c3-step{stroke-width:2px}.c3-target.c3-defocused{opacity:.3!important}.c3-region{fill:#4682b4;fill-opacity:.1}.c3-brush .extent{fill-opacity:.1}.c3-legend-item{font-size:12px}.c3-legend-item-hidden{opacity:.15}.c3-legend-background{opacity:.75;fill:#fff;stroke:#d3d3d3;stroke-width:1}.c3-title{font:14px sans-serif}.c3-tooltip-container{z-index:10}.c3-tooltip{border-collapse:collapse;border-spacing:0;background-color:#fff;empty-cells:show;-webkit-box-shadow:7px 7px 12px -9px #777;-moz-box-shadow:7px 7px 12px -9px #777;box-shadow:7px 7px 12px -9px #777;opacity:.9}.c3-tooltip tr{border:1px solid #CCC}.c3-tooltip th{background-color:#aaa;font-size:14px;padding:2px 5px;text-align:left;color:#FFF}.c3-tooltip td{font-size:13px;padding:3px 6px;background-color:#fff;border-left:1px dotted #999}.c3-tooltip td>span{display:inline-block;width:10px;height:10px;margin-right:6px}.c3-tooltip td.value{text-align:right}.c3-area{stroke-width:0;opacity:.2}.c3-chart-arcs-title{dominant-baseline:middle;font-size:1.3em}.c3-chart-arcs .c3-chart-arcs-background{fill:#e0e0e0;stroke:none}.c3-chart-arcs .c3-chart-arcs-gauge-unit{fill:#000;font-size:16px}.c3-chart-arcs .c3-chart-arcs-gauge-max,.c3-chart-arcs .c3-chart-arcs-gauge-min{fill:#777}.c3-chart-arc .c3-gauge-value{fill:#000}
--------------------------------------------------------------------------------
/api/deployment/Dockerfile.prod:
--------------------------------------------------------------------------------
1 | #
2 | # Build an image for deploying the Neuromorphic Platform Job Queue API v3
3 | #
4 | # To build the image, from the parent directory:
5 | # docker-compose build
6 | #
7 | # To run the application:
8 | # docker-compose up -d
9 | #
10 | #
11 | # To check the content of the docker container:
12 | # sudo docker run -it nmpi_server /bin/bash
13 |
14 | FROM docker-registry.ebrains.eu/neuromorphic/debian:bookworm-slim
15 | MAINTAINER Andrew Davison
16 |
17 | ENV DEBIAN_FRONTEND noninteractive
18 | RUN apt-get update --fix-missing; apt-get -y -q install python3.11-venv supervisor build-essential nginx-extras git wget
19 | RUN unset DEBIAN_FRONTEND
20 |
21 | ENV VENV=/home/docker/venv
22 |
23 | RUN python3 -m venv $VENV
24 | RUN $VENV/bin/pip install --upgrade pip
25 |
26 | ENV SITEDIR /home/docker/site
27 |
28 | COPY requirements.txt.lock $SITEDIR/
29 | RUN $VENV/bin/pip install -r $SITEDIR/requirements.txt.lock
30 |
31 | COPY simqueue $SITEDIR/simqueue
32 | COPY dashboard $SITEDIR/dashboard
33 | RUN mkdir -p $SITEDIR/tmp_download
34 | RUN chmod a+w $SITEDIR/tmp_download
35 |
36 | ENV PYTHONPATH /home/docker:${SITEDIR}:${VENV}/lib/python3.11/site-packages:/usr/local/lib/python3.11/dist-packages:/usr/lib/python3.11/dist-packages
37 |
38 | RUN echo "daemon off;" >> /etc/nginx/nginx.conf
39 | RUN rm /etc/nginx/sites-enabled/default
40 | COPY deployment/nginx-app-prod.conf /etc/nginx/sites-enabled/nginx-app.conf
41 | COPY deployment/supervisor-app.conf /etc/supervisor/conf.d/
42 |
43 | EXPOSE 443
44 |
45 | CMD ["supervisord", "-n", "-c", "/etc/supervisor/conf.d/supervisor-app.conf"]
46 |
--------------------------------------------------------------------------------
/api/deployment/Dockerfile.staging:
--------------------------------------------------------------------------------
1 | #
2 | # Build an image for deploying the Neuromorphic Platform Job Queue API v3
3 | #
4 | # To build the image, from the parent directory:
5 | # docker-compose build
6 | #
7 | # To run the application:
8 | # docker-compose up -d
9 | #
10 | #
11 | # To check the content of the docker container:
12 | # sudo docker run -it nmpi_server /bin/bash
13 |
14 | FROM docker-registry.ebrains.eu/neuromorphic/debian:bookworm-slim
15 | MAINTAINER Andrew Davison
16 |
17 | ENV DEBIAN_FRONTEND noninteractive
18 | RUN apt-get update --fix-missing; apt-get -y -q install python3.11-venv supervisor build-essential nginx-extras git wget
19 | RUN unset DEBIAN_FRONTEND
20 |
21 | ENV VENV=/home/docker/venv
22 |
23 | RUN python3 -m venv $VENV
24 | RUN $VENV/bin/pip install --upgrade pip
25 |
26 | ENV SITEDIR /home/docker/site
27 |
28 | COPY requirements.txt.lock $SITEDIR/
29 | RUN $VENV/bin/pip install -r $SITEDIR/requirements.txt.lock
30 |
31 | COPY simqueue $SITEDIR/simqueue
32 | COPY dashboard $SITEDIR/dashboard
33 | RUN mkdir -p $SITEDIR/tmp_download
34 | RUN chmod a+w $SITEDIR/tmp_download
35 |
36 | ENV PYTHONPATH /home/docker:${SITEDIR}:${VENV}/lib/python3.11/site-packages:/usr/local/lib/python3.11/dist-packages:/usr/lib/python3.11/dist-packages
37 |
38 | RUN echo "daemon off;" >> /etc/nginx/nginx.conf
39 | RUN rm /etc/nginx/sites-enabled/default
40 | COPY deployment/nginx-app-staging.conf /etc/nginx/sites-enabled/nginx-app.conf
41 | COPY deployment/supervisor-app.conf /etc/supervisor/conf.d/
42 |
43 | EXPOSE 443
44 |
45 | CMD ["supervisord", "-n", "-c", "/etc/supervisor/conf.d/supervisor-app.conf"]
46 |
--------------------------------------------------------------------------------
/api/deployment/docker-compose-template.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 | web:
4 | image: docker-registry.ebrains.eu/neuromorphic/nmpi_queue_server_v3:staging
5 | container_name: nmpi-v3
6 | build:
7 | context: .
8 | dockerfile: deployment/Dockerfile.staging
9 | ports:
10 | - "80:80"
11 | - "443:443"
12 | volumes:
13 | - /etc/letsencrypt:/etc/letsencrypt
14 | environment:
15 | - EBRAINS_IAM_CLIENT_ID=
16 | - EBRAINS_IAM_SECRET=
17 | - SESSIONS_SECRET_KEY=
18 | - NMPI_DATABASE_PASSWORD=
19 | - NMPI_DATABASE_HOST=
20 | - NMPI_DATABASE_PORT=
21 | - NMPI_BASE_URL=https://nmpi-v3-staging.hbpneuromorphic.eu
22 | - NMPI_TMP_FILE_ROOT=/home/docker/site/tmp_download
23 |
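24 | # Usage sketch (an assumption based on the comments in deployment/Dockerfile.*
25 | # and on .gitignore, which excludes docker-compose.yml from version control):
26 | # copy this template to docker-compose.yml, fill in the empty values above with
27 | # the real secrets, then from the parent (api) directory run:
28 | #
29 | #   docker-compose build
30 | #   docker-compose up -d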
--------------------------------------------------------------------------------
/api/deployment/nginx-app-prod.conf:
--------------------------------------------------------------------------------
1 | # nginx configuration
2 |
3 | upstream uvicorn {
4 | server unix:/tmp/uvicorn.sock;
5 | }
6 |
7 | server {
8 | listen 80;
9 |
10 | server_name nmpi-v3.hbpneuromorphic.eu;
11 | return 301 https://nmpi-v3.hbpneuromorphic.eu$request_uri;
12 | }
13 |
14 | server {
15 | listen 80;
16 |
17 | server_name nmc-remote-access.apps.ebrains.eu;
18 | return 301 https://nmc-remote-access.apps.ebrains.eu$request_uri;
19 | }
20 |
21 | server {
22 | listen 443 ssl;
23 |
24 | ssl_certificate /etc/letsencrypt/live/nmpi-v3.hbpneuromorphic.eu/fullchain.pem;
25 | ssl_certificate_key /etc/letsencrypt/live/nmpi-v3.hbpneuromorphic.eu/privkey.pem;
26 |
27 | server_name nmpi-v3.hbpneuromorphic.eu;
28 | charset utf-8;
29 | client_max_body_size 4G;
30 |
31 | location / {
32 | proxy_set_header Host $http_host;
33 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
34 | proxy_set_header X-Forwarded-Proto $scheme;
35 | proxy_redirect off;
36 | proxy_buffering off;
37 | proxy_pass http://uvicorn;
38 | proxy_read_timeout 300s;
39 | }
40 |
41 | location /static {
42 | alias /home/docker/site/static;
43 | }
44 |
45 | location /tmp_download {
46 | alias /home/docker/site/tmp_download;
47 | }
48 |
49 | }
50 |
51 | server {
52 | listen 443 ssl;
53 |
54 | ssl_certificate /etc/letsencrypt/live/nmc-remote-access.apps.ebrains.eu/fullchain.pem;
55 | ssl_certificate_key /etc/letsencrypt/live/nmc-remote-access.apps.ebrains.eu/privkey.pem;
56 |
57 | server_name nmc-remote-access.apps.ebrains.eu;
58 | charset utf-8;
59 | client_max_body_size 4G;
60 |
61 | location / {
62 | proxy_set_header Host $http_host;
63 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
64 | proxy_set_header X-Forwarded-Proto $scheme;
65 | proxy_redirect off;
66 | proxy_buffering off;
67 | proxy_pass http://uvicorn;
68 | proxy_read_timeout 300s;
69 | }
70 |
71 | location /static {
72 | alias /home/docker/site/static;
73 | }
74 |
75 | location /tmp_download {
76 | alias /home/docker/site/tmp_download;
77 | }
78 |
79 | }
80 |
81 | server {
82 | listen 443 ssl;
83 |
84 | ssl_certificate /etc/letsencrypt/live/job-manager.hbpneuromorphic.eu/fullchain.pem;
85 | ssl_certificate_key /etc/letsencrypt/live/job-manager.hbpneuromorphic.eu/privkey.pem;
86 |
87 | server_name job-manager.hbpneuromorphic.eu;
88 | return 301 https://neuromorphic-job-manager.apps.ebrains.eu$request_uri;
89 | }
90 |
91 | server {
92 | listen 443 ssl;
93 |
94 | ssl_certificate /etc/letsencrypt/live/admin.hbpneuromorphic.eu/fullchain.pem;
95 | ssl_certificate_key /etc/letsencrypt/live/admin.hbpneuromorphic.eu/privkey.pem;
96 |
97 | server_name admin.hbpneuromorphic.eu;
98 | return 301 https://adminapp.apps.tc.humanbrainproject.eu$request_uri;
99 | }
100 |
101 | server {
102 | listen 443 ssl;
103 |
104 | ssl_certificate /etc/letsencrypt/live/www.hbpneuromorphic.eu/fullchain.pem;
105 | ssl_certificate_key /etc/letsencrypt/live/www.hbpneuromorphic.eu/privkey.pem;
106 |
107 | server_name www.hbpneuromorphic.eu;
108 | return 301 https://www.ebrains.eu/modelling-simulation-and-computing/simulation/neuromorphic-computing-3;
109 | }
110 |
111 | server {
112 | listen 443 ssl;
113 |
114 | ssl_certificate /etc/letsencrypt/live/corsproxy.hbpneuromorphic.eu/fullchain.pem;
115 | ssl_certificate_key /etc/letsencrypt/live/corsproxy.hbpneuromorphic.eu/privkey.pem;
116 |
117 | server_name corsproxy.hbpneuromorphic.eu;
118 | return 301 https://corsproxy.apps.tc.humanbrainproject.eu$request_uri;
119 | }
120 |
121 | server {
122 | listen 443 ssl;
123 |
124 | ssl_certificate /etc/letsencrypt/live/live-papers.brainsimulation.eu/fullchain.pem;
125 | ssl_certificate_key /etc/letsencrypt/live/live-papers.brainsimulation.eu/privkey.pem;
126 |
127 | server_name live-papers.brainsimulation.eu;
128 | return 301 https://live-papers.apps.ebrains.eu$request_uri;
129 | }
130 |
131 | server {
132 | listen 443 ssl;
133 |
134 | ssl_certificate /etc/letsencrypt/live/model-catalog.brainsimulation.eu/fullchain.pem;
135 | ssl_certificate_key /etc/letsencrypt/live/model-catalog.brainsimulation.eu/privkey.pem;
136 |
137 | server_name model-catalog.brainsimulation.eu;
138 | return 301 https://model-catalog.apps.ebrains.eu$request_uri;
139 | }
140 |
141 | server {
142 | listen 443 ssl;
143 |
144 | ssl_certificate /etc/letsencrypt/live/validation.brainsimulation.eu/fullchain.pem;
145 | ssl_certificate_key /etc/letsencrypt/live/validation.brainsimulation.eu/privkey.pem;
146 |
147 | server_name validation.brainsimulation.eu;
148 | return 301 https://model-validation-api.apps.ebrains.eu$request_uri;
149 | }
150 |
151 | server {
152 | listen 443 ssl;
153 |
154 | ssl_certificate /etc/letsencrypt/live/prov.brainsimulation.eu/fullchain.pem;
155 | ssl_certificate_key /etc/letsencrypt/live/prov.brainsimulation.eu/privkey.pem;
156 |
157 | server_name prov.brainsimulation.eu;
158 | return 301 https://prov-api.apps.ebrains.eu$request_uri;
159 | }
160 |
161 | server {
162 | listen 443 ssl;
163 |
164 | ssl_certificate /etc/letsencrypt/live/neo-viewer.brainsimulation.eu/fullchain.pem;
165 | ssl_certificate_key /etc/letsencrypt/live/neo-viewer.brainsimulation.eu/privkey.pem;
166 |
167 | server_name neo-viewer.brainsimulation.eu;
168 | return 301 https://neoviewer.apps.ebrains.eu$request_uri;
169 | }
170 |
--------------------------------------------------------------------------------
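The production nginx configuration above terminates TLS, proxies the two service hostnames to the uvicorn unix socket, and 301-redirects a series of legacy domains to their current EBRAINS locations. A minimal sketch for spot-checking two of those redirects after deployment, using httpx (already listed in requirements.txt); the domains are taken from the server blocks above:

    import httpx

    redirects = {
        "https://job-manager.hbpneuromorphic.eu/": "https://neuromorphic-job-manager.apps.ebrains.eu",
        "https://model-catalog.brainsimulation.eu/": "https://model-catalog.apps.ebrains.eu",
    }

    for old, new in redirects.items():
        # follow_redirects=False so the 301 issued by nginx can be inspected directly
        response = httpx.get(old, follow_redirects=False)
        assert response.status_code == 301
        assert response.headers["location"].startswith(new)
        print(old, "->", response.headers["location"])

--------------------------------------------------------------------------------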
/api/deployment/nginx-app-staging.conf:
--------------------------------------------------------------------------------
1 | # nginx configuration
2 |
3 | upstream uvicorn {
4 | server unix:/tmp/uvicorn.sock;
5 | }
6 |
7 | server {
8 | listen 80;
9 |
10 | server_name nmpi-v3-staging.hbpneuromorphic.eu;
11 | return 301 https://nmpi-v3-staging.hbpneuromorphic.eu$request_uri;
12 | }
13 |
14 | server {
15 | listen 443 ssl;
16 |
17 | ssl_certificate /etc/letsencrypt/live/nmpi-v3-staging.hbpneuromorphic.eu/fullchain.pem;
18 | ssl_certificate_key /etc/letsencrypt/live/nmpi-v3-staging.hbpneuromorphic.eu/privkey.pem;
19 |
20 | server_name nmpi-v3-staging.hbpneuromorphic.eu;
21 | charset utf-8;
22 | client_max_body_size 4G;
23 |
24 | location / {
25 | proxy_set_header Host $http_host;
26 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
27 | proxy_set_header X-Forwarded-Proto $scheme;
28 | proxy_redirect off;
29 | proxy_buffering off;
30 | proxy_pass http://uvicorn;
31 | proxy_read_timeout 300s;
32 | }
33 |
34 | location /static {
35 | alias /home/docker/site/static;
36 | }
37 |
38 | location /tmp_download {
39 | alias /home/docker/site/tmp_download;
40 | }
41 |
42 | }
43 |
--------------------------------------------------------------------------------
/api/deployment/supervisor-app.conf:
--------------------------------------------------------------------------------
1 | [supervisord]
2 | loglevel=info
3 |
4 | [program:nginx-app]
5 | command = /usr/sbin/nginx
6 | stdout_logfile = /dev/stdout
7 | stdout_logfile_maxbytes = 0
8 | stderr_logfile = /dev/stderr
9 | stderr_logfile_maxbytes = 0
10 |
11 | [fcgi-program:uvicorn]
12 | socket = unix:///tmp/uvicorn.sock
13 | socket_owner = www-data
14 | user = www-data
15 | command = /home/docker/venv/bin/uvicorn simqueue.main:app --proxy-headers --uds /tmp/uvicorn.sock --timeout-keep-alive 300
16 | numprocs = 2
17 | process_name = uvicorn-%(process_num)d
18 | stdout_logfile = /dev/stdout
19 | stdout_logfile_maxbytes = 0
20 | stderr_logfile = /dev/stderr
21 | stderr_logfile_maxbytes = 0
--------------------------------------------------------------------------------
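The supervisord configuration above runs nginx alongside two uvicorn workers bound to the /tmp/uvicorn.sock socket that the nginx upstream expects. For local development it can be simpler to bypass nginx and supervisor and serve the application over TCP; a minimal sketch, assuming the environment variables from the compose template are set (run_dev.py is a hypothetical helper, not part of the repository):

    # run_dev.py
    import uvicorn

    if __name__ == "__main__":
        # serve simqueue.main:app directly instead of via the unix socket used in production
        uvicorn.run("simqueue.main:app", host="127.0.0.1", port=8000, reload=True)

--------------------------------------------------------------------------------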
/api/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.pytest.ini_options]
2 | asyncio_default_fixture_loop_scope = "function"
3 |
--------------------------------------------------------------------------------
/api/requirements.txt:
--------------------------------------------------------------------------------
1 | urllib3 <=1.26.15
2 | uvicorn
3 | fastapi
4 | itsdangerous
5 | Authlib
6 | httpx
7 | databases[postgresql]
8 | sqlalchemy
9 | pytz
10 | requests
11 | numpy
12 | python-slugify
13 | ebrains-drive
14 |
--------------------------------------------------------------------------------
/api/requirements.txt.lock:
--------------------------------------------------------------------------------
1 | annotated-types==0.7.0
2 | anyio==4.8.0
3 | asyncpg==0.30.0
4 | Authlib==1.5.1
5 | certifi==2025.1.31
6 | cffi==1.17.1
7 | charset-normalizer==3.4.1
8 | click==8.1.8
9 | cryptography==44.0.2
10 | databases==0.9.0
11 | ebrains-drive==0.6.0
12 | fastapi==0.115.11
13 | h11==0.14.0
14 | httpcore==1.0.7
15 | httpx==0.28.1
16 | idna==3.10
17 | itsdangerous==2.2.0
18 | numpy==2.2.3
19 | pycparser==2.22
20 | pydantic==2.10.6
21 | pydantic_core==2.27.2
22 | python-slugify==8.0.4
23 | pytz==2025.1
24 | requests==2.32.3
25 | sniffio==1.3.1
26 | SQLAlchemy==2.0.39
27 | starlette==0.46.1
28 | text-unidecode==1.3
29 | tqdm==4.67.1
30 | typing_extensions==4.12.2
31 | urllib3==1.26.15
32 | uvicorn==0.34.0
33 |
--------------------------------------------------------------------------------
/api/requirements_testing.txt:
--------------------------------------------------------------------------------
1 | pytest
2 | pytest-cov
3 | pytest-asyncio
4 | pytest-mock
5 | flake8
6 | faker
7 |
--------------------------------------------------------------------------------
/api/setup_test_db.py:
--------------------------------------------------------------------------------
1 | """
2 | docker run --name nmpidb -e POSTGRES_PASSWORD=sehgc98y94t -p 32768:5432 -d postgres:14
3 |
4 | """
5 |
6 | import asyncio
7 | from datetime import datetime, timedelta, date
8 | import os
9 | import random
10 | from uuid import UUID, uuid4
11 |
12 | from faker import Faker
13 | import asyncpg
14 | import sqlalchemy
15 | import databases
16 |
17 | from simqueue import settings
18 |
19 | assert settings.DATABASE_USERNAME == "test_user"
20 |
21 | from simqueue import db
22 |
23 |
24 | fake = Faker()
25 |
26 | job_status_options = ["submitted", "running", "finished", "error"]
27 | project_status_options = ["under review", "accepted", "rejected", "in preparation"]
28 | hardware_platform_options = ["BrainScaleS", "BrainScaleS-2", "SpiNNaker", "Spikey", "Demo"]
29 | tags = ["test"] + fake.words(10)
30 |
31 |
32 | async def create_fake_tag(database, tag):
33 | ins = db.taglist.insert().values(name=tag, slug=fake.word())
34 | await database.execute(ins)
35 |
36 |
37 | async def add_tag(database, job_id, tag):
38 | # get tag id
39 | query = db.taglist.select().where(db.taglist.c.name == tag)
40 | tag_obj = await database.fetch_one(query)
41 | ins = db.tagged_items.insert().values(
42 | object_id=job_id, tag_id=tag_obj["id"], content_type_id=fake.random_int()
43 | )
44 | await database.execute(ins)
45 |
46 |
47 | async def add_comment(database, job_id, user_id, content, timestamp):
48 | ins = db.comments.insert().values(
49 | content=content, created_time=timestamp, user=user_id, job_id=job_id
50 | )
51 | comment_id = await database.execute(ins)
52 |
53 |
54 | async def create_specific_job(database):
55 | # this is a job we expect to find when running the tests
56 | await database.execute("ALTER SEQUENCE simqueue_job_id_seq RESTART WITH 142972")
57 | job = dict(
58 | code="import sPyNNaker as sim\nsim.setup()",
59 | command="",
60 | status="finished",
61 | collab_id="neuromorphic-testing-private",
62 | user_id="adavison",
63 | hardware_platform="SpiNNaker",
64 | timestamp_submission=datetime(2021, 3, 10, 15, 16, 17),
65 | timestamp_completion=datetime(2021, 3, 10, 16, 17, 18),
66 | )
67 | ins = db.jobs.insert().values(**job)
68 | job_id = await database.execute(ins)
69 | await add_tag(database, job_id, "test")
70 | await add_comment(
71 | database, job_id, "stanlaurel", "This is a comment", datetime(2021, 3, 10, 17, 18, 19)
72 | )
73 |
74 |
75 | async def create_fake_job(database):
76 | job = dict(
77 | code=fake.text(),
78 | command=fake.sentence(),
79 | collab_id=fake.word(),
80 | user_id=fake.user_name(),
81 | status=random.choice(job_status_options),
82 | hardware_platform=random.choice(hardware_platform_options),
83 | hardware_config=str(fake.pydict()),
84 | timestamp_submission=fake.date_time_this_decade(),
85 | )
86 | # todo: add provenance, resource_usage for finished jobs
87 | if job["status"] in ("finished", "error"):
88 | job["timestamp_completion"] = job["timestamp_submission"] + timedelta(
89 | random.uniform(0, 1000)
90 | )
91 | ins = db.jobs.insert().values(**job)
92 | job_id = await database.execute(ins)
93 | assert isinstance(job_id, int)
94 |
95 | # tag some jobs
96 | if random.random() < 0.5:
97 | await add_tag(database, job_id, random.choice(tags))
98 |
99 |
100 | async def create_specific_project(database):
101 | # this is a project we expect to find when running the tests
102 | project = dict(
103 | context=uuid4(),
104 | collab="neuromorphic-testing-private",
105 | owner="adavison",
106 | title="Some project",
107 | abstract="Abstract goes here",
108 | description="",
109 | duration=42, # in days
110 | start_date=None,
111 | accepted=False,
112 | submission_date=date(2016, 3, 4),
113 | decision_date=None,
114 | )
115 |
116 | ins = db.projects.insert().values(**project)
117 | await database.execute(ins)
118 |
119 |
120 | async def create_fake_quota(database, project_id):
121 | quota_data = {
122 | "units": fake.word(),
123 | "limit": random.uniform(0.1, 10000),
124 | "usage": 0.0,
125 | "platform": random.choice(hardware_platform_options),
126 | "project_id": project_id,
127 | }
128 |
129 | ins = db.quotas.insert().values(**quota_data)
130 | await database.execute(ins)
131 |
132 |
133 | async def create_fake_project(database, status):
134 | project = dict(
135 | context=uuid4(),
136 | collab=fake.word(),
137 | owner=fake.user_name(),
138 | title=fake.sentence(),
139 | abstract=fake.paragraph(),
140 | description=fake.text(),
141 | duration=random.randint(0, 100), # in days
142 | start_date=None,
143 | accepted=False,
144 | submission_date=None,
145 | decision_date=None,
146 | )
147 |
148 | # Project status
149 | # - no submission_date: "in preparation"
150 | # - submission_date, accepted=False: "under review"
151 | # - submission_date, decision_date, accepted=True: "accepted"
152 | # - submission_date, decision_date, accepted=False: "rejected"
153 |
154 | if status == "in preparation":
155 | pass
156 | else:
157 | project["submission_date"] = fake.date_this_decade()
158 | if status == "under review":
159 | pass
160 | else:
161 | project["decision_date"] = project["submission_date"] + timedelta(
162 | days=random.randint(1, 10)
163 | )
164 | if status == "accepted":
165 | project["accepted"] = True
166 | project["start_date"] = project["decision_date"]
167 | else:
168 | assert status == "rejected"
169 |
170 | ins = db.projects.insert().values(**project)
171 | await database.execute(ins)
172 |
173 | if status == "accepted":
174 | for i in range(random.randint(0, 4)):
175 | await create_fake_quota(database, project["context"])
176 |
177 |
178 | async def create_fake_data(database):
179 | for tag in tags:
180 | await create_fake_tag(database, tag)
181 | for i in range(20):
182 | await create_fake_job(database)
183 | for status in ("in preparation", "under review", "accepted", "rejected"):
184 | for i in range(6):
185 | await create_fake_project(database, status)
186 |
187 |
188 | async def create_apikey(database):
189 | if "NMPI_TESTING_APIKEY" in os.environ:
190 | ins = db.api_keys.insert().values(
191 | key=os.environ["NMPI_TESTING_APIKEY"],
192 | user_id=4,
193 | created=fake.date_time_this_decade(),
194 | )
195 | await database.execute(ins)
196 |
197 |
198 | async def initialize_db():
199 | DATABASE_ADMIN_PASSWORD = os.environ["PGPASSWORD"]
200 | SQLALCHEMY_DATABASE_URL = f"postgresql://postgres:{DATABASE_ADMIN_PASSWORD}@{settings.DATABASE_HOST}:{settings.DATABASE_PORT}/postgres?ssl=false"
201 | database = databases.Database(SQLALCHEMY_DATABASE_URL)
202 | await database.connect()
203 | await database.execute("CREATE DATABASE nmpi")
204 | await database.execute(f"CREATE USER test_user PASSWORD '{settings.DATABASE_PASSWORD}'")
205 | await database.execute("ALTER DATABASE nmpi OWNER TO test_user")
206 | await database.disconnect()
207 |
208 |
209 | async def main():
210 |
211 | await initialize_db()
212 |
213 | await db.database.connect()
214 |
215 | dialect = sqlalchemy.dialects.postgresql.dialect()
216 |
217 | # drop tables to ensure we start with an empty db
218 | for table in db.metadata.tables.values():
219 | cmd = sqlalchemy.schema.DropTable(table)
220 | query = str(cmd.compile(dialect=dialect)) + " CASCADE" # this feels like a hack
221 | try:
222 | await db.database.execute(query=query)
223 | except asyncpg.exceptions.UndefinedTableError:
224 | pass
225 |
226 | # create tables
227 | for table in db.metadata.tables.values():
228 | schema = sqlalchemy.schema.CreateTable(table, if_not_exists=True)
229 | query = str(schema.compile(dialect=dialect))
230 | await db.database.execute(query=query)
231 |
232 | # add fake data
233 | await create_fake_data(db.database)
234 |
235 | # add test data we specifically test for
236 | await create_specific_job(db.database)
237 | await create_specific_project(db.database)
238 | await create_apikey(db.database)
239 |
240 | await db.database.disconnect()
241 |
242 |
243 | if __name__ == "__main__":
244 | loop = asyncio.new_event_loop()
245 | loop.run_until_complete(main())
246 |
--------------------------------------------------------------------------------
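The script above creates the nmpi database and the test_user role, (re)creates the tables defined in simqueue.db, and fills them with Faker-generated jobs, projects and quotas plus a few fixed records that the test suite looks for. A minimal sketch for checking the result afterwards, reusing the same simqueue.db module (it assumes the same database settings and test_user credentials as the script):

    import asyncio

    from simqueue import db

    async def count_rows():
        await db.database.connect()
        jobs = await db.database.fetch_all(db.jobs.select())
        projects = await db.database.fetch_all(db.projects.select())
        await db.database.disconnect()
        print(f"{len(jobs)} jobs, {len(projects)} projects")

    asyncio.run(count_rows())

--------------------------------------------------------------------------------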
/api/simqueue/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HumanBrainProject/ebrains-neuromorphic-job-queue-api/402831ef0c98e092c472537d092aaa53a6209c78/api/simqueue/__init__.py
--------------------------------------------------------------------------------
/api/simqueue/data_models.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, date, timezone
2 | from enum import Enum
3 | from typing import List, Dict, Optional
4 | from uuid import UUID
5 | import json
6 | from urllib.parse import urlparse
7 | from pydantic import BaseModel, AnyUrl, constr
8 |
9 | from .globals import RESOURCE_USAGE_UNITS
10 | from .data_repositories import repository_lookup_by_host, repository_lookup_by_name
11 |
12 |
13 | class JobStatus(str, Enum):
14 | submitted = "submitted"
15 | validated = "validated" # unused at the moment
16 | running = "running"
17 | mapped = "mapped" # unused at the moment
18 | finished = "finished"
19 | error = "error"
20 | removed = "removed"
21 |
22 |
23 | class SessionStatus(str, Enum):
24 | running = "running"
25 | finished = "finished"
26 | error = "error"
27 | removed = "removed"
28 |
29 |
30 | class Comment(BaseModel):
31 | id: int = None
32 | job_id: int = None
33 | content: str
34 | user_id: str = None
35 | timestamp: datetime = None
36 | resource_uri: str = None
37 |
38 | @classmethod
39 | def from_db(cls, comment):
40 | return {
41 | "id": comment["id"],
42 | "job_id": comment["job_id"],
43 | "content": comment["content"],
44 | "user_id": comment["user"],
45 | "timestamp": comment["created_time"],
46 | "resource_uri": f"/jobs/{comment['job_id']}/comments/{comment['id']}",
47 | }
48 |
49 |
50 | Tag = constr(min_length=2, max_length=100, strip_whitespace=True)
51 |
52 |
53 | class CommentBody(BaseModel):
54 | content: constr(min_length=1, max_length=10000)
55 |
56 |
57 | class TimeSeries(BaseModel):
58 | dates: List
59 | values: List[int]
60 |
61 |
62 | class DataItem(BaseModel):
63 | url: AnyUrl
64 | path: Optional[str] = None
65 | content_type: Optional[str] = None
66 | size: Optional[int] = None # in bytes
67 | hash: Optional[str] = None
68 |
69 | @classmethod
70 | def from_db(cls, data_item, repository_obj=None):
71 | if (data_item["path"] is None) and repository_obj:
72 | data_item["path"] = repository_obj.get_path(data_item["url"])
73 | return cls(**data_item)
74 |
75 | def to_db(self):
76 | return {
77 | "url": str(self.url),
78 | "path": self.path,
79 | "hash": self.hash,
80 | "size": self.size,
81 | "content_type": self.content_type,
82 | }
83 |
84 |
85 | class DataSet(BaseModel):
86 | repository: str
87 | files: List[DataItem]
88 |
89 | @classmethod
90 | def from_db(cls, data_items):
91 | urls = [item["url"] for item in data_items]
92 | url_parts = urlparse(urls[0])
93 | repository_obj = repository_lookup_by_host.get(url_parts.hostname, None)
94 | if repository_obj:
95 | repository_name = repository_obj.name
96 | else:
97 | repository_name = "unknown data repository"
98 | return cls(
99 | repository=repository_name,
100 | files=[DataItem.from_db(data_item, repository_obj) for data_item in data_items],
101 | )
102 |
103 | def to_db(self):
104 | return [item.to_db() for item in self.files]
105 |
106 | def move_to(self, new_repository, user, collab=None):
107 | if new_repository not in repository_lookup_by_name:
108 | raise ValueError(f"Repository '{new_repository}' does not exist or is not supported")
109 | repository_obj = repository_lookup_by_name[new_repository]
110 | self.repository = new_repository
111 | for file in self.files:
112 | file.url = repository_obj.copy(file, user, collab=collab)
113 | # todo: delete from old repository if possible
114 | return self
115 |
116 |
117 | class ResourceUsage(BaseModel):
118 | value: float
119 | units: str
120 |
121 |
122 | class SubmittedJob(BaseModel):
123 | """
124 | Job
125 |
126 | Each job has an entry with all the required information to be run on the hardware.
127 | """
128 |
129 | code: str
130 | command: Optional[str] = None
131 | collab: str
132 | input_data: Optional[List[DataItem]] = None
133 | hardware_platform: str
134 | hardware_config: Optional[dict] = None
135 | tags: Optional[List[Tag]] = None
136 |
137 | def to_db(self):
138 | return {
139 | "code": self.code,
140 | "command": self.command,
141 | "collab_id": self.collab,
142 | "input_data": (
143 | [data_item.to_db() for data_item in self.input_data]
144 | if self.input_data is not None
145 | else None
146 | ),
147 | "hardware_platform": self.hardware_platform,
148 | "hardware_config": json.dumps(self.hardware_config) if self.hardware_config else None,
149 | "tags": self.tags,
150 | }
151 |
152 |
153 | class AcceptedJob(SubmittedJob):
154 | id: int
155 | user_id: str
156 | status: JobStatus = JobStatus.submitted
157 | timestamp_submission: Optional[datetime] = None
158 | resource_uri: str
159 |
160 |
161 | class CompletedJob(AcceptedJob):
162 | output_data: Optional[DataSet] = None
163 | provenance: Optional[dict] = None
164 | timestamp_completion: Optional[datetime] = None
165 | resource_usage: Optional[ResourceUsage] = None
166 | comments: Optional[List[Comment]] = None
167 | log: Optional[str] = None
168 |
169 |
170 | class Job(CompletedJob):
171 | """Used where we want to return jobs with different statuses"""
172 |
173 | @classmethod
174 | def from_db(cls, job):
175 | """Change certain fields that are stored as strings or floats into richer Python types"""
176 | data = {
177 | "id": job["id"],
178 | "code": job["code"],
179 | "command": job["command"],
180 | "collab": job["collab_id"],
181 | "input_data": job["input_data"],
182 | "hardware_platform": job["hardware_platform"],
183 | "tags": [tag for tag in job["tags"] if len(tag) > 1], # filter out invalid tags
184 | }
185 | if job["hardware_config"]:
186 | data["hardware_config"] = json.loads(job["hardware_config"])
187 | if job["provenance"]:
188 | data["provenance"] = json.loads(job["provenance"])
189 | if job["resource_usage"] is not None: # can be 0.0
190 | data["resource_usage"] = {
191 | "value": job["resource_usage"],
192 | "units": RESOURCE_USAGE_UNITS.get(job["hardware_platform"], "hours"),
193 | }
194 | if job["id"]:
195 | data["resource_uri"] = f"/jobs/{job['id']}"
196 | for field in ("user_id", "status", "timestamp_submission", "timestamp_completion", "log"):
197 | if job.get(field, None):
198 | data[field] = job[field]
199 | if job["output_data"]:
200 | data["output_data"] = DataSet.from_db(job["output_data"])
201 | if job.get("comments", None):
202 | data["comments"] = [Comment.from_db(comment) for comment in job["comments"]]
203 | return cls(**data)
204 |
205 |
206 | class JobPatch(BaseModel): # todo: rename to JobUpdate
207 | status: Optional[JobStatus] = None
208 | output_data: Optional[DataSet] = None
209 | provenance: Optional[dict] = None
210 | resource_usage: Optional[ResourceUsage] = None
211 | log: Optional[str] = None
212 |
213 | def to_db(self):
214 | values = {}
215 | if self.status is not None:
216 | values["status"] = self.status.value
217 | if self.output_data is not None:
218 | values["output_data"] = self.output_data.to_db()
219 | if self.provenance is not None:
220 | values["provenance"] = json.dumps(self.provenance)
221 | if self.resource_usage is not None:
222 | values["resource_usage"] = self.resource_usage.value
223 | if self.log is not None:
224 | values["log"] = self.log
225 | if self.status in (JobStatus.finished, JobStatus.error):
226 | values["timestamp_completion"] = datetime.now(timezone.utc)
227 | return values
228 |
229 |
230 | class SessionCreation(BaseModel):
231 | collab: str
232 | user_id: str
233 | hardware_platform: str
234 | hardware_config: Optional[dict] = None
235 |
236 | def to_db(self):
237 | return {
238 | "collab_id": self.collab,
239 | "hardware_platform": self.hardware_platform,
240 | "hardware_config": json.dumps(self.hardware_config),
241 | "user_id": self.user_id,
242 | "timestamp_start": datetime.now(timezone.utc),
243 | "resource_usage": 0.0,
244 | }
245 |
246 |
247 | class Session(SessionCreation):
248 | id: int
249 | user_id: str
250 | status: SessionStatus = SessionStatus.running
251 | timestamp_start: Optional[datetime] = None
252 | timestamp_end: Optional[datetime] = None
253 | resource_uri: str
254 | resource_usage: Optional[ResourceUsage] = None
255 |
256 | @classmethod
257 | def from_db(cls, session):
258 | data = {
259 | "id": session["id"],
260 | "resource_uri": f"/sessions/{session['id']}",
261 | "collab": session["collab_id"],
262 | "status": session["status"],
263 | "hardware_platform": session["hardware_platform"],
264 | "user_id": session["user_id"],
265 | "timestamp_start": session["timestamp_start"],
266 | }
267 | if session["hardware_config"]:
268 | data["hardware_config"] = json.loads(session["hardware_config"])
269 | if session["resource_usage"] is not None: # can be 0.0
270 | data["resource_usage"] = {
271 | "value": session["resource_usage"],
272 | "units": RESOURCE_USAGE_UNITS.get(session["hardware_platform"], "hours"),
273 | }
274 | if session["timestamp_end"]:
275 | data["timestamp_end"] = session["timestamp_end"]
276 | return cls(**data)
277 |
278 |
279 | class SessionUpdate(BaseModel):
280 | status: SessionStatus = SessionStatus.running
281 | resource_usage: ResourceUsage
282 |
283 | def to_db(self):
284 | values = {"status": self.status.value, "resource_usage": self.resource_usage.value}
285 | if self.status in (SessionStatus.finished, SessionStatus.error):
286 | values["timestamp_end"] = datetime.now(timezone.utc)
287 | return values
288 |
289 |
290 | # class Config #?
291 |
292 |
293 | # --- Data models for projects and quotas -----
294 |
295 |
296 | class QuotaSubmission(BaseModel):
297 | limit: float # "Quantity of resources granted"
298 | platform: str # "System to which quota applies"
299 | units: str # core-hours, wafer-hours, GB
300 |
301 | def to_db(self):
302 | return {"limit": self.limit, "platform": self.platform, "units": self.units, "usage": 0.0}
303 |
304 |
305 | class QuotaUpdate(BaseModel):
306 | limit: Optional[float] = None # "Quantity of resources granted"
307 | usage: float # "Quantity of resources used"
308 |
309 | def to_db(self):
310 | return {"limit": self.limit, "usage": self.usage}
311 |
312 |
313 | class Quota(QuotaSubmission, QuotaUpdate):
314 | # id: int # do we need this? or just use resource_uri
315 | project: str
316 | resource_uri: Optional[str] = None
317 |
318 | @classmethod
319 | def from_db(cls, quota):
320 | data = {
321 | # "id": quota["id"],
322 | "limit": quota["limit"],
323 | "platform": quota["platform"],
324 | "units": quota["units"],
325 | "usage": quota["usage"],
326 | "resource_uri": f"/projects/{quota['project_id']}/quotas/{quota['id']}",
327 | "project": f"/projects/{quota['project_id']}",
328 | }
329 | return cls(**data)
330 |
331 |
332 | class ProjectStatus(str, Enum):
333 | in_prep = "in preparation"
334 | accepted = "accepted"
335 | under_review = "under review"
336 | rejected = "rejected"
337 | # todo: consider adding "expired"
338 |
339 |
340 | class ProjectSubmission(BaseModel):
341 | collab: str
342 | title: str
343 | abstract: str
344 | description: Optional[str] = None
345 | status: ProjectStatus = ProjectStatus.in_prep
346 |
347 | def to_db(self, owner):
348 | values = {
349 | "collab": self.collab,
350 | "title": self.title,
351 | "abstract": self.abstract,
352 | "description": self.description or "",
353 | "accepted": False,
354 | "owner": owner,
355 | }
356 | if self.status == ProjectStatus.under_review:
357 | values["submission_date"] = date.today()
358 | return values
359 |
360 |
361 | class Project(ProjectSubmission):
362 | id: UUID
363 | owner: str
364 | submission_date: Optional[date] = None
365 | decision_date: Optional[date] = None
366 | resource_uri: str
367 | status: ProjectStatus = ProjectStatus.in_prep
368 | quotas: Optional[List[Quota]] = None
369 |
370 | @classmethod
371 | def _get_status(cls, project):
372 | if project["submission_date"] is None:
373 | return ProjectStatus.in_prep
374 | elif project["accepted"]:
375 | return ProjectStatus.accepted
376 | elif project["decision_date"] is None:
377 | return ProjectStatus.under_review
378 | else:
379 | return ProjectStatus.rejected
380 |
381 | @classmethod
382 | def from_db(cls, project, quotas=None):
383 | if project is None:
384 | return None
385 | if quotas is None:
386 | quotas = []
387 | data = {
388 | "id": project["context"],
389 | "collab": project["collab"],
390 | "title": project["title"],
391 | "abstract": project["abstract"],
392 | "description": project["description"],
393 | "owner": project["owner"],
394 | "submission_date": project["submission_date"],
395 | "decision_date": project["decision_date"],
396 | "status": cls._get_status(project),
397 | "resource_uri": f"/projects/{project['context']}",
398 | "quotas": [Quota.from_db(quota) for quota in quotas],
399 | }
400 | return cls(**data)
401 |
402 |
403 | class ProjectUpdate(BaseModel):
404 | title: Optional[str] = None
405 | abstract: Optional[str] = None
406 | description: Optional[str] = None
407 | owner: Optional[str] = None
408 | status: Optional[ProjectStatus] = None
409 |
410 | def to_db(self):
411 | values = {}
412 | for field in ("title", "abstract", "description", "owner"):
413 | value = getattr(self, field)
414 | if value is not None:
415 | values[field] = value
416 | if self.status == ProjectStatus.under_review:
417 | values["submission_date"] = date.today()
418 | elif self.status == ProjectStatus.accepted:
419 | values["accepted"] = True
420 | values["decision_date"] = date.today()
421 | elif self.status == ProjectStatus.rejected:
422 | values["accepted"] = False
423 | values["decision_date"] = date.today()
424 | return values
425 |
426 |
427 | # --- Data models for statistics -----
428 |
429 |
430 | class DateRangeCount(BaseModel):
431 | start: date
432 | end: date
433 | count: Dict[str, int]
434 |
435 |
436 | class DateRangeQuantity(BaseModel):
437 | start: date
438 | end: date
439 | value: Dict[str, float]
440 |
441 |
442 | class QueueStatus(BaseModel):
443 | queue_name: str
444 | running: int
445 | submitted: int
446 |
447 |
448 | class Histogram(BaseModel):
449 | values: List
450 | bins: List
451 | platform: str
452 | status: str
453 | scale: str
454 | max: int
455 |
--------------------------------------------------------------------------------
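The models above describe a job's life cycle: clients submit a SubmittedJob, the service stores and returns it as a Job, and providers update it with a JobPatch while it runs. A short illustrative example of building a submission and converting it to the database representation (all values are made up):

    from simqueue.data_models import DataItem, SubmittedJob

    job = SubmittedJob(
        code="import pyNN.nest as sim\nsim.setup()",
        collab="my-collab",
        hardware_platform="SpiNNaker",
        input_data=[DataItem(url="https://example.com/input/spike_times.npy")],
        tags=["demo"],
    )
    # to_db() flattens the model into the column values stored by simqueue.db
    print(job.to_db())

--------------------------------------------------------------------------------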
/api/simqueue/data_repositories.py:
--------------------------------------------------------------------------------
1 | import os
2 | import uuid
3 | from urllib.request import urlretrieve, urlcleanup, HTTPError
4 | from urllib.parse import urlparse
5 | import zipfile
6 | from ebrains_drive.client import DriveApiClient, BucketApiClient
7 | from ebrains_drive.exceptions import DoesNotExist
8 |
9 | from . import settings
10 |
11 |
12 | class SourceFileDoesNotExist(Exception):
13 | pass
14 |
15 |
16 | class SourceFileIsTooBig(Exception):
17 | pass
18 |
19 |
20 | def convert_bytes(size_in_bytes, unit):
21 | size_units = ["bytes", "KiB", "MiB", "GiB", "TiB"]
22 | return size_in_bytes / (1024 ** size_units.index(unit))
23 |
24 |
25 | def get_file_size(file_path, unit):
26 | if os.path.isfile(file_path):
27 | file_info = os.stat(file_path)
28 | return convert_bytes(file_info.st_size, unit)
29 |
30 |
31 | def drive_mkdir_p(base_dir, relative_path):
32 | # to move to ebrains_drive
33 | path_parts = relative_path.split("/")
34 | parent = base_dir
35 | for dirname in path_parts:
36 | subdirs = {
37 | subdir.name: subdir for subdir in parent.ls(entity_type="dir", force_refresh=False)
38 | }
39 | try:
40 | dir = subdirs[dirname]
41 | except KeyError:
42 | # create directory
43 | dir = parent.mkdir(dirname)
44 | parent = dir
45 | return dir
46 |
47 |
48 | def download_file_to_tmp_dir(url):
49 | try:
50 | local_path, headers = urlretrieve(str(url))
51 | except HTTPError as err:
52 | if err.code == 404:
53 | raise SourceFileDoesNotExist(err.reason)
54 | else:
55 | raise
56 | return local_path
57 |
58 |
59 | def ensure_path_from_root(path):
60 | if not path.startswith("/"):
61 | path = "/" + path
62 | return path
63 |
64 |
65 | class SpiNNakerTemporaryStorage:
66 | name = "SpiNNaker Manchester temporary storage"
67 | host = "spinnaker.cs.man.ac.uk"
68 | modes = ("read",)
69 |
70 | @classmethod
71 | def get_path(cls, url):
72 | # example url: http://spinnaker.cs.man.ac.uk/services/rest/output/neuromorphic-testing-private/142973/reports.zip
73 | prefix = "http://spinnaker.cs.man.ac.uk/services/rest/output/"
74 | return url[len(prefix) + 1 :]
75 |
76 |
77 | class BrainScaleSTemporaryStorage:
78 | name = "BrainScaleS temporary storage"
79 | host = "brainscales-r.kip.uni-heidelberg.de"
80 | modes = ("read",)
81 |
82 | @classmethod
83 | def get_path(cls, url):
84 | # example url: https://brainscales-r.kip.uni-heidelberg.de:7443/nmpi/job_165928/slurm-4215780.out
85 | prefix = "https://brainscales-r.kip.uni-heidelberg.de:7443/nmpi/"
86 | return url[len(prefix) + 1 :]
87 |
88 |
89 | class DemoTemporaryStorage:
90 | name = "Demo temporary storage"
91 | host = "demo.hbpneuromorphic.eu"
92 | modes = ("read",)
93 |
94 | @classmethod
95 | def get_path(cls, url):
96 | # example url: https://demo.hbpneuromorphic.eu/data/my_collab/job_536538/results.txt
97 | prefix = "https://demo.hbpneuromorphic.eu/data/"
98 | return url[len(prefix) + 1 :]
99 |
100 |
101 | class EBRAINSDrive:
102 | name = "EBRAINS Drive"
103 | host = settings.EBRAINS_DRIVE_SERVICE_URL
104 | modes = ("read", "write")
105 | size_limit = 1.0 # GiB
106 |
107 | @classmethod
108 | def _get_client(cls, token):
109 | env = ""
110 | if "-int." in cls.host:
111 | env = "int"
112 | return DriveApiClient(token=token, env=env)
113 |
114 | @classmethod
115 | def copy(cls, file, user, collab=None):
116 | access_token = user.token["access_token"]
117 | ebrains_drive_client = cls._get_client(token=access_token)
118 |
119 | path_parts = file.path.split("/")
120 | if collab:
121 | collab_name = collab
122 | remote_path = ensure_path_from_root(file.path)
123 | else:
124 | collab_name = path_parts[0]
125 | remote_path = "/".join([""] + path_parts[1:])
126 |
127 | # ebrains_drive_client.repos.get_repo_by_url is currently broken
128 | # while waiting for a release with a fix, we implement a fixed version here
129 | # target_repository = ebrains_drive_client.repos.get_repo_by_url(collab_name)
130 |
131 | match_repos = ebrains_drive_client.repos.get_repos_by_filter("name", collab_name)
132 |
133 | if len(match_repos) == 0:
134 | raise Exception("Couldn't identify any repo associated with specified URL!")
135 | elif len(match_repos) > 1:
136 | raise Exception("Couldn't uniquely identify the repo associated with specified URL!")
137 | else:
138 | target_repository = match_repos[0]
139 |
140 | try:
141 | file_obj = target_repository.get_file(remote_path)
142 | # todo: add option to overwrite files
143 | except DoesNotExist:
144 | local_path = download_file_to_tmp_dir(file.url)
145 | # upload the temporary copy to the Drive
146 | file_size = get_file_size(local_path, "GiB")
147 | if file_size > EBRAINSDrive.size_limit:
148 | raise SourceFileIsTooBig(
149 |                     f"The file is too large ({file_size} GiB) to be moved to the Drive (limit {EBRAINSDrive.size_limit} GiB)"
150 | )
151 | root_dir = target_repository.get_dir("/")
152 | dir_path = "/".join(path_parts[1:-1])
153 | dir_obj = drive_mkdir_p(root_dir, dir_path)
154 | file_name = path_parts[-1]
155 | file_obj = dir_obj.upload_local_file(local_path, name=file_name, overwrite=True)
156 | urlcleanup()
157 |
158 | return file_obj.get_download_link()
159 |
160 | @classmethod
161 | def _delete(cls, collab_name, path, access_token):
162 | # private method for use by test framework to clean up
163 | ebrains_drive_client = cls._get_client(token=access_token)
164 | target_repository = ebrains_drive_client.repos.get_repo_by_url(collab_name)
165 | dir_obj = target_repository.get_dir(path)
166 | dir_obj.delete()
167 |
168 | @classmethod
169 | def get_download_url(cls, drive_uri, user):
170 | access_token = user.token["access_token"]
171 | ebrains_drive_client = cls._get_client(token=access_token)
172 | assert drive_uri.startswith("drive://")
173 | path = drive_uri[len("drive://") :]
174 |
175 | collab_name, *path_parts = path.split("/")
176 | remote_path = "/".join([""] + path_parts)
177 |
178 | target_repository = ebrains_drive_client.repos.get_repo_by_url(collab_name)
179 | try:
180 | dir_obj = target_repository.get_dir(remote_path)
181 | # todo: add option to overwrite files
182 | except DoesNotExist:
183 | dir_obj = None
184 | try:
185 | file_obj = target_repository.get_file(remote_path)
186 | except DoesNotExist:
187 | errmsg = (
188 | f"Tried to get download URL for {remote_path} in collab {collab_name} "
189 | f"from {ebrains_drive_client.server} but this path does not exist. "
190 | f"(Drive URI was {drive_uri})"
191 | )
192 |                 raise SourceFileDoesNotExist(errmsg)
193 |
194 | # generate a random but repeatable name for the temporary file
195 | try:
196 | os.makedirs(settings.TMP_FILE_ROOT, exist_ok=True)
197 | except PermissionError as err:
198 | raise Exception(os.getcwd()) from err
199 |
200 | zip_file_name = f"{uuid.uuid5(uuid.NAMESPACE_URL, drive_uri)}.zip"
201 | if zip_file_name not in os.listdir(settings.TMP_FILE_ROOT):
202 | local_zip_file_path = os.path.join(settings.TMP_FILE_ROOT, zip_file_name)
203 | if dir_obj:
204 | # download zip of Drive directory contents
205 | _response = dir_obj.download(local_zip_file_path)
206 | # todo: check the response
207 | else:
208 | # create a zip archive and put the remote file in it
209 | with zipfile.ZipFile(local_zip_file_path, mode="x") as zf:
210 | with zf.open(path_parts[-1], "w") as fp:
211 | fp.write(file_obj.get_content())
212 |
213 | return f"{settings.TMP_FILE_URL}/{zip_file_name}"
214 |
215 |
216 | class EBRAINSBucket:
217 | name = "EBRAINS Bucket"
218 | host = settings.EBRAINS_BUCKET_SERVICE_URL
219 | modes = ("read", "write")
220 |
221 | @classmethod
222 | def _get_client(cls, token):
223 | env = ""
224 | if "-int." in cls.host:
225 | env = "int"
226 | # Workaround, until https://github.com/HumanBrainProject/ebrains-storage/pull/31 is merged
227 | # client = BucketApiClient(token=token, env=env)
228 | client = BucketApiClient(token=token)
229 | client._set_env(env)
230 | client.server = f"https://data-proxy{client.suffix}.ebrains.eu/api"
231 | return client
232 |
233 | @classmethod
234 | def copy(cls, file, user, collab=None):
235 | access_token = user.token["access_token"]
236 | ebrains_bucket_client = cls._get_client(token=access_token)
237 |
238 | if collab:
239 | collab_name = collab
240 | remote_path = ensure_path_from_root(file.path)
241 | else:
242 | path_parts = file.path.split("/")
243 | collab_name = path_parts[0]
244 | remote_path = "/".join([""] + path_parts[1:])
245 |
246 | target_bucket = ebrains_bucket_client.buckets.get_bucket(collab_name)
247 | all_files = {dpf.name for dpf in target_bucket.ls()}
248 |
249 | if remote_path in all_files:
250 | pass # todo: add option to overwrite files
251 | else:
252 | local_path = download_file_to_tmp_dir(file.url)
253 | target_bucket.upload(local_path, remote_path)
254 |
255 | return f"https://{cls.host}/api/v1/buckets/{collab_name}{remote_path}"
256 |
257 | @classmethod
258 | def _delete(cls, collab_name, path, access_token):
259 | # private method for use by test framework to clean up
260 | ebrains_bucket_client = cls._get_client(token=access_token)
261 | bucket = ebrains_bucket_client.buckets.get_bucket(collab_name)
262 | file_obj = bucket.get_file(path)
263 | file_obj.delete()
264 |
265 |
266 | class TestRepository:
267 | name = "Fake repository used for testing"
268 | host = "example.com"
269 | modes = ("read", "write")
270 |
271 | def copy(file, user, collab=None):
272 | return "https://example.com/" + file.path
273 |
274 | @classmethod
275 | def get_path(cls, url):
276 | parts = urlparse(url)
277 | return parts.path
278 |
279 |
280 | available_repositories = (
281 | SpiNNakerTemporaryStorage,
282 | BrainScaleSTemporaryStorage,
283 | DemoTemporaryStorage,
284 | EBRAINSDrive,
285 | EBRAINSBucket,
286 | TestRepository,
287 | )
288 |
289 | repository_lookup_by_host = {r.host: r for r in available_repositories}
290 |
291 | repository_lookup_by_name = {r.name: r for r in available_repositories}
292 |
--------------------------------------------------------------------------------
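Each repository class above ties the hostname of an output-file URL to a storage backend: the temporary stores are read-only sources, while EBRAINSDrive and EBRAINSBucket can also receive copies (subject to the Drive's size limit). A small illustrative lookup by hostname, using a made-up SpiNNaker output URL of the documented form:

    from urllib.parse import urlparse

    from simqueue.data_repositories import repository_lookup_by_host

    url = "http://spinnaker.cs.man.ac.uk/services/rest/output/my-collab/142973/reports.zip"
    repo = repository_lookup_by_host[urlparse(url).hostname]
    print(repo.name)           # "SpiNNaker Manchester temporary storage"
    print(repo.get_path(url))  # the path relative to the temporary storage root

--------------------------------------------------------------------------------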
/api/simqueue/globals.py:
--------------------------------------------------------------------------------
1 | STANDARD_QUEUES = ("BrainScaleS", "BrainScaleS-ESS", "Spikey", "SpiNNaker", "BrainScaleS-2")
2 |
3 | RESOURCE_USAGE_UNITS = {
4 | "BrainScaleS": "wafer-hours",
5 | "BrainScaleS-2": "chip-hours",
6 | "SpiNNaker": "core-hours",
7 | "BrainScaleS-ESS": "hours",
8 | "Spikey": "hours",
9 | "TestPlatform": "bushels", # for API testing
10 |     "Test": "litres",  # for client testing
11 | "Demo": "hours",
12 | }
13 |
14 | PROVIDER_QUEUE_NAMES = {
15 | "uhei": ["BrainScaleS", "BrainScaleS-2", "BrainScaleS-ESS", "Spikey"],
16 | "uman": ["SpiNNaker"],
17 | "nmpi": ["TestPlatform", "Test", "Demo"],
18 | "benchmark_runner": [],
19 | "uhei-jenkins-test-user": ["BrainScaleS", "BrainScaleS-ESS", "BrainScaleS-2", "Spikey"],
20 | }
21 |
22 | DEMO_QUOTA_SIZES = {
23 | "BrainScaleS": 0.1,
24 | "BrainScaleS-2": 1.0,
25 | "SpiNNaker": 5000.0,
26 | "Spikey": 1.0,
27 | "Demo": 1.0,
28 | }
29 |
--------------------------------------------------------------------------------
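These constants are used throughout the API: RESOURCE_USAGE_UNITS provides the units attached to a job's or session's resource_usage (falling back to "hours" for unknown platforms, as in data_models), PROVIDER_QUEUE_NAMES restricts which queues each provider's API key may act on, and DEMO_QUOTA_SIZES defines per-platform default quota amounts. For example:

    from simqueue.globals import PROVIDER_QUEUE_NAMES, RESOURCE_USAGE_UNITS

    print(RESOURCE_USAGE_UNITS.get("SpiNNaker", "hours"))  # "core-hours"
    print(PROVIDER_QUEUE_NAMES["uman"])                    # ["SpiNNaker"]

--------------------------------------------------------------------------------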
/api/simqueue/main.py:
--------------------------------------------------------------------------------
1 | import os
2 | from contextlib import asynccontextmanager
3 | from fastapi import FastAPI
4 | from fastapi.staticfiles import StaticFiles
5 | from starlette.middleware.sessions import SessionMiddleware
6 | from starlette.middleware.cors import CORSMiddleware
7 |
8 | from . import settings
9 | from .resources import for_users, for_providers, for_admins, statistics, auth
10 | from .db import database
11 |
12 |
13 | description = """
14 | The EBRAINS neuromorphic computing remote access service allows users to run simulations/emulations
15 | on the [SpiNNaker](https://www.ebrains.eu/tools/spinnaker)
16 | and [BrainScaleS](https://www.ebrains.eu/tools/brainscales) systems
17 | by submitting a [PyNN](http://neuralensemble.org/docs/PyNN/) script
18 | and associated job configuration information to a central queue.
19 |
20 | The system consists of:
21 | - a web API (this service) [[Source code](https://github.com/HumanBrainProject/hbp_neuromorphic_platform)]
22 | - a GUI client (the [Job Manager app](https://job-manager.hbpneuromorphic.eu/))
23 | - a [Python/command-line client](https://github.com/HumanBrainProject/hbp-neuromorphic-client).
24 |
25 | Users can submit scripts stored locally on their own machine, in a public Git repository,
26 | in the [EBRAINS Knowledge Graph](https://search.kg.ebrains.eu/?category=Model),
27 | or in [EBRAINS Collaboratory](https://wiki.ebrains.eu/) storage (Drive/Bucket).
28 | Users can track the progress of their job, and view and/or download the results,
29 | log files, and provenance information.
30 |
31 | To use the API, log in here, click on "Authorize", then
32 | copy the *access_token* into the "HTTPBearer" box
33 | (this process will be streamlined for the final release).
34 |
35 | For more information, visit the [EBRAINS website](https://www.ebrains.eu/modelling-simulation-and-computing/simulation/neuromorphic-computing-3).
36 |
37 | This service was developed in the Human Brain Project,
38 | funded from the European Union’s Horizon 2020 Framework Programme for Research and Innovation
39 | under Specific Grant Agreements No. 720270, No. 785907 and No. 945539
40 | (Human Brain Project SGA1, SGA2 and SGA3).
41 | """
42 |
43 |
44 | @asynccontextmanager
45 | async def lifespan(app: FastAPI):
46 | # Before the application starts, connect to the database
47 | await database.connect()
48 | yield
49 | # When the application shuts down, disconnect from the database
50 | await database.disconnect()
51 |
52 |
53 | app = FastAPI(
54 | title="EBRAINS Neuromorphic Computing Job Queue API",
55 | description=description,
56 | version="3.0",
57 | lifespan=lifespan,
58 | )
59 |
60 | app.add_middleware(SessionMiddleware, secret_key=settings.SESSIONS_SECRET_KEY)
61 | app.add_middleware(
62 | CORSMiddleware,
63 | allow_origins=["*"],
64 | allow_credentials=True,
65 | allow_methods=["*"],
66 | allow_headers=["*"],
67 | )
68 |
69 | app.include_router(for_users.router, tags=["For all users"])
70 | app.include_router(for_providers.router, tags=["For use by computing system providers"])
71 | app.include_router(for_admins.router, tags=["For use by administrators"])
72 | app.include_router(statistics.router, tags=["Statistics"])
73 | app.include_router(auth.router, tags=["Authentication and authorization"])
74 |
75 | this_dir = os.path.dirname(__file__)
76 | dashboard_path = os.path.join(this_dir, "..", "dashboard")
77 | app.mount("/dashboard", StaticFiles(directory=dashboard_path, html=True), name="dashboard")
78 |
--------------------------------------------------------------------------------
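main.py assembles the application: the user, provider, admin, statistics and auth routers, session and CORS middleware, the database lifespan handler, and the static dashboard mount. A minimal sketch for inspecting the resulting routes without a full deployment (it assumes the settings environment variables are defined; fetching the OpenAPI schema does not touch the database, and the lifespan handler only runs when the test client is used as a context manager):

    from fastapi.testclient import TestClient

    from simqueue.main import app

    client = TestClient(app)
    schema = client.get("/openapi.json").json()
    # lists every path exposed by the routers included above
    print(sorted(schema["paths"]))

--------------------------------------------------------------------------------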
/api/simqueue/oauth.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import requests
3 | from authlib.integrations.starlette_client import OAuth
4 | import httpx
5 | from httpx import Timeout
6 | from fastapi.security.api_key import APIKeyHeader
7 | from fastapi import Security, HTTPException, status as status_codes
8 |
9 | from . import settings, db
10 |
11 |
12 | logger = logging.getLogger("simqueue")
13 |
14 | oauth = OAuth()
15 |
16 | oauth.register(
17 | name="ebrains",
18 | server_metadata_url=f"{settings.EBRAINS_IAM_SERVICE_URL}/.well-known/openid-configuration",
19 | client_id=settings.EBRAINS_IAM_CLIENT_ID,
20 | client_secret=settings.EBRAINS_IAM_SECRET,
21 | userinfo_endpoint=f"{settings.EBRAINS_IAM_SERVICE_URL}/protocol/openid-connect/userinfo",
22 | client_kwargs={
23 | "scope": "openid profile collab.drive group team roles email",
24 | "trust_env": False,
25 | "timeout": Timeout(timeout=settings.AUTHENTICATION_TIMEOUT),
26 | },
27 | )
28 |
29 |
30 | async def get_collab_info(collab, token):
31 | collab_info_url = f"{settings.EBRAINS_COLLAB_SERVICE_URL}collabs/{collab}"
32 | headers = {"Authorization": f"Bearer {token}"}
33 | res = requests.get(collab_info_url, headers=headers)
34 | response = res.json()
35 | if isinstance(response, dict) and "code" in response and response["code"] == 404:
36 | raise ValueError("Invalid collab id")
37 | return response
38 |
39 |
40 | class User:
41 | def __init__(self, **kwargs):
42 | for key, value in kwargs.items():
43 | setattr(self, key, value)
44 |
45 | @classmethod
46 | async def from_token(cls, token):
47 | try:
48 | user_info = await oauth.ebrains.userinfo(
49 | token={"access_token": token, "token_type": "bearer"}
50 | )
51 | except httpx.HTTPStatusError as err:
52 | if "401" in str(err):
53 | if token:
54 | message = "Token may have expired"
55 | else:
56 | message = "No token provided"
57 | raise HTTPException(
58 | status_code=status_codes.HTTP_401_UNAUTHORIZED,
59 | detail=message,
60 | )
61 | else:
62 | raise
63 | user_info["token"] = {"access_token": token, "token_type": "bearer"}
64 | return cls(**user_info)
65 |
66 | def __repr__(self):
67 | return f"User('{self.username}')"
68 |
69 | @property
70 | def is_admin(self):
71 | return self.can_edit("neuromorphic-platform-admin")
72 |
73 | @property
74 | def username(self):
75 | return self.preferred_username
76 |
77 | async def can_view(self, collab):
78 | # first of all, check team permissions
79 | target_team_names = {
80 | role: f"collab-{collab}-{role}" for role in ("viewer", "editor", "administrator")
81 | }
82 | for role, team_name in target_team_names.items():
83 | if team_name in self.roles.get("team", []):
84 | return True
85 | # if that fails, check if it's a public collab
86 | try:
87 | collab_info = await get_collab_info(collab, self.token["access_token"])
88 | except ValueError:
89 | return False
90 | else:
91 | return collab_info.get("isPublic", False)
92 |
93 | def can_edit(self, collab):
94 | target_team_names = {
95 | role: f"collab-{collab}-{role}" for role in ("editor", "administrator")
96 | }
97 | for role, team_name in target_team_names.items():
98 | if team_name in self.roles.get("team", []):
99 | return True
100 |
101 | def get_collabs(self, access=["viewer", "editor", "administrator"]):
102 | collabs = set()
103 | for team_access in self.roles.get("team", []):
104 | # note, if team information is missing from userinfo that means
105 | # the user is not a member of any collab
106 | parts = team_access.split("-")
107 | assert parts[0] == "collab"
108 | collab = "-".join(parts[1:-1])
109 | role = parts[-1]
110 | if role in access:
111 | collabs.add(collab)
112 | return sorted(collabs)
113 |
114 |
115 | api_key_header_optional = APIKeyHeader(name="x-api-key", auto_error=False)
116 | api_key_header = APIKeyHeader(name="x-api-key", auto_error=True)
117 |
118 |
119 | async def _get_provider(api_key):
120 | provider_name = db.get_provider(api_key)
121 | if provider_name:
122 | return provider_name
123 | else:
124 | raise HTTPException(
125 | status_code=status_codes.HTTP_403_FORBIDDEN, detail="Could not validate API key"
126 | )
127 |
128 |
129 | async def get_provider(api_key: str = Security(api_key_header)):
130 | return await _get_provider(api_key)
131 |
132 |
133 | async def get_provider_optional(api_key: str = Security(api_key_header_optional)):
134 | if api_key:
135 | return await _get_provider(api_key)
136 | else:
137 | return None
138 |
--------------------------------------------------------------------------------
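The User class above wraps the EBRAINS IAM userinfo response and derives permissions from the collab-<name>-<role> team roles, while get_provider and get_provider_optional validate the x-api-key header presented by the hardware systems. A small sketch of using User outside the request handlers, assuming you already hold a valid EBRAINS access token:

    import asyncio

    from simqueue.oauth import User

    async def describe(token):
        user = await User.from_token(token)
        print(user.username, "admin" if user.is_admin else "regular user")
        # collabs in which the user has at least viewer access
        print(user.get_collabs(access=["viewer", "editor", "administrator"]))

    asyncio.run(describe("<access_token>"))

--------------------------------------------------------------------------------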
/api/simqueue/resources/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HumanBrainProject/ebrains-neuromorphic-job-queue-api/402831ef0c98e092c472537d092aaa53a6209c78/api/simqueue/resources/__init__.py
--------------------------------------------------------------------------------
/api/simqueue/resources/auth.py:
--------------------------------------------------------------------------------
1 | """
2 | Authentication endpoints: browser-based login via EBRAINS IAM, returning an access token for use with the rest of the API.
3 |
4 | Copyright 2022 CNRS
5 |
6 | Licensed under the Apache License, Version 2.0 (the "License");
7 | you may not use this file except in compliance with the License.
8 | You may obtain a copy of the License at
9 |
10 | http://www.apache.org/licenses/LICENSE-2.0
11 |
12 | Unless required by applicable law or agreed to in writing, software
13 | distributed under the License is distributed on an "AS IS" BASIS,
14 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | See the License for the specific language governing permissions and
16 | limitations under the License.
17 | """
18 |
19 | from datetime import datetime
20 | from fastapi import APIRouter
21 | from fastapi.security import HTTPBearer
22 | from starlette.requests import Request
23 | from ..oauth import oauth
24 | from ..settings import BASE_URL
25 |
26 | router = APIRouter()
27 | auth = HTTPBearer()
28 |
29 |
30 | @router.get("/login")
31 | async def login_via_ebrains(request: Request):
32 | redirect_uri = BASE_URL + "/auth"
33 | return await oauth.ebrains.authorize_redirect(request, redirect_uri)
34 |
35 |
36 | @router.get("/auth")
37 | async def auth_via_ebrains(request: Request):
38 | token = await oauth.ebrains.authorize_access_token(request)
39 | user = token["userinfo"]
40 | user2 = await oauth.ebrains.userinfo(token=token)
41 | user.update(user2)
42 | response = {
43 | "access_token": token["access_token"],
44 | "token_expires": datetime.fromtimestamp(token["expires_at"]),
45 | "user": {
46 | "name": user["name"],
47 | "user_id_v1": user.get("mitreid-sub"),
48 | "username": user["preferred_username"],
49 | "given_name": user["given_name"],
50 | "family_name": user["family_name"],
51 | "team": user["roles"]["team"],
52 | "group": user["roles"]["group"],
53 | },
54 | }
55 | return response
56 |
--------------------------------------------------------------------------------
/api/simqueue/resources/for_admins.py:
--------------------------------------------------------------------------------
1 | from uuid import UUID
2 | import logging
3 | import asyncio
4 |
5 | from fastapi import APIRouter, Depends, Query, Path, HTTPException, status as status_codes
6 | from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
7 |
8 |
9 | from ..data_models import QuotaSubmission
10 | from .. import db, oauth
11 |
12 | logger = logging.getLogger("simqueue")
13 |
14 | auth = HTTPBearer()
15 | router = APIRouter()
16 |
17 |
18 | @router.delete("/jobs/{job_id}", status_code=status_codes.HTTP_200_OK)
19 | async def delete_job(
20 | job_id: int = Path(..., title="Job ID", description="ID of the job to be deleted"),
21 | as_admin: bool = Query(
22 | False, description="Run this query with admin privileges, if you have them"
23 | ),
24 | token: HTTPAuthorizationCredentials = Depends(auth),
25 | ):
26 | """
27 | If called normally this sets the job status to "removed".
28 | If called by an admin with "?as_admin=true", the job is completely deleted from the database.
29 | """
30 |
31 | get_user_task = asyncio.create_task(oauth.User.from_token(token.credentials))
32 | get_job_task = asyncio.create_task(db.get_job(job_id))
33 | user = await get_user_task
34 | job = await get_job_task
35 | if job is None:
36 | raise HTTPException(
37 | status_code=status_codes.HTTP_404_NOT_FOUND,
38 | detail=f"Either there is no job with id {job_id}, or you do not have access to it",
39 | )
40 | if as_admin and user.is_admin:
41 | result = await db.delete_job(job_id)
42 | return result
43 |
44 | access_allowed = job["user_id"] == user.username or await user.can_edit(job["collab_id"])
45 | if access_allowed:
46 | result = await db.update_job(job_id, {"status": "removed"})
47 | return None
48 | else:
49 | raise HTTPException(
50 | status_code=status_codes.HTTP_404_NOT_FOUND,
51 | detail=f"Either there is no job with id {job_id}, or you do not have access to it",
52 | )
53 |
54 |
55 | @router.post("/projects/{project_id}/quotas/", status_code=status_codes.HTTP_201_CREATED)
56 | async def create_quota(
57 | quota: QuotaSubmission,
58 | project_id: UUID = Path(
59 | ...,
60 | title="Project ID",
61 | description="ID of the project to which quotas should be added",
62 | ),
63 | # from header
64 | token: HTTPAuthorizationCredentials = Depends(auth),
65 | ):
66 | get_user_task = asyncio.create_task(oauth.User.from_token(token.credentials))
67 | get_project_task = asyncio.create_task(db.get_project(project_id))
68 | user = await get_user_task
69 | project = await get_project_task
70 | if project is None:
71 | raise HTTPException(
72 | status_code=status_codes.HTTP_404_NOT_FOUND,
73 | detail=f"Either there is no project with id {project_id}, or you do not have access to it",
74 | )
75 | if not user.is_admin:
76 | raise HTTPException(
77 | status_code=status_codes.HTTP_404_NOT_FOUND,
78 | detail="Only admins can add quotas",
79 | )
80 | await db.create_quota(str(project_id), quota.to_db())
81 |
--------------------------------------------------------------------------------
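The admin router above allows hard deletion of jobs (with ?as_admin=true) and creation of quotas: POST /projects/{project_id}/quotas/ expects a QuotaSubmission body (limit, platform, units) and an admin bearer token. An illustrative client call with httpx; the project id and token are placeholders, and the units per platform are those listed in globals.py:

    import httpx

    quota = {"limit": 5000.0, "platform": "SpiNNaker", "units": "core-hours"}
    response = httpx.post(
        "https://nmpi-v3.hbpneuromorphic.eu/projects/<project_id>/quotas/",
        json=quota,
        headers={"Authorization": "Bearer <admin_access_token>"},
    )
    response.raise_for_status()  # expect 201 Created

--------------------------------------------------------------------------------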
/api/simqueue/resources/for_providers.py:
--------------------------------------------------------------------------------
1 | from uuid import UUID
2 | import logging
3 | import asyncio
4 |
5 | from fastapi import APIRouter, Depends, Path, Request, HTTPException, status as status_codes
6 | from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
7 | from fastapi.security.api_key import APIKey
8 |
9 |
10 | from ..data_models import (
11 | Job,
12 | JobPatch,
13 | QuotaUpdate,
14 | Session,
15 | SessionUpdate,
16 | SessionCreation,
17 | SessionStatus,
18 | )
19 | from ..globals import PROVIDER_QUEUE_NAMES
20 | from .. import db, oauth, utils
21 |
22 | logger = logging.getLogger("simqueue")
23 |
24 | auth = HTTPBearer(auto_error=False)
25 | router = APIRouter()
26 |
27 |
28 | @router.get("/jobs/next/{hardware_platform}", response_model=Job)
29 | async def get_next_job(
30 | hardware_platform: str = Path(
31 | ...,
32 | title="Hardware Platform",
33 |         description="hardware platform (e.g. SpiNNaker, BrainScaleS)",
34 | ),
35 | api_key: APIKey = Depends(oauth.get_provider),
36 | ):
37 | provider_name = await api_key
38 | utils.check_provider_matches_platform(provider_name, hardware_platform)
39 | job = await db.get_next_job(hardware_platform)
40 | if job:
41 | # raise NotImplementedError("todo: take the job off the queue")
42 | return Job.from_db(job)
43 | else:
44 | raise HTTPException(
45 | status_code=status_codes.HTTP_404_NOT_FOUND,
46 | detail=f"No queued job for {hardware_platform}",
47 | )
48 |
49 |
50 | @router.put("/jobs/{job_id}", status_code=status_codes.HTTP_200_OK)
51 | async def update_job(
52 | job_update: JobPatch,
53 | job_id: int = Path(..., title="Job ID", description="ID of the job to be retrieved"),
54 | api_key: APIKey = Depends(oauth.get_provider),
55 | ):
56 | """
57 | For use by job handlers to update job metadata
58 | """
59 |
60 | provider_name = await api_key
61 | old_job = await db.get_job(job_id)
62 | if old_job is None:
63 | raise HTTPException(
64 | status_code=status_codes.HTTP_404_NOT_FOUND,
65 | detail=f"Either there is no job with id {job_id}, or you do not have access to it",
66 | )
67 | utils.check_provider_matches_platform(provider_name, old_job["hardware_platform"])
68 | result = await db.update_job(job_id, job_update.to_db())
69 | if job_update.resource_usage:
70 | await utils.update_quotas(
71 | old_job["collab_id"], old_job["hardware_platform"], job_update.resource_usage
72 | )
73 | return result
74 |
75 |
76 | @router.put("/jobs/{job_id}/log", status_code=status_codes.HTTP_200_OK)
77 | async def replace_log(
78 | request: Request,
79 | job_id: int = Path(
80 | ..., title="Job ID", description="ID of the job whose log should be updated"
81 | ),
82 | api_key: APIKey = Depends(oauth.get_provider),
83 | ):
84 | """
85 | For use by job handlers to update job logs by replacing the existing content.
86 | This is available as a separate endpoint since logs can be very large,
87 | and so might need separate error handling.
88 | """
89 |
90 | provider_name = await api_key
91 | job = await db.get_job(job_id)
92 | if job is None:
93 | raise HTTPException(
94 | status_code=status_codes.HTTP_404_NOT_FOUND,
95 | detail=f"Either there is no job with id {job_id}, or you do not have access to it",
96 | )
97 | utils.check_provider_matches_platform(provider_name, job["hardware_platform"])
98 | log_update = await request.body()
99 | result = await db.update_log(job_id, log_update.decode("utf-8"), append=False)
100 | return result
101 |
102 |
103 | @router.patch("/jobs/{job_id}/log", status_code=status_codes.HTTP_200_OK)
104 | async def append_to_log(
105 | request: Request,
106 | job_id: int = Path(
107 | ..., title="Job ID", description="ID of the job whose log should be updated"
108 | ),
109 | api_key: APIKey = Depends(oauth.get_provider),
110 | ):
111 | """
112 | For use by job handlers to update job logs by appending to the existing content.
113 | This is available as a separate endpoint since logs can be very large,
114 | and so might need separate error handling.
115 | """
116 |
117 | provider_name = await api_key
118 | job = await db.get_job(job_id)
119 | if job is None:
120 | raise HTTPException(
121 | status_code=status_codes.HTTP_404_NOT_FOUND,
122 | detail=f"Either there is no job with id {job_id}, or you do not have access to it",
123 | )
124 | utils.check_provider_matches_platform(provider_name, job["hardware_platform"])
125 | log_update = await request.body()
126 | result = await db.update_log(job_id, log_update.decode("utf-8"), append=True)
127 | return result
128 |
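The two log endpoints take the raw request body as plain text rather than JSON. A brief sketch of how a handler might use them (placeholder URL, key and job ID as above):

import httpx

BASE_URL = "https://example.org/api"  # placeholder
HEADERS = {"x-api-key": "provider-api-key"}  # placeholder provider key
job_id = 1234  # hypothetical job ID

# Replace the whole log with PUT ...
httpx.put(f"{BASE_URL}/jobs/{job_id}/log", content="Job started\n", headers=HEADERS)

# ... or append to it as the job progresses with PATCH.
httpx.patch(
    f"{BASE_URL}/jobs/{job_id}/log",
    content="Simulation finished, uploading results\n",
    headers=HEADERS,
)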
129 |
130 | @router.put("/projects/{project_id}/quotas/{quota_id}", status_code=status_codes.HTTP_200_OK)
131 | async def update_quota(
132 | quota_update: QuotaUpdate,
133 | quota_id: int,
134 | project_id: UUID = Path(
135 | ...,
136 | title="Project ID",
137 |         description="ID of the project whose quota should be updated",
138 | ),
139 | # from header
140 | token: HTTPAuthorizationCredentials = Depends(auth),
141 | api_key: APIKey = Depends(oauth.get_provider_optional),
142 | ):
143 | get_project_task = asyncio.create_task(db.get_project(project_id))
144 | project = await get_project_task
145 | if project is None:
146 | raise HTTPException(
147 | status_code=status_codes.HTTP_404_NOT_FOUND,
148 | detail=f"Either there is no project with id {project_id}, or you do not have access to it",
149 | )
150 |
151 | if token:
152 | get_user_task = asyncio.create_task(oauth.User.from_token(token.credentials))
153 | user = await get_user_task
154 | if not user.is_admin:
155 | raise HTTPException(
156 | status_code=status_codes.HTTP_404_NOT_FOUND,
157 | detail="Only admins can update quotas",
158 | )
159 | elif api_key:
160 | pass
161 | else:
162 | raise HTTPException(
163 | status_code=status_codes.HTTP_401_UNAUTHORIZED,
164 | detail="You must provide either a token or an API key",
165 | )
166 |
167 | quota_old = await db.get_quota(quota_id)
168 |
169 | if quota_old is None:
170 | raise HTTPException(
171 | status_code=status_codes.HTTP_404_NOT_FOUND,
172 | detail="There is no Quota with this id",
173 | )
174 |
175 | if api_key:
176 | provider_name = await api_key
177 | utils.check_provider_matches_platform(provider_name, quota_old["platform"])
178 |
179 |     # perhaps should compare `quota_update` and `quota_old`.
180 | # If there are no changes we could avoid doing the database update.
181 | if quota_update is None:
182 | raise HTTPException(
183 | status_code=status_codes.HTTP_404_NOT_FOUND, detail="No content to change"
184 | )
185 |
186 | await db.update_quota(quota_id, quota_update.to_db())
187 |
188 |
189 | @router.post("/sessions/", response_model=Session, status_code=status_codes.HTTP_201_CREATED)
190 | async def start_session(
191 | session: SessionCreation,
192 | api_key: APIKey = Depends(oauth.get_provider),
193 | ):
194 | provider_name = await api_key
195 | utils.check_provider_matches_platform(provider_name, session.hardware_platform)
196 | proceed = await utils.check_quotas(
197 | session.collab, session.hardware_platform, user=session.user_id
198 | )
199 | if proceed:
200 | new_session = await db.create_session(session=session.to_db())
201 | return Session.from_db(new_session)
202 | else:
203 | raise HTTPException(
204 | status_code=status_codes.HTTP_403_FORBIDDEN,
205 | detail="The user does not have sufficient compute quota to start this session",
206 | )
207 |
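A hedged sketch of a provider opening a session before running interactive work, using the same fields as test_integration.test_session_lifetime (placeholder URL, key, collab and user); a 403 response means the collab has no remaining quota for the platform:

import httpx

BASE_URL = "https://example.org/api"  # placeholder
HEADERS = {"x-api-key": "provider-api-key"}  # placeholder provider key

response = httpx.post(
    f"{BASE_URL}/sessions/",
    json={
        "collab": "my-collab",  # placeholder collab name
        "user_id": "some-user",  # placeholder user
        "hardware_platform": "SpiNNaker",
        "hardware_config": {"python_version": "3.9"},
    },
    headers=HEADERS,
)
response.raise_for_status()  # 201 on success, 403 if the quota is exhausted
session_uri = response.json()["resource_uri"]  # used later to close the session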
208 |
209 | @router.put("/sessions/{session_id}", status_code=status_codes.HTTP_200_OK)
210 | async def update_session(
211 | session_update: SessionUpdate,
212 | session_id: int = Path(
213 |         ..., title="Session ID", description="ID of the session to be updated"
214 | ),
215 | api_key: APIKey = Depends(oauth.get_provider),
216 | ):
217 | """
218 | For use by computing system providers to update session metadata
219 | """
220 |
221 | provider_name = await api_key
222 | old_session = await db.get_session(session_id)
223 | if old_session is None:
224 | raise HTTPException(
225 | status_code=status_codes.HTTP_404_NOT_FOUND,
226 | detail=f"Either there is no session with id {session_id}, or you do not have access to it",
227 | )
228 | utils.check_provider_matches_platform(provider_name, old_session["hardware_platform"])
229 |     # update quotas when the session has ended
230 | if session_update.status in (SessionStatus.finished, SessionStatus.error):
231 | await utils.update_quotas(
232 | old_session["collab_id"],
233 | old_session["hardware_platform"],
234 | session_update.resource_usage,
235 | )
236 |
237 | result = await db.update_session(session_id, session_update.to_db())
238 |
--------------------------------------------------------------------------------
/api/simqueue/resources/statistics.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 | from datetime import date, timedelta
3 | from collections import defaultdict
4 | import logging
5 | import numpy as np
6 |
7 | from fastapi import APIRouter, Query
8 |
9 | from ..data_models import (
10 | DateRangeCount,
11 | DateRangeQuantity,
12 | TimeSeries,
13 | QueueStatus,
14 | Histogram,
15 | ProjectStatus,
16 | )
17 | from .. import db
18 | from ..globals import STANDARD_QUEUES
19 |
20 |
21 | logger = logging.getLogger("simqueue")
22 |
23 | router = APIRouter()
24 |
25 |
26 | def normalize_start_end(start: date = None, end: date = None):
27 | today = date.today()
28 | if (start is None) and (end is None):
29 | end = today
30 | start = end - timedelta(30)
31 | elif start is None:
32 | start = end - timedelta(30)
33 | elif end is None:
34 | end = today
35 | return start, end
36 |
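A small worked example of the defaulting behaviour: if neither bound is given, the window is the last 30 days, and a single missing bound is filled in relative to the other.

from datetime import date, timedelta

# assuming "today" is 2022-10-31:
today = date(2022, 10, 31)
assert today - timedelta(30) == date(2022, 10, 1)  # normalize_start_end() -> last 30 days
assert date(2022, 10, 15) - timedelta(30) == date(2022, 9, 15)  # start derived from a given end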
37 |
38 | @router.get("/statistics/job-count", response_model=List[DateRangeCount])
39 | async def job_count(start: date = None, end: date = None, interval: int = 7):
40 | """
41 | Number of jobs for each backend in a given time period
42 | """
43 | start, end = normalize_start_end(start, end)
44 |
45 | results = []
46 | counts = defaultdict(lambda: 0)
47 | for platform in STANDARD_QUEUES:
48 | completion_timestamps = await db.query_jobs(
49 | status=["finished", "error"],
50 | hardware_platform=[platform],
51 | date_range_start=start,
52 | date_range_end=end,
53 | size=100000,
54 | fields=["timestamp_completion"],
55 | )
56 |
57 | completed = np.array(
58 | [
59 | (timestamp["timestamp_completion"].date() - start).days
60 | for timestamp in completion_timestamps
61 | if timestamp["timestamp_completion"]
62 | ],
63 | dtype=int,
64 | )
65 | counts[platform], bin_edges = np.histogram(
66 | completed, bins=np.arange(0, (end - start).days + 1, interval)
67 | ) # the + 1 is because `bins` must include the right-most edge
68 | for i, days in enumerate(bin_edges[:-1]):
69 | count = {}
70 | for platform in counts:
71 | count[platform] = counts[platform][i]
72 | results.append(
73 | {
74 | "start": start + timedelta(int(days)), # timedelta doesn't like numpy int64
75 | "end": start + timedelta(int(interval + days)),
76 | "count": count,
77 | }
78 | )
79 | return results
80 |
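To make the binning concrete, here is the calculation for the data used in test_statistics_router.test_job_count: completion timestamps become whole days since `start`, which are then counted into `interval`-day bins (the `+ 1` ensures the right-most edge is included).

import numpy as np
from datetime import date

start, end, interval = date(2022, 10, 1), date(2022, 10, 15), 7
completion_dates = [date(2022, 10, 3), date(2022, 10, 11), date(2022, 10, 12)]

completed = np.array([(d - start).days for d in completion_dates], dtype=int)  # [2, 10, 11]
counts, bin_edges = np.histogram(completed, bins=np.arange(0, (end - start).days + 1, interval))
# bin_edges -> [0, 7, 14], counts -> [1, 2]:
# one job completed in the week of 1-8 October, two in the week of 8-15 October,
# matching the expected response in the test.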
81 |
82 | @router.get("/statistics/cumulative-job-count", response_model=List[DateRangeCount])
83 | async def cumulative_job_count(start: date = None, end: date = None, interval: int = 7):
84 | """
85 | Cumulative number of jobs for each backend in a given time period
86 | """
87 | cumulative_job_counts = await job_count(start=start, end=end, interval=interval)
88 | count_cumul = defaultdict(lambda: 0)
89 | for entry in cumulative_job_counts:
90 | for platform, value in entry["count"].items():
91 | count_cumul[platform] += value
92 | entry["count"][platform] = count_cumul[platform]
93 | return cumulative_job_counts
94 |
95 |
96 | @router.get("/statistics/cumulative-user-count", response_model=TimeSeries)
97 | async def cumulative_user_count(
98 | hardware_platform: List[str] = Query(
99 | None, description="hardware platform (e.g. SpiNNaker, BrainScales)"
100 | )
101 | ):
102 | """
103 | Cumulative number of platform users
104 | """
105 |
106 | users = await db.get_users_list()
107 | first_job_dates = []
108 | for user in users:
109 | first_submissions_timestamp = await db.query_jobs(
110 | user_id=[user["user_id"]],
111 | hardware_platform=hardware_platform,
112 | size=1,
113 | fields=["timestamp_submission"],
114 | )
115 | if first_submissions_timestamp:
116 | first_job_dates.append(first_submissions_timestamp[0]["timestamp_submission"].date())
117 | first_job_dates.append(date.today())
118 | user_counts = list(range(1, len(first_job_dates)))
119 | user_counts.append(user_counts[-1]) # repeat last value for today's date
120 | return TimeSeries(dates=sorted(first_job_dates), values=user_counts)
121 |
122 |
123 | @router.get("/statistics/active-user-count", response_model=List[DateRangeCount])
124 | async def active_user_count(start: date = None, end: date = None, interval: int = 7):
125 | """
126 | Number of platform users who have submitted at least one job in the last 90 days
127 | """
128 | start, end = normalize_start_end(start, end)
129 | results = []
130 | date_list = list(db.daterange(start, end, interval))
131 | date_list.append(end)
132 | for start_date, end_date in zip(date_list[:-1], date_list[1:]):
133 | start_active_period = end_date - timedelta(90)
134 | active_users = {}
135 | for platform in STANDARD_QUEUES:
136 | active_users[platform] = await db.get_users_count(
137 | hardware_platform=[platform],
138 | date_range_start=start_active_period,
139 | date_range_end=end_date,
140 | )
141 | # note that the "total" value may be less than the sum of the per-platform values,
142 | # since some users use multiple platforms
143 | results.append(
144 | {
145 |                 "start": start_date,
146 | "end": end_date,
147 | "count": active_users,
148 | }
149 | )
150 |
151 | return results
152 |
153 |
154 | @router.get("/statistics/queue-length", response_model=List[QueueStatus])
155 | async def queue_length():
156 | """
157 | Number of jobs in each queue (submitting and running)
158 | """
159 |
160 | queue_lengths = []
161 | for queue_name in STANDARD_QUEUES:
162 | r = await db.count_jobs(hardware_platform=[queue_name], status=["running"])
163 | s = await db.count_jobs(hardware_platform=[queue_name], status=["submitted"])
164 | queue_lengths.append(QueueStatus(queue_name=queue_name, running=r, submitted=s))
165 |
166 | return queue_lengths
167 |
168 |
169 | @router.get("/statistics/job-duration", response_model=List[Histogram])
170 | async def job_duration(requested_max: int = None, n_bins: int = 50, scale: str = "linear"):
171 | """
172 | Histograms of total job duration (from submission to completion)
173 | for completed jobs and for error jobs
174 | """
175 | job_durations = []
176 | for status in ["finished", "error"]:
177 | for platform in STANDARD_QUEUES:
178 | completed_jobs = await db.query_jobs(
179 | status=[status],
180 | hardware_platform=[platform],
181 | size=100000,
182 | fields=["timestamp_completion", "timestamp_submission"],
183 | )
184 |
185 | durations = np.array(
186 | [
187 | (job["timestamp_completion"] - job["timestamp_submission"]).seconds
188 | for job in completed_jobs
189 | if (job["timestamp_completion"] is not None)
190 | and (job["timestamp_submission"] is not None)
191 | ]
192 | )
193 | negative_durations = durations < 0
194 | if negative_durations.any():
195 | n_neg = negative_durations.sum()
196 | logger.warning(
197 | "There were {} negative durations ({}%) for status={} and platform={}".format(
198 | n_neg, 100 * n_neg / durations.size, status, platform
199 | )
200 | )
201 | durations = durations[~negative_durations]
202 | if durations.size > 0:
203 | if requested_max is None:
204 | max = (durations.max() // n_bins + 1) * n_bins
205 | else:
206 | max = float(requested_max)
207 | if scale == "log":
208 | log_bins = np.linspace(0, np.ceil(np.log10(max)), n_bins)
209 | values = np.histogram(np.log10(durations), bins=log_bins)[0]
210 | # bins = np.power(10, log_bins)
211 | bins = log_bins
212 | else: # linear, whatever the value of `scale`
213 | values, bins = np.histogram(durations, bins=n_bins, range=(0, max))
214 | job_durations.append(
215 | Histogram(
216 | platform=platform,
217 | status=status,
218 | values=values.tolist(),
219 | bins=bins.tolist(),
220 | scale=scale,
221 | max=max,
222 | )
223 | )
224 |
225 | return job_durations
226 |
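As a concrete illustration of the linear binning (the log branch works the same way, but on log10 of the durations), here is the calculation for the durations in test_statistics_router.test_job_duration, combining the finished and error jobs for brevity:

import numpy as np

durations = np.array([5, 15, 5, 25])  # seconds: three "finished" jobs and one "error" job
n_bins, requested_max = 5, 30

values, bins = np.histogram(durations, bins=n_bins, range=(0, requested_max))
# bins   -> [0., 6., 12., 18., 24., 30.]
# values -> [2, 0, 1, 0, 1]; the test reports [2, 0, 1, 0, 0] and [0, 0, 0, 0, 1]
# because the endpoint histograms "finished" and "error" jobs separately.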
227 |
228 | @router.get("/statistics/cumulative-project-count", response_model=TimeSeries)
229 | async def cumulative_project_count(
230 | status: ProjectStatus = Query(
231 | description="Project status (accepted, rejected, ...)",
232 | default=ProjectStatus.accepted,
233 | )
234 | ):
235 | """
236 | Cumulative number of projects (resource requests) with a given status
237 | """
238 |
239 | submission_dates = [
240 | res["submission_date"]
241 | for res in await db.query_projects(fields=["submission_date"], status=status, size=10000)
242 | ]
243 |
244 | submission_dates.append(date.today())
245 | project_counts = list(range(1, len(submission_dates)))
246 | project_counts.append(project_counts[-1]) # repeat last value for today's date
247 | return TimeSeries(dates=sorted(submission_dates), values=project_counts)
248 |
249 |
250 | @router.get("/statistics/resource-usage", response_model=List[DateRangeQuantity])
251 | async def resource_usage(start: date = None, end: date = None, interval: int = 7):
252 | """
253 | Cumulative quota usage
254 | """
255 | start, end = normalize_start_end(start, end)
256 |
257 | results = []
258 | usage_per_interval = defaultdict(lambda: 0.0)
259 | n_bins = (end - start).days // interval + 1
260 | for platform in STANDARD_QUEUES:
261 | completed_jobs = await db.query_jobs(
262 | status=["finished", "error"],
263 | hardware_platform=[platform],
264 | date_range_start=start,
265 | date_range_end=end,
266 | size=100000,
267 | fields=["timestamp_completion", "resource_usage"],
268 | )
269 | completed = np.array(
270 | [
271 | (job["timestamp_completion"].date() - start).days
272 | for job in completed_jobs
273 | if job["timestamp_completion"]
274 | ],
275 | dtype=int,
276 | )
277 | usage_per_job = np.array(
278 | [job["resource_usage"] for job in completed_jobs if job["timestamp_completion"]]
279 | )
280 | index = completed // interval
281 | usage_per_interval[platform] = np.zeros((n_bins,))
282 | for i, usage in zip(index, usage_per_job):
283 | if usage is not None:
284 | usage_per_interval[platform][i] += usage
285 |
286 | usage_cumul = defaultdict(lambda: 0.0)
287 | for i in range(n_bins):
288 | interval_start = start + timedelta(i * interval)
289 | interval_end = interval_start + timedelta(interval)
290 | for platform in STANDARD_QUEUES:
291 | usage_cumul[platform] += usage_per_interval[platform][i]
292 | new_obj = {"start": interval_start, "end": interval_end, "value": usage_cumul.copy()}
293 | results.append(new_obj)
294 |
295 | return results
296 |
--------------------------------------------------------------------------------
/api/simqueue/settings.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | EBRAINS_IAM_SERVICE_URL = os.environ.get(
4 | "EBRAINS_IAM_SERVICE_URL", "https://iam.ebrains.eu/auth/realms/hbp"
5 | )
6 | EBRAINS_COLLAB_SERVICE_URL = os.environ.get(
7 | "EBRAINS_COLLAB_SERVICE_URL", "https://wiki.ebrains.eu/rest/v1/"
8 | )
9 | EBRAINS_DRIVE_SERVICE_URL = os.environ.get("EBRAINS_DRIVE_SERVICE_URL", "drive.ebrains.eu")
10 | EBRAINS_BUCKET_SERVICE_URL = os.environ.get("EBRAINS_BUCKET_SERVICE_URL", "data-proxy.ebrains.eu")
11 | EBRAINS_IAM_CLIENT_ID = os.environ.get("EBRAINS_IAM_CLIENT_ID")
12 | EBRAINS_IAM_SECRET = os.environ.get("EBRAINS_IAM_SECRET")
13 | SESSIONS_SECRET_KEY = os.environ.get("SESSIONS_SECRET_KEY")
14 | DATABASE_USERNAME = os.environ.get("NMPI_DATABASE_USER", "nmpi_dbadmin")
15 | DATABASE_PASSWORD = os.environ.get("NMPI_DATABASE_PASSWORD")
16 | DATABASE_HOST = os.environ.get("NMPI_DATABASE_HOST")
17 | DATABASE_PORT = os.environ.get("NMPI_DATABASE_PORT")
18 | BASE_URL = os.environ.get("NMPI_BASE_URL", "")
19 | # ADMIN_GROUP_ID = ""
20 | AUTHENTICATION_TIMEOUT = 20
21 | TMP_FILE_URL = BASE_URL + "/tmp_download"
22 | TMP_FILE_ROOT = os.environ.get("NMPI_TMP_FILE_ROOT", "tmp_download")
23 | EMAIL_HOST = os.environ.get("NMPI_EMAIL_HOST")
24 | EMAIL_SENDER = "neuromorphic@ebrains.eu"
25 | EMAIL_PASSWORD = os.environ.get("NMPI_EMAIL_PASSWORD", None)
26 | ADMIN_EMAIL = os.environ.get("NMPI_ADMIN_EMAIL")
27 |
--------------------------------------------------------------------------------
/api/simqueue/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HumanBrainProject/ebrains-neuromorphic-job-queue-api/402831ef0c98e092c472537d092aaa53a6209c78/api/simqueue/tests/__init__.py
--------------------------------------------------------------------------------
/api/simqueue/tests/test_auth_router.py:
--------------------------------------------------------------------------------
1 | from fastapi.testclient import TestClient
2 | from simqueue.main import app
3 |
4 |
5 | client = TestClient(app)
6 |
7 |
8 | def test_read_main():
9 | response = client.get("/")
10 | assert response.status_code == 200
11 | result = response.json()
12 | assert result["about"] == "This is the EBRAINS Neuromorphic Computing Job Queue API."
13 | assert result["authentication"]["collaboratory"].startswith("https://wiki")
14 | assert result["authentication"]["server"].startswith("https://iam")
15 | assert result["version"] == "3"
16 | assert result["links"] == {"documentation": "/docs"}
17 |
--------------------------------------------------------------------------------
/api/simqueue/tests/test_integration.py:
--------------------------------------------------------------------------------
1 | """
2 | Integration tests: exercise the full system from API requests to database access to API responses.
3 | """
4 |
5 | import os
6 | from datetime import date
7 | from tempfile import NamedTemporaryFile
8 | from httpx import AsyncClient, ASGITransport
9 | import pytest
10 | import pytest_asyncio
11 |
12 | from simqueue.data_repositories import EBRAINSDrive
13 | from simqueue.main import app
14 | from simqueue import db, settings
15 |
16 |
17 | TEST_COLLAB = "neuromorphic-testing-private"
18 | TEST_USER = "adavison"
19 | TEST_REPOSITORY = "Fake repository used for testing"
20 | TEST_PLATFORM = "TestPlatform"
21 | EXPECTED_TEST_DB_ADDRESS = ("localhost", "postgres")
22 |
23 |
24 | @pytest.fixture(scope="module")
25 | def user_auth():
26 | token = os.environ.get("EBRAINS_AUTH_TOKEN", None)
27 | if token:
28 | return {"Authorization": f"Bearer {token}"}
29 | else:
30 | pytest.skip("Environment variable EBRAINS_AUTH_TOKEN not set")
31 |
32 |
33 | @pytest.fixture(scope="module")
34 | def provider_auth():
35 | api_key = os.environ.get("NMPI_TESTING_APIKEY", None)
36 | if api_key:
37 | return {"x-api-key": api_key}
38 | else:
39 | pytest.skip("Environment variable NMPI_TESTING_APIKEY not set")
40 |
41 |
42 | @pytest_asyncio.fixture()
43 | async def database_connection():
44 | if settings.DATABASE_HOST not in EXPECTED_TEST_DB_ADDRESS:
45 | pytest.skip("Database address does not match the expected one")
46 | await db.database.connect()
47 | yield
48 | await db.database.disconnect()
49 |
50 |
51 | @pytest_asyncio.fixture()
52 | async def adequate_quota(database_connection):
53 | project = await db.create_project(
54 | {
55 | "collab": TEST_COLLAB,
56 | "owner": TEST_USER,
57 | "title": "Test Project created by test_integration - to delete",
58 | "abstract": "This project should be automatically deleted by pytest",
59 | "description": "this is the description",
60 | "submission_date": date.today(),
61 | }
62 | )
63 | project_id = project["context"]
64 | project = await db.update_project(
65 | project_id, {"accepted": True, "decision_date": date.today()}
66 | )
67 | quota = await db.create_quota(
68 | project_id, {"units": "bushels", "limit": 100, "usage": 0, "platform": TEST_PLATFORM}
69 | )
70 |
71 | yield quota
72 |
73 | await db.delete_project(project_id) # this also deletes the quota
74 |
75 |
76 | def fake_download(url):
77 | if "example.com" in str(url):
78 | fp = NamedTemporaryFile(delete=False, mode="w")
79 | fp.write('{"foo": "bar"}\n')
80 | fp.close()
81 | return fp.name
82 | else:
83 | raise Exception(f"Unexpected url {url}")
84 |
85 |
86 | @pytest.mark.asyncio
87 | async def test_job_lifetime(database_connection, adequate_quota, mocker, provider_auth, user_auth):
88 | """
89 | In this test, a user submits a job, which is retrieved and handled by
90 | the compute system provider.
91 | While this is happening the user checks the job status.
92 | When the job is finished the user retrieves the result.
93 | """
94 |
95 | mocker.patch("simqueue.data_repositories.download_file_to_tmp_dir", fake_download)
96 | # first we check the job queue is empty
97 | # if an error in a previous test run has left a submitted job, it may not be
98 | # in that case, we set all submitted jobs to "error"
99 | async with AsyncClient(transport=ASGITransport(app=app), base_url="http://test") as client:
100 | response0 = await client.get(
101 | f"/jobs/?status=submitted&hardware_platform={TEST_PLATFORM}", headers=provider_auth
102 | )
103 | assert response0.status_code == 200
104 | queued_jobs = response0.json()
105 | if len(queued_jobs) > 0:
106 | for leftover_job in queued_jobs:
107 | response00 = await client.put(
108 | leftover_job["resource_uri"],
109 | json={
110 | "status": "error",
111 | "log": "Job was left over from previous test failure.",
112 | },
113 | headers=provider_auth,
114 | )
115 | assert response00.status_code == 200
116 |
117 | # user submits a job
118 | initial_job_data = {
119 | "code": "import pyNN\n",
120 | "command": "python run.py --with-figure",
121 | "collab": TEST_COLLAB,
122 | "input_data": None,
123 | "hardware_platform": TEST_PLATFORM,
124 | "hardware_config": {"python_version": "3.9"},
125 | "tags": None,
126 | }
127 | async with AsyncClient(transport=ASGITransport(app=app), base_url="http://test") as client:
128 | response1 = await client.post(
129 | "/jobs/",
130 | json=initial_job_data,
131 | headers=user_auth,
132 | )
133 | assert response1.status_code == 201
134 | queued_job = response1.json()
135 | assert queued_job["resource_uri"] == f"/jobs/{queued_job['id']}"
136 |
137 | # user checks the job status
138 | async with AsyncClient(transport=ASGITransport(app=app), base_url="http://test") as client:
139 | response2 = await client.get(
140 | queued_job["resource_uri"],
141 | headers=user_auth,
142 | )
143 | assert response2.status_code == 200
144 | assert response2.json()["status"] == "submitted"
145 |
146 | # provider picks up the job and sets it to "running"
147 | async with AsyncClient(transport=ASGITransport(app=app), base_url="http://test") as client:
148 | response3 = await client.get(f"/jobs/next/{TEST_PLATFORM}", headers=provider_auth)
149 | assert response3.status_code == 200
150 | retrieved_job = response3.json()
151 | for field in ("code", "collab", "command", "hardware_config", "hardware_platform"):
152 | assert retrieved_job[field] == initial_job_data[field]
153 | assert retrieved_job["resource_uri"] == queued_job["resource_uri"]
154 | assert retrieved_job["timestamp_submission"] is not None
155 | assert retrieved_job["user_id"] == TEST_USER
156 |
157 | response4 = await client.put(
158 | retrieved_job["resource_uri"],
159 | json={"status": "running", "log": "Job started"},
160 | headers=provider_auth,
161 | )
162 | assert response4.status_code == 200
163 |
164 | # user checks the job status again
165 | async with AsyncClient(transport=ASGITransport(app=app), base_url="http://test") as client:
166 | response5 = await client.get(
167 | queued_job["resource_uri"],
168 | headers=user_auth,
169 | )
170 | assert response5.status_code == 200
171 | assert response5.json()["status"] == "running"
172 |
173 | # provider finishes handling the job and uploads the results
174 | async with AsyncClient(transport=ASGITransport(app=app), base_url="http://test") as client:
175 | job_id = retrieved_job["id"]
176 | job_update_data = {
177 | "status": "finished",
178 | "log": "Job started\nJob completed successfully",
179 | "output_data": {
180 | "repository": TEST_REPOSITORY,
181 | "files": [
182 | {
183 | "url": f"https://example.com/testing/job_{job_id}/results.json",
184 | "path": f"{TEST_COLLAB}/testing/job_{job_id}/results.json",
185 | "content_type": "application/json",
186 | "size": 423,
187 | "hash": "abcdef0123456789",
188 | }
189 | ],
190 | },
191 | "provenance": {"platform_version": "1.2.3"},
192 | "resource_usage": {"value": 42, "units": "bushels"},
193 | }
194 | response6 = await client.put(
195 | retrieved_job["resource_uri"],
196 | json=job_update_data,
197 | headers=provider_auth,
198 | )
199 | assert response6.status_code == 200
200 |
201 | # user retrieves the results
202 | async with AsyncClient(transport=ASGITransport(app=app), base_url="http://test") as client:
203 | response7 = await client.get(
204 | queued_job["resource_uri"] + "?with_log=true",
205 | headers=user_auth,
206 | )
207 | assert response7.status_code == 200
208 | final_job = response7.json()
209 | assert final_job["status"] == "finished"
210 | for field in ("code", "collab", "command", "hardware_config", "hardware_platform"):
211 | assert final_job[field] == initial_job_data[field]
212 | assert final_job["resource_uri"] == queued_job["resource_uri"]
213 | assert final_job["timestamp_submission"] is not None
214 | assert final_job["timestamp_completion"] is not None
215 | assert final_job["user_id"] == TEST_USER
216 | for field, expected_value in job_update_data.items():
217 | assert final_job[field] == expected_value
218 |
219 | # user copies data to the Drive
220 | async with AsyncClient(transport=ASGITransport(app=app), base_url="http://test") as client:
221 | response8 = await client.put(
222 | final_job["resource_uri"] + "/output_data",
223 | json={
224 | "repository": "EBRAINS Drive",
225 | "files": [], # doesn't matter what goes in 'files'
226 | },
227 | headers=user_auth,
228 | )
229 | assert response8.status_code == 200
230 | for item in response8.json()["files"]:
231 | assert item["url"].startswith(f"https://{settings.EBRAINS_DRIVE_SERVICE_URL}")
232 |
233 | # user checks their quota
234 | async with AsyncClient(transport=ASGITransport(app=app), base_url="http://test") as client:
235 | q = adequate_quota
236 | response9 = await client.get(
237 | f"/projects/{q['project_id']}/quotas/{q['id']}",
238 | headers=user_auth,
239 | )
240 | assert response9.status_code == 200
241 | result = response9.json()
242 | assert result["usage"] == 42
243 |
244 | # cleanup
245 | auth_token = user_auth["Authorization"].split(" ")[1]
246 | EBRAINSDrive._delete(TEST_COLLAB, f"/testing/job_{job_id}", auth_token)
247 | # todo: delete the job, which is not part of the normal lifecycle,
248 | # but it's good to clean up after tests
249 |
250 |
251 | @pytest.mark.asyncio
252 | async def test_session_lifetime(database_connection, adequate_quota, provider_auth, user_auth):
253 | """
254 | In this test, a compute system provider starts a session
255 | then closes it some time later, and reports on resource usage.
256 | """
257 |
258 | # start a session
259 | async with AsyncClient(transport=ASGITransport(app=app), base_url="http://test") as client:
260 | response1 = await client.post(
261 | "/sessions/",
262 | json={
263 | "collab": TEST_COLLAB,
264 | "user_id": TEST_USER,
265 | "hardware_platform": TEST_PLATFORM,
266 | "hardware_config": {"python_version": "3.9"},
267 | },
268 | headers=provider_auth,
269 | )
270 | assert response1.status_code == 201
271 | result = response1.json()
272 | expected = {
273 | "collab": "neuromorphic-testing-private",
274 | "hardware_config": {"python_version": "3.9"},
275 | "hardware_platform": TEST_PLATFORM,
276 | "resource_usage": {"units": "bushels", "value": 0.0},
277 | "status": "running",
278 | "timestamp_end": None,
279 | "user_id": TEST_USER,
280 | }
281 | session_uri = result.pop("resource_uri")
282 | for field in ("id", "timestamp_start"):
283 | result.pop(field)
284 | assert result == expected
285 |
286 | # close the session, and report resource usage
287 | async with AsyncClient(transport=ASGITransport(app=app), base_url="http://test") as client:
288 | response = await client.put(
289 | session_uri,
290 | json={"status": "finished", "resource_usage": {"value": 25, "units": "bushels"}},
291 | headers=provider_auth,
292 | )
293 | assert response.status_code == 200
294 | result = response.json()
295 | expected = None
296 | assert result == expected
297 |
298 | # check the quota has been updated to reflect the resource usage
299 | async with AsyncClient(transport=ASGITransport(app=app), base_url="http://test") as client:
300 | q = adequate_quota
301 | response = await client.get(
302 | f"/projects/{q['project_id']}/quotas/{q['id']}",
303 | headers=user_auth,
304 | )
305 | assert response.status_code == 200
306 | result = response.json()
307 | assert result["usage"] == 25
308 |
309 | # todo: delete the session, which is not part of the normal lifecycle,
310 | # but it's good to clean up after tests
311 |
--------------------------------------------------------------------------------
/api/simqueue/tests/test_oauth.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pytest
3 | from simqueue.oauth import User
4 |
5 |
6 | @pytest.fixture(scope="module")
7 | def token():
8 | try:
9 | return os.environ["EBRAINS_AUTH_TOKEN"]
10 | except KeyError:
11 | pytest.skip("Environment variable EBRAINS_AUTH_TOKEN is not set")
12 |
13 |
14 | @pytest.fixture
15 | def fake_user_data():
16 | return {
17 | "preferred_username": "haroldlloyd",
18 | "roles": {
19 | "group": ["comic-film-actors-from-the-silent-era"],
20 | "team": [
21 | "collab-some-other-collab-viewer",
22 | "collab-neuromorphic-testing-private-editor",
23 | "collab-neuromorphic-platform-admin-administrator",
24 | ],
25 | },
26 | }
27 |
28 |
29 | @pytest.mark.asyncio
30 | async def test_user(token):
31 | user = await User.from_token(token)
32 | assert hasattr(user, "username")
33 |
34 |
35 | def test_user_get_collabs(fake_user_data):
36 | user = User(**fake_user_data)
37 | assert user.get_collabs() == [
38 | "neuromorphic-platform-admin",
39 | "neuromorphic-testing-private",
40 | "some-other-collab",
41 | ]
42 |
43 |
44 | @pytest.mark.asyncio
45 | async def test_user_can_view_as_member(fake_user_data):
46 | user = User(**fake_user_data)
47 | assert await user.can_view("neuromorphic-testing-private")
48 |
49 |
50 | @pytest.mark.asyncio
51 | async def test_user_can_view_public_collab(token):
52 | user = await User.from_token(token)
53 | assert await user.can_view("documentation")
54 |
55 |
56 | @pytest.mark.asyncio
57 | async def test_user_can_view_non_existent_collab(token):
58 | user = await User.from_token(token)
59 | assert not await user.can_view("d0cumentat10n")
60 |
61 |
62 | def test_user_can_edit(fake_user_data):
63 | user = User(**fake_user_data)
64 | assert user.can_edit("neuromorphic-testing-private")
65 | assert not user.can_edit("some-other-collab")
66 |
--------------------------------------------------------------------------------
/api/simqueue/tests/test_repositories.py:
--------------------------------------------------------------------------------
1 | import os
2 | from datetime import datetime
3 | import urllib.request
4 | import requests
5 | import pytest
6 | from simqueue.data_models import DataItem
7 | from simqueue.data_repositories import (
8 | EBRAINSDrive,
9 | EBRAINSBucket,
10 | SourceFileDoesNotExist,
11 | SourceFileIsTooBig,
12 | )
13 |
14 |
15 | class MockUser:
16 | token = {"access_token": os.environ.get("EBRAINS_AUTH_TOKEN", None)}
17 |
18 |
19 | @pytest.fixture(scope="module")
20 | def mock_user():
21 | if MockUser.token["access_token"] is None:
22 | pytest.skip("Environment variable EBRAINS_AUTH_TOKEN is not set")
23 | else:
24 | return MockUser
25 |
26 |
27 | def fake_urlretrieve(url):
28 |     raise urllib.request.HTTPError(url=url, code=404, msg="Not Found", hdrs=None, fp=None)
29 |
30 |
31 | class TestDrive:
32 | def test_copy_small_file(self, mock_user):
33 | repo = EBRAINSDrive
34 | file = DataItem(
35 | url="https://drive.ebrains.eu/f/22862ad196dc4f5b9d4c/?dl=1",
36 | path="neuromorphic-testing-private/files_for_API_testing/test_file.md",
37 | content_type="text/markdown",
38 | size=48,
39 | )
40 |
41 | # normally we would copy from some other repository to the Drive
42 | # here we are copying within the same Drive repository,
43 | # so we artificially change the path
44 | target_remote_dir = f"/api-testing-{datetime.now().isoformat()}"
45 | file.path = f"neuromorphic-testing-private{target_remote_dir}/test_file.md"
46 |
47 | updated_url = repo.copy(file, mock_user)
48 | assert updated_url != file.url
49 |
50 | # read file contents from new URL and check contents
51 | response = requests.get(updated_url)
52 | assert response.status_code == 200
53 | assert response.text == "# test_file\n\n\n\nThis file is used for testing.\n\n\n"
54 |
55 | repo._delete(
56 | "neuromorphic-testing-private", target_remote_dir, mock_user.token["access_token"]
57 | )
58 |
59 | def test_copy_file_gone(self, mocker, mock_user):
60 | mocker.patch("urllib.request.urlretrieve", fake_urlretrieve)
61 | repo = EBRAINSDrive
62 | file = DataItem(
63 | url="http://example.com/this_file_does_not_exist.md",
64 | path=f"neuromorphic-testing-private/api-testing-{datetime.now().isoformat()}/test_file.md",
65 | content_type="text/markdown",
66 | size=48,
67 | )
68 | with pytest.raises(SourceFileDoesNotExist):
69 | result = repo.copy(file, mock_user)
70 |
71 | def test_copy_file_too_large(self, mocker, mock_user):
72 | mocker.patch(
73 | "simqueue.data_repositories.get_file_size", return_value=EBRAINSDrive.size_limit * 2
74 | )
75 | repo = EBRAINSDrive
76 | file = DataItem(
77 | url="https://drive.ebrains.eu/f/22862ad196dc4f5b9d4c/?dl=1",
78 | path=f"neuromorphic-testing-private/api-testing-{datetime.now().isoformat()}/test_file.md",
79 | content_type="text/markdown",
80 | size=48,
81 | )
82 | with pytest.raises(SourceFileIsTooBig):
83 | result = repo.copy(file, mock_user)
84 |
85 |
86 | class TestBucket:
87 | def test_copy_small_file(self, mock_user):
88 | repo = EBRAINSBucket
89 | file = DataItem(
90 | url="https://drive.ebrains.eu/f/22862ad196dc4f5b9d4c/?dl=1",
91 | path="neuromorphic-testing-private/files_for_API_testing/test_file.md",
92 | content_type="text/markdown",
93 | size=48,
94 | )
95 |
96 | # normally we would copy from some other repository to the Bucket
97 | # here we are copying within the same Bucket,
98 | # so we artificially change the path
99 | target_remote_dir = f"/api-testing-{datetime.now().isoformat()}"
100 | file.path = f"neuromorphic-testing-private{target_remote_dir}/test_file.md"
101 |
102 | updated_url = repo.copy(file, mock_user)
103 | assert updated_url != file.url
104 |
105 | # get redirect URL
106 | response = requests.get(
107 | updated_url + "?redirect=false",
108 | headers={"Authorization": f"Bearer {mock_user.token['access_token']}"},
109 | )
110 | assert response.status_code == 200
111 | redirect_url = response.json()["url"]
112 |
113 | # read file contents from redirect URL and check contents
114 | response2 = requests.get(redirect_url)
115 | assert response2.status_code == 200
116 | assert response2.text == "# test_file\n\n\n\nThis file is used for testing.\n\n\n"
117 |
118 | repo._delete(
119 | "neuromorphic-testing-private",
120 | f"{target_remote_dir}/test_file.md",
121 | mock_user.token["access_token"],
122 | )
123 |
--------------------------------------------------------------------------------
/api/simqueue/tests/test_statistics_router.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, date
2 | from fastapi.testclient import TestClient
3 | from simqueue.main import app
4 |
5 |
6 | client = TestClient(app)
7 |
8 |
9 | mock_jobs = [
10 | {
11 | "timestamp_submission": datetime.fromisoformat("2022-10-03T02:44:23+00:00"),
12 | "timestamp_completion": datetime.fromisoformat("2022-10-03T02:44:28+00:00"),
13 | "user_id": "haroldlloyd",
14 | "status": "finished",
15 | "resource_usage": 7.07,
16 | },
17 | {
18 | "timestamp_submission": datetime.fromisoformat("2022-10-11T02:44:23+00:00"),
19 | "timestamp_completion": datetime.fromisoformat("2022-10-11T02:44:38+00:00"),
20 | "user_id": "charliechaplin",
21 | "status": "finished",
22 | "resource_usage": 13.13,
23 | },
24 | {
25 | "timestamp_submission": datetime.fromisoformat("2022-10-12T02:44:23+00:00"),
26 | "timestamp_completion": datetime.fromisoformat("2022-10-12T02:44:28+00:00"),
27 | "user_id": "haroldlloyd",
28 | "status": "finished",
29 | "resource_usage": 19.19,
30 | },
31 | {
32 | "timestamp_submission": datetime.fromisoformat("2022-10-16T02:44:23+00:00"),
33 | "timestamp_completion": datetime.fromisoformat("2022-10-16T02:44:48+00:00"),
34 | "user_id": "haroldlloyd",
35 | "status": "error",
36 | "resource_usage": 23.23,
37 | },
38 | ]
39 |
40 |
41 | async def mock_query_jobs(**kwargs):
42 | if kwargs["hardware_platform"] == ["SpiNNaker"]:
43 | if "status" in kwargs:
44 | return [job for job in mock_jobs if job["status"] in kwargs["status"]]
45 | else:
46 | return mock_jobs
47 | else:
48 | return []
49 |
50 |
51 | def test_job_count(mocker):
52 | mocker.patch("simqueue.db.query_jobs", mock_query_jobs)
53 | response = client.get("/statistics/job-count?start=2022-10-01&end=2022-10-15&interval=7")
54 | assert response.status_code == 200
55 | assert response.json() == [
56 | {
57 | "start": "2022-10-01",
58 | "end": "2022-10-08",
59 | "count": {
60 | "BrainScaleS": 0,
61 | "BrainScaleS-ESS": 0,
62 | "Spikey": 0,
63 | "SpiNNaker": 1,
64 | "BrainScaleS-2": 0,
65 | },
66 | },
67 | {
68 | "start": "2022-10-08",
69 | "end": "2022-10-15",
70 | "count": {
71 | "BrainScaleS": 0,
72 | "BrainScaleS-ESS": 0,
73 | "Spikey": 0,
74 | "SpiNNaker": 2,
75 | "BrainScaleS-2": 0,
76 | },
77 | },
78 | ]
79 |
80 |
81 | def test_cumulative_job_count(mocker):
82 | mocker.patch("simqueue.db.query_jobs", mock_query_jobs)
83 | response = client.get(
84 | "/statistics/cumulative-job-count?start=2022-10-01&end=2022-10-15&interval=7"
85 | )
86 | assert response.status_code == 200
87 | assert response.json() == [
88 | {
89 | "start": "2022-10-01",
90 | "end": "2022-10-08",
91 | "count": {
92 | "BrainScaleS": 0,
93 | "BrainScaleS-ESS": 0,
94 | "Spikey": 0,
95 | "SpiNNaker": 1,
96 | "BrainScaleS-2": 0,
97 | },
98 | },
99 | {
100 | "start": "2022-10-08",
101 | "end": "2022-10-15",
102 | "count": {
103 | "BrainScaleS": 0,
104 | "BrainScaleS-ESS": 0,
105 | "Spikey": 0,
106 | "SpiNNaker": 3,
107 | "BrainScaleS-2": 0,
108 | },
109 | },
110 | ]
111 |
112 |
113 | def test_cumulative_users_count(mocker):
114 | mocker.patch(
115 | "simqueue.db.get_users_list",
116 | return_value=[{"user_id": "haroldlloyd"}, {"user_id": "charliechaplin"}],
117 | )
118 | mocker.patch("simqueue.db.query_jobs", mock_query_jobs)
119 | response = client.get("/statistics/cumulative-user-count?hardware_platform=SpiNNaker")
120 | assert response.status_code == 200
121 | assert response.json() == {
122 | "dates": ["2022-10-03", "2022-10-03", date.today().isoformat()],
123 | "values": [1, 2, 2],
124 | }
125 |
126 |
127 | def test_active_users_count(mocker):
128 | async def mock_get_users_count(
129 | status=None, hardware_platform=None, date_range_start=None, date_range_end=None
130 | ):
131 | if hardware_platform == ["SpiNNaker"]:
132 | users = set()
133 | for job in mock_jobs:
134 | if date_range_start <= job["timestamp_completion"].date() < date_range_end:
135 | users.add(job["user_id"])
136 | return len(users)
137 | else:
138 | return 0
139 |
140 | mocker.patch(
141 | "simqueue.db.get_users_count",
142 | mock_get_users_count,
143 | )
144 | response = client.get(
145 | "/statistics/active-user-count?start=2022-10-01&end=2022-10-15&interval=7"
146 | )
147 | assert response.status_code == 200
148 | assert response.json() == [
149 | {
150 | "start": "2022-10-01",
151 | "end": "2022-10-08",
152 | "count": {
153 | "BrainScaleS": 0,
154 | "BrainScaleS-ESS": 0,
155 | "Spikey": 0,
156 | "SpiNNaker": 1,
157 | "BrainScaleS-2": 0,
158 | },
159 | },
160 | {
161 | "start": "2022-10-08",
162 | "end": "2022-10-15",
163 | "count": {
164 | "BrainScaleS": 0,
165 | "BrainScaleS-ESS": 0,
166 | "Spikey": 0,
167 | "SpiNNaker": 2,
168 | "BrainScaleS-2": 0,
169 | },
170 | },
171 | ]
172 |
173 |
174 | def test_queue_length(mocker):
175 | mocker.patch(
176 | "simqueue.db.count_jobs",
177 | return_value=7,
178 | )
179 | response = client.get("/statistics/queue-length")
180 | assert response.status_code == 200
181 | assert response.json() == [
182 | {"queue_name": "BrainScaleS", "running": 7, "submitted": 7},
183 | {"queue_name": "BrainScaleS-ESS", "running": 7, "submitted": 7},
184 | {"queue_name": "Spikey", "running": 7, "submitted": 7},
185 | {"queue_name": "SpiNNaker", "running": 7, "submitted": 7},
186 | {"queue_name": "BrainScaleS-2", "running": 7, "submitted": 7},
187 | ]
188 |
189 |
190 | def test_job_duration(mocker):
191 | mocker.patch("simqueue.db.query_jobs", mock_query_jobs)
192 | response = client.get("/statistics/job-duration?n_bins=5&requested_max=30")
193 | assert response.status_code == 200
194 | # job durations are [5, 15, 5, 25]
195 | assert response.json() == [
196 | {
197 | "values": [2, 0, 1, 0, 0],
198 | "bins": [0.0, 6.0, 12.0, 18.0, 24.0, 30.0],
199 | "platform": "SpiNNaker",
200 | "status": "finished",
201 | "scale": "linear",
202 | "max": 30,
203 | },
204 | {
205 | "values": [0, 0, 0, 0, 1],
206 | "bins": [0.0, 6.0, 12.0, 18.0, 24.0, 30.0],
207 | "platform": "SpiNNaker",
208 | "status": "error",
209 | "scale": "linear",
210 | "max": 30,
211 | },
212 | ]
213 |
214 |
215 | def test_resource_usage(mocker):
216 | mocker.patch("simqueue.db.query_jobs", mock_query_jobs)
217 | response = client.get("/statistics/resource-usage?interval=7&start=2022-10-01&end=2022-10-28")
218 | assert response.status_code == 200
219 | assert response.json() == [
220 | {
221 | "start": "2022-10-01",
222 | "end": "2022-10-08",
223 | "value": {
224 | "BrainScaleS": 0.0,
225 | "BrainScaleS-ESS": 0.0,
226 | "Spikey": 0.0,
227 | "SpiNNaker": 7.07,
228 | "BrainScaleS-2": 0.0,
229 | },
230 | },
231 | {
232 | "start": "2022-10-08",
233 | "end": "2022-10-15",
234 | "value": {
235 | "BrainScaleS": 0.0,
236 | "BrainScaleS-ESS": 0.0,
237 | "Spikey": 0.0,
238 | "SpiNNaker": 39.39,
239 | "BrainScaleS-2": 0.0,
240 | },
241 | },
242 | {
243 | "start": "2022-10-15",
244 | "end": "2022-10-22",
245 | "value": {
246 | "BrainScaleS": 0.0,
247 | "BrainScaleS-ESS": 0.0,
248 | "Spikey": 0.0,
249 | "SpiNNaker": 62.620000000000005,
250 | "BrainScaleS-2": 0.0,
251 | },
252 | },
253 | {
254 | "start": "2022-10-22",
255 | "end": "2022-10-29",
256 | "value": {
257 | "BrainScaleS": 0.0,
258 | "BrainScaleS-ESS": 0.0,
259 | "Spikey": 0.0,
260 | "SpiNNaker": 62.620000000000005,
261 | "BrainScaleS-2": 0.0,
262 | },
263 | },
264 | ]
265 |
--------------------------------------------------------------------------------
/api/simqueue/tests/test_utility_functions.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from .. import utils
4 | from ..data_models import ResourceUsage
5 | import simqueue.db
6 |
7 |
8 | @pytest.fixture()
9 | def mock_quotas():
10 | return [
11 | {"limit": 100, "usage": 100, "id": 101},
12 | {"limit": 50, "usage": 49, "id": 102},
13 | {"limit": 1000, "usage": 0, "id": 103},
14 | ]
15 |
16 |
17 | @pytest.mark.asyncio
18 | async def test_check_quotas(mocker, mock_quotas):
19 | mocker.patch("simqueue.utils.get_available_quotas", return_value=mock_quotas)
20 | assert await utils.check_quotas("some-collab", "TestPlatform") is True
21 |
22 | mocker.patch("simqueue.utils.get_available_quotas", return_value=mock_quotas[0:1])
23 | assert await utils.check_quotas("some-collab", "TestPlatform") is False
24 |
25 |
26 | @pytest.mark.asyncio
27 | async def test_update_quotas_1(mocker, mock_quotas):
28 | mocker.patch("simqueue.utils.get_available_quotas", return_value=mock_quotas)
29 | mocker.patch("simqueue.db.update_quota")
30 |
31 | await utils.update_quotas(
32 | "some-collab", "TestPlatform", ResourceUsage(units="bushels", value=1)
33 | )
34 | assert simqueue.db.update_quota.await_args_list[0].args == (
35 | 102,
36 | {"limit": 50, "usage": 50, "id": 102},
37 | )
38 |
39 |
40 | @pytest.mark.asyncio
41 | async def test_update_quotas_2(mocker, mock_quotas):
42 | mocker.patch("simqueue.utils.get_available_quotas", return_value=mock_quotas)
43 | mocker.patch("simqueue.db.update_quota")
44 | await utils.update_quotas(
45 | "some-collab", "TestPlatform", ResourceUsage(units="bushels", value=2)
46 | )
47 | assert simqueue.db.update_quota.await_args_list[0].args == (
48 | 102,
49 | {"limit": 50, "usage": 50, "id": 102},
50 | )
51 | assert simqueue.db.update_quota.await_args_list[1].args == (
52 | 103,
53 | {"limit": 1000, "usage": 1, "id": 103},
54 | )
55 |
--------------------------------------------------------------------------------
/api/simqueue/utils.py:
--------------------------------------------------------------------------------
1 | from datetime import date
2 | from email.mime.text import MIMEText
3 | from email.mime.multipart import MIMEMultipart
4 | import logging
5 | import smtplib
6 |
7 | from fastapi import HTTPException, status as status_codes
8 |
9 | from .data_models import ProjectStatus, ResourceUsage
10 | from . import db, settings
11 | from .globals import RESOURCE_USAGE_UNITS, PROVIDER_QUEUE_NAMES, DEMO_QUOTA_SIZES
12 |
13 | logger = logging.getLogger("simqueue")
14 |
15 |
16 | async def get_available_quotas(collab, hardware_platform):
17 | available_projects = await db.query_projects(collab=[collab], status=ProjectStatus.accepted)
18 | available_quotas = []
19 | for project in available_projects:
20 | available_quotas.extend(
21 | await db.query_quotas(project["context"], platform=hardware_platform, size=100)
22 | )
23 | return available_quotas
24 |
25 |
26 | async def check_quotas(collab: str, hardware_platform: str, user: str = None):
27 | """
28 | Check if there is a quota for the specified hardware platform associated with the given collab
29 |
30 |     If the collab has never had a quota for the given platform, but a username `user` is specified,
31 | then a new test/demo quota will be created with the owner set to `user`.
32 | """
33 | available_quotas = await get_available_quotas(collab, hardware_platform)
34 | if len(available_quotas) == 0 and user is not None and hardware_platform in DEMO_QUOTA_SIZES:
35 | # if this collab has never had a quota for this platform, we create a default test quota
36 | await create_test_quota(collab, hardware_platform, user)
37 | return True
38 | for quota in available_quotas:
39 | if quota["usage"] < quota["limit"]:
40 | return True
41 | return False
42 |
43 |
44 | async def update_quotas(collab: str, hardware_platform: str, resource_usage: ResourceUsage):
45 | if resource_usage.units != RESOURCE_USAGE_UNITS[hardware_platform]:
46 | raise HTTPException(
47 | status_code=status_codes.HTTP_400_BAD_REQUEST,
48 | detail=f"Invalid units ({resource_usage.units}) for resource usage. Expected units: {RESOURCE_USAGE_UNITS[hardware_platform]}",
49 | )
50 | available_quotas = await get_available_quotas(collab, hardware_platform)
51 | usage = resource_usage.value
52 | quotas_to_update = []
53 | for quota in available_quotas:
54 | remaining = quota["limit"] - quota["usage"]
55 | if remaining > 0:
56 | if usage <= remaining:
57 | quota["usage"] += usage
58 | quotas_to_update.append(quota)
59 | break
60 | else:
61 | quota["usage"] = quota["limit"]
62 | quotas_to_update.append(quota)
63 | usage -= remaining
64 | for quota in quotas_to_update:
65 | await db.update_quota(quota["id"], quota)
66 |
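The loop above spreads the reported usage across a collab's quotas in order, topping up each one before spilling over into the next. A pure-Python restatement, for illustration only, using the quota set from test_utility_functions:

def spread_usage(quotas, usage):
    """Mimic update_quotas: fill quotas in order until the usage is absorbed."""
    updated = []
    for quota in quotas:
        remaining = quota["limit"] - quota["usage"]
        if remaining <= 0:
            continue  # this quota is already exhausted
        if usage <= remaining:
            quota["usage"] += usage
            updated.append(quota)
            break
        quota["usage"] = quota["limit"]
        updated.append(quota)
        usage -= remaining
    return updated


quotas = [
    {"limit": 100, "usage": 100, "id": 101},  # already full, skipped
    {"limit": 50, "usage": 49, "id": 102},    # one unit remaining
    {"limit": 1000, "usage": 0, "id": 103},
]
updated = spread_usage(quotas, usage=2)
# quota 102 is topped up to its limit (usage 50) and quota 103 absorbs the remaining
# unit (usage 1), matching test_update_quotas_2 in test_utility_functions.py.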
67 |
68 | def check_provider_matches_platform(provider_name: str, hardware_platform: str) -> bool:
69 | allowed_platforms = PROVIDER_QUEUE_NAMES[provider_name]
70 | if hardware_platform not in allowed_platforms:
71 | raise HTTPException(
72 | status_code=status_codes.HTTP_403_FORBIDDEN,
73 | detail=f"The provided API key does not allow access to jobs, sessions, or quotas for {hardware_platform}",
74 | )
75 | return True
76 |
77 |
78 | async def create_test_quota(collab, hardware_platform, owner):
79 | today = date.today()
80 | project = await db.create_project(
81 | {
82 | "collab": collab,
83 | "owner": owner,
84 | "title": f"Test access for the {hardware_platform} platform in collab '{collab}'",
85 | "abstract": (
86 | "This project was created automatically for demonstration/testing purposes. "
87 | f"It gives you a test quota for the {hardware_platform} platform. "
88 | f"All members of the '{collab}' collab workspace can use this quota. "
89 | "When the test quotas are used up, you will need to request a new quota "
90 | "through the Job Manager app or Python client, or by contacting EBRAINS support."
91 | ),
92 | "description": "",
93 | "submission_date": today,
94 | }
95 | )
96 | project_id = project["context"]
97 | project = await db.update_project(project_id, {"accepted": True, "decision_date": today})
98 | # for platform, limit in DEMO_QUOTA_SIZES.items():
99 | quota_data = {
100 | "platform": hardware_platform,
101 | "limit": DEMO_QUOTA_SIZES[hardware_platform],
102 | "usage": 0.0,
103 | "units": RESOURCE_USAGE_UNITS[hardware_platform],
104 | }
105 | quota = await db.create_quota(project_id, quota_data)
106 | return project, quota
107 |
108 |
109 | def send_email(recipient_email: str, body: str):
110 |
111 | # Sender and recipient information
112 | sender_email = settings.EMAIL_SENDER
113 | sender_password = settings.EMAIL_PASSWORD
114 |
115 | success = False
116 |
117 | if sender_password:
118 |
119 | # SMTP server settings
120 | smtp_server = settings.EMAIL_HOST
121 | smtp_port = 587 # Use the appropriate SMTP port (587 for TLS)
122 |
123 | # Create a MIME message
124 | message = MIMEMultipart()
125 | message["From"] = sender_email
126 | message["To"] = recipient_email
127 | message["Subject"] = "[EBRAINS] New neuromorphic computing resource request"
128 |
129 |
130 | message.attach(MIMEText(body, "plain"))
131 |
132 | try:
133 | # Connect to the SMTP server
134 | server = smtplib.SMTP(smtp_server, smtp_port)
135 | server.starttls() # Enable TLS encryption
136 | server.login(sender_email, sender_password)
137 |
138 | # Send the email
139 | server.sendmail(sender_email, recipient_email, message.as_string())
140 | logger.info(f"E-mail to {recipient_email} sent successfully")
141 | success = True
142 |
143 | except Exception as e:
144 | logger.error(f"Failed to send e-mail. Error: {str(e)}")
145 | server = None
146 |
147 | finally:
148 | # Disconnect from the SMTP server
149 | if server:
150 | server.quit()
151 |
152 | else:
153 | logger.info("Did not send e-mail because the password was not set")
154 |
155 | return success
156 |
--------------------------------------------------------------------------------
/documentation/Dockerfile:
--------------------------------------------------------------------------------
1 | #
2 | # docker build -t cnrsunic/neuromorphic_docs .
3 | # docker run -p 443 -d cnrsunic/neuromorphic_docs
4 | #
5 |
6 | FROM nginx:1.9
7 | MAINTAINER andrew.davison@unic.cnrs-gif.fr
8 |
9 | COPY nginx_default /etc/nginx/conf.d/default.conf
10 | COPY splash /usr/share/nginx/html
11 | COPY developer_guide/_build/html /usr/share/nginx/html/developer_guide
12 | COPY issuetracker.html /usr/share/nginx/html/
13 |
14 | RUN chmod a+r /usr/share/nginx/html
15 |
--------------------------------------------------------------------------------
/documentation/developer_guide/Makefile:
--------------------------------------------------------------------------------
1 | # Makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | PAPER =
8 | BUILDDIR = _build
9 |
10 | # User-friendly check for sphinx-build
11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
13 | endif
14 |
15 | # Internal variables.
16 | PAPEROPT_a4 = -D latex_paper_size=a4
17 | PAPEROPT_letter = -D latex_paper_size=letter
18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
19 | # the i18n builder cannot share the environment and doctrees with the others
20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
21 |
22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
23 |
24 | help:
25 | 	@echo "Please use \`make <target>' where <target> is one of"
26 | @echo " html to make standalone HTML files"
27 | @echo " dirhtml to make HTML files named index.html in directories"
28 | @echo " singlehtml to make a single large HTML file"
29 | @echo " pickle to make pickle files"
30 | @echo " json to make JSON files"
31 | @echo " htmlhelp to make HTML files and a HTML help project"
32 | @echo " qthelp to make HTML files and a qthelp project"
33 | @echo " devhelp to make HTML files and a Devhelp project"
34 | @echo " epub to make an epub"
35 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
36 | @echo " latexpdf to make LaTeX files and run them through pdflatex"
37 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
38 | @echo " text to make text files"
39 | @echo " man to make manual pages"
40 | @echo " texinfo to make Texinfo files"
41 | @echo " info to make Texinfo files and run them through makeinfo"
42 | @echo " gettext to make PO message catalogs"
43 | @echo " changes to make an overview of all changed/added/deprecated items"
44 | @echo " xml to make Docutils-native XML files"
45 | @echo " pseudoxml to make pseudoxml-XML files for display purposes"
46 | @echo " linkcheck to check all external links for integrity"
47 | @echo " doctest to run all doctests embedded in the documentation (if enabled)"
48 |
49 | clean:
50 | rm -rf $(BUILDDIR)/*
51 |
52 | html:
53 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
54 | @echo
55 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
56 |
57 | dirhtml:
58 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
59 | @echo
60 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
61 |
62 | singlehtml:
63 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
64 | @echo
65 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
66 |
67 | pickle:
68 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
69 | @echo
70 | @echo "Build finished; now you can process the pickle files."
71 |
72 | json:
73 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
74 | @echo
75 | @echo "Build finished; now you can process the JSON files."
76 |
77 | htmlhelp:
78 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
79 | @echo
80 | @echo "Build finished; now you can run HTML Help Workshop with the" \
81 | ".hhp project file in $(BUILDDIR)/htmlhelp."
82 |
83 | qthelp:
84 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
85 | @echo
86 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \
87 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
88 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/HBPNeuromorphicComputingPlatform.qhcp"
89 | @echo "To view the help file:"
90 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/HBPNeuromorphicComputingPlatform.qhc"
91 |
92 | devhelp:
93 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
94 | @echo
95 | @echo "Build finished."
96 | @echo "To view the help file:"
97 | @echo "# mkdir -p $$HOME/.local/share/devhelp/HBPNeuromorphicComputingPlatform"
98 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/HBPNeuromorphicComputingPlatform"
99 | @echo "# devhelp"
100 |
101 | epub:
102 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
103 | @echo
104 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
105 |
106 | latex:
107 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
108 | @echo
109 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
110 | @echo "Run \`make' in that directory to run these through (pdf)latex" \
111 | "(use \`make latexpdf' here to do that automatically)."
112 |
113 | latexpdf:
114 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
115 | @echo "Running LaTeX files through pdflatex..."
116 | $(MAKE) -C $(BUILDDIR)/latex all-pdf
117 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
118 |
119 | latexpdfja:
120 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
121 | @echo "Running LaTeX files through platex and dvipdfmx..."
122 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
123 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
124 |
125 | text:
126 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
127 | @echo
128 | @echo "Build finished. The text files are in $(BUILDDIR)/text."
129 |
130 | man:
131 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
132 | @echo
133 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
134 |
135 | texinfo:
136 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
137 | @echo
138 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
139 | @echo "Run \`make' in that directory to run these through makeinfo" \
140 | "(use \`make info' here to do that automatically)."
141 |
142 | info:
143 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
144 | @echo "Running Texinfo files through makeinfo..."
145 | make -C $(BUILDDIR)/texinfo info
146 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
147 |
148 | gettext:
149 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
150 | @echo
151 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
152 |
153 | changes:
154 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
155 | @echo
156 | @echo "The overview file is in $(BUILDDIR)/changes."
157 |
158 | linkcheck:
159 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
160 | @echo
161 | @echo "Link check complete; look for any errors in the above output " \
162 | "or in $(BUILDDIR)/linkcheck/output.txt."
163 |
164 | doctest:
165 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
166 | @echo "Testing of doctests in the sources finished, look at the " \
167 | "results in $(BUILDDIR)/doctest/output.txt."
168 |
169 | xml:
170 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
171 | @echo
172 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml."
173 |
174 | pseudoxml:
175 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
176 | @echo
177 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
178 |
--------------------------------------------------------------------------------
/documentation/developer_guide/_extra/extra.js:
--------------------------------------------------------------------------------
1 | $(document).ready(function(){
2 | $('.external').attr('target', '_parent');
3 | console.info("Added targets to links");
4 | });
5 |
--------------------------------------------------------------------------------
/documentation/developer_guide/_templates/layout.html:
--------------------------------------------------------------------------------
1 | {% extends "!layout.html" %}
2 |
3 | {% set script_files = script_files + ["extra.js"] %}
4 |
--------------------------------------------------------------------------------
/documentation/developer_guide/architecture.rst:
--------------------------------------------------------------------------------
1 | =====================
2 | Platform architecture
3 | =====================
4 |
5 | The platform provides the following components:
6 |
7 | * Running on nmpi.hbpneuromorphic.eu:
8 | * Job queue REST service
9 | * Job manager Collaboratory app
10 | * Dashboard Collaboratory app
11 | * Running on quotas.hbpneuromorphic.eu:
12 | * Quotas REST service
13 | * Resource manager Collaboratory app
14 | * Resource manager coordination Collaboratory app
15 | * Running on benchmarks.hbpneuromorphic.eu:
16 | * Benchmarks REST service
17 | * Benchmarks website
18 | * Running on www.hbpneuromorphic.eu:
19 | * Collaboratory home ("splash") page
20 | * Development and Operations Guidebook (this document)
21 | * Monitoring service (commercial service)
22 | * Python client
23 | * User Guidebook
24 |
25 | In addition to the web servers listed above, there is a staging server *nmpi-staging.hbpneuromorphic.eu*
26 | (a staging server for quotas is planned) and a database server.
27 |
28 | The REST services are implemented with Django. The Collaboratory apps are implemented with AngularJS.
29 | Both services and apps are served using nginx, running in Docker containers on cloud servers
30 | from Digital Ocean.
31 |
32 | A migration from the commercial cloud provider (Digital Ocean) to servers provided by ICEI is planned for 2019.
33 |
34 |
35 |
36 | .. Coming later
37 |
38 | .. benchmark runner (webhook)
39 | .. nest server (for benchmarks): nest.hbpneuromorphic.eu
40 | .. nest data store: tmp-data.hbpneuromorphic.eu
41 |
42 |
--------------------------------------------------------------------------------
/documentation/developer_guide/conf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # HBP Neuromorphic Computing Platform documentation build configuration file, created by
4 | # sphinx-quickstart on Wed Jun 24 15:26:43 2015.
5 | #
6 | # This file is execfile()d with the current directory set to its
7 | # containing dir.
8 | #
9 | # Note that not all possible configuration values are present in this
10 | # autogenerated file.
11 | #
12 | # All configuration values have a default; values that are commented out
13 | # serve to show the default.
14 |
15 | import sys
16 | import os
17 | import hbp_sphinx_theme
18 |
19 | # If extensions (or modules to document with autodoc) are in another directory,
20 | # add these directories to sys.path here. If the directory is relative to the
21 | # documentation root, use os.path.abspath to make it absolute, like shown here.
22 | #sys.path.insert(0, os.path.abspath('.'))
23 |
24 | # -- General configuration ------------------------------------------------
25 |
26 | # If your documentation needs a minimal Sphinx version, state it here.
27 | #needs_sphinx = '1.0'
28 |
29 | # Add any Sphinx extension module names here, as strings. They can be
30 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
31 | # ones.
32 | extensions = [
33 | 'sphinx.ext.intersphinx',
34 | 'sphinx.ext.todo',
35 | ]
36 |
37 | # Add any paths that contain templates here, relative to this directory.
38 | templates_path = ['_templates']
39 |
40 | # The suffix of source filenames.
41 | source_suffix = '.rst'
42 |
43 | # The encoding of source files.
44 | #source_encoding = 'utf-8-sig'
45 |
46 | # The master toctree document.
47 | master_doc = 'index'
48 |
49 | # General information about the project.
50 | project = u"HBP Neuromorphic Computing Platform: Developers' Guide"
51 | copyright = u'2015-2016, Andrew P. Davison and Domenico Guarino'
52 |
53 | # The version info for the project you're documenting, acts as replacement for
54 | # |version| and |release|, also used in various other places throughout the
55 | # built documents.
56 | #
57 | # The short X.Y version.
58 | version = '0.1'
59 | # The full version, including alpha/beta/rc tags.
60 | release = '0.1'
61 |
62 | # The language for content autogenerated by Sphinx. Refer to documentation
63 | # for a list of supported languages.
64 | #language = None
65 |
66 | # There are two options for replacing |today|: either, you set today to some
67 | # non-false value, then it is used:
68 | #today = ''
69 | # Else, today_fmt is used as the format for a strftime call.
70 | #today_fmt = '%B %d, %Y'
71 |
72 | # List of patterns, relative to source directory, that match files and
73 | # directories to ignore when looking for source files.
74 | exclude_patterns = ['_build']
75 |
76 | # The reST default role (used for this markup: `text`) to use for all
77 | # documents.
78 | #default_role = None
79 |
80 | # If true, '()' will be appended to :func: etc. cross-reference text.
81 | #add_function_parentheses = True
82 |
83 | # If true, the current module name will be prepended to all description
84 | # unit titles (such as .. function::).
85 | #add_module_names = True
86 |
87 | # If true, sectionauthor and moduleauthor directives will be shown in the
88 | # output. They are ignored by default.
89 | #show_authors = False
90 |
91 | # The name of the Pygments (syntax highlighting) style to use.
92 | pygments_style = 'sphinx'
93 |
94 | # A list of ignored prefixes for module index sorting.
95 | #modindex_common_prefix = []
96 |
97 | # If true, keep warnings as "system message" paragraphs in the built documents.
98 | #keep_warnings = False
99 |
100 |
101 | # -- Options for HTML output ----------------------------------------------
102 |
103 | # The theme to use for HTML and HTML Help pages. See the documentation for
104 | # a list of builtin themes.
105 | html_theme = "hbp_sphinx_theme"
106 |
107 | # Theme options are theme-specific and customize the look and feel of a theme
108 | # further. For a list of options available for each theme, see the
109 | # documentation.
110 | #html_theme_options = {}
111 |
112 | # Add any paths that contain custom themes here, relative to this directory.
113 | html_theme_path = [hbp_sphinx_theme.get_html_theme_path()]
114 |
115 | # The name for this set of Sphinx documents. If None, it defaults to
116 | # " v documentation".
117 | #html_title = None
118 |
119 | # A shorter title for the navigation bar. Default is the same as html_title.
120 | #html_short_title = None
121 |
122 | # The name of an image file (relative to this directory) to place at the top
123 | # of the sidebar.
124 | #html_logo = None
125 |
126 | # The name of an image file (within the static path) to use as favicon of the
127 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
128 | # pixels large.
129 | #html_favicon = None
130 |
131 | # Add any paths that contain custom static files (such as style sheets) here,
132 | # relative to this directory. They are copied after the builtin static files,
133 | # so a file named "default.css" will overwrite the builtin "default.css".
134 | html_static_path = ['_static']
135 |
136 | # Add any extra paths that contain custom files (such as robots.txt or
137 | # .htaccess) here, relative to this directory. These files are copied
138 | # directly to the root of the documentation.
139 | html_extra_path = ['_extra']
140 |
141 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
142 | # using the given strftime format.
143 | #html_last_updated_fmt = '%b %d, %Y'
144 |
145 | # If true, SmartyPants will be used to convert quotes and dashes to
146 | # typographically correct entities.
147 | #html_use_smartypants = True
148 |
149 | # Custom sidebar templates, maps document names to template names.
150 | #html_sidebars = {}
151 |
152 | # Additional templates that should be rendered to pages, maps page names to
153 | # template names.
154 | #html_additional_pages = {}
155 |
156 | # If false, no module index is generated.
157 | #html_domain_indices = True
158 |
159 | # If false, no index is generated.
160 | #html_use_index = True
161 |
162 | # If true, the index is split into individual pages for each letter.
163 | #html_split_index = False
164 |
165 | # If true, links to the reST sources are added to the pages.
166 | #html_show_sourcelink = True
167 |
168 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
169 | #html_show_sphinx = True
170 |
171 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
172 | #html_show_copyright = True
173 |
174 | # If true, an OpenSearch description file will be output, and all pages will
175 | # contain a <link> tag referring to it. The value of this option must be the
176 | # base URL from which the finished HTML is served.
177 | #html_use_opensearch = ''
178 |
179 | # This is the file name suffix for HTML files (e.g. ".xhtml").
180 | #html_file_suffix = None
181 |
182 | # Output file base name for HTML help builder.
183 | htmlhelp_basename = 'HBPNeuromorphicComputingPlatformdoc'
184 |
185 |
186 | # -- Options for LaTeX output ---------------------------------------------
187 |
188 | latex_elements = {
189 | # The paper size ('letterpaper' or 'a4paper').
190 | #'papersize': 'letterpaper',
191 |
192 | # The font size ('10pt', '11pt' or '12pt').
193 | #'pointsize': '10pt',
194 |
195 | # Additional stuff for the LaTeX preamble.
196 | #'preamble': '',
197 | }
198 |
199 | # Grouping the document tree into LaTeX files. List of tuples
200 | # (source start file, target name, title,
201 | # author, documentclass [howto, manual, or own class]).
202 | latex_documents = [
203 | ('index', 'HBPNeuromorphicComputingPlatform.tex', u'HBP Neuromorphic Computing Platform Documentation',
204 | u'Andrew P. Davison and Domenico Guarino', 'manual'),
205 | ]
206 |
207 | # The name of an image file (relative to this directory) to place at the top of
208 | # the title page.
209 | #latex_logo = None
210 |
211 | # For "manual" documents, if this is true, then toplevel headings are parts,
212 | # not chapters.
213 | #latex_use_parts = False
214 |
215 | # If true, show page references after internal links.
216 | #latex_show_pagerefs = False
217 |
218 | # If true, show URL addresses after external links.
219 | #latex_show_urls = False
220 |
221 | # Documents to append as an appendix to all manuals.
222 | #latex_appendices = []
223 |
224 | # If false, no module index is generated.
225 | #latex_domain_indices = True
226 |
227 |
228 | # -- Options for manual page output ---------------------------------------
229 |
230 | # One entry per manual page. List of tuples
231 | # (source start file, name, description, authors, manual section).
232 | man_pages = [
233 | ('index', 'hbpneuromorphiccomputingplatform', u'HBP Neuromorphic Computing Platform Documentation',
234 | [u'Andrew P. Davison and Domenico Guarino'], 1)
235 | ]
236 |
237 | # If true, show URL addresses after external links.
238 | #man_show_urls = False
239 |
240 |
241 | # -- Options for Texinfo output -------------------------------------------
242 |
243 | # Grouping the document tree into Texinfo files. List of tuples
244 | # (source start file, target name, title, author,
245 | # dir menu entry, description, category)
246 | texinfo_documents = [
247 | ('index', 'HBPNeuromorphicComputingPlatform', u'HBP Neuromorphic Computing Platform Documentation',
248 | u'Andrew P. Davison and Domenico Guarino', 'HBPNeuromorphicComputingPlatform', 'One line description of project.',
249 | 'Miscellaneous'),
250 | ]
251 |
252 | # Documents to append as an appendix to all manuals.
253 | #texinfo_appendices = []
254 |
255 | # If false, no module index is generated.
256 | #texinfo_domain_indices = True
257 |
258 | # How to display URL addresses: 'footnote', 'no', or 'inline'.
259 | #texinfo_show_urls = 'footnote'
260 |
261 | # If true, do not generate a @detailmenu in the "Top" node's menu.
262 | #texinfo_no_detailmenu = False
263 |
264 |
265 | # Example configuration for intersphinx: refer to the Python standard library.
266 | intersphinx_mapping = {'http://docs.python.org/': None}
267 |
--------------------------------------------------------------------------------
/documentation/developer_guide/deployment.rst:
--------------------------------------------------------------------------------
1 | ======================
2 | Deploying the platform
3 | ======================
4 |
5 | The job queue server and its web interface are deployed in the cloud using Docker containers.
6 | Building Docker images and deploying the containers is handled by the "cloud-deploy" package,
7 | available at https://github.com/CNRS-UNIC/cloud-deploy. This provides a command-line tool, :file:`cld`.
8 |
9 |
10 | .. note:: All actions of :file:`cld` are logged to :file:`deploy.log` so you can always
11 | review the actions that have been performed.
12 |
13 | Managing nodes and services
14 | ===========================
15 |
16 | We use the name "node" to refer to an individual virtual server running in the Digital Ocean
17 | cloud. We use the name "service" to refer to an individual Docker container.
18 |
19 | List available server nodes::
20 |
21 | $ cld node list
22 |
23 | Example output::
24 |
25 | Name Ip_Address Created_At Size Location
26 | ----------------- --------------- -------------------- ------ ---------------
27 | bob 146.185.173.96 2016-05-19T14:02:42Z 512 Amsterdam 2
28 | gallaxhar 146.185.169.134 2016-08-10T13:26:47Z 1024 Amsterdam 2
29 | ginormica 95.85.18.21 2016-08-10T14:00:40Z 1024 Amsterdam 2
30 | gigantosaurus 82.196.8.80 2016-09-19T21:22:34Z 512 Amsterdam 2
31 | elastigirl 188.226.128.38 2016-11-23T12:22:08Z 2048 Amsterdam 2
32 | drcockroach 37.139.6.192 2017-05-17T10:34:54Z 512 Amsterdam 2
33 |
34 |
35 | This requires a DigitalOcean API token, which should be stored in your password manager.
36 | (Only the OS X Keychain is currently supported.)
37 |
38 | To start a new node::
39 |
40 | $ cld node create --size 512mb
41 |
42 | To shut down and remove a node::
43 |
44 | $ cld node destroy
45 |
46 | List running services (each service corresponds to a Docker container)::
47 |
48 | $ cld services
49 |
50 | Example output::
51 |
52 | Name Image Status Url ID Node Ports
53 | ------------------ ----------------------------------------------- -------- ----------------------- ------------ ------------- --------------
54 | docs cnrsunic/neuromorphic_docs running https://146.185.173.96 3308c939b689 bob 443:443
55 | db tutum/postgresql:latest running https://146.185.169.134 7a22924ecebc gallaxhar 5432:32768
56 | nmpi-blue cnrsunic/nmpi_queue_server:blue running https://95.85.18.21 ca51f676041d ginormica 443:443
57 | benchmarkdb tutum/postgresql:latest running https://95.85.18.21 a5a1e3115ff3 ginormica 5432:32768
58 | benchmarks-service cnrsunic/neuromorphic_benchmarks_service:latest running https://82.196.8.80 19eda6adccbe gigantosaurus 443:443
59 | quotas-green cnrsunic/nmpi_resource_manager:green running https://188.226.128.38 0bea557df8d0 elastigirl 443:443
60 | issuetracker-green cnrsunic/hbp_issuetracker:green running https://37.139.6.192 2618f3e50951 drcockroach 443:443
61 | issuetracker-db tutum/postgresql:latest running https://37.139.6.192 53f310185d2e drcockroach 5432:32768
62 |
63 |
64 | To launch a new service::
65 |
66 | $ cld launch --colour=<colour> <service>
67 |
68 | Possible values of ``<service>`` are "nmpi", "splash", "quotas", "db", etc.
69 | Each service has a configuration file, e.g. :file:`nmpi.yml`, in the :file:`deployment` subdirectory.
70 |
71 | .. note:: *Colours* For most services we run a "blue" service and a "green" service, each on
72 | a different server node.
73 | These are used to support having "production" and "staging" services, and
74 | easily moving the staging version into production.
75 | For example, if the "blue" service is in production and we wish to deploy a new version,
76 | we deploy the new version as "green" then test it. When we are happy with the new
77 | version, we just update the DNS records so that the production URL points to the green
78 | service. The colour is used both in the name of the service and as a tag for the
79 | Docker image. (See http://martinfowler.com/bliki/BlueGreenDeployment.html for more
80 | information on this approach.)
81 |
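When switching production between colours, it can be useful to confirm which node the
production hostname currently resolves to. A minimal sketch using only the Python
standard library; compare the printed addresses against the output of ``cld node list``:

.. code-block:: python

    # Minimal sketch: resolve the production hostnames and print their
    # addresses, for comparison with `cld node list` before and after
    # a blue/green DNS switch.
    import socket

    for hostname in ("nmpi.hbpneuromorphic.eu", "quotas.hbpneuromorphic.eu"):
        print("{} -> {}".format(hostname, socket.gethostbyname(hostname)))
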
82 | To redeploy a service with the latest version::
83 |
84 | $ cld build --colour=<colour> <service>
85 | $ cld redeploy --colour=<colour> <service>
86 |
87 | To terminate a service::
88 |
89 | $ cld terminate --colour=<colour> <service>
90 |
91 | To download the logs of a service::
92 |
93 | $ cld log --colour=<colour> --filename=<filename> <service>
94 |
95 |
96 | Deploying the database
97 | ======================
98 |
99 | The platform uses a PostgreSQL database in a Docker container.
100 |
101 | Launching the database service
102 | ------------------------------
103 |
104 | ::
105 |
106 | $ cld launch db
107 |
108 | This creates a PostgreSQL service with an empty database and a randomly generated password for
109 | the "postgres" user. To retrieve the password run ``cld log db``.
110 |
111 | .. note:: It is possible to run multiple instances of the database service,
112 | but they must each run on different server nodes.
113 | When choosing which node to run on, first ensure there is
114 | not already an instance of the database service running on it.
115 |
116 |
117 | Restoring the database
118 | ----------------------
119 |
120 | After (re-)deployment, the database is empty. To restore the database from an SQL dump::
121 |
122 | $ cld database restore db
123 |
124 | and then enter the password for the "postgres" user when prompted.
125 |
126 | The backup files are stored on the UNIC cluster in the directory :file:`/home/share/hbp/backups`.
127 |
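If ``cld`` is not to hand, a backup file can also be loaded directly with the standard
``psql`` client. A minimal sketch (the database host and backup file name are
placeholders; the "postgres" password is prompted for):

.. code-block:: python

    # Minimal sketch: restore a backup file into the "nmpi" database with psql.
    # Host and file name below are placeholders for your set-up.
    import subprocess

    subprocess.check_call([
        "psql",
        "--host", "<database host>",
        "--port", "5432",
        "--username", "postgres",
        "--dbname", "nmpi",
        "--file", "<backup file>.sql",
    ])
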
128 |
129 | Deploying the job queue service
130 | ===============================
131 |
132 | The recipe for building the Docker image for the job queue server is in the
133 | file :file:`job_manager/Dockerfile`.
134 |
135 | To build the image, run::
136 |
137 | $ cld build --colour=<colour> nmpi
138 |
139 | This builds the image ``cnrsunic/nmpi_queue_server``, tags it with both the colour and the
140 | latest Git commit id, and pushes the image to `Docker Hub`_.
141 |
142 | .. note:: Pushing to Docker Hub requires that you have already logged in using ``docker login``
143 | using the username "cnrsunic".
144 |
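For reference, the two tags described above can be reproduced by hand. A minimal
sketch (the exact tag format applied by ``cld`` may differ; treat this as illustrative
only):

.. code-block:: python

    # Minimal sketch: reconstruct the image tags described above
    # (the colour and the latest Git commit id).
    import subprocess

    def image_tags(image="cnrsunic/nmpi_queue_server", colour="blue"):
        commit = subprocess.check_output(
            ["git", "rev-parse", "--short", "HEAD"]).decode().strip()
        return ["{}:{}".format(image, colour), "{}:{}".format(image, commit)]

    print(image_tags())
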
145 | To launch the service::
146 |
147 | $ cld launch --colour=<colour> nmpi
148 |
149 | The service requires the following environment variables to be defined in your shell.
150 | The deployment script reads these variables and sets them as environment variables for
151 | the Docker container::
152 |
153 | NMPI_DATABASE_HOST
154 | NMPI_DATABASE_PORT
155 |
156 | The service also requires a number of passwords and other secrets, contained in the file
157 | :file:`nmpi-secrets.yml`. For security, this file is not version controlled; it may be
158 | obtained from Andrew Davison.
159 |
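Before launching or redeploying, it is worth checking that these variables are actually
set in the current shell. A minimal sketch (variable names taken from the list above):

.. code-block:: python

    # Minimal sketch: fail early if the variables the deployment script
    # reads from the shell are not set.
    import os
    import sys

    REQUIRED = ["NMPI_DATABASE_HOST", "NMPI_DATABASE_PORT"]
    missing = [name for name in REQUIRED if not os.environ.get(name)]
    if missing:
        sys.exit("Missing environment variables: " + ", ".join(missing))
    print("All required environment variables are set.")
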
160 | To deploy a new version of the service::
161 |
162 | $ cld build --colour=<colour> nmpi
163 | $ cld redeploy --colour=<colour> nmpi
164 |
165 |
166 | Deploying the quotas service
167 | ============================
168 |
169 | The recipe for building the Docker image for the quotas service is in the
170 | file :file:`resource_manager/Dockerfile`.
171 |
172 | To build the image, run::
173 |
174 | $ cld build --colour=<colour> quotas
175 |
176 | This builds the image ``cnrsunic/nmpi_resource_manager``, tags it with both the colour and the
177 | latest Git commit id, and pushes the image to `Docker Hub`_.
178 |
179 | To launch the service::
180 |
181 | $ cld launch --colour=<colour> quotas
182 |
183 | The service requires the following environment variables to be defined::
184 |
185 | NMPI_DATABASE_HOST
186 | NMPI_DATABASE_PORT
187 |
188 | The service also requires a number of passwords and other secrets, contained in the file
189 | :file:`quotas-secrets.yml`. For security, this file is not version controlled; it may be
190 | obtained from Andrew Davison.
191 |
192 |
193 | Taking database backups
194 | =======================
195 |
196 | To take a backup of the database, run::
197 |
198 | $ cld database dump db
199 |
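If ``cld`` is not available, an equivalent dump can be taken directly with the standard
PostgreSQL client tools. A minimal sketch (assumes ``pg_dump`` is installed locally and
that the ``NMPI_DATABASE_*`` variables and the ``nmpi``/``nmpi_dbadmin`` names from the
development set-up apply; production values may differ):

.. code-block:: python

    # Minimal sketch: take a timestamped SQL dump with pg_dump, reading
    # connection details from the NMPI_DATABASE_* environment variables.
    import datetime
    import os
    import subprocess

    env = dict(os.environ, PGPASSWORD=os.environ.get("NMPI_DATABASE_PASSWORD", ""))
    outfile = "nmpi-backup-{:%Y%m%d-%H%M%S}.sql".format(datetime.datetime.utcnow())
    subprocess.check_call(
        [
            "pg_dump",
            "--host", os.environ["NMPI_DATABASE_HOST"],
            "--port", os.environ["NMPI_DATABASE_PORT"],
            "--username", "nmpi_dbadmin",
            "--file", outfile,
            "nmpi",
        ],
        env=env,
    )
    print("Wrote " + outfile)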
200 |
201 | Domain name registration
202 | ========================
203 |
204 | The domain name "hbpneuromorphic.eu" was registered with GoDaddy.
205 | The DNS is configured using the GoDaddy dashboard
206 | (contact Andrew Davison for credentials).
207 |
208 | The e-mail address "webmaster@hbpneuromorphic.eu" forwards to Andrew Davison. Up to 100
209 | forwarded addresses can be created.
210 |
211 |
212 | Certificates
213 | ============
214 |
215 | The SSL certificates for hbpneuromorphic.eu are obtained from Let's Encrypt.
216 | The private keys and the certificates are stored in the :file:`/etc/letsencrypt`
217 | directory of the host servers, and made available to the Docker images via
218 | Docker shared volumes.
219 | Certificates are valid for three months. At the moment, they must be manually renewed.
220 | Automatic renewal (e.g. through a cron job) is planned.
221 |
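Until automatic renewal is in place, the time remaining on each certificate can be
checked from any machine. A minimal sketch using only the Python standard library:

.. code-block:: python

    # Minimal sketch: report the notAfter date of each service's certificate,
    # as a reminder of when manual renewal is due.
    import socket
    import ssl

    HOSTS = ["www.hbpneuromorphic.eu", "nmpi.hbpneuromorphic.eu",
             "quotas.hbpneuromorphic.eu"]

    context = ssl.create_default_context()
    for host in HOSTS:
        sock = socket.create_connection((host, 443), timeout=10)
        tls = context.wrap_socket(sock, server_hostname=host)
        try:
            cert = tls.getpeercert()
        finally:
            tls.close()
        print("{}: expires {}".format(host, cert["notAfter"]))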
222 |
223 | Administration of the job queue server
224 | ======================================
225 |
226 | It should not in general be necessary to access the `Django admin interface`_.
227 | However, it is available if needed (for example to delete test jobs or to add/remove API keys).
228 | Contact Andrew Davison for the administrator credentials.
229 |
230 |
231 | .. _`Django admin interface`: https://nmpi.hbpneuromorphic.eu/admin/
232 | .. _`Docker Hub`: https://hub.docker.com
233 |
--------------------------------------------------------------------------------
/documentation/developer_guide/development_environment.rst:
--------------------------------------------------------------------------------
1 | ====================================
2 | Setting up a development environment
3 | ====================================
4 |
5 | The following assumes you are working with Python 2.7 in a virtual environment (using virtualenv,
6 | conda, or similar).
7 |
8 | Requirements
9 | ------------
10 |
11 | We suggest using `pip-tools`_ to install packages::
12 |
13 | $ pip install pip-tools
14 |
15 | The base development requirements are in :file:`deployment/requirements-deploy.txt`.
16 | Install them using::
17 |
18 | $ pip-sync deployment/requirements-deploy.txt
19 |
20 | Some of the project requirements are private HBP/BlueBrain Project packages.
21 | To install these requires either a VPN connection to EPFL or a local copy of the packages.
22 | If you have VPN access, run::
23 |
24 | $ pip download -i https://bbpteam.epfl.ch/repository/devpi/simple --pre -r deployment/requirements-bbp.txt -d packages
25 |
26 | and then un-tar the archives in the :file:`packages` directory.
27 | If you do not have VPN access, contact Andrew Davison to obtain a local copy.
28 |
29 | Finally, to install the remaining dependencies, run::
30 |
31 | $ pip-sync -f packages deployment/requirements-deploy.txt job_manager/requirements.txt resource_manager/requirements.txt
32 |
33 | .. note:: If using conda, you may wish to install some or all of the dependencies with
34 | ``conda install`` instead of ``pip``. On Linux, it may be easier to install
35 | ``psycopg2`` via the package manager rather than using ``pip``.
36 |
37 |
38 | .. todo:: install nodejs, bower, Bower components
39 |
40 | Setting up a database
41 | ---------------------
42 |
43 | Most of the time while developing it is easiest to use SQLite. However, since we use PostgreSQL
44 | in production, it is important to at least test with a PostgreSQL database. This can be
45 | installed directly on your machine, but it may be easier to use Docker, both to minimize the
46 | differences with respect to production, and to make it simple to wipe out and recreate the
47 | test database.
48 |
49 | First, we pull the Docker image from the Docker registry::
50 |
51 | $ docker pull tutum/postgresql
52 |
53 | To run the image::
54 |
55 | $ docker run -d -p 5432:5432 tutum/postgresql
56 |
57 | Run ``docker ps`` to get the container ID, then::
58 |
59 | $ docker logs <container ID>
60 |
61 | to show the randomly-generated password. Now we create the admin user that will be used by
62 | Django to connect::
63 |
64 | $ psql --user postgres --command "CREATE USER nmpi_dbadmin WITH PASSWORD '<password>';"
65 | $ psql --user postgres --command "CREATE DATABASE nmpi OWNER nmpi_dbadmin;"
66 | $ psql --user postgres --command "GRANT ALL PRIVILEGES ON DATABASE nmpi TO nmpi_dbadmin;"
67 |
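To confirm that the container and the new role are usable before pointing Django at
them, a quick connection check can be made from Python. A minimal sketch, assuming
``psycopg2`` is installed and using the password chosen above (host and port are
placeholders for your local set-up):

.. code-block:: python

    # Minimal sketch: verify that the nmpi database accepts connections
    # for the nmpi_dbadmin role created above.
    import psycopg2

    conn = psycopg2.connect(
        host="localhost",
        port=5432,
        dbname="nmpi",
        user="nmpi_dbadmin",
        password="<password>",  # the password chosen in the CREATE USER step
    )
    cur = conn.cursor()
    cur.execute("SELECT version();")
    print(cur.fetchone()[0])
    conn.close()
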
68 |
69 | Configuring Django
70 | ------------------
71 |
72 | When developing locally, set the following environment variable::
73 |
74 | $ export NMPI_ENV=dev
75 |
76 | By default, when developing locally you will use a local SQLite database. To use a PostgreSQL
77 | database (either a local one or the production database), in :file:`settings.py` for the
78 | project you are working on set ``LOCAL_DB = False``.
79 |
80 | To tell Django which PostgreSQL database you are working on, set the environment
81 | variables ``NMPI_DATABASE_HOST``, ``NMPI_DATABASE_PORT``, ``NMPI_DATABASE_PASSWORD``.
82 |
83 | You also need to set the environment variables ``DJANGO_SECRET_KEY``,
84 | ``HBP_OIDC_CLIENT_ID`` and ``HBP_OIDC_CLIENT_SECRET``. The former can be set to whatever you
85 | wish for development purposes. To obtain the latter two, you should
86 | `register an OpenID Connect client`_ using ``https://localhost:8001/complete/hbp`` as the URL.
87 |
88 | To check everything is working::
89 |
90 | $ python manage.py check
91 |
92 | and to initialize the database::
93 |
94 | $ python manage.py migrate
95 |
96 | Next you should `create a local SSL certificate`_; now you can run the development server using::
97 |
98 | $ python manage.py runsslserver --certificate ~/.ssl/server.crt --key ~/.ssl/server.key 127.0.0.1:8001
99 |
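For orientation, the database part of :file:`settings.py` looks something like the
sketch below. This is illustrative only; the real settings file is the authority on
names and defaults, and the database/user names here follow the development set-up
described above:

.. code-block:: python

    # Illustrative sketch only -- the real settings.py may differ in detail.
    import os

    LOCAL_DB = True  # set to False to use a PostgreSQL database (see above)

    if LOCAL_DB:
        DATABASES = {
            "default": {
                "ENGINE": "django.db.backends.sqlite3",
                "NAME": "db.sqlite3",
            }
        }
    else:
        DATABASES = {
            "default": {
                "ENGINE": "django.db.backends.postgresql_psycopg2",
                "NAME": "nmpi",           # database created in the previous section
                "USER": "nmpi_dbadmin",
                "PASSWORD": os.environ["NMPI_DATABASE_PASSWORD"],
                "HOST": os.environ["NMPI_DATABASE_HOST"],
                "PORT": os.environ["NMPI_DATABASE_PORT"],
            }
        }
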
100 |
101 | Running the tests
102 | -----------------
103 |
104 | Unit tests are run as follows. In the :file:`job_manager` directory::
105 |
106 | $ python manage.py test simqueue
107 |
108 | In the :file:`resource_manager` directory::
109 |
110 | $ python manage.py test quotas
111 |
112 |
113 | .. _`pip-tools`: https://github.com/nvie/pip-tools
114 | .. _`register an OpenID Connect client`: https://collab.humanbrainproject.eu/#/collab/54/nav/1051
115 | .. _`create a local SSL certificate`: https://developer.humanbrainproject.eu/docs/projects/HBP%20Collaboratory%20Documentation/1.7/app-developer-manual/quickstart/setup/ssl-certificate.html
--------------------------------------------------------------------------------
/documentation/developer_guide/index.rst:
--------------------------------------------------------------------------------
1 | ===========================================================
2 | The HBP Neuromorphic Computing Platform - Developers' Guide
3 | ===========================================================
4 |
5 | Contents:
6 |
7 | .. toctree::
8 | :maxdepth: 2
9 |
10 | architecture
11 | development_environment
12 | testing
13 | deployment
14 | monitoring
15 |
--------------------------------------------------------------------------------
/documentation/developer_guide/monitoring.rst:
--------------------------------------------------------------------------------
1 | ==========
2 | Monitoring
3 | ==========
4 |
5 | Monitoring of the Platform front-end services, and of the BrainScaleS and SpiNNaker services that
6 | interact with the Job Queue service, makes use of a commercial service, StatusCake_.
7 |
8 | We are currently using the "Superior" plan, which unfortunately does not allow sub-accounts.
9 | The account is held by the CNRS partner.
10 |
11 | The configuration dashboard is available at https://app.statuscake.com/.
12 |
13 | Services monitored
14 | ------------------
15 |
16 | The following URLs are monitored for uptime, response time and SSL certificate validity,
17 | on a 15-minute schedule:
18 |
19 | - https://nmpi.hbpneuromorphic.eu/api/v2/ (Job Queue service)
20 | - https://quotas.hbpneuromorphic.eu/projects/ (Quota service)
21 | - http://neuralensemble.org/docs/PyNN/ (PyNN documentation)
22 | - https://www.hbpneuromorphic.eu/home.html (Collab homepage)
23 | - https://benchmarks.hbpneuromorphic.eu (Benchmarks service)
24 |
25 | In addition, the following services are monitored using "push" monitoring.
26 | Each of these services sends an HTTP GET request to a StatusCake webhook every time it successfully runs.
27 | The monitoring service generates an alert if the "ping" is not received.
28 |
29 | - Database backups (script which performs a database backup hourly)
30 | - SpiNNaker job queue check (1 minute check interval)
31 | - BrainScaleS job queue check (2 minute check interval)
32 |
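Integrating a push monitor is lightweight: the script in question simply issues an
HTTP GET to its webhook URL after each successful run. A minimal sketch (the push URL
is a placeholder; copy the real one for each monitor from the StatusCake dashboard):

.. code-block:: python

    # Minimal sketch: notify a push monitor after a successful run.
    # PUSH_URL is a placeholder; the real push URL for each monitor is
    # shown in the StatusCake dashboard.
    try:
        from urllib.request import urlopen   # Python 3
    except ImportError:
        from urllib2 import urlopen          # Python 2

    PUSH_URL = "<push URL copied from the StatusCake dashboard>"

    def notify_success():
        urlopen(PUSH_URL, timeout=10).close()
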
33 | Three contact groups are defined: front-end, BrainScaleS and SpiNNaker.
34 | Members of these groups receive e-mails when a monitor to which they are subscribed issues an alert.
35 |
36 |
37 | Public monitoring page
38 | ----------------------
39 |
40 | A public monitoring webpage is available:
41 |
42 | - publicly at http://status.hbpneuromorphic.eu
43 | - within the Collaboratory at https://collab.humanbrainproject.eu/#/collab/51/nav/245013
44 |
45 | The monitoring service uses a commercial provider, StatusCake (http://statuscake.com). This service tests all of the Platform web services, from multiple locations, every 15 minutes. In addition, the BrainScaleS and SpiNNaker job retrieval systems notify the monitoring service every time they successfully check for new jobs (every 1-2 minutes). In case any of the services does not respond, the Platform administrators receive an e-mail notification.
46 |
47 |
48 | .. _StatusCake: http://statuscake.com
--------------------------------------------------------------------------------
/documentation/developer_guide/testing.rst:
--------------------------------------------------------------------------------
1 | =======
2 | Testing
3 | =======
4 |
5 | Unit tests are run on TravisCI:
6 |
7 | - https://travis-ci.org/HumanBrainProject/hbp_neuromorphic_platform
8 | - https://travis-ci.org/HumanBrainProject/hbp-neuromorphic-client
9 |
10 | Current test status
11 | -------------------
12 |
13 | Platform
14 |
15 | .. image:: https://travis-ci.org/HumanBrainProject/hbp_neuromorphic_platform.svg?branch=master
16 |
17 | Client
18 |
19 | .. image:: https://travis-ci.org/HumanBrainProject/hbp-neuromorphic-client.svg?branch=master
20 |
21 |
--------------------------------------------------------------------------------
/documentation/issuetracker.html:
--------------------------------------------------------------------------------
1 | <html>
2 | <head>
3 | <title>Issue Tracker app</title>
4 | </head>
5 | <body>
6 | <h1>Issue Tracker app retired</h1>
7 |
8 | <p>The Issue Tracker app has been retired.</p>
9 | <p>Please contact Andrew Davison if you would like a copy of the issues/comments you stored in the app.</p>