├── .dependencies_installed
├── .github
├── pull_request_template.md
└── workflows
│ ├── executor_cd_dev.yml
│ ├── executor_cd_prod.yml
│ ├── miner_cd_dev.yml
│ ├── miner_cd_prod.yml
│ ├── validator_cd_dev.yml
│ └── validator_cd_prod.yml
├── .gitignore
├── .idea
├── .gitignore
├── compute-subnet.iml
├── dataSources.xml
├── inspectionProfiles
│ └── profiles_settings.xml
├── misc.xml
├── modules.xml
└── vcs.xml
├── .pre-commit-config.yaml
├── .vscode
└── launch.json
├── LICENSE
├── README.md
├── contrib
├── CODE_REVIEW_DOCS.md
├── CONTRIBUTING.md
├── DEVELOPMENT_WORKFLOW.md
└── STYLE.md
├── datura
├── .gitignore
├── README.md
├── datura
│ ├── __init__.py
│ ├── consumers
│ │ └── base.py
│ ├── errors
│ │ ├── __init__.py
│ │ └── protocol.py
│ └── requests
│ │ ├── base.py
│ │ ├── miner_requests.py
│ │ └── validator_requests.py
├── pyproject.toml
└── tests
│ └── __init__.py
├── neurons
├── __init__.py
├── executor
│ ├── .dockerignore
│ ├── .env.template
│ ├── .gitignore
│ ├── Dockerfile
│ ├── Dockerfile.runner
│ ├── README.md
│ ├── alembic.ini
│ ├── config.toml
│ ├── daemon.json
│ ├── docker-compose.app.dev.yml
│ ├── docker-compose.app.yml
│ ├── docker-compose.dev.yml
│ ├── docker-compose.yml
│ ├── docker_build.sh
│ ├── docker_publish.sh
│ ├── docker_runner_build.sh
│ ├── docker_runner_publish.sh
│ ├── entrypoint.sh
│ ├── libdmcompverify.so
│ ├── migrations
│ │ ├── README
│ │ ├── env.py
│ │ ├── script.py.mako
│ │ └── versions
│ │ │ └── 8b04ab8f89f9_podlog.py
│ ├── nvidia_docker_sysbox_setup.sh
│ ├── pdm.lock
│ ├── pyproject.toml
│ ├── run.sh
│ ├── src
│ │ ├── core
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── db.py
│ │ │ └── logger.py
│ │ ├── daos
│ │ │ ├── base.py
│ │ │ └── pod_log.py
│ │ ├── decrypt_challenge.py
│ │ ├── executor.py
│ │ ├── gpus_utility.py
│ │ ├── middlewares
│ │ │ ├── __init__.py
│ │ │ └── miner.py
│ │ ├── models
│ │ │ ├── __init__.py
│ │ │ └── pod_log.py
│ │ ├── monitor.py
│ │ ├── payloads
│ │ │ ├── __init__.py
│ │ │ └── miner.py
│ │ ├── routes
│ │ │ ├── __init__.py
│ │ │ └── apis.py
│ │ └── services
│ │ │ ├── miner_service.py
│ │ │ ├── pod_log_service.py
│ │ │ └── ssh_service.py
│ ├── sysbox-ce_0.6.6-0.linux_amd64.deb
│ └── version.txt
├── miners
│ ├── .dockerignore
│ ├── .env.template
│ ├── .gitignore
│ ├── Dockerfile
│ ├── Dockerfile.runner
│ ├── README.md
│ ├── alembic.ini
│ ├── assigning_validator_hotkeys.md
│ ├── docker-compose.app.dev.yml
│ ├── docker-compose.app.yml
│ ├── docker-compose.dev.yml
│ ├── docker-compose.local.yml
│ ├── docker-compose.yml
│ ├── docker_build.sh
│ ├── docker_publish.sh
│ ├── docker_runner_build.sh
│ ├── docker_runner_publish.sh
│ ├── entrypoint.sh
│ ├── migrations
│ │ ├── README
│ │ ├── env.py
│ │ ├── script.py.mako
│ │ └── versions
│ │ │ ├── 8e52603bd563_create_validator_model.py
│ │ │ └── eb0b92cbc38e_add_executors_table.py
│ ├── pdm.lock
│ ├── pyproject.toml
│ ├── run.sh
│ ├── src
│ │ ├── cli.py
│ │ ├── consumers
│ │ │ └── validator_consumer.py
│ │ ├── core
│ │ │ ├── __init__.py
│ │ │ ├── config.py
│ │ │ ├── db.py
│ │ │ ├── miner.py
│ │ │ └── utils.py
│ │ ├── daos
│ │ │ ├── __init__.py
│ │ │ ├── base.py
│ │ │ ├── executor.py
│ │ │ └── validator.py
│ │ ├── gpt2-training-model.py
│ │ ├── miner.py
│ │ ├── models
│ │ │ ├── __init__.py
│ │ │ ├── executor.py
│ │ │ └── validator.py
│ │ ├── routes
│ │ │ ├── __init__.py
│ │ │ ├── debug_routes.py
│ │ │ └── validator_interface.py
│ │ └── services
│ │ │ ├── executor_service.py
│ │ │ ├── ssh_service.py
│ │ │ └── validator_service.py
│ ├── tests
│ │ └── __init__.py
│ └── version.txt
└── validators
│ ├── .dockerignore
│ ├── .env.template
│ ├── .gitignore
│ ├── Dockerfile
│ ├── Dockerfile.runner
│ ├── README.md
│ ├── alembic.ini
│ ├── docker-compose.app.dev.yml
│ ├── docker-compose.app.yml
│ ├── docker-compose.dev.yml
│ ├── docker-compose.local.yml
│ ├── docker-compose.yml
│ ├── docker_build.sh
│ ├── docker_publish.sh
│ ├── docker_runner_build.sh
│ ├── docker_runner_publish.sh
│ ├── entrypoint.sh
│ ├── libdmcompverify.so
│ ├── migrations
│ ├── README
│ ├── env.py
│ ├── script.py.mako
│ └── versions
│ │ ├── 0653dc97382a_add_executors_table.py
│ │ └── d5037a3f7b99_create_task_model.py
│ ├── pdm.lock
│ ├── pyproject.toml
│ ├── run.sh
│ ├── src
│ ├── cli.py
│ ├── clients
│ │ ├── __init__.py
│ │ ├── compute_client.py
│ │ └── miner_client.py
│ ├── connector.py
│ ├── core
│ │ ├── __init__.py
│ │ ├── config.py
│ │ ├── db.py
│ │ ├── utils.py
│ │ └── validator.py
│ ├── daos
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── executor.py
│ │ └── task.py
│ ├── job.py
│ ├── miner_jobs
│ │ ├── machine_scrape.py
│ │ ├── obfuscator.py
│ │ └── score.py
│ ├── models
│ │ ├── __init__.py
│ │ ├── executor.py
│ │ └── task.py
│ ├── payload_models
│ │ ├── __init__.py
│ │ └── payloads.py
│ ├── protocol
│ │ ├── __init__.py
│ │ └── vc_protocol
│ │ │ ├── __init__.py
│ │ │ ├── compute_requests.py
│ │ │ └── validator_requests.py
│ ├── services
│ │ ├── const.py
│ │ ├── docker_service.py
│ │ ├── file_encrypt_service.py
│ │ ├── hash_service.py
│ │ ├── interactive_shell_service.py
│ │ ├── ioc.py
│ │ ├── matrix_validation_service.py
│ │ ├── miner_service.py
│ │ ├── redis_service.py
│ │ ├── ssh_service.py
│ │ └── task_service.py
│ ├── test_validator.py
│ └── validator.py
│ ├── tests
│ └── __init__.py
│ └── version.txt
├── pdm.lock
├── pyproject.toml
├── scripts
├── check_compatibility.sh
├── check_requirements_changes.sh
├── install_dev.sh
├── install_executor_on_ubuntu.sh
├── install_miner_on_runpod.sh
├── install_miner_on_ubuntu.sh
├── install_staging.sh
└── install_validator_on_ubuntu.sh
└── tests
└── __init__.py
/.dependencies_installed:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Datura-ai/compute-subnet/39f006fd39394b9972ab509cc7ab558a8a374516/.dependencies_installed
--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------
1 | ## Describe your changes
2 |
3 | ## Issue ticket number and link
4 |
5 | [Task Title](https://www.notion.so/Compute-SN-c27d35dd084e4c4d92374f55cdd293f2?p=f9b26856f1a6406892b5db46446260da&pm=s)
6 |
7 | ## Checklist before requesting a review
8 | - [ ] I have performed a self-review of my code
9 | - [ ] I wrote tests.
10 | - [ ] I have considered the performance impact of this change.
11 |
--------------------------------------------------------------------------------
/.github/workflows/executor_cd_dev.yml:
--------------------------------------------------------------------------------
1 | name: "CD: executor (dev)"
2 |
3 | on:
4 | push:
5 | branches:
6 | - 'dev_executor'
7 |
8 | jobs:
9 | deploy:
10 | runs-on: ubuntu-latest
11 | env:
12 | TAG: "dev"
13 | TARGET_FILE: "docker-compose.app.dev.yml"
14 | DOCKERHUB_PAT: ${{ secrets.DOCKERHUB_PAT }}
15 | DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
16 |
17 | steps:
18 | - uses: actions/checkout@v4
19 |
20 | - name: Make scripts executable
21 | run: |
22 | chmod +x neurons/executor/docker_publish.sh
23 | chmod +x neurons/executor/docker_runner_publish.sh
24 |
25 | - name: Run docker_publish.sh
26 | run: |
27 | cd neurons/executor
28 | ./docker_publish.sh
29 |
30 | - name: Run docker_runner_publish.sh
31 | run: |
32 | cd neurons/executor
33 | ./docker_runner_publish.sh
34 |
--------------------------------------------------------------------------------
/.github/workflows/executor_cd_prod.yml:
--------------------------------------------------------------------------------
1 | name: "CD: executor (prod)"
2 |
3 | on:
4 | push:
5 | branches:
6 | - 'prod_executor'
7 |
8 | jobs:
9 | deploy:
10 | runs-on: ubuntu-latest
11 | env:
12 | TAG: "latest"
13 | TARGET_FILE: "docker-compose.app.yml"
14 | DOCKERHUB_PAT: ${{ secrets.DOCKERHUB_PAT }}
15 | DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
16 |
17 | steps:
18 | - uses: actions/checkout@v4
19 |
20 | - name: Make scripts executable
21 | run: |
22 | chmod +x neurons/executor/docker_publish.sh
23 | chmod +x neurons/executor/docker_runner_publish.sh
24 |
25 | - name: Run docker_publish.sh
26 | run: |
27 | cd neurons/executor
28 | ./docker_publish.sh
29 |
30 | - name: Run docker_runner_publish.sh
31 | run: |
32 | cd neurons/executor
33 | ./docker_runner_publish.sh
34 |
--------------------------------------------------------------------------------
/.github/workflows/miner_cd_dev.yml:
--------------------------------------------------------------------------------
1 | name: "CD: miner (dev)"
2 |
3 | on:
4 | push:
5 | branches:
6 | - 'dev_miner'
7 |
8 | jobs:
9 | deploy:
10 | runs-on: ubuntu-latest
11 | env:
12 | TAG: "dev"
13 | TARGET_FILE: "docker-compose.app.dev.yml"
14 | DOCKERHUB_PAT: ${{ secrets.DOCKERHUB_PAT }}
15 | DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
16 |
17 | steps:
18 | - uses: actions/checkout@v4
19 |
20 | - name: Make scripts executable
21 | run: |
22 | chmod +x neurons/miners/docker_publish.sh
23 | chmod +x neurons/miners/docker_runner_publish.sh
24 |
25 | - name: Run docker_publish.sh
26 | run: |
27 | cd neurons/miners
28 | ./docker_publish.sh
29 |
30 | - name: Run docker_runner_publish.sh
31 | run: |
32 | cd neurons/miners
33 | ./docker_runner_publish.sh
34 |
--------------------------------------------------------------------------------
/.github/workflows/miner_cd_prod.yml:
--------------------------------------------------------------------------------
1 | name: "CD: miner (prod)"
2 |
3 | on:
4 | push:
5 | branches:
6 | - 'prod_miner'
7 |
8 | jobs:
9 | deploy:
10 | runs-on: ubuntu-latest
11 | env:
12 | TAG: "latest"
13 | TARGET_FILE: "docker-compose.app.yml"
14 | DOCKERHUB_PAT: ${{ secrets.DOCKERHUB_PAT }}
15 | DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
16 |
17 | steps:
18 | - uses: actions/checkout@v4
19 |
20 | - name: Make scripts executable
21 | run: |
22 | chmod +x neurons/miners/docker_publish.sh
23 | chmod +x neurons/miners/docker_runner_publish.sh
24 |
25 | - name: Run docker_publish.sh
26 | run: |
27 | cd neurons/miners
28 | ./docker_publish.sh
29 |
30 | - name: Run docker_runner_publish.sh
31 | run: |
32 | cd neurons/miners
33 | ./docker_runner_publish.sh
34 |
--------------------------------------------------------------------------------
/.github/workflows/validator_cd_dev.yml:
--------------------------------------------------------------------------------
1 | name: "CD: validator (dev)"
2 |
3 | on:
4 | push:
5 | branches:
6 | - 'dev_validator'
7 |
8 | jobs:
9 | deploy:
10 | runs-on: ubuntu-latest
11 | env:
12 | TAG: "dev"
13 | TARGET_FILE: "docker-compose.app.dev.yml"
14 | DOCKERHUB_PAT: ${{ secrets.DOCKERHUB_PAT }}
15 | DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
16 |
17 | steps:
18 | - uses: actions/checkout@v4
19 |
20 | - name: Make scripts executable
21 | run: |
22 | chmod +x neurons/validators/docker_publish.sh
23 | chmod +x neurons/validators/docker_runner_publish.sh
24 |
25 | - name: Run docker_publish.sh
26 | run: |
27 | cd neurons/validators
28 | ./docker_publish.sh
29 |
30 | - name: Run docker_runner_publish.sh
31 | run: |
32 | cd neurons/validators
33 | ./docker_runner_publish.sh
34 |
--------------------------------------------------------------------------------
/.github/workflows/validator_cd_prod.yml:
--------------------------------------------------------------------------------
1 | name: "CD: validator (prod)"
2 |
3 | on:
4 | push:
5 | branches:
6 | - 'prod_validator'
7 |
8 | jobs:
9 | deploy:
10 | runs-on: ubuntu-latest
11 | env:
12 | TAG: "latest"
13 | TARGET_FILE: "docker-compose.app.yml"
14 | DOCKERHUB_PAT: ${{ secrets.DOCKERHUB_PAT }}
15 | DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
16 |
17 | steps:
18 | - uses: actions/checkout@v4
19 |
20 | - name: Make scripts executable
21 | run: |
22 | chmod +x neurons/validators/docker_publish.sh
23 | chmod +x neurons/validators/docker_runner_publish.sh
24 |
25 | - name: Run docker_publish.sh
26 | run: |
27 | cd neurons/validators
28 | ./docker_publish.sh
29 |
30 | - name: Run docker_runner_publish.sh
31 | run: |
32 | cd neurons/validators
33 | ./docker_runner_publish.sh
34 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 |
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | # in version control.
109 | # https://pdm.fming.dev/#use-with-ide
110 | .pdm.toml
111 |
112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
113 | __pypackages__/
114 |
115 | # Celery stuff
116 | celerybeat-schedule
117 | celerybeat.pid
118 |
119 | # SageMath parsed files
120 | *.sage.py
121 |
122 | # Environments
123 | .env
124 | .venv
125 | env/
126 | env3/
127 | ENV/
128 | env.bak/
129 | venv.bak/
130 |
131 | # Spyder project settings
132 | .spyderproject
133 | .spyproject
134 |
135 | # Rope project settings
136 | .ropeproject
137 |
138 | # mkdocs documentation
139 | /site
140 |
141 | # mypy
142 | .mypy_cache/
143 | .dmypy.json
144 | dmypy.json
145 |
146 | # Pyre type checker
147 | .pyre/
148 |
149 | # pytype static type analyzer
150 | .pytype/
151 |
152 | # Cython debug symbols
153 | cython_debug/
154 |
155 | # PyCharm
156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
158 | # and can be added to the global gitignore or merged into this file. For a more nuclear
159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
160 | #.idea/
161 |
162 | testing/
163 |
164 | # Editors
165 | .vscode/settings.json
166 |
167 | .pdm-python
168 | .env.local
169 | .env.test
170 | .env.main
171 | .env
172 |
173 | **/.python-version
--------------------------------------------------------------------------------
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 | # Editor-based HTTP Client requests
5 | /httpRequests/
6 | # Datasource local storage ignored files
7 | /dataSources/
8 | /dataSources.local.xml
9 |
--------------------------------------------------------------------------------
/.idea/compute-subnet.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
--------------------------------------------------------------------------------
/.idea/dataSources.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | postgresql
6 | true
7 | org.postgresql.Driver
8 | jdbc:postgresql://localhost:6432/
9 | $ProjectFileDir$
10 |
11 |
12 |
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/profiles_settings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/astral-sh/ruff-pre-commit
3 | # Ruff version.
4 | rev: v0.5.1
5 | hooks:
6 | # Run the linter.
7 | - id: ruff
8 | args: [ --fix ]
9 | # Run the formatter.
10 | - id: ruff-format
--------------------------------------------------------------------------------
/.vscode/launch.json:
--------------------------------------------------------------------------------
1 | {
2 | // Use IntelliSense to learn about possible attributes.
3 | // Hover to view descriptions of existing attributes.
4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
5 | "version": "0.2.0",
6 | "configurations": [
7 | {
8 | "name": "Validator: Debug Set Weights",
9 | "type": "debugpy",
10 | "request": "launch",
11 | "program": "src/cli.py",
12 | "console": "integratedTerminal",
13 | "args": ["debug-set-weights"],
14 | "cwd": "${workspaceFolder}/neurons/validators"
15 | },
16 | {
17 | "name": "Validator: Debug Run Job",
18 | "type": "debugpy",
19 | "request": "launch",
20 | "program": "src/cli.py",
21 | "console": "integratedTerminal",
22 | "args": ["debug-send-job-to-miner", "--miner_hotkey", "5Dtbwfafi4cyiDwH5HBFEAWJA913EB6G1rX7wBnfcXwiPssR", "--miner_address", "127.0.0.1", "--miner_port", "8000"],
23 | "cwd": "${workspaceFolder}/neurons/validators"
24 | },
25 | {
26 | "name": "Validator: Connector",
27 | "type": "debugpy",
28 | "request": "launch",
29 | "program": "src/connector.py",
30 | "console": "integratedTerminal",
31 | "cwd": "${workspaceFolder}/neurons/validators"
32 | }
33 | ]
34 | }
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Opentensor
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Datura Compute Subnet
2 |
3 | # Compute Subnet on Bittensor
4 |
5 | Welcome to the **Compute Subnet on Bittensor**! This project enables a decentralized, peer-to-peer GPU rental marketplace, connecting miners who contribute GPU resources with users who need computational power. Our frontend interface is available at [celiumcompute.ai](https://celiumcompute.ai), where you can easily rent machines from the subnet.
6 |
7 | ## Table of Contents
8 |
9 | - [Introduction](#introduction)
10 | - [High-Level Architecture](#high-level-architecture)
11 | - [Getting Started](#getting-started)
12 | - [For Renters](#for-renters)
13 | - [For Miners](#for-miners)
14 | - [For Validators](#for-validators)
15 | - [Contact and Support](#contact-and-support)
16 |
17 | ## Introduction
18 |
19 | The Compute Subnet on Bittensor is a decentralized network that allows miners to contribute their GPU resources to a global pool. Users can rent these resources for computational tasks, such as machine learning, data analysis, and more. The system ensures fair compensation for miners based on the quality and performance of their GPUs.
20 |
21 |
22 | ## High-Level Architecture
23 |
24 | - **Miners**: Provide GPU resources to the network, evaluated and scored by validators.
25 | - **Validators**: Securely connect to miner machines to verify hardware specs and performance. They maintain the network's integrity.
26 | - **Renters**: Rent computational resources from the network to run their tasks.
27 | - **Frontend (celiumcompute.ai)**: The web interface facilitating easy interaction between miners and renters.
28 | - **Bittensor Network**: The decentralized blockchain on which validators manage compensation and pay it out to miners in the network's native token, $TAO.
29 |
30 | ## Getting Started
31 |
32 | ### For Renters
33 |
34 | If you are looking to rent computational resources, you can easily do so through the Compute Subnet. Renters can:
35 |
36 | 1. **Sign up** at [celiumcompute.ai](https://celiumcompute.ai).
37 | 2. **Browse** available GPU resources.
38 | 3. **Select** machines based on GPU type, performance, and price.
39 | 4. **Deploy** and monitor your computational tasks using the platform's tools.
40 |
41 | To start renting machines, visit [celiumcompute.ai](https://celiumcompute.ai) and access the resources you need.
42 |
43 | ### For Miners
44 |
45 | Miners can contribute their GPU-equipped machines to the network. The machines are scored and validated based on factors like GPU type, number of GPUs, bandwidth, and overall GPU performance. Higher performance results in better compensation for miners.
46 |
47 | If you are a miner and want to contribute GPU resources to the subnet, please refer to the [Miner Setup Guide](neurons/miners/README.md) for instructions on how to:
48 |
49 | - Set up your environment.
50 | - Install the miner software.
51 | - Register your miner and connect to the network.
52 | - Get compensated for providing GPUs!
53 |
54 | ### For Validators
55 |
56 | Validators play a crucial role in maintaining the integrity of the Compute Subnet by verifying the hardware specifications and performance of miners’ machines. Validators ensure that miners are fairly compensated based on their GPU contributions and prevent fraudulent activities.
57 |
58 | For more details, visit the [Validator Setup Guide](neurons/validators/README.md).
59 |
60 |
61 | ## Contact and Support
62 |
63 | If you need assistance or have any questions, feel free to reach out:
64 |
65 | - **Discord Support**: [Dedicated Channel within the Bittensor Discord](https://discord.com/channels/799672011265015819/1291754566957928469)
66 |
--------------------------------------------------------------------------------
/contrib/CODE_REVIEW_DOCS.md:
--------------------------------------------------------------------------------
1 | # Code Review
2 | ### Conceptual Review
3 |
4 | A review can be a conceptual review, where the reviewer leaves a comment
5 | * `Concept (N)ACK`, meaning "I do (not) agree with the general goal of this pull
6 | request",
7 | * `Approach (N)ACK`, meaning `Concept ACK`, but "I do (not) agree with the
8 | approach of this change".
9 |
10 | A `NACK` needs to include a rationale why the change is not worthwhile.
11 | NACKs without accompanying reasoning may be disregarded.
12 | After conceptual agreement on the change, code review can be provided. A review
13 | begins with `ACK BRANCH_COMMIT`, where `BRANCH_COMMIT` is the top of the PR
14 | branch, followed by a description of how the reviewer did the review. The
15 | following language is used within pull request comments:
16 |
17 | - "I have tested the code", involving change-specific manual testing in
18 | addition to running the unit, functional, or fuzz tests, and in case it is
19 | not obvious how the manual testing was done, it should be described;
20 | - "I have not tested the code, but I have reviewed it and it looks
21 | OK, I agree it can be merged";
22 | - A "nit" refers to a trivial, often non-blocking issue.
23 |
24 | ### Code Review
25 | Project maintainers reserve the right to weigh the opinions of peer reviewers
26 | using common sense judgement and may also weigh based on merit. Reviewers that
27 | have demonstrated a deeper commitment and understanding of the project over time
28 | or who have clear domain expertise may naturally have more weight, as one would
29 | expect in all walks of life.
30 |
31 | Where a patch set affects consensus-critical code, the bar will be much
32 | higher in terms of discussion and peer review requirements, keeping in mind that
33 | mistakes could be very costly to the wider community. This includes refactoring
34 | of consensus-critical code.
35 |
36 | Where a patch set proposes to change the Bittensor consensus, it must have been
37 | discussed extensively on the discord server and other channels, be accompanied by a widely
38 | discussed BIP and have a generally widely perceived technical consensus of being
39 | a worthwhile change based on the judgement of the maintainers.
40 |
41 | ### Finding Reviewers
42 |
43 | As most reviewers are themselves developers with their own projects, the review
44 | process can be quite lengthy, and some amount of patience is required. If you find
45 | that you've been waiting for a pull request to be given attention for several
46 | months, there may be a number of reasons for this, some of which you can do something
47 | about:
48 |
49 | - It may be because of a feature freeze due to an upcoming release. During this time,
50 | only bug fixes are taken into consideration. If your pull request is a new feature,
51 | it will not be prioritized until after the release. Wait for the release.
52 | - It may be because the changes you are suggesting do not appeal to people. Rather than
53 | nits and critique, which require effort and mean they care enough to spend time on your
54 | contribution, thundering silence is a good sign of widespread (mild) dislike of a given change
55 | (because people don't assume *others* won't actually like the proposal). Don't take
56 | that personally, though! Instead, take another critical look at what you are suggesting
57 | and see if it: changes too much, is too broad, doesn't adhere to the
58 | [developer notes](DEVELOPMENT_WORKFLOW.md), is dangerous or insecure, is messily written, etc.
59 | Identify and address any of the issues you find. Then ask e.g. on IRC if someone could give
60 | their opinion on the concept itself.
61 | - It may be because your code is too complex for all but a few people, and those people
62 | may not have realized your pull request even exists. A great way to find people who
63 | are qualified and care about the code you are touching is the
64 | [Git Blame feature](https://docs.github.com/en/github/managing-files-in-a-repository/managing-files-on-github/tracking-changes-in-a-file). Simply
65 | look up who last modified the code you are changing and see if you can find
66 | them and give them a nudge. Don't be incessant about the nudging, though.
67 | - Finally, if all else fails, ask on IRC or elsewhere for someone to give your pull request
68 | a look. If you think you've been waiting for an unreasonably long time (say,
69 | more than a month) for no particular reason (a few lines changed, etc.),
70 | this is totally fine. Try to return the favor when someone else is asking
71 | for feedback on their code, and the universe balances out.
72 | - Remember that the best thing you can do while waiting is give review to others!
--------------------------------------------------------------------------------
/datura/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 |
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | # in version control.
109 | # https://pdm-project.org/#use-with-ide
110 | .pdm.toml
111 | .pdm-python
112 | .pdm-build/
113 |
114 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
115 | __pypackages__/
116 |
117 | # Celery stuff
118 | celerybeat-schedule
119 | celerybeat.pid
120 |
121 | # SageMath parsed files
122 | *.sage.py
123 |
124 | # Environments
125 | .env
126 | .venv
127 | env/
128 | venv/
129 | ENV/
130 | env.bak/
131 | venv.bak/
132 |
133 | # Spyder project settings
134 | .spyderproject
135 | .spyproject
136 |
137 | # Rope project settings
138 | .ropeproject
139 |
140 | # mkdocs documentation
141 | /site
142 |
143 | # mypy
144 | .mypy_cache/
145 | .dmypy.json
146 | dmypy.json
147 |
148 | # Pyre type checker
149 | .pyre/
150 |
151 | # pytype static type analyzer
152 | .pytype/
153 |
154 | # Cython debug symbols
155 | cython_debug/
156 |
157 | # PyCharm
158 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
159 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
160 | # and can be added to the global gitignore or merged into this file. For a more nuclear
161 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
162 | #.idea/
163 |
--------------------------------------------------------------------------------
/datura/README.md:
--------------------------------------------------------------------------------
1 | # datura
2 |
--------------------------------------------------------------------------------
/datura/datura/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Datura-ai/compute-subnet/39f006fd39394b9972ab509cc7ab558a8a374516/datura/datura/__init__.py
--------------------------------------------------------------------------------
/datura/datura/consumers/base.py:
--------------------------------------------------------------------------------
1 | import abc
2 | import logging
3 |
4 | from fastapi import WebSocket, WebSocketDisconnect
5 |
6 | from ..requests.base import BaseRequest
7 |
8 | logger = logging.getLogger(__name__)
9 |
10 |
11 | class BaseConsumer(abc.ABC):
12 | def __init__(self, websocket: WebSocket):
13 | self.websocket = websocket
14 |
15 | @abc.abstractmethod
16 | def accepted_request_type(self) -> type[BaseRequest]:
17 | pass
18 |
19 | async def connect(self):
20 | await self.websocket.accept()
21 |
22 | async def receive_message(self) -> BaseRequest:
23 | data = await self.websocket.receive_text()
24 | return self.accepted_request_type().parse(data)
25 |
26 | async def send_message(self, msg: BaseRequest):
27 | await self.websocket.send_text(msg.json())
28 |
29 | async def disconnect(self):
30 | try:
31 | await self.websocket.close()
32 | except Exception:
33 | pass
34 |
35 | @abc.abstractmethod
36 | async def handle_message(self, data: BaseRequest):
37 | raise NotImplementedError
38 |
39 | async def handle(self):
40 | # await self.connect()
41 | try:
42 | while True:
43 | data: BaseRequest = await self.receive_message()
44 | await self.handle_message(data)
45 | except WebSocketDisconnect as ex:
46 | logger.info("Websocket connection closed, e: %s", str(ex))
47 | await self.disconnect()
48 | except Exception as ex:
49 | logger.info("Handling message error: %s", str(ex))
50 | await self.disconnect()
51 |
--------------------------------------------------------------------------------
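
The `BaseConsumer` above is the shared websocket base class; a minimal sketch (not from the repo) of wiring a subclass into a FastAPI route is shown below. `EchoConsumer` and the `/ws` path are hypothetical names, and the request models are the miner requests defined later in `datura/datura/requests/miner_requests.py`. Note that `handle()` does not accept the socket itself (the `connect()` call is commented out), so the endpoint calls `connect()` explicitly.

```python
# Hypothetical usage sketch, not part of the datura package.
from fastapi import FastAPI, WebSocket

from datura.consumers.base import BaseConsumer
from datura.requests.base import BaseRequest
from datura.requests.miner_requests import BaseMinerRequest

app = FastAPI()


class EchoConsumer(BaseConsumer):
    def accepted_request_type(self) -> type[BaseRequest]:
        # Incoming frames are parsed as miner requests, dispatched by message_type.
        return BaseMinerRequest

    async def handle_message(self, data: BaseRequest):
        # Echo every parsed request back to the peer.
        await self.send_message(data)


@app.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
    consumer = EchoConsumer(websocket)
    await consumer.connect()  # handle() does not accept the websocket itself
    await consumer.handle()
```
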
/datura/datura/errors/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Datura-ai/compute-subnet/39f006fd39394b9972ab509cc7ab558a8a374516/datura/datura/errors/__init__.py
--------------------------------------------------------------------------------
/datura/datura/errors/protocol.py:
--------------------------------------------------------------------------------
1 | from datura.requests.base import BaseRequest
2 |
3 |
4 | class UnsupportedMessageReceived(Exception):
5 | def __init__(self, msg: BaseRequest):
6 | self.msg = msg
7 |
8 | def __str__(self):
9 | return f"{type(self).__name__}: {self.msg.json()}"
10 |
11 | __repr__ = __str__
12 |
--------------------------------------------------------------------------------
/datura/datura/requests/base.py:
--------------------------------------------------------------------------------
1 | import abc
2 | import enum
3 | import json
4 |
5 | import pydantic
6 |
7 |
8 | class ValidationError(Exception):
9 | def __init__(self, msg):
10 | self.msg = msg
11 |
12 | @classmethod
13 | def from_json_decode_error(cls, exc: json.JSONDecodeError):
14 | return cls(exc.args[0])
15 |
16 | @classmethod
17 | def from_pydantic_validation_error(cls, exc: pydantic.ValidationError):
18 | return cls(json.dumps(exc.json()))
19 |
20 | def __repr__(self):
21 | return f"{type(self).__name__}({self.msg})"
22 |
23 |
24 | def all_subclasses(cls: type):
25 | for subcls in cls.__subclasses__():
26 | yield subcls
27 | yield from all_subclasses(subcls)
28 |
29 |
30 | base_class_to_request_type_mapping = {}
31 |
32 |
33 | class BaseRequest(pydantic.BaseModel, abc.ABC):
34 | message_type: enum.Enum
35 |
36 | @classmethod
37 | def type_to_model(cls, type_: enum.Enum) -> type["BaseRequest"]:
38 | mapping = base_class_to_request_type_mapping.get(cls)
39 | if not mapping:
40 | mapping = {}
41 | for klass in all_subclasses(cls):
42 | if not (message_type := klass.__fields__.get("message_type")):
43 | continue
44 | if not message_type.default:
45 | continue
46 | mapping[message_type.default] = klass
47 | base_class_to_request_type_mapping[cls] = mapping
48 |
49 | return mapping[type_]
50 |
51 | @classmethod
52 | def parse(cls, str_: str):
53 | try:
54 | json_ = json.loads(str_)
55 | except json.JSONDecodeError as exc:
56 | raise ValidationError.from_json_decode_error(exc)
57 |
58 | try:
59 | base_model_object = cls.parse_obj(json_)
60 | except pydantic.ValidationError as exc:
61 | raise ValidationError.from_pydantic_validation_error(exc)
62 |
63 | target_model = cls.type_to_model(base_model_object.message_type)
64 |
65 | try:
66 | return target_model.parse_obj(json_)
67 | except pydantic.ValidationError as exc:
68 | raise ValidationError.from_pydantic_validation_error(exc)
69 |
--------------------------------------------------------------------------------
/datura/datura/requests/miner_requests.py:
--------------------------------------------------------------------------------
1 | import enum
2 |
3 | import pydantic
4 | from datura.requests.base import BaseRequest
5 |
6 |
7 | class RequestType(enum.Enum):
8 | GenericError = "GenericError"
9 | AcceptJobRequest = "AcceptJobRequest"
10 | DeclineJobRequest = "DeclineJobRequest"
11 | AcceptSSHKeyRequest = "AcceptSSHKeyRequest"
12 | FailedRequest = "FailedRequest"
13 | UnAuthorizedRequest = "UnAuthorizedRequest"
14 | SSHKeyRemoved = "SSHKeyRemoved"
15 | PodLogsResponse = "PodLogsResponse"
16 |
17 |
18 | class Executor(pydantic.BaseModel):
19 | uuid: str
20 | address: str
21 | port: int
22 |
23 |
24 | class BaseMinerRequest(BaseRequest):
25 | message_type: RequestType
26 |
27 |
28 | class GenericError(BaseMinerRequest):
29 | message_type: RequestType = RequestType.GenericError
30 | details: str | None = None
31 |
32 |
33 | class AcceptJobRequest(BaseMinerRequest):
34 | message_type: RequestType = RequestType.AcceptJobRequest
35 | executors: list[Executor]
36 |
37 |
38 | class DeclineJobRequest(BaseMinerRequest):
39 | message_type: RequestType = RequestType.DeclineJobRequest
40 |
41 |
42 | class ExecutorSSHInfo(pydantic.BaseModel):
43 | uuid: str
44 | address: str
45 | port: int
46 | ssh_username: str
47 | ssh_port: int
48 | python_path: str
49 | root_dir: str
50 | port_range: str | None = None
51 | port_mappings: str | None = None
52 | price: float | None = None
53 |
54 |
55 | class AcceptSSHKeyRequest(BaseMinerRequest):
56 | message_type: RequestType = RequestType.AcceptSSHKeyRequest
57 | executors: list[ExecutorSSHInfo]
58 |
59 |
60 | class SSHKeyRemoved(BaseMinerRequest):
61 | message_type: RequestType = RequestType.SSHKeyRemoved
62 |
63 |
64 | class FailedRequest(BaseMinerRequest):
65 | message_type: RequestType = RequestType.FailedRequest
66 | details: str | None = None
67 |
68 |
69 | class UnAuthorizedRequest(FailedRequest):
70 | message_type: RequestType = RequestType.UnAuthorizedRequest
71 |
72 |
73 | class PodLog(pydantic.BaseModel):
74 | uuid: str
75 | container_name: str | None = None
76 | container_id: str | None = None
77 | event: str | None = None
78 | exit_code: int | None = None
79 | reason: str | None = None
80 | error: str | None = None
81 | created_at: str
82 |
83 |
84 | class PodLogsResponse(BaseMinerRequest):
85 | message_type: RequestType = RequestType.PodLogsResponse
86 | logs: list[PodLog] = []
87 |
--------------------------------------------------------------------------------
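
These miner request models are resolved through `BaseRequest.parse` from `datura/datura/requests/base.py`: the `message_type` field of the raw JSON selects the concrete subclass. A minimal usage sketch (illustration only, not repo code):

```python
# Hypothetical example of the message_type-based dispatch.
from datura.requests.miner_requests import AcceptJobRequest, BaseMinerRequest

raw = (
    '{"message_type": "AcceptJobRequest",'
    ' "executors": [{"uuid": "abc", "address": "1.2.3.4", "port": 8001}]}'
)

request = BaseMinerRequest.parse(raw)  # dispatches on message_type
assert isinstance(request, AcceptJobRequest)
assert request.executors[0].port == 8001
```
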
/datura/datura/requests/validator_requests.py:
--------------------------------------------------------------------------------
1 | import enum
2 | import json
3 | from typing import Optional
4 |
5 | import pydantic
6 | from datura.requests.base import BaseRequest
7 |
8 |
9 | class RequestType(enum.Enum):
10 | AuthenticateRequest = "AuthenticateRequest"
11 | SSHPubKeySubmitRequest = "SSHPubKeySubmitRequest"
12 | SSHPubKeyRemoveRequest = "SSHPubKeyRemoveRequest"
13 | GetPodLogsRequest = "GetPodLogsRequest"
14 |
15 |
16 | class BaseValidatorRequest(BaseRequest):
17 | message_type: RequestType
18 |
19 |
20 | class AuthenticationPayload(pydantic.BaseModel):
21 | validator_hotkey: str
22 | miner_hotkey: str
23 | timestamp: int
24 |
25 | def blob_for_signing(self):
26 | instance_dict = self.model_dump()
27 | return json.dumps(instance_dict, sort_keys=True)
28 |
29 |
30 | class AuthenticateRequest(BaseValidatorRequest):
31 | message_type: RequestType = RequestType.AuthenticateRequest
32 | payload: AuthenticationPayload
33 | signature: str
34 |
35 | def blob_for_signing(self):
36 | return self.payload.blob_for_signing()
37 |
38 |
39 | class SSHPubKeySubmitRequest(BaseValidatorRequest):
40 | message_type: RequestType = RequestType.SSHPubKeySubmitRequest
41 | public_key: bytes
42 | executor_id: Optional[str] = None
43 |
44 |
45 | class SSHPubKeyRemoveRequest(BaseValidatorRequest):
46 | message_type: RequestType = RequestType.SSHPubKeyRemoveRequest
47 | public_key: bytes
48 | executor_id: Optional[str] = None
49 |
50 |
51 | class GetPodLogsRequest(BaseValidatorRequest):
52 | message_type: RequestType = RequestType.GetPodLogsRequest
53 | executor_id: str
54 | container_name: str
55 |
--------------------------------------------------------------------------------
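
To illustrate how the authentication message fits together, here is a minimal sketch (not repo code) of a validator assembling an `AuthenticateRequest`. The `sign_blob` helper is a hypothetical placeholder; a real validator signs the blob with its hotkey keypair, and the hotkey strings below are example values only.

```python
# Hypothetical example; the "signature" below is NOT a real cryptographic signature.
import time

from datura.requests.validator_requests import AuthenticateRequest, AuthenticationPayload


def sign_blob(blob: str) -> str:
    # Placeholder standing in for signing with the validator's hotkey.
    return "0x" + blob.encode().hex()


payload = AuthenticationPayload(
    validator_hotkey="validator-ss58-address",  # placeholder value
    miner_hotkey="5Dtbwfafi4cyiDwH5HBFEAWJA913EB6G1rX7wBnfcXwiPssR",  # example hotkey from .vscode/launch.json
    timestamp=int(time.time()),
)
request = AuthenticateRequest(payload=payload, signature=sign_blob(payload.blob_for_signing()))
print(request.json())  # the serialized frame a consumer would send over the websocket
```
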
/datura/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "datura"
3 | version = "0.1.2"
4 | description = "Compute subnet shared library"
5 | authors = [
6 | {name = "waris", email = "waris0609@outlook.com"},
7 | ]
8 | dependencies = []
9 | requires-python = "==3.11.*"
10 | readme = "README.md"
11 | license = {text = "MIT"}
12 |
13 | [build-system]
14 | requires = ["pdm-backend"]
15 | build-backend = "pdm.backend"
16 |
17 |
18 | [tool.pdm]
19 | distribution = true
20 |
--------------------------------------------------------------------------------
/datura/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Datura-ai/compute-subnet/39f006fd39394b9972ab509cc7ab558a8a374516/datura/tests/__init__.py
--------------------------------------------------------------------------------
/neurons/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Datura-ai/compute-subnet/39f006fd39394b9972ab509cc7ab558a8a374516/neurons/__init__.py
--------------------------------------------------------------------------------
/neurons/executor/.dockerignore:
--------------------------------------------------------------------------------
1 | .pdm.toml
2 | .pdm-python
3 | .pdm-build/
4 | __pycache__/
5 | .venv
6 | docker_build.sh
7 | docker_publish.sh
8 | docker-compose.yml
9 | .env*
10 | sysbox-ce_*
--------------------------------------------------------------------------------
/neurons/executor/.env.template:
--------------------------------------------------------------------------------
1 | INTERNAL_PORT=8001 # internal port of the docker container
2 | EXTERNAL_PORT=8001 # external port of the docker container
3 |
4 | SSH_PORT=2200 # external SSH port of the container, mapped to port 22 inside
5 | SSH_PUBLIC_PORT=2200 # Optional. Set this if you use a proxy and the public port differs from the internal port on your server
6 |
7 | # NOTE: please set either RENTING_PORT_RANGE or RENTING_PORT_MAPPINGS, not both
8 | # Note: If you are not using a proxy and all ports are publicly available,
9 | # then you don't have to set RENTING_PORT_RANGE or RENTING_PORT_MAPPINGS
10 |
11 | # (optional) If your internal port and external port are THE SAME
12 | # configure available ports for renting.
13 | # define the ports comma-separated, or as a range with a dash "-"
14 | # Please define at least 2 ports
15 | # example
16 | # RENTING_PORT_RANGE="40000-65535"
17 | # RENTING_PORT_RANGE="9001,9002,9003"
18 |
19 | # (optional) If your internal port and external port are NOT THE SAME
20 | # add an array of [internal_port, external_port] mappings
21 | # example: if internal port 46681 is mapped to external port 56681
22 | # and internal port 46682 is mapped to external port 56682, then
23 | # RENTING_PORT_MAPPINGS="[[46681, 56681], [46682, 56682]]"
24 |
25 | MINER_HOTKEY_SS58_ADDRESS=
26 |
27 | RENTING_PRICE=2.5 # renting price per hour in USD
--------------------------------------------------------------------------------
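
The two optional renting-port settings above use different encodings. The following Python sketch (illustration only, assuming these helper names; it is not the executor's actual configuration parser) shows how each format described in the template can be interpreted:

```python
# Hypothetical helpers mirroring the formats documented in .env.template.
import json


def parse_port_range(value: str) -> list[int]:
    """Parse RENTING_PORT_RANGE, e.g. "40000-65535" or "9001,9002,9003"."""
    if "-" in value:
        start, end = value.split("-")
        return list(range(int(start), int(end) + 1))
    return [int(port) for port in value.split(",")]


def parse_port_mappings(value: str) -> list[tuple[int, int]]:
    """Parse RENTING_PORT_MAPPINGS, e.g. "[[46681, 56681], [46682, 56682]]"."""
    return [(internal, external) for internal, external in json.loads(value)]


assert parse_port_range("9001,9002,9003") == [9001, 9002, 9003]
assert parse_port_mappings("[[46681, 56681], [46682, 56682]]") == [
    (46681, 56681),
    (46682, 56682),
]
```
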
/neurons/executor/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 | !libdmcompverify.so
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 |
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | # in version control.
109 | # https://pdm-project.org/#use-with-ide
110 | .pdm.toml
111 | .pdm-python
112 | .pdm-build/
113 |
114 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
115 | __pypackages__/
116 |
117 | # Celery stuff
118 | celerybeat-schedule
119 | celerybeat.pid
120 |
121 | # SageMath parsed files
122 | *.sage.py
123 |
124 | # Environments
125 | .env
126 | .venv
127 | env/
128 | venv/
129 | ENV/
130 | env.bak/
131 | venv.bak/
132 |
133 | # Spyder project settings
134 | .spyderproject
135 | .spyproject
136 |
137 | # Rope project settings
138 | .ropeproject
139 |
140 | # mkdocs documentation
141 | /site
142 |
143 | # mypy
144 | .mypy_cache/
145 | .dmypy.json
146 | dmypy.json
147 |
148 | # Pyre type checker
149 | .pyre/
150 |
151 | # pytype static type analyzer
152 | .pytype/
153 |
154 | # Cython debug symbols
155 | cython_debug/
156 |
157 | # PyCharm
158 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
159 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
160 | # and can be added to the global gitignore or merged into this file. For a more nuclear
161 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
162 | #.idea/
163 |
164 | temp/
--------------------------------------------------------------------------------
/neurons/executor/Dockerfile:
--------------------------------------------------------------------------------
1 | ARG BASE_IMAGE=daturaai/ubuntu:24.04-py3.11
2 |
3 |
4 | FROM $BASE_IMAGE AS base-image
5 | LABEL builder=true
6 |
7 | WORKDIR /root/app/
8 |
9 | RUN pip3 install -U pdm
10 | ENV PDM_CHECK_UPDATE=false
11 |
12 | RUN apt-get update
13 | COPY pyproject.toml pdm.lock README.md ./
14 | COPY --from=datura . /datura
15 |
16 | RUN --mount=type=cache,target=/tmp/pdm_cache \
17 | pdm config cache_dir /tmp/pdm_cache && \
18 | pdm config python.use_venv False && \
19 | PDM_BUILD_SCM_VERSION=0 pdm sync --prod --group :all
20 | RUN mkdir -p /opt/ && mv __pypackages__/3.11/ /opt/pypackages/
21 |
22 | FROM $BASE_IMAGE AS secondary-image
23 | LABEL builder=false
24 |
25 | RUN apt-get update -y \
26 | && apt-get upgrade -y \
27 | && apt-get install -y speedtest-cli \
28 | && rm -rf /var/lib/apt/lists/*
29 |
30 | WORKDIR /root/app/
31 |
32 | COPY . .
33 | COPY --from=base-image /opt/pypackages/ /opt/pypackages/
34 | RUN mv /root/app/libdmcompverify.so /usr/lib/
35 |
36 | # Fix CLI entry-point shebangs so they use the system python3
37 | RUN sed -i '1s|.*|#!/usr/bin/env python3|' /opt/pypackages/bin/alembic
38 | RUN sed -i '1s|.*|#!/usr/bin/env python3|' /opt/pypackages/bin/speedcheck
39 | RUN sed -i '1s|.*|#!/usr/bin/env python3|' /opt/pypackages/bin/netmeasure
40 | # Remove existing SSH host keys
41 | RUN rm -f /etc/ssh/ssh_host_*
42 |
43 | ENV ENV=prod
44 | ENV PYTHONUNBUFFERED=1
45 | ENV PATH=/opt/pypackages/bin:$PATH
46 | ENV PYTHONPATH=/opt/pypackages/lib:/root/app
47 |
48 | RUN echo "export PYTHONPATH=$PYTHONPATH" >> /etc/profile
49 | RUN echo "export PYTHONPATH=$PYTHONPATH" >> ~/.bashrc
50 | RUN echo "export PYTHONPATH=$PYTHONPATH" >> ~/.bash_profile
51 |
52 | RUN mkdir -p /etc/docker
53 | RUN mkdir -p /etc/nvidia-container-runtime
54 | RUN mkdir -p /root/.ssh
55 |
56 | CMD ["bash", "run.sh"]
--------------------------------------------------------------------------------
/neurons/executor/Dockerfile.runner:
--------------------------------------------------------------------------------
1 | FROM docker:26-cli
2 | WORKDIR /root/executor
3 |
4 | ARG targetFile
5 | COPY ${targetFile} docker-compose.yml
6 | COPY entrypoint.sh /entrypoint.sh
7 | COPY version.txt .
8 |
9 | RUN chmod u+x /entrypoint.sh
10 |
11 | ENTRYPOINT ["/entrypoint.sh"]
12 |
--------------------------------------------------------------------------------
/neurons/executor/alembic.ini:
--------------------------------------------------------------------------------
1 | # A generic, single database configuration.
2 |
3 | [alembic]
4 | # path to migration scripts
5 | # Use forward slashes (/) also on windows to provide an os agnostic path
6 | script_location = migrations
7 |
8 | # template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
9 | # Uncomment the line below if you want the files to be prepended with date and time
10 | # see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file
11 | # for all available tokens
12 | # file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
13 |
14 | # sys.path path, will be prepended to sys.path if present.
15 | # defaults to the current working directory.
16 | prepend_sys_path = src
17 |
18 | # timezone to use when rendering the date within the migration file
19 | # as well as the filename.
20 | # If specified, requires the python>=3.9 or backports.zoneinfo library.
21 | # Any required deps can installed by adding `alembic[tz]` to the pip requirements
22 | # string value is passed to ZoneInfo()
23 | # leave blank for localtime
24 | # timezone =
25 |
26 | # max length of characters to apply to the "slug" field
27 | # truncate_slug_length = 40
28 |
29 | # set to 'true' to run the environment during
30 | # the 'revision' command, regardless of autogenerate
31 | # revision_environment = false
32 |
33 | # set to 'true' to allow .pyc and .pyo files without
34 | # a source .py file to be detected as revisions in the
35 | # versions/ directory
36 | # sourceless = false
37 |
38 | # version location specification; This defaults
39 | # to migrations/versions. When using multiple version
40 | # directories, initial revisions must be specified with --version-path.
41 | # The path separator used here should be the separator specified by "version_path_separator" below.
42 | # version_locations = %(here)s/bar:%(here)s/bat:migrations/versions
43 |
44 | # version path separator; As mentioned above, this is the character used to split
45 | # version_locations. The default within new alembic.ini files is "os", which uses os.pathsep.
46 | # If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas.
47 | # Valid values for version_path_separator are:
48 | #
49 | # version_path_separator = :
50 | # version_path_separator = ;
51 | # version_path_separator = space
52 | # version_path_separator = newline
53 | version_path_separator = os # Use os.pathsep. Default configuration used for new projects.
54 |
55 | # set to 'true' to search source files recursively
56 | # in each "version_locations" directory
57 | # new in Alembic version 1.10
58 | # recursive_version_locations = false
59 |
60 | # the output encoding used when revision files
61 | # are written from script.py.mako
62 | # output_encoding = utf-8
63 |
64 | sqlalchemy.url = driver://user:pass@localhost/dbname
65 |
66 |
67 | [post_write_hooks]
68 | # post_write_hooks defines scripts or Python functions that are run
69 | # on newly generated revision scripts. See the documentation for further
70 | # detail and examples
71 |
72 | # format using "black" - use the console_scripts runner, against the "black" entrypoint
73 | # hooks = black
74 | # black.type = console_scripts
75 | # black.entrypoint = black
76 | # black.options = -l 79 REVISION_SCRIPT_FILENAME
77 |
78 | # lint with attempts to fix using "ruff" - use the exec runner, execute a binary
79 | # hooks = ruff
80 | # ruff.type = exec
81 | # ruff.executable = %(here)s/.venv/bin/ruff
82 | # ruff.options = --fix REVISION_SCRIPT_FILENAME
83 |
84 | # Logging configuration
85 | [loggers]
86 | keys = root,sqlalchemy,alembic
87 |
88 | [handlers]
89 | keys = console
90 |
91 | [formatters]
92 | keys = generic
93 |
94 | [logger_root]
95 | level = WARNING
96 | handlers = console
97 | qualname =
98 |
99 | [logger_sqlalchemy]
100 | level = WARNING
101 | handlers =
102 | qualname = sqlalchemy.engine
103 |
104 | [logger_alembic]
105 | level = INFO
106 | handlers =
107 | qualname = alembic
108 |
109 | [handler_console]
110 | class = StreamHandler
111 | args = (sys.stderr,)
112 | level = NOTSET
113 | formatter = generic
114 |
115 | [formatter_generic]
116 | format = %(levelname)-5.5s [%(name)s] %(message)s
117 | datefmt = %H:%M:%S
118 |
--------------------------------------------------------------------------------
/neurons/executor/config.toml:
--------------------------------------------------------------------------------
1 | #accept-nvidia-visible-devices-as-volume-mounts = false
2 | #accept-nvidia-visible-devices-envvar-when-unprivileged = true
3 | disable-require = false
4 | supported-driver-capabilities = "compat32,compute,display,graphics,ngx,utility,video"
5 | #swarm-resource = "DOCKER_RESOURCE_GPU"
6 |
7 | [nvidia-container-cli]
8 | #debug = "/var/log/nvidia-container-toolkit.log"
9 | environment = []
10 | #ldcache = "/etc/ld.so.cache"
11 | ldconfig = "@/sbin/ldconfig.real"
12 | load-kmods = true
13 | no-cgroups = false
14 | #path = "/usr/bin/nvidia-container-cli"
15 | #root = "/run/nvidia/driver"
16 | #user = "root:video"
17 |
18 | [nvidia-container-runtime]
19 | #debug = "/var/log/nvidia-container-runtime.log"
20 | log-level = "info"
21 | mode = "auto"
22 | runtimes = ["docker-runc", "runc", "crun"]
23 |
24 | [nvidia-container-runtime.modes]
25 |
26 | [nvidia-container-runtime.modes.cdi]
27 | annotation-prefixes = ["cdi.k8s.io/"]
28 | default-kind = "nvidia.com/gpu"
29 | spec-dirs = ["/etc/cdi", "/var/run/cdi"]
30 |
31 | [nvidia-container-runtime.modes.csv]
32 | mount-spec-path = "/etc/nvidia-container-runtime/host-files-for-container.d"
33 |
34 | [nvidia-container-runtime-hook]
35 | path = "nvidia-container-runtime-hook"
36 | skip-mode-detection = false
37 |
38 | [nvidia-ctk]
39 | path = "nvidia-ctk"
--------------------------------------------------------------------------------
/neurons/executor/daemon.json:
--------------------------------------------------------------------------------
1 | {
2 | "runtimes": {
3 | "nvidia": {
4 | "path": "nvidia-container-runtime",
5 | "runtimeArgs": []
6 | },
7 | "sysbox-runc": {
8 | "path": "/usr/bin/sysbox-runc"
9 | }
10 | },
11 | "exec-opts": [
12 | "native.cgroupdriver=cgroupfs"
13 | ],
14 | "bip": "172.24.0.1/16",
15 | "default-address-pools": [
16 | {
17 | "base": "172.31.0.0/16",
18 | "size": 24
19 | }
20 | ]
21 | }
--------------------------------------------------------------------------------
/neurons/executor/docker-compose.app.dev.yml:
--------------------------------------------------------------------------------
1 | services:
2 | db:
3 | image: postgres:14.0-alpine
4 | healthcheck:
5 | test: pg_isready -U postgres || exit 1
6 | environment:
7 | - POSTGRES_USER=postgres
8 | - POSTGRES_PASSWORD=password
9 | - POSTGRES_DB=compute-subnet-db
10 | ports:
11 | - 8432:5432
12 | volumes:
13 | - db_data:/var/lib/postgresql/data
14 |
15 | executor:
16 | image: daturaai/compute-subnet-executor:dev
17 | env_file: ./.env
18 | restart: always
19 | ports:
20 | - ${EXTERNAL_PORT}:${INTERNAL_PORT}
21 | - ${SSH_PORT}:22
22 | expose:
23 | - ${INTERNAL_PORT}
24 | volumes:
25 | - /var/run/docker.sock:/var/run/docker.sock
26 | - /etc/docker/daemon.json:/etc/docker/daemon.json
27 | - /etc/nvidia-container-runtime/config.toml:/etc/nvidia-container-runtime/config.toml
28 | pid: host
29 | environment:
30 | - DB_URI=postgresql://postgres:password@db:5432/compute-subnet-db
31 | depends_on:
32 | - db
33 | labels:
34 | autoheal-app: true
35 | healthcheck:
36 | test: [ "CMD-SHELL", "nvidia-smi || exit 1" ]
37 | interval: 2m
38 | timeout: 10s
39 | retries: 3
40 | deploy:
41 | resources:
42 | reservations:
43 | devices:
44 | - driver: nvidia
45 | count: all
46 | capabilities: [ gpu ]
47 |
48 | monitor:
49 | image: daturaai/compute-subnet-executor:latest
50 | env_file: ./.env
51 | restart: always
52 | volumes:
53 | - /var/run/docker.sock:/var/run/docker.sock
54 | - /dev/kmsg:/dev/kmsg:ro
55 | pid: host
56 | command: python src/monitor.py
57 | environment:
58 | - DB_URI=postgresql://postgres:password@db:5432/compute-subnet-db
59 | depends_on:
60 | - executor
61 | deploy:
62 | resources:
63 | reservations:
64 | devices:
65 | - driver: nvidia
66 | count: all
67 | capabilities: [ gpu ]
68 |
69 | autoheal:
70 | restart: always
71 | image: willfarrell/autoheal
72 | environment:
73 | - AUTOHEAL_CONTAINER_LABEL=all
74 | volumes:
75 | - /var/run/docker.sock:/var/run/docker.sock
76 |
77 | volumes:
78 | db_data:
79 |
--------------------------------------------------------------------------------
/neurons/executor/docker-compose.app.yml:
--------------------------------------------------------------------------------
1 | services:
2 | db:
3 | image: postgres:14.0-alpine
4 | healthcheck:
5 | test: pg_isready -U postgres || exit 1
6 | environment:
7 | - POSTGRES_USER=postgres
8 | - POSTGRES_PASSWORD=password
9 | - POSTGRES_DB=compute-subnet-db
10 | ports:
11 | - 8432:5432
12 | volumes:
13 | - db_data:/var/lib/postgresql/data
14 |
15 | executor:
16 | image: daturaai/compute-subnet-executor:latest
17 | env_file: ./.env
18 | restart: always
19 | ports:
20 | - ${EXTERNAL_PORT}:${INTERNAL_PORT}
21 | - ${SSH_PORT}:22
22 | expose:
23 | - ${INTERNAL_PORT}
24 | volumes:
25 | - /var/run/docker.sock:/var/run/docker.sock
26 | - /etc/docker/daemon.json:/etc/docker/daemon.json
27 | - /etc/nvidia-container-runtime/config.toml:/etc/nvidia-container-runtime/config.toml
28 | pid: host
29 | environment:
30 | - DB_URI=postgresql://postgres:password@db:5432/compute-subnet-db
31 | depends_on:
32 | - db
33 | labels:
34 | autoheal-app: true
35 | healthcheck:
36 | test: [ "CMD-SHELL", "nvidia-smi || exit 1" ]
37 | interval: 2m
38 | timeout: 10s
39 | retries: 3
40 | deploy:
41 | resources:
42 | reservations:
43 | devices:
44 | - driver: nvidia
45 | count: all
46 | capabilities: [ gpu ]
47 |
48 | monitor:
49 | image: daturaai/compute-subnet-executor:latest
50 | env_file: ./.env
51 | restart: always
52 | volumes:
53 | - /var/run/docker.sock:/var/run/docker.sock
54 | - /dev/kmsg:/dev/kmsg:ro
55 | pid: host
56 | command: python src/monitor.py
57 | environment:
58 | - DB_URI=postgresql://postgres:password@db:5432/compute-subnet-db
59 | depends_on:
60 | - executor
61 | cap_add:
62 | - SYSLOG
63 | deploy:
64 | resources:
65 | reservations:
66 | devices:
67 | - driver: nvidia
68 | count: all
69 | capabilities: [ gpu ]
70 |
71 | autoheal:
72 | restart: always
73 | image: willfarrell/autoheal
74 | environment:
75 | - AUTOHEAL_CONTAINER_LABEL=all
76 | volumes:
77 | - /var/run/docker.sock:/var/run/docker.sock
78 |
79 | volumes:
80 | db_data:
81 |
--------------------------------------------------------------------------------
/neurons/executor/docker-compose.dev.yml:
--------------------------------------------------------------------------------
1 | version: '3.7'
2 |
3 | services:
4 | executor-runner:
5 | image: daturaai/compute-subnet-executor-runner:dev
6 | restart: unless-stopped
7 | volumes:
8 | - /var/run/docker.sock:/var/run/docker.sock
9 | - "$HOME/.bittensor/wallets:/root/.bittensor/wallets"
10 | - ./.env:/root/executor/.env
11 | labels:
12 | - "com.centurylinklabs.watchtower.enable=true"
13 |
14 | watchtower:
15 | image: containrrr/watchtower:1.7.1
16 | restart: unless-stopped
17 | volumes:
18 | - /var/run/docker.sock:/var/run/docker.sock
19 | command: --interval 60 --cleanup --label-enable
20 |
--------------------------------------------------------------------------------
/neurons/executor/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.7'
2 |
3 | services:
4 | executor-runner:
5 | image: daturaai/compute-subnet-executor-runner:latest
6 | restart: unless-stopped
7 | volumes:
8 | - /var/run/docker.sock:/var/run/docker.sock
9 | - "$HOME/.bittensor/wallets:/root/.bittensor/wallets"
10 | - ./.env:/root/executor/.env
11 | labels:
12 | - "com.centurylinklabs.watchtower.enable=true"
13 |
14 | watchtower:
15 | image: containrrr/watchtower:1.7.1
16 | restart: unless-stopped
17 | volumes:
18 | - /var/run/docker.sock:/var/run/docker.sock
19 | command: --interval 60 --cleanup --label-enable
20 |
--------------------------------------------------------------------------------
/neurons/executor/docker_build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -eux -o pipefail
3 |
4 | IMAGE_NAME="daturaai/compute-subnet-executor:$TAG"
5 |
6 | docker build --build-context datura=../../datura -t $IMAGE_NAME .
--------------------------------------------------------------------------------
/neurons/executor/docker_publish.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -eux -o pipefail
3 |
4 | source ./docker_build.sh
5 |
6 | echo "$DOCKERHUB_PAT" | docker login -u "$DOCKERHUB_USERNAME" --password-stdin
7 | docker push "$IMAGE_NAME"
--------------------------------------------------------------------------------
/neurons/executor/docker_runner_build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -eux -o pipefail
3 |
4 | IMAGE_NAME="daturaai/compute-subnet-executor-runner:$TAG"
5 |
6 | docker build --file Dockerfile.runner --build-arg targetFile=$TARGET_FILE -t $IMAGE_NAME .
--------------------------------------------------------------------------------
/neurons/executor/docker_runner_publish.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -eux -o pipefail
3 |
4 | source ./docker_runner_build.sh
5 |
6 | echo "$DOCKERHUB_PAT" | docker login -u "$DOCKERHUB_USERNAME" --password-stdin
7 | docker push "$IMAGE_NAME"
--------------------------------------------------------------------------------
/neurons/executor/entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | set -eu
3 |
4 | docker compose up --pull always --detach --wait --force-recreate
5 |
6 | # Clean docker images
7 | docker image prune -f
8 |
9 | while true
10 | do
11 | docker compose logs -f
12 | echo 'All containers died'
13 | sleep 10
14 | done
15 |
--------------------------------------------------------------------------------
/neurons/executor/libdmcompverify.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Datura-ai/compute-subnet/39f006fd39394b9972ab509cc7ab558a8a374516/neurons/executor/libdmcompverify.so
--------------------------------------------------------------------------------
/neurons/executor/migrations/README:
--------------------------------------------------------------------------------
1 | Generic single-database configuration.
--------------------------------------------------------------------------------
/neurons/executor/migrations/env.py:
--------------------------------------------------------------------------------
1 | from logging.config import fileConfig
2 |
3 | from sqlalchemy import engine_from_config
4 | from sqlalchemy import pool
5 |
6 | from alembic import context
7 | from sqlmodel import SQLModel
8 | from core.config import settings
9 |
10 | from models import *
11 |
12 | # this is the Alembic Config object, which provides
13 | # access to the values within the .ini file in use.
14 | config = context.config
15 |
16 | # Interpret the config file for Python logging.
17 | # This line sets up loggers basically.
18 | if config.config_file_name is not None:
19 | fileConfig(config.config_file_name)
20 |
21 | config.set_main_option("sqlalchemy.url", settings.DB_URI)
22 |
23 | # add your model's MetaData object here
24 | # for 'autogenerate' support
25 | # from myapp import mymodel
26 | # target_metadata = mymodel.Base.metadata
27 | target_metadata = SQLModel.metadata
28 |
29 | # other values from the config, defined by the needs of env.py,
30 | # can be acquired:
31 | # my_important_option = config.get_main_option("my_important_option")
32 | # ... etc.
33 |
34 |
35 | def run_migrations_offline() -> None:
36 | """Run migrations in 'offline' mode.
37 |
38 | This configures the context with just a URL
39 | and not an Engine, though an Engine is acceptable
40 | here as well. By skipping the Engine creation
41 | we don't even need a DBAPI to be available.
42 |
43 | Calls to context.execute() here emit the given string to the
44 | script output.
45 |
46 | """
47 | url = config.get_main_option("sqlalchemy.url")
48 | context.configure(
49 | url=url,
50 | target_metadata=target_metadata,
51 | literal_binds=True,
52 | dialect_opts={"paramstyle": "named"},
53 | )
54 |
55 | with context.begin_transaction():
56 | context.run_migrations()
57 |
58 |
59 | def run_migrations_online() -> None:
60 | """Run migrations in 'online' mode.
61 |
62 | In this scenario we need to create an Engine
63 | and associate a connection with the context.
64 |
65 | """
66 | connectable = engine_from_config(
67 | config.get_section(config.config_ini_section, {}),
68 | prefix="sqlalchemy.",
69 | poolclass=pool.NullPool,
70 | )
71 |
72 | with connectable.connect() as connection:
73 | context.configure(
74 | connection=connection, target_metadata=target_metadata
75 | )
76 |
77 | with context.begin_transaction():
78 | context.run_migrations()
79 |
80 |
81 | if context.is_offline_mode():
82 | run_migrations_offline()
83 | else:
84 | run_migrations_online()
85 |
--------------------------------------------------------------------------------
/neurons/executor/migrations/script.py.mako:
--------------------------------------------------------------------------------
1 | """${message}
2 |
3 | Revision ID: ${up_revision}
4 | Revises: ${down_revision | comma,n}
5 | Create Date: ${create_date}
6 |
7 | """
8 | from typing import Sequence, Union
9 |
10 | from alembic import op
11 | import sqlalchemy as sa
12 | import sqlmodel
13 | import sqlmodel.sql.sqltypes
14 | ${imports if imports else ""}
15 |
16 | # revision identifiers, used by Alembic.
17 | revision: str = ${repr(up_revision)}
18 | down_revision: Union[str, None] = ${repr(down_revision)}
19 | branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
20 | depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
21 |
22 |
23 | def upgrade() -> None:
24 | ${upgrades if upgrades else "pass"}
25 |
26 |
27 | def downgrade() -> None:
28 | ${downgrades if downgrades else "pass"}
29 |
--------------------------------------------------------------------------------
/neurons/executor/migrations/versions/8b04ab8f89f9_podlog.py:
--------------------------------------------------------------------------------
1 | """PodLog
2 |
3 | Revision ID: 8b04ab8f89f9
4 | Revises:
5 | Create Date: 2025-05-07 17:50:05.144051
6 |
7 | """
8 | from typing import Sequence, Union
9 |
10 | from alembic import op
11 | import sqlalchemy as sa
12 | import sqlmodel
13 | import sqlmodel.sql.sqltypes
14 |
15 |
16 | # revision identifiers, used by Alembic.
17 | revision: str = '8b04ab8f89f9'
18 | down_revision: Union[str, None] = None
19 | branch_labels: Union[str, Sequence[str], None] = None
20 | depends_on: Union[str, Sequence[str], None] = None
21 |
22 |
23 | def upgrade() -> None:
24 | # ### commands auto generated by Alembic - please adjust! ###
25 | op.create_table('podlog',
26 | sa.Column('uuid', sa.Uuid(), nullable=False),
27 | sa.Column('container_name', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
28 | sa.Column('container_id', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
29 | sa.Column('event', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
30 | sa.Column('exit_code', sa.Integer(), nullable=True),
31 | sa.Column('reason', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
32 | sa.Column('error', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
33 | sa.Column('created_at', sa.DateTime(), nullable=False),
34 | sa.PrimaryKeyConstraint('uuid')
35 | )
36 | # ### end Alembic commands ###
37 |
38 |
39 | def downgrade() -> None:
40 | # ### commands auto generated by Alembic - please adjust! ###
41 | op.drop_table('podlog')
42 | # ### end Alembic commands ###
43 |
--------------------------------------------------------------------------------
/neurons/executor/nvidia_docker_sysbox_setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | sudo apt-get update
5 | sudo apt-get install -y jq nvidia-container-toolkit
6 |
7 | # Copy configuration files
8 | SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
9 |
10 | # Copy daemon.json if it exists
11 | if [ -f "$SCRIPT_DIR/daemon.json" ]; then
12 | sudo mkdir -p /etc/docker
13 | sudo cp "$SCRIPT_DIR/daemon.json" /etc/docker/daemon.json
14 | fi
15 |
16 | # Copy config.toml if it exists
17 | if [ -f "$SCRIPT_DIR/config.toml" ]; then
18 | sudo mkdir -p /etc/nvidia-container-runtime
19 | sudo cp "$SCRIPT_DIR/config.toml" /etc/nvidia-container-runtime/config.toml
20 | fi
21 |
22 |
23 | # Install Sysbox v0.6.6
24 | sudo apt-get install -y ./sysbox-ce_0.6.6-0.linux_amd64.deb
25 |
--------------------------------------------------------------------------------
/neurons/executor/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "executor"
3 | version = "3.0.0"
4 | description = "Compute subnet executor"
5 | authors = [
6 | {name = "waris", email = "waris0609@outlook.com"},
7 | ]
8 | dependencies = [
9 | "aiohappyeyeballs==2.4.6",
10 | "aiohttp==3.10.11",
11 | "aiosignal==1.3.2",
12 | "alembic==1.14.0",
13 | "annotated-types==0.7.0",
14 | "anyio==4.8.0",
15 | "async-property==0.2.2",
16 | "async-substrate-interface==1.0.0",
17 | "asyncstdlib==3.13.0",
18 | "attrs==25.1.0",
19 | "backoff==2.2.1",
20 | "base58==2.1.1",
21 | "bittensor==9.0.0",
22 | "bittensor-cli==9.0.0",
23 | "bittensor-commit-reveal==0.2.0",
24 | "bittensor-wallet==3.0.3",
25 | "bt-decode==0.5.0a2",
26 | "certifi==2025.1.31",
27 | "cffi==1.17.1",
28 | "charset-normalizer==3.4.1",
29 | "click==8.1.8",
30 | "colorama==0.4.6",
31 | "cryptography==43.0.3",
32 | "cytoolz==1.0.1",
33 | "datura @ file:///${PROJECT_ROOT}/../../datura",
34 | "decorator==5.1.1",
35 | "eth-hash==0.7.1",
36 | "eth-typing==5.1.0",
37 | "eth-utils==2.2.2",
38 | "fastapi==0.110.3",
39 | "frozenlist==1.5.0",
40 | "fuzzywuzzy==0.18.0",
41 | "gitdb==4.0.12",
42 | "GitPython==3.1.44",
43 | "h11==0.14.0",
44 | "idna==3.10",
45 | "iniconfig==2.0.0",
46 | "Jinja2==3.1.5",
47 | "Levenshtein==0.26.1",
48 | "markdown-it-py==3.0.0",
49 | "MarkupSafe==3.0.2",
50 | "mdurl==0.1.2",
51 | "more-itertools==10.6.0",
52 | "msgpack==1.1.0",
53 | "msgpack-numpy-opentensor==0.5.0",
54 | "multidict==6.1.0",
55 | "munch==2.5.0",
56 | "narwhals==1.26.0",
57 | "nest-asyncio==1.6.0",
58 | "netaddr==1.3.0",
59 | "numpy==2.0.2",
60 | "nvidia-ml-py==12.570.86",
61 | "packaging==24.2",
62 | "password-strength==0.0.3.post2",
63 | "plotille==5.0.0",
64 | "plotly==6.0.0",
65 | "pluggy==1.5.0",
66 | "propcache==0.2.1",
67 | "psycopg2-binary==2.9.10",
68 | "py==1.11.0",
69 | "py-bip39-bindings==0.1.11",
70 | "pycparser==2.22",
71 | "pycryptodome==3.21.0",
72 | "pydantic==2.10.6",
73 | "pydantic-settings==2.7.1",
74 | "pydantic_core==2.27.2",
75 | "Pygments==2.19.1",
76 | "pytest==8.3.4",
77 | "python-dotenv==1.0.1",
78 | "python-Levenshtein==0.26.1",
79 | "python-statemachine==2.5.0",
80 | "pywry==0.6.2",
81 | "PyYAML==6.0.2",
82 | "RapidFuzz==3.12.1",
83 | "requests==2.32.3",
84 | "retry==0.9.2",
85 | "rich==13.9.4",
86 | "scalecodec==1.2.11",
87 | "setproctitle==1.3.4",
88 | "setuptools==70.0.0",
89 | "shellingham==1.5.4",
90 | "six==1.17.0",
91 | "smmap==5.0.2",
92 | "sniffio==1.3.1",
93 | "SQLAlchemy==2.0.36",
94 | "sqlmodel==0.0.22",
95 | "starlette==0.37.2",
96 | "termcolor==2.5.0",
97 | "toml==0.10.0",
98 | "toolz==1.0.0",
99 | "typer==0.15.1",
100 | "typing_extensions==4.12.2",
101 | "urllib3==2.3.0",
102 | "uvicorn==0.34.0",
103 | "websockets==14.2",
104 | "wheel==0.45.1",
105 | "xxhash==3.5.0",
106 | "yarl==1.18.3",
107 | "pynvml==12.0.0",
108 | "psutil==6.1.1",
109 | "docker>=7.1.0",
110 | "netmeasure>=1.4.0",
111 | "speedcheck>=0.0.5",
112 | ]
113 | requires-python = "==3.11.11"
114 | readme = "README.md"
115 | license = {text = "MIT"}
116 |
117 | [build-system]
118 | requires = ["pdm-backend"]
119 | build-backend = "pdm.backend"
120 |
121 |
122 | [tool.pdm]
123 | distribution = true
124 |
--------------------------------------------------------------------------------
/neurons/executor/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | set -eux -o pipefail
3 |
4 | # start ssh service
5 | ssh-keygen -A
6 | service ssh start
7 |
8 | # db migrate
9 | alembic upgrade head
10 |
11 | # run fastapi app
12 | python src/executor.py
--------------------------------------------------------------------------------
/neurons/executor/src/core/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Datura-ai/compute-subnet/39f006fd39394b9972ab509cc7ab558a8a374516/neurons/executor/src/core/__init__.py
--------------------------------------------------------------------------------
/neurons/executor/src/core/config.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 | from pydantic import Field
3 | from pydantic_settings import BaseSettings, SettingsConfigDict
4 |
5 |
6 | class Settings(BaseSettings):
7 | model_config = SettingsConfigDict(env_file=".env", extra="ignore")
8 | PROJECT_NAME: str = "compute-subnet-executor"
9 |
10 | INTERNAL_PORT: int = Field(env="INTERNAL_PORT", default=8001)
11 | SSH_PORT: int = Field(env="SSH_PORT", default=2200)
12 | SSH_PUBLIC_PORT: Optional[int] = Field(env="SSH_PUBLIC_PORT", default=None)
13 |
14 | MINER_HOTKEY_SS58_ADDRESS: str = Field(env="MINER_HOTKEY_SS58_ADDRESS")
15 |
16 | RENTING_PORT_RANGE: Optional[str] = Field(env="RENTING_PORT_RANGE", default=None)
17 | RENTING_PORT_MAPPINGS: Optional[str] = Field(env="RENTING_PORT_MAPPINGS", default=None)
18 | RENTING_PRICE: Optional[float] = Field(env="RENTING_PRICE", default=None)
19 |
20 | ENV: str = Field(env="ENV", default="dev")
21 |
22 | DB_URI: str = Field(env="DB_URI")
23 |
24 |
25 | settings = Settings()
26 |
--------------------------------------------------------------------------------
/neurons/executor/src/core/db.py:
--------------------------------------------------------------------------------
1 | from collections.abc import Generator
2 | from typing import Annotated
3 | from contextlib import contextmanager
4 |
5 | from fastapi import Depends
6 | from sqlmodel import Session, SQLModel, create_engine
7 |
8 | from core.config import settings
9 |
10 | engine = create_engine(
11 | settings.DB_URI,
12 | pool_size=10,
13 | max_overflow=20,
14 | pool_pre_ping=True,
15 | pool_reset_on_return="rollback",
16 | pool_timeout=30,
17 | pool_recycle=1800,
18 | pool_use_lifo=True,
19 | )
20 |
21 |
22 | SQLModel.metadata.create_all(engine)
23 |
24 |
25 | @contextmanager
26 | def get_session() -> Generator[Session, None, None]:
27 | with Session(engine) as session:
28 | yield session
29 |
30 |
--------------------------------------------------------------------------------
/neurons/executor/src/core/logger.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import json
3 |
4 |
5 | def get_logger(name: str):
6 | logger = logging.getLogger(name)
7 | handler = logging.StreamHandler()
8 | formatter = logging.Formatter(
9 | "Name: %(name)s | Time: %(asctime)s | Level: %(levelname)s | File: %(filename)s | Function: %(funcName)s | Line: %(lineno)s | Process: %(process)d | Message: %(message)s"
10 | )
11 | handler.setFormatter(formatter)
12 | logger.addHandler(handler)
13 | logger.setLevel(logging.INFO)
14 | return logger
15 |
16 |
17 | class StructuredMessage:
18 | def __init__(self, message, extra: dict):
19 | self.message = message
20 | self.extra = extra
21 |
22 | def __str__(self):
23 | return "%s >>> %s" % (self.message, json.dumps(self.extra)) # noqa
24 |
25 |
26 | _m = StructuredMessage
27 |
--------------------------------------------------------------------------------
/neurons/executor/src/daos/base.py:
--------------------------------------------------------------------------------
1 | from typing import Generic, TypeVar
2 | from uuid import UUID
3 |
4 | from sqlmodel import select, Session
5 |
6 | from core.logger import get_logger
7 |
8 | T = TypeVar("T")
9 | logger = get_logger(name="app")
10 |
11 |
12 | class BaseDao(Generic[T]):
13 | model: type[T] = None
14 | primary_key_field = "id"
15 |
16 | def get_primary_key_value(self, instance):
17 | return getattr(instance, self.primary_key_field)
18 |
19 | def find_by_id(self, session: Session, id: UUID | str) -> T | None:
20 | if self.model is None:
21 | raise NotImplementedError
22 |
23 | return session.exec(
24 | select(self.model).where(getattr(self.model, self.primary_key_field) == id)
25 | ).first()
26 |
27 | def save(self, session: Session, instance) -> T:
28 | try:
29 | session.add(instance)
30 | session.commit()
31 | session.refresh(instance)
32 | return instance
33 | except Exception as e:
34 | self.safe_rollback(session)
35 | logger.error("Error saving instance: %s", e, exc_info=True)
36 | raise
37 |
38 | def safe_rollback(self, session: Session):
39 | try:
40 | if session.in_transaction():
41 | session.rollback()
42 | except Exception as e:
43 | logger.error("Error rolling back transaction: %s", e, exc_info=True)
44 |
45 | def delete(self, session: Session, instance):
46 | try:
47 | session.delete(instance)
48 | session.commit()
49 | except Exception as e:
50 | self.safe_rollback(session)
51 | logger.error("Error deleting instance: %s", e, exc_info=True)
52 | raise
53 |
54 | def update(self, session: Session, id: UUID | str, payload: dict) -> T | None:
55 | """
56 | Update an instance by ID with the provided payload.
57 |
58 | :param id (UUID | str): The ID of the instance to update.
59 | :param payload (dict): The payload to update the instance with.
60 | """
61 | instance = self.find_by_id(session, id)
62 | if instance:
63 | try:
64 | for key, value in payload.items():
65 | setattr(instance, key, value)
66 | session.commit()
67 | session.refresh(instance)
68 | return instance
69 | except Exception as e:
70 | self.safe_rollback(session)
71 |                 logger.error("Error updating instance: %s", e, exc_info=True)
72 | raise
73 | return None
74 |
--------------------------------------------------------------------------------
/neurons/executor/src/daos/pod_log.py:
--------------------------------------------------------------------------------
1 | from sqlmodel import select, Session
2 |
3 | from daos.base import BaseDao
4 | from models.pod_log import PodLog
5 |
6 |
7 | class PodLogDao(BaseDao[PodLog]):
8 |     def find_by_container_name(self, session: Session, container_name: str) -> list[PodLog]:
9 | statement = select(PodLog).where(
10 | (PodLog.container_name == container_name) | (PodLog.container_name == None)
11 | ).order_by(PodLog.created_at)
12 | return session.exec(statement).all()
13 |
--------------------------------------------------------------------------------
/neurons/executor/src/decrypt_challenge.py:
--------------------------------------------------------------------------------
1 | import os
2 | import argparse
3 | from ctypes import CDLL, c_longlong, POINTER, c_int, c_void_p, c_char_p
4 |
5 | class DMCompVerifyWrapper:
6 | def __init__(self, lib_name: str):
7 | """
8 | Constructor, differentiate miner vs validator libs.
9 | """
10 | self._initialized = False
11 | lib_path = os.path.join(os.path.dirname(__file__), lib_name)
12 | self._lib = CDLL(lib_path)
13 | self._setup_lib_functions()
14 |
15 | def _setup_lib_functions(self):
16 | """
17 | Set up function signatures for the library.
18 | """
19 | # Set up function signatures for the library.
20 | self._lib.DMCompVerify_new.argtypes = [c_longlong, c_longlong] # Parameters (long m_dim_n, long m_dim_k)
21 | self._lib.DMCompVerify_new.restype = POINTER(c_void_p) # Return type is a pointer to a structure.
22 |
23 | self._lib.generateChallenge.argtypes = [POINTER(c_void_p), c_longlong, c_char_p, c_char_p]
24 | self._lib.generateChallenge.restype = None
25 |
26 | self._lib.processChallengeResult.argtypes = [POINTER(c_void_p), c_longlong, c_char_p]
27 | self._lib.processChallengeResult.restype = c_char_p
28 |
29 | self._lib.getUUID.argtypes = [c_void_p]
30 | self._lib.getUUID.restype = c_char_p
31 |
32 | self._lib.free.argtypes = [c_void_p]
33 | self._lib.free.restype = None
34 |
35 | self._initialized = True
36 |
37 | def DMCompVerify_new(self, m_dim_n: int, m_dim_k: int):
38 | """
39 | Wrap the C++ function DMCompVerify_new.
40 | Creates a new DMCompVerify object in C++.
41 | """
42 | return self._lib.DMCompVerify_new(m_dim_n, m_dim_k)
43 |
44 | def generateChallenge(self, verifier_ptr: POINTER(c_void_p), seed: int, machine_info: str, uuid: str):
45 | """
46 | Wrap the C++ function generateChallenge.
47 | Generates a challenge using the provided DMCompVerify pointer.
48 | """
49 | machine_info_bytes = machine_info.encode('utf-8')
50 | uuid_bytes = uuid.encode('utf-8')
51 | self._lib.generateChallenge(verifier_ptr, seed, machine_info_bytes, uuid_bytes)
52 |
53 | def processChallengeResult(self, verifier_ptr: POINTER(c_void_p), seed: int, cipher_text: str) -> int:
54 | """
55 | Wrap the C++ function processChallengeResult.
56 | Processes the challenge result using the provided DMCompVerify pointer.
57 | """
58 |         return self._lib.processChallengeResult(verifier_ptr, seed, cipher_text)
59 |
60 | def getUUID(self, verifier_ptr: POINTER(c_void_p)) -> str:
61 | """
62 | Wrap the C++ function getUUID.
63 | Retrieves the UUID as a string.
64 | """
65 | # Extract the pointer returned by the C++ function, and convert it to a C string (char*) using c_char_p
66 | uuid_ptr = self._lib.getUUID(verifier_ptr)
67 |
68 | if uuid_ptr:
69 | uuid = c_char_p(uuid_ptr).value # Decode the C string
70 | return uuid.decode('utf-8')
71 | else:
72 | return None
73 |
74 | def free(self, ptr: c_void_p):
75 | """
76 | Frees memory allocated for the given pointer.
77 | """
78 | self._lib.free(ptr)
79 |
80 | def decrypt_challenge():
81 | parser = argparse.ArgumentParser(description="DMCompVerify Python Wrapper")
82 | parser.add_argument("--lib", type=str, default="/usr/lib/libdmcompverify.so", help="Path to the shared library")
83 | parser.add_argument("--dim_n", type=int, default=1981, help="Matrix dimension n")
84 | parser.add_argument("--dim_k", type=int, default=1555929, help="Matrix dimension k")
85 | parser.add_argument("--seed", type=int, default=1743502434, help="Random seed")
86 | parser.add_argument("--cipher_text", type=str, default="e28702c2f187f34d56744d64a4399e00cbecbde2d3f6ca53a8abec5cbc40481d42a1a505", help="Cipher Text")
87 |
88 | args = parser.parse_args()
89 |
90 | # Example of usage:
91 | wrapper = DMCompVerifyWrapper(args.lib)
92 |
93 | # Create a new DMCompVerify object
94 | verifier_ptr = wrapper.DMCompVerify_new(args.dim_n, args.dim_k)
95 |
96 | # Example of processing challenge result
97 | wrapper.processChallengeResult(verifier_ptr, args.seed, args.cipher_text.encode('utf-8'))
98 |
99 | # Example to get the UUID
100 | uuid = wrapper.getUUID(verifier_ptr)
101 | print("UUID: ", uuid)
102 |
103 | # Free resources
104 | wrapper.free(verifier_ptr)
105 |
106 | return uuid
107 |
108 | if __name__ == "__main__":
109 | decrypt_challenge()
--------------------------------------------------------------------------------
/neurons/executor/src/executor.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from fastapi import FastAPI
4 | import uvicorn
5 |
6 | from core.config import settings
7 | from middlewares.miner import MinerMiddleware
8 | from routes.apis import apis_router
9 |
10 | # Set up logging
11 | logging.basicConfig(level=logging.INFO)
12 |
13 | app = FastAPI(
14 | title=settings.PROJECT_NAME,
15 | )
16 |
17 | app.add_middleware(MinerMiddleware)
18 | app.include_router(apis_router)
19 |
20 | reload = True if settings.ENV == "dev" else False
21 |
22 | if __name__ == "__main__":
23 | uvicorn.run("executor:app", host="0.0.0.0", port=settings.INTERNAL_PORT, reload=reload)
24 |
--------------------------------------------------------------------------------
/neurons/executor/src/middlewares/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Datura-ai/compute-subnet/39f006fd39394b9972ab509cc7ab558a8a374516/neurons/executor/src/middlewares/__init__.py
--------------------------------------------------------------------------------
/neurons/executor/src/middlewares/miner.py:
--------------------------------------------------------------------------------
1 | import bittensor
2 | from fastapi.responses import JSONResponse
3 | from payloads.miner import MinerAuthPayload
4 | from pydantic import ValidationError
5 | from starlette.middleware.base import BaseHTTPMiddleware
6 |
7 | from core.config import settings
8 | from core.logger import _m, get_logger
9 |
10 | logger = get_logger(__name__)
11 |
12 |
13 | class MinerMiddleware(BaseHTTPMiddleware):
14 | def __init__(self, app) -> None:
15 | super().__init__(app)
16 |
17 | async def dispatch(self, request, call_next):
18 | try:
19 | body_bytes = await request.body()
20 | miner_ip = request.client.host
21 | default_extra = {"miner_ip": miner_ip}
22 |
23 | # Parse it into the Pydantic model
24 | payload = MinerAuthPayload.model_validate_json(body_bytes)
25 |
26 | logger.info(_m("miner ip", extra=default_extra))
27 |
28 | keypair = bittensor.Keypair(ss58_address=settings.MINER_HOTKEY_SS58_ADDRESS)
29 | if not keypair.verify(payload.data_to_sign, payload.signature):
30 | logger.error(
31 | _m(
32 | "Auth failed. incorrect signature",
33 | extra={
34 | **default_extra,
35 | "signature": payload.signature,
36 | "data_to_sign": payload.data_to_sign,
37 | "miner_hotkey": settings.MINER_HOTKEY_SS58_ADDRESS,
38 | },
39 | )
40 | )
41 | return JSONResponse(status_code=401, content="Unauthorized")
42 |
43 | response = await call_next(request)
44 | return response
45 | except ValidationError as e:
46 | # Handle validation error if needed
47 | error_message = str(_m("Validation Error", extra={"errors": str(e.errors())}))
48 | logger.error(error_message)
49 | return JSONResponse(status_code=422, content=error_message)
50 |
--------------------------------------------------------------------------------
/neurons/executor/src/models/__init__.py:
--------------------------------------------------------------------------------
1 | from models.pod_log import * # noqa
--------------------------------------------------------------------------------
/neurons/executor/src/models/pod_log.py:
--------------------------------------------------------------------------------
1 | import uuid
2 | from uuid import UUID
3 | from datetime import datetime
4 | from sqlmodel import Field, SQLModel
5 |
6 |
7 | class PodLog(SQLModel, table=True):
8 | """Task model."""
9 |
10 | uuid: UUID = Field(default_factory=uuid.uuid4, primary_key=True)
11 | container_name: str | None = None
12 | container_id: str | None = None
13 | event: str | None = None
14 | exit_code: int | None = None
15 | reason: str | None = None
16 | error: str | None = None
17 | created_at: datetime = Field(default_factory=datetime.utcnow)
18 |
19 |
--------------------------------------------------------------------------------
/neurons/executor/src/payloads/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Datura-ai/compute-subnet/39f006fd39394b9972ab509cc7ab558a8a374516/neurons/executor/src/payloads/__init__.py
--------------------------------------------------------------------------------
/neurons/executor/src/payloads/miner.py:
--------------------------------------------------------------------------------
1 | from pydantic import BaseModel
2 |
3 |
4 | class MinerAuthPayload(BaseModel):
5 | data_to_sign: str
6 | signature: str
7 |
8 |
9 | class UploadSShKeyPayload(MinerAuthPayload):
10 | public_key: str
11 |
12 |
13 | class GetPodLogsPayload(MinerAuthPayload):
14 | container_name: str
15 |
--------------------------------------------------------------------------------
/neurons/executor/src/routes/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Datura-ai/compute-subnet/39f006fd39394b9972ab509cc7ab558a8a374516/neurons/executor/src/routes/__init__.py
--------------------------------------------------------------------------------
/neurons/executor/src/routes/apis.py:
--------------------------------------------------------------------------------
1 | from typing import Annotated
2 |
3 | from fastapi import APIRouter, Depends
4 | from services.miner_service import MinerService
5 | from services.pod_log_service import PodLogService
6 |
7 | from payloads.miner import UploadSShKeyPayload, GetPodLogsPayload
8 |
9 | apis_router = APIRouter()
10 |
11 |
12 | @apis_router.post("/upload_ssh_key")
13 | async def upload_ssh_key(
14 | payload: UploadSShKeyPayload, miner_service: Annotated[MinerService, Depends(MinerService)]
15 | ):
16 | return await miner_service.upload_ssh_key(payload)
17 |
18 |
19 | @apis_router.post("/remove_ssh_key")
20 | async def remove_ssh_key(
21 | payload: UploadSShKeyPayload, miner_service: Annotated[MinerService, Depends(MinerService)]
22 | ):
23 | return await miner_service.remove_ssh_key(payload)
24 |
25 |
26 | @apis_router.post("/pod_logs")
27 | async def get_pod_logs(
28 |     payload: GetPodLogsPayload, pod_log_service: Annotated[PodLogService, Depends(PodLogService)]
29 | ):
30 |     return await pod_log_service.find_by_container_name(payload.container_name)
31 |
--------------------------------------------------------------------------------
/neurons/executor/src/services/miner_service.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import sys
3 | import logging
4 | from pathlib import Path
5 |
6 | from typing import Annotated
7 | from fastapi import Depends
8 |
9 | from core.config import settings
10 | from services.ssh_service import SSHService
11 |
12 | from payloads.miner import UploadSShKeyPayload
13 |
14 | logger = logging.getLogger(__name__)
15 |
16 |
17 | class MinerService:
18 | def __init__(
19 | self,
20 | ssh_service: Annotated[SSHService, Depends(SSHService)],
21 | ):
22 | self.ssh_service = ssh_service
23 |
24 |     async def upload_ssh_key(self, payload: UploadSShKeyPayload):
25 |         self.ssh_service.add_pubkey_to_host(payload.public_key)
26 |
27 | return {
28 | "ssh_username": self.ssh_service.get_current_os_user(),
29 | "ssh_port": settings.SSH_PUBLIC_PORT or settings.SSH_PORT,
30 | "python_path": sys.executable,
31 | "root_dir": str(Path(__file__).resolve().parents[2]),
32 | "port_range": settings.RENTING_PORT_RANGE,
33 | "port_mappings": settings.RENTING_PORT_MAPPINGS,
34 | "price": settings.RENTING_PRICE,
35 | }
36 |
37 |     async def remove_ssh_key(self, payload: UploadSShKeyPayload):
38 |         return self.ssh_service.remove_pubkey_from_host(payload.public_key)
39 |
--------------------------------------------------------------------------------
/neurons/executor/src/services/pod_log_service.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from typing import Annotated
4 | from fastapi import Depends
5 |
6 | from core.db import get_session
7 | from daos.pod_log import PodLogDao
8 |
9 |
10 | logger = logging.getLogger(__name__)
11 |
12 |
13 | class PodLogService:
14 | def __init__(
15 | self,
16 | pod_log_dao: Annotated[PodLogDao, Depends(PodLogDao)]
17 | ):
18 | self.pod_log_dao = pod_log_dao
19 |
20 |     async def find_by_container_name(self, container_name: str):
21 | with get_session() as session:
22 |             return self.pod_log_dao.find_by_container_name(session, container_name)
23 |
--------------------------------------------------------------------------------
/neurons/executor/src/services/ssh_service.py:
--------------------------------------------------------------------------------
1 | import getpass
2 | import os
3 |
4 |
5 | class SSHService:
6 | def add_pubkey_to_host(self, pub_key: str):
7 | with open(os.path.expanduser("~/.ssh/authorized_keys"), "a") as file:
8 | file.write(pub_key + "\n")
9 |
10 | def remove_pubkey_from_host(self, pub_key: str):
11 | authorized_keys_path = os.path.expanduser("~/.ssh/authorized_keys")
12 |
13 | with open(authorized_keys_path, "r") as file:
14 | lines = file.readlines()
15 |
16 | with open(authorized_keys_path, "w") as file:
17 | for line in lines:
18 | if line.strip() != pub_key:
19 | file.write(line)
20 |
21 | def get_current_os_user(self) -> str:
22 | return getpass.getuser()
23 |
--------------------------------------------------------------------------------
/neurons/executor/sysbox-ce_0.6.6-0.linux_amd64.deb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Datura-ai/compute-subnet/39f006fd39394b9972ab509cc7ab558a8a374516/neurons/executor/sysbox-ce_0.6.6-0.linux_amd64.deb
--------------------------------------------------------------------------------
/neurons/executor/version.txt:
--------------------------------------------------------------------------------
1 | 3.7.5
--------------------------------------------------------------------------------
/neurons/miners/.dockerignore:
--------------------------------------------------------------------------------
1 | .pdm.toml
2 | .pdm-python
3 | .pdm-build/
4 | __pycache__/
5 | .venv
6 | docker_build.sh
7 | docker_publish.sh
8 | docker-compose.yml
9 | .env*
--------------------------------------------------------------------------------
/neurons/miners/.env.template:
--------------------------------------------------------------------------------
1 | BITTENSOR_WALLET_NAME=default
2 | BITTENSOR_WALLET_HOTKEY_NAME=default
3 |
4 | POSTGRES_DB=compute-subnet-db
5 | POSTGRES_PORT=7432
6 | POSTGRES_USER=postgres
7 | POSTGRES_PASSWORD=password
8 | SQLALCHEMY_DATABASE_URI=postgresql://postgres:password@localhost:7432/compute-subnet-db
9 |
10 | BITTENSOR_NETUID=51
11 | BITTENSOR_NETWORK=finney
12 |
13 | EXTERNAL_IP_ADDRESS= # pro tip: use `curl ifconfig.me` to find this out
14 | INTERNAL_PORT=8000
15 | EXTERNAL_PORT=8000 # make sure this port is open to external connections
16 |
17 | HOST_WALLET_DIR=/home/ubuntu/.bittensor/wallets
18 |
--------------------------------------------------------------------------------
/neurons/miners/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 |
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | # in version control.
109 | # https://pdm-project.org/#use-with-ide
110 | .pdm.toml
111 | .pdm-python
112 | .pdm-build/
113 |
114 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
115 | __pypackages__/
116 |
117 | # Celery stuff
118 | celerybeat-schedule
119 | celerybeat.pid
120 |
121 | # SageMath parsed files
122 | *.sage.py
123 |
124 | # Environments
125 | .env
126 | .venv
127 | env/
128 | venv/
129 | ENV/
130 | env.bak/
131 | venv.bak/
132 |
133 | # Spyder project settings
134 | .spyderproject
135 | .spyproject
136 |
137 | # Rope project settings
138 | .ropeproject
139 |
140 | # mkdocs documentation
141 | /site
142 |
143 | # mypy
144 | .mypy_cache/
145 | .dmypy.json
146 | dmypy.json
147 |
148 | # Pyre type checker
149 | .pyre/
150 |
151 | # pytype static type analyzer
152 | .pytype/
153 |
154 | # Cython debug symbols
155 | cython_debug/
156 |
157 | # PyCharm
158 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
159 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
160 | # and can be added to the global gitignore or merged into this file. For a more nuclear
161 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
162 | #.idea/
163 |
164 | temp/
--------------------------------------------------------------------------------
/neurons/miners/Dockerfile:
--------------------------------------------------------------------------------
1 | ARG BASE_IMAGE=python:3.11-slim
2 |
3 | FROM $BASE_IMAGE
4 |
5 | WORKDIR /root/app
6 |
7 | RUN apt-get update \
8 | && apt-get install -y wget git \
9 | && rm -rf /var/lib/apt/lists/*
10 |
11 | RUN pip install -U pdm
12 | ENV PDM_CHECK_UPDATE=false
13 |
14 | COPY pyproject.toml pdm.lock README.md ./
15 | COPY --from=datura . /datura
16 |
17 | RUN pdm lock --check
18 | RUN pdm install --prod --no-editable
19 |
20 | COPY . .
21 |
22 | ENV ENV=prod
23 | ENV PYTHONUNBUFFERED=1
24 |
25 | CMD ["bash", "run.sh"]
--------------------------------------------------------------------------------
/neurons/miners/Dockerfile.runner:
--------------------------------------------------------------------------------
1 | FROM docker:26-cli
2 | WORKDIR /root/miner
3 | ARG targetFile
4 | COPY ${targetFile} docker-compose.yml
5 | COPY entrypoint.sh /entrypoint.sh
6 | COPY version.txt .
7 |
8 | RUN chmod u+x /entrypoint.sh
9 | ENTRYPOINT ["/entrypoint.sh"]
10 |
--------------------------------------------------------------------------------
/neurons/miners/alembic.ini:
--------------------------------------------------------------------------------
1 | # A generic, single database configuration.
2 |
3 | [alembic]
4 | # path to migration scripts
5 | # Use forward slashes (/) also on windows to provide an os agnostic path
6 | script_location = migrations
7 |
8 | # template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
9 | # Uncomment the line below if you want the files to be prepended with date and time
10 | # see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file
11 | # for all available tokens
12 | # file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
13 |
14 | # sys.path path, will be prepended to sys.path if present.
15 | # defaults to the current working directory.
16 | prepend_sys_path = src
17 |
18 | # timezone to use when rendering the date within the migration file
19 | # as well as the filename.
20 | # If specified, requires the python>=3.9 or backports.zoneinfo library.
21 | # Any required deps can be installed by adding `alembic[tz]` to the pip requirements
22 | # string value is passed to ZoneInfo()
23 | # leave blank for localtime
24 | # timezone =
25 |
26 | # max length of characters to apply to the "slug" field
27 | # truncate_slug_length = 40
28 |
29 | # set to 'true' to run the environment during
30 | # the 'revision' command, regardless of autogenerate
31 | # revision_environment = false
32 |
33 | # set to 'true' to allow .pyc and .pyo files without
34 | # a source .py file to be detected as revisions in the
35 | # versions/ directory
36 | # sourceless = false
37 |
38 | # version location specification; This defaults
39 | # to migrations/versions. When using multiple version
40 | # directories, initial revisions must be specified with --version-path.
41 | # The path separator used here should be the separator specified by "version_path_separator" below.
42 | # version_locations = %(here)s/bar:%(here)s/bat:migrations/versions
43 |
44 | # version path separator; As mentioned above, this is the character used to split
45 | # version_locations. The default within new alembic.ini files is "os", which uses os.pathsep.
46 | # If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas.
47 | # Valid values for version_path_separator are:
48 | #
49 | # version_path_separator = :
50 | # version_path_separator = ;
51 | # version_path_separator = space
52 | version_path_separator = os # Use os.pathsep. Default configuration used for new projects.
53 |
54 | # set to 'true' to search source files recursively
55 | # in each "version_locations" directory
56 | # new in Alembic version 1.10
57 | # recursive_version_locations = false
58 |
59 | # the output encoding used when revision files
60 | # are written from script.py.mako
61 | # output_encoding = utf-8
62 |
63 | sqlalchemy.url = driver://user:pass@localhost/dbname
64 |
65 |
66 | [post_write_hooks]
67 | # post_write_hooks defines scripts or Python functions that are run
68 | # on newly generated revision scripts. See the documentation for further
69 | # detail and examples
70 |
71 | # format using "black" - use the console_scripts runner, against the "black" entrypoint
72 | # hooks = black
73 | # black.type = console_scripts
74 | # black.entrypoint = black
75 | # black.options = -l 79 REVISION_SCRIPT_FILENAME
76 |
77 | # lint with attempts to fix using "ruff" - use the exec runner, execute a binary
78 | # hooks = ruff
79 | # ruff.type = exec
80 | # ruff.executable = %(here)s/.venv/bin/ruff
81 | # ruff.options = --fix REVISION_SCRIPT_FILENAME
82 |
83 | # Logging configuration
84 | [loggers]
85 | keys = root,sqlalchemy,alembic
86 |
87 | [handlers]
88 | keys = console
89 |
90 | [formatters]
91 | keys = generic
92 |
93 | [logger_root]
94 | level = WARN
95 | handlers = console
96 | qualname =
97 |
98 | [logger_sqlalchemy]
99 | level = WARN
100 | handlers =
101 | qualname = sqlalchemy.engine
102 |
103 | [logger_alembic]
104 | level = INFO
105 | handlers =
106 | qualname = alembic
107 |
108 | [handler_console]
109 | class = StreamHandler
110 | args = (sys.stderr,)
111 | level = NOTSET
112 | formatter = generic
113 |
114 | [formatter_generic]
115 | format = %(levelname)-5.5s [%(name)s] %(message)s
116 | datefmt = %H:%M:%S
117 |
--------------------------------------------------------------------------------
/neurons/miners/assigning_validator_hotkeys.md:
--------------------------------------------------------------------------------
1 | # Best Practices for Assigning Validator Hotkeys
2 |
3 | In the Compute Subnet, validators play a critical role in ensuring the performance and security of the network. However, miners must assign executors carefully to the validators to maximize incentives. This guide explains the best strategy for assigning validator hotkeys based on stake distribution within the network.
4 |
5 | ## Why Validator Hotkey Assignment Matters
6 |
7 | You will **not receive any rewards** if your executors are not assigned to validators that control a **majority of the stake** in the network. Therefore, it’s crucial to understand how stake distribution works and how to assign your executors effectively.
8 |
9 | ## Step-by-Step Strategy for Assigning Validator Hotkeys
10 |
11 | ### 1. Check the Validator Stakes
12 |
13 | The first step is to determine how much stake each validator controls in the network. You can find the current stake distribution of all validators by visiting:
14 |
15 | [**TaoMarketCap Subnet 51 Validators**](https://taomarketcap.com/subnets/51/validators)
16 |
17 | This page lists each validator and their respective stake, which is essential for making decisions about hotkey assignments.
18 |
19 | ### 2. Assign Executors to Cover at Least 50% of the Stake
20 |
21 | To begin, you need to ensure that your executors are covering **at least 50%** of the total network stake. This guarantees that your executors will be actively validated and you’ll receive rewards.
22 |
23 | #### Example:
24 |
25 | Suppose you have **100 executors** (GPUs) and the stake distribution of the validators is as follows:
26 |
27 | | Validator | Stake (%) |
28 | |-----------|-----------|
29 | | Validator 1 | 50% |
30 | | Validator 2 | 25% |
31 | | Validator 3 | 15% |
32 | | Validator 4 | 5% |
33 | | Validator 5 | 1% |
34 |
35 | - To cover 50% of the total stake, assign **enough executors** to cover **Validator 1** (50% stake).
36 | - In this case, assign at least **one executor** to **Validator 1** because they control 50% of the network stake.
37 |
38 | ### 3. Stake-Weighted Assignment for Remaining Executors
39 |
40 | Once you’ve ensured that you’re covering at least 50% of the network stake, the remaining executors should be assigned in a **stake-weighted** fashion to maximize rewards.
41 |
42 | #### Continuing the Example:
43 |
44 | You have **99 remaining executors** to assign to validators. Here's the distribution of executors you should follow based on the stake (a short calculation sketch follows this list):
45 |
46 | - **Validator 1 (50% stake)**: Assign **50% of executors** to Validator 1.
47 | - Assign 50 executors.
48 | - **Validator 2 (25% stake)**: Assign **25% of executors** to Validator 2.
49 | - Assign 25 executors.
50 | - **Validator 3 (15% stake)**: Assign **15% of executors** to Validator 3.
51 | - Assign 15 executors.
52 | - **Validator 4 (5% stake)**: Assign **5% of executors** to Validator 4.
53 | - Assign 5 executors.
54 | - **Validator 5 (1% stake)**: Assign **1% of executors** to Validator 5.
55 | - Assign 1 executor.
56 |
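Below is a small, illustrative Python sketch of this stake-weighted calculation. It is not part of the subnet codebase: the helper name `allocate_executors` and the hard-coded stake figures are assumptions for this example, and because it normalizes over the listed stakes (which sum to 96%), the resulting counts differ slightly from the rounded table above.

```python
# Hypothetical helper: proportional (stake-weighted) executor allocation.
# The stake values and function name are illustrative only.

def allocate_executors(stakes: dict[str, float], total_executors: int) -> dict[str, int]:
    """Split executors across validators in proportion to their stake."""
    total_stake = sum(stakes.values())
    allocation = {
        hotkey: int(total_executors * stake / total_stake)
        for hotkey, stake in stakes.items()
    }
    # Rounding down can leave a few executors unassigned; hand them to the
    # largest stakeholders first.
    leftover = total_executors - sum(allocation.values())
    for hotkey, _ in sorted(stakes.items(), key=lambda kv: kv[1], reverse=True)[:leftover]:
        allocation[hotkey] += 1
    return allocation


if __name__ == "__main__":
    stakes = {"validator_1": 50, "validator_2": 25, "validator_3": 15,
              "validator_4": 5, "validator_5": 1}
    print(allocate_executors(stakes, total_executors=100))
    # {'validator_1': 53, 'validator_2': 26, 'validator_3': 15, 'validator_4': 5, 'validator_5': 1}
```
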
57 | ### 4. Adjust Based on Network Dynamics
58 |
59 | The stake of validators can change over time. Make sure to periodically check the **validator stakes** on [TaoMarketCap](https://taomarketcap.com/subnets/51/validators) and **reassign your executors** as needed to maintain optimal rewards. If a validator’s stake increases significantly, you may want to adjust your assignments accordingly.
60 |
61 | ## Summary of the Best Strategy
62 |
63 | - **Step 1**: Check the validator stakes on [TaoMarketCap](https://taomarketcap.com/subnets/51/validators).
64 | - **Step 2**: Ensure your executors are covering at least **50% of the total network stake**.
65 | - **Step 3**: Use a **stake-weighted** strategy to assign your remaining executors, matching the proportion of the stake each validator controls.
66 | - **Step 4**: Periodically recheck the stake distribution and adjust assignments as needed.
67 |
68 | By following this strategy, you’ll ensure that your executors are assigned to validators in the most efficient way possible, maximizing your chances of receiving rewards.
69 |
70 | ## Additional Resources
71 |
72 | - [TaoMarketCap Subnet 51 Validators](https://taomarketcap.com/subnets/51/validators)
73 | - [Compute Subnet Miner README](README.md)
74 |
75 |
--------------------------------------------------------------------------------
/neurons/miners/docker-compose.app.dev.yml:
--------------------------------------------------------------------------------
1 | services:
2 | db:
3 | image: postgres:14.0-alpine
4 | healthcheck:
5 | test: pg_isready -U postgres || exit 1
6 | environment:
7 | - POSTGRES_DB=${POSTGRES_DB}
8 | - POSTGRES_USER=${POSTGRES_USER}
9 | - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
10 | env_file: ./.env
11 | volumes:
12 | - db_data:/var/lib/postgresql/data
13 |
14 | miner:
15 | image: daturaai/compute-subnet-miner:dev
16 | env_file: ./.env
17 | environment:
18 | - SQLALCHEMY_DATABASE_URI=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:5432/${POSTGRES_DB}
19 | ports:
20 | - ${EXTERNAL_PORT}:${INTERNAL_PORT}
21 | expose:
22 | - ${INTERNAL_PORT}
23 | volumes:
24 | - ${HOST_WALLET_DIR}:/root/.bittensor/wallets
25 | depends_on:
26 | - db
27 |
28 | volumes:
29 | db_data:
--------------------------------------------------------------------------------
/neurons/miners/docker-compose.app.yml:
--------------------------------------------------------------------------------
1 | services:
2 | db:
3 | image: postgres:14.0-alpine
4 | healthcheck:
5 | test: pg_isready -U postgres || exit 1
6 | environment:
7 | - POSTGRES_DB=${POSTGRES_DB}
8 | - POSTGRES_USER=${POSTGRES_USER}
9 | - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
10 | env_file: ./.env
11 | volumes:
12 | - db_data:/var/lib/postgresql/data
13 |
14 | miner:
15 | image: daturaai/compute-subnet-miner:latest
16 | env_file: ./.env
17 | environment:
18 | - SQLALCHEMY_DATABASE_URI=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:5432/${POSTGRES_DB}
19 | ports:
20 | - ${EXTERNAL_PORT}:${INTERNAL_PORT}
21 | expose:
22 | - ${INTERNAL_PORT}
23 | volumes:
24 | - ${HOST_WALLET_DIR}:/root/.bittensor/wallets
25 | depends_on:
26 | - db
27 |
28 | volumes:
29 | db_data:
--------------------------------------------------------------------------------
/neurons/miners/docker-compose.dev.yml:
--------------------------------------------------------------------------------
1 | version: '3.7'
2 |
3 | services:
4 | miner-runner:
5 | image: daturaai/compute-subnet-miner-runner:dev
6 | restart: unless-stopped
7 | volumes:
8 | - /var/run/docker.sock:/var/run/docker.sock
9 | - "$HOME/.bittensor/wallets:/root/.bittensor/wallets"
10 | - ./.env:/root/miner/.env
11 | labels:
12 | - "com.centurylinklabs.watchtower.enable=true"
13 |
14 | watchtower:
15 | image: containrrr/watchtower:1.7.1
16 | restart: unless-stopped
17 | volumes:
18 | - /var/run/docker.sock:/var/run/docker.sock
19 | command: --interval 60 --cleanup --label-enable
20 |
--------------------------------------------------------------------------------
/neurons/miners/docker-compose.local.yml:
--------------------------------------------------------------------------------
1 | version: '3.7'
2 |
3 | services:
4 | db:
5 | image: postgres:14.0-alpine
6 | healthcheck:
7 | test: pg_isready -U postgres || exit 1
8 | environment:
9 | - POSTGRES_DB=${POSTGRES_DB}
10 | - POSTGRES_USER=${POSTGRES_USER}
11 | - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
12 | env_file: ./.env
13 | volumes:
14 | - db_data:/var/lib/postgresql/data
15 | ports:
16 | - ${POSTGRES_PORT}:5432
17 |
18 | volumes:
19 | db_data:
--------------------------------------------------------------------------------
/neurons/miners/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.7'
2 |
3 | services:
4 | miner-runner:
5 | image: daturaai/compute-subnet-miner-runner:latest
6 | restart: unless-stopped
7 | volumes:
8 | - /var/run/docker.sock:/var/run/docker.sock
9 | - "$HOME/.bittensor/wallets:/root/.bittensor/wallets"
10 | - ./.env:/root/miner/.env
11 | labels:
12 | - "com.centurylinklabs.watchtower.enable=true"
13 |
14 | watchtower:
15 | image: containrrr/watchtower:1.7.1
16 | restart: unless-stopped
17 | volumes:
18 | - /var/run/docker.sock:/var/run/docker.sock
19 | command: --interval 60 --cleanup --label-enable
20 |
--------------------------------------------------------------------------------
/neurons/miners/docker_build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -eux -o pipefail
3 |
4 | IMAGE_NAME="daturaai/compute-subnet-miner:$TAG"
5 |
6 | docker build --build-context datura=../../datura -t $IMAGE_NAME .
--------------------------------------------------------------------------------
/neurons/miners/docker_publish.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -eux -o pipefail
3 |
4 | source ./docker_build.sh
5 |
6 | echo "$DOCKERHUB_PAT" | docker login -u "$DOCKERHUB_USERNAME" --password-stdin
7 | docker push "$IMAGE_NAME"
--------------------------------------------------------------------------------
/neurons/miners/docker_runner_build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -eux -o pipefail
3 |
4 | IMAGE_NAME="daturaai/compute-subnet-miner-runner:$TAG"
5 |
6 | docker build --file Dockerfile.runner --build-arg targetFile=$TARGET_FILE -t $IMAGE_NAME .
--------------------------------------------------------------------------------
/neurons/miners/docker_runner_publish.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -eux -o pipefail
3 |
4 | source ./docker_runner_build.sh
5 |
6 | echo "$DOCKERHUB_PAT" | docker login -u "$DOCKERHUB_USERNAME" --password-stdin
7 | docker push "$IMAGE_NAME"
--------------------------------------------------------------------------------
/neurons/miners/entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | set -eu
3 |
4 | docker compose up --pull always --detach --wait --force-recreate
5 |
6 | # Clean docker images
7 | docker image prune -f
8 |
9 | while true
10 | do
11 | docker compose logs -f
12 | echo 'All containers died'
13 | sleep 10
14 | done
15 |
--------------------------------------------------------------------------------
/neurons/miners/migrations/README:
--------------------------------------------------------------------------------
1 | Generic single-database configuration.
--------------------------------------------------------------------------------
/neurons/miners/migrations/env.py:
--------------------------------------------------------------------------------
1 | import os
2 | from logging.config import fileConfig
3 | from pathlib import Path
4 |
5 | from alembic import context
6 | from dotenv import load_dotenv
7 | from sqlalchemy import engine_from_config, pool
8 | from sqlmodel import SQLModel
9 |
10 | from models.executor import * # noqa
11 | from models.validator import * # noqa
12 |
13 | # this is the Alembic Config object, which provides
14 | # access to the values within the .ini file in use.
15 | config = context.config
16 |
17 | # Interpret the config file for Python logging.
18 | # This line sets up loggers basically.
19 | if config.config_file_name is not None:
20 | fileConfig(config.config_file_name)
21 |
22 | # add your model's MetaData object here
23 | # for 'autogenerate' support
24 | # from myapp import mymodel
25 | # target_metadata = mymodel.Base.metadata
26 |
27 | target_metadata = SQLModel.metadata
28 |
29 | # other values from the config, defined by the needs of env.py,
30 | # can be acquired:
31 | # my_important_option = config.get_main_option("my_important_option")
32 | # ... etc.
33 |
34 | current_dir = Path(__file__).parent
35 |
36 | load_dotenv(str(current_dir / ".." / ".env"))
37 |
38 |
39 | def get_url():
40 | url = os.getenv("SQLALCHEMY_DATABASE_URI")
41 | return url
42 |
43 |
44 | def run_migrations_offline() -> None:
45 | """Run migrations in 'offline' mode.
46 |
47 | This configures the context with just a URL
48 | and not an Engine, though an Engine is acceptable
49 | here as well. By skipping the Engine creation
50 | we don't even need a DBAPI to be available.
51 |
52 | Calls to context.execute() here emit the given string to the
53 | script output.
54 |
55 | """
56 | url = get_url()
57 | context.configure(
58 | url=url,
59 | target_metadata=target_metadata,
60 | literal_binds=True,
61 | dialect_opts={"paramstyle": "named"},
62 | )
63 |
64 | with context.begin_transaction():
65 | context.run_migrations()
66 |
67 |
68 | def run_migrations_online() -> None:
69 | """Run migrations in 'online' mode.
70 |
71 | In this scenario we need to create an Engine
72 | and associate a connection with the context.
73 |
74 | """
75 | configuration = config.get_section(config.config_ini_section)
76 | configuration["sqlalchemy.url"] = get_url()
77 | connectable = engine_from_config(
78 | configuration,
79 | prefix="sqlalchemy.",
80 | poolclass=pool.NullPool,
81 | )
82 |
83 | with connectable.connect() as connection:
84 | context.configure(connection=connection, target_metadata=target_metadata)
85 |
86 | with context.begin_transaction():
87 | context.run_migrations()
88 |
89 |
90 | if context.is_offline_mode():
91 | run_migrations_offline()
92 | else:
93 | run_migrations_online()
94 |
--------------------------------------------------------------------------------
/neurons/miners/migrations/script.py.mako:
--------------------------------------------------------------------------------
1 | """${message}
2 |
3 | Revision ID: ${up_revision}
4 | Revises: ${down_revision | comma,n}
5 | Create Date: ${create_date}
6 |
7 | """
8 | from typing import Sequence, Union
9 |
10 | from alembic import op
11 | import sqlalchemy as sa
12 | import sqlmodel
13 | import sqlmodel.sql.sqltypes
14 | ${imports if imports else ""}
15 |
16 | # revision identifiers, used by Alembic.
17 | revision: str = ${repr(up_revision)}
18 | down_revision: Union[str, None] = ${repr(down_revision)}
19 | branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
20 | depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
21 |
22 |
23 | def upgrade() -> None:
24 | ${upgrades if upgrades else "pass"}
25 |
26 |
27 | def downgrade() -> None:
28 | ${downgrades if downgrades else "pass"}
29 |
--------------------------------------------------------------------------------
/neurons/miners/migrations/versions/8e52603bd563_create_validator_model.py:
--------------------------------------------------------------------------------
1 | """create validator model
2 |
3 | Revision ID: 8e52603bd563
4 | Revises:
5 | Create Date: 2024-07-15 10:47:41.596221
6 |
7 | """
8 |
9 | from collections.abc import Sequence
10 |
11 | import sqlalchemy as sa
12 | import sqlmodel
13 | import sqlmodel.sql.sqltypes
14 | from alembic import op
15 |
16 | # revision identifiers, used by Alembic.
17 | revision: str = "8e52603bd563"
18 | down_revision: str | None = None
19 | branch_labels: str | Sequence[str] | None = None
20 | depends_on: str | Sequence[str] | None = None
21 |
22 |
23 | def upgrade() -> None:
24 | # ### commands auto generated by Alembic - please adjust! ###
25 | op.create_table(
26 | "validator",
27 | sa.Column('uuid', sa.Uuid(), nullable=False),
28 | sa.Column('validator_hotkey', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
29 | sa.Column('active', sa.Boolean(), nullable=False),
30 | sa.PrimaryKeyConstraint('uuid'),
31 | sa.UniqueConstraint('validator_hotkey')
32 | )
33 | # ### end Alembic commands ###
34 |
35 |
36 | def downgrade() -> None:
37 | # ### commands auto generated by Alembic - please adjust! ###
38 | op.drop_table("validator")
39 | # ### end Alembic commands ###
40 |
--------------------------------------------------------------------------------
/neurons/miners/migrations/versions/eb0b92cbc38e_add_executors_table.py:
--------------------------------------------------------------------------------
1 | """Add executors table
2 |
3 | Revision ID: eb0b92cbc38e
4 | Revises: 8e52603bd563
5 | Create Date: 2024-09-06 06:56:04.990324
6 |
7 | """
8 |
9 | from collections.abc import Sequence
10 |
11 | import sqlalchemy as sa
12 | import sqlmodel
13 | import sqlmodel.sql.sqltypes
14 | from alembic import op
15 |
16 | # revision identifiers, used by Alembic.
17 | revision: str = "eb0b92cbc38e"
18 | down_revision: str | None = "8e52603bd563"
19 | branch_labels: str | Sequence[str] | None = None
20 | depends_on: str | Sequence[str] | None = None
21 |
22 |
23 | def upgrade() -> None:
24 | # ### commands auto generated by Alembic - please adjust! ###
25 | op.create_table(
26 | "executor",
27 | sa.Column("uuid", sa.Uuid(), nullable=False),
28 | sa.Column("address", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
29 | sa.Column("port", sa.Integer(), nullable=False),
30 | sa.Column("validator", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
31 | sa.PrimaryKeyConstraint("uuid"),
32 | sa.UniqueConstraint("address", "port", name="unique_contraint_address_port"),
33 | )
34 | # ### end Alembic commands ###
35 |
36 |
37 | def downgrade() -> None:
38 | # ### commands auto generated by Alembic - please adjust! ###
39 | op.drop_table("executor")
40 | # ### end Alembic commands ###
41 |
--------------------------------------------------------------------------------
/neurons/miners/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "miners"
3 | version = "3.0.0"
4 | description = "Compute subnet miner"
5 | authors = [
6 | {name = "waris", email = "waris0609@outlook.com"},
7 | ]
8 | dependencies = [
9 | "aiohappyeyeballs==2.4.6",
10 | "aiohttp==3.10.11",
11 | "aiosignal==1.3.2",
12 | "alembic==1.14.0",
13 | "annotated-types==0.7.0",
14 | "anyio==4.8.0",
15 | "asgiref==3.8.1",
16 | "async-property==0.2.2",
17 | "async-substrate-interface==1.0.0",
18 | "asyncpg==0.30.0",
19 | "asyncstdlib==3.13.0",
20 | "attrs==25.1.0",
21 | "backoff==2.2.1",
22 | "base58==2.1.1",
23 | "bittensor==9.0.0",
24 | "bittensor-cli==9.0.0",
25 | "bittensor-commit-reveal==0.2.0",
26 | "bittensor-wallet==3.0.3",
27 | "bt-decode==0.5.0a2",
28 | "certifi==2025.1.31",
29 | "cffi==1.17.1",
30 | "charset-normalizer==3.4.1",
31 | "click==8.1.8",
32 | "colorama==0.4.6",
33 | "cryptography==43.0.3",
34 | "cytoolz==1.0.1",
35 | "databases==0.9.0",
36 | "datura @ file:///${PROJECT_ROOT}/../../datura",
37 | "decorator==5.1.1",
38 | "eth-hash==0.7.1",
39 | "eth-typing==5.1.0",
40 | "eth-utils==2.2.2",
41 | "fastapi==0.110.3",
42 | "frozenlist==1.5.0",
43 | "fuzzywuzzy==0.18.0",
44 | "gitdb==4.0.12",
45 | "GitPython==3.1.44",
46 | "greenlet==3.1.1",
47 | "h11==0.14.0",
48 | "idna==3.10",
49 | "iniconfig==2.0.0",
50 | "Jinja2==3.1.5",
51 | "Levenshtein==0.26.1",
52 | "Mako==1.3.9",
53 | "markdown-it-py==3.0.0",
54 | "MarkupSafe==3.0.2",
55 | "mdurl==0.1.2",
56 | "more-itertools==10.6.0",
57 | "msgpack==1.1.0",
58 | "msgpack-numpy-opentensor==0.5.0",
59 | "multidict==6.1.0",
60 | "munch==2.5.0",
61 | "narwhals==1.26.0",
62 | "nest-asyncio==1.6.0",
63 | "netaddr==1.3.0",
64 | "numpy==2.0.2",
65 | "packaging==24.2",
66 | "password-strength==0.0.3.post2",
67 | "plotille==5.0.0",
68 | "plotly==6.0.0",
69 | "pluggy==1.5.0",
70 | "propcache==0.2.1",
71 | "psycopg2-binary==2.9.10",
72 | "py==1.11.0",
73 | "py-bip39-bindings==0.1.11",
74 | "pycparser==2.22",
75 | "pycryptodome==3.21.0",
76 | "pydantic==2.10.6",
77 | "pydantic-settings==2.7.1",
78 | "pydantic_core==2.27.2",
79 | "Pygments==2.19.1",
80 | "pytest==8.3.4",
81 | "python-dotenv==1.0.1",
82 | "python-Levenshtein==0.26.1",
83 | "python-statemachine==2.5.0",
84 | "pywry==0.6.2",
85 | "PyYAML==6.0.2",
86 | "RapidFuzz==3.12.1",
87 | "requests==2.32.3",
88 | "retry==0.9.2",
89 | "rich==13.9.4",
90 | "scalecodec==1.2.11",
91 | "setproctitle==1.3.4",
92 | "setuptools==70.0.0",
93 | "shellingham==1.5.4",
94 | "six==1.17.0",
95 | "smmap==5.0.2",
96 | "sniffio==1.3.1",
97 | "SQLAlchemy==2.0.36",
98 | "sqlmodel==0.0.22",
99 | "starlette==0.37.2",
100 | "termcolor==2.5.0",
101 | "toml==0.10.0",
102 | "toolz==1.0.0",
103 | "typer==0.15.1",
104 | "typing_extensions==4.12.2",
105 | "urllib3==2.3.0",
106 | "uvicorn==0.34.0",
107 | "websockets==14.2",
108 | "wheel==0.45.1",
109 | "xxhash==3.5.0",
110 | "yarl==1.18.3"
111 | ]
112 | requires-python = "==3.11.11"
113 | readme = "README.md"
114 | license = {text = "MIT"}
115 |
116 | [build-system]
117 | requires = ["pdm-backend"]
118 | build-backend = "pdm.backend"
119 |
120 |
121 | [tool.pdm]
122 | distribution = true
123 |
--------------------------------------------------------------------------------
/neurons/miners/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | # db migrate
4 | pdm run alembic upgrade head
5 |
6 | # run fastapi app
7 | pdm run src/miner.py
--------------------------------------------------------------------------------
/neurons/miners/src/cli.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import logging
3 | import uuid
4 |
5 | import click
6 | import sqlalchemy
7 |
8 | from core.db import get_db
9 | from daos.executor import ExecutorDao
10 | from models.executor import Executor
11 |
12 | logging.basicConfig(level=logging.INFO)
13 | logger = logging.getLogger(__name__)
14 |
15 |
16 | @click.group()
17 | def cli():
18 | pass
19 |
20 |
21 | @cli.command()
22 | @click.option("--address", prompt="IP Address", help="IP address of executor")
23 | @click.option("--port", type=int, prompt="Port", help="Port of executor")
24 | @click.option(
25 | "--validator", prompt="Validator Hotkey", help="Validator hotkey that executor opens to."
26 | )
27 | def add_executor(address: str, port: int, validator: str):
28 | """Add executor machine to the database"""
29 |     logger.info("Adding a new executor (%s:%d) that opens to validator (%s)", address, port, validator)
30 | executor_dao = ExecutorDao(session=next(get_db()))
31 | try:
32 | executor = executor_dao.save(
33 | Executor(uuid=uuid.uuid4(), address=address, port=port, validator=validator)
34 | )
35 | except Exception as e:
36 | logger.error("Failed in adding an executor: %s", str(e))
37 | else:
38 | logger.info("Added an executor(id=%s)", str(executor.uuid))
39 |
40 |
41 | @cli.command()
42 | @click.option("--address", prompt="IP Address", help="IP address of executor")
43 | @click.option("--port", type=int, prompt="Port", help="Port of executor")
44 | def remove_executor(address: str, port: int):
45 |     """Remove executor machine from the database"""
46 | if click.confirm('Are you sure you want to remove this executor? This may lead to unexpected results'):
47 | logger.info("Removing executor (%s:%d)", address, port)
48 | executor_dao = ExecutorDao(session=next(get_db()))
49 | try:
50 | executor_dao.delete_by_address_port(address, port)
51 | except Exception as e:
52 | logger.error("Failed in removing an executor: %s", str(e))
53 | else:
54 | logger.info("Removed an executor(%s:%d)", address, port)
55 | else:
56 | logger.info("Executor removal cancelled.")
57 |
58 |
59 | @cli.command()
60 | @click.option("--address", prompt="IP Address", help="IP address of executor")
61 | @click.option("--port", type=int, prompt="Port", help="Port of executor")
62 | @click.option(
63 | "--validator", prompt="Validator Hotkey", help="Validator hotkey that executor opens to."
64 | )
65 | def switch_validator(address: str, port: int, validator: str):
66 | """Switch validator"""
67 | if click.confirm('Are you sure you want to switch validator? This may lead to unexpected results'):
68 | logger.info("Switching validator(%s) of an executor (%s:%d)", validator, address, port)
69 | executor_dao = ExecutorDao(session=next(get_db()))
70 | try:
71 | executor_dao.update(
72 | Executor(uuid=uuid.uuid4(), address=address, port=port, validator=validator)
73 | )
74 | except Exception as e:
75 | logger.error("Failed in switching validator: %s", str(e))
76 | else:
77 | logger.info("Validator switched")
78 | else:
79 | logger.info("Cancelled.")
80 |
81 |
82 | @cli.command()
83 | def show_executors():
84 |     """Show executors in the database"""
85 | executor_dao = ExecutorDao(session=next(get_db()))
86 | try:
87 | for executor in executor_dao.get_all_executors():
88 | logger.info("%s %s:%d -> %s", executor.uuid, executor.address, executor.port, executor.validator)
89 | except Exception as e:
90 | logger.error("Failed in showing an executor: %s", str(e))
91 |
92 |
93 | if __name__ == "__main__":
94 | cli()
95 |
--------------------------------------------------------------------------------
/neurons/miners/src/core/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Datura-ai/compute-subnet/39f006fd39394b9972ab509cc7ab558a8a374516/neurons/miners/src/core/__init__.py
--------------------------------------------------------------------------------
/neurons/miners/src/core/config.py:
--------------------------------------------------------------------------------
1 | from typing import TYPE_CHECKING
2 | import argparse
3 | import pathlib
4 |
5 | import bittensor
6 | from pydantic import Field
7 | from pydantic_settings import BaseSettings, SettingsConfigDict
8 |
9 | if TYPE_CHECKING:
10 | from bittensor_wallet import bittensor_wallet
11 |
12 |
13 | class Settings(BaseSettings):
14 | model_config = SettingsConfigDict(env_file=".env", extra="ignore")
15 | PROJECT_NAME: str = "compute-subnet-miner"
16 |
17 | BITTENSOR_WALLET_DIRECTORY: pathlib.Path = Field(
18 | env="BITTENSOR_WALLET_DIRECTORY",
19 | default=pathlib.Path("~").expanduser() / ".bittensor" / "wallets",
20 | )
21 | BITTENSOR_WALLET_NAME: str = Field(env="BITTENSOR_WALLET_NAME")
22 | BITTENSOR_WALLET_HOTKEY_NAME: str = Field(env="BITTENSOR_WALLET_HOTKEY_NAME")
23 | BITTENSOR_NETUID: int = Field(env="BITTENSOR_NETUID")
24 | BITTENSOR_CHAIN_ENDPOINT: str | None = Field(env="BITTENSOR_CHAIN_ENDPOINT", default=None)
25 | BITTENSOR_NETWORK: str = Field(env="BITTENSOR_NETWORK")
26 |
27 | SQLALCHEMY_DATABASE_URI: str = Field(env="SQLALCHEMY_DATABASE_URI")
28 |
29 | EXTERNAL_IP_ADDRESS: str = Field(env="EXTERNAL_IP_ADDRESS")
30 | INTERNAL_PORT: int = Field(env="INTERNAL_PORT", default=8000)
31 | EXTERNAL_PORT: int = Field(env="EXTERNAL_PORT", default=8000)
32 | ENV: str = Field(env="ENV", default="dev")
33 | DEBUG: bool = Field(env="DEBUG", default=False)
34 |
35 | MIN_ALPHA_STAKE: int = Field(env="MIN_ALPHA_STAKE", default=10)
36 | MIN_TOTAL_STAKE: int = Field(env="MIN_TOTAL_STAKE", default=20000)
37 |
38 | def get_bittensor_wallet(self) -> "bittensor_wallet":
39 | if not self.BITTENSOR_WALLET_NAME or not self.BITTENSOR_WALLET_HOTKEY_NAME:
40 | raise RuntimeError("Wallet not configured")
41 | wallet = bittensor.wallet(
42 | name=self.BITTENSOR_WALLET_NAME,
43 | hotkey=self.BITTENSOR_WALLET_HOTKEY_NAME,
44 | path=str(self.BITTENSOR_WALLET_DIRECTORY),
45 | )
46 | wallet.hotkey_file.get_keypair() # this raises errors if the keys are inaccessible
47 | return wallet
48 |
49 | def get_bittensor_config(self) -> bittensor.config:
50 | parser = argparse.ArgumentParser()
51 | # bittensor.wallet.add_args(parser)
52 | # bittensor.subtensor.add_args(parser)
53 | # bittensor.axon.add_args(parser)
54 |
55 | if self.BITTENSOR_NETWORK:
56 | if "--subtensor.network" in parser._option_string_actions:
57 | parser._handle_conflict_resolve(
58 | None,
59 | [("--subtensor.network", parser._option_string_actions["--subtensor.network"])],
60 | )
61 |
62 | parser.add_argument(
63 | "--subtensor.network",
64 | type=str,
65 | help="network",
66 | default=self.BITTENSOR_NETWORK,
67 | )
68 |
69 | if self.BITTENSOR_CHAIN_ENDPOINT:
70 | if "--subtensor.chain_endpoint" in parser._option_string_actions:
71 | parser._handle_conflict_resolve(
72 | None,
73 | [
74 | (
75 | "--subtensor.chain_endpoint",
76 | parser._option_string_actions["--subtensor.chain_endpoint"],
77 | )
78 | ],
79 | )
80 |
81 | parser.add_argument(
82 | "--subtensor.chain_endpoint",
83 | type=str,
84 | help="chain endpoint",
85 | default=self.BITTENSOR_CHAIN_ENDPOINT,
86 | )
87 |
88 | return bittensor.config(parser)
89 |
90 |
91 | settings = Settings()
92 |
--------------------------------------------------------------------------------
/neurons/miners/src/core/db.py:
--------------------------------------------------------------------------------
1 | from collections.abc import Generator
2 | from typing import Annotated
3 |
4 | from fastapi import Depends
5 | from sqlmodel import Session, create_engine
6 |
7 | from core.config import settings
8 |
9 | engine = create_engine(str(settings.SQLALCHEMY_DATABASE_URI))
10 |
11 |
12 | def get_db() -> Generator[Session, None, None]:
13 | with Session(engine) as session:
14 | yield session
15 |
16 |
17 | SessionDep = Annotated[Session, Depends(get_db)]
18 |
--------------------------------------------------------------------------------
/neurons/miners/src/core/utils.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import contextvars
3 | import json
4 | import logging
5 |
6 | from core.config import settings
7 |
8 | logger = logging.getLogger(__name__)
9 |
10 | # Create a ContextVar to hold the context information
11 | context = contextvars.ContextVar("context", default="ValidatorService")
12 | context.set("ValidatorService")
13 |
14 |
15 | def wait_for_services_sync(timeout=30):
16 | """Wait until PostgreSQL connections are working."""
17 | from sqlalchemy import create_engine, text
18 |
19 | from core.config import settings
20 |
21 | logger.info("Waiting for services to be available...")
22 |
23 | while True:
24 | try:
25 | # Check PostgreSQL connection using SQLAlchemy
26 | engine = create_engine(settings.SQLALCHEMY_DATABASE_URI)
27 | with engine.connect() as connection:
28 | connection.execute(text("SELECT 1"))
29 | logger.info("Connected to PostgreSQL.")
30 |
31 | break
32 | except Exception as e:
33 | logger.error("Failed to connect to PostgreSQL.")
34 | raise e
35 |
36 |
37 | def get_extra_info(extra: dict) -> dict:
38 | task = asyncio.current_task()
39 | coro_name = task.get_coro().__name__ if task else "NoTask"
40 | task_id = id(task) if task else "NoTaskID"
41 | extra_info = {
42 | "coro_name": coro_name,
43 | "task_id": task_id,
44 | **extra,
45 | }
46 | return extra_info
47 |
48 |
49 | def configure_logs_of_other_modules():
50 | miner_hotkey = settings.get_bittensor_wallet().get_hotkey().ss58_address
51 |
52 | logging.basicConfig(
53 | level=logging.INFO,
54 | format=f"Miner: {miner_hotkey} | Name: %(name)s | Time: %(asctime)s | Level: %(levelname)s | File: %(filename)s | Function: %(funcName)s | Line: %(lineno)s | Process: %(process)d | Message: %(message)s",
55 | )
56 |
57 | sqlalchemy_logger = logging.getLogger("sqlalchemy")
58 | sqlalchemy_logger.setLevel(logging.WARNING)
59 |
60 | # Create a custom formatter that adds the context to the log messages
61 | class CustomFormatter(logging.Formatter):
62 | def format(self, record):
63 | try:
64 | task = asyncio.current_task()
65 | coro_name = task.get_coro().__name__ if task else "NoTask"
66 | task_id = id(task) if task else "NoTaskID"
67 | return f"{getattr(record, 'context', 'Default')} | {coro_name} | {task_id} | {super().format(record)}"
68 | except Exception:
69 | return ""
70 |
71 | # Create a handler for the logger
72 | handler = logging.StreamHandler()
73 |
74 | # Set the formatter for the handler
75 | handler.setFormatter(
76 | CustomFormatter("%(name)s %(asctime)s %(levelname)s %(filename)s %(process)d %(message)s")
77 | )
78 |
79 |
80 | class StructuredMessage:
81 | def __init__(self, message, extra: dict):
82 | self.message = message
83 | self.extra = extra
84 |
85 | def __str__(self):
86 | return "%s >>> %s" % (self.message, json.dumps(self.extra)) # noqa
87 |
88 |
89 | _m = StructuredMessage
90 |
--------------------------------------------------------------------------------
/neurons/miners/src/daos/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Datura-ai/compute-subnet/39f006fd39394b9972ab509cc7ab558a8a374516/neurons/miners/src/daos/__init__.py
--------------------------------------------------------------------------------
/neurons/miners/src/daos/base.py:
--------------------------------------------------------------------------------
1 | from core.db import SessionDep
2 |
3 |
4 | class BaseDao:
5 | def __init__(self, session: SessionDep):
6 | self.session = session
7 |
--------------------------------------------------------------------------------
/neurons/miners/src/daos/executor.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 | from daos.base import BaseDao
3 | from models.executor import Executor
4 |
5 |
6 | class ExecutorDao(BaseDao):
7 | def save(self, executor: Executor) -> Executor:
8 | self.session.add(executor)
9 | self.session.commit()
10 | self.session.refresh(executor)
11 | return executor
12 |
13 | def findOne(self, address: str, port: int):
14 | executor = self.session.query(Executor).filter_by(
15 | address=address, port=port).first()
16 | if not executor:
17 | raise Exception('Not found executor')
18 |
19 | return executor
20 |
21 | def update(self, executor: Executor) -> Executor:
22 | existing_executor = self.findOne(executor.address, executor.port)
23 |
24 | existing_executor.address = executor.address
25 | existing_executor.port = executor.port
26 | existing_executor.validator = executor.validator
27 |
28 | self.session.commit()
29 | self.session.refresh(existing_executor)
30 | return existing_executor
31 |
32 | def delete_by_address_port(self, address: str, port: int) -> None:
33 | executor = self.findOne(address, port)
34 |
35 | self.session.delete(executor)
36 | self.session.commit()
37 |
38 | def get_executors_for_validator(self, validator_key: str, executor_id: Optional[str] = None) -> list[Executor]:
39 |         """Get executors that are open to the validator
40 |
41 | Args:
42 | validator_key (str): validator hotkey string
43 |
44 | Return:
45 | List[Executor]: list of Executors
46 | """
47 | if executor_id:
48 | return list(self.session.query(Executor).filter_by(validator=validator_key, uuid=executor_id))
49 |
50 | return list(self.session.query(Executor).filter_by(validator=validator_key))
51 |
52 | def get_all_executors(self) -> list[Executor]:
53 | return list(self.session.query(Executor).all())
54 |
--------------------------------------------------------------------------------
/neurons/miners/src/daos/validator.py:
--------------------------------------------------------------------------------
1 | from daos.base import BaseDao
2 |
3 | from models.validator import Validator
4 |
5 |
6 | class ValidatorDao(BaseDao):
7 | def save(self, validator: Validator) -> Validator:
8 | self.session.add(validator)
9 | self.session.commit()
10 | self.session.refresh(validator)
11 | return validator
12 |
13 | def get_validator_by_hotkey(self, hotkey: str):
14 | return self.session.query(Validator).filter_by(validator_hotkey=hotkey).first()
15 |
--------------------------------------------------------------------------------
/neurons/miners/src/gpt2-training-model.py:
--------------------------------------------------------------------------------
1 | import time
2 | import torch
3 | from datasets import load_dataset
4 | from torch.utils.data import DataLoader
5 | from transformers import AdamW, GPT2LMHeadModel, GPT2Tokenizer
6 |
7 | # Load a small dataset
8 | dataset = load_dataset("wikitext", "wikitext-2-raw-v1", split="train[:1000]")
9 |
10 | # Initialize tokenizer and model
11 | tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
12 | model = GPT2LMHeadModel.from_pretrained("gpt2")
13 |
14 | tokenizer.pad_token = tokenizer.eos_token
15 |
16 |
17 | # Tokenize the dataset
18 | def tokenize_function(examples):
19 | return tokenizer(examples["text"], truncation=True, max_length=128, padding="max_length")
20 |
21 |
22 | start_time = time.time()
23 | tokenized_dataset = dataset.map(tokenize_function, batched=True)
24 | tokenized_dataset = tokenized_dataset.remove_columns(["text"])
25 | tokenized_dataset.set_format("torch")
26 |
27 | # Create DataLoader
28 | dataloader = DataLoader(tokenized_dataset, batch_size=4, shuffle=True)
29 |
30 | # Training loop
31 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
32 | print("device", device)
33 | model.to(device)
34 |
35 |
36 | # Evaluation function
37 | def evaluate(model, dataloader):
38 | model.eval()
39 | total_loss = 0
40 | with torch.no_grad():
41 | for batch in dataloader:
42 | inputs = batch["input_ids"].to(device)
43 | outputs = model(input_ids=inputs, labels=inputs)
44 | total_loss += outputs.loss.item()
45 | return total_loss / len(dataloader)
46 |
47 |
48 | # Initial evaluation
49 | initial_loss = evaluate(model, dataloader)
50 | print(f"Initial Loss: {initial_loss:.4f}")
51 | print(f"Initial Perplexity: {torch.exp(torch.tensor(initial_loss)):.4f}")
52 | optimizer = AdamW(model.parameters(), lr=5e-5, no_deprecation_warning=True)
53 |
54 | num_epochs = 1
55 | for epoch in range(num_epochs):
56 | model.train()
57 | for batch in dataloader:
58 | batch = {k: v.to(device) for k, v in batch.items()}
59 | outputs = model(input_ids=batch["input_ids"], labels=batch["input_ids"])
60 | loss = outputs.loss
61 | loss.backward()
62 | optimizer.step()
63 | optimizer.zero_grad()
64 | print(f"Epoch {epoch+1}/{num_epochs} completed")
65 |
66 | # Final evaluation
67 | final_loss = evaluate(model, dataloader)
68 | print(f"Final Loss: {final_loss:.4f}")
69 | print(f"Final Perplexity: {torch.exp(torch.tensor(final_loss)):.4f}")
70 |
71 | print(f"Loss decreased by: {initial_loss - final_loss:.4f}")
72 | print(
73 | f"Perplexity decreased by: {torch.exp(torch.tensor(initial_loss)) - torch.exp(torch.tensor(final_loss)):.4f}"
74 | )
75 |
76 | print("Job finished")
77 | print(time.time() - start_time)
--------------------------------------------------------------------------------
/neurons/miners/src/miner.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import logging
3 | from contextlib import asynccontextmanager
4 |
5 | import uvicorn
6 | from fastapi import FastAPI
7 |
8 | from core.config import settings
9 | from core.miner import Miner
10 | from routes.debug_routes import debug_apis_router
11 | from routes.validator_interface import validator_router
12 | from core.utils import configure_logs_of_other_modules, wait_for_services_sync
13 |
14 | configure_logs_of_other_modules()
15 | wait_for_services_sync()
16 |
17 |
18 | @asynccontextmanager
19 | async def app_lifespan(app: FastAPI):
20 | miner = Miner()
21 | # Run the miner in the background
22 | task = asyncio.create_task(miner.start())
23 |
24 | try:
25 | yield
26 | finally:
27 | await miner.stop() # Ensure proper cleanup
28 | await task # Wait for the background task to complete
29 | logging.info("Miner exited successfully.")
30 |
31 |
32 | app = FastAPI(
33 | title=settings.PROJECT_NAME,
34 | lifespan=app_lifespan,
35 | )
36 |
37 | app.include_router(validator_router)
38 | app.include_router(debug_apis_router)
39 |
40 | reload = settings.ENV == "dev"
41 |
42 | if __name__ == "__main__":
43 | uvicorn.run("miner:app", host="0.0.0.0", port=settings.INTERNAL_PORT, reload=reload)
44 |
--------------------------------------------------------------------------------
/neurons/miners/src/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Datura-ai/compute-subnet/39f006fd39394b9972ab509cc7ab558a8a374516/neurons/miners/src/models/__init__.py
--------------------------------------------------------------------------------
/neurons/miners/src/models/executor.py:
--------------------------------------------------------------------------------
1 | import uuid
2 | from uuid import UUID
3 |
4 | from sqlmodel import Field, SQLModel, UniqueConstraint
5 |
6 |
7 | class Executor(SQLModel, table=True):
8 | """Task model."""
9 |
10 | __table_args__ = (UniqueConstraint("address", "port", name="unique_contraint_address_port"),)
11 |
12 | uuid: UUID | None = Field(default_factory=uuid.uuid4, primary_key=True)
13 | address: str
14 | port: int
15 | validator: str
16 |
17 | def __str__(self):
18 | return f"{self.address}:{self.port}"
19 |
--------------------------------------------------------------------------------
/neurons/miners/src/models/validator.py:
--------------------------------------------------------------------------------
1 | import uuid
2 | from uuid import UUID
3 |
4 | from sqlmodel import Field, SQLModel
5 |
6 |
7 | class Validator(SQLModel, table=True):
8 | """Task model."""
9 |
10 | uuid: UUID | None = Field(default_factory=uuid.uuid4, primary_key=True)
11 | validator_hotkey: str = Field(unique=True)
12 | active: bool
13 |
--------------------------------------------------------------------------------
/neurons/miners/src/routes/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Datura-ai/compute-subnet/39f006fd39394b9972ab509cc7ab558a8a374516/neurons/miners/src/routes/__init__.py
--------------------------------------------------------------------------------
/neurons/miners/src/routes/debug_routes.py:
--------------------------------------------------------------------------------
1 | from typing import Annotated
2 |
3 | from fastapi import APIRouter, Depends
4 |
5 | from core.config import settings
6 | from services.executor_service import ExecutorService
7 |
8 | debug_apis_router = APIRouter()
9 |
10 |
11 | @debug_apis_router.get("/debug/get-executors-for-validator/{validator_hotkey}")
12 | async def get_executors_for_validator(
13 | validator_hotkey: str, executor_service: Annotated[ExecutorService, Depends(ExecutorService)]
14 | ):
15 | if not settings.DEBUG:
16 | return None
17 | return executor_service.get_executors_for_validator(validator_hotkey)
18 |
19 |
20 | @debug_apis_router.post("/debug/register_pubkey/{validator_hotkey}")
21 | async def register_pubkey(
22 | validator_hotkey: str, executor_service: Annotated[ExecutorService, Depends(ExecutorService)]
23 | ):
24 | if not settings.DEBUG:
25 | return None
26 | pub_key = "Test Pubkey"
27 | return await executor_service.register_pubkey(validator_hotkey, pub_key.encode("utf-8"))
28 |
29 |
30 | @debug_apis_router.post("/debug/remove_pubkey/{validator_hotkey}")
31 | async def remove_pubkey_from_executor(
32 | validator_hotkey: str, executor_service: Annotated[ExecutorService, Depends(ExecutorService)]
33 | ):
34 | if not settings.DEBUG:
35 | return None
36 | pub_key = "Test Pubkey"
37 | await executor_service.deregister_pubkey(validator_hotkey, pub_key.encode("utf-8"))
38 |
--------------------------------------------------------------------------------
/neurons/miners/src/routes/validator_interface.py:
--------------------------------------------------------------------------------
1 | from typing import Annotated
2 |
3 | from fastapi import APIRouter, Depends, WebSocket
4 |
5 | from consumers.validator_consumer import ValidatorConsumer
6 | validator_router = APIRouter()
7 |
8 |
9 | @validator_router.websocket("/websocket/{validator_key}")
10 | async def validator_interface(consumer: Annotated[ValidatorConsumer, Depends(ValidatorConsumer)]):
11 | await consumer.connect()
12 | await consumer.handle()
13 |
--------------------------------------------------------------------------------
/neurons/miners/src/services/ssh_service.py:
--------------------------------------------------------------------------------
1 | import getpass
2 | import os
3 |
4 |
5 | class MinerSSHService:
6 | def add_pubkey_to_host(self, pub_key: bytes):
7 | with open(os.path.expanduser("~/.ssh/authorized_keys"), "a") as file:
8 | file.write(pub_key.decode() + "\n")
9 |
10 | def remove_pubkey_from_host(self, pub_key: bytes):
11 | pub_key_str = pub_key.decode().strip()
12 | authorized_keys_path = os.path.expanduser("~/.ssh/authorized_keys")
13 |
14 | with open(authorized_keys_path, "r") as file:
15 | lines = file.readlines()
16 |
17 | with open(authorized_keys_path, "w") as file:
18 | for line in lines:
19 | if line.strip() != pub_key_str:
20 | file.write(line)
21 |
22 | def get_current_os_user(self) -> str:
23 | return getpass.getuser()
24 |
--------------------------------------------------------------------------------
/neurons/miners/src/services/validator_service.py:
--------------------------------------------------------------------------------
1 | from typing import Annotated
2 |
3 | from fastapi import Depends
4 |
5 | from daos.validator import ValidatorDao
6 |
7 |
8 | class ValidatorService:
9 | def __init__(self, validator_dao: Annotated[ValidatorDao, Depends(ValidatorDao)]):
10 | self.validator_dao = validator_dao
11 |
12 | def is_valid_validator(self, validator_hotkey: str) -> bool:
13 |         return self.validator_dao.get_validator_by_hotkey(validator_hotkey) is not None
14 |
--------------------------------------------------------------------------------
/neurons/miners/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Datura-ai/compute-subnet/39f006fd39394b9972ab509cc7ab558a8a374516/neurons/miners/tests/__init__.py
--------------------------------------------------------------------------------
/neurons/miners/version.txt:
--------------------------------------------------------------------------------
1 | 3.7.3
2 |
--------------------------------------------------------------------------------
/neurons/validators/.dockerignore:
--------------------------------------------------------------------------------
1 | .pdm.toml
2 | .pdm-python
3 | .pdm-build/
4 | __pycache__/
5 | .venv
6 | docker_build.sh
7 | docker_publish.sh
8 | docker-compose.yml
9 | .env*
10 | logs
--------------------------------------------------------------------------------
/neurons/validators/.env.template:
--------------------------------------------------------------------------------
1 | BITTENSOR_WALLET_NAME=default
2 | BITTENSOR_WALLET_HOTKEY_NAME=default
3 |
4 | POSTGRES_DB=compute-subnet-db
5 | POSTGRES_PORT=6432
6 | POSTGRES_USER=postgres
7 | POSTGRES_PASSWORD=password
8 | SQLALCHEMY_DATABASE_URI=postgresql://postgres:password@localhost:6432/compute-subnet-db
9 | ASYNC_SQLALCHEMY_DATABASE_URI=postgresql+asyncpg://postgres:password@localhost:6432/compute-subnet-db
10 |
11 | BITTENSOR_NETUID=51
12 | BITTENSOR_NETWORK=finney
13 |
14 | INTERNAL_PORT=8010
15 | EXTERNAL_PORT=8010
16 |
17 | REDIS_HOST=localhost
18 | REDIS_PORT=6379
19 |
20 | COMPUTE_APP_URI=wss://celiumcompute.ai
21 |
22 | HOST_WALLET_DIR=/home/ubuntu/.bittensor/wallets
23 |
--------------------------------------------------------------------------------
/neurons/validators/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 | !libdmcompverify.so
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 |
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | # in version control.
109 | # https://pdm-project.org/#use-with-ide
110 | .pdm.toml
111 | .pdm-python
112 | .pdm-build/
113 |
114 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
115 | __pypackages__/
116 |
117 | # Celery stuff
118 | celerybeat-schedule
119 | celerybeat.pid
120 |
121 | # SageMath parsed files
122 | *.sage.py
123 |
124 | # Environments
125 | .env
126 | .venv
127 | env/
128 | venv/
129 | ENV/
130 | env.bak/
131 | venv.bak/
132 |
133 | # Spyder project settings
134 | .spyderproject
135 | .spyproject
136 |
137 | # Rope project settings
138 | .ropeproject
139 |
140 | # mkdocs documentation
141 | /site
142 |
143 | # mypy
144 | .mypy_cache/
145 | .dmypy.json
146 | dmypy.json
147 |
148 | # Pyre type checker
149 | .pyre/
150 |
151 | # pytype static type analyzer
152 | .pytype/
153 |
154 | # Cython debug symbols
155 | cython_debug/
156 |
157 | # PyCharm
158 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
159 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
160 | # and can be added to the global gitignore or merged into this file. For a more nuclear
161 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
162 | #.idea/
163 | logs
164 | temp/
165 |
166 | obfuscated_machine_scrape.py
167 |
--------------------------------------------------------------------------------
/neurons/validators/Dockerfile:
--------------------------------------------------------------------------------
1 | ARG BASE_IMAGE=python:3.11-slim
2 |
3 | FROM $BASE_IMAGE
4 |
5 | RUN apt-get update \
6 | && apt-get install -y make wget gcc ccache patchelf openssh-client g++ \
7 | && rm -rf /var/lib/apt/lists/*
8 |
9 | WORKDIR /app
10 |
11 | RUN pip install -U pdm
12 | ENV PDM_CHECK_UPDATE=false
13 |
14 | COPY pyproject.toml pdm.lock README.md ./
15 | COPY --from=datura . /datura
16 |
17 | RUN pdm lock --check
18 | RUN pdm install --prod --no-editable
19 |
20 | COPY . .
21 |
22 | RUN mv libdmcompverify.so /usr/lib/
23 |
24 | ENV ENV=prod
25 | ENV USE_TORCH=0
26 | ENV PYTHONUNBUFFERED=1
27 |
28 | CMD ["bash", "run.sh"]
--------------------------------------------------------------------------------
/neurons/validators/Dockerfile.runner:
--------------------------------------------------------------------------------
1 | FROM docker:26-cli
2 | WORKDIR /root/validator
3 | ARG targetFile
4 | COPY ${targetFile} docker-compose.yml
5 | COPY entrypoint.sh /entrypoint.sh
6 | COPY version.txt .
7 |
8 | RUN chmod u+x /entrypoint.sh
9 | ENTRYPOINT ["/entrypoint.sh"]
10 |
--------------------------------------------------------------------------------
/neurons/validators/README.md:
--------------------------------------------------------------------------------
1 | # Validator
2 |
3 | ## System Requirements
4 |
5 | For validation, a validator machine will need:
6 |
7 | - **CPU**: 4 cores
8 | - **RAM**: 8 GB
9 |
10 | Ensure that your machine meets these requirements before proceeding with the setup.
11 |
12 | ---
13 |
14 | First, register your validator on the subnet, then regenerate (regen) your bittensor coldkey public key and hotkey on the machine.
15 |
16 | For installation of btcli, check [this guide](https://github.com/opentensor/bittensor/blob/master/README.md#install-bittensor-sdk)
17 | ```
18 | btcli s register --netuid 51
19 | ```
20 | ```
21 | btcli w regen_coldkeypub
22 | ```
23 | ```
24 | btcli w regen_hotkey
25 | ```
26 |
27 | ## Installation
28 |
29 | ### Using Docker
30 |
31 | #### Step 1: Clone Git repo
32 |
33 | ```
34 | git clone https://github.com/Datura-ai/compute-subnet.git
35 | ```
36 |
37 | #### Step 2: Install Required Tools
38 |
39 | ```
40 | cd compute-subnet && chmod +x scripts/install_validator_on_ubuntu.sh && ./scripts/install_validator_on_ubuntu.sh
41 | ```
42 |
43 | Verify docker installation
44 |
45 | ```
46 | docker --version
47 | ```
48 | If Docker did not install correctly, follow [this link](https://docs.docker.com/engine/install/)
49 |
50 | #### Step 3: Setup ENV
51 | ```
52 | cp neurons/validators/.env.template neurons/validators/.env
53 | ```
54 |
55 | Fill in your own values for `BITTENSOR_WALLET_NAME`, `BITTENSOR_WALLET_HOTKEY_NAME`, and `HOST_WALLET_DIR`.
56 | If you want, you can use different ports for `INTERNAL_PORT` and `EXTERNAL_PORT`.
57 |
58 | #### Step 4: Docker Compose Up
59 |
60 | ```
61 | cd neurons/validators && docker compose up -d
62 | ```
63 |
--------------------------------------------------------------------------------
/neurons/validators/alembic.ini:
--------------------------------------------------------------------------------
1 | # A generic, single database configuration.
2 |
3 | [alembic]
4 | # path to migration scripts
5 | # Use forward slashes (/) also on windows to provide an os agnostic path
6 | script_location = migrations
7 |
8 | # template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
9 | # Uncomment the line below if you want the files to be prepended with date and time
10 | # see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file
11 | # for all available tokens
12 | # file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
13 |
14 | # sys.path path, will be prepended to sys.path if present.
15 | # defaults to the current working directory.
16 | prepend_sys_path = src
17 |
18 | # timezone to use when rendering the date within the migration file
19 | # as well as the filename.
20 | # If specified, requires the python>=3.9 or backports.zoneinfo library.
21 | # Any required deps can installed by adding `alembic[tz]` to the pip requirements
22 | # string value is passed to ZoneInfo()
23 | # leave blank for localtime
24 | # timezone =
25 |
26 | # max length of characters to apply to the "slug" field
27 | # truncate_slug_length = 40
28 |
29 | # set to 'true' to run the environment during
30 | # the 'revision' command, regardless of autogenerate
31 | # revision_environment = false
32 |
33 | # set to 'true' to allow .pyc and .pyo files without
34 | # a source .py file to be detected as revisions in the
35 | # versions/ directory
36 | # sourceless = false
37 |
38 | # version location specification; This defaults
39 | # to migrations/versions. When using multiple version
40 | # directories, initial revisions must be specified with --version-path.
41 | # The path separator used here should be the separator specified by "version_path_separator" below.
42 | # version_locations = %(here)s/bar:%(here)s/bat:migrations/versions
43 |
44 | # version path separator; As mentioned above, this is the character used to split
45 | # version_locations. The default within new alembic.ini files is "os", which uses os.pathsep.
46 | # If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas.
47 | # Valid values for version_path_separator are:
48 | #
49 | # version_path_separator = :
50 | # version_path_separator = ;
51 | # version_path_separator = space
52 | version_path_separator = os # Use os.pathsep. Default configuration used for new projects.
53 |
54 | # set to 'true' to search source files recursively
55 | # in each "version_locations" directory
56 | # new in Alembic version 1.10
57 | # recursive_version_locations = false
58 |
59 | # the output encoding used when revision files
60 | # are written from script.py.mako
61 | # output_encoding = utf-8
62 |
63 | sqlalchemy.url = driver://user:pass@localhost/dbname
64 |
65 |
66 | [post_write_hooks]
67 | # post_write_hooks defines scripts or Python functions that are run
68 | # on newly generated revision scripts. See the documentation for further
69 | # detail and examples
70 |
71 | # format using "black" - use the console_scripts runner, against the "black" entrypoint
72 | # hooks = black
73 | # black.type = console_scripts
74 | # black.entrypoint = black
75 | # black.options = -l 79 REVISION_SCRIPT_FILENAME
76 |
77 | # lint with attempts to fix using "ruff" - use the exec runner, execute a binary
78 | # hooks = ruff
79 | # ruff.type = exec
80 | # ruff.executable = %(here)s/.venv/bin/ruff
81 | # ruff.options = --fix REVISION_SCRIPT_FILENAME
82 |
83 | # Logging configuration
84 | [loggers]
85 | keys = root,sqlalchemy,alembic
86 |
87 | [handlers]
88 | keys = console
89 |
90 | [formatters]
91 | keys = generic
92 |
93 | [logger_root]
94 | level = WARN
95 | handlers = console
96 | qualname =
97 |
98 | [logger_sqlalchemy]
99 | level = WARN
100 | handlers =
101 | qualname = sqlalchemy.engine
102 |
103 | [logger_alembic]
104 | level = INFO
105 | handlers =
106 | qualname = alembic
107 |
108 | [handler_console]
109 | class = StreamHandler
110 | args = (sys.stderr,)
111 | level = NOTSET
112 | formatter = generic
113 |
114 | [formatter_generic]
115 | format = %(levelname)-5.5s [%(name)s] %(message)s
116 | datefmt = %H:%M:%S
117 |
--------------------------------------------------------------------------------
/neurons/validators/docker-compose.app.dev.yml:
--------------------------------------------------------------------------------
1 | services:
2 | db:
3 | image: postgres:14.0-alpine
4 | healthcheck:
5 | test: pg_isready -U postgres || exit 1
6 | environment:
7 | - POSTGRES_DB=${POSTGRES_DB}
8 | - POSTGRES_USER=${POSTGRES_USER}
9 | - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
10 | restart: unless-stopped
11 | env_file: ./.env
12 | volumes:
13 | - db_data:/var/lib/postgresql/data
14 |
15 | redis:
16 | image: daturaai/redis:7.4.2
17 | healthcheck:
18 | test: redis-cli ping
19 | environment:
20 | - REDIS_MODE=master
21 | - REDIS_REPLICATION_MODE=master
22 | restart: unless-stopped
23 | volumes:
24 | - redis_data:/data
25 |
26 | validator:
27 | image: daturaai/compute-subnet-validator:dev
28 | env_file: ./.env
29 | environment:
30 | - SQLALCHEMY_DATABASE_URI=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:5432/${POSTGRES_DB}
31 | - ASYNC_SQLALCHEMY_DATABASE_URI=postgresql+asyncpg://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:5432/${POSTGRES_DB}
32 | - REDIS_HOST=redis
33 | - REDIS_PORT=6379
34 | ports:
35 | - ${EXTERNAL_PORT}:${INTERNAL_PORT}
36 | expose:
37 | - ${INTERNAL_PORT}
38 | volumes:
39 | - ${HOST_WALLET_DIR}:/root/.bittensor/wallets
40 | depends_on:
41 | - db
42 | - redis
43 |
44 | connector:
45 | image: daturaai/compute-subnet-validator:dev
46 | env_file: ./.env
47 | environment:
48 | - SQLALCHEMY_DATABASE_URI=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:5432/${POSTGRES_DB}
49 | - ASYNC_SQLALCHEMY_DATABASE_URI=postgresql+asyncpg://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:5432/${POSTGRES_DB}
50 | - REDIS_HOST=redis
51 | - REDIS_PORT=6379
52 | - COMPUTE_APP_URI=wss://dev.celiumcompute.ai
53 | command: pdm run src/connector.py
54 | volumes:
55 | - ${HOST_WALLET_DIR}:/root/.bittensor/wallets
56 | depends_on:
57 | - db
58 | - redis
59 |
60 | volumes:
61 | db_data:
62 | redis_data:
63 |
--------------------------------------------------------------------------------
/neurons/validators/docker-compose.app.yml:
--------------------------------------------------------------------------------
1 | services:
2 | db:
3 | image: postgres:14.0-alpine
4 | healthcheck:
5 | test: pg_isready -U postgres || exit 1
6 | restart: unless-stopped
7 | environment:
8 | - POSTGRES_DB=${POSTGRES_DB}
9 | - POSTGRES_USER=${POSTGRES_USER}
10 | - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
11 | env_file: ./.env
12 | volumes:
13 | - db_data:/var/lib/postgresql/data
14 |
15 | redis:
16 | image: daturaai/redis:7.4.2
17 | healthcheck:
18 | test: redis-cli ping
19 | environment:
20 | - REDIS_MODE=master
21 | - REDIS_REPLICATION_MODE=master
22 | restart: unless-stopped
23 | volumes:
24 | - redis_data:/data
25 |
26 | validator:
27 | image: daturaai/compute-subnet-validator:latest
28 | env_file: ./.env
29 | environment:
30 | - SQLALCHEMY_DATABASE_URI=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:5432/${POSTGRES_DB}
31 | - ASYNC_SQLALCHEMY_DATABASE_URI=postgresql+asyncpg://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:5432/${POSTGRES_DB}
32 | - REDIS_HOST=redis
33 | - REDIS_PORT=6379
34 | ports:
35 | - ${EXTERNAL_PORT}:${INTERNAL_PORT}
36 | expose:
37 | - ${INTERNAL_PORT}
38 | volumes:
39 | - ${HOST_WALLET_DIR}:/root/.bittensor/wallets
40 | depends_on:
41 | - db
42 | - redis
43 |
44 | connector:
45 | image: daturaai/compute-subnet-validator:latest
46 | env_file: ./.env
47 | environment:
48 | - SQLALCHEMY_DATABASE_URI=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:5432/${POSTGRES_DB}
49 | - ASYNC_SQLALCHEMY_DATABASE_URI=postgresql+asyncpg://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:5432/${POSTGRES_DB}
50 | - REDIS_HOST=redis
51 | - REDIS_PORT=6379
52 | - COMPUTE_APP_URI=wss://celiumcompute.ai
53 | command: pdm run src/connector.py
54 | volumes:
55 | - ${HOST_WALLET_DIR}:/root/.bittensor/wallets
56 | depends_on:
57 | - db
58 | - redis
59 |
60 | volumes:
61 | db_data:
62 | redis_data:
63 |
--------------------------------------------------------------------------------
/neurons/validators/docker-compose.dev.yml:
--------------------------------------------------------------------------------
1 | version: '3.7'
2 |
3 | services:
4 | validator-runner:
5 | image: daturaai/compute-subnet-validator-runner:dev
6 | restart: unless-stopped
7 | volumes:
8 | - /var/run/docker.sock:/var/run/docker.sock
9 | - "$HOME/.bittensor/wallets:/root/.bittensor/wallets"
10 | - ./.env:/root/validator/.env
11 | labels:
12 | - "com.centurylinklabs.watchtower.enable=true"
13 |
14 | watchtower:
15 | image: containrrr/watchtower:1.7.1
16 | restart: unless-stopped
17 | volumes:
18 | - /var/run/docker.sock:/var/run/docker.sock
19 | command: --interval 60 --cleanup --label-enable
20 |
--------------------------------------------------------------------------------
/neurons/validators/docker-compose.local.yml:
--------------------------------------------------------------------------------
1 | services:
2 | db:
3 | image: postgres:14.0-alpine
4 | healthcheck:
5 | test: pg_isready -U postgres || exit 1
6 | environment:
7 | - POSTGRES_DB=${POSTGRES_DB}
8 | - POSTGRES_USER=${POSTGRES_USER}
9 | - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
10 | restart: unless-stopped
11 | env_file: ./.env
12 | volumes:
13 | - db_data:/var/lib/postgresql/data
14 | ports:
15 | - ${POSTGRES_PORT}:5432
16 |
17 | redis:
18 | image: daturaai/redis:7.4.2
19 | environment:
20 | - REDIS_MODE=master
21 | - REDIS_REPLICATION_MODE=master
22 | healthcheck:
23 | test: redis-cli ping
24 | restart: unless-stopped
25 | volumes:
26 | - redis_data:/data
27 | ports:
28 | - 6379:6379
29 |
30 | volumes:
31 | db_data:
32 | redis_data:
--------------------------------------------------------------------------------
/neurons/validators/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.7'
2 |
3 | services:
4 | validator-runner:
5 | image: daturaai/compute-subnet-validator-runner:latest
6 | restart: unless-stopped
7 | volumes:
8 | - /var/run/docker.sock:/var/run/docker.sock
9 | - "$HOME/.bittensor/wallets:/root/.bittensor/wallets"
10 | - ./.env:/root/validator/.env
11 | labels:
12 | - "com.centurylinklabs.watchtower.enable=true"
13 |
14 | watchtower:
15 | image: containrrr/watchtower:1.7.1
16 | restart: unless-stopped
17 | volumes:
18 | - /var/run/docker.sock:/var/run/docker.sock
19 | command: --interval 60 --cleanup --label-enable
20 |
--------------------------------------------------------------------------------
/neurons/validators/docker_build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -eux -o pipefail
3 |
4 | IMAGE_NAME="daturaai/compute-subnet-validator:$TAG"
5 |
6 | docker build --build-context datura=../../datura -t $IMAGE_NAME .
--------------------------------------------------------------------------------
/neurons/validators/docker_publish.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -eux -o pipefail
3 |
4 | source ./docker_build.sh
5 |
6 | echo "$DOCKERHUB_PAT" | docker login -u "$DOCKERHUB_USERNAME" --password-stdin
7 | docker push "$IMAGE_NAME"
8 |
9 | docker rmi "$IMAGE_NAME"
10 | docker builder prune -f
--------------------------------------------------------------------------------
/neurons/validators/docker_runner_build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -eux -o pipefail
3 |
4 | IMAGE_NAME="daturaai/compute-subnet-validator-runner:$TAG"
5 |
6 | docker build --file Dockerfile.runner --build-arg targetFile=$TARGET_FILE -t $IMAGE_NAME .
--------------------------------------------------------------------------------
/neurons/validators/docker_runner_publish.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -eux -o pipefail
3 |
4 | source ./docker_runner_build.sh
5 |
6 | echo "$DOCKERHUB_PAT" | docker login -u "$DOCKERHUB_USERNAME" --password-stdin
7 | docker push "$IMAGE_NAME"
--------------------------------------------------------------------------------
/neurons/validators/entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | set -eu
3 |
4 | docker compose up --pull always --detach --wait --force-recreate
5 |
6 | # Clean docker images
7 | docker image prune -f
8 |
9 | # Remove all Docker images with a name but no tag
10 | # docker images --filter "dangling=false" --format "{{.Repository}}:{{.Tag}} {{.ID}}" | grep ":" | awk '{print $2}' | xargs -r docker rmi
11 |
12 | while true
13 | do
14 | docker compose logs -f
15 | echo 'All containers died'
16 | sleep 10
17 | done
18 |
--------------------------------------------------------------------------------
/neurons/validators/libdmcompverify.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Datura-ai/compute-subnet/39f006fd39394b9972ab509cc7ab558a8a374516/neurons/validators/libdmcompverify.so
--------------------------------------------------------------------------------
/neurons/validators/migrations/README:
--------------------------------------------------------------------------------
1 | Generic single-database configuration.
--------------------------------------------------------------------------------
/neurons/validators/migrations/env.py:
--------------------------------------------------------------------------------
1 | import os
2 | from logging.config import fileConfig
3 | from pathlib import Path
4 |
5 | from alembic import context
6 | from dotenv import load_dotenv
7 | from sqlalchemy import engine_from_config, pool
8 | from sqlmodel import SQLModel
9 |
10 | from models.executor import * # noqa
11 | from models.task import * # noqa
12 |
13 | # this is the Alembic Config object, which provides
14 | # access to the values within the .ini file in use.
15 | config = context.config
16 |
17 | # Interpret the config file for Python logging.
18 | # This line sets up loggers basically.
19 | if config.config_file_name is not None:
20 | fileConfig(config.config_file_name)
21 |
22 | # add your model's MetaData object here
23 | # for 'autogenerate' support
24 | # from myapp import mymodel
25 | # target_metadata = mymodel.Base.metadata
26 |
27 | target_metadata = SQLModel.metadata
28 |
29 | # other values from the config, defined by the needs of env.py,
30 | # can be acquired:
31 | # my_important_option = config.get_main_option("my_important_option")
32 | # ... etc.
33 |
34 | current_dir = Path(__file__).parent
35 |
36 | load_dotenv(str(current_dir / ".." / ".env"))
37 |
38 |
39 | def get_url():
40 | url = os.getenv("SQLALCHEMY_DATABASE_URI")
41 | return url
42 |
43 |
44 | def run_migrations_offline() -> None:
45 | """Run migrations in 'offline' mode.
46 |
47 | This configures the context with just a URL
48 | and not an Engine, though an Engine is acceptable
49 | here as well. By skipping the Engine creation
50 | we don't even need a DBAPI to be available.
51 |
52 | Calls to context.execute() here emit the given string to the
53 | script output.
54 |
55 | """
56 | url = get_url()
57 | context.configure(
58 | url=url,
59 | target_metadata=target_metadata,
60 | literal_binds=True,
61 | dialect_opts={"paramstyle": "named"},
62 | )
63 |
64 | with context.begin_transaction():
65 | context.run_migrations()
66 |
67 |
68 | def run_migrations_online() -> None:
69 | """Run migrations in 'online' mode.
70 |
71 | In this scenario we need to create an Engine
72 | and associate a connection with the context.
73 |
74 | """
75 | configuration = config.get_section(config.config_ini_section)
76 | configuration["sqlalchemy.url"] = get_url()
77 | connectable = engine_from_config(
78 | configuration,
79 | prefix="sqlalchemy.",
80 | poolclass=pool.NullPool,
81 | )
82 |
83 | with connectable.connect() as connection:
84 | context.configure(connection=connection, target_metadata=target_metadata)
85 |
86 | with context.begin_transaction():
87 | context.run_migrations()
88 |
89 |
90 | if context.is_offline_mode():
91 | run_migrations_offline()
92 | else:
93 | run_migrations_online()
94 |
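Note: migrations are normally applied with "alembic upgrade head" (see run.sh later in this snapshot), which executes this env.py. A minimal programmatic sketch of the same step, assuming it is run from neurons/validators/ with SQLALCHEMY_DATABASE_URI available (env.py loads it from the .env one directory above migrations/):

    # Sketch only: programmatic equivalent of "pdm run alembic upgrade head".
    from alembic import command
    from alembic.config import Config

    alembic_cfg = Config("alembic.ini")   # picks up script_location and logging setup
    command.upgrade(alembic_cfg, "head")  # runs migrations/env.py in online mode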
--------------------------------------------------------------------------------
/neurons/validators/migrations/script.py.mako:
--------------------------------------------------------------------------------
1 | """${message}
2 |
3 | Revision ID: ${up_revision}
4 | Revises: ${down_revision | comma,n}
5 | Create Date: ${create_date}
6 |
7 | """
8 | from typing import Sequence, Union
9 |
10 | from alembic import op
11 | import sqlalchemy as sa
12 | import sqlmodel
13 | import sqlmodel.sql.sqltypes
14 | ${imports if imports else ""}
15 |
16 | # revision identifiers, used by Alembic.
17 | revision: str = ${repr(up_revision)}
18 | down_revision: Union[str, None] = ${repr(down_revision)}
19 | branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
20 | depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
21 |
22 |
23 | def upgrade() -> None:
24 | ${upgrades if upgrades else "pass"}
25 |
26 |
27 | def downgrade() -> None:
28 | ${downgrades if downgrades else "pass"}
29 |
--------------------------------------------------------------------------------
/neurons/validators/migrations/versions/0653dc97382a_add_executors_table.py:
--------------------------------------------------------------------------------
1 | """Add executors table
2 |
3 | Revision ID: 0653dc97382a
4 | Revises: d5037a3f7b99
5 | Create Date: 2024-09-10 09:42:38.878136
6 |
7 | """
8 | from typing import Sequence, Union
9 |
10 | from alembic import op
11 | import sqlalchemy as sa
12 | import sqlmodel
13 | import sqlmodel.sql.sqltypes
14 |
15 |
16 | # revision identifiers, used by Alembic.
17 | revision: str = '0653dc97382a'
18 | down_revision: Union[str, None] = 'd5037a3f7b99'
19 | branch_labels: Union[str, Sequence[str], None] = None
20 | depends_on: Union[str, Sequence[str], None] = None
21 |
22 |
23 | def upgrade() -> None:
24 | # ### commands auto generated by Alembic - please adjust! ###
25 | op.create_table('executor',
26 | sa.Column('uuid', sa.Uuid(), nullable=False),
27 | sa.Column('miner_address', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
28 | sa.Column('miner_port', sa.Integer(), nullable=False),
29 | sa.Column('miner_hotkey', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
30 | sa.Column('executor_id', sa.Uuid(), nullable=False),
31 | sa.Column('executor_ip_address', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
32 | sa.Column('executor_ssh_username', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
33 | sa.Column('executor_ssh_port', sa.Integer(), nullable=False),
34 | sa.Column('rented', sa.Boolean(), nullable=True),
35 | sa.PrimaryKeyConstraint('uuid')
36 | )
37 | op.add_column('task', sa.Column('executor_id', sa.Uuid(), nullable=False))
38 | op.drop_column('task', 'ssh_private_key')
39 | # ### end Alembic commands ###
40 |
41 |
42 | def downgrade() -> None:
43 | # ### commands auto generated by Alembic - please adjust! ###
44 | op.add_column('task', sa.Column('ssh_private_key', sa.VARCHAR(), autoincrement=False, nullable=False))
45 | op.drop_column('task', 'executor_id')
46 | op.drop_table('executor')
47 | # ### end Alembic commands ###
48 |
--------------------------------------------------------------------------------
/neurons/validators/migrations/versions/d5037a3f7b99_create_task_model.py:
--------------------------------------------------------------------------------
1 | """create task model
2 |
3 | Revision ID: d5037a3f7b99
4 | Revises:
5 | Create Date: 2024-08-19 17:57:42.735518
6 |
7 | """
8 | from typing import Sequence, Union
9 |
10 | from alembic import op
11 | import sqlalchemy as sa
12 | import sqlmodel
13 | import sqlmodel.sql.sqltypes
14 |
15 |
16 | # revision identifiers, used by Alembic.
17 | revision: str = 'd5037a3f7b99'
18 | down_revision: Union[str, None] = None
19 | branch_labels: Union[str, Sequence[str], None] = None
20 | depends_on: Union[str, Sequence[str], None] = None
21 |
22 |
23 | def upgrade() -> None:
24 | # ### commands auto generated by Alembic - please adjust! ###
25 | op.create_table('task',
26 | sa.Column('uuid', sa.Uuid(), nullable=False),
27 | sa.Column('task_status', sa.Enum('Initiated', 'SSHConnected', 'Failed', 'Finished', name='taskstatus'), nullable=True),
28 | sa.Column('miner_hotkey', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
29 | sa.Column('ssh_private_key', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
30 | sa.Column('created_at', sa.DateTime(), nullable=False),
31 | sa.Column('proceed_time', sa.Integer(), nullable=True),
32 | sa.Column('score', sa.Float(), nullable=True),
33 | sa.PrimaryKeyConstraint('uuid')
34 | )
35 | # ### end Alembic commands ###
36 |
37 |
38 | def downgrade() -> None:
39 | # ### commands auto generated by Alembic - please adjust! ###
40 | op.drop_table('task')
41 | # ### end Alembic commands ###
42 |
--------------------------------------------------------------------------------
/neurons/validators/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "validators"
3 | version = "3.0.0"
4 | description = "Compute Subnet validator"
5 | authors = [
6 | {name = "waris", email = "waris0609@outlook.com"},
7 | ]
8 | dependencies = [
9 | "aiohappyeyeballs==2.4.6",
10 | "aiohttp==3.10.11",
11 | "aiosignal==1.3.2",
12 | "alembic==1.14.0",
13 | "altgraph==0.17.4",
14 | "annotated-types==0.7.0",
15 | "anyio==4.8.0",
16 | "asgiref==3.8.1",
17 | "async-property==0.2.2",
18 | "async-substrate-interface==1.0.0",
19 | "asyncpg==0.30.0",
20 | "asyncssh==2.19.0",
21 | "asyncstdlib==3.13.0",
22 | "attrs==25.1.0",
23 | "backoff==2.2.1",
24 | "base58==2.1.1",
25 | "bittensor==9.0.0",
26 | "bittensor-cli==9.0.0",
27 | "bittensor-commit-reveal==0.2.0",
28 | "bittensor-wallet==3.0.3",
29 | "bt-decode==0.5.0a2",
30 | "certifi==2025.1.31",
31 | "cffi==1.17.1",
32 | "charset-normalizer==3.4.1",
33 | "click==8.1.8",
34 | "colorama==0.4.6",
35 | "cryptography==43.0.3",
36 | "cytoolz==1.0.1",
37 | "databases==0.9.0",
38 | "datura @ file:///${PROJECT_ROOT}/../../datura",
39 | "decorator==5.1.1",
40 | "eth-hash==0.7.1",
41 | "eth-typing==5.1.0",
42 | "eth-utils==2.2.2",
43 | "fastapi==0.110.3",
44 | "frozenlist==1.5.0",
45 | "fuzzywuzzy==0.18.0",
46 | "gitdb==4.0.12",
47 | "GitPython==3.1.44",
48 | "greenlet==3.1.1",
49 | "h11==0.14.0",
50 | "idna==3.10",
51 | "iniconfig==2.0.0",
52 | "Jinja2==3.1.5",
53 | "Levenshtein==0.26.1",
54 | "Mako==1.3.9",
55 | "markdown-it-py==3.0.0",
56 | "MarkupSafe==3.0.2",
57 | "mdurl==0.1.2",
58 | "more-itertools==10.6.0",
59 | "msgpack==1.1.0",
60 | "msgpack-numpy-opentensor==0.5.0",
61 | "multidict==6.1.0",
62 | "munch==2.5.0",
63 | "narwhals==1.26.0",
64 | "nest-asyncio==1.6.0",
65 | "netaddr==1.3.0",
66 | "Nuitka==2.6.1",
67 | "numpy==2.0.2",
68 | "ordered-set==4.1.0",
69 | "packaging==24.2",
70 | "password-strength==0.0.3.post2",
71 | "pexpect==4.9.0",
72 | "plotille==5.0.0",
73 | "plotly==6.0.0",
74 | "pluggy==1.5.0",
75 | "propcache==0.2.1",
76 | "psutil==6.1.1",
77 | "psycopg2-binary==2.9.10",
78 | "ptyprocess==0.7.0",
79 | "py==1.11.0",
80 | "py-bip39-bindings==0.1.11",
81 | "pyarmor==9.0.6",
82 | "pyarmor.cli.core==7.6.3",
83 | "pycparser==2.22",
84 | "pycryptodome==3.21.0",
85 | "pydantic==2.10.6",
86 | "pydantic-settings==2.7.1",
87 | "pydantic_core==2.27.2",
88 | "Pygments==2.19.1",
89 | "pyinstaller==6.11.1",
90 | "pyinstaller-hooks-contrib==2025.1",
91 | "pytest==8.3.4",
92 | "python-dotenv==1.0.1",
93 | "python-json-logger==3.2.1",
94 | "python-Levenshtein==0.26.1",
95 | "python-statemachine==2.5.0",
96 | "pywry==0.6.2",
97 | "PyYAML==6.0.2",
98 | "RapidFuzz==3.12.1",
99 | "redis==5.2.1",
100 | "requests==2.32.3",
101 | "retry==0.9.2",
102 | "rich==13.9.4",
103 | "scalecodec==1.2.11",
104 | "setproctitle==1.3.4",
105 | "setuptools==70.0.0",
106 | "shellingham==1.5.4",
107 | "six==1.17.0",
108 | "smmap==5.0.2",
109 | "sniffio==1.3.1",
110 | "SQLAlchemy==2.0.36",
111 | "sqlmodel==0.0.22",
112 | "starlette==0.37.2",
113 | "tenacity==9.0.0",
114 | "termcolor==2.5.0",
115 | "toml==0.10.0",
116 | "toolz==1.0.0",
117 | "typer==0.15.1",
118 | "typing_extensions==4.12.2",
119 | "urllib3==2.3.0",
120 | "uvicorn==0.34.0",
121 | "websockets==14.2",
122 | "wheel==0.45.1",
123 | "xxhash==3.5.0",
124 | "yarl==1.18.3",
125 | "zstandard==0.23.0",
126 | ]
127 | requires-python = "==3.11.11"
128 | readme = "README.md"
129 | license = {text = "MIT"}
130 |
131 | [build-system]
132 | requires = ["pdm-backend"]
133 | build-backend = "pdm.backend"
134 |
135 |
136 | [tool.pdm]
137 | distribution = true
138 |
--------------------------------------------------------------------------------
/neurons/validators/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | # db migrate
4 | pdm run alembic upgrade head
5 |
6 | # run fastapi app
7 | pdm run src/validator.py
--------------------------------------------------------------------------------
/neurons/validators/src/clients/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Datura-ai/compute-subnet/39f006fd39394b9972ab509cc7ab558a8a374516/neurons/validators/src/clients/__init__.py
--------------------------------------------------------------------------------
/neurons/validators/src/connector.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import time
3 |
4 | from clients.compute_client import ComputeClient
5 |
6 | from core.config import settings
7 | from core.utils import get_logger, wait_for_services_sync
8 | from services.ioc import initiate_services, ioc
9 |
10 | logger = get_logger(__name__)
11 | wait_for_services_sync()
12 |
13 |
14 | async def run_forever():
15 | logger.info("Compute app connector started")
16 | await initiate_services()
17 | keypair = settings.get_bittensor_wallet().get_hotkey()
18 | compute_app_client = ComputeClient(
19 | keypair, f"{settings.COMPUTE_APP_URI}/validator/{keypair.ss58_address}", ioc["MinerService"]
20 | )
21 | async with compute_app_client:
22 | await compute_app_client.run_forever()
23 |
24 |
25 | def start_process():
26 | while True:
27 | try:
28 | loop = asyncio.new_event_loop()
29 | asyncio.set_event_loop(loop)
30 | loop.run_until_complete(run_forever())
31 | except Exception as e:
32 | logger.error(f"Compute app connector crashed: {e}", exc_info=True)
33 | time.sleep(1)
34 |
35 |
36 | if __name__ == "__main__":
37 | start_process()
38 |
39 | # def start_connector_process():
40 | # p = multiprocessing.Process(target=start_process)
41 | # p.start()
42 | # return p
43 |
--------------------------------------------------------------------------------
/neurons/validators/src/core/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Datura-ai/compute-subnet/39f006fd39394b9972ab509cc7ab558a8a374516/neurons/validators/src/core/__init__.py
--------------------------------------------------------------------------------
/neurons/validators/src/core/db.py:
--------------------------------------------------------------------------------
1 | from collections.abc import AsyncGenerator
2 | from typing import Annotated
3 |
4 | from fastapi import Depends
5 | from sqlalchemy.ext.asyncio import create_async_engine
6 | from sqlalchemy.orm import sessionmaker
7 | from sqlmodel.ext.asyncio.session import AsyncSession
8 |
9 | from core.config import settings
10 |
11 | engine = create_async_engine(str(settings.ASYNC_SQLALCHEMY_DATABASE_URI), echo=True, future=True)
12 |
13 |
14 | async def get_db() -> AsyncGenerator[AsyncSession, None]:
15 | async_session = sessionmaker(bind=engine, class_=AsyncSession, expire_on_commit=False)
16 | async with async_session() as session:
17 | yield session
18 |
19 |
20 | SessionDep = Annotated[AsyncSession, Depends(get_db)]
21 |
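Note: a minimal sketch (not part of the repository) of how the SessionDep dependency is consumed; the router, route path, and function name are illustrative only:

    from fastapi import APIRouter
    from sqlmodel import select

    from core.db import SessionDep
    from models.executor import Executor

    router = APIRouter()

    @router.get("/executors")                 # hypothetical endpoint for illustration
    async def list_executors(session: SessionDep):
        result = await session.exec(select(Executor))
        return result.all()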
--------------------------------------------------------------------------------
/neurons/validators/src/daos/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Datura-ai/compute-subnet/39f006fd39394b9972ab509cc7ab558a8a374516/neurons/validators/src/daos/__init__.py
--------------------------------------------------------------------------------
/neurons/validators/src/daos/base.py:
--------------------------------------------------------------------------------
1 | from core.db import SessionDep
2 |
3 |
4 | class BaseDao:
5 | def __init__(self, session: SessionDep):
6 | self.session = session
7 |
--------------------------------------------------------------------------------
/neurons/validators/src/daos/executor.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from sqlalchemy import select
4 |
5 | from daos.base import BaseDao
6 | from models.executor import Executor
7 |
8 | logger = logging.getLogger(__name__)
9 |
10 |
11 | class ExecutorDao(BaseDao):
12 | async def upsert(self, executor: Executor) -> Executor:
13 | try:
14 | existing_executor = await self.get_executor(
15 | executor_id=executor.executor_id, miner_hotkey=executor.miner_hotkey
16 | )
17 |
18 | if existing_executor:
19 | # Update the fields of the existing executor
20 | existing_executor.miner_address = executor.miner_address
21 | existing_executor.miner_port = executor.miner_port
22 | existing_executor.executor_ip_address = executor.executor_ip_address
23 | existing_executor.executor_ssh_username = executor.executor_ssh_username
24 | existing_executor.executor_ssh_port = executor.executor_ssh_port
25 |
26 | await self.session.commit()
27 | await self.session.refresh(existing_executor)
28 | return existing_executor
29 | else:
30 | # Insert the new executor
31 | self.session.add(executor)
32 | await self.session.commit()
33 | await self.session.refresh(executor)
34 |
35 | return executor
36 | except Exception as e:
37 | await self.session.rollback()
38 |             logger.error("Error upserting executor: %s", e)
39 | raise
40 |
41 | async def rent(self, executor_id: str, miner_hotkey: str) -> Executor:
42 | try:
43 | executor = await self.get_executor(executor_id=executor_id, miner_hotkey=miner_hotkey)
44 | if executor:
45 | executor.rented = True
46 | await self.session.commit()
47 | await self.session.refresh(executor)
48 |
49 | return executor
50 | except Exception as e:
51 | await self.session.rollback()
52 |             logger.error("Error renting executor: %s", e)
53 | raise
54 |
55 | async def unrent(self, executor_id: str, miner_hotkey: str) -> Executor:
56 | try:
57 | executor = await self.get_executor(executor_id=executor_id, miner_hotkey=miner_hotkey)
58 | if executor:
59 | executor.rented = False
60 | await self.session.commit()
61 | await self.session.refresh(executor)
62 |
63 | return executor
64 | except Exception as e:
65 | await self.session.rollback()
66 |             logger.error("Error unrenting executor: %s", e)
67 | raise
68 |
69 | async def get_executor(self, executor_id: str, miner_hotkey: str) -> Executor:
70 | try:
71 | statement = select(Executor).where(
72 | Executor.miner_hotkey == miner_hotkey, Executor.executor_id == executor_id
73 | )
74 | result = await self.session.exec(statement)
75 | return result.scalar_one_or_none()
76 | except Exception as e:
77 | await self.session.rollback()
78 |             logger.error("Error getting executor: %s", e)
79 | raise
80 |
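Note: an illustrative sketch of ExecutorDao.upsert with a session obtained from get_db; the register_executor helper and every field value below are placeholders, not real miner data:

    import uuid

    from core.db import get_db
    from daos.executor import ExecutorDao
    from models.executor import Executor

    async def register_executor() -> Executor:
        async for session in get_db():            # get_db yields one AsyncSession
            dao = ExecutorDao(session=session)
            saved = await dao.upsert(
                Executor(
                    miner_address="203.0.113.10",
                    miner_port=8000,
                    miner_hotkey="5E...placeholder",
                    executor_id=uuid.uuid4(),
                    executor_ip_address="203.0.113.20",
                    executor_ssh_username="root",
                    executor_ssh_port=22,
                )
            )
        return saved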
--------------------------------------------------------------------------------
/neurons/validators/src/daos/task.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timedelta
2 |
3 | import sqlalchemy
4 | from pydantic import BaseModel
5 | from sqlalchemy import func, select
6 |
7 | from daos.base import BaseDao
8 | from models.task import Task, TaskStatus
9 |
10 |
11 | class MinerScore(BaseModel):
12 | miner_hotkey: str
13 | total_score: float
14 |
15 |
16 | class TaskDao(BaseDao):
17 | async def save(self, task: Task) -> Task:
18 | try:
19 | self.session.add(task)
20 | await self.session.commit()
21 | await self.session.refresh(task)
22 | return task
23 | except Exception as e:
24 | await self.session.rollback()
25 | raise e
26 |
27 | async def update(self, uuid: str, **kwargs) -> Task:
28 | task = await self.get_task_by_uuid(uuid)
29 | if not task:
30 | return None # Or raise an exception if task is not found
31 |
32 | for key, value in kwargs.items():
33 | if hasattr(task, key):
34 | setattr(task, key, value)
35 |
36 | try:
37 | await self.session.commit()
38 | await self.session.refresh(task)
39 | return task
40 | except Exception as e:
41 | await self.session.rollback()
42 | raise e
43 |
44 | async def get_scores_for_last_epoch(self, tempo: int) -> list[MinerScore]:
45 | last_epoch = datetime.utcnow() - timedelta(seconds=tempo * 12)
46 |
47 | statement = (
48 | select(Task.miner_hotkey, func.sum(Task.score).label("total_score"))
49 | .where(
50 | Task.task_status.in_([TaskStatus.Finished, TaskStatus.Failed]),
51 | Task.created_at >= last_epoch,
52 | )
53 | .group_by(Task.miner_hotkey)
54 | )
55 | results: sqlalchemy.engine.result.ChunkedIteratorResult = await self.session.exec(statement)
56 | results = results.all()
57 |
58 | return [
59 | MinerScore(
60 | miner_hotkey=result[0],
61 | total_score=result[1],
62 | )
63 | for result in results
64 | ]
65 |
66 | async def get_task_by_uuid(self, uuid: str) -> Task:
67 | statement = select(Task).where(Task.uuid == uuid)
68 | results = await self.session.exec(statement)
69 | return results.scalar_one_or_none()
70 |
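Note: get_scores_for_last_epoch sums the scores of Finished and Failed tasks created within the last tempo * 12 seconds (one block is assumed to take about 12 s). A worked example of the cutoff, with tempo = 360 as an illustrative value:

    from datetime import datetime, timedelta

    tempo = 360                              # example epoch length in blocks
    window = timedelta(seconds=tempo * 12)   # 4320 s, i.e. 72 minutes
    cutoff = datetime.utcnow() - window      # tasks created before this are excluded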
--------------------------------------------------------------------------------
/neurons/validators/src/job.py:
--------------------------------------------------------------------------------
1 | import time
2 | import random
3 |
4 | start_time = time.time()
5 |
6 | wait_time = random.uniform(10, 30)
7 | time.sleep(wait_time)
8 |
9 | # print("Job finished")
10 | print(time.time() - start_time)
--------------------------------------------------------------------------------
/neurons/validators/src/miner_jobs/score.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 | import subprocess
4 | import tempfile
5 | import json
6 | import hashlib
7 | from base64 import b64encode
8 | import asyncio
9 |
10 |
11 | def gen_hash(s: bytes) -> bytes:
12 | return b64encode(hashlib.sha256(s).digest(), altchars=b"-_")
13 |
14 |
15 | payload = sys.argv[1]
16 | data = json.loads(payload)
17 |
18 | gpu_count = data["gpu_count"]
19 | num_job_params = data["num_job_params"]
20 | jobs = data["jobs"]
21 | timeout = data["timeout"]
22 |
23 |
24 | def run_hashcat(device_id: int, job: dict) -> list[str]:
25 | answers = []
26 | for i in range(num_job_params):
27 | payload = job["payloads"][i]
28 | mask = job["masks"][i]
29 | algorithm = job["algorithms"][i]
30 |
31 | with tempfile.NamedTemporaryFile(delete=True, suffix='.txt') as payload_file:
32 | payload_file.write(payload.encode('utf-8'))
33 | payload_file.flush()
34 | os.fsync(payload_file.fileno())
35 |
36 | if not os.path.exists(f"/usr/bin/hashcat{device_id}"):
37 | subprocess.check_output(f"cp /usr/bin/hashcat /usr/bin/hashcat{device_id}", shell=True)
38 |
39 | cmd = f'hashcat{device_id} --session=hashcat{device_id} --potfile-disable --restore-disable --attack-mode 3 -d {device_id} --workload-profile 3 --optimized-kernel-enable --hash-type {algorithm} --hex-salt -1 "?l?d?u" --outfile-format 2 --quiet --hwmon-disable {payload_file.name} "{mask}"'
40 | stdout = subprocess.check_output(cmd, shell=True, text=True)
41 | if stdout:
42 | passwords = [p for p in sorted(stdout.split("\n")) if p != ""]
43 | answers.append(passwords)
44 |
45 | return answers
46 |
47 |
48 | async def run_jobs():
49 | tasks = [
50 | asyncio.to_thread(
51 | run_hashcat,
52 | i+1,
53 | jobs[i]
54 | )
55 | for i in range(gpu_count)
56 | ]
57 |
58 | results = await asyncio.wait_for(asyncio.gather(*tasks, return_exceptions=True), timeout=timeout)
59 | result = {
60 | "answer": gen_hash("".join([
61 | "".join([
62 | "".join(passwords)
63 | for passwords in answers
64 | ])
65 | for answers in results
66 | ]).encode("utf-8")).decode("utf-8")
67 | }
68 |
69 | print(json.dumps(result))
70 |
71 | if __name__ == "__main__":
72 | asyncio.run(run_jobs())
73 |
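Note: score.py reads a single JSON document from sys.argv[1]. The shape below is inferred from the fields the script accesses above; all values are placeholders:

    import json

    payload = {
        "gpu_count": 1,          # one run_hashcat task per GPU
        "num_job_params": 1,     # payload/mask/algorithm triples per job
        "timeout": 60,           # seconds allowed for asyncio.gather
        "jobs": [
            {
                "payloads": ["<hash material for GPU 1>"],
                "masks": ["?l?l?l?l"],
                "algorithms": [0],   # hashcat --hash-type id (0 = MD5)
            }
        ],
    }
    arg = json.dumps(payload)    # passed as the script's single argument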
--------------------------------------------------------------------------------
/neurons/validators/src/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Datura-ai/compute-subnet/39f006fd39394b9972ab509cc7ab558a8a374516/neurons/validators/src/models/__init__.py
--------------------------------------------------------------------------------
/neurons/validators/src/models/executor.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 | from uuid import UUID, uuid4
3 | from sqlmodel import Field, SQLModel
4 |
5 |
6 | class Executor(SQLModel, table=True):
7 |     """Executor model."""
8 |
9 | uuid: UUID | None = Field(default_factory=uuid4, primary_key=True)
10 | miner_address: str
11 | miner_port: int
12 | miner_hotkey: str
13 | executor_id: UUID
14 | executor_ip_address: str
15 | executor_ssh_username: str
16 | executor_ssh_port: int
17 | rented: Optional[bool] = None
18 |
--------------------------------------------------------------------------------
/neurons/validators/src/models/task.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 | import enum
3 | import uuid
4 | from uuid import UUID
5 | from datetime import datetime
6 |
7 | from sqlmodel import Column, Enum, Field, SQLModel
8 |
9 |
10 | class TaskStatus(str, enum.Enum):
11 | Initiated = "Initiated"
12 | SSHConnected = "SSHConnected"
13 | Failed = "Failed"
14 | Finished = "Finished"
15 |
16 |
17 | class Task(SQLModel, table=True):
18 | """Task model."""
19 |
20 | uuid: UUID | None = Field(default_factory=uuid.uuid4, primary_key=True)
21 | task_status: TaskStatus = Field(sa_column=Column(Enum(TaskStatus)))
22 | miner_hotkey: str
23 | executor_id: UUID
24 | created_at: datetime = Field(default_factory=datetime.utcnow)
25 | proceed_time: Optional[int] = Field(default=None)
26 | score: Optional[float] = None
27 |
--------------------------------------------------------------------------------
/neurons/validators/src/payload_models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Datura-ai/compute-subnet/39f006fd39394b9972ab509cc7ab558a8a374516/neurons/validators/src/payload_models/__init__.py
--------------------------------------------------------------------------------
/neurons/validators/src/protocol/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Datura-ai/compute-subnet/39f006fd39394b9972ab509cc7ab558a8a374516/neurons/validators/src/protocol/__init__.py
--------------------------------------------------------------------------------
/neurons/validators/src/protocol/vc_protocol/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Datura-ai/compute-subnet/39f006fd39394b9972ab509cc7ab558a8a374516/neurons/validators/src/protocol/vc_protocol/__init__.py
--------------------------------------------------------------------------------
/neurons/validators/src/protocol/vc_protocol/compute_requests.py:
--------------------------------------------------------------------------------
1 | from typing import Literal
2 |
3 | from pydantic import BaseModel
4 |
5 |
6 | class Error(BaseModel, extra="allow"):
7 | msg: str
8 | type: str
9 | help: str = ""
10 |
11 |
12 | class Response(BaseModel, extra="forbid"):
13 | """Message sent from compute app to validator in response to AuthenticateRequest"""
14 |
15 | status: Literal["error", "success"]
16 | errors: list[Error] = []
17 |
18 |
19 | class RentedMachine(BaseModel):
20 | miner_hotkey: str
21 | executor_id: str
22 | executor_ip_address: str
23 | executor_ip_port: str
24 | container_name: str
25 | owner_flag: bool = False
26 |
27 |
28 | class RentedMachineResponse(BaseModel):
29 | machines: list[RentedMachine]
30 |
31 |
32 | class ExecutorUptimeResponse(BaseModel):
33 | executor_ip_address: str
34 | executor_ip_port: str
35 | uptime_in_minutes: int
36 |
37 |
38 | class RevenuePerGpuTypeResponse(BaseModel):
39 | revenues: dict[str, float]
40 |
--------------------------------------------------------------------------------
/neurons/validators/src/protocol/vc_protocol/validator_requests.py:
--------------------------------------------------------------------------------
1 | import enum
2 | import json
3 | import time
4 |
5 | import bittensor
6 | import pydantic
7 | from datura.requests.base import BaseRequest
8 |
9 |
10 | class RequestType(enum.Enum):
11 | AuthenticateRequest = "AuthenticateRequest"
12 | MachineSpecRequest = "MachineSpecRequest"
13 | ExecutorSpecRequest = "ExecutorSpecRequest"
14 | RentedMachineRequest = "RentedMachineRequest"
15 | LogStreamRequest = "LogStreamRequest"
16 | ResetVerifiedJobRequest = "ResetVerifiedJobRequest"
17 | DuplicateExecutorsRequest = "DuplicateExecutorsRequest"
18 | NormalizedScoreRequest = "NormalizedScoreRequest"
19 | RevenuePerGpuTypeRequest = "RevenuePerGpuTypeRequest"
20 | ScorePortionPerGpuTypeRequest = "ScorePortionPerGpuTypeRequest"
21 |
22 |
23 | class BaseValidatorRequest(BaseRequest):
24 | message_type: RequestType
25 |
26 |
27 | class AuthenticationPayload(pydantic.BaseModel):
28 | validator_hotkey: str
29 | timestamp: int
30 |
31 | def blob_for_signing(self):
32 | instance_dict = self.model_dump()
33 | return json.dumps(instance_dict, sort_keys=True)
34 |
35 |
36 | class AuthenticateRequest(BaseValidatorRequest):
37 | message_type: RequestType = RequestType.AuthenticateRequest
38 | payload: AuthenticationPayload
39 | signature: str
40 |
41 | def blob_for_signing(self):
42 | return self.payload.blob_for_signing()
43 |
44 | @classmethod
45 | def from_keypair(cls, keypair: bittensor.Keypair):
46 | payload = AuthenticationPayload(
47 | validator_hotkey=keypair.ss58_address,
48 | timestamp=int(time.time()),
49 | )
50 | return cls(payload=payload, signature=f"0x{keypair.sign(payload.blob_for_signing()).hex()}")
51 |
52 |
53 | class ExecutorSpecRequest(BaseValidatorRequest):
54 | message_type: RequestType = RequestType.ExecutorSpecRequest
55 | miner_hotkey: str
56 | miner_coldkey: str
57 | validator_hotkey: str
58 | executor_uuid: str
59 | executor_ip: str
60 | executor_port: int
61 | executor_price: float | None = None
62 | specs: dict | None
63 | score: float | None
64 | synthetic_job_score: float | None
65 | log_text: str | None
66 | log_status: str | None
67 | job_batch_id: str
68 |
69 |
70 | class RentedMachineRequest(BaseValidatorRequest):
71 | message_type: RequestType = RequestType.RentedMachineRequest
72 |
73 |
74 | class LogStreamRequest(BaseValidatorRequest):
75 | message_type: RequestType = RequestType.LogStreamRequest
76 | miner_hotkey: str
77 | validator_hotkey: str
78 | executor_uuid: str
79 | logs: list[dict]
80 |
81 |
82 | class ResetVerifiedJobReason(int, enum.Enum):
83 | DEFAULT = 0
84 | POD_NOT_RUNNING = 1 # container for pod is not running
85 |
86 |
87 | class ResetVerifiedJobRequest(BaseValidatorRequest):
88 | message_type: RequestType = RequestType.ResetVerifiedJobRequest
89 | validator_hotkey: str
90 | miner_hotkey: str
91 | executor_uuid: str
92 | reason: ResetVerifiedJobReason = ResetVerifiedJobReason.DEFAULT
93 |
94 |
95 | class DuplicateExecutorsRequest(BaseValidatorRequest):
96 | message_type: RequestType = RequestType.DuplicateExecutorsRequest
97 |
98 |
99 | class NormalizedScoreRequest(BaseValidatorRequest):
100 | message_type: RequestType = RequestType.NormalizedScoreRequest
101 | normalized_scores: list[dict]
102 |
103 |
104 | class RevenuePerGpuTypeRequest(BaseValidatorRequest):
105 | message_type: RequestType = RequestType.RevenuePerGpuTypeRequest
106 |
107 |
108 | class ScorePortionPerGpuTypeRequest(BaseValidatorRequest):
109 | message_type: RequestType = RequestType.ScorePortionPerGpuTypeRequest
110 | portions: dict[str, float]
111 |
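Note: AuthenticateRequest carries a signature over the canonical JSON blob of its payload. A sketch of the check a receiver could perform, assuming the standard bittensor Keypair ss58/verify API; the is_authentic helper is hypothetical and not code from this repository:

    import bittensor

    from protocol.vc_protocol.validator_requests import AuthenticateRequest

    def is_authentic(request: AuthenticateRequest) -> bool:
        keypair = bittensor.Keypair(ss58_address=request.payload.validator_hotkey)
        return keypair.verify(request.blob_for_signing(), request.signature)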
--------------------------------------------------------------------------------
/neurons/validators/src/services/ioc.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 |
3 | from services.docker_service import DockerService
4 | from services.miner_service import MinerService
5 | from services.ssh_service import SSHService
6 | from services.task_service import TaskService
7 | from services.redis_service import RedisService
8 | from services.file_encrypt_service import FileEncryptService
9 | from services.matrix_validation_service import ValidationService
10 |
11 | ioc = {}
12 |
13 |
14 | async def initiate_services():
15 | ioc["SSHService"] = SSHService()
16 | ioc["RedisService"] = RedisService()
17 | ioc["FileEncryptService"] = FileEncryptService(
18 | ssh_service=ioc["SSHService"],
19 | )
20 | ioc["ValidationService"] = ValidationService()
21 | ioc["TaskService"] = TaskService(
22 | ssh_service=ioc["SSHService"],
23 | redis_service=ioc["RedisService"],
24 | validation_service=ioc["ValidationService"]
25 | )
26 | ioc["DockerService"] = DockerService(
27 | ssh_service=ioc["SSHService"],
28 | redis_service=ioc["RedisService"]
29 | )
30 | ioc["MinerService"] = MinerService(
31 | ssh_service=ioc["SSHService"],
32 | task_service=ioc["TaskService"],
33 | redis_service=ioc["RedisService"]
34 | )
35 |
36 |
37 | def sync_initiate():
38 | loop = asyncio.get_event_loop()
39 | loop.run_until_complete(initiate_services())
40 |
41 |
42 | sync_initiate()
43 |
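Note: ioc is a plain dict used as a service registry; sync_initiate() populates it eagerly when services.ioc is first imported, and connector.py re-runs initiate_services() on its own event loop. A minimal sketch of resolving a service from the registry:

    from services.ioc import ioc

    miner_service = ioc["MinerService"]   # same instance seen by every importer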
--------------------------------------------------------------------------------
/neurons/validators/src/services/ssh_service.py:
--------------------------------------------------------------------------------
1 | import hashlib
2 | from base64 import b64encode
3 | import random
4 | import string
5 |
6 | from cryptography.fernet import Fernet
7 | from cryptography.hazmat.primitives import serialization
8 | from cryptography.hazmat.primitives.asymmetric import ed25519
9 |
10 |
11 | class SSHService:
12 | def generate_random_string(self, length=30, string_only=False):
13 | if string_only:
14 | characters = string.ascii_letters
15 | else:
16 | characters = (
17 | string.ascii_letters + string.digits + "/ +_"
18 | )
19 | random_string = ''.join(random.choices(characters, k=length))
20 | return random_string
21 |
22 | def _hash(self, s: bytes) -> bytes:
23 | return b64encode(hashlib.sha256(s).digest(), altchars=b"-_")
24 |
25 | def _encrypt(self, key: str, payload: str) -> str:
26 | key_bytes = self._hash(key.encode("utf-8"))
27 | return Fernet(key_bytes).encrypt(payload.encode("utf-8")).decode("utf-8")
28 |
29 | def decrypt_payload(self, key: str, encrypted_payload: str) -> str:
30 | key_bytes = self._hash(key.encode("utf-8"))
31 | return Fernet(key_bytes).decrypt(encrypted_payload.encode("utf-8")).decode("utf-8")
32 |
33 |     def generate_ssh_key(self, encryption_key: str) -> tuple[bytes, bytes]:
34 | """Generate SSH key pair.
35 |
36 | Args:
37 | encryption_key (str): key to encrypt the private key.
38 |
39 | Returns:
40 | (bytes, bytes): return (private key bytes, public key bytes)
41 | """
42 | # Generate a new private-public key pair
43 | private_key = ed25519.Ed25519PrivateKey.generate()
44 | public_key = private_key.public_key()
45 |
46 | private_key_bytes = private_key.private_bytes(
47 | encoding=serialization.Encoding.PEM,
48 | format=serialization.PrivateFormat.OpenSSH,
49 | # encryption_algorithm=BestAvailableEncryption(encryption_key.encode()),
50 | encryption_algorithm=serialization.NoEncryption(),
51 | )
52 | public_key_bytes = public_key.public_bytes(
53 | encoding=serialization.Encoding.OpenSSH,
54 | format=serialization.PublicFormat.OpenSSH,
55 | )
56 |
57 | # extract pub key content, excluding first line and end line
58 | # pub_key_str = "".join(public_key_bytes.decode().split("\n")[1:-2])
59 |
60 | return self._encrypt(encryption_key, private_key_bytes.decode("utf-8")).encode(
61 | "utf-8"
62 | ), public_key_bytes
63 |
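Note: the private key is Fernet-encrypted with a key derived from the SHA-256 hash of the passphrase, so decrypt_payload reverses generate_ssh_key exactly. A round-trip sketch with an arbitrary passphrase (illustrative only):

    from services.ssh_service import SSHService

    ssh_service = SSHService()
    passphrase = ssh_service.generate_random_string(length=30)

    encrypted_key, public_key = ssh_service.generate_ssh_key(passphrase)
    private_key_pem = ssh_service.decrypt_payload(passphrase, encrypted_key.decode("utf-8"))

    assert private_key_pem.startswith("-----BEGIN OPENSSH PRIVATE KEY-----")
    print(public_key.decode("utf-8"))     # "ssh-ed25519 AAAA..." public key line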
--------------------------------------------------------------------------------
/neurons/validators/src/test_validator.py:
--------------------------------------------------------------------------------
1 | # import asyncio
2 | # import bittensor
3 |
4 | # from core.config import settings
5 | # from fastapi.testclient import TestClient
6 | # from concurrent.futures import ThreadPoolExecutor, as_completed
7 | # from services.docker_service import DockerService
8 | # from services.ioc import ioc
9 |
10 | # from validator import app
11 | from protocol.vc_protocol.compute_requests import RentedMachine
12 | import json
13 |
14 | # client = TestClient(app)
15 |
16 |
17 | # def send_post_request():
18 | # response = client.post(
19 | # "/miner_request",
20 | # json={
21 | # "miner_hotkey": "5EHgHZBfx4ZwU7GzGCS8VCMBLBEKo5eaCvXKiu6SASwWT6UY",
22 | # "miner_address": "localhost",
23 | # "miner_port": 8000
24 | # },
25 | # )
26 | # assert response.status_code == 200
27 |
28 |
29 | # def test_socket_connections():
30 | # num_requests = 10 # Number of simultaneous requests
31 | # with ThreadPoolExecutor(max_workers=num_requests) as executor:
32 | # futures = [executor.submit(send_post_request) for _ in range(num_requests)]
33 |
34 | # for future in as_completed(futures):
35 | # response = future.result()
36 | # assert response.status_code == 200
37 |
38 |
39 | # async def check_docker_port_mappings():
40 | # docker_service: DockerService = ioc["DockerService"]
41 | # miner_hotkey = '5Df8qGLMd19BXByefGCZFN57fWv6jDm5hUbnQeUTu2iqNBhT'
42 | # executor_id = 'c272060f-8eae-4265-8e26-1d83ac96b498'
43 | # port_mappings = await docker_service.generate_portMappings(miner_hotkey, executor_id)
44 | # print('port_mappings ==>', port_mappings)
45 |
46 | if __name__ == "__main__":
47 | # test_socket_connections()
48 | # asyncio.run(check_docker_port_mappings())
49 |
50 | # config = settings.get_bittensor_config()
51 | # subtensor = bittensor.subtensor(config=config)
52 | # node = subtensor.substrate
53 |
54 | # netuid = settings.BITTENSOR_NETUID
55 | # tempo = subtensor.tempo(netuid)
56 | # weights_rate_limit = node.query("SubtensorModule", "WeightsSetRateLimit", [netuid]).value
57 | # server_rate_limit = node.query("SubtensorModule", "WeightsSetRateLimit", [netuid]).value
58 | # serving_rate_limit = node.query("SubtensorModule", "ServingRateLimit", [netuid]).value
59 | # print('rate limit ===>', tempo, weights_rate_limit, serving_rate_limit)
60 |
61 | rented_machine = RentedMachine(
62 | miner_hotkey="miner_hotkey",
63 | executor_id='executor_id',
64 | executor_ip_address='executor_ip_address',
65 | executor_ip_port='2000',
66 | container_name='container_name',
67 | )
68 |     rented_machine_str = rented_machine.model_dump_json()
69 | machine = RentedMachine.model_validate(json.loads(rented_machine_str))
70 | print(json.loads(rented_machine_str))
71 |
--------------------------------------------------------------------------------
/neurons/validators/src/validator.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import logging
3 |
4 | import uvicorn
5 | from fastapi import FastAPI
6 |
7 | from core.config import settings
8 | from core.utils import configure_logs_of_other_modules, wait_for_services_sync
9 | from core.validator import Validator
10 |
11 | configure_logs_of_other_modules()
12 | wait_for_services_sync()
13 |
14 |
15 | async def app_lifespan(app: FastAPI):
16 | if settings.DEBUG:
17 | validator = Validator(debug_miner=settings.get_debug_miner())
18 | else:
19 | validator = Validator()
20 | # Run the miner in the background
21 | task = asyncio.create_task(validator.start())
22 |
23 | try:
24 | yield
25 | finally:
26 | await validator.stop() # Ensure proper cleanup
27 | await task # Wait for the background task to complete
28 | logging.info("Validator exited successfully.")
29 |
30 |
31 | app = FastAPI(
32 | title=settings.PROJECT_NAME,
33 | lifespan=app_lifespan,
34 | )
35 |
36 | reload = True if settings.ENV == "dev" else False
37 |
38 | if __name__ == "__main__":
39 | uvicorn.run("validator:app", host="0.0.0.0", port=settings.INTERNAL_PORT, reload=reload)
40 |
--------------------------------------------------------------------------------
/neurons/validators/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Datura-ai/compute-subnet/39f006fd39394b9972ab509cc7ab558a8a374516/neurons/validators/tests/__init__.py
--------------------------------------------------------------------------------
/neurons/validators/version.txt:
--------------------------------------------------------------------------------
1 | 3.8.6
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "compute-subnet"
3 | version = "0.1.0"
4 | description = "Datura compute subnet"
5 | authors = [
6 | {name = "waris", email = "waris0609@outlook.com"},
7 | ]
8 | dependencies = [
9 | ]
10 | requires-python = "==3.11.*"
11 | readme = "README.md"
12 | license = {text = "MIT"}
13 |
14 | [build-system]
15 | requires = ["pdm-backend"]
16 | build-backend = "pdm.backend"
17 |
18 |
19 | [tool.pdm]
20 | distribution = true
21 |
22 | [tool.pdm.dev-dependencies]
23 | dev = [
24 | "ruff>=0.5.1",
25 | "pre-commit>=3.7.1",
26 | ]
27 |
28 | format = [
29 | "ruff",
30 | ]
31 | lint = [
32 | "ruff",
33 | ]
34 |
35 | [tool.ruff]
36 | # TODO: validator project path
37 | src = ["neurons/miners/src"]
38 | line-length = 100
39 |
40 | [tool.ruff.lint]
41 | # TODO add D
42 | select = ["E", "F", "I", "UP"]
43 | # TODO: remove E501 once docstrings are formatted
44 | ignore = [
45 | "D100", "D105", "D107", "D200", "D202", "D203", "D205", "D212", "D400", "D401", "D415",
46 | "D101", "D102","D103", "D104", # TODO remove once we have docstring for all public methods
47 | "E501", # TODO: remove E501 once docstrings are formatted
48 | ]
49 |
50 | [tool.ruff.lint.per-file-ignores]
51 | "__init__.py" = ["F401"]
52 | "test/**" = ["D", "F403", "F405"]
--------------------------------------------------------------------------------
/scripts/check_compatibility.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ -z "$1" ]; then
4 | echo "Please provide a Python version as an argument."
5 | exit 1
6 | fi
7 |
8 | python_version="$1"
9 | all_passed=true
10 |
11 | GREEN='\033[0;32m'
12 | YELLOW='\033[0;33m'
13 | RED='\033[0;31m'
14 | NC='\033[0m' # No Color
15 |
16 | check_compatibility() {
17 | all_supported=0
18 |
19 | while read -r requirement; do
20 | # Skip lines starting with git+
21 | if [[ "$requirement" == git+* ]]; then
22 | continue
23 | fi
24 |
25 | package_name=$(echo "$requirement" | awk -F'[!=<>]' '{print $1}' | awk -F'[' '{print $1}') # Strip off brackets
26 | echo -n "Checking $package_name... "
27 |
28 | url="https://pypi.org/pypi/$package_name/json"
29 | response=$(curl -s $url)
30 | status_code=$(curl -s -o /dev/null -w "%{http_code}" $url)
31 |
32 | if [ "$status_code" != "200" ]; then
33 | echo -e "${RED}Information not available for $package_name. Failure.${NC}"
34 | all_supported=1
35 | continue
36 | fi
37 |
38 | classifiers=$(echo "$response" | jq -r '.info.classifiers[]')
39 | requires_python=$(echo "$response" | jq -r '.info.requires_python')
40 |
41 | base_version="Programming Language :: Python :: ${python_version%%.*}"
42 | specific_version="Programming Language :: Python :: $python_version"
43 |
44 | if echo "$classifiers" | grep -q "$specific_version" || echo "$classifiers" | grep -q "$base_version"; then
45 | echo -e "${GREEN}Supported${NC}"
46 | elif [ "$requires_python" != "null" ]; then
47 | if echo "$requires_python" | grep -Eq "==$python_version|>=$python_version|<=$python_version"; then
48 | echo -e "${GREEN}Supported${NC}"
49 | else
50 | echo -e "${RED}Not compatible with Python $python_version due to constraint $requires_python.${NC}"
51 | all_supported=1
52 | fi
53 | else
54 | echo -e "${YELLOW}Warning: Specific version not listed, assuming compatibility${NC}"
55 | fi
56 | done < requirements.txt
57 |
58 | return $all_supported
59 | }
60 |
61 | echo "Checking compatibility for Python $python_version..."
62 | check_compatibility
63 | if [ $? -eq 0 ]; then
64 | echo -e "${GREEN}All requirements are compatible with Python $python_version.${NC}"
65 | else
66 | echo -e "${RED}Some requirements are NOT compatible with Python $python_version.${NC}"
67 | all_passed=false
68 | fi
69 |
70 | echo ""
71 | if $all_passed; then
72 | echo -e "${GREEN}All tests passed.${NC}"
73 | else
74 |     echo -e "${RED}Some tests did not pass.${NC}"
75 | exit 1
76 | fi
77 |
--------------------------------------------------------------------------------
/scripts/check_requirements_changes.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Check if requirements files have changed in the last commit
4 | if git diff --name-only HEAD~1 | grep -E 'requirements.txt'; then
5 | echo "Requirements files have changed. Running compatibility checks..."
6 | echo 'export REQUIREMENTS_CHANGED="true"' >> $BASH_ENV
7 | else
8 | echo "Requirements files have not changed. Skipping compatibility checks..."
9 | echo 'export REQUIREMENTS_CHANGED="false"' >> $BASH_ENV
10 | fi
11 |
--------------------------------------------------------------------------------
/scripts/install_dev.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -u
4 |
5 | # enable command completion
6 | set -o history -o histexpand
7 |
8 | abort() {
9 | printf "%s\n" "$1"
10 | exit 1
11 | }
12 |
13 | getc() {
14 | local save_state
15 | save_state=$(/bin/stty -g)
16 | /bin/stty raw -echo
17 | IFS= read -r -n 1 -d '' "$@"
18 | /bin/stty "$save_state"
19 | }
20 |
21 | exit_on_error() {
22 | exit_code=$1
23 | last_command=${@:2}
24 | if [ $exit_code -ne 0 ]; then
25 | >&2 echo "\"${last_command}\" command failed with exit code ${exit_code}."
26 | exit $exit_code
27 | fi
28 | }
29 |
30 | shell_join() {
31 | local arg
32 | printf "%s" "$1"
33 | shift
34 | for arg in "$@"; do
35 | printf " "
36 | printf "%s" "${arg// /\ }"
37 | done
38 | }
39 |
40 | # string formatters
41 | if [[ -t 1 ]]; then
42 | tty_escape() { printf "\033[%sm" "$1"; }
43 | else
44 | tty_escape() { :; }
45 | fi
46 | tty_mkbold() { tty_escape "1;$1"; }
47 | tty_underline="$(tty_escape "4;39")"
48 | tty_blue="$(tty_mkbold 34)"
49 | tty_red="$(tty_mkbold 31)"
50 | tty_bold="$(tty_mkbold 39)"
51 | tty_reset="$(tty_escape 0)"
52 |
53 | ohai() {
54 | printf "${tty_blue}==>${tty_bold} %s${tty_reset}\n" "$(shell_join "$@")"
55 | }
56 |
57 | wait_for_user() {
58 | local c
59 | echo
60 | echo "Press RETURN to continue or any other key to abort"
61 | getc c
62 | # we test for \r and \n because some stuff does \r instead
63 | if ! [[ "$c" == $'\r' || "$c" == $'\n' ]]; then
64 | exit 1
65 | fi
66 | }
67 |
68 | #install pre
69 | install_pre() {
70 | sudo apt update
71 | sudo apt install --no-install-recommends --no-install-suggests -y sudo apt-utils curl git cmake build-essential
72 | exit_on_error $?
73 | }
74 |
75 | # check if python is installed, if not install it
76 | install_python() {
77 | # Check if python3.11 is installed
78 | if command -v python3.11 &> /dev/null
79 | then
80 | # Check the version
81 | PYTHON_VERSION=$(python3.11 --version 2>&1)
82 | if [[ $PYTHON_VERSION == *"Python 3.11"* ]]; then
83 | ohai "Python 3.11 is already installed."
84 | else
85 | ohai "Linking python to python 3.11"
86 | sudo update-alternatives --install /usr/bin/python python /usr/bin/python3.11 1
87 | python -m pip install cffi
88 | python -m pip install cryptography
89 | fi
90 | else
91 | ohai "Installing Python 3.11"
92 |         sudo add-apt-repository -y ppa:deadsnakes/ppa
93 |         sudo apt install -y python3.11
94 | sudo update-alternatives --install /usr/bin/python python /usr/bin/python3.11 1
95 | python -m pip install cffi
96 | python -m pip install cryptography
97 | fi
98 |
99 | # check if PDM is installed
100 | if command -v pdm &> /dev/null
101 | then
102 | ohai "PDM is already installed."
103 | echo "Checking PDM version..."
104 | pdm --version
105 | else
106 | ohai "Installing PDM..."
107 | curl -sSL https://pdm-project.org/install-pdm.py | python3 -
108 |
109 | local bashrc_file="/root/.bashrc"
110 | local path_string="export PATH=/root/.local/bin:\$PATH"
111 |
112 | if ! grep -Fxq "$path_string" $bashrc_file; then
113 | echo "$path_string" >> $bashrc_file
114 | echo "Added $path_string to $bashrc_file"
115 | else
116 | echo "$path_string already present in $bashrc_file"
117 | fi
118 |
119 | export PATH=/root/.local/bin:$PATH
120 |
121 | echo "Checking PDM version..."
122 | pdm --version
123 | fi
124 |
125 | PROJECT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
126 | PROJECT_DIR=${PROJECT_DIR}/../
127 | cd ${PROJECT_DIR}
128 |
129 | ohai "Installing PDM packages in root folder."
130 | pdm install -d
131 |
132 | ohai "Installing pre-commit for the project."
133 | pdm run pre-commit install
134 | }
135 |
136 |
137 |
138 | ohai "This script will install:"
139 | echo "git"
140 | echo "curl"
141 | echo "python3.11 and pdm"
142 | echo "python3-pip"
143 | echo "pre-commit with ruff"
144 |
145 | wait_for_user
146 | install_pre
147 | install_python
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Datura-ai/compute-subnet/39f006fd39394b9972ab509cc7ab558a8a374516/tests/__init__.py
--------------------------------------------------------------------------------