├── .devcontainer └── devcontainer.json ├── .flake8 ├── .github └── workflows │ └── publish-docker.yaml ├── .gitignore ├── .isort.cfg ├── .pre-commit-config.yaml ├── .pylintrc ├── .vscode └── launch.json ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Dockerfile ├── LICENSE ├── README.md ├── __init__.py ├── competition_circuit └── .gitkeep ├── cspell.json ├── dependabot.yml ├── docs ├── README.md ├── command_line_arguments.md ├── competitions │ ├── QUICKSTART.md │ └── README.md ├── custom_circuit_integrations.md ├── images │ ├── proof_of_weights_architecture.png │ └── proof_of_weights_workflow.png ├── integration_plan.md ├── notebooks │ ├── dummy_age_detection.onnx │ ├── sn27_ezkl.ipynb │ ├── sn27_jolt.ipynb │ └── sn48_ezkl.ipynb ├── pm2_configuration.md ├── prometheus.md ├── running_on_mainnet.md ├── running_on_staging.md ├── running_on_testnet.md ├── shared_setup_steps.md └── versioning.md ├── ecosystem.config.tmpl.js ├── makefile ├── neurons ├── __init__.py ├── _miner │ ├── circuit_manager.py │ └── miner_session.py ├── _validator │ ├── api │ │ ├── __init__.py │ │ ├── cache.py │ │ ├── certificate_manager.py │ │ └── websocket_manager.py │ ├── competitions │ │ ├── 1 │ │ │ ├── age.onnx │ │ │ ├── competition_config.json │ │ │ ├── data_processor.py │ │ │ ├── input.json │ │ │ └── settings.json │ │ ├── __init__.py │ │ ├── competition.py │ │ ├── competition_manager.py │ │ ├── models │ │ │ ├── circuit.py │ │ │ ├── neuron.py │ │ │ └── sota.py │ │ ├── services │ │ │ ├── circuit_evaluator.py │ │ │ ├── circuit_manager.py │ │ │ ├── circuit_validator.py │ │ │ ├── data_source.py │ │ │ ├── onnx_runner.py │ │ │ └── sota_manager.py │ │ └── utils │ │ │ └── cleanup.py │ ├── config │ │ ├── __init__.py │ │ └── api.py │ ├── core │ │ ├── prometheus.py │ │ ├── request.py │ │ ├── request_pipeline.py │ │ ├── response_processor.py │ │ └── validator_loop.py │ ├── models │ │ ├── base_rpc_request.py │ │ ├── completed_proof_of_weights.py │ │ ├── miner_response.py │ │ ├── poc_rpc_request.py │ │ ├── pow_rpc_request.py │ │ └── request_type.py │ ├── pow │ │ └── proof_of_weights_handler.py │ ├── scoring │ │ ├── reward.py │ │ ├── score_manager.py │ │ └── weights.py │ ├── utils │ │ ├── api.py │ │ ├── axon.py │ │ ├── hash_guard.py │ │ ├── logging.py │ │ ├── pps.py │ │ ├── proof_of_weights.py │ │ └── uid.py │ └── validator_session.py ├── cli_parser.py ├── constants.py ├── deployment_layer │ ├── circuit_store.py │ ├── model_1876cfa9fb3c418b2559f3f7074db20565b5ca7237efdd43b907d9d697a452c4 │ │ ├── input.py │ │ ├── metadata.json │ │ ├── model.compiled │ │ ├── settings.json │ │ └── vk.key │ ├── model_1e6fcdaea58741e7248b631718dda90398a17b294480beb12ce8232e27ca3bff │ │ ├── circuit.wasm │ │ ├── input.py │ │ ├── metadata.json │ │ ├── settings.json │ │ └── verification_key.json │ ├── model_43ecaacaded5ed16c9e08bc054366e409c7925245eca547472b27f2a61469cc5 │ │ ├── input.py │ │ ├── metadata.json │ │ ├── model.compiled │ │ ├── settings.json │ │ └── vk.key │ ├── model_f5b6043594f46ae6bd176ce60c7a099291cc6a3f6436fecd46142b1b1ecca5fb │ │ ├── input.py │ │ ├── metadata.json │ │ ├── model.compiled │ │ ├── network.onnx │ │ ├── proof.json │ │ ├── settings.json │ │ └── vk.key │ └── model_fa0d509d52abe2d1e809124f8aba46258a02f7253582f7b7f5a22e1e0bca0dfb │ │ ├── circuit.wasm │ │ ├── input.py │ │ ├── metadata.json │ │ ├── settings.json │ │ └── verification_key.json ├── dry_run.py ├── execution_layer │ ├── base_input.py │ ├── circuit.py │ ├── generic_input.py │ ├── input_registry.py │ ├── proof_handlers │ │ ├── base_handler.py │ │ ├── circom_handler.py │ │ 
├── ezkl_handler.py │ │ ├── factory.py │ │ └── jolt_handler.py │ ├── session_storage.py │ └── verified_model_session.py ├── generate_test_input.py ├── incentive_mechanism.ipynb ├── miner.py ├── protocol.py ├── scripts │ ├── check_miner_axon.py │ ├── create_competition_circuit.py │ ├── plot_model_stats.py │ └── sign_api_request.py ├── utils │ ├── __init__.py │ ├── auto_update.py │ ├── epoch.py │ ├── gc_logging.py │ ├── metrics_logger.py │ ├── pre_flight.py │ ├── rate_limiter.py │ ├── system.py │ └── wandb_logger.py └── validator.py ├── pyproject.toml ├── requirements.txt ├── setup.sh ├── sync_model_files.sh ├── tach.toml └── uv.lock /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | // For format details, see https://aka.ms/devcontainer.json. For config options, see the 2 | // README at: https://github.com/devcontainers/templates/tree/main/src/ubuntu 3 | { 4 | "name": "Omron Dev", 5 | // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile 6 | // Built from https://github.com/inference-labs-inc/tensor-tools/blob/main/bittensor-devcontainer/Dockerfile 7 | "image": "ghcr.io/inference-labs-inc/bittensor-devcontainer:latest", 8 | "customizations": { 9 | "vscode": { 10 | "settings": { 11 | "terminal.integrated.defaultProfile.linux": "fish", 12 | "[python]": { 13 | "editor.tabSize": 4, 14 | "files.trimTrailingWhitespace": true, 15 | "editor.formatOnSave": true, 16 | "editor.defaultFormatter": "ms-python.black-formatter" 17 | }, 18 | "isort.args": [ 19 | "--settings-file", 20 | "./.isort.cfg" 21 | ] 22 | }, 23 | "extensions": [ 24 | "ms-python.black-formatter", 25 | "ms-python.python", 26 | "ms-python.isort", 27 | "ms-toolsai.jupyter" 28 | ] 29 | } 30 | }, 31 | // Install ezkl cli, done here instead of the Dockerfile to test new versions without rebuilding the image. 32 | "onCreateCommand": "curl https://raw.githubusercontent.com/zkonduit/ezkl/main/install_ezkl_cli.sh | bash -s v19.0.7", 33 | "remoteEnv": { "PATH": "${containerEnv:PATH}:/home/vscode/.ezkl" }, 34 | // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root. 
35 | // "remoteUser": "root", 36 | "mounts": [ 37 | { 38 | "source": "${localEnv:HOME}/.bittensor/omron", 39 | "target": "/home/vscode/.bittensor/omron", 40 | "type": "bind" 41 | }, 42 | { 43 | "source": "${localEnv:HOME}/.ezkl/srs", 44 | "target": "/home/vscode/.ezkl/srs", 45 | "type": "bind" 46 | } 47 | ] 48 | } -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 120 3 | ignore = E203, W503 4 | exclude = .git, __pycache__, venv 5 | 6 | [pycodestyle] 7 | max-line-length = 120 8 | ignore = E203, W503 9 | exclude = .git, __pycache__, venv 10 | -------------------------------------------------------------------------------- /.github/workflows/publish-docker.yaml: -------------------------------------------------------------------------------- 1 | #https://docs.github.com/en/actions/use-cases-and-examples/publishing-packages/publishing-docker-images#publishing-images-to-github-packages 2 | name: Create and publish a Docker image 3 | 4 | on: 5 | push: 6 | branches: 7 | - testnet 8 | release: 9 | types: [created] 10 | workflow_dispatch: null 11 | 12 | env: 13 | REGISTRY: ghcr.io 14 | IMAGE_NAME: ${{ github.repository_owner }}/omron 15 | 16 | jobs: 17 | build-and-push-image: 18 | runs-on: ubuntu-latest 19 | permissions: 20 | contents: read 21 | packages: write 22 | attestations: write 23 | id-token: write 24 | 25 | steps: 26 | - name: Checkout repository 27 | uses: actions/checkout@v4 28 | 29 | - name: Log in to the Container registry 30 | uses: docker/login-action@v3 31 | with: 32 | registry: ${{ env.REGISTRY }} 33 | username: ${{ github.actor }} 34 | password: ${{ secrets.GITHUB_TOKEN }} 35 | 36 | - name: Extract metadata (tags, labels) for Docker 37 | id: meta 38 | uses: docker/metadata-action@v5 39 | with: 40 | images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} 41 | 42 | - name: Build and push Docker image for testnet branch 43 | id: push-testnet 44 | uses: docker/build-push-action@v5 45 | with: 46 | context: . 47 | file: ./Dockerfile 48 | push: true 49 | tags: | 50 | ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:dev-${{ github.sha }} 51 | ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:dev 52 | labels: ${{ steps.meta.outputs.labels }} 53 | 54 | - name: Generate artifact attestation for testnet 55 | id: attest-testnet 56 | uses: actions/attest-build-provenance@v1 57 | with: 58 | subject-name: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME}} 59 | subject-digest: ${{ steps.push-testnet.outputs.digest }} 60 | push-to-registry: true 61 | 62 | - name: Build and push Docker image for release 63 | if: github.event_name == 'release' 64 | id: push-release 65 | uses: docker/build-push-action@v5 66 | with: 67 | context: . 
68 | file: ./Dockerfile 69 | push: true 70 | tags: | 71 | ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.event.release.tag_name }} 72 | ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest 73 | labels: ${{ steps.meta.outputs.labels }} 74 | 75 | - name: Generate artifact attestation for release 76 | if: github.event_name == 'release' 77 | id: attest-release 78 | uses: actions/attest-build-provenance@v1 79 | with: 80 | subject-name: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME}} 81 | subject-digest: ${{ steps.push-release.outputs.digest }} 82 | push-to-registry: true 83 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | **/__pycache__ 6 | **/**/__pycache__ 7 | 8 | .DS_Store 9 | 10 | # C extensions 11 | *.so 12 | 13 | # Distribution / packaging 14 | .Python 15 | build/ 16 | develop-eggs/ 17 | dist/ 18 | downloads/ 19 | eggs/ 20 | .eggs/ 21 | lib/ 22 | lib64/ 23 | parts/ 24 | sdist/ 25 | var/ 26 | wheels/ 27 | share/python-wheels/ 28 | *.egg-info/ 29 | .installed.cfg 30 | *.egg 31 | MANIFEST 32 | neurons/output.csv 33 | neurons/reddit_data.db 34 | 35 | # PyInstaller 36 | # Usually these files are written by a python script from a template 37 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 38 | *.manifest 39 | *.spec 40 | /wandb 41 | neurons/wandb_config.json 42 | 43 | # Installer logs 44 | pip-log.txt 45 | pip-delete-this-directory.txt 46 | 47 | # Unit test / coverage reports 48 | htmlcov/ 49 | .tox/ 50 | .nox/ 51 | .coverage 52 | .coverage.* 53 | .cache 54 | nosetests.xml 55 | coverage.xml 56 | *.cover 57 | *.py,cover 58 | .hypothesis/ 59 | .pytest_cache/ 60 | cover/ 61 | model_stats.png 62 | neurons/execution_layer/temp/* 63 | # Translations 64 | *.mo 65 | *.pot 66 | # Django stuff: 67 | *.log 68 | local_settings.py 69 | db.sqlite3 70 | db.sqlite3-journal 71 | 72 | # Flask stuff: 73 | instance/ 74 | .webassets-cache 75 | 76 | # Sphinx documentation 77 | docs/_build/ 78 | 79 | # PyBuilder 80 | .pybuilder/ 81 | target/ 82 | 83 | # Jupyter Notebook 84 | .ipynb_checkpoints 85 | 86 | # IPython 87 | profile_default/ 88 | ipython_config.py 89 | neurons/wandb/ 90 | neurons/accounts.db 91 | 92 | # pyenv 93 | # For a library or package, you might want to ignore these files since the code is 94 | # intended to run in multiple environments; otherwise, check them in: 95 | # .python-version 96 | 97 | # pipenv 98 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 99 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 100 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 101 | # install all needed dependencies. 102 | #Pipfile.lock 103 | 104 | # poetry 105 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 106 | # This is especially recommended for binary packages to ensure reproducibility, and is more 107 | # commonly ignored for libraries. 108 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 109 | #poetry.lock 110 | 111 | # pdm 112 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
113 | #pdm.lock 114 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 115 | # in version control. 116 | # https://pdm.fming.dev/#use-with-ide 117 | .pdm.toml 118 | 119 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 120 | __pypackages__/ 121 | 122 | # Celery stuff 123 | celerybeat-schedule 124 | celerybeat.pid 125 | 126 | # SageMath parsed files 127 | *.sage.py 128 | 129 | # Environments 130 | .env 131 | .venv 132 | env/ 133 | venv/ 134 | ENV/ 135 | env.bak/ 136 | venv.bak/ 137 | omron-venv/ 138 | 139 | # Spyder project settings 140 | .spyderproject 141 | .spyproject 142 | 143 | # Rope project settings 144 | .ropeproject 145 | 146 | # mkdocs documentation 147 | /site 148 | 149 | # mypy 150 | .mypy_cache/ 151 | .dmypy.json 152 | dmypy.json 153 | 154 | # Pyre type checker 155 | .pyre/ 156 | 157 | # pytype static type analyzer 158 | .pytype/ 159 | 160 | # Cython debug symbols 161 | cython_debug/ 162 | 163 | # PyCharm 164 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 165 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 166 | # and can be added to the global gitignore or merged into this file. For a more nuclear 167 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 168 | #.idea/ 169 | # 170 | scores.pt 171 | Makefile 172 | 173 | # Huge PKs 174 | pk.key 175 | pk_aggr.key 176 | kzg.srs 177 | # pm2 config file 178 | ecosystem.config.js 179 | neurons/_validator/proof_of_weights/ 180 | **.zkey 181 | **/target/ 182 | *.ptau 183 | docs/notebooks/input.json 184 | docs/notebooks/calibration.json 185 | docs/notebooks/network.onnx 186 | docs/notebooks/witness.json 187 | docs/notebooks/proof.json 188 | docs/notebooks/model.compiled 189 | docs/notebooks/settings.json 190 | docs/notebooks/*.key 191 | **/witness.json 192 | **/input.json 193 | competition_circuit/** 194 | !competition_circuit 195 | 196 | # intellij files 197 | .idea/ 198 | -------------------------------------------------------------------------------- /.isort.cfg: -------------------------------------------------------------------------------- 1 | [settings] 2 | py_version=39 3 | sections=FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER 4 | no_lines_before=LOCALFOLDER 5 | multi_line_output=3 6 | include_trailing_comma=True 7 | force_grid_wrap=0 8 | use_parentheses=True 9 | ensure_newline_before_comments=True 10 | line_length=88 11 | split_on_trailing_comma=True 12 | known_third_party=fastapi,bittensor 13 | known_first_party=_validator,_miner,constants,utils,execution_layer,deployment_layer,miner,validator,protocol,models,constants,cli_parser 14 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/psf/black 3 | rev: 24.8.0 4 | hooks: 5 | - id: black 6 | - repo: https://github.com/pycqa/flake8 7 | rev: 7.1.1 8 | hooks: 9 | - id: flake8 10 | -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | // Use IntelliSense to learn about possible attributes. 3 | // Hover to view descriptions of existing attributes. 
4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 5 | "version": "0.2.0", 6 | "configurations": [ 7 | { 8 | "name": "Python Debugger: Remote Attach", 9 | "type": "debugpy", 10 | "request": "attach", 11 | "connect": { 12 | "host": "localhost", 13 | "port": 5678 14 | }, 15 | "pathMappings": [ 16 | { 17 | "localRoot": "${workspaceFolder}", 18 | "remoteRoot": "." 19 | } 20 | ] 21 | } 22 | ] 23 | } -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | For Omron miners, validators, and operators of the subnet. 4 | 5 | ## General Principles 6 | 7 | - All parties shall conduct activities with transparency and accountability. 8 | - All parties shall not engage in any activity that could be construed as a conflict of interest. 9 | - All parties shall report any potential issues or exploits to the owners and team members through the official bug bounty program. 10 | 11 | > The official bug bounty program can be found at https://immunefi.com/bug-bounty/omron/ 12 | 13 | ## Subnet Operators 14 | 15 | - Shall conduct all development within the public `omron-subnet` repository. 16 | - Shall not disclose any sensitive information about future updates to third parties. 17 | - Shall make every reasonable effort to ensure that the subnet is secure and stable at all times. 18 | - Shall provide support to the community as needed. 19 | - Shall not give hints, tips, or tricks to any miners nor withhold information which may advantage any party. 20 | - Shall put all pending or scheduled code updates in the official publicly available code repository before and leading up to their release. 21 | - Shall allow a reasonable interval between publishing pending updates and their official release, except for critical security updates. 22 | 23 | ## Miners 24 | 25 | - Shall not collude with other miners or validators to manipulate the subnet. 26 | - Shall not participate in activities that are directly adversarial to other miners or validators, such as DDoS or malicious requests. 27 | 28 | ## Validators 29 | 30 | - Shall update their subnet codebases per the instructions provided by the owners and team members in a timely manner. 31 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Developer Guide 2 | 3 | This document provides a guide for developers who contribute to the Omron subnet. 4 | 5 | ## Adding Dependencies 6 | 7 | We use `uv` to manage dependencies. To add new dependencies, follow the steps below: 8 | 9 | 1. Add the package to `pyproject.toml`: 10 | 11 | ```sh 12 | uv add <package> 13 | ``` 14 | 15 | 2. Lock dependencies and generate `requirements.txt`: 16 | 17 | ```sh 18 | uv lock 19 | uv export -o requirements.txt 20 | ``` 21 | 22 | 3. Sync dependencies: 23 | 24 | ```sh 25 | uv sync 26 | ``` 27 | 28 | ## Updating Dependencies 29 | 30 | To force uv to update all packages in an existing `pyproject.toml`, run `uv sync --upgrade`. 
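For example, to refresh the entire lockfile in one step:

```sh
# re-resolve every dependency to the latest allowed version, then install
uv sync --upgrade
```

To upgrade only selected packages instead: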
31 | 32 | ```sh 33 | # only update the bittensor package 34 | $ uv sync --upgrade-package bittensor 35 | 36 | # update both the bittensor and requests packages 37 | $ uv sync --upgrade-package bittensor --upgrade-package requests 38 | 39 | # update the bittensor package to the latest, and requests to v2.0.0 40 | $ uv sync --upgrade-package bittensor --upgrade-package requests==2.0.0 41 | ``` 42 | 43 | ## Running Locally for Development 44 | 45 | For local development, we recommend using our devcontainer which provides a pre-configured development environment. The devcontainer image is pulled from `ghcr.io/inference-labs-inc/bittensor-devcontainer:latest`. 46 | 47 | 1. Create the `~/.bittensor/omron` directory on your host machine if it doesn't exist 48 | 2. Open the project in VS Code with the Dev Containers extension installed 49 | 3. VS Code will prompt you to "Reopen in Container" - click this to start the devcontainer 50 | 4. Once the container starts, run: 51 | ```sh 52 | uv sync 53 | ``` 54 | This will create and activate a virtual environment in `.venv` 55 | 5. In separate terminal windows, run: 56 | 57 | ```sh 58 | # Terminal 1: Start the local subnet 59 | start_localnet.sh 60 | 61 | # Terminal 2: Start the miner 62 | python neurons/miner.py --localnet 63 | 64 | # Terminal 3: Start the validator 65 | python neurons/validator.py --localnet 66 | ``` 67 | 68 | Note: btcli is pre-configured to use `ws://127.0.0.1:9944` in `~/.bittensor/config.yml` 69 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM --platform=linux/amd64 ubuntu:noble 2 | 3 | # Install dependencies 4 | RUN apt update && \ 5 | apt install -y \ 6 | pipx \ 7 | build-essential \ 8 | jq \ 9 | git \ 10 | aria2 \ 11 | curl \ 12 | make \ 13 | clang \ 14 | pkg-config \ 15 | libssl-dev \ 16 | llvm \ 17 | libudev-dev \ 18 | protobuf-compiler \ 19 | ffmpeg \ 20 | gosu \ 21 | && apt clean && rm -rf /var/lib/apt/lists/* 22 | 23 | # Make directories under opt and set owner to ubuntu 24 | RUN mkdir -p /opt/.cargo /opt/.rustup /opt/.nvm /opt/.npm /opt/.snarkjs /opt/omron/neurons && \ 25 | chown -R ubuntu:ubuntu /opt && \ 26 | chmod -R 775 /opt/omron && \ 27 | chown root:root /opt 28 | 29 | # Use ubuntu user 30 | USER ubuntu 31 | WORKDIR /opt 32 | 33 | # Install Rust 34 | ENV RUST_TOOLCHAIN=nightly-2024-09-30 35 | ENV CARGO_HOME=/opt/.cargo 36 | ENV RUSTUP_HOME=/opt/.rustup 37 | RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y && \ 38 | /opt/.cargo/bin/rustup toolchain install ${RUST_TOOLCHAIN} && \ 39 | /opt/.cargo/bin/rustup default ${RUST_TOOLCHAIN} && \ 40 | /opt/.cargo/bin/rustup toolchain remove stable && \ 41 | chmod -R 775 /opt/.cargo /opt/.rustup 42 | ENV PATH="/opt/.cargo/bin:${PATH}" 43 | 44 | # Install Jolt 45 | #ENV JOLT_VERSION=dd9e5c4bcf36ffeb75a576351807f8d86c33ec66 46 | #RUN cargo +${RUST_TOOLCHAIN} install --git https://github.com/a16z/jolt --rev ${JOLT_VERSION} --force --bins jolt 47 | 48 | # Install node et al. 49 | ENV NVM_DIR=/opt/.nvm 50 | RUN curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.40.0/install.sh | bash && \ 51 | [ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" && \ 52 | [ -s "$NVM_DIR/bash_completion" ] && \. 
"$NVM_DIR/bash_completion" && \ 53 | nvm install 20 && \ 54 | npm install --prefix /opt/.snarkjs snarkjs@0.7.4 && \ 55 | mkdir -p ~/.local/bin && \ 56 | ln -s $(which node) /home/ubuntu/.local/bin/node && \ 57 | ln -s $(which npm) /home/ubuntu/.local/bin/npm && \ 58 | chmod -R 775 /opt/.nvm /opt/.npm /opt/.snarkjs 59 | ENV PATH="/home/ubuntu/.local/bin:${PATH}" 60 | 61 | # Copy omron and install Python dependencies (make sure owner is ubuntu) 62 | COPY --chown=ubuntu:ubuntu --chmod=775 neurons /opt/omron/neurons 63 | COPY --chown=ubuntu:ubuntu --chmod=775 pyproject.toml /opt/omron/pyproject.toml 64 | COPY --chown=ubuntu:ubuntu --chmod=775 uv.lock /opt/omron/uv.lock 65 | RUN pipx install uv && \ 66 | cd /opt/omron && \ 67 | ~/.local/bin/uv sync --frozen --no-dev --compile-bytecode && \ 68 | ~/.local/bin/uv cache clean && \ 69 | echo "source /opt/omron/.venv/bin/activate" >> ~/.bashrc && \ 70 | chmod -R 775 /opt/omron/.venv 71 | ENV PATH="/opt/omron/.venv/bin:${PATH}" 72 | 73 | # Set workdir for running miner.py or validator.py and compile circuits 74 | WORKDIR /opt/omron/neurons 75 | ENV OMRON_NO_AUTO_UPDATE=1 76 | RUN OMRON_DOCKER_BUILD=1 /opt/omron/.venv/bin/python3 miner.py && \ 77 | rm -rf /opt/omron/neurons/deployment_layer/*/target/release/build && \ 78 | rm -rf /opt/omron/neurons/deployment_layer/*/target/release/deps && \ 79 | rm -rf /opt/omron/neurons/deployment_layer/*/target/release/examples && \ 80 | rm -rf /opt/omron/neurons/deployment_layer/*/target/release/incremental && \ 81 | rm -rf ~/.bittensor && \ 82 | rm -rf /tmp/omron 83 | USER root 84 | RUN cat <<'EOF' > /entrypoint.sh 85 | #!/usr/bin/env bash 86 | set -e 87 | if [ -n "$PUID" ]; then 88 | if [ "$PUID" = "0" ]; then 89 | echo "Running as root user" 90 | /opt/omron/.venv/bin/python3 "$@" 91 | else 92 | echo "Changing ubuntu user id to $PUID" 93 | usermod -u "$PUID" ubuntu 94 | gosu ubuntu /opt/omron/.venv/bin/python3 "$@" 95 | fi 96 | else 97 | gosu ubuntu /opt/omron/.venv/bin/python3 "$@" 98 | fi 99 | EOF 100 | RUN chmod +x /entrypoint.sh 101 | ENTRYPOINT ["/entrypoint.sh"] 102 | CMD ["-c", "import subprocess; \ 103 | subprocess.run(['/opt/omron/.venv/bin/python3', '/opt/omron/neurons/miner.py', '--help']); \ 104 | subprocess.run(['/opt/omron/.venv/bin/python3', '/opt/omron/neurons/validator.py', '--help']);" \ 105 | ] 106 | # Axon server 107 | EXPOSE 8091/tcp 108 | # API server 109 | EXPOSE 8443/tcp 110 | # Prometheus server 111 | EXPOSE 9090/tcp 112 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Inference Labs 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /competition_circuit/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/inference-labs-inc/omron-subnet/e5aeb7987054f91989cf5129fc6d128b37af3d8f/competition_circuit/.gitkeep -------------------------------------------------------------------------------- /cspell.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "0.2", 3 | "ignorePaths": [], 4 | "dictionaryDefinitions": [], 5 | "dictionaries": [], 6 | "words": [ 7 | "alist", 8 | "bittensor", 9 | "blocktime", 10 | "btcli", 11 | "btlogging", 12 | "CIRCOM", 13 | "circuitized", 14 | "coldkey", 15 | "dtype", 16 | "ezkl", 17 | "fastapi", 18 | "Gbps", 19 | "hexsha", 20 | "incentivizes", 21 | "Keypair", 22 | "libudev", 23 | "localnet", 24 | "logrows", 25 | "Mbps", 26 | "metagraph", 27 | "ndarray", 28 | "netuid", 29 | "Omron", 30 | "onnxruntime", 31 | "Opentensor", 32 | "pydantic", 33 | "rustup", 34 | "setdefaulttimeout", 35 | "snarkjs", 36 | "starlette", 37 | "substrateinterface", 38 | "subtensor", 39 | "timespan", 40 | "tlsv", 41 | "uids", 42 | "uvicorn", 43 | "venv", 44 | "wandb", 45 | "zkproof" 46 | ], 47 | "ignoreWords": [], 48 | "import": [] 49 | } 50 | -------------------------------------------------------------------------------- /dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "pip" 4 | directory: "/" 5 | schedule: 6 | interval: "weekly" 7 | file: "requirements.txt" 8 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | [← Back to home] 2 | 3 |
4 | 5 | # Omron Docs 6 | 7 |
8 | 9 | ## [Setup instructions] 10 | 11 | Instructions for setting up your machine to validate or mine on the Omron subnet. 12 | 13 | [View setup instructions →] 14 | 15 | ## [Running on Mainnet] 16 | 17 | Instructions for mining or validating on Omron mainnet. 18 | 19 | [View mainnet instructions →] 20 | 21 | ## [Running on Testnet] 22 | 23 | Instructions for mining or validating on Omron testnet. 24 | 25 | [View testnet instructions →] 26 | 27 | ## [Running locally (on "staging")] 28 | 29 | Instructions for mining or validating on a local bittensor instance, called Omron staging. 30 | 31 | [View local instructions →] 32 | 33 | ## [Command Line Arguments] 34 | 35 | An overview of command line configuration options supported by miner and validator software. 36 | 37 | [View CLI Arguments →] 38 | 39 | ## [PM2 Configuration] 40 | 41 | Instructions for configuring a PM2 ecosystem file which allows for streamlined startup of miners and validators. 42 | 43 | [View PM2 Configuration →] 44 | 45 | ## [Prometheus and Grafana Configuration] 46 | 47 | Instructions for configuring Prometheus and Grafana to monitor your validator. 48 | 49 | [View Prometheus and Grafana Configuration →] 50 | 51 | ## [Versioning] 52 | 53 | Information about Omron's versioning system and how to stay up-to-date with the latest releases. 54 | 55 | [View versioning →] 56 | 57 | ## [Custom Circuit Integrations] 58 | 59 | Instructions for how to integrate your own zero-knowledge circuits into the Omron subnet. 60 | 61 | [View Custom Circuit Integrations →] 62 | 63 | [Setup instructions]: ./shared_setup_steps.md 64 | [Running on Mainnet]: ./running_on_mainnet.md 65 | [Running on Testnet]: ./running_on_testnet.md 66 | [Running locally (on "staging")]: ./running_on_staging.md 67 | [Command Line Arguments]: ./command_line_arguments.md 68 | [PM2 Configuration]: ./pm2_configuration.md 69 | [View setup instructions →]: ./shared_setup_steps.md 70 | [View CLI Arguments →]: ./command_line_arguments.md 71 | [View PM2 Configuration →]: ./pm2_configuration.md 72 | [View mainnet instructions →]: ./running_on_mainnet.md 73 | [View testnet instructions →]: ./running_on_testnet.md 74 | [View local instructions →]: ./running_on_staging.md 75 | [Prometheus and Grafana Configuration]: ./prometheus.md 76 | [View Prometheus and Grafana Configuration →]: ./prometheus.md 77 | [Custom Circuit Integrations]: ./custom_circuit_integrations.md 78 | [View Custom Circuit Integrations →]: ./custom_circuit_integrations.md 79 | [Versioning]: ./versioning.md 80 | [View versioning →]: ./versioning.md 81 | [← Back to home]: ../ 82 | -------------------------------------------------------------------------------- /docs/competitions/QUICKSTART.md: -------------------------------------------------------------------------------- 1 | # Competition Quick Start Guide 2 | 3 | ## Prerequisites 4 | 5 | - Python 3.8+ 6 | - Storage provider credentials (R2/S3) 7 | - Registration on subnet 2 (use `btcli register` if not already registered) 8 | - At least 16GB RAM (32GB recommended) 9 | - 50GB free SSD storage 10 | - Apple Silicon (M1/M2/M3) CPU 11 | 12 | > [!NOTE] 13 | > While miners can run on any platform, macOS arm64 with Metal acceleration is the recommended configuration since validators use this architecture for evaluation. This helps ensure your circuit will perform consistently when being evaluated. 14 | 15 | > [!IMPORTANT] 16 | > Ensure your system meets the minimum resource requirements before proceeding. 
Circuit compilation and proving can be resource-intensive, especially during initial setup. 17 | 18 | ## Getting Started 19 | 20 | ### Configure Storage 21 | 22 | Create a `.env` file in your project root with the following variables: 23 | 24 | ```bash 25 | # Required - choose either R2 or S3 26 | STORAGE_PROVIDER="r2" # or "s3" 27 | STORAGE_BUCKET="your-bucket" 28 | 29 | # For Cloudflare R2 30 | STORAGE_ACCOUNT_ID="your-account-id" 31 | STORAGE_ACCESS_KEY="your-access-key" 32 | STORAGE_SECRET_KEY="your-secret-key" 33 | STORAGE_REGION="auto" 34 | 35 | # For AWS S3 36 | # STORAGE_REGION="your-aws-region" 37 | ``` 38 | 39 | ### Start the Miner 40 | 41 | > [!IMPORTANT] 42 | > 43 | > **Port `8091` must be open** for your circuit to function properly. This requires configuration on your local machine and router, or, if you're using a cloud provider, adjustments to your network settings. Validators cannot query your circuit if port `8091` remains closed. 44 | 45 | ```bash 46 | pm2 start neurons/miner.py --name omron_miner -- --netuid 2 --wallet.name your_wallet --logging.debug 47 | ``` 48 | 49 | ## Circuit Submission Flow 50 | 51 | ### Prepare Circuit Files 52 | 53 | Required files in your circuit directory: 54 | 55 | - `vk.key` - Verification key 56 | - `pk.key` - Proving key - **must be less than 50GB** 57 | - `settings.json` - Circuit configuration with required settings: 58 | ```json 59 | { 60 | "run_args": { 61 | "input_visibility": "Private", 62 | "output_visibility": "Public", 63 | "param_visibility": "Private", 64 | "commitment": "KZG" 65 | } 66 | } 67 | ``` 68 | - `model.compiled` - Compiled model file 69 | 70 | For an example of how to compile a circuit in this format, see the script below. 71 | 72 | > [!NOTE] 73 | > This script generates its own model and is for demonstration purposes only. To compile your own model, the competition template ONNX is available at `neurons/_validator/competitions/1/age.onnx`, along with an example `input.json` file. 74 | 75 | ```bash 76 | ./neurons/scripts/create_competition_circuit.py 77 | ``` 78 | 79 | ### Deploy Circuit 80 | 81 | Place circuit files in the `./competition_circuit/` directory: 82 | 83 | ```bash 84 | mkdir -p competition_circuit/ 85 | cp -r your_circuit/* competition_circuit/ 86 | ``` 87 | 88 | The miner will automatically: 89 | 90 | - Monitor circuit directory for changes 91 | - Upload modified files to R2/S3 92 | - Create on-chain commitment using the hash of the `vk.key` file 93 | - Generate time-limited signed URLs for validators upon request 94 | 95 | ### Monitor Evaluation 96 | 97 | - Watch validator requests: `pm2 logs omron_miner` 98 | - View metrics: https://wandb.ai/inferencelabs/omron 99 | - View leaderboard: https://accelerate.omron.ai 100 | 101 | ### 🏆 EZKL Performance Evaluation 102 | 103 | Upon completion of the Subnet 2 competition phase, [EZKL](https://ezkl.xyz) will conduct an independent evaluation of all submitted circuits through their automated CI/CD pipeline. Your implementation will be assessed across three critical dimensions: 104 | 105 | - ⚡ **Performance** - Circuit proving time optimization 106 | - 📊 **Resource Efficiency** - Memory utilization and management 107 | - 🎯 **Accuracy** - Age recognition precision and reliability 108 | 109 | #### Submission Process 110 | 111 | 1. Submit your implementation via PR to https://github.com/zkonduit/ezkl 112 | 2. Include the tag `omron-subnet-competition-1` 113 | 3. 
Await automated evaluation results 114 | 115 | High-performing circuits that demonstrate excellence across these metrics will be eligible for additional grant funding. EZKL's evaluation criteria are designed to identify implementations that achieve an optimal balance of performance, efficiency, and accuracy. 116 | 117 | ## Troubleshooting 118 | 119 | **Circuit Upload Fails** 120 | 121 | - Verify storage credentials in .env 122 | - Check network connectivity 123 | - Verify all required files are present 124 | - Check storage bucket permissions 125 | 126 | **Validation Errors** 127 | 128 | - Review validator logs for specific failures via WandB 129 | - Verify input schema matches implementation 130 | 131 | ## FAQ 132 | 133 | ### How do I benchmark my circuit? 134 | 135 | To benchmark your circuit before submission to mainnet, it's recommended to run a validator and miner locally, pointed towards testnet or a local network. For guidance on how to do this, please refer to the [testnet] and [localnet] guides, respectively. 136 | 137 | ### Which hardware do validators run to benchmark my circuit? 138 | 139 | Validators are instructed to run using macOS arm64 architectures. M1 or M2 processors are recommended for best performance, and the majority of validators will use these processors to test your circuits. 140 | For specific hardware requirements, please refer to the main [README] document. 141 | 142 | ### What is the maximum proof time allowed for a circuit? 143 | 144 | The maximum proof time is a configurable property and is subject to change over time; as it stands, it is set at 300 seconds (5 minutes). 145 | 146 | ### What is the maximum `pk.key` size allowed for a circuit? 147 | 148 | The maximum `pk.key` size is a configurable property and is subject to change over time; as it stands, it is set at 50GB. 149 | 150 | ### When does the competition end? 151 | 152 | The competition will end on 2025-04-27. 153 | 154 | ## For additional assistance 155 | 156 | - Join Discord and reach out via the Subnet 2 channel: https://discord.gg/bittensor 157 | - For security reports, please see our bug bounty program: https://immunefi.com/bug-bounty/omron/ 158 | - Otherwise, feel free to open a GitHub issue within the repository. 159 | 160 | [testnet]: ../running_on_testnet.md 161 | [localnet]: ../running_on_staging.md 162 | [README]: ../../README.md#minimum-1 163 | -------------------------------------------------------------------------------- /docs/competitions/README.md: -------------------------------------------------------------------------------- 1 | # Competition Technical Guide 2 | 3 | ## Overview 4 | 5 | This guide provides a deep technical dive into participating in competitions as a miner. Competitions are a mechanism for miners to submit optimized zero-knowledge circuits that prove neural network execution, with rewards based on circuit performance across multiple metrics. 6 | 7 | ## Circuit Evaluation 8 | 9 | The scoring system evaluates circuits based on accuracy (40% weight), proof size (30% weight), and response time (30% weight). Accuracy measures how closely circuit outputs match the baseline model using MSE loss and exponential transformation. Proof size evaluates the compactness of generated zero-knowledge proofs relative to current SOTA. Response time measures proof generation speed normalized against SOTA performance. 
10 | 11 | The final score calculation uses an exponential decay formula that creates a score between 0 and 1, where higher scores indicate better performance relative to the current SOTA. The formula penalizes poor performance exponentially, encouraging continuous improvement and optimization: 12 | 13 | ``` 14 | score = exp(-( 15 | 0.4 * max(0, sota_accuracy - accuracy) + 16 | 0.3 * max(0, (proof_size - sota_proof_size)/sota_proof_size) + 17 | 0.3 * max(0, (response_time - sota_response_time)/sota_response_time) 18 | )) 19 | ``` 20 | 21 | ## Technical Requirements 22 | 23 | Your circuit must process inputs matching the competition config shape and produce a matching output shape. 24 | 25 | The submission package must include several key files: a compiled circuit (model.compiled), proving and verification keys (pk.key and vk.key), and a settings.json configuration file. These files work together to enable proof generation and verification. 26 | 27 | ## Evaluation Process 28 | 29 | The evaluation process runs through multiple rounds of testing to ensure consistent performance. Each round generates random test inputs that are fed through both your circuit and a baseline model. The baseline comparison uses either PyTorch or ONNX models, supporting flexible implementation approaches. 30 | 31 | Your circuit must generate valid proofs that verify successfully. The system measures proof generation time and size across 10 evaluation rounds, averaging the metrics to determine final scores. All verifications must pass for a valid submission - a single failure results in disqualification. 32 | 33 | ## Deployment Architecture 34 | 35 | The competition system uses cloud storage (R2/S3) for circuit file management. When validators request your circuit, they receive signed URLs for secure file access. 36 | 37 | The commitment process anchors your verification key hash on-chain. This creates an immutable record of your submission and prevents tampering. The system verifies that local and chain commitments match before proceeding with evaluation. 38 | 39 | ## Optimization Guidelines 40 | 41 | Circuit optimization requires balancing multiple competing factors. Reducing circuit complexity generally improves proof generation speed and size but may impact accuracy. The scoring formula's weights guide this tradeoff - accuracy carries the highest weight at 40%. 42 | 43 | Resource management plays a crucial role in performance. Proof generation demands significant GPU power and memory. Monitor system resources during testing to ensure your circuit operates within validator timeout limits. Profile your operations to identify and eliminate bottlenecks. 44 | 45 | ## Platform Requirements 46 | 47 | Currently, validators run using macOS arm64 architecture. This requirement ensures consistent evaluation environments across all participants. While you can develop and test on other platforms, final submissions must be validated on the required architecture to maintain consensus and provide an optimal benchmark for the use case. 
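As a concrete reference, the scoring formula above can be expressed as a short Python sketch. The weights and SOTA normalization are taken directly from this guide; the function name and the idea of passing the SOTA metrics in explicitly are illustrative assumptions, not the validator's actual API:

```python
import math

def competition_score(
    accuracy: float,
    proof_size: float,
    response_time: float,
    sota_accuracy: float,
    sota_proof_size: float,
    sota_response_time: float,
) -> float:
    # Each term only penalizes doing worse than SOTA; matching or beating
    # SOTA on a metric contributes zero penalty.
    penalty = (
        0.4 * max(0.0, sota_accuracy - accuracy)
        + 0.3 * max(0.0, (proof_size - sota_proof_size) / sota_proof_size)
        + 0.3 * max(0.0, (response_time - sota_response_time) / sota_response_time)
    )
    # Exponential decay maps the combined penalty to a score in (0, 1].
    return math.exp(-penalty)
```

A circuit that matches SOTA on all three metrics scores exactly 1.0; any shortfall decays the score exponentially, which is why even small accuracy or proof-time regressions are costly.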
48 | -------------------------------------------------------------------------------- /docs/images/proof_of_weights_architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/inference-labs-inc/omron-subnet/e5aeb7987054f91989cf5129fc6d128b37af3d8f/docs/images/proof_of_weights_architecture.png -------------------------------------------------------------------------------- /docs/images/proof_of_weights_workflow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/inference-labs-inc/omron-subnet/e5aeb7987054f91989cf5129fc6d128b37af3d8f/docs/images/proof_of_weights_workflow.png -------------------------------------------------------------------------------- /docs/notebooks/dummy_age_detection.onnx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/inference-labs-inc/omron-subnet/e5aeb7987054f91989cf5129fc6d128b37af3d8f/docs/notebooks/dummy_age_detection.onnx -------------------------------------------------------------------------------- /docs/pm2_configuration.md: -------------------------------------------------------------------------------- 1 | # PM2 Configuration 2 | 3 | > [!NOTE] 4 | > Setting up a PM2 configuration file is completely optional. By default, all documentation assumes running omron without a configuration file. 5 | 6 | To simplify the process of running miners and validators, we offer a template PM2 file at `ecosystem.config.tmpl.js`. This file can be modified and copied into a `ecosystem.config.js` for convenient use when starting a miner or validator. 7 | 8 | ## 1. Copy the template file 9 | 10 | Use the below command to copy the template file into a new file called `ecosystem.config.js`. 11 | 12 | ```console 13 | cp ecosystem.config.tmpl.js ecosystem.config.js 14 | ``` 15 | 16 | ## 2. Modify the ecosystem file with your configuration 17 | 18 | Comments are provided within the ecosystem file which outline relevant fields which need to be updated with values unique to your configuration. We also provide a full list of valid command line arguments in the [Command Line Arguments](./command_line_arguments.md) section. 19 | 20 | You can edit the file in any text editor of your choice. 21 | 22 | ## 3. Start your miner or validator 23 | 24 | Once your miner or validator is configured, use the following commands to easily start them. 25 | 26 | ### Miner 27 | 28 | ```console 29 | pm2 start ecosystem.config.js --only miner 30 | ``` 31 | 32 | ### Validator 33 | 34 | ```console 35 | pm2 start ecosystem.config.js --only validator 36 | ``` 37 | -------------------------------------------------------------------------------- /docs/running_on_mainnet.md: -------------------------------------------------------------------------------- 1 |
2 | 3 | # Running on Mainnet 4 | 5 |
6 | 7 | ## Setup 8 | 9 | Please find relevant setup documentation over in the [`shared_setup_steps.md`] file. These steps will prepare the miner and validator for use in the following steps. 10 | 11 | ## Mining 12 | 13 | Run the following command to start a miner on mainnet 14 | 15 | ```console 16 | cd neurons 17 | pm2 start miner.py --name miner --interpreter ../.venv/bin/python -- \ 18 | --netuid 2 \ 19 | --wallet.name {your_miner_key_name} \ 20 | --wallet.hotkey {your_miner_hotkey_name} 21 | ``` 22 | 23 | Or run this command with `make pm2-miner WALLET_NAME={your_miner_key_name} HOTKEY_NAME={your_miner_hotkey_name}` 24 | 25 | [View all acceptable CLI arguments →] 26 | 27 | ## Validating 28 | 29 | Run the following command to start a validator on mainnet 30 | 31 | ```console 32 | cd neurons 33 | pm2 start validator.py --name validator --interpreter ../.venv/bin/python -- \ 34 | --netuid 2 \ 35 | --wallet.name {your_validator_key_name} \ 36 | --wallet.hotkey {your_validator_hotkey_name} 37 | ``` 38 | 39 | Or run this command with `make pm2-validator WALLET_NAME={validator_key_name} HOTKEY_NAME={validator_hot_key_name}` 40 | 41 | [View all acceptable CLI arguments →] 42 | 43 | [View all acceptable CLI arguments →]: ./command_line_arguments.md 44 | [`shared_setup_steps.md`]: ./shared_setup_steps.md 45 | -------------------------------------------------------------------------------- /docs/running_on_testnet.md: -------------------------------------------------------------------------------- 1 |
2 | 3 | # Running on Testnet 4 | 5 |
6 | 7 | ## Setup 8 | 9 | Please find relevant setup documentation over in the [`shared_setup_steps.md`] file. These steps will prepare the miner and validator for use in the following steps. 10 | 11 | ## Mining 12 | 13 | Run the following command to start a miner on testnet 14 | 15 | ```console 16 | cd neurons 17 | pm2 start miner.py --name miner --interpreter ../.venv/bin/python -- \ 18 | --netuid 118 \ 19 | --wallet.name {your_miner_key_name} \ 20 | --wallet.hotkey {your_miner_hotkey_name} \ 21 | --subtensor.network test 22 | ``` 23 | 24 | Or run this command with `make pm2-test-miner WALLET_NAME={your_miner_key_name} HOTKEY_NAME={your_miner_hotkey_name}` 25 | 26 | [View all acceptable CLI arguments →] 27 | 28 | ## Validating 29 | 30 | Run the following command to start a validator on testnet 31 | 32 | ```console 33 | cd neurons 34 | pm2 start validator.py --name validator --interpreter ../.venv/bin/python -- \ 35 | --netuid 118 \ 36 | --wallet.name {your_validator_key_name} \ 37 | --wallet.hotkey {your_validator_hotkey_name} \ 38 | --subtensor.network test 39 | ``` 40 | 41 | Or run this command with `make pm2-test-validator WALLET_NAME={validator_key_name} HOTKEY_NAME={validator_hot_key_name}` 42 | 43 | [View all acceptable CLI arguments →] 44 | 45 | [View all acceptable CLI arguments →]: ./command_line_arguments.md 46 | [`shared_setup_steps.md`]: ./shared_setup_steps.md 47 | -------------------------------------------------------------------------------- /docs/shared_setup_steps.md: -------------------------------------------------------------------------------- 1 | # Setup Instructions 2 | 3 | For miners and validators. 4 | 5 | ## 1. Install Prerequisites 6 | 7 | To mine and validate for the Omron subnet, you'll need to install several prerequisite tools. For convenience, we offer a shell script to install all of the required tools automatically. To run the script, use the below command. Some dependencies will be installed automatically upon starting the miner or validator, as part of pre-flight checks. Otherwise, to manually install the necessary tools, please find links to all relevant installation documentation below. 8 | 9 | > [!IMPORTANT] 10 | > When starting the miner or validator, you must monitor initial startup logs. If any dependencies are missing, the script will automatically attempt to install them. It _may_ prompt you to restart your system if necessary. Once all dependencies are installed, the pre-flight checks will pass without any further action required from you. 11 | 12 | ```console 13 | /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/inference-labs-inc/omron-subnet/main/setup.sh)" 14 | ``` 15 | 16 | | Tool | Description | 17 | | ----------- | --------------------------------------------------------------------------------------------------------- | 18 | | [`NodeJS`] | A JavaScript runtime that is widely used for building web applications. | 19 | | [`pm2`] | A process manager for Node.js applications that is used to run and manage applications in the background. | 20 | | [`Python`] | A programming language that is widely used for scientific computing and data analysis. | 21 | | [`pip`] | A package manager for Python that is used to install and manage Python packages. | 22 | | [`btcli`] | A command-line interface for interacting with the Bittensor network. | 23 | | [`rust`] | A systems programming language focused on safety, speed, and concurrency. | 24 | | [`nightly`] | The nightly version of Rust, required for jolt. 
| 25 | | [`jolt`] | A zkVM for RISC-V, used by several models on the Omron subnet. | 26 | 27 | ## 2. Create a new wallet 28 | 29 | > [!NOTE] 30 | > Skip this step if you already have a wallet configured in [`btcli`]. 31 | 32 | > [!WARNING] 33 | > This step will create a new seed phrase. If lost, it will no longer be possible to access your account. Please write it down and store it in a secure location. 34 | 35 | Use the below commands to create a new coldkey and hotkey for use within the Bittensor network. 36 | 37 | ```console 38 | btcli w new_coldkey 39 | btcli w new_hotkey 40 | ``` 41 | 42 | ## 3. Register on the subnet 43 | 44 | Run the following command to register on the subnet. You are required to register in order to mine or validate on the subnet. 45 | 46 | > [!CAUTION] 47 | > When registering on a subnet, you are required to burn ('recycle') a dynamic amount of tao. This tao will not be refunded in the event that you are deregistered. After running the below command, you will be asked to confirm the value for recycle two times before registering. 48 | 49 | Replace `default` values below with your wallet and hotkey names if they are not `default`. 50 | 51 | | Variable | Description | 52 | | --------- | ------------------------------------------------------------------------------------------------------------------------ | 53 | | `NETWORK` | The network you are registering on. This can be either `finney` for mainnet or `test` for testnet. | 54 | | `NETUID` | The network ID of the subnet you are registering on. For testnet, our netuid is `118` and on mainnet, our netuid is `2`. | 55 | 56 | ```console 57 | btcli subnet register --subtensor.network {NETWORK} --netuid {NETUID} --wallet.name default --wallet.hotkey default 58 | ``` 59 | 60 | ## 4. Run your miner or validator 61 | 62 | To run your miner or validator, follow the instructions linked below based on the network you intend to mine or validate on. 63 | 64 | [Local "Staging" Network →](./running_on_staging.md) 65 | [Mainnet "Finney" →](./running_on_mainnet.md) 66 | [Testnet →](./running_on_testnet.md) 67 | 68 | [`NodeJS`]: https://nodejs.org/en/download/ 69 | [`pm2`]: https://pm2.keymetrics.io/docs/usage/quick-start/ 70 | [`Python`]: https://www.python.org/downloads/ 71 | [`pip`]: https://pip.pypa.io/en/stable/installation/ 72 | [`btcli`]: https://docs.bittensor.com/getting-started/installation 73 | [`rust`]: https://www.rust-lang.org/tools/install 74 | [`jolt`]: https://github.com/a16z/jolt#installation 75 | [`nightly`]: https://rust-lang.github.io/rustup/concepts/channels.html#working-with-nightly-rust 76 | -------------------------------------------------------------------------------- /docs/versioning.md: -------------------------------------------------------------------------------- 1 | # Versioning and Auto-Update 2 | 3 | > [!NOTE] 4 | > Semantic Versioning was adopted by Omron starting from version 1.0.0. 5 | 6 | This project uses Semantic Versioning (SemVer) for version numbering and includes an auto-update feature to ensure users are running the latest version. 7 | 8 | ## Semantic Versioning 9 | 10 | We follow the Semantic Versioning 2.0.0 specification (https://semver.org/). Our version numbers take the form of MAJOR.MINOR.PATCH, where: 11 | 12 | 1. MAJOR version increments indicate incompatible API changes 13 | 2. MINOR version increments indicate new functionality in a backwards-compatible manner 14 | 3. 
PATCH version increments indicate backwards-compatible bug fixes 15 | 16 | ## Auto-Update Feature 17 | 18 | The project includes an auto-update utility (the `AutoUpdate` class in `neurons/utils/auto_update.py`) that performs the following tasks: 19 | 20 | 1. Checks the remote repository for a newer version 21 | 2. Compares the remote version with the local version 22 | 3. Automatically updates the local repository if a newer version is available 23 | 4. Handles potential merge conflicts 24 | 5. Updates package dependencies if necessary 25 | 26 | ### Version Checking 27 | 28 | The auto-update feature compares the `__version__` string in the local and remote `neurons/__init__.py` files. It converts these version strings to integers for comparison (e.g., "1.2.3" becomes 123). 29 | 30 | ### Update Process 31 | 32 | If a newer version is detected, the auto-update feature: 33 | 34 | 1. Pulls the latest changes from the remote repository 35 | 2. Attempts to resolve any merge conflicts automatically 36 | 3. Updates package dependencies if the `requirements.txt` file has changed 37 | 4. Restarts the application to apply the updates 38 | 39 | ## Manual Updates 40 | 41 | While the auto-update feature is designed to keep the application up-to-date automatically, users can also perform manual updates by pulling the latest changes from the repository and updating their dependencies. 42 | 43 | ```bash 44 | git fetch origin 45 | git checkout main 46 | git pull origin main 47 | pip install -r requirements.txt 48 | pm2 restart all 49 | ``` 50 | 51 | ## Version History 52 | 53 | For a detailed changelog of version updates, please refer to [the releases section of the repository] or [release notes on Omron's GitBook]. 54 | 55 | 56 | [the releases section of the repository]: https://github.com/inference-labs-inc/omron-subnet/releases 57 | [release notes on Omron's GitBook]: https://docs.omron.ai/release-notes 58 | -------------------------------------------------------------------------------- /ecosystem.config.tmpl.js: -------------------------------------------------------------------------------- 1 | // DO NOT EDIT THIS FILE DIRECTLY 2 | // This file is a template for the ecosystem.config.js file that should be created in the same directory. 3 | // Replace the placeholders with your own values and save it as ecosystem.config.js. 4 | // The ecosystem.config.js file is used by PM2 to manage the processes. 5 | // To start the miner process, run `pm2 start ecosystem.config.js --only miner`. 6 | // To start the validator process, run `pm2 start ecosystem.config.js --only validator`. 
7 | module.exports = { 8 | apps: [ 9 | { 10 | name: "miner", 11 | script: "miner.py", 12 | interpreter: "python3", // or "../.venv/bin/python" if you use the project's virtual environment 13 | cwd: "neurons", 14 | kill_timeout: 3000, // just to allow the miner to clean up before being killed 15 | args: [ 16 | "--netuid 2", 17 | "--wallet.name {your_miner_key_name}", // replace with your miner key name 18 | "--wallet.hotkey {your_miner_hotkey_name}", // replace with your miner hotkey name 19 | ].join(" "), 20 | }, 21 | { 22 | name: "validator", 23 | script: "validator.py", 24 | interpreter: "python3", // or "../.venv/bin/python" if you use the project's virtual environment 25 | cwd: "neurons", 26 | kill_timeout: 3000, // just to allow the validator to clean up before being killed 27 | args: [ 28 | "--netuid 2", 29 | "--wallet.name {your_validator_key_name}", // replace with your validator key name 30 | "--wallet.hotkey {your_validator_hotkey_name}", // replace with your validator hotkey name 31 | ].join(" "), 32 | }, 33 | ], 34 | }; 35 | -------------------------------------------------------------------------------- /neurons/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | This version number is used to trigger automatic updates. 3 | """ 4 | 5 | __version__ = "8.2.1" 6 | -------------------------------------------------------------------------------- /neurons/_validator/api/cache.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import asyncio 3 | from _validator.config import ValidatorConfig 4 | import bittensor as bt 5 | 6 | 7 | class ValidatorKeysCache: 8 | """ 9 | A thread-safe cache for validator keys to reduce the number of requests to the metagraph. 10 | """ 11 | 12 | def __init__(self, config: ValidatorConfig) -> None: 13 | self.cached_keys: dict[int, list[str]] = {} 14 | self.cached_timestamps: dict[int, datetime.datetime] = {} 15 | self.config: ValidatorConfig = config 16 | self._lock = asyncio.Lock() 17 | 18 | async def fetch_validator_keys(self, netuid: int) -> None: 19 | """ 20 | Fetch the validator keys for a given netuid and cache them. 21 | Thread-safe implementation using a lock. 22 | """ 23 | async with self._lock: 24 | subtensor = bt.subtensor(config=self.config.bt_config) 25 | self.cached_keys[netuid] = [ 26 | neuron.hotkey 27 | for neuron in subtensor.neurons_lite(netuid) 28 | if neuron.validator_permit 29 | ] 30 | expiry = datetime.datetime.now() + datetime.timedelta(hours=12) 31 | self.cached_timestamps[netuid] = expiry 32 | 33 | async def check_validator_key(self, ss58_address: str, netuid: int) -> bool: 34 | """ 35 | Thread-safe check if a given key is a validator key for a given netuid. 
36 | """ 37 | if ( 38 | self.config.api.whitelisted_public_keys 39 | and ss58_address in self.config.api.whitelisted_public_keys 40 | ): 41 | # If the sender is whitelisted, we don't need to check the key 42 | return True 43 | 44 | cache_timestamp = self.cached_timestamps.get(netuid, None) 45 | if cache_timestamp is None or cache_timestamp < datetime.datetime.now(): 46 | await self.fetch_validator_keys(netuid) 47 | return ss58_address in self.cached_keys.get(netuid, []) 48 | 49 | async def check_whitelisted_key(self, ss58_address: str) -> bool: 50 | if not self.config.api.whitelisted_public_keys: 51 | return False 52 | return ss58_address in self.config.api.whitelisted_public_keys 53 | -------------------------------------------------------------------------------- /neurons/_validator/api/certificate_manager.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | from OpenSSL import crypto 4 | import bittensor as bt 5 | from constants import ONE_YEAR 6 | 7 | 8 | class CertificateManager: 9 | def __init__(self, cert_path: str): 10 | self.cert_path = cert_path 11 | self.key_path = os.path.join(cert_path, "key.pem") 12 | self.cert_file = os.path.join(cert_path, "cert.pem") 13 | 14 | def ensure_valid_certificate(self, external_ip: str) -> None: 15 | if not os.path.exists(self.cert_file): 16 | bt.logging.warning( 17 | "Certificate not found. Generating new self-signed certificate." 18 | ) 19 | os.makedirs(self.cert_path, exist_ok=True) 20 | self._generate_certificate(external_ip) 21 | 22 | def _generate_certificate(self, cn: str) -> None: 23 | key = crypto.PKey() 24 | key.generate_key(crypto.TYPE_RSA, 4096) 25 | 26 | cert = crypto.X509() 27 | cert.get_subject().CN = cn 28 | cert.set_serial_number(int(time.time())) 29 | cert.gmtime_adj_notBefore(0) 30 | cert.gmtime_adj_notAfter(2 * ONE_YEAR) 31 | cert.set_issuer(cert.get_subject()) 32 | cert.set_pubkey(key) 33 | cert.sign(key, "sha256") 34 | 35 | with open(self.cert_file, "wb") as f: 36 | f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert)) 37 | 38 | with open(self.key_path, "wb") as f: 39 | f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, key)) 40 | -------------------------------------------------------------------------------- /neurons/_validator/api/websocket_manager.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from fastapi import WebSocket 3 | import logging 4 | from starlette.websockets import WebSocketState 5 | 6 | 7 | class WebSocketManager: 8 | def __init__(self): 9 | self.active_connections: set[WebSocket] = set() 10 | 11 | async def connect(self, websocket: WebSocket) -> None: 12 | await websocket.accept() 13 | self.active_connections.add(websocket) 14 | 15 | async def disconnect(self, websocket: WebSocket) -> None: 16 | if websocket in self.active_connections: 17 | try: 18 | if websocket.application_state != WebSocketState.DISCONNECTED: 19 | await websocket.close() 20 | except RuntimeError as e: 21 | if "Unexpected ASGI message 'websocket.close'" in str(e): 22 | logging.info( 23 | f"WebSocket close attempt on already closing/closed connection: {e}" 24 | ) 25 | else: 26 | logging.error( 27 | f"Unexpected RuntimeError during websocket close: {e}" 28 | ) 29 | raise 30 | except Exception as e: 31 | logging.error(f"Unexpected error during websocket.close(): {e}") 32 | finally: 33 | self.active_connections.discard(websocket) 34 | else: 35 | logging.debug( 36 | f"Attempted to disconnect 
websocket not in active_connections: {websocket}" 37 | ) 38 | 39 | async def close_all(self) -> None: 40 | for connection in self.active_connections.copy(): 41 | await self.disconnect(connection) 42 | -------------------------------------------------------------------------------- /neurons/_validator/competitions/1/age.onnx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/inference-labs-inc/omron-subnet/e5aeb7987054f91989cf5129fc6d128b37af3d8f/neurons/_validator/competitions/1/age.onnx -------------------------------------------------------------------------------- /neurons/_validator/competitions/1/competition_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": 1, 3 | "name": "Age Detection Competition", 4 | "description": "Age detection model evaluation with 64x64x3 inputs", 5 | "version": "1.0.0", 6 | "start_timestamp": 1746633600, 7 | "end_timestamp": 1749264870, 8 | "baseline_model_path": "age.onnx", 9 | "circuit_settings": { 10 | "input_shape": [ 11 | 1, 12 | 3, 13 | 64, 14 | 64 15 | ], 16 | "output_names": [ 17 | "prob_stage_1", 18 | "prob_stage_2", 19 | "prob_stage_3", 20 | "stage1_delta_k", 21 | "stage2_delta_k", 22 | "stage3_delta_k", 23 | "index_offset_stage1", 24 | "index_offset_stage2", 25 | "index_offset_stage3" 26 | ], 27 | "output_shapes": { 28 | "prob_stage_1": [ 29 | 1, 30 | 3 31 | ], 32 | "prob_stage_2": [ 33 | 1, 34 | 3 35 | ], 36 | "prob_stage_3": [ 37 | 1, 38 | 3 39 | ], 40 | "stage1_delta_k": [ 41 | 1, 42 | 1 43 | ], 44 | "stage2_delta_k": [ 45 | 1, 46 | 1 47 | ], 48 | "stage3_delta_k": [ 49 | 1, 50 | 1 51 | ], 52 | "index_offset_stage1": [ 53 | 1, 54 | 3 55 | ], 56 | "index_offset_stage2": [ 57 | 1, 58 | 3 59 | ], 60 | "index_offset_stage3": [ 61 | 1, 62 | 3 63 | ] 64 | } 65 | }, 66 | "data_source": { 67 | "url": "https://storage.omron.ai/age.zip", 68 | "format": "zip", 69 | "type": "remote" 70 | }, 71 | "evaluation": { 72 | "metric": "mean_squared_error", 73 | "num_iterations": 10, 74 | "num_total_evaluations": 100, 75 | "num_proof_evaluations": 10, 76 | "scoring_weights": { 77 | "accuracy": 0.95, 78 | "proof_size": 0.0, 79 | "response_time": 0.05 80 | }, 81 | "output_shape": [ 82 | 1, 83 | 21 84 | ] 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /neurons/_validator/competitions/1/data_processor.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn.functional as F 3 | 4 | from _validator.competitions.services.data_source import ( 5 | CompetitionDataProcessor, 6 | ) 7 | 8 | 9 | class DatasetProcessor(CompetitionDataProcessor): 10 | def __init__(self, noise_scale: float = 0.01, jitter_scale: float = 0.1): 11 | self.noise_scale = noise_scale 12 | self.jitter_scale = jitter_scale 13 | 14 | def process(self, inputs: torch.Tensor) -> torch.Tensor: 15 | batch_size = inputs.shape[0] 16 | 17 | if torch.rand(1) > 0.5: 18 | inputs = torch.flip(inputs, dims=[3]) 19 | 20 | jitter = ( 21 | 1.0 22 | + (torch.rand(batch_size, 1, 1, 1, device=inputs.device) * 2 - 1) 23 | * self.jitter_scale 24 | ) 25 | inputs = inputs * jitter 26 | 27 | angle = (torch.rand(1) * 20 - 10) * (3.14159 / 180) 28 | cos_theta = torch.cos(angle) 29 | sin_theta = torch.sin(angle) 30 | rotation_matrix = torch.tensor( 31 | [[cos_theta, -sin_theta, 0], [sin_theta, cos_theta, 0]], 32 | device=inputs.device, 33 | ) 34 | grid = F.affine_grid( 35 | 
rotation_matrix.unsqueeze(0).expand(batch_size, -1, -1), 36 | inputs.size(), 37 | align_corners=True, 38 | ) 39 | inputs = F.grid_sample(inputs, grid, align_corners=True) 40 | 41 | noise = torch.randn_like(inputs) * self.noise_scale 42 | perturbed = inputs + noise 43 | 44 | return torch.clamp(perturbed, -1, 1) 45 | -------------------------------------------------------------------------------- /neurons/_validator/competitions/1/settings.json: -------------------------------------------------------------------------------- 1 | {"run_args":{"tolerance":{"val":0.0,"scale":1.0},"input_scale":7,"param_scale":7,"scale_rebase_multiplier":1,"lookup_range":[-32768,32768],"logrows":17,"num_inner_cols":2,"variables":[["batch_size",1]],"input_visibility":"Private","output_visibility":"Public","param_visibility":"Private","rebase_frac_zero_constants":false,"check_mode":"UNSAFE","commitment":"KZG","decomp_base":16384,"decomp_legs":2,"bounded_log_lookup":false,"ignore_range_check_inputs_outputs":false},"num_rows":24343506,"total_assignments":48687013,"total_const_size":67,"total_dynamic_col_size":0,"max_dynamic_input_len":0,"num_dynamic_lookups":0,"num_shuffles":21824,"total_shuffle_col_size":98816,"model_instance_shapes":[[1,3],[1,3],[1,3],[1,1],[1,1],[1,1],[1,3],[1,3],[1,3]],"model_output_scales":[7,7,7,7,7,7,7,7,7],"model_input_scales":[7],"module_sizes":{"polycommit":[],"poseidon":[0,[0]]},"required_lookups":[{"Tanh":{"scale":128.0}}],"required_range_checks":[[0,16383],[-1,1]],"check_mode":"UNSAFE","version":"19.0.7","num_blinding_factors":null,"timestamp":1738867088320} -------------------------------------------------------------------------------- /neurons/_validator/competitions/__init__.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | 4 | class Competition(Enum): 5 | """ 6 | Enum for competitions. 
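    Values are expected to correspond to competition directories under
    _validator/competitions/ (e.g. FIRST -> "1").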
7 | """ 8 | 9 | FIRST = 1 10 | -------------------------------------------------------------------------------- /neurons/_validator/competitions/models/circuit.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel 2 | 3 | 4 | class CircuitFiles(BaseModel): 5 | verification_key: str 6 | proving_key: str 7 | settings: str 8 | circuit: str 9 | hash: str 10 | -------------------------------------------------------------------------------- /neurons/_validator/competitions/models/neuron.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass, field 2 | 3 | 4 | @dataclass 5 | class NeuronState: 6 | hotkey: str 7 | uid: int 8 | sota_relative_score: float 9 | proof_size: float 10 | response_time: float 11 | verification_result: bool 12 | raw_accuracy: float 13 | hash: str 14 | rank_overall: int = 999 15 | rank_accuracy: int = 999 16 | rank_proof_size: int = 999 17 | rank_response_time: int = 999 18 | historical_best_sota_score: int = 0 19 | historical_improvement_rate: float = 0.0 20 | verification_rate: float = 1.0 21 | relative_to_sota: dict[str, float] = field(default_factory=dict) 22 | -------------------------------------------------------------------------------- /neurons/_validator/competitions/models/sota.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | 3 | 4 | @dataclass 5 | class SotaState: 6 | sota_relative_score: float = 0.0 7 | hash: str | None = None 8 | hotkey: str | None = None 9 | uid: int | None = None 10 | proof_size: float = float("inf") 11 | response_time: float = float("inf") 12 | timestamp: int = 0 13 | raw_accuracy: float = 0.0 14 | -------------------------------------------------------------------------------- /neurons/_validator/competitions/services/circuit_validator.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import bittensor as bt 4 | from constants import MAX_CIRCUIT_SIZE_GB 5 | 6 | 7 | class CircuitValidator: 8 | REQUIRED_FILES = [ 9 | "vk.key", 10 | "pk.key", 11 | "settings.json", 12 | "model.compiled", 13 | ] 14 | 15 | REQUIRED_SETTINGS = { 16 | "run_args": { 17 | "input_visibility": "Private", 18 | "output_visibility": "Public", 19 | "param_visibility": "Private", 20 | "commitment": "KZG", 21 | } 22 | } 23 | 24 | @classmethod 25 | def validate_files(cls, circuit_dir: str) -> bool: 26 | try: 27 | if not cls._validate_size(circuit_dir): 28 | return False 29 | 30 | if not cls._validate_required_files(circuit_dir): 31 | return False 32 | 33 | if not cls._validate_settings(circuit_dir): 34 | return False 35 | 36 | return True 37 | 38 | except Exception as e: 39 | bt.logging.error(f"Error validating circuit files: {e}") 40 | return False 41 | 42 | @classmethod 43 | def _validate_size(cls, circuit_dir: str) -> bool: 44 | total_size = sum( 45 | os.path.getsize(os.path.join(circuit_dir, f)) 46 | for f in os.listdir(circuit_dir) 47 | if os.path.isfile(os.path.join(circuit_dir, f)) 48 | ) 49 | if total_size > MAX_CIRCUIT_SIZE_GB * 1024 * 1024 * 1024: 50 | bt.logging.error( 51 | f"Circuit files too large: {total_size / (1024 * 1024 * 1024):.2f} GB" 52 | ) 53 | return False 54 | return True 55 | 56 | @classmethod 57 | def _validate_required_files(cls, circuit_dir: str) -> bool: 58 | for f in cls.REQUIRED_FILES: 59 | if not os.path.exists(os.path.join(circuit_dir, f)): 60 | 
bt.logging.error(f"Missing required file: {f}")
61 |                 return False
62 |         return True
63 | 
64 |     @classmethod
65 |     def _validate_settings(cls, circuit_dir: str) -> bool:
66 |         try:
67 |             with open(os.path.join(circuit_dir, "settings.json")) as f:
68 |                 settings = json.load(f)
69 |                 if "run_args" not in settings:
70 |                     bt.logging.error("Missing run_args in settings.json")
71 |                     return False
72 | 
73 |                 run_args = settings["run_args"]
74 |                 required_args = cls.REQUIRED_SETTINGS["run_args"]
75 | 
76 |                 for key, value in required_args.items():
77 |                     if key not in run_args:
78 |                         bt.logging.error(f"Missing required run_args setting: {key}")
79 |                         return False
80 |                     if run_args[key] != value:
81 |                         bt.logging.error(
82 |                             f"Invalid value for {key}: expected {value}, got {run_args[key]}"
83 |                         )
84 |                         return False
85 | 
86 |                 return True
87 |         except json.JSONDecodeError:
88 |             bt.logging.error("Invalid JSON in settings.json")
89 |             return False
90 | 
--------------------------------------------------------------------------------
/neurons/_validator/competitions/services/onnx_runner.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import numpy as np
3 | import onnxruntime as ort
4 | import traceback
5 | 
6 | 
7 | def run_inference(model_path: str, input_path: str, output_path: str) -> None:
8 |     try:
9 |         session = ort.InferenceSession(model_path)
10 |         input_name = session.get_inputs()[0].name
11 |         input_data = np.load(input_path)
12 | 
13 |         options = ort.RunOptions()
14 |         options.log_severity_level = 3
15 | 
16 |         output_names = [output.name for output in session.get_outputs()]
17 |         outputs = session.run(output_names, {input_name: input_data}, options)
18 | 
19 |         flattened = []
20 |         for out in outputs:
21 |             flattened.extend(out.flatten())
22 |         final_output = np.array(flattened)
23 |         np.save(output_path, final_output)
24 |     except Exception as e:
25 |         print(f"Error running inference: {str(e)}")
26 |         print(f"Traceback:\n{traceback.format_exc()}")
27 |         sys.exit(1)
28 | 
29 | 
30 | if __name__ == "__main__":
31 |     if len(sys.argv) != 4:
32 |         print("Usage: python onnx_runner.py <model_path> <input_path> <output_path>")
33 |         sys.exit(1)
34 |     run_inference(sys.argv[1], sys.argv[2], sys.argv[3])
35 | 
--------------------------------------------------------------------------------
/neurons/_validator/competitions/utils/cleanup.py:
--------------------------------------------------------------------------------
1 | import os
2 | import shutil
3 | from utils.system import get_temp_folder
4 | import bittensor as bt
5 | 
6 | 
7 | def cleanup_temp_dir(signum=None, frame=None, specific_dir=None):
8 |     temp_folder = get_temp_folder()
9 |     if not os.path.exists(temp_folder):
10 |         return
11 | 
12 |     if specific_dir:
13 |         dir_path = os.path.join(temp_folder, specific_dir)
14 |         if os.path.exists(dir_path):
15 |             try:
16 |                 if os.path.isfile(dir_path) or os.path.islink(dir_path):
17 |                     os.unlink(dir_path)
18 |                 elif os.path.isdir(dir_path):
19 |                     shutil.rmtree(dir_path)
20 |             except Exception as e:
21 |                 bt.logging.error(f"Error cleaning up directory {dir_path}: {e}")
22 |     else:
23 |         bt.logging.debug("No specific directory provided for cleanup, skipping...")
24 | 
--------------------------------------------------------------------------------
/neurons/_validator/config/__init__.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import bittensor as bt
3 | from constants import DEFAULT_NETUID, COMPETITION_SYNC_INTERVAL
4 | 
5 | from utils import wandb_logger
6 | from _validator.config.api import
ApiConfig 7 | 8 | 9 | class ValidatorConfig: 10 | """ 11 | Configuration class for the Validator. 12 | 13 | This class initializes and manages the configuration settings for the Omron validator. 14 | 15 | Attributes: 16 | config (bt.config): The Bittensor configuration object. 17 | subnet_uid (int): The unique identifier for the subnet. 18 | wallet (bt.wallet): The Bittensor wallet object. 19 | subtensor (bt.subtensor): The Bittensor subtensor object. 20 | dendrite (bt.dendrite): The Bittensor dendrite object. 21 | metagraph (bt.metagraph): The Bittensor metagraph object. 22 | user_uid (int): The unique identifier for the validator within the subnet's metagraph. 23 | api_enabled (bool): Whether the API is enabled. 24 | """ 25 | 26 | def __init__(self, config: bt.config): 27 | """ 28 | Initialize the ValidatorConfig object. 29 | 30 | Args: 31 | config (bt.config): The Bittensor configuration object. 32 | """ 33 | for key, value in vars(config).items(): 34 | setattr(self, key, value) 35 | 36 | self.bt_config: bt.Config = config 37 | self.subnet_uid = int( 38 | self.bt_config.netuid if self.bt_config.netuid else DEFAULT_NETUID 39 | ) 40 | self.wallet = bt.wallet(config=self.bt_config) 41 | self.dendrite = bt.dendrite(wallet=self.wallet) 42 | self.subtensor = bt.subtensor(config=self.bt_config) 43 | try: 44 | self.metagraph = self.subtensor.metagraph(self.subnet_uid) 45 | except Exception as e: 46 | bt.logging.error(f"Error getting metagraph: {e}") 47 | self.metagraph = None 48 | self.user_uid = int( 49 | self.metagraph.hotkeys.index(self.wallet.hotkey.ss58_address) 50 | ) 51 | self.localnet = self.bt_config.localnet 52 | self.api = ApiConfig(self.bt_config) 53 | self.competition_sync_interval = ( 54 | COMPETITION_SYNC_INTERVAL 55 | if self.bt_config.competition_sync_interval is None 56 | else self.bt_config.competition_sync_interval 57 | ) 58 | 59 | # Initialize wandb logger 60 | wandb_logger.safe_init( 61 | "Validator", 62 | self.wallet, 63 | self.metagraph, 64 | self.bt_config, 65 | ) 66 | 67 | def check_register(self): 68 | """ 69 | Check if the validator is registered on the subnet. 70 | 71 | This method verifies if the validator's hotkey is registered in the metagraph. 72 | If not registered, it logs an error and exits. 73 | If registered, it sets the user_uid and logs it. 74 | 75 | Raises: 76 | SystemExit: If the validator is not registered on the network. 77 | """ 78 | if self.wallet.hotkey.ss58_address not in self.metagraph.hotkeys: 79 | bt.logging.error( 80 | f"\nYour validator: {self.wallet} is not registered to the chain: " 81 | f"{self.subtensor} \nRun btcli register and try again." 82 | ) 83 | sys.exit(1) 84 | else: 85 | uid = self.metagraph.hotkeys.index(self.wallet.hotkey.ss58_address) 86 | bt.logging.info(f"Running validator on uid: {uid}") 87 | self.user_uid = uid 88 | -------------------------------------------------------------------------------- /neurons/_validator/config/api.py: -------------------------------------------------------------------------------- 1 | import bittensor as bt 2 | 3 | 4 | class ApiConfig: 5 | """ 6 | Configuration class for the API. 7 | 8 | Attributes: 9 | enabled (bool): Whether the API is enabled. 10 | host (str): The host for the API. 11 | port (int): The port for the API. 12 | workers (int): The number of workers for the API. 13 | verify_external_signatures (bool): Whether to verify external signatures. 14 | certificate_path (str): The path to the certificate directory. 
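        whitelisted_public_keys (list[str]): Public keys (ss58 addresses) accepted
            without further verification.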
15 |         serve_axon (bool): Whether to serve the axon displaying your API information.
16 |     """
17 | 
18 |     def __init__(self, config: bt.config):
19 |         self.enabled = not config.ignore_external_requests
20 |         self.host = config.external_api_host
21 |         self.port = config.external_api_port
22 |         self.workers = config.external_api_workers
23 |         self.verify_external_signatures = not config.do_not_verify_external_signatures
24 |         self.certificate_path = config.certificate_path
25 |         self.whitelisted_public_keys = config.whitelisted_public_keys
26 |         self.serve_axon = config.serve_axon
27 | 
--------------------------------------------------------------------------------
/neurons/_validator/core/request.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass
2 | 
3 | from execution_layer.circuit import Circuit
4 | from _validator.models.request_type import RequestType
5 | from protocol import QueryZkProof, ProofOfWeightsSynapse
6 | from execution_layer.generic_input import GenericInput
7 | import bittensor as bt
8 | 
9 | 
10 | @dataclass
11 | class Request:
12 |     """
13 |     A request to be sent to a miner.
14 |     """
15 | 
16 |     uid: int
17 |     axon: bt.axon
18 |     synapse: QueryZkProof | ProofOfWeightsSynapse
19 |     circuit: Circuit
20 |     request_type: RequestType
21 |     inputs: GenericInput | None = None
22 |     request_hash: str | None = None
23 |     response_time: float | None = None
24 |     deserialized: dict[str, object] | None = None
25 |     result: bt.Synapse | None = None
26 |     save: bool = False
27 | 
--------------------------------------------------------------------------------
/neurons/_validator/core/response_processor.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | import traceback
3 | import time
4 | import bittensor as bt
5 | from _validator.core.request import Request
6 | from _validator.models.completed_proof_of_weights import CompletedProofOfWeightsItem
7 | from _validator.models.miner_response import MinerResponse
8 | from _validator.models.request_type import RequestType
9 | from _validator.scoring.score_manager import ScoreManager
10 | from execution_layer.generic_input import GenericInput
11 | from execution_layer.verified_model_session import VerifiedModelSession
12 | 
13 | 
14 | class ResponseProcessor:
15 |     def __init__(
16 |         self,
17 |         metagraph,
18 |         score_manager: ScoreManager,
19 |         user_uid,
20 |         hotkey: bt.Keypair,
21 |     ):
22 |         self.metagraph = metagraph
23 |         self.score_manager = score_manager
24 |         self.user_uid = user_uid
25 |         self.hotkey = hotkey
26 |         self.proof_batches_queue = []
27 |         self.completed_proof_of_weights_queue: list[CompletedProofOfWeightsItem] = []
28 | 
29 |     def process_single_response(self, response: Request) -> MinerResponse:
30 |         miner_response = MinerResponse.from_raw_response(response)
31 |         if miner_response.proof_content is None:
32 |             bt.logging.debug(
33 |                 f"Miner at UID: {miner_response.uid} failed to provide a valid proof for "
34 |                 f"{str(miner_response.circuit)}. "
35 |                 f"Response from miner: {miner_response.raw}"
36 |             )
37 |         elif miner_response.proof_content:
38 |             bt.logging.debug(
39 |                 f"Attempting to verify proof for UID: {miner_response.uid} "
40 |                 f"using {str(miner_response.circuit)}."
41 |             )
42 |             try:
43 |                 start_time = time.time()
44 |                 verification_result = self.verify_proof_string(
45 |                     miner_response, response.inputs
46 |                 )
47 |                 miner_response.verification_time = time.time() - start_time
48 |                 miner_response.set_verification_result(verification_result)
49 |                 if not verification_result:
50 |                     bt.logging.warning(
51 |                         f"Miner at UID: {miner_response.uid} provided a proof"
52 |                         f" for {str(miner_response.circuit)}"
53 |                         ", but verification failed."
54 |                     )
55 |             except Exception as e:
56 |                 bt.logging.warning(
57 |                     f"Unable to verify proof for UID: {miner_response.uid}. Error: {e}"
58 |                 )
59 |                 traceback.print_exc()
60 | 
61 |         if miner_response.verification_result:
62 |             bt.logging.success(
63 |                 f"Miner at UID: {miner_response.uid} provided a valid proof "
64 |                 f"for {str(miner_response.circuit)} "
65 |                 f"in {miner_response.response_time} seconds."
66 |             )
67 |         return miner_response
68 | 
69 |     def verify_proof_string(
70 |         self, response: MinerResponse, validator_inputs: GenericInput
71 |     ) -> bool:
72 |         if not response.proof_content or not response.public_json:
73 |             bt.logging.error(f"Proof or public json not found for UID: {response.uid}")
74 |             return False
75 |         try:
76 |             inference_session = VerifiedModelSession(
77 |                 GenericInput(RequestType.RWR, response.public_json),
78 |                 response.circuit,
79 |             )
80 |             res: bool = inference_session.verify_proof(
81 |                 validator_inputs, response.proof_content
82 |             )
83 |             inference_session.end()
84 |             return res
85 |         except Exception:
86 |             raise
87 | 
--------------------------------------------------------------------------------
/neurons/_validator/models/base_rpc_request.py:
--------------------------------------------------------------------------------
1 | from pydantic import BaseModel
2 | from execution_layer.circuit import Circuit
3 | from _validator.utils.api import hash_inputs
4 | 
5 | 
6 | class RealWorldRequest(BaseModel):
7 |     circuit: Circuit
8 |     inputs: dict
9 | 
10 |     model_config = {"arbitrary_types_allowed": True}
11 | 
12 |     @property
13 |     def hash(self) -> str:
14 |         return hash_inputs(self.inputs)
15 | 
--------------------------------------------------------------------------------
/neurons/_validator/models/completed_proof_of_weights.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | from dataclasses import dataclass
3 | 
4 | from dataclasses import field  # stdlib dataclasses.field, not attrs.field
5 | 
6 | 
7 | @dataclass
8 | class CompletedProofOfWeightsItem:
9 |     """
10 |     A completed proof of weights item, to be logged to the chain.
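    Items are serialized via to_remark() before being submitted to the chain.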
11 | """ 12 | 13 | signals: list[str] | None = field(default=None) 14 | proof: dict | str | None = field(default=None) 15 | model_id: str | None = field(default=None) 16 | netuid: int | None = field(default=None) 17 | 18 | def __post_init__(self): 19 | self.signals = self.signals 20 | self.proof = self.proof 21 | 22 | def to_remark(self) -> dict: 23 | return { 24 | "type": "proof_of_weights", 25 | "signals": self.signals, 26 | "proof": self.proof, 27 | "verification_key": self.model_id, 28 | "netuid": self.netuid, 29 | } 30 | -------------------------------------------------------------------------------- /neurons/_validator/models/poc_rpc_request.py: -------------------------------------------------------------------------------- 1 | from _validator.models.base_rpc_request import RealWorldRequest 2 | from pydantic import Field 3 | from deployment_layer.circuit_store import circuit_store 4 | from execution_layer.circuit import CircuitType 5 | 6 | 7 | class ProofOfComputationRPCRequest(RealWorldRequest): 8 | """ 9 | Request for the Proof of Computation RPC method. 10 | """ 11 | 12 | circuit_id: str = Field(..., description="The ID of the circuit to use") 13 | 14 | class Config: 15 | arbitrary_types_allowed = True 16 | extra = "allow" 17 | 18 | def __init__(self, **data): 19 | circuit_id = data.get("circuit_id") 20 | if not circuit_id: 21 | raise ValueError("circuit_id is required") 22 | 23 | circuit = circuit_store.get_circuit(circuit_id) 24 | if circuit is None: 25 | raise ValueError(f"No circuit found for ID {circuit_id}") 26 | 27 | if circuit.metadata.type != CircuitType.PROOF_OF_COMPUTATION: 28 | raise ValueError( 29 | f"Circuit {circuit_id} is not a proof of computation circuit" 30 | ) 31 | 32 | super().__init__( 33 | circuit=circuit, inputs=data.get("inputs"), circuit_id=circuit_id 34 | ) 35 | -------------------------------------------------------------------------------- /neurons/_validator/models/pow_rpc_request.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from _validator.models.base_rpc_request import RealWorldRequest 3 | from pydantic import Field 4 | from deployment_layer.circuit_store import circuit_store 5 | 6 | 7 | class ProofOfWeightsRPCRequest(RealWorldRequest): 8 | """ 9 | Request for the Proof of Weights RPC method. 
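    If weights_version is omitted, the latest circuit registered for the given
    netuid is used and its weights_version is adopted.

    Illustrative construction (payload assumed):
        ProofOfWeightsRPCRequest(netuid=48, evaluation_data={...})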
10 |     """
11 | 
12 |     weights_version: int | None = Field(
13 |         None, description="The version of weights in use by the origin subnet"
14 |     )
15 |     netuid: int = Field(..., description="The origin subnet UID")
16 |     evaluation_data: dict = Field(default_factory=dict)
17 | 
18 |     class Config:
19 |         arbitrary_types_allowed = True
20 |         extra = "allow"
21 | 
22 |     def __init__(self, **data):
23 |         netuid = data.get("netuid")
24 |         weights_version = data.get("weights_version")
25 |         evaluation_data = data.get("evaluation_data")
26 | 
27 |         circuit = None
28 |         if weights_version is None:
29 |             circuit = circuit_store.get_latest_circuit_for_netuid(netuid)
30 |             weights_version = circuit.metadata.weights_version
31 |         else:
32 |             circuit = circuit_store.get_circuit_for_netuid_and_version(
33 |                 netuid=netuid, version=weights_version
34 |             )
35 |         if circuit is None:
36 |             raise ValueError(
37 |                 f"No circuit found for netuid {netuid} and weights version {weights_version}"
38 |             )
39 | 
40 |         super().__init__(
41 |             circuit=circuit,
42 |             inputs=evaluation_data,
43 |             evaluation_data=evaluation_data,
44 |             netuid=netuid,
45 |             weights_version=weights_version,
46 |         )
47 | 
--------------------------------------------------------------------------------
/neurons/_validator/models/request_type.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 | 
3 | 
4 | class RequestType(Enum):
5 |     BENCHMARK = "benchmark_request"
6 |     RWR = "real_world_request"
7 | 
8 |     def __str__(self) -> str:
9 |         if self == RequestType.BENCHMARK:
10 |             return "Benchmark"
11 |         elif self == RequestType.RWR:
12 |             return "Real World Request"
13 |         else:
14 |             raise ValueError(f"Unknown request type: {self}")
15 | 
16 | 
17 | class ValidatorMessage(Enum):
18 |     WINDDOWN = "winddown"
19 |     WINDDOWN_COMPLETE = "winddown_complete"
20 |     COMPETITION_COMPLETE = "competition_complete"
21 | 
22 |     def __str__(self) -> str:
23 |         return self.value
24 | 
--------------------------------------------------------------------------------
/neurons/_validator/pow/proof_of_weights_handler.py:
--------------------------------------------------------------------------------
1 | from bittensor import logging
2 | from _validator.utils.proof_of_weights import ProofOfWeightsItem
3 | from execution_layer.circuit import Circuit, CircuitType
4 | from constants import (
5 |     BATCHED_PROOF_OF_WEIGHTS_MODEL_ID,
6 | )
7 | from protocol import ProofOfWeightsSynapse, QueryZkProof
8 | from _validator.models.request_type import RequestType
9 | 
10 | 
11 | class ProofOfWeightsHandler:
12 |     """
13 |     Handles internal proof of weights.
14 |     This covers the case where the origin validator is a validator on Omron;
15 |     no external requests are needed, as this internal mechanism is used to generate the proof of weights.
16 |     """
17 | 
18 |     @staticmethod
19 |     def prepare_pow_request(
20 |         circuit: Circuit, score_manager
21 |     ) -> tuple[ProofOfWeightsSynapse | QueryZkProof | None, bool]:
22 |         queue = score_manager.get_pow_queue()
23 |         batch_size = 1024
24 | 
25 |         if circuit.id != BATCHED_PROOF_OF_WEIGHTS_MODEL_ID:
26 |             logging.debug("Not a batched PoW model. Defaulting to benchmark.")
27 |             return None, False
28 | 
29 |         if len(queue) < batch_size:
30 |             logging.debug(
31 |                 f"Queue is less than {batch_size} items. Defaulting to benchmark."
32 |             )
33 |             return None, False
34 | 
35 |         pow_items = ProofOfWeightsItem.pad_items(
36 |             queue[:batch_size], target_item_count=batch_size
37 |         )
38 | 
39 |         logging.info(f"Preparing PoW request for {str(circuit)}")
40 |         score_manager.remove_processed_items(batch_size)
41 |         return (
42 |             ProofOfWeightsHandler._create_request_from_items(circuit, pow_items),
43 |             True,
44 |         )
45 | 
46 |     @staticmethod
47 |     def _create_request_from_items(
48 |         circuit: Circuit, pow_items: list[ProofOfWeightsItem]
49 |     ) -> ProofOfWeightsSynapse | QueryZkProof:
50 |         inputs = circuit.input_handler(
51 |             RequestType.RWR, ProofOfWeightsItem.to_dict_list(pow_items)
52 |         ).to_json()
53 | 
54 |         if circuit.metadata.type == CircuitType.PROOF_OF_WEIGHTS:
55 |             return ProofOfWeightsSynapse(
56 |                 subnet_uid=circuit.metadata.netuid,
57 |                 verification_key_hash=circuit.id,
58 |                 proof_system=circuit.proof_system,
59 |                 inputs=inputs,
60 |                 proof="",
61 |                 public_signals="",
62 |             )
63 |         return QueryZkProof(
64 |             query_input={"public_inputs": inputs, "model_id": circuit.id},
65 |             query_output="",
66 |         )
67 | 
--------------------------------------------------------------------------------
/neurons/_validator/scoring/weights.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | from dataclasses import dataclass, field
3 | import torch
4 | import bittensor as bt
5 | from constants import (
6 |     WEIGHT_RATE_LIMIT,
7 |     WEIGHTS_VERSION,
8 |     ONE_MINUTE,
9 |     WEIGHT_UPDATE_BUFFER,
10 | )
11 | from _validator.utils.logging import log_weights
12 | from _validator.utils.proof_of_weights import ProofOfWeightsItem
13 | from utils.epoch import get_current_epoch_info
14 | 
15 | 
16 | @dataclass
17 | class WeightsManager:
18 |     """
19 |     Manages weight setting for the Omron validator.
20 | 
21 |     Attributes:
22 |         subtensor (bt.subtensor): The Bittensor subtensor instance.
23 |         metagraph (bt.metagraph): The Bittensor metagraph instance.
24 |         wallet (bt.wallet): The Bittensor wallet instance.
25 |         user_uid (int): The unique identifier of the validator.
26 |         last_update_weights_block (int): The last block number when weights were updated.
27 |         proof_of_weights_queue (list[ProofOfWeightsItem]): Queue for proof of weights items.
28 | 
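    Updates are rate limited to one per WEIGHT_RATE_LIMIT blocks and are only
    attempted within the final WEIGHT_UPDATE_BUFFER blocks of each epoch.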
29 |     """
30 | 
31 |     subtensor: bt.subtensor
32 |     metagraph: bt.metagraph
33 |     wallet: bt.wallet
34 |     user_uid: int
35 |     last_update_weights_block: int = 0
36 |     proof_of_weights_queue: list[ProofOfWeightsItem] = field(default_factory=list)
37 | 
38 |     def set_weights(self, netuid, wallet, uids, weights, version_key):
39 |         return self.subtensor.set_weights(
40 |             netuid=netuid,
41 |             wallet=wallet,
42 |             uids=uids,
43 |             weights=weights,
44 |             wait_for_inclusion=True,
45 |             version_key=version_key,
46 |         )
47 | 
48 |     def should_update_weights(self) -> tuple[bool, str]:
49 |         """Check if weights should be updated based on rate limiting and epoch timing."""
50 |         blocks_since_last_update = self.subtensor.blocks_since_last_update(
51 |             self.metagraph.netuid, self.user_uid
52 |         )
53 |         if blocks_since_last_update < WEIGHT_RATE_LIMIT:
54 |             blocks_until_update = WEIGHT_RATE_LIMIT - blocks_since_last_update
55 |             minutes_until_update = round((blocks_until_update * 12) / ONE_MINUTE, 1)
56 |             return (
57 |                 False,
58 |                 f"Next weight update in {blocks_until_update} blocks "
59 |                 f"(approximately {minutes_until_update:.1f} minutes)",
60 |             )
61 | 
62 |         current_block = self.subtensor.get_current_block()
63 |         _, blocks_until_next_epoch, _ = get_current_epoch_info(
64 |             current_block, self.metagraph.netuid
65 |         )
66 | 
67 |         if blocks_until_next_epoch > WEIGHT_UPDATE_BUFFER:
68 |             return (
69 |                 False,
70 |                 f"Weight updates only allowed in last {WEIGHT_UPDATE_BUFFER} blocks of epoch. "
71 |                 f"Wait {blocks_until_next_epoch - WEIGHT_UPDATE_BUFFER} more blocks.",
72 |             )
73 | 
74 |         return True, ""
75 | 
76 |     def update_weights(self, scores: torch.Tensor) -> bool:
77 |         """Updates the weights based on the given scores and sets them on the chain."""
78 |         should_update, message = self.should_update_weights()
79 |         if not should_update:
80 |             bt.logging.info(message)
81 |             return True
82 | 
83 |         bt.logging.info("Updating weights")
84 |         weights = torch.zeros(self.metagraph.n)
85 |         nonzero_indices = scores.nonzero()
86 |         bt.logging.debug(
87 |             f"Weights: {weights}, Nonzero indices: {nonzero_indices}, Scores: {scores}"
88 |         )
89 |         if nonzero_indices.numel() > 0:
90 |             weights[nonzero_indices] = scores[nonzero_indices]
91 | 
92 |         try:
93 |             success, message = self.set_weights(
94 |                 netuid=self.metagraph.netuid,
95 |                 wallet=self.wallet,
96 |                 uids=self.metagraph.uids.tolist(),
97 |                 weights=weights.tolist(),
98 |                 version_key=WEIGHTS_VERSION,
99 |             )
100 | 
101 |             if message:
102 |                 bt.logging.info(f"Set weights message: {message}")
103 | 
104 |             if success:
105 |                 bt.logging.success("Weights were set successfully")
106 |                 log_weights(weights)
107 |                 self.last_update_weights_block = int(self.metagraph.block.item())
108 |                 return True
109 |             return False
110 | 
111 |         except Exception as e:
112 |             bt.logging.error(f"Failed to set weights on chain with exception: {e}")
113 |             return False
114 | 
--------------------------------------------------------------------------------
/neurons/_validator/utils/api.py:
--------------------------------------------------------------------------------
1 | import hashlib
2 | from execution_layer.generic_input import GenericInput
3 | 
4 | 
5 | def hash_inputs(inputs: GenericInput | dict) -> str:
6 |     """
7 |     Hashes inputs to proof of weights, excluding dynamic fields.
8 | 
9 |     Args:
10 |         inputs (GenericInput | dict): The inputs to hash.
11 | 
12 |     Returns:
13 |         str: The hashed inputs.
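
    Example (illustrative): two payloads that differ only in a dynamic field
    hash identically:
        hash_inputs({"a": 1, "nonce": 5}) == hash_inputs({"a": 1, "nonce": 9})  # True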
14 | """ 15 | if isinstance(inputs, GenericInput): 16 | inputs = inputs.to_json() 17 | filtered_inputs = { 18 | k: v 19 | for k, v in inputs.items() 20 | if k not in ["validator_uid", "nonce", "uid_responsible_for_proof"] 21 | } 22 | return hashlib.sha256(str(filtered_inputs).encode()).hexdigest() 23 | -------------------------------------------------------------------------------- /neurons/_validator/utils/axon.py: -------------------------------------------------------------------------------- 1 | import traceback 2 | import bittensor as bt 3 | from aiohttp.client_exceptions import InvalidUrlClientError 4 | from _validator.core.request import Request 5 | 6 | 7 | async def query_single_axon(dendrite: bt.dendrite, request: Request) -> Request | None: 8 | """ 9 | Query a single axon with a request. Per Circuit query. 10 | 11 | Args: 12 | dendrite (bt.dendrite): The dendrite to use for querying. 13 | request (Request): The request to send. 14 | 15 | Returns: 16 | Request | None: The request with results populated, or None if the request failed. 17 | """ 18 | 19 | try: 20 | result = await dendrite.call( 21 | target_axon=request.axon, 22 | synapse=request.synapse, 23 | timeout=request.circuit.timeout, 24 | deserialize=False, 25 | ) 26 | 27 | if not result: 28 | return None 29 | request.result = result 30 | request.response_time = ( 31 | result.dendrite.process_time 32 | if result.dendrite.process_time is not None 33 | else request.circuit.timeout 34 | ) 35 | 36 | request.deserialized = result.deserialize() 37 | return request 38 | 39 | except InvalidUrlClientError: 40 | bt.logging.warning( 41 | f"Ignoring UID as axon is not a valid URL: {request.uid}. {request.axon.ip}:{request.axon.port}" 42 | ) 43 | return None 44 | 45 | except Exception as e: 46 | bt.logging.warning(f"Failed to query axon for UID: {request.uid}. Error: {e}") 47 | traceback.print_exc() 48 | return None 49 | -------------------------------------------------------------------------------- /neurons/_validator/utils/hash_guard.py: -------------------------------------------------------------------------------- 1 | from execution_layer.base_input import BaseInput 2 | import bittensor as bt 3 | import json 4 | import hashlib 5 | from collections import deque 6 | 7 | 8 | class HashGuard: 9 | """ 10 | A safety checker to ensure input data is never repeated. 11 | Uses SHA-256 for consistent hashing across sessions and sorted keys for deterministic JSON. 12 | Uses a set for O(1) lookups and a deque for FIFO order. 13 | """ 14 | 15 | MAX_HASHES = 32768 16 | 17 | def __init__(self): 18 | self.hash_set = set() 19 | self.hash_queue = deque(maxlen=self.MAX_HASHES) 20 | 21 | def check_hash(self, input: BaseInput) -> None: 22 | 23 | if isinstance(input, BaseInput): 24 | input = input.to_json() 25 | 26 | def sort_dict(d): 27 | if isinstance(d, dict): 28 | return {k: sort_dict(v) for k, v in sorted(d.items())} 29 | if isinstance(d, list): 30 | return [sort_dict(x) for x in d] 31 | return d 32 | 33 | sorted_input = sort_dict(input) 34 | json_str = json.dumps(sorted_input, sort_keys=True) 35 | hash_value = hashlib.sha256(json_str.encode()).hexdigest() 36 | 37 | if hash_value in self.hash_set: 38 | bt.logging.error(f"Hash already exists: {hash_value}. 
Inputs: {input}") 39 | raise ValueError("Hash already exists") 40 | 41 | if len(self.hash_queue) == self.MAX_HASHES: 42 | old_hash = self.hash_queue.popleft() 43 | self.hash_set.remove(old_hash) 44 | 45 | self.hash_set.add(hash_value) 46 | self.hash_queue.append(hash_value) 47 | -------------------------------------------------------------------------------- /neurons/_validator/utils/logging.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import torch 4 | from rich.console import Console, JustifyMethod 5 | from rich.table import Table 6 | 7 | from utils import wandb_logger 8 | from _validator.models.miner_response import MinerResponse 9 | from _validator.competitions.models.neuron import NeuronState 10 | 11 | 12 | def create_and_print_table( 13 | title: str, columns: list[tuple[str, JustifyMethod, str]], rows: list[list[str]] 14 | ): 15 | """ 16 | Create and print a table. 17 | 18 | Args: 19 | title (str): The title of the table. 20 | columns (list[tuple[str, JustifyMethod, str]]): A list of tuples containing column information. 21 | Each tuple should contain (column_name, justification, style). 22 | rows (list[list[str]]): A list of rows, where each row is a list of string values. 23 | 24 | """ 25 | table = Table(title=title) 26 | for col_name, justify, style in columns: 27 | table.add_column(col_name, justify=justify, style=style, no_wrap=True) 28 | for row in rows: 29 | table.add_row(*row) 30 | console = Console(color_system="truecolor") 31 | console.width = 120 32 | console.print(table) 33 | 34 | 35 | def log_tensor_data(title: str, data: torch.Tensor, log_key: str): 36 | """ 37 | Log tensor data to a table and Weights & Biases. 38 | 39 | Args: 40 | title (str): The title of the table. 41 | data (torch.Tensor): The tensor data to be logged. 42 | log_key (str): The key used for logging in Weights & Biases. 43 | """ 44 | rows = [[str(uid), f"{value.item():.6f}"] for uid, value in enumerate(data)] 45 | create_and_print_table( 46 | title, [("uid", "right", "cyan"), (log_key, "right", "yellow")], rows 47 | ) 48 | 49 | 50 | def log_scores(scores: torch.Tensor): 51 | """ 52 | Log scores to a table and Weights & Biases. 53 | 54 | Args: 55 | scores (torch.Tensor): The scores tensor to be logged. 56 | 57 | """ 58 | log_tensor_data("scores", scores, "scores") 59 | 60 | 61 | def log_weights(weights: torch.Tensor): 62 | """ 63 | Log weights to a table and Weights & Biases. 64 | 65 | Args: 66 | weights (torch.Tensor): The weights tensor to be logged. 67 | """ 68 | log_tensor_data("weights", weights, "weights") 69 | 70 | 71 | def log_verify_result(results: list[tuple[int, bool]]): 72 | """ 73 | Log verification results to a table and Weights & Biases. 74 | 75 | Args: 76 | results (list[tuple[int, bool]]): A list of tuples containing (uid, verification_result). 77 | 78 | """ 79 | rows = [[str(uid), str(result)] for uid, result in results] 80 | create_and_print_table( 81 | "proof verification result", 82 | [("uid", "right", "cyan"), ("Verified?", "right", "green")], 83 | rows, 84 | ) 85 | wandb_logger.safe_log( 86 | {"verification_results": {uid: int(result) for uid, result in results}} 87 | ) 88 | 89 | 90 | def log_responses(responses: list[MinerResponse]): 91 | """ 92 | Log miner responses to a table and Weights & Biases. 93 | 94 | Args: 95 | responses (list[MinerResponse]): A list of MinerResponse objects to be logged. 
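    Only responses that passed verification are forwarded to Weights & Biases.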
96 | """ 97 | columns = [ 98 | ("UID", "right", "cyan"), 99 | ("Verification Result", "right", "green"), 100 | ("Response Time", "right", "yellow"), 101 | ("Proof Size", "right", "blue"), 102 | ("Circuit Name", "left", "magenta"), 103 | ("Proof System", "left", "red"), 104 | ] 105 | 106 | sorted_responses = sorted(responses, key=lambda x: x.uid) 107 | rows = [ 108 | [ 109 | str(response.uid), 110 | str(response.verification_result), 111 | str(response.response_time), 112 | str(response.proof_size), 113 | (response.circuit.metadata.name if response.circuit else "Unknown"), 114 | (response.circuit.metadata.proof_system if response.circuit else "Unknown"), 115 | ] 116 | for response in sorted_responses 117 | ] 118 | create_and_print_table("Responses", columns, rows) 119 | 120 | wandb_log = { 121 | "responses": { 122 | response.uid: { 123 | str(response.circuit): { 124 | "verification_result": int(response.verification_result), 125 | "response_time": response.response_time, 126 | "proof_size": response.proof_size, 127 | } 128 | } 129 | for response in sorted_responses 130 | if response.verification_result 131 | } 132 | } 133 | wandb_logger.safe_log(wandb_log) 134 | 135 | 136 | def log_sota_scores( 137 | performance_scores: list[tuple[str, float]], 138 | miner_states: dict[str, NeuronState], 139 | decay_rate: float = 3.0, 140 | ): 141 | table = Table(title="SOTA Scores") 142 | table.add_column("Hotkey", style="cyan") 143 | table.add_column("Score", justify="right", style="green") 144 | table.add_column("Raw Accuracy", justify="right", style="yellow") 145 | table.add_column("Proof Size", justify="right", style="blue") 146 | table.add_column("Response Time", justify="right", style="magenta") 147 | 148 | for rank, (hotkey, _) in enumerate(performance_scores): 149 | rank_score = torch.exp(torch.tensor(-decay_rate * rank)).item() 150 | miner_states[hotkey].sota_relative_score = rank_score 151 | 152 | state = miner_states[hotkey] 153 | table.add_row( 154 | hotkey[:8] + "...", 155 | f"{rank_score:.6f}", 156 | f"{state.raw_accuracy:.4f}", 157 | f"{state.proof_size:.0f}", 158 | f"{state.response_time:.4f}", 159 | ) 160 | 161 | console = Console(color_system="truecolor") 162 | console.width = 120 163 | console.print(table) 164 | -------------------------------------------------------------------------------- /neurons/_validator/utils/pps.py: -------------------------------------------------------------------------------- 1 | import time 2 | import bittensor as bt 3 | import requests 4 | from substrateinterface import Keypair 5 | 6 | 7 | class ProofPublishingService: 8 | def __init__(self, url: str): 9 | self.url = url 10 | 11 | def publish_proof(self, proof_json: dict, hotkey: Keypair): 12 | """ 13 | Publishes a proof to the proof publishing service. 
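        The request is authenticated by signing the current unix timestamp with the
        hotkey; the timestamp, ss58 address and signature are sent as HTTP headers.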
14 | 15 | Args: 16 | proof_json (dict): The proof data as a JSON object 17 | hotkey (Keypair): The hotkey used to sign the proof 18 | """ 19 | try: 20 | timestamp = str(int(time.time())) 21 | message = timestamp.encode("utf-8") 22 | signature = hotkey.sign(message) 23 | 24 | response = requests.post( 25 | f"{self.url}/proof", 26 | json={"proof": proof_json}, 27 | headers={ 28 | "x-timestamp": timestamp, 29 | "x-origin-ss58": hotkey.ss58_address, 30 | "x-signature": signature.hex(), 31 | "Content-Type": "application/json", 32 | }, 33 | ) 34 | 35 | if response.status_code == 200: 36 | response_json = response.json() 37 | bt.logging.success(f"Proof of weights uploaded to {self.url}") 38 | bt.logging.info(f"Response: {response_json}") 39 | return response_json 40 | else: 41 | bt.logging.warning( 42 | f"Failed to upload proof of weights to {self.url}. Status code: {response.status_code}" 43 | ) 44 | return None 45 | except Exception as e: 46 | bt.logging.warning(f"Error uploading proof of weights: {e}") 47 | return None 48 | -------------------------------------------------------------------------------- /neurons/_validator/utils/uid.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Generator, Iterable 2 | import bittensor as bt 3 | import torch 4 | import ipaddress 5 | 6 | from constants import VALIDATOR_STAKE_THRESHOLD, MAINNET_TESTNET_UIDS, DEFAULT_NETUID 7 | 8 | 9 | def is_valid_ip(ip: str) -> bool: 10 | try: 11 | address = ipaddress.IPv4Address(ip) 12 | return address.is_global and not address.is_multicast 13 | except ValueError: 14 | return False 15 | 16 | 17 | def get_queryable_uids(metagraph: bt.metagraph) -> Generator[int, None, None]: 18 | """ 19 | Returns the uids of the miners that are queryable 20 | """ 21 | uids = metagraph.uids.tolist() 22 | stake_threshold = VALIDATOR_STAKE_THRESHOLD 23 | if metagraph.netuid in [ 24 | i[1] for i in MAINNET_TESTNET_UIDS if i[0] == DEFAULT_NETUID 25 | ]: 26 | stake_threshold = 1e19 27 | total_stake = ( 28 | torch.tensor(metagraph.total_stake, dtype=torch.float32) 29 | if not isinstance(metagraph.total_stake, torch.Tensor) 30 | else metagraph.total_stake 31 | ) 32 | total_stake = total_stake[uids] 33 | queryable_flags: Iterable[bool] = ( 34 | (total_stake < stake_threshold) 35 | & torch.tensor([is_valid_ip(metagraph.axons[i].ip) for i in uids]) 36 | ).tolist() 37 | for uid, is_queryable in zip(uids, queryable_flags): 38 | if is_queryable: 39 | yield uid 40 | -------------------------------------------------------------------------------- /neurons/_validator/validator_session.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import sys 4 | 5 | import bittensor as bt 6 | 7 | import cli_parser 8 | from _validator.config import ValidatorConfig 9 | from _validator.core.validator_loop import ValidatorLoop 10 | from utils import clean_temp_files 11 | import asyncio 12 | 13 | 14 | class ValidatorSession: 15 | def __init__(self): 16 | self.config = ValidatorConfig(cli_parser.config) 17 | self.validator_loop = ValidatorLoop(self.config) 18 | 19 | def run(self): 20 | """ 21 | Start the validator session and run the main loop 22 | """ 23 | bt.logging.debug("Validator session started") 24 | 25 | try: 26 | asyncio.run(self.validator_loop.run()) 27 | except KeyboardInterrupt: 28 | bt.logging.info("KeyboardInterrupt caught. 
Exiting validator.") 29 | clean_temp_files() 30 | sys.exit(0) 31 | -------------------------------------------------------------------------------- /neurons/constants.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dataclasses import dataclass 3 | 4 | 5 | @dataclass 6 | class Roles: 7 | VALIDATOR = "validator" 8 | MINER = "miner" 9 | 10 | 11 | # The model ID for a batched proof of weights model 12 | BATCHED_PROOF_OF_WEIGHTS_MODEL_ID = ( 13 | "1e6fcdaea58741e7248b631718dda90398a17b294480beb12ce8232e27ca3bff" 14 | ) 15 | # The model ID for a single proof of weights model 16 | SINGLE_PROOF_OF_WEIGHTS_MODEL_ID = ( 17 | "fa0d509d52abe2d1e809124f8aba46258a02f7253582f7b7f5a22e1e0bca0dfb" 18 | ) 19 | 20 | IGNORED_MODEL_HASHES = [ 21 | "0", 22 | "0a92bc32ea02abe54159da70aeb541d52c3cba27c8708669eda634e096a86f8b", 23 | "b7d33e7c19360c042d94c5a7360d7dc68c36dd56c449f7c49164a0098769c01f", 24 | "55de10a6bcf638af4bc79901d63204a9e5b1c6534670aa03010bae6045e3d0e8", 25 | "9998a12b8194d3e57d332b484ede57c3d871d42a176456c4e10da2995791d181", 26 | "ed8ba401d709ee31f6b9272163c71451da171c7d71800313fe5db58d0f6c483a", 27 | "1d60d545b7c5123fd60524dcbaf57081ca7dc4a9ec36c892927a3153328d17c0", 28 | "37320fc74fec80805eedc8e92baf3c58842a2cb2a4ae127ad6e930f0c8441c7a", 29 | "1d60d545b7c5123fd60524dcbaf57081ca7dc4a9ec36c892927a3153328d17c0", 30 | "33b92394b18412622adad75733a6fc659b4e202b01ee8a5465958a6bad8ded62", 31 | "37320fc74fec80805eedc8e92baf3c58842a2cb2a4ae127ad6e930f0c8441c7a", 32 | "8dcff627a782525ea86196941a694ffbead179905f0cd4550ddc3df9e2b90924", 33 | "a4bcecaf699fd9212600a1f2fcaa40c444e1aeaab409ea240a38c33ed356f4e2", 34 | "e84b2e5f223621fa20078eb9f920d8d4d3a4ff95fa6e2357646fdbb43a2557c9", 35 | "a849500803abdbb86a9460e18684a6411dc7ae0b75f1f6330e3028081a497dea", 36 | ] 37 | 38 | # The maximum timespan allowed for miners to respond to a query 39 | VALIDATOR_REQUEST_TIMEOUT_SECONDS = 120 40 | # The maximum timespan allowed for miners to process through a circuit 41 | CIRCUIT_TIMEOUT_SECONDS = 60 42 | # Whether to penalize miners for missing resets 43 | RESET_PENALTY_ENABLED = False 44 | # An additional queueing time for external requests 45 | EXTERNAL_REQUEST_QUEUE_TIME_SECONDS = 10 46 | # Maximum number of concurrent requests that the validator will handle 47 | MAX_CONCURRENT_REQUESTS = 16 48 | # Default proof size when we're unable to determine the actual size 49 | DEFAULT_PROOF_SIZE = 5000 50 | # Size in percent of the sample to be used for the maximum score median 51 | MAXIMUM_SCORE_MEDIAN_SAMPLE = 0.05 52 | # Shift in seconds to apply to the minimum response time for vertical asymptote adjustment 53 | MINIMUM_SCORE_SHIFT = 0.0 54 | # Weights version hyperparameter 55 | WEIGHTS_VERSION = 1821 56 | # Rate limit for weight updates 57 | WEIGHT_RATE_LIMIT: int = 100 58 | # Delay between loop iterations 59 | LOOP_DELAY_SECONDS = 0.1 60 | # Exception delay for loop 61 | EXCEPTION_DELAY_SECONDS = 10 62 | # Default maximum score 63 | DEFAULT_MAX_SCORE = 1 / 235 64 | # Default subnet UID 65 | DEFAULT_NETUID = 2 66 | # Validator stake threshold 67 | VALIDATOR_STAKE_THRESHOLD = 1024 68 | # 🥩🥩🥩🥩🥩🥩🥩🥩🥩🥩🥩🥩🥩🥩🥩🥩🥩🥩🥩🥩🥩🥩🥩🥩🥩🥩🥩🥩🥩🥩🥩🥩🥩🥩🥩🥩🥩🥩 69 | STEAK = "🥩" 70 | # Field modulus 71 | FIELD_MODULUS = ( 72 | 21888242871839275222246405745257275088548364400416034343698204186575808495617 73 | ) 74 | # Number of miner groups for reset events 75 | NUM_MINER_GROUPS = 8 76 | # Subnet tempo for epochs 77 | EPOCH_TEMPO = 360 78 | # Weight update buffer 79 | WEIGHT_UPDATE_BUFFER = 15 80 | # 
How close to the end of the epoch the boost is applied 81 | BOOST_BUFFER = 50 82 | # The window in blocks before an epoch boundary where a miner can reset. 83 | MINER_RESET_WINDOW_BLOCKS = 10 84 | # Whether on-chain proof of weights is enabled by default 85 | ONCHAIN_PROOF_OF_WEIGHTS_ENABLED = False 86 | # Frequency in terms of blocks at which proof of weights are posted 87 | PROOF_OF_WEIGHTS_INTERVAL = 1000 88 | # Maximum number of proofs to log at once 89 | MAX_PROOFS_TO_LOG = 0 90 | # Era period for proof of weights (mortality of the pow log) 91 | PROOF_OF_WEIGHTS_LIFESPAN = 2 92 | # Active competition 93 | ACTIVE_COMPETITION = 0 94 | # Frequency in terms of seconds at which the competition is synced and evaluated 95 | COMPETITION_SYNC_INTERVAL = 60 * 60 * 24 96 | # Maximum signature lifespan for WebSocket requests 97 | MAX_SIGNATURE_LIFESPAN = 300 98 | # Whitelisted public keys (ss58 addresses) we accept external requests from by default 99 | # (even if an address is not in the metagraph) 100 | WHITELISTED_PUBLIC_KEYS = [] 101 | # Mainnet <> Testnet UID mapping 102 | MAINNET_TESTNET_UIDS = [ 103 | (1, 61), # apex 104 | (2, 118), # omron 105 | (3, 223), # templar 106 | (4, 40), # targon 107 | (5, 88), # kaito 108 | (6, 155), # infinite 109 | (7, 92), # subvortex 110 | (8, 3), # ptn 111 | (8, 116), # ptn (PTN) 112 | (10, 104), # sturdy 113 | (11, 135), # dippy 114 | (12, 174), # horde 115 | (13, 254), # dataverse 116 | (14, 203), # palaidn 117 | (15, 202), # deval 118 | (16, 120), # bitads 119 | (17, 89), # 3gen 120 | (18, 24), # cortex 121 | (19, 176), # inference 122 | (20, 76), # bitagent 123 | (21, 157), # any-any 124 | (23, 119), # social 125 | (24, 96), # omega 126 | (25, 141), # protein 127 | (26, 25), # alchemy 128 | (27, 15), # compute 129 | (28, 93), # oracle 130 | (31, 123), # naschain 131 | (32, 87), # itsai 132 | (33, 138), # ready 133 | (34, 168), # mind 134 | (35, 78), # logic 135 | (39, 159), # edge 136 | (40, 166), # chunk 137 | (41, 172), # sportstensor 138 | (42, 165), # masa 139 | (43, 65), # graphite 140 | (44, 180), # score 141 | (45, 171), # gen42 142 | (46, 182), # neural 143 | (48, 208), # nextplace 144 | (49, 100), # automl 145 | (50, 31), # audio 146 | (52, 98), # dojo 147 | (53, 232), # efficient-frontier 148 | (54, 236), # docs-insights 149 | (57, 237), # gaia 150 | (59, 249), # agent-arena 151 | ] 152 | # Proof publishing service URL 153 | PPS_URL = os.getenv( 154 | "OMRON_PPS_URL", 155 | "https://pps.omron.ai/", 156 | ) 157 | # Testnet PPS URL 158 | TESTNET_PPS_URL = os.getenv( 159 | "OMRON_PPS_URL", 160 | "https://cllswjfpzmg67rwythmiiufvtm0gsthd.lambda-url.us-east-1.on.aws/", 161 | ) 162 | # EZKL path 163 | LOCAL_EZKL_PATH = os.path.join(os.path.expanduser("~"), ".ezkl", "ezkl") 164 | # GitHub repository URL 165 | REPO_URL = "https://github.com/inference-labs-inc/omron-subnet" 166 | # Various time constants in seconds 167 | ONE_SECOND = 1 168 | ONE_MINUTE = 60 169 | FIVE_MINUTES = ONE_MINUTE * 5 170 | ONE_HOUR = ONE_MINUTE * 60 171 | ONE_DAY = ONE_HOUR * 24 172 | ONE_YEAR = ONE_DAY * 365 173 | # Temporary folder for storing proof files 174 | TEMP_FOLDER = "/tmp/omron" 175 | 176 | # Queue size limits 177 | MAX_POW_QUEUE_SIZE = 1024 178 | MAX_EVALUATION_ITEMS = 1024 179 | 180 | # Maximum circuit size in GB for competitions 181 | MAX_CIRCUIT_SIZE_GB = 50 182 | -------------------------------------------------------------------------------- /neurons/deployment_layer/model_1876cfa9fb3c418b2559f3f7074db20565b5ca7237efdd43b907d9d697a452c4/input.py: 
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | from pydantic import BaseModel
3 | from execution_layer.base_input import BaseInput
4 | from execution_layer.input_registry import InputRegistry
5 | from _validator.models.request_type import RequestType
6 | import random
7 | import secrets
8 | 
9 | SUCCESS_WEIGHT = 1
10 | DIFFICULTY_WEIGHT = 1
11 | TIME_ELAPSED_WEIGHT = 0.3
12 | FAILED_PENALTY_WEIGHT = 0.4
13 | ALLOCATION_WEIGHT = 0.21
14 | POW_MIN_DIFFICULTY = 7
15 | POW_MAX_DIFFICULTY = 12
16 | POW_TIMEOUT = 30.0
17 | BATCH_SIZE = 256
18 | 
19 | 
20 | class CircuitInputSchema(BaseModel):
21 |     challenge_attempts: list[int]
22 |     challenge_successes: list[int]
23 |     last_20_challenge_failed: list[int]
24 |     challenge_elapsed_time_avg: list[float]
25 |     last_20_difficulty_avg: list[float]
26 |     has_docker: list[bool]
27 |     uid: list[int]
28 |     allocated_uids: list[int]
29 |     penalized_uids: list[int]
30 |     validator_uids: list[int]
31 |     success_weight: list[float]
32 |     difficulty_weight: list[float]
33 |     time_elapsed_weight: list[float]
34 |     failed_penalty_weight: list[float]
35 |     allocation_weight: list[float]
36 |     pow_timeout: list[float]
37 |     pow_min_difficulty: list[float]
38 |     pow_max_difficulty: list[float]
39 |     nonce: list[int]
40 | 
41 | 
42 | @InputRegistry.register(
43 |     "1876cfa9fb3c418b2559f3f7074db20565b5ca7237efdd43b907d9d697a452c4"
44 | )
45 | class CircuitInput(BaseInput):
46 | 
47 |     schema = CircuitInputSchema
48 | 
49 |     def __init__(
50 |         self, request_type: RequestType, data: dict[str, object] | None = None
51 |     ):
52 |         super().__init__(request_type, data)
53 | 
54 |     @staticmethod
55 |     def generate() -> dict[str, object]:
56 |         return {
57 |             "challenge_attempts": [random.randint(5, 10) for _ in range(BATCH_SIZE)],
58 |             "challenge_successes": [random.randint(4, 8) for _ in range(BATCH_SIZE)],
59 |             "last_20_challenge_failed": [
60 |                 random.randint(0, 20) for _ in range(BATCH_SIZE)
61 |             ],
62 |             "challenge_elapsed_time_avg": [
63 |                 4.0 + random.random() * 4.0 for _ in range(BATCH_SIZE)
64 |             ],
65 |             "last_20_difficulty_avg": [
66 |                 POW_MIN_DIFFICULTY
67 |                 + random.random() * (POW_MAX_DIFFICULTY - POW_MIN_DIFFICULTY)
68 |                 for _ in range(BATCH_SIZE)
69 |             ],
70 |             "has_docker": [random.random() < 0.5 for _ in range(BATCH_SIZE)],
71 |             "uid": [random.randint(0, 255) for _ in range(BATCH_SIZE)],
72 |             "allocated_uids": [random.randint(0, 255) for _ in range(256)],
73 |             "penalized_uids": [random.randint(0, 255) for _ in range(256)],
74 |             "validator_uids": [random.randint(0, 255) for _ in range(256)],
75 |             "success_weight": [SUCCESS_WEIGHT],
76 |             "difficulty_weight": [DIFFICULTY_WEIGHT],
77 |             "time_elapsed_weight": [TIME_ELAPSED_WEIGHT],
78 |             "failed_penalty_weight": [FAILED_PENALTY_WEIGHT],
79 |             "allocation_weight": [ALLOCATION_WEIGHT],
80 |             "pow_timeout": [POW_TIMEOUT],
81 |             "pow_min_difficulty": [POW_MIN_DIFFICULTY],
82 |             "pow_max_difficulty": [POW_MAX_DIFFICULTY],
83 |             "nonce": [secrets.randbits(32)],
84 |         }
85 | 
86 |     @staticmethod
87 |     def validate(data: dict[str, object]) -> CircuitInputSchema:
88 |         return CircuitInputSchema(**data)
89 | 
90 |     @staticmethod
91 |     def process(data: dict[str, object]) -> dict[str, object]:
92 |         """
93 |         Add a random nonce to ensure that the request is not reused.
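        The nonce is regenerated on every call, so two otherwise identical payloads
        produce different hashes in the validator's HashGuard duplicate check.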
94 | """ 95 | data["nonce"] = [secrets.randbits(32)] 96 | return data 97 | -------------------------------------------------------------------------------- /neurons/deployment_layer/model_1876cfa9fb3c418b2559f3f7074db20565b5ca7237efdd43b907d9d697a452c4/metadata.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "SN27 PoW 256", 3 | "description": "The validator's reward model for SN27, designed to batch a single round of weight updates (256 changes per proof).", 4 | "author": "Inference Labs", 5 | "version": "0.0.9", 6 | "proof_system": "EZKL", 7 | "netuid": 27, 8 | "weights_version": 160, 9 | "type": "proof_of_weights", 10 | "external_files": { 11 | "pk.key": "https://storage.omron.ai/1876cfa9fb3c418b2559f3f7074db20565b5ca7237efdd43b907d9d697a452c4/pk.key" 12 | }, 13 | "benchmark_choice_weight": 0.2 14 | } 15 | -------------------------------------------------------------------------------- /neurons/deployment_layer/model_1876cfa9fb3c418b2559f3f7074db20565b5ca7237efdd43b907d9d697a452c4/model.compiled: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/inference-labs-inc/omron-subnet/e5aeb7987054f91989cf5129fc6d128b37af3d8f/neurons/deployment_layer/model_1876cfa9fb3c418b2559f3f7074db20565b5ca7237efdd43b907d9d697a452c4/model.compiled -------------------------------------------------------------------------------- /neurons/deployment_layer/model_1876cfa9fb3c418b2559f3f7074db20565b5ca7237efdd43b907d9d697a452c4/settings.json: -------------------------------------------------------------------------------- 1 | {"run_args":{"input_scale":19,"param_scale":19,"scale_rebase_multiplier":1,"lookup_range":[0,1048584],"logrows":21,"num_inner_cols":2,"variables":[["batch_size",1]],"input_visibility":"Public","output_visibility":"Public","param_visibility":"Fixed","rebase_frac_zero_constants":false,"check_mode":"UNSAFE","commitment":"KZG","decomp_base":16384,"decomp_legs":3,"bounded_log_lookup":false,"ignore_range_check_inputs_outputs":false,"epsilon":null},"num_rows":759861,"total_assignments":1519723,"total_const_size":13,"total_dynamic_col_size":0,"max_dynamic_input_len":0,"num_dynamic_lookups":0,"num_shuffles":0,"total_shuffle_col_size":0,"model_instance_shapes":[[256],[256],[256],[256],[256],[256],[256],[256],[256],[256],[1],[1],[1],[1],[1],[1],[1],[1],[1],[256],[1]],"model_output_scales":[19,0],"model_input_scales":[0,0,0,19,19,0,0,0,0,0,19,19,19,19,19,19,19,19,0],"module_sizes":{"polycommit":[],"poseidon":[0,[0]]},"required_lookups":[{"Pow":{"scale":524288.0,"a":1.5}}],"required_range_checks":[[0,16383],[-1,1],[0,1]],"check_mode":"UNSAFE","version":"22.0.1","num_blinding_factors":null,"timestamp":1745940392009,"input_types":["Int","Int","Int","F32","F32","Bool","Int","Int","Int","Int","F32","F32","F32","F32","F32","F32","F32","F32","Int"],"output_types":["F32","Int"]} -------------------------------------------------------------------------------- /neurons/deployment_layer/model_1876cfa9fb3c418b2559f3f7074db20565b5ca7237efdd43b907d9d697a452c4/vk.key: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/inference-labs-inc/omron-subnet/e5aeb7987054f91989cf5129fc6d128b37af3d8f/neurons/deployment_layer/model_1876cfa9fb3c418b2559f3f7074db20565b5ca7237efdd43b907d9d697a452c4/vk.key -------------------------------------------------------------------------------- 
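Aside: a minimal sketch, not part of the repository, of how a registered input handler such as the one above can be driven end to end. It assumes the neurons directory is on the Python path; handler lookup is provided by execution_layer/input_registry.py, shown later in this listing.

from execution_layer.input_registry import InputRegistry
from _validator.models.request_type import RequestType

CIRCUIT_ID = "1876cfa9fb3c418b2559f3f7074db20565b5ca7237efdd43b907d9d697a452c4"

# get_handler lazily imports deployment_layer.model_<id>.input, which
# registers the class via the @InputRegistry.register decorator.
handler_cls = InputRegistry.get_handler(CIRCUIT_ID)

# BENCHMARK requests synthesize a fresh batch of inputs via generate().
benchmark = handler_cls(RequestType.BENCHMARK)
assert len(benchmark.data["challenge_attempts"]) == 256  # BATCH_SIZE

# Other request types must supply data; it is validated against the
# pydantic schema and given a fresh nonce by process().
rwr = handler_cls(RequestType.RWR, data=handler_cls.generate())
print(rwr.to_json()["nonce"])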
/neurons/deployment_layer/model_1e6fcdaea58741e7248b631718dda90398a17b294480beb12ce8232e27ca3bff/circuit.wasm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/inference-labs-inc/omron-subnet/e5aeb7987054f91989cf5129fc6d128b37af3d8f/neurons/deployment_layer/model_1e6fcdaea58741e7248b631718dda90398a17b294480beb12ce8232e27ca3bff/circuit.wasm -------------------------------------------------------------------------------- /neurons/deployment_layer/model_1e6fcdaea58741e7248b631718dda90398a17b294480beb12ce8232e27ca3bff/input.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from pydantic import BaseModel 3 | from execution_layer.base_input import BaseInput 4 | from execution_layer.input_registry import InputRegistry 5 | from _validator.models.request_type import RequestType 6 | from constants import ONE_MINUTE 7 | import random 8 | import secrets 9 | 10 | BATCH_SIZE = 1024 11 | RATE_OF_DECAY = 0.4 12 | RATE_OF_RECOVERY = 0.1 13 | FLATTENING_COEFFICIENT = 0.9 14 | PROOF_SIZE_THRESHOLD = 3648 15 | PROOF_SIZE_WEIGHT = 0 16 | RESPONSE_TIME_WEIGHT = 1 17 | COMPETITION_WEIGHT = 0 18 | MAXIMUM_RESPONSE_TIME_DECIMAL = 0.99 19 | SCALING = 100000000 20 | 21 | 22 | class CircuitInputSchema(BaseModel): 23 | maximum_score: list[float] 24 | previous_score: list[float] 25 | verified: list[bool] 26 | proof_size: list[float] 27 | response_time: list[float] 28 | competition: list[float] 29 | maximum_response_time: list[float] 30 | minimum_response_time: list[float] 31 | validator_uid: list[int] 32 | block_number: list[int] 33 | miner_uid: list[int] 34 | scaling: int 35 | RATE_OF_DECAY: int 36 | RATE_OF_RECOVERY: int 37 | FLATTENING_COEFFICIENT: int 38 | COMPETITION_WEIGHT: int 39 | PROOF_SIZE_WEIGHT: int 40 | PROOF_SIZE_THRESHOLD: int 41 | RESPONSE_TIME_WEIGHT: int 42 | MAXIMUM_RESPONSE_TIME_DECIMAL: int 43 | 44 | 45 | @InputRegistry.register( 46 | "1e6fcdaea58741e7248b631718dda90398a17b294480beb12ce8232e27ca3bff" 47 | ) 48 | class CircuitInput(BaseInput): 49 | 50 | schema = CircuitInputSchema 51 | 52 | def __init__( 53 | self, request_type: RequestType, data: dict[str, object] | None = None 54 | ): 55 | if request_type == RequestType.RWR and data is not None: 56 | data = self._add_missing_constants(data) 57 | super().__init__(request_type, data) 58 | 59 | @staticmethod 60 | def generate() -> dict[str, object]: 61 | 62 | minimum_response_time = int(random.random() * ONE_MINUTE * SCALING) 63 | 64 | maximum_response_time = minimum_response_time + int( 65 | random.random() * ONE_MINUTE * SCALING 66 | ) 67 | 68 | response_time = ( 69 | int(random.random() * (maximum_response_time - minimum_response_time)) 70 | + minimum_response_time 71 | ) 72 | max_score = int(1 / 256 * SCALING) 73 | return { 74 | "maximum_score": [max_score for _ in range(BATCH_SIZE)], 75 | "previous_score": [ 76 | int(random.random() * max_score) for _ in range(BATCH_SIZE) 77 | ], 78 | "verified": [random.choice([True, False]) for _ in range(BATCH_SIZE)], 79 | "proof_size": [ 80 | int(random.randint(0, 5000) * SCALING) for _ in range(BATCH_SIZE) 81 | ], 82 | "validator_uid": [random.randint(0, 255) for _ in range(BATCH_SIZE)], 83 | "block_number": [ 84 | random.randint(3000000, 10000000) for _ in range(BATCH_SIZE) 85 | ], 86 | "miner_uid": [random.randint(0, 255) for _ in range(BATCH_SIZE)], 87 | "minimum_response_time": [minimum_response_time for _ in range(BATCH_SIZE)], 88 | "maximum_response_time": 
[maximum_response_time for _ in range(BATCH_SIZE)], 89 | "response_time": [response_time for _ in range(BATCH_SIZE)], 90 | "competition": [int(random.random() * SCALING) for _ in range(BATCH_SIZE)], 91 | "scaling": SCALING, 92 | "RATE_OF_DECAY": int(RATE_OF_DECAY * SCALING), 93 | "RATE_OF_RECOVERY": int(RATE_OF_RECOVERY * SCALING), 94 | "FLATTENING_COEFFICIENT": int(FLATTENING_COEFFICIENT * SCALING), 95 | "PROOF_SIZE_WEIGHT": int(PROOF_SIZE_WEIGHT * SCALING), 96 | "PROOF_SIZE_THRESHOLD": int(PROOF_SIZE_THRESHOLD * SCALING), 97 | "COMPETITION_WEIGHT": int(COMPETITION_WEIGHT * SCALING), 98 | "RESPONSE_TIME_WEIGHT": int(RESPONSE_TIME_WEIGHT * SCALING), 99 | "MAXIMUM_RESPONSE_TIME_DECIMAL": int( 100 | MAXIMUM_RESPONSE_TIME_DECIMAL * SCALING 101 | ), 102 | } 103 | 104 | @staticmethod 105 | def validate(data: dict[str, object]) -> None: 106 | return CircuitInputSchema(**data) 107 | 108 | def _add_missing_constants(self, data: dict[str, object]) -> dict[str, object]: 109 | for i in range(16): 110 | data["validator_uid"][BATCH_SIZE - 16 + i] = secrets.randbits(16) 111 | 112 | constants = [ 113 | "RATE_OF_DECAY", 114 | "RATE_OF_RECOVERY", 115 | "FLATTENING_COEFFICIENT", 116 | "PROOF_SIZE_WEIGHT", 117 | "PROOF_SIZE_THRESHOLD", 118 | "RESPONSE_TIME_WEIGHT", 119 | "COMPETITION_WEIGHT", 120 | "MAXIMUM_RESPONSE_TIME_DECIMAL", 121 | ] 122 | 123 | for constant in constants: 124 | if constant not in data: 125 | data[constant] = int(globals()[constant] * SCALING) 126 | 127 | if "scaling" not in data: 128 | data["scaling"] = SCALING 129 | 130 | return data 131 | 132 | @staticmethod 133 | def process(data: dict[str, object]) -> dict[str, object]: 134 | 135 | data["maximum_score"] = [ 136 | int(max_score * SCALING) for max_score in data["maximum_score"] 137 | ] 138 | data["previous_score"] = [ 139 | int(previous_score * SCALING) for previous_score in data["previous_score"] 140 | ] 141 | data["proof_size"] = [ 142 | int(proof_size * SCALING) for proof_size in data["proof_size"] 143 | ] 144 | data["minimum_response_time"] = [ 145 | int(minimum_response_time * SCALING) 146 | for minimum_response_time in data["minimum_response_time"] 147 | ] 148 | data["maximum_response_time"] = [ 149 | int(maximum_response_time * SCALING) 150 | for maximum_response_time in data["maximum_response_time"] 151 | ] 152 | data["response_time"] = [ 153 | int(response_time * SCALING) for response_time in data["response_time"] 154 | ] 155 | data["competition"] = [ 156 | int(competition * SCALING) for competition in data["competition"] 157 | ] 158 | 159 | return data 160 | -------------------------------------------------------------------------------- /neurons/deployment_layer/model_1e6fcdaea58741e7248b631718dda90398a17b294480beb12ce8232e27ca3bff/metadata.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "SN2 PoW 1024 Circom", 3 | "description": "Generates batched proof of weights based on miner evaluation data from SN2.", 4 | "author": "Inference Labs", 5 | "version": "0.0.6", 6 | "proof_system": "CIRCOM", 7 | "netuid": "2", 8 | "type": "proof_of_weights", 9 | "external_files": { 10 | "circuit.zkey": "https://storage.omron.ai/1e6fcdaea58741e7248b631718dda90398a17b294480beb12ce8232e27ca3bff/circuit.zkey" 11 | }, 12 | "benchmark_choice_weight": 0.2 13 | } 14 | -------------------------------------------------------------------------------- /neurons/deployment_layer/model_1e6fcdaea58741e7248b631718dda90398a17b294480beb12ce8232e27ca3bff/settings.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "scaling": 100000000, 3 | "public_inputs": { 4 | "order": [ 5 | "new_score", 6 | "block_number_out", 7 | "miner_uid_out", 8 | "validator_uid_out", 9 | "RATE_OF_DECAY", 10 | "RATE_OF_RECOVERY", 11 | "FLATTENING_COEFFICIENT", 12 | "PROOF_SIZE_THRESHOLD", 13 | "PROOF_SIZE_WEIGHT", 14 | "RESPONSE_TIME_WEIGHT", 15 | "MAXIMUM_RESPONSE_TIME_DECIMAL", 16 | "COMPETITION_WEIGHT", 17 | "maximum_score", 18 | "previous_score", 19 | "verified", 20 | "proof_size", 21 | "response_time", 22 | "competition", 23 | "maximum_response_time", 24 | "minimum_response_time", 25 | "block_number", 26 | "validator_uid", 27 | "miner_uid", 28 | "scaling" 29 | ], 30 | "sizes": { 31 | "new_score": 1024, 32 | "block_number_out": 1024, 33 | "miner_uid_out": 1024, 34 | "validator_uid_out": 1024, 35 | "RATE_OF_DECAY": 1, 36 | "RATE_OF_RECOVERY": 1, 37 | "FLATTENING_COEFFICIENT": 1, 38 | "PROOF_SIZE_THRESHOLD": 1, 39 | "PROOF_SIZE_WEIGHT": 1, 40 | "RESPONSE_TIME_WEIGHT": 1, 41 | "COMPETITION_WEIGHT": 1, 42 | "MAXIMUM_RESPONSE_TIME_DECIMAL": 1, 43 | "maximum_score": 1024, 44 | "previous_score": 1024, 45 | "verified": 1024, 46 | "proof_size": 1024, 47 | "response_time": 1024, 48 | "competition": 1024, 49 | "maximum_response_time": 1024, 50 | "minimum_response_time": 1024, 51 | "block_number": 1024, 52 | "validator_uid": 1024, 53 | "miner_uid": 1024, 54 | "scaling": 1 55 | } 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /neurons/deployment_layer/model_43ecaacaded5ed16c9e08bc054366e409c7925245eca547472b27f2a61469cc5/input.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from pydantic import BaseModel 3 | from execution_layer.base_input import BaseInput 4 | from execution_layer.input_registry import InputRegistry 5 | from _validator.models.request_type import RequestType 6 | import random 7 | import secrets 8 | 9 | TOP_TIER_PCT = 0.1 10 | NEXT_TIER_PCT = 0.4 11 | TOP_TIER_WEIGHT = 0.7 12 | NEXT_TIER_WEIGHT = 0.2 13 | BOTTOM_TIER_WEIGHT = 0.1 14 | BATCH_SIZE = 256 15 | 16 | 17 | class CircuitInputSchema(BaseModel): 18 | scores: list[float] 19 | top_tier_pct: list[float] 20 | next_tier_pct: list[float] 21 | top_tier_weight: list[float] 22 | next_tier_weight: list[float] 23 | bottom_tier_weight: list[float] 24 | nonce: list[float] 25 | 26 | 27 | @InputRegistry.register( 28 | "43ecaacaded5ed16c9e08bc054366e409c7925245eca547472b27f2a61469cc5" 29 | ) 30 | class CircuitInput(BaseInput): 31 | 32 | schema = CircuitInputSchema 33 | 34 | def __init__( 35 | self, request_type: RequestType, data: dict[str, object] | None = None 36 | ): 37 | super().__init__(request_type, data) 38 | 39 | @staticmethod 40 | def generate() -> dict[str, object]: 41 | return { 42 | "scores": [random.random() for _ in range(BATCH_SIZE)], 43 | "top_tier_pct": [TOP_TIER_PCT], 44 | "next_tier_pct": [NEXT_TIER_PCT], 45 | "top_tier_weight": [TOP_TIER_WEIGHT], 46 | "next_tier_weight": [NEXT_TIER_WEIGHT], 47 | "bottom_tier_weight": [BOTTOM_TIER_WEIGHT], 48 | "nonce": [secrets.randbits(32)], 49 | } 50 | 51 | @staticmethod 52 | def validate(data: dict[str, object]) -> None: 53 | return CircuitInputSchema(**data) 54 | 55 | @staticmethod 56 | def process(data: dict[str, object]) -> dict[str, object]: 57 | """ 58 | Add a random nonce to ensure that the request is not reused. 
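A caller-supplied nonce is always overwritten with a fresh 32-bit value.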
59 | """ 60 | data["nonce"] = [secrets.randbits(32)] 61 | return data 62 | -------------------------------------------------------------------------------- /neurons/deployment_layer/model_43ecaacaded5ed16c9e08bc054366e409c7925245eca547472b27f2a61469cc5/metadata.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "SN48 PoW", 3 | "description": "Generates weights based on miner scoring data from SN48.", 4 | "author": "Inference Labs", 5 | "version": "0.0.4", 6 | "proof_system": "EZKL", 7 | "netuid": 48, 8 | "weights_version": 0, 9 | "type": "proof_of_weights", 10 | "external_files": { 11 | "pk.key": "https://storage.omron.ai/43ecaacaded5ed16c9e08bc054366e409c7925245eca547472b27f2a61469cc5/pk.key" 12 | }, 13 | "benchmark_choice_weight": 0.2 14 | } 15 | -------------------------------------------------------------------------------- /neurons/deployment_layer/model_43ecaacaded5ed16c9e08bc054366e409c7925245eca547472b27f2a61469cc5/model.compiled: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/inference-labs-inc/omron-subnet/e5aeb7987054f91989cf5129fc6d128b37af3d8f/neurons/deployment_layer/model_43ecaacaded5ed16c9e08bc054366e409c7925245eca547472b27f2a61469cc5/model.compiled -------------------------------------------------------------------------------- /neurons/deployment_layer/model_43ecaacaded5ed16c9e08bc054366e409c7925245eca547472b27f2a61469cc5/settings.json: -------------------------------------------------------------------------------- 1 | {"run_args":{"input_scale":20,"param_scale":20,"scale_rebase_multiplier":1,"lookup_range":[0,0],"logrows":21,"num_inner_cols":2,"variables":[["batch_size",1]],"input_visibility":"Public","output_visibility":"Public","param_visibility":"Fixed","rebase_frac_zero_constants":false,"check_mode":"UNSAFE","commitment":"KZG","decomp_base":16384,"decomp_legs":3,"bounded_log_lookup":false,"ignore_range_check_inputs_outputs":false,"epsilon":null},"num_rows":1054691,"total_assignments":2109383,"total_const_size":8,"total_dynamic_col_size":0,"max_dynamic_input_len":0,"num_dynamic_lookups":0,"num_shuffles":0,"total_shuffle_col_size":0,"model_instance_shapes":[[256],[1],[1],[1],[1],[1],[1],[256],[1]],"model_output_scales":[20,0],"model_input_scales":[20,20,20,20,20,20,0],"module_sizes":{"polycommit":[],"poseidon":[0,[0]]},"required_lookups":[],"required_range_checks":[[-1,1],[0,1],[0,16383],[0,1048575]],"check_mode":"UNSAFE","version":"22.0.1","num_blinding_factors":null,"timestamp":1745944163640,"input_types":["F32","F32","F32","F32","F32","F32","Int"],"output_types":["F32","Int"]} -------------------------------------------------------------------------------- /neurons/deployment_layer/model_43ecaacaded5ed16c9e08bc054366e409c7925245eca547472b27f2a61469cc5/vk.key: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/inference-labs-inc/omron-subnet/e5aeb7987054f91989cf5129fc6d128b37af3d8f/neurons/deployment_layer/model_43ecaacaded5ed16c9e08bc054366e409c7925245eca547472b27f2a61469cc5/vk.key -------------------------------------------------------------------------------- /neurons/deployment_layer/model_f5b6043594f46ae6bd176ce60c7a099291cc6a3f6436fecd46142b1b1ecca5fb/input.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from pydantic import BaseModel 3 | from execution_layer.base_input import BaseInput 4 | from 
execution_layer.input_registry import InputRegistry 5 | from _validator.models.request_type import RequestType 6 | import random 7 | 8 | LIST_SIZE = 5 9 | 10 | 11 | class CircuitInputSchema(BaseModel): 12 | list_items: list[float] 13 | 14 | 15 | @InputRegistry.register( 16 | "f5b6043594f46ae6bd176ce60c7a099291cc6a3f6436fecd46142b1b1ecca5fb" 17 | ) 18 | class CircuitInput(BaseInput): 19 | 20 | schema = CircuitInputSchema 21 | 22 | def __init__( 23 | self, request_type: RequestType, data: dict[str, object] | None = None 24 | ): 25 | super().__init__(request_type, data) 26 | 27 | @staticmethod 28 | def generate() -> dict[str, object]: 29 | return { 30 | "list_items": [random.uniform(0.0, 0.85) for _ in range(LIST_SIZE)], 31 | } 32 | 33 | @staticmethod 34 | def validate(data: dict[str, object]) -> None: 35 | return CircuitInputSchema(**data) 36 | 37 | @staticmethod 38 | def process(data: dict[str, object]) -> dict[str, object]: 39 | """ 40 | No processing needs to take place, as all inputs are randomized. 41 | """ 42 | return data 43 | -------------------------------------------------------------------------------- /neurons/deployment_layer/model_f5b6043594f46ae6bd176ce60c7a099291cc6a3f6436fecd46142b1b1ecca5fb/metadata.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "LSTM", 3 | "description": "Default LSTM benchmarker model.", 4 | "author": "Inference Labs", 5 | "version": "0.0.6", 6 | "proof_system": "EZKL", 7 | "type": "proof_of_computation", 8 | "external_files": { 9 | "pk.key": "https://storage.omron.ai/f5b6043594f46ae6bd176ce60c7a099291cc6a3f6436fecd46142b1b1ecca5fb/pk.key" 10 | }, 11 | "benchmark_choice_weight": 0.2 12 | } 13 | -------------------------------------------------------------------------------- /neurons/deployment_layer/model_f5b6043594f46ae6bd176ce60c7a099291cc6a3f6436fecd46142b1b1ecca5fb/model.compiled: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/inference-labs-inc/omron-subnet/e5aeb7987054f91989cf5129fc6d128b37af3d8f/neurons/deployment_layer/model_f5b6043594f46ae6bd176ce60c7a099291cc6a3f6436fecd46142b1b1ecca5fb/model.compiled -------------------------------------------------------------------------------- /neurons/deployment_layer/model_f5b6043594f46ae6bd176ce60c7a099291cc6a3f6436fecd46142b1b1ecca5fb/network.onnx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/inference-labs-inc/omron-subnet/e5aeb7987054f91989cf5129fc6d128b37af3d8f/neurons/deployment_layer/model_f5b6043594f46ae6bd176ce60c7a099291cc6a3f6436fecd46142b1b1ecca5fb/network.onnx -------------------------------------------------------------------------------- /neurons/deployment_layer/model_f5b6043594f46ae6bd176ce60c7a099291cc6a3f6436fecd46142b1b1ecca5fb/settings.json: -------------------------------------------------------------------------------- 1 | 
{"run_args":{"input_scale":13,"param_scale":13,"scale_rebase_multiplier":1,"lookup_range":[-8582,21116],"logrows":19,"num_inner_cols":2,"variables":[["batch_size",1]],"input_visibility":"Private","output_visibility":"Public","param_visibility":"Private","rebase_frac_zero_constants":false,"check_mode":"UNSAFE","commitment":"KZG","decomp_base":16384,"decomp_legs":2,"bounded_log_lookup":false,"ignore_range_check_inputs_outputs":false,"epsilon":null},"num_rows":264011,"total_assignments":528023,"total_const_size":4,"total_dynamic_col_size":0,"max_dynamic_input_len":0,"num_dynamic_lookups":0,"num_shuffles":0,"total_shuffle_col_size":0,"model_instance_shapes":[[1,1]],"model_output_scales":[13],"model_input_scales":[13],"module_sizes":{"polycommit":[],"poseidon":[0,[0]]},"required_lookups":[{"Tanh":{"scale":8192.0}},{"Sigmoid":{"scale":8192.0}}],"required_range_checks":[[-1,1],[0,16383]],"check_mode":"UNSAFE","version":"22.0.1","num_blinding_factors":null,"timestamp":1745957986440,"input_types":["F32"],"output_types":["F32"]} -------------------------------------------------------------------------------- /neurons/deployment_layer/model_f5b6043594f46ae6bd176ce60c7a099291cc6a3f6436fecd46142b1b1ecca5fb/vk.key: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/inference-labs-inc/omron-subnet/e5aeb7987054f91989cf5129fc6d128b37af3d8f/neurons/deployment_layer/model_f5b6043594f46ae6bd176ce60c7a099291cc6a3f6436fecd46142b1b1ecca5fb/vk.key -------------------------------------------------------------------------------- /neurons/deployment_layer/model_fa0d509d52abe2d1e809124f8aba46258a02f7253582f7b7f5a22e1e0bca0dfb/circuit.wasm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/inference-labs-inc/omron-subnet/e5aeb7987054f91989cf5129fc6d128b37af3d8f/neurons/deployment_layer/model_fa0d509d52abe2d1e809124f8aba46258a02f7253582f7b7f5a22e1e0bca0dfb/circuit.wasm -------------------------------------------------------------------------------- /neurons/deployment_layer/model_fa0d509d52abe2d1e809124f8aba46258a02f7253582f7b7f5a22e1e0bca0dfb/input.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from pydantic import BaseModel 3 | from execution_layer.base_input import BaseInput 4 | from execution_layer.input_registry import InputRegistry 5 | from _validator.models.request_type import RequestType 6 | from constants import ONE_MINUTE 7 | import random 8 | import secrets 9 | 10 | BATCH_SIZE = 256 11 | RATE_OF_DECAY = 0.4 12 | RATE_OF_RECOVERY = 0.1 13 | FLATTENING_COEFFICIENT = 0.9 14 | PROOF_SIZE_THRESHOLD = 3648 15 | PROOF_SIZE_WEIGHT = 0 16 | RESPONSE_TIME_WEIGHT = 1 17 | COMPETITION_WEIGHT = 0 18 | MAXIMUM_RESPONSE_TIME_DECIMAL = 0.99 19 | SCALING = 100000000 20 | 21 | 22 | class CircuitInputSchema(BaseModel): 23 | maximum_score: list[float] 24 | previous_score: list[float] 25 | verified: list[bool] 26 | proof_size: list[float] 27 | response_time: list[float] 28 | competition: list[float] 29 | maximum_response_time: list[float] 30 | minimum_response_time: list[float] 31 | validator_uid: list[int] 32 | block_number: list[int] 33 | miner_uid: list[int] 34 | scaling: int 35 | RATE_OF_DECAY: int 36 | RATE_OF_RECOVERY: int 37 | FLATTENING_COEFFICIENT: int 38 | COMPETITION_WEIGHT: int 39 | PROOF_SIZE_WEIGHT: int 40 | PROOF_SIZE_THRESHOLD: int 41 | RESPONSE_TIME_WEIGHT: int 42 | MAXIMUM_RESPONSE_TIME_DECIMAL: int 43 | 
44 | 45 | @InputRegistry.register( 46 | "fa0d509d52abe2d1e809124f8aba46258a02f7253582f7b7f5a22e1e0bca0dfb" 47 | ) 48 | class CircuitInput(BaseInput): 49 | 50 | schema = CircuitInputSchema 51 | 52 | def __init__( 53 | self, request_type: RequestType, data: dict[str, object] | None = None 54 | ): 55 | if request_type == RequestType.RWR and data is not None: 56 | data = self._add_missing_constants(data) 57 | super().__init__(request_type, data) 58 | 59 | @staticmethod 60 | def generate() -> dict[str, object]: 61 | 62 | minimum_response_time = int(random.random() * ONE_MINUTE * SCALING) 63 | 64 | maximum_response_time = minimum_response_time + int( 65 | random.random() * ONE_MINUTE * SCALING 66 | ) 67 | 68 | response_time = ( 69 | int(random.random() * (maximum_response_time - minimum_response_time)) 70 | + minimum_response_time 71 | ) 72 | max_score = int(1 / 256 * SCALING) 73 | return { 74 | "maximum_score": [max_score for _ in range(BATCH_SIZE)], 75 | "previous_score": [ 76 | int(random.random() * max_score) for _ in range(BATCH_SIZE) 77 | ], 78 | "verified": [random.choice([True, False]) for _ in range(BATCH_SIZE)], 79 | "proof_size": [ 80 | int(random.randint(0, 5000) * SCALING) for _ in range(BATCH_SIZE) 81 | ], 82 | "validator_uid": [random.randint(0, 255) for _ in range(BATCH_SIZE)], 83 | "block_number": [ 84 | random.randint(3000000, 10000000) for _ in range(BATCH_SIZE) 85 | ], 86 | "miner_uid": [random.randint(0, 255) for _ in range(BATCH_SIZE)], 87 | "minimum_response_time": [minimum_response_time for _ in range(BATCH_SIZE)], 88 | "maximum_response_time": [maximum_response_time for _ in range(BATCH_SIZE)], 89 | "response_time": [response_time for _ in range(BATCH_SIZE)], 90 | "competition": [int(random.random() * SCALING) for _ in range(BATCH_SIZE)], 91 | "scaling": SCALING, 92 | "RATE_OF_DECAY": int(RATE_OF_DECAY * SCALING), 93 | "RATE_OF_RECOVERY": int(RATE_OF_RECOVERY * SCALING), 94 | "FLATTENING_COEFFICIENT": int(FLATTENING_COEFFICIENT * SCALING), 95 | "PROOF_SIZE_WEIGHT": int(PROOF_SIZE_WEIGHT * SCALING), 96 | "PROOF_SIZE_THRESHOLD": int(PROOF_SIZE_THRESHOLD * SCALING), 97 | "COMPETITION_WEIGHT": int(COMPETITION_WEIGHT * SCALING), 98 | "RESPONSE_TIME_WEIGHT": int(RESPONSE_TIME_WEIGHT * SCALING), 99 | "MAXIMUM_RESPONSE_TIME_DECIMAL": int( 100 | MAXIMUM_RESPONSE_TIME_DECIMAL * SCALING 101 | ), 102 | } 103 | 104 | @staticmethod 105 | def validate(data: dict[str, object]) -> None: 106 | return CircuitInputSchema(**data) 107 | 108 | def _add_missing_constants(self, data: dict[str, object]) -> dict[str, object]: 109 | for i in range(16): 110 | data["validator_uid"][BATCH_SIZE - 16 + i] = secrets.randbits(16) 111 | 112 | constants = [ 113 | "RATE_OF_DECAY", 114 | "RATE_OF_RECOVERY", 115 | "FLATTENING_COEFFICIENT", 116 | "PROOF_SIZE_WEIGHT", 117 | "PROOF_SIZE_THRESHOLD", 118 | "RESPONSE_TIME_WEIGHT", 119 | "COMPETITION_WEIGHT", 120 | "MAXIMUM_RESPONSE_TIME_DECIMAL", 121 | ] 122 | 123 | for constant in constants: 124 | if constant not in data: 125 | data[constant] = int(globals()[constant] * SCALING) 126 | 127 | if "scaling" not in data: 128 | data["scaling"] = SCALING 129 | 130 | return data 131 | 132 | @staticmethod 133 | def process(data: dict[str, object]) -> dict[str, object]: 134 | 135 | data["maximum_score"] = [ 136 | int(max_score * SCALING) for max_score in data["maximum_score"] 137 | ] 138 | data["previous_score"] = [ 139 | int(previous_score * SCALING) for previous_score in data["previous_score"] 140 | ] 141 | data["proof_size"] = [ 142 | int(proof_size * SCALING) for 
proof_size in data["proof_size"] 143 | ] 144 | data["minimum_response_time"] = [ 145 | int(minimum_response_time * SCALING) 146 | for minimum_response_time in data["minimum_response_time"] 147 | ] 148 | data["maximum_response_time"] = [ 149 | int(maximum_response_time * SCALING) 150 | for maximum_response_time in data["maximum_response_time"] 151 | ] 152 | data["response_time"] = [ 153 | int(response_time * SCALING) for response_time in data["response_time"] 154 | ] 155 | data["competition"] = [ 156 | int(competition * SCALING) for competition in data["competition"] 157 | ] 158 | 159 | return data 160 | -------------------------------------------------------------------------------- /neurons/deployment_layer/model_fa0d509d52abe2d1e809124f8aba46258a02f7253582f7b7f5a22e1e0bca0dfb/metadata.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "SN2 PoW 256 Circom", 3 | "description": "Generates one round of weights based on miner evaluation data from SN2.", 4 | "author": "Inference Labs", 5 | "version": "0.0.5", 6 | "proof_system": "CIRCOM", 7 | "type": "proof_of_computation", 8 | "external_files": { 9 | "circuit.zkey": "https://storage.omron.ai/fa0d509d52abe2d1e809124f8aba46258a02f7253582f7b7f5a22e1e0bca0dfb/circuit.zkey" 10 | }, 11 | "benchmark_choice_weight": 0.2 12 | } 13 | -------------------------------------------------------------------------------- /neurons/deployment_layer/model_fa0d509d52abe2d1e809124f8aba46258a02f7253582f7b7f5a22e1e0bca0dfb/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "scaling": 100000000, 3 | "public_inputs": { 4 | "order": [ 5 | "new_score", 6 | "block_number_out", 7 | "miner_uid_out", 8 | "validator_uid_out", 9 | "RATE_OF_DECAY", 10 | "RATE_OF_RECOVERY", 11 | "FLATTENING_COEFFICIENT", 12 | "PROOF_SIZE_THRESHOLD", 13 | "PROOF_SIZE_WEIGHT", 14 | "RESPONSE_TIME_WEIGHT", 15 | "MAXIMUM_RESPONSE_TIME_DECIMAL", 16 | "COMPETITION_WEIGHT", 17 | "maximum_score", 18 | "previous_score", 19 | "verified", 20 | "proof_size", 21 | "response_time", 22 | "competition", 23 | "maximum_response_time", 24 | "minimum_response_time", 25 | "block_number", 26 | "validator_uid", 27 | "miner_uid", 28 | "scaling" 29 | ], 30 | "sizes": { 31 | "new_score": 256, 32 | "block_number_out": 256, 33 | "miner_uid_out": 256, 34 | "validator_uid_out": 256, 35 | "RATE_OF_DECAY": 1, 36 | "RATE_OF_RECOVERY": 1, 37 | "FLATTENING_COEFFICIENT": 1, 38 | "PROOF_SIZE_THRESHOLD": 1, 39 | "PROOF_SIZE_WEIGHT": 1, 40 | "RESPONSE_TIME_WEIGHT": 1, 41 | "COMPETITION_WEIGHT": 1, 42 | "MAXIMUM_RESPONSE_TIME_DECIMAL": 1, 43 | "maximum_score": 256, 44 | "previous_score": 256, 45 | "verified": 256, 46 | "proof_size": 256, 47 | "response_time": 256, 48 | "competition": 256, 49 | "maximum_response_time": 256, 50 | "minimum_response_time": 256, 51 | "block_number": 256, 52 | "validator_uid": 256, 53 | "miner_uid": 256, 54 | "scaling": 1 55 | } 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /neurons/dry_run.py: -------------------------------------------------------------------------------- 1 | """ 2 | Entry point for running just preflight checks: 3 | - Check CLI args (in fact completely unnecessary here) 4 | - Model files are synced up 5 | - Node.js >= 20 is installed 6 | - SnarkJS is installed 7 | - Rust and Cargo are installed 8 | - Rust nightly toolchain is installed 9 | - Jolt is installed 10 | 11 | This script is created to be called during the Docker image 
build process 12 | to ensure that all dependencies are installed. 13 | """ 14 | 15 | # isort: off 16 | import cli_parser # <- this needs to stay before bittensor import 17 | 18 | import bittensor as bt 19 | 20 | # isort: on 21 | 22 | from utils import run_shared_preflight_checks 23 | 24 | if __name__ == "__main__": 25 | cli_parser.init_config() 26 | # Run preflight checks, and that's it 27 | run_shared_preflight_checks() 28 | 29 | bt.logging.info("Preflight checks completed. Exiting...") 30 | -------------------------------------------------------------------------------- /neurons/execution_layer/base_input.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from abc import ABC, abstractmethod 3 | from _validator.models.request_type import RequestType 4 | from pydantic import BaseModel 5 | 6 | 7 | class BaseInput(ABC): 8 | """ 9 | Base class for circuit-specific input data. Stores and provides interface 10 | for manipulating circuit input data. 11 | """ 12 | 13 | schema: type[BaseModel] 14 | 15 | def __init__( 16 | self, 17 | request_type: RequestType, 18 | data: dict[str, object] | None = None, 19 | ): 20 | self.request_type = request_type 21 | if request_type == RequestType.BENCHMARK: 22 | self.data = self.generate() 23 | else: 24 | if data is None: 25 | raise ValueError("Data must be provided for non-benchmark requests") 26 | self.validate(data) 27 | self.data = self.process(data) 28 | 29 | @staticmethod 30 | @abstractmethod 31 | def generate() -> dict[str, object]: 32 | """Generates new benchmarking input data for this circuit""" 33 | pass 34 | 35 | @staticmethod 36 | @abstractmethod 37 | def validate(data: dict[str, object]) -> None: 38 | """Validates input data against circuit-specific schema.
Raises ValueError if invalid.""" 39 | pass 40 | 41 | @staticmethod 42 | @abstractmethod 43 | def process(data: dict[str, object]) -> dict[str, object]: 44 | """Processes raw input data into standardized format""" 45 | pass 46 | 47 | def to_array(self) -> list: 48 | """Converts the data to array format""" 49 | return list(self.data.values()) 50 | 51 | def to_json(self) -> dict[str, object]: 52 | """Returns the data in JSON-compatible format""" 53 | return self.data 54 | -------------------------------------------------------------------------------- /neurons/execution_layer/generic_input.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from execution_layer.base_input import BaseInput 3 | from _validator.models.request_type import RequestType 4 | from pydantic import BaseModel 5 | 6 | 7 | class GenericInput(BaseInput): 8 | 9 | schema = BaseModel 10 | 11 | def __init__( 12 | self, request_type: RequestType, data: dict[str, object] | None = None 13 | ): 14 | super().__init__(request_type, data) 15 | 16 | @staticmethod 17 | def generate() -> dict[str, object]: 18 | raise NotImplementedError("Generic input does not support generation") 19 | 20 | @staticmethod 21 | def validate(data: dict[str, object]) -> None: 22 | pass 23 | 24 | @staticmethod 25 | def process(data: dict[str, object]) -> dict[str, object]: 26 | return data 27 | -------------------------------------------------------------------------------- /neurons/execution_layer/input_registry.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from importlib import import_module 3 | from .base_input import BaseInput 4 | 5 | 6 | class InputRegistry: 7 | """Registry for circuit-specific input handlers""" 8 | 9 | _handlers: dict[str, type[BaseInput]] = {} 10 | 11 | @classmethod 12 | def register(cls, circuit_id: str): 13 | """Registers a circuit input handler class for the given circuit ID""" 14 | 15 | def decorator(handler_class: type[BaseInput]): 16 | cls._handlers[circuit_id] = handler_class 17 | return handler_class 18 | 19 | return decorator 20 | 21 | @classmethod 22 | def get_handler(cls, circuit_id: str) -> type[BaseInput]: 23 | """ 24 | Gets the registered input handler for a circuit ID. 25 | Attempts to import the handler module if not already registered.
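The import executes the module's @InputRegistry.register decorator, so registration happens as a side effect of importing deployment_layer.model_<circuit_id>.input.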
26 | 27 | Args: 28 | circuit_id: The ID of the circuit to get the handler for 29 | 30 | Returns: 31 | The input handler class for the circuit 32 | 33 | Raises: 34 | ValueError: If no handler is found or registration fails 35 | """ 36 | if circuit_id not in cls._handlers: 37 | try: 38 | import_module(f"deployment_layer.model_{circuit_id}.input") 39 | if circuit_id not in cls._handlers: 40 | raise ValueError( 41 | f"Input handler for circuit {circuit_id} was not registered" 42 | ) 43 | except ImportError as e: 44 | raise ValueError( 45 | f"No input handler found for circuit {circuit_id}: {e}" 46 | ) 47 | 48 | return cls._handlers[circuit_id] 49 | -------------------------------------------------------------------------------- /neurons/execution_layer/proof_handlers/base_handler.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from abc import ABC, abstractmethod 3 | from typing import TYPE_CHECKING 4 | 5 | from execution_layer.generic_input import GenericInput 6 | 7 | if TYPE_CHECKING: 8 | from execution_layer.verified_model_session import VerifiedModelSession 9 | 10 | 11 | class ProofSystemHandler(ABC): 12 | """ 13 | An abstract base class for proof system handlers. 14 | """ 15 | 16 | @abstractmethod 17 | def gen_input_file(self, session: VerifiedModelSession): 18 | """ 19 | Generate an input file for the proof system. 20 | 21 | Args: 22 | session (VerifiedModelSession): The current handler session. 23 | """ 24 | 25 | @abstractmethod 26 | def gen_proof(self, session: VerifiedModelSession) -> tuple[str, str]: 27 | """ 28 | Generate a proof for the given session. 29 | 30 | Args: 31 | session (VerifiedModelSession): The current handler session. 32 | 33 | Returns: 34 | tuple[str, str]: A tuple containing the proof content (str), 35 | the public data (str). 36 | """ 37 | 38 | @abstractmethod 39 | def verify_proof( 40 | self, 41 | session: VerifiedModelSession, 42 | validator_inputs: GenericInput, 43 | proof: dict | str, 44 | ) -> bool: 45 | """ 46 | Verify a proof for the given session. 47 | 48 | Args: 49 | session (VerifiedModelSession): The current handler session. 50 | validator_inputs (GenericInput): The validator inputs to verify the proof against. 51 | proof (dict | str): The proof to verify. 52 | """ 53 | 54 | @abstractmethod 55 | def generate_witness( 56 | self, session: VerifiedModelSession, return_content: bool = False 57 | ) -> list | dict: 58 | """ 59 | Generate a witness for the given session. 60 | 61 | Args: 62 | session (VerifiedModelSession): The current handler session. 63 | return_content (bool): Whether to return the witness content. 64 | """ 65 | 66 | @abstractmethod 67 | def aggregate_proofs( 68 | self, session: VerifiedModelSession, proofs: list[str] 69 | ) -> tuple[str, float]: 70 | """ 71 | Aggregate multiple proofs into a single proof for the given session. 72 | 73 | Returns: 74 | tuple[str, float]: A tuple containing the aggregated proof content (str) 75 | and the time taken to aggregate the proofs (float). 
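Implementations that do not support aggregation raise NotImplementedError.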
76 | """ 77 | -------------------------------------------------------------------------------- /neurons/execution_layer/proof_handlers/ezkl_handler.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | import json 3 | import os 4 | from typing import TYPE_CHECKING 5 | import subprocess 6 | import bittensor as bt 7 | import traceback 8 | import ezkl 9 | from enum import Enum 10 | 11 | from execution_layer.proof_handlers.base_handler import ProofSystemHandler 12 | from execution_layer.generic_input import GenericInput 13 | 14 | if TYPE_CHECKING: 15 | from execution_layer.verified_model_session import VerifiedModelSession 16 | 17 | LOCAL_EZKL_PATH = os.path.join(os.path.expanduser("~"), ".ezkl", "ezkl") 18 | 19 | 20 | class EZKLInputType(Enum): 21 | F16 = ezkl.PyInputType.F16 22 | F32 = ezkl.PyInputType.F32 23 | F64 = ezkl.PyInputType.F64 24 | Int = ezkl.PyInputType.Int 25 | Bool = ezkl.PyInputType.Bool 26 | TDim = ezkl.PyInputType.TDim 27 | 28 | 29 | class EZKLHandler(ProofSystemHandler): 30 | """ 31 | Handler for the EZKL proof system. 32 | This class provides methods for generating and verifying proofs using EZKL. 33 | """ 34 | 35 | def gen_input_file(self, session: VerifiedModelSession): 36 | bt.logging.trace("Generating input file") 37 | if isinstance(session.inputs.data, list): 38 | input_data = session.inputs.data 39 | else: 40 | input_data = session.inputs.to_array() 41 | data = {"input_data": input_data} 42 | os.makedirs(os.path.dirname(session.session_storage.input_path), exist_ok=True) 43 | with open(session.session_storage.input_path, "w", encoding="utf-8") as f: 44 | json.dump(data, f) 45 | bt.logging.trace(f"Generated input.json with data: {data}") 46 | 47 | def gen_proof(self, session: VerifiedModelSession) -> tuple[str, str]: 48 | try: 49 | bt.logging.debug("Starting proof generation...") 50 | 51 | self.generate_witness(session) 52 | bt.logging.trace("Generating proof") 53 | 54 | result = subprocess.run( 55 | [ 56 | LOCAL_EZKL_PATH, 57 | "prove", 58 | "--witness", 59 | session.session_storage.witness_path, 60 | "--compiled-circuit", 61 | session.model.paths.compiled_model, 62 | "--pk-path", 63 | session.model.paths.pk, 64 | "--proof-path", 65 | session.session_storage.proof_path, 66 | ], 67 | check=True, 68 | capture_output=True, 69 | text=True, 70 | ) 71 | 72 | bt.logging.trace( 73 | f"Proof generated: {session.session_storage.proof_path}, result: {result.stdout}" 74 | ) 75 | 76 | with open(session.session_storage.proof_path, "r", encoding="utf-8") as f: 77 | proof = json.load(f) 78 | 79 | return json.dumps(proof), json.dumps(proof["instances"]) 80 | 81 | except Exception as e: 82 | bt.logging.error(f"An error occurred during proof generation: {e}") 83 | traceback.print_exc() 84 | raise 85 | 86 | def verify_proof( 87 | self, 88 | session: VerifiedModelSession, 89 | validator_inputs: GenericInput, 90 | proof: str | dict, 91 | ) -> bool: 92 | if not proof: 93 | return False 94 | 95 | if isinstance(proof, str): 96 | proof_json = json.loads(proof) 97 | else: 98 | proof_json = proof 99 | 100 | input_instances = self.translate_inputs_to_instances(session, validator_inputs) 101 | 102 | proof_json["instances"] = [ 103 | input_instances[:] + proof_json["instances"][0][len(input_instances) :] 104 | ] 105 | 106 | proof_json["transcript_type"] = "EVM" 107 | 108 | with open(session.session_storage.proof_path, "w", encoding="utf-8") as f: 109 | json.dump(proof_json, f) 110 | 111 | try: 112 | result = subprocess.run( 
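# Shell out to the local `ezkl verify` CLI, checking the proof file
# (rewritten above with the validator's input instances) against the
# circuit's settings and verification key.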
113 | [ 114 | LOCAL_EZKL_PATH, 115 | "verify", 116 | "--settings-path", 117 | session.model.paths.settings, 118 | "--proof-path", 119 | session.session_storage.proof_path, 120 | "--vk-path", 121 | session.model.paths.vk, 122 | ], 123 | check=True, 124 | capture_output=True, 125 | text=True, 126 | timeout=60, 127 | ) 128 | return "verified: true" in result.stdout 129 | except subprocess.TimeoutExpired: 130 | bt.logging.error("Verification process timed out after 60 seconds") 131 | return False 132 | except subprocess.CalledProcessError: 133 | return False 134 | 135 | def generate_witness( 136 | self, session: VerifiedModelSession, return_content: bool = False 137 | ) -> list | dict: 138 | bt.logging.trace("Generating witness") 139 | 140 | result = subprocess.run( 141 | [ 142 | LOCAL_EZKL_PATH, 143 | "gen-witness", 144 | "--data", 145 | session.session_storage.input_path, 146 | "--compiled-circuit", 147 | session.model.paths.compiled_model, 148 | "--output", 149 | session.session_storage.witness_path, 150 | "--vk-path", 151 | session.model.paths.vk, 152 | ], 153 | check=True, 154 | capture_output=True, 155 | text=True, 156 | ) 157 | 158 | bt.logging.debug(f"Gen witness result: {result.stdout}") 159 | 160 | if return_content: 161 | with open(session.session_storage.witness_path, "r", encoding="utf-8") as f: 162 | return json.load(f) 163 | return result.stdout 164 | 165 | def translate_inputs_to_instances( 166 | self, session: VerifiedModelSession, validator_inputs: GenericInput 167 | ) -> list[int]: 168 | scale_map = session.model.settings.get("model_input_scales", []) 169 | type_map = session.model.settings.get("input_types", []) 170 | return [ 171 | ezkl.float_to_felt(x, scale_map[i], EZKLInputType[type_map[i]].value) 172 | for i, arr in enumerate(validator_inputs.to_array()) 173 | for x in arr 174 | ] 175 | 176 | def aggregate_proofs( 177 | self, session: VerifiedModelSession, proofs: list[str] 178 | ) -> tuple[str, float]: 179 | raise NotImplementedError("Proof aggregation not supported at this time.") 180 | -------------------------------------------------------------------------------- /neurons/execution_layer/proof_handlers/factory.py: -------------------------------------------------------------------------------- 1 | from execution_layer.proof_handlers.circom_handler import CircomHandler 2 | from execution_layer.circuit import ProofSystem 3 | from execution_layer.proof_handlers.jolt_handler import JoltHandler 4 | from execution_layer.proof_handlers.ezkl_handler import EZKLHandler 5 | 6 | 7 | class ProofSystemFactory: 8 | _handlers = { 9 | ProofSystem.CIRCOM: CircomHandler, 10 | ProofSystem.JOLT: JoltHandler, 11 | ProofSystem.EZKL: EZKLHandler, 12 | } 13 | 14 | @classmethod 15 | def get_handler(cls, proof_system): 16 | if isinstance(proof_system, str): 17 | try: 18 | proof_system = ProofSystem[proof_system.upper()] 19 | except KeyError as e: 20 | raise ValueError(f"Invalid proof system string: {proof_system}") from e 21 | 22 | handler_class = cls._handlers.get(proof_system) 23 | if handler_class is None: 24 | raise ValueError(f"Unsupported proof system: {proof_system}") 25 | return handler_class() 26 | -------------------------------------------------------------------------------- /neurons/execution_layer/proof_handlers/jolt_handler.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import subprocess 4 | import traceback 5 | from typing import TYPE_CHECKING 6 | import bittensor as bt 7 | from 
execution_layer.proof_handlers.base_handler import ProofSystemHandler 8 | from execution_layer.generic_input import GenericInput 9 | 10 | if TYPE_CHECKING: 11 | from execution_layer.verified_model_session import VerifiedModelSession 12 | 13 | # Custom home directory for Jolt 14 | JOLT_HOME = os.path.join(os.path.expanduser("~"), ".jolt_home") 15 | 16 | if not os.path.exists(JOLT_HOME): 17 | os.makedirs(JOLT_HOME) 18 | 19 | 20 | class JoltHandler(ProofSystemHandler): 21 | def gen_input_file(self, session): 22 | bt.logging.trace("Generating input file") 23 | data = session.inputs.to_json() 24 | dir_name = os.path.dirname(session.session_storage.input_path) 25 | os.makedirs(dir_name, exist_ok=True) 26 | with open(session.session_storage.input_path, "w", encoding="utf-8") as f: 27 | json.dump(data, f) 28 | bt.logging.trace(f"Generated input.json with data: {data}") 29 | 30 | def generate_witness( 31 | self, session: "VerifiedModelSession", return_content: bool = False 32 | ) -> None: 33 | raise NotImplementedError("JoltHandler does not implement generate_witness") 34 | 35 | def gen_proof(self, session): 36 | try: 37 | bt.logging.debug( 38 | f"Starting proof generation with paths: {session.session_storage.input_path}, " 39 | f"{session.model.paths.compiled_model}, {session.session_storage.proof_path}, " 40 | f"{session.session_storage.public_path}" 41 | ) 42 | proof, out = self.proof_worker( 43 | input_path=session.session_storage.input_path, 44 | circuit_path=session.model.paths.compiled_model, 45 | proof_path=session.session_storage.proof_path, 46 | public_path=session.session_storage.public_path, 47 | ) 48 | return proof, out 49 | except Exception as e: 50 | bt.logging.error(f"An error occurred during proof generation: {e}") 51 | raise 52 | 53 | def verify_proof( 54 | self, 55 | session: "VerifiedModelSession", 56 | public_data: GenericInput, 57 | proof: str, 58 | ) -> bool: 59 | try: 60 | proof_bytes = bytes.fromhex(proof) 61 | with open(session.session_storage.proof_path, "wb") as proof_file: 62 | proof_file.write(proof_bytes) 63 | 64 | with open( 65 | session.session_storage.public_path, "w", encoding="utf-8" 66 | ) as public_file: 67 | json.dump(public_data.to_json(), public_file) 68 | 69 | result = subprocess.run( 70 | [ 71 | session.model.paths.compiled_model, 72 | "verify", 73 | "--input", 74 | session.session_storage.input_path, 75 | "--proof", 76 | session.session_storage.proof_path, 77 | "--output", 78 | session.session_storage.public_path, 79 | ], 80 | check=True, 81 | capture_output=True, 82 | text=True, 83 | cwd=os.path.dirname(session.model.paths.compiled_model), 84 | ) 85 | bt.logging.trace(f"Proof verification stdout: {result.stdout}") 86 | bt.logging.trace(f"Proof verification stderr: {result.stderr}") 87 | return result.returncode == 0 88 | except subprocess.CalledProcessError as e: 89 | bt.logging.error(f"Proof verification failed: {e}") 90 | bt.logging.error(f"Proof verification stdout: {e.stdout}") 91 | bt.logging.error(f"Proof verification stderr: {e.stderr}") 92 | return False 93 | except Exception as e: 94 | bt.logging.error(f"Unexpected error during proof verification: {e}") 95 | bt.logging.error(f"Error traceback: {traceback.format_exc()}") 96 | return False 97 | 98 | @staticmethod 99 | def proof_worker( 100 | input_path, circuit_path, proof_path, public_path 101 | ) -> tuple[bytes, str]: 102 | try: 103 | result = subprocess.run( 104 | [ 105 | circuit_path, 106 | "prove", 107 | "--input", 108 | input_path, 109 | "--output", 110 | public_path, 111 | "--proof", 112 | 
proof_path, 113 | ], 114 | check=True, 115 | capture_output=True, 116 | text=True, 117 | cwd=os.path.dirname(circuit_path), 118 | ) 119 | bt.logging.debug(f"Proof generated: {proof_path}") 120 | bt.logging.trace(f"Proof generation stdout: {result.stdout}") 121 | bt.logging.trace(f"Proof generation stderr: {result.stderr}") 122 | with open(proof_path, "rb") as proof_file: 123 | proof = proof_file.read() 124 | with open(public_path, "r", encoding="utf-8") as public_file: 125 | public_data = public_file.read() 126 | return proof, public_data 127 | except subprocess.CalledProcessError as e: 128 | bt.logging.error(f"Error generating proof: {e}") 129 | bt.logging.error(f"Proof generation stdout: {e.stdout}") 130 | bt.logging.error(f"Proof generation stderr: {e.stderr}") 131 | raise 132 | 133 | @staticmethod 134 | def aggregate_proofs( 135 | session: "VerifiedModelSession", proofs: list[str] 136 | ) -> tuple[str, float]: 137 | raise NotImplementedError( 138 | "Aggregation of proofs is not implemented for JoltHandler" 139 | ) 140 | -------------------------------------------------------------------------------- /neurons/execution_layer/session_storage.py: -------------------------------------------------------------------------------- 1 | import os 2 | import bittensor as bt 3 | from dataclasses import dataclass, field 4 | from utils.system import get_temp_folder 5 | 6 | dir_path = os.path.dirname(os.path.realpath(__file__)) 7 | 8 | 9 | @dataclass 10 | class SessionStorage: 11 | model_id: str 12 | session_uuid: str 13 | base_path: str = field(default_factory=get_temp_folder) 14 | input_path: str = field(init=False) 15 | witness_path: str = field(init=False) 16 | proof_path: str = field(init=False) 17 | aggregated_proof_path: str = field(init=False) 18 | public_path: str = field(init=False) 19 | 20 | def __post_init__(self): 21 | if not os.path.exists(self.base_path): 22 | os.makedirs(self.base_path) 23 | self.input_path = os.path.join( 24 | self.base_path, f"input_{self.model_id}_{self.session_uuid}.json" 25 | ) 26 | self.witness_path = os.path.join( 27 | self.base_path, f"witness_{self.model_id}_{self.session_uuid}.json" 28 | ) 29 | self.proof_path = os.path.join( 30 | self.base_path, f"proof_{self.model_id}_{self.session_uuid}.json" 31 | ) 32 | self.aggregated_proof_path = os.path.join( 33 | self.base_path, f"aggregated_proof_{self.model_id}_{self.session_uuid}.json" 34 | ) 35 | self.public_path = os.path.join( 36 | self.base_path, f"proof_{self.model_id}_{self.session_uuid}.public.json" 37 | ) 38 | bt.logging.debug( 39 | f"SessionStorage initialized with model_id: {self.model_id} and session_uuid: {self.session_uuid}" 40 | ) 41 | bt.logging.trace(f"Input path: {self.input_path}") 42 | bt.logging.trace(f"Witness path: {self.witness_path}") 43 | bt.logging.trace(f"Proof path: {self.proof_path}") 44 | bt.logging.trace(f"Aggregated proof path: {self.aggregated_proof_path}") 45 | 46 | def get_proof_path_for_iteration(self, iteration: int) -> str: 47 | return os.path.join( 48 | self.base_path, 49 | f"proof_{self.model_id}_{self.session_uuid}_{iteration}.json", 50 | ) 51 | 52 | def get_session_path(self, session_id: str) -> str: 53 | session_path = os.path.join(self.base_path, session_id) 54 | if not os.path.exists(session_path): 55 | os.makedirs(session_path) 56 | return session_path 57 | -------------------------------------------------------------------------------- /neurons/execution_layer/verified_model_session.py: -------------------------------------------------------------------------------- 1 
| from __future__ import annotations 2 | import asyncio 3 | import multiprocessing 4 | 5 | import os 6 | import time 7 | import traceback 8 | import uuid 9 | 10 | import bittensor as bt 11 | from attr import define, field 12 | from execution_layer.circuit import Circuit 13 | from execution_layer.proof_handlers.base_handler import ProofSystemHandler 14 | from execution_layer.proof_handlers.factory import ProofSystemFactory 15 | from execution_layer.session_storage import SessionStorage 16 | from execution_layer.base_input import BaseInput 17 | from execution_layer.generic_input import GenericInput 18 | 19 | # Use the "fork" start method so child processes inherit the parent's state rather than re-importing the module 20 | multiprocessing.set_start_method("fork", force=True) 21 | 22 | 23 | @define 24 | class VerifiedModelSession: 25 | """ 26 | Represents a session for a verified model execution. 27 | 28 | This class encapsulates the necessary components and operations for running 29 | and verifying a model within a proof system. It handles input preparation, 30 | proof generation, and verification. It facilitates interaction with multiple 31 | proof systems and circuits in a consistent manner. 32 | 33 | Attributes: 34 | model (Circuit): The circuit representation of the model. 35 | session_storage (SessionStorage): Manages storage for session-related files. 36 | inputs (BaseInput | None): Input data for the model. 37 | session_id (str): Unique identifier for the session. 38 | proof_handler (ProofSystemHandler): Handles proof-related operations. 39 | 40 | """ 41 | 42 | model: Circuit = field() 43 | session_storage: SessionStorage = field(init=False) 44 | inputs: BaseInput | None = field(default=None) 45 | session_id: str = field(init=False, factory=lambda: str(uuid.uuid4())) 46 | proof_handler: ProofSystemHandler = field(init=False) 47 | 48 | def __init__( 49 | self, 50 | inputs: BaseInput | None = None, 51 | model: Circuit | None = None, 52 | ): 53 | if model is None: 54 | raise ValueError("Model must be provided") 55 | self.model = model 56 | self.inputs = inputs 57 | self.session_id = str(uuid.uuid4()) 58 | self.session_storage = SessionStorage(self.model.id, self.session_id) 59 | self.proof_handler = ProofSystemFactory.get_handler( 60 | self.model.metadata.proof_system 61 | ) 62 | self.gen_input_file() 63 | 64 | def gen_input_file(self): 65 | """ 66 | Generate an input file for use in witness creation. 67 | """ 68 | self.proof_handler.gen_input_file(self) 69 | 70 | def gen_proof(self) -> tuple[str, str, float]: 71 | """ 72 | Generate a proof for a given inference. 73 | """ 74 | try: 75 | bt.logging.debug("Starting proof generation process...") 76 | start_time = time.time() 77 | 78 | with multiprocessing.Pool(1) as p: 79 | proof_content = p.apply( 80 | func=self._proof_worker, 81 | args=[self], 82 | ) 83 | 84 | proof_time = time.time() - start_time 85 | bt.logging.info(f"Proof generation took {proof_time} seconds") 86 | bt.logging.trace(f"Proof content: {proof_content}") 87 | return proof_content[0], proof_content[1], proof_time 88 | 89 | except Exception as e: 90 | bt.logging.error(f"An error occurred during proof generation: {e}") 91 | traceback.print_exc() 92 | raise 93 | 94 | def aggregate_proofs(self, proofs: list[str]) -> tuple[str, float]: 95 | """ 96 | Aggregate multiple proofs into a single proof.
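The work is delegated to the circuit's proof system handler.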
97 | """ 98 | return self.proof_handler.aggregate_proofs(self, proofs) 99 | 100 | @staticmethod 101 | def _proof_worker(session: VerifiedModelSession) -> tuple[str, str]: 102 | """ 103 | Handle the proof generation process in a separate process. 104 | """ 105 | bt.logging.debug("Starting proof_worker") 106 | loop = asyncio.new_event_loop() 107 | asyncio.set_event_loop(loop) 108 | try: 109 | result = loop.run_until_complete(VerifiedModelSession._proof_task(session)) 110 | bt.logging.debug("proof_task completed successfully") 111 | return result 112 | except Exception as e: 113 | bt.logging.error(f"Error in proof_worker: {str(e)}") 114 | raise 115 | finally: 116 | loop.close() 117 | 118 | @staticmethod 119 | async def _proof_task(session: VerifiedModelSession) -> tuple[str, str]: 120 | """ 121 | Asynchronous task for generating a proof. 122 | """ 123 | return session.proof_handler.gen_proof(session) 124 | 125 | def verify_proof(self, validator_inputs: GenericInput, proof: dict | str) -> bool: 126 | """ 127 | Verify a proven inference. 128 | """ 129 | try: 130 | bt.logging.debug("Starting proof verification process...") 131 | with multiprocessing.Pool(1) as p: 132 | verification_result = p.apply( 133 | func=self._verify_worker, 134 | args=[self, validator_inputs, proof], 135 | ) 136 | return verification_result 137 | 138 | except Exception as e: 139 | bt.logging.error(f"An error occurred during proof verification: {e}") 140 | traceback.print_exc() 141 | raise 142 | 143 | @staticmethod 144 | def _verify_worker( 145 | session: VerifiedModelSession, 146 | validator_inputs: GenericInput, 147 | proof: dict | str, 148 | ) -> bool: 149 | """ 150 | Handle the proof verification process in a separate process. 151 | """ 152 | bt.logging.debug("Starting verify_worker") 153 | return session.proof_handler.verify_proof(session, validator_inputs, proof) 154 | 155 | def generate_witness(self, return_content: bool = False) -> list | dict: 156 | """ 157 | Generate a witness file for use in proof generation. 158 | This performs an inference through the circuitized model. 
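For EZKL circuits, the witness produced here is what gen_proof passes to the prover.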
159 | """ 160 | return self.proof_handler.generate_witness(self, return_content) 161 | 162 | def __enter__(self): 163 | return self 164 | 165 | def end(self): 166 | self.remove_temp_files() 167 | 168 | def remove_temp_files(self): 169 | for path in ( 170 | self.session_storage.input_path, 171 | self.session_storage.witness_path, 172 | self.session_storage.proof_path, 173 | self.session_storage.public_path, 174 | ): 175 | if os.path.exists(path): 176 | os.remove(path) 177 | 178 | def __exit__(self, exc_type, exc_val, exc_tb): 179 | return None 180 | -------------------------------------------------------------------------------- /neurons/generate_test_input.py: -------------------------------------------------------------------------------- 1 | from deployment_layer.model_50818a54b31b3e0fe3306a7fb7826156fc2c42c9d64c6ba106ba135fbe7b7b19.input import ( 2 | CircuitInput, 3 | ) 4 | from _validator.models.request_type import RequestType 5 | 6 | 7 | def main(): 8 | input_instance = CircuitInput(RequestType.BENCHMARK) 9 | generated_data = input_instance.generate() 10 | print(f"Generated list items: {generated_data['list_items']}") 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /neurons/miner.py: -------------------------------------------------------------------------------- 1 | import traceback 2 | 3 | # isort: off 4 | import cli_parser # <- this need to stay before bittensor import 5 | 6 | import bittensor as bt 7 | 8 | # isort: on 9 | 10 | from _miner.miner_session import MinerSession 11 | from constants import Roles 12 | from utils import run_shared_preflight_checks 13 | 14 | if __name__ == "__main__": 15 | cli_parser.init_config(Roles.MINER) 16 | run_shared_preflight_checks(Roles.MINER) 17 | 18 | try: 19 | # Initialize the circuit store and load external models 20 | from deployment_layer.circuit_store import circuit_store 21 | 22 | circuit_store.load_circuits() 23 | 24 | bt.logging.info("Creating miner session...") 25 | miner_session = MinerSession() 26 | bt.logging.debug("Running main loop...") 27 | miner_session.run() 28 | except Exception: 29 | bt.logging.error( 30 | f"CRITICAL: Failed to run miner session\n{traceback.format_exc()}" 31 | ) 32 | -------------------------------------------------------------------------------- /neurons/protocol.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from typing import Dict, Optional 3 | 4 | import bittensor as bt 5 | 6 | from execution_layer.circuit import ProofSystem 7 | 8 | 9 | class QueryZkProof(bt.Synapse): 10 | """ 11 | QueryZkProof class inherits from bt.Synapse. 12 | It is used to query zkproof of certain model. 13 | """ 14 | 15 | # Required request input, filled by sending dendrite caller. 16 | query_input: Optional[Dict] = None 17 | 18 | # Optional request output, filled by receiving axon. 19 | query_output: Optional[str] = None 20 | 21 | def deserialize(self: QueryZkProof) -> str | None: 22 | """ 23 | unpack query_output 24 | """ 25 | return self.query_output 26 | 27 | 28 | class QueryForProvenInference(bt.Synapse): 29 | """ 30 | A Synapse for querying proven inferences. 31 | DEV: This synapse is a placeholder. 32 | """ 33 | 34 | query_input: Optional[dict] = None 35 | query_output: Optional[dict] = None 36 | 37 | def deserialize(self) -> dict | None: 38 | """ 39 | Deserialize the query_output into a dictionary. 
40 | """ 41 | return self.query_output 42 | 43 | 44 | class ProofOfWeightsSynapse(bt.Synapse): 45 | """ 46 | A synapse for conveying proof of weights messages 47 | """ 48 | 49 | subnet_uid: int = 2 50 | verification_key_hash: str 51 | proof_system: ProofSystem = ProofSystem.CIRCOM 52 | inputs: dict 53 | proof: str 54 | public_signals: str 55 | 56 | def deserialize(self) -> dict | None: 57 | """ 58 | Return the proof 59 | """ 60 | return { 61 | "inputs": self.inputs, 62 | "proof": self.proof, 63 | "public_signals": self.public_signals, 64 | } 65 | 66 | 67 | class Competition(bt.Synapse): 68 | """ 69 | A synapse for conveying competition messages and circuit files 70 | """ 71 | 72 | id: int # Competition ID 73 | hash: str # Circuit hash 74 | file_name: str # Name of file being requested 75 | file_content: Optional[str] = None # Hex encoded file content 76 | commitment: Optional[str] = None # Circuit commitment data from miner 77 | error: Optional[str] = None # Error message if something goes wrong 78 | 79 | def deserialize(self) -> dict: 80 | """Return all fields including required ones""" 81 | return { 82 | "id": self.id, 83 | "hash": self.hash, 84 | "file_name": self.file_name, 85 | "file_content": self.file_content, 86 | "commitment": self.commitment, 87 | "error": self.error, 88 | } 89 | 90 | 91 | class QueryForProofAggregation(bt.Synapse): 92 | """ 93 | Query for aggregation of multiple proofs into a single proof 94 | """ 95 | 96 | proofs: list[str] = [] 97 | model_id: str or int 98 | aggregation_proof: Optional[str] = None 99 | 100 | def deserialize(self) -> str | None: 101 | """ 102 | Return the aggregation proof 103 | """ 104 | return self.aggregation_proof 105 | -------------------------------------------------------------------------------- /neurons/scripts/check_miner_axon.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | """ 3 | Usage instructions 4 | 5 | In your command line, navigate into the neurons directory 6 | cd neurons 7 | 8 | Then, run the following command to check the axon of a miner 9 | 10 | External IP and Port: Enter the target WAN IP and port of the miner 11 | Wallet and Hotkey: Enter your wallet name and hotkey name 12 | 13 | scripts/check_miner_axon.py --external_ip --port --wallet --hotkey 14 | 15 | To debug an issue with the script or see more information, include --trace in the command line arguments. 
16 | """ 17 | from constants import ONE_MINUTE 18 | 19 | import argparse 20 | import os 21 | import sys 22 | import bittensor as bt 23 | import requests 24 | 25 | sys.path.append(os.path.join(os.path.dirname(__file__), "..")) 26 | 27 | # flake8: noqa 28 | from protocol import QueryZkProof 29 | 30 | # Parse external IP and port from command line arguments 31 | parser = argparse.ArgumentParser(description="Check miner axon", add_help=False) 32 | required_named = parser.add_argument_group("required named arguments") 33 | required_named.add_argument( 34 | "--external_ip", type=str, required=True, help="External IP of the miner" 35 | ) 36 | parser.add_argument( 37 | "--port", 38 | type=int, 39 | help="Port on which the miner's axon is running", 40 | default=8091, 41 | ) 42 | parser.add_argument( 43 | "--wallet", 44 | type=str, 45 | help="Wallet name", 46 | default="default", 47 | ) 48 | parser.add_argument( 49 | "--hotkey", 50 | type=str, 51 | help="Hotkey name", 52 | default="default", 53 | ) 54 | parser.add_argument( 55 | "--trace", 56 | help="Enable trace logging", 57 | action="store_true", 58 | ) 59 | 60 | args, unknown = parser.parse_known_args() 61 | 62 | 63 | if args.trace: 64 | bt.logging.set_trace(True) 65 | 66 | query_input = {"model_id": [0], "public_inputs": [1, 1, 1, 1, 1]} 67 | 68 | if __name__ == "__main__": 69 | bt.logging.info( 70 | f"Checking miner axon at {args.external_ip}:{args.port} using wallet {args.wallet} and hotkey {args.hotkey}" 71 | ) 72 | try: 73 | url = f"http://{args.external_ip}:{args.port}/QueryZkProof" 74 | bt.logging.trace(f"Attempting HTTP connection via URL: {url}") 75 | http_response = requests.get(url, timeout=30) 76 | bt.logging.trace(f"HTTP Response Body: {http_response.text}") 77 | bt.logging.success( 78 | "HTTP connection established. Your port is open and your axon is responding." 79 | ) 80 | 81 | except Exception as e: 82 | bt.logging.exception( 83 | "Failed to establish HTTP connection. This could indicate that the axon is not running or your port is not exposed." 84 | "Please check your configuration.\n", 85 | e, 86 | ) 87 | raise e 88 | 89 | wallet = bt.wallet(name=args.wallet, hotkey=args.hotkey) 90 | axon = bt.axon(wallet=wallet, external_ip=args.external_ip, external_port=args.port) 91 | bt.logging.trace(f"Attempting to query axon: {axon}") 92 | response = bt.dendrite(wallet=wallet).query( 93 | [axon], 94 | QueryZkProof(query_input=query_input), 95 | deserialize=False, 96 | timeout=ONE_MINUTE, 97 | ) 98 | bt.logging.trace(f"Dendrite query response: {response}") 99 | if response[0] is not None and not response[0].dendrite.status_message.startswith( 100 | "Failed" 101 | ): 102 | bt.logging.trace(f"Status Message: {response[0].dendrite.status_message}") 103 | bt.logging.success("Axon is running and ready to query.") 104 | else: 105 | bt.logging.error( 106 | "Failed to query axon. Check your port is exposed correctly and the axon is running." 
107 | ) 108 | -------------------------------------------------------------------------------- /neurons/scripts/create_competition_circuit.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.onnx 4 | import json 5 | import subprocess 6 | import logging 7 | 8 | 9 | class DummyModel(nn.Module): 10 | def __init__(self): 11 | super(DummyModel, self).__init__() 12 | self.conv = nn.Conv2d(3, 3, 1) 13 | 14 | def forward(self, x): 15 | x = self.conv(x) 16 | # Stage 1 outputs 17 | prob_stage_1 = torch.ones(x.shape[0], 3) 18 | prob_stage_2 = torch.ones(x.shape[0], 3) 19 | prob_stage_3 = torch.ones(x.shape[0], 3) 20 | stage1_delta_k = torch.ones(x.shape[0], 1) 21 | stage2_delta_k = torch.ones(x.shape[0], 1) 22 | stage3_delta_k = torch.ones(x.shape[0], 1) 23 | index_offset_stage1 = torch.ones(x.shape[0], 3) 24 | index_offset_stage2 = torch.ones(x.shape[0], 3) 25 | index_offset_stage3 = torch.ones(x.shape[0], 3) 26 | return ( 27 | prob_stage_1, 28 | prob_stage_2, 29 | prob_stage_3, 30 | stage1_delta_k, 31 | stage2_delta_k, 32 | stage3_delta_k, 33 | index_offset_stage1, 34 | index_offset_stage2, 35 | index_offset_stage3, 36 | ) 37 | 38 | 39 | model = DummyModel() 40 | dummy_input = torch.randn(1, 3, 64, 64) 41 | 42 | # Export ONNX model 43 | torch.onnx.export( 44 | model, 45 | dummy_input, 46 | "network.onnx", 47 | input_names=["input"], 48 | output_names=[ 49 | "prob_stage_1", 50 | "prob_stage_2", 51 | "prob_stage_3", 52 | "stage1_delta_k", 53 | "stage2_delta_k", 54 | "stage3_delta_k", 55 | "index_offset_stage1", 56 | "index_offset_stage2", 57 | "index_offset_stage3", 58 | ], 59 | dynamic_axes={ 60 | "input": {0: "batch_size"}, 61 | "prob_stage_1": {0: "batch_size"}, 62 | "prob_stage_2": {0: "batch_size"}, 63 | "prob_stage_3": {0: "batch_size"}, 64 | "stage1_delta_k": {0: "batch_size"}, 65 | "stage2_delta_k": {0: "batch_size"}, 66 | "stage3_delta_k": {0: "batch_size"}, 67 | "index_offset_stage1": {0: "batch_size"}, 68 | "index_offset_stage2": {0: "batch_size"}, 69 | "index_offset_stage3": {0: "batch_size"}, 70 | }, 71 | ) 72 | input_data = { 73 | "input_data": [dummy_input.numpy().flatten().tolist()], 74 | } 75 | 76 | with open("input.json", "w") as f: 77 | json.dump(input_data, f) 78 | 79 | logging.basicConfig(level=logging.INFO) 80 | logger = logging.getLogger(__name__) 81 | 82 | commands = [ 83 | "ezkl gen-settings", 84 | "ezkl calibrate-settings --data input.json", 85 | "ezkl compile-circuit", 86 | "ezkl setup", 87 | "ezkl gen-witness --data input.json", 88 | "ezkl prove", 89 | "ezkl verify", 90 | ] 91 | 92 | for cmd in commands: 93 | logger.info(f"Running command: {cmd}") 94 | try: 95 | result = subprocess.run(cmd.split(), capture_output=True, text=True) 96 | logger.info(f"Output:\n{result.stdout}") 97 | if result.stderr: 98 | logger.error(f"Errors:\n{result.stderr}") 99 | if result.returncode != 0: 100 | logger.error(f"Command failed with return code {result.returncode}") 101 | break 102 | except Exception as e: 103 | logger.error(f"Failed to run command: {e}") 104 | break 105 | -------------------------------------------------------------------------------- /neurons/scripts/sign_api_request.py: -------------------------------------------------------------------------------- 1 | import bittensor as bt 2 | import base64 3 | import time 4 | import argparse 5 | 6 | bt.logging.on() 7 | bt.logging.set_console() 8 | 9 | 10 | def sign_timestamp(wallet_name: str, hotkey_name: str) -> tuple[str, str, str]: 11 | 
""" 12 | Signs the current timestamp using a bittensor wallet's hotkey. 13 | 14 | Args: 15 | wallet_name: Name of the wallet to use 16 | hotkey_name: Name of the hotkey to use 17 | 18 | Returns: 19 | tuple containing (timestamp, ss58_address, base64_signature) 20 | """ 21 | wallet = bt.wallet(name=wallet_name, hotkey=hotkey_name) 22 | timestamp = str(int(time.time())) 23 | signature = wallet.hotkey.sign(timestamp.encode()) 24 | return timestamp, wallet.hotkey.ss58_address, base64.b64encode(signature).decode() 25 | 26 | 27 | def main(): 28 | parser = argparse.ArgumentParser() 29 | parser.add_argument("--wallet", type=str, required=True, help="Name of the wallet") 30 | parser.add_argument("--hotkey", type=str, required=True, help="Name of the hotkey") 31 | args = parser.parse_args() 32 | 33 | try: 34 | timestamp, ss58_address, signature = sign_timestamp(args.wallet, args.hotkey) 35 | print("\nAPI Request Headers:") 36 | print(f"x-timestamp: {timestamp}") 37 | print(f"x-origin-ss58: {ss58_address}") 38 | print(f"x-signature: {signature}") 39 | print("\nThese headers are valid for API requests to the validator.") 40 | except Exception as e: 41 | print(f"Error: {str(e)}") 42 | 43 | 44 | if __name__ == "__main__": 45 | main() 46 | -------------------------------------------------------------------------------- /neurons/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .pre_flight import ( 2 | run_shared_preflight_checks, 3 | ensure_snarkjs_installed, 4 | sync_model_files, 5 | ) 6 | from .system import restart_app, clean_temp_files 7 | from .auto_update import AutoUpdate 8 | from . import wandb_logger 9 | from .rate_limiter import with_rate_limit 10 | 11 | __all__ = [ 12 | "run_shared_preflight_checks", 13 | "ensure_snarkjs_installed", 14 | "sync_model_files", 15 | "restart_app", 16 | "clean_temp_files", 17 | "AutoUpdate", 18 | "wandb_logger", 19 | "with_rate_limit", 20 | ] 21 | -------------------------------------------------------------------------------- /neurons/utils/epoch.py: -------------------------------------------------------------------------------- 1 | from typing import Tuple 2 | 3 | from constants import EPOCH_TEMPO 4 | 5 | 6 | def get_current_epoch_info(current_block: int, netuid: int) -> Tuple[int, int, int]: 7 | """ 8 | Calculates epoch information for the current block based on the Subtensor epoch logic. 9 | 10 | Args: 11 | current_block (int): The current block number. 12 | netuid (int): The netuid of the subnet. 13 | 14 | Returns: 15 | Tuple[int, int, int]: A tuple containing: 16 | - current_epoch (int): The current epoch number. 17 | - blocks_until_next_epoch (int): The number of blocks until the next epoch. 18 | - epoch_start_block (int): The starting block of the current epoch. 19 | """ 20 | tempo_plus_one = EPOCH_TEMPO + 1 21 | adjusted_block = current_block + netuid + 1 22 | 23 | current_epoch = adjusted_block // tempo_plus_one 24 | remainder = adjusted_block % tempo_plus_one 25 | blocks_until_next_epoch = EPOCH_TEMPO - remainder 26 | epoch_start_block = current_block - (EPOCH_TEMPO - blocks_until_next_epoch) 27 | 28 | return current_epoch, blocks_until_next_epoch, epoch_start_block 29 | 30 | 31 | def get_epoch_start_block(epoch: int, netuid: int) -> int: 32 | """ 33 | Calculates the starting block of a given epoch. 34 | 35 | Args: 36 | epoch (int): The epoch number to get the start block for. 37 | netuid (int): The netuid of the subnet. 38 | 39 | Returns: 40 | int: The starting block of the specified epoch. 
41 | """ 42 | tempo_plus_one = EPOCH_TEMPO + 1 43 | return (epoch * tempo_plus_one) - (netuid + 1) 44 | -------------------------------------------------------------------------------- /neurons/utils/gc_logging.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import json 3 | import os 4 | from typing import Optional, TYPE_CHECKING 5 | 6 | import bittensor as bt 7 | import requests 8 | import torch 9 | from requests.adapters import HTTPAdapter 10 | from urllib3.util.retry import Retry 11 | 12 | if TYPE_CHECKING: 13 | from _validator.models.miner_response import MinerResponse 14 | 15 | LOGGING_URL = os.getenv( 16 | "OMRON_LOGGING_URL", 17 | "https://api.omron.ai/statistics/log/", 18 | ) 19 | 20 | COMPETITION_LOGGING_URL = os.getenv( 21 | "COMPETITION_LOGGING_URL", 22 | "https://api.omron.ai/statistics/competition/log/", 23 | ) 24 | 25 | EVAL_LOGGING_URL = os.getenv( 26 | "EVAL_LOGGING_URL", 27 | "https://api.omron.ai/statistics/eval/log/", 28 | ) 29 | 30 | session = requests.Session() 31 | retries = Retry(total=3, backoff_factor=0.1) 32 | session.mount("https://", HTTPAdapter(max_retries=retries)) 33 | 34 | 35 | def log_responses( 36 | metagraph: bt.metagraph, 37 | hotkey: bt.Keypair, 38 | uid: int, 39 | responses: list["MinerResponse"], 40 | overhead_time: float, 41 | block: int, 42 | scores: torch.Tensor, 43 | ) -> Optional[requests.Response]: 44 | """ 45 | Log miner responses to the centralized logging server. 46 | """ 47 | 48 | data = { 49 | "validator_key": hotkey.ss58_address, 50 | "validator_uid": uid, 51 | "overhead_duration": overhead_time, 52 | "block": block, 53 | "responses": [response.to_log_dict(metagraph) for response in responses], 54 | "scores": {k: float(v.item()) for k, v in enumerate(scores) if v.item() > 0}, 55 | } 56 | 57 | input_bytes = json.dumps(data).encode("utf-8") 58 | # sign the inputs with your hotkey 59 | signature = hotkey.sign(input_bytes) 60 | # encode the inputs and signature as base64 61 | signature_str = base64.b64encode(signature).decode("utf-8") 62 | 63 | try: 64 | return session.post( 65 | LOGGING_URL, 66 | data=input_bytes, 67 | headers={ 68 | "X-Request-Signature": signature_str, 69 | "Content-Type": "application/json", 70 | }, 71 | timeout=5, 72 | ) 73 | except requests.exceptions.RequestException as e: 74 | bt.logging.error(f"Failed to log responses: {e}") 75 | return None 76 | 77 | 78 | def gc_log_competition_metrics( 79 | summary_data: dict, hotkey: bt.Keypair 80 | ) -> Optional[requests.Response]: 81 | """ 82 | Logs a pre-formatted competition summary 83 | """ 84 | try: 85 | 86 | if "validator_key" not in summary_data: 87 | bt.logging.warning( 88 | "Validator key not found in competition summary data, adding it." 
89 | ) 90 | summary_data["validator_key"] = hotkey.ss58_address 91 | 92 | input_bytes = json.dumps(summary_data).encode("utf-8") 93 | signature = hotkey.sign(input_bytes) 94 | signature_str = base64.b64encode(signature).decode("utf-8") 95 | 96 | bt.logging.trace(f"Logging competition summary: {summary_data}") 97 | 98 | return session.post( 99 | COMPETITION_LOGGING_URL, 100 | data=input_bytes, 101 | headers={ 102 | "Content-Type": "application/json", 103 | "X-Request-Signature": signature_str, 104 | }, 105 | timeout=10, 106 | ) 107 | except requests.exceptions.RequestException as e: 108 | bt.logging.error(f"Failed to log competition summary: {e}") 109 | return None 110 | except Exception as e: 111 | bt.logging.error( 112 | f"Unexpected error logging competition summary: {e}", exc_info=True 113 | ) 114 | return None 115 | 116 | 117 | def gc_log_eval_metrics( 118 | model_id: str, 119 | model_name: str, 120 | netuid: int, 121 | weights_version: int, 122 | proof_system: str, 123 | circuit_type: str, 124 | proof_size: int, 125 | timeout: float, 126 | benchmark_weight: float, 127 | total_verifications: int, 128 | successful_verifications: int, 129 | min_response_time: float, 130 | max_response_time: float, 131 | avg_response_time: float, 132 | last_verification_time: int, 133 | last_block: int, 134 | verification_ratio: float, 135 | hotkey: bt.Keypair, 136 | ) -> Optional[requests.Response]: 137 | """ 138 | Log circuit evaluation metrics to the centralized logging server. 139 | """ 140 | try: 141 | data = { 142 | "validator_key": hotkey.ss58_address, 143 | "model_id": model_id, 144 | "model_name": model_name, 145 | "netuid": netuid, 146 | "weights_version": weights_version, 147 | "proof_system": proof_system, 148 | "circuit_type": circuit_type, 149 | "proof_size": proof_size, 150 | "timeout": timeout, 151 | "benchmark_weight": benchmark_weight, 152 | "total_verifications": total_verifications, 153 | "successful_verifications": successful_verifications, 154 | "min_response_time": min_response_time, 155 | "max_response_time": max_response_time, 156 | "avg_response_time": avg_response_time, 157 | "last_verification_time": last_verification_time, 158 | "last_block": last_block, 159 | "verification_ratio": verification_ratio, 160 | } 161 | 162 | input_bytes = json.dumps(data).encode("utf-8") 163 | signature = hotkey.sign(input_bytes) 164 | signature_str = base64.b64encode(signature).decode("utf-8") 165 | 166 | return session.post( 167 | EVAL_LOGGING_URL, 168 | data=input_bytes, 169 | headers={ 170 | "Content-Type": "application/json", 171 | "X-Request-Signature": signature_str, 172 | }, 173 | timeout=5, 174 | ) 175 | except requests.exceptions.RequestException as e: 176 | bt.logging.error(f"Failed to log eval metrics: {e}") 177 | return None 178 | -------------------------------------------------------------------------------- /neurons/utils/metrics_logger.py: -------------------------------------------------------------------------------- 1 | from utils import wandb_logger 2 | 3 | 4 | def log_circuit_metrics( 5 | response_times: list[float], verified_count: int, circuit_name: str 6 | ) -> None: 7 | """ 8 | Log circuit-specific metrics to wandb. 
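    Metrics are grouped under the circuit's name, so each circuit appears as
    its own section in WandB. A sketch of a call (hypothetical values):

        log_circuit_metrics([0.8, 1.1, 0.9], verified_count=3, circuit_name="model_abc123")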
9 | 10 | Args: 11 | response_times (list[float]): List of response times for successful verifications 12 | verified_count (int): Number of verified responses 13 | circuit_name (str): Name of the circuit 14 | """ 15 | if response_times: 16 | max_response_time = max(response_times) 17 | min_response_time = min(response_times) 18 | mean_response_time = sum(response_times) / len(response_times) 19 | median_response_time = sorted(response_times)[len(response_times) // 2] 20 | wandb_logger.safe_log( 21 | { 22 | f"{circuit_name}": { 23 | "max_response_time": max_response_time, 24 | "min_response_time": min_response_time, 25 | "mean_response_time": mean_response_time, 26 | "median_response_time": median_response_time, 27 | "total_responses": len(response_times), 28 | "verified_responses": verified_count, 29 | } 30 | } 31 | ) 32 | -------------------------------------------------------------------------------- /neurons/utils/rate_limiter.py: -------------------------------------------------------------------------------- 1 | from typing import Callable, ParamSpec, TypeVar 2 | from functools import wraps 3 | import time 4 | import asyncio 5 | 6 | P = ParamSpec("P") 7 | T = TypeVar("T") 8 | 9 | 10 | class RateLimiter: 11 | _instances = {} 12 | 13 | def __init__(self, period: float): 14 | self.period = period 15 | self.last_call = 0.0 16 | 17 | @classmethod 18 | def get_limiter(cls, func_name: str, period: float) -> "RateLimiter": 19 | if func_name not in cls._instances: 20 | cls._instances[func_name] = cls(period) 21 | return cls._instances[func_name] 22 | 23 | 24 | def with_rate_limit(period: float): 25 | def decorator(func: Callable[P, T]) -> Callable[P, T]: 26 | limiter = RateLimiter.get_limiter(func.__name__, period) 27 | last_result = None 28 | 29 | if asyncio.iscoroutinefunction(func): 30 | 31 | @wraps(func) 32 | async def async_wrapper(*args: P.args, **kwargs: P.kwargs) -> T: 33 | nonlocal last_result 34 | now = time.time() 35 | if now - limiter.last_call < period: 36 | return last_result 37 | 38 | limiter.last_call = now 39 | last_result = await func(*args, **kwargs) 40 | return last_result 41 | 42 | return async_wrapper 43 | else: 44 | 45 | @wraps(func) 46 | def sync_wrapper(*args: P.args, **kwargs: P.kwargs) -> T: 47 | nonlocal last_result 48 | now = time.time() 49 | if now - limiter.last_call < period: 50 | return last_result 51 | 52 | limiter.last_call = now 53 | last_result = func(*args, **kwargs) 54 | return last_result 55 | 56 | return sync_wrapper 57 | 58 | return decorator 59 | -------------------------------------------------------------------------------- /neurons/utils/system.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import shutil 4 | import functools 5 | import multiprocessing 6 | from bittensor import logging 7 | from constants import TEMP_FOLDER 8 | 9 | 10 | def restart_app(): 11 | """ 12 | Restart the application to apply the updated changes 13 | """ 14 | logging.success("App restarting due to auto-update") 15 | python = sys.executable 16 | # trunk-ignore(bandit/B606) 17 | os.execl(python, python, *sys.argv) 18 | 19 | 20 | def clean_temp_files(): 21 | """ 22 | Clean temporary files 23 | """ 24 | logging.info("Deleting temp folder...") 25 | folder_path = TEMP_FOLDER 26 | if os.path.exists(folder_path): 27 | logging.debug("Removing temp folder...") 28 | shutil.rmtree(folder_path) 29 | else: 30 | logging.info("Temp folder does not exist") 31 | 32 | 33 | def timeout_with_multiprocess_retry(seconds, 
retries=3): 34 | """Executes a function with timeout and automatic retries using multiprocessing. 35 | 36 | Args: 37 | seconds (int): Maximum execution time in seconds before timeout 38 | retries (int, optional): Number of retry attempts. Defaults to 3. 39 | 40 | Returns: 41 | Decorator that wraps function with timeout and retry logic 42 | """ 43 | 44 | def decorator(func): 45 | @functools.wraps(func) 46 | def wrapper(*args, **kwargs): 47 | for attempt in range(retries): 48 | logging.info(f"Attempt {attempt + 1} of {retries}") 49 | 50 | manager = multiprocessing.Manager() 51 | result_dict = manager.dict() 52 | process = multiprocessing.Process( 53 | target=lambda d: d.update({"result": func(*args, **kwargs)}), 54 | args=(result_dict,), 55 | ) 56 | 57 | try: 58 | process.start() 59 | process.join(seconds) 60 | 61 | if process.is_alive(): 62 | process.terminate() 63 | process.join() 64 | logging.warning( 65 | f"Function '{func.__name__}' timed out after {seconds} seconds" 66 | ) 67 | if attempt < retries - 1: 68 | continue 69 | return None 70 | 71 | result = result_dict.get("result") 72 | if result: 73 | return result 74 | 75 | if attempt < retries - 1: 76 | continue 77 | 78 | error_msg = ( 79 | "Another attempt will be made after the next request cycle." 80 | if func.__name__ == "update_weights" 81 | else f"Function returned {result}" 82 | ) 83 | logging.error(f"Failed after {retries} attempts. {error_msg}") 84 | return None 85 | 86 | finally: 87 | if process.is_alive(): 88 | process.terminate() 89 | manager.shutdown() 90 | 91 | return None 92 | 93 | return wrapper 94 | 95 | return decorator 96 | 97 | 98 | def get_temp_folder() -> str: 99 | if not os.path.exists(TEMP_FOLDER): 100 | os.makedirs(TEMP_FOLDER, exist_ok=True) 101 | return TEMP_FOLDER 102 | -------------------------------------------------------------------------------- /neurons/utils/wandb_logger.py: -------------------------------------------------------------------------------- 1 | """ 2 | Safe methods for WandB logging 3 | """ 4 | 5 | import bittensor as bt 6 | import psutil 7 | import torch 8 | import wandb 9 | import os 10 | import threading 11 | from queue import Queue 12 | from typing import Dict, Any 13 | 14 | ENTITY_NAME = "inferencelabs" 15 | PROJECT_NAME = "omron" 16 | WANDB_ENABLED = False 17 | _log_queue = Queue() 18 | _log_thread = None 19 | 20 | 21 | def _log_worker(): 22 | while True: 23 | try: 24 | data = _log_queue.get() 25 | if data is None: 26 | break 27 | wandb.log(data) 28 | except Exception as e: 29 | bt.logging.debug(f"Failed to log to WandB in worker thread: {e}") 30 | finally: 31 | _log_queue.task_done() 32 | 33 | 34 | def _ensure_log_thread(): 35 | global _log_thread 36 | if _log_thread is None or not _log_thread.is_alive(): 37 | _log_thread = threading.Thread(target=_log_worker, daemon=True) 38 | _log_thread.start() 39 | 40 | 41 | def safe_login(api_key): 42 | """ 43 | Attempts to log into WandB using a provided API key 44 | """ 45 | try: 46 | bt.logging.debug("Attempting to log into WandB using provided API Key") 47 | wandb.login(key=api_key) 48 | except Exception as e: 49 | bt.logging.error(e) 50 | bt.logging.error("Failed to login to WandB. 
Your run will not be logged.")
51 | 
52 | 
53 | def safe_init(name=None, wallet=None, metagraph=None, config=None):
54 |     """
55 |     Attempts to initialize WandB, and logs if unsuccessful
56 |     """
57 |     global WANDB_ENABLED
58 |     if config and config.disable_wandb:
59 |         bt.logging.warning("WandB logging disabled.")
60 |         WANDB_ENABLED = False
61 |         return
62 |     try:
63 |         bt.logging.debug("Attempting to initialize WandB")
64 |         config_dict = {}
65 | 
66 |         if wallet and metagraph and config:
67 |             config_dict.update(
68 |                 {
69 |                     "netuid": config.netuid,
70 |                     "hotkey": wallet.hotkey.ss58_address,
71 |                     "coldkey": wallet.coldkeypub.ss58_address,
72 |                     "uid": metagraph.hotkeys.index(wallet.hotkey.ss58_address),
73 |                     "cpu_physical": psutil.cpu_count(logical=False),
74 |                     "cpu_logical": psutil.cpu_count(logical=True),
75 |                     "cpu_freq": psutil.cpu_freq().max,
76 |                     "memory": psutil.virtual_memory().total,
77 |                 }
78 |             )
79 | 
80 |             # Log GPU specs if available
81 |             if torch.cuda.is_available():
82 |                 config_dict.update(
83 |                     {
84 |                         "gpu_name": torch.cuda.get_device_name(0),
85 |                         "gpu_memory": torch.cuda.get_device_properties(0).total_memory,
86 |                     }
87 |                 )
88 |         os.environ["WANDB_CONSOLE"] = "off"
89 |         wandb.init(
90 |             entity=ENTITY_NAME,
91 |             project=PROJECT_NAME
92 |             + ("-testnet" if config.subtensor.network == "test" else ""),
93 |             name=name,
94 |             config=config_dict,
95 |             reinit=True,
96 |         )
97 |         WANDB_ENABLED = True
98 |         _ensure_log_thread()
99 |     except Exception as e:
100 |         bt.logging.error(e)
101 |         bt.logging.error("Failed to initialize WandB. Your run will not be logged.")
102 |         WANDB_ENABLED = False
103 | 
104 | 
105 | def safe_log(data: Dict[str, Any]):
106 |     """
107 |     Safely log data to WandB
108 |     - Ignores request to log if WandB isn't configured
109 |     - Logs to WandB if it is configured
110 |     """
111 | 
112 |     if not WANDB_ENABLED:
113 |         bt.logging.debug("Skipping log due to WandB logging disabled.")
114 |         return
115 | 
116 |     try:
117 |         bt.logging.debug("Attempting to log data to WandB")
118 |         _log_queue.put(data)
119 |     except Exception as e:
120 |         bt.logging.debug("Failed to queue WandB log.")
121 |         bt.logging.debug(e)
122 | 
--------------------------------------------------------------------------------
/neurons/validator.py:
--------------------------------------------------------------------------------
1 | import traceback
2 | 
3 | # isort: off
4 | import cli_parser  # <- this needs to stay before the bittensor import
5 | 
6 | import bittensor as bt
7 | 
8 | # isort: on
9 | 
10 | from _validator.validator_session import ValidatorSession
11 | from constants import Roles
12 | from utils import run_shared_preflight_checks
13 | 
14 | if __name__ == "__main__":
15 |     cli_parser.init_config(Roles.VALIDATOR)
16 |     run_shared_preflight_checks(Roles.VALIDATOR)
17 | 
18 |     try:
19 |         # Initialize the circuit store and load external models
20 |         from deployment_layer.circuit_store import circuit_store
21 | 
22 |         circuit_store.load_circuits()
23 | 
24 |         bt.logging.info("Creating validator session...")
25 |         validator_session = ValidatorSession()
26 |         bt.logging.debug("Running main loop...")
27 |         validator_session.run()
28 |     except Exception as e:
29 |         bt.logging.error(f"Critical error while attempting to run validator: {e}")
30 |         traceback.print_exc()
31 | 
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools >= 61.0"]
3 | build-backend = "setuptools.build_meta"
4 | 
5 | 
[project] 6 | name = "omron" 7 | dynamic = ["version"] 8 | description = "" 9 | readme = "README.md" 10 | requires-python = "==3.12.*" 11 | dependencies = [ 12 | "aiohttp>=3.10.11", 13 | "async-substrate-interface>=1.0.8", 14 | "attrs==24.3.0", 15 | "bittensor==9.4.0", 16 | "boto3>=1.36.13", 17 | "botocore>=1.37.19", 18 | "ezkl==22.0.1", 19 | "fastapi==0.110.3", 20 | "gitpython>=3.1.44", 21 | "jsonrpcserver>=5.0.9", 22 | "matplotlib>=3.10.1", 23 | "numpy==2.0.2", 24 | "onnxruntime>=1.21.0", 25 | "opencv-contrib-python-headless>=4.11.0.86", 26 | "opencv-python>=4.11.0.86", 27 | "packaging==24.2", 28 | "prometheus_client==0.21.1", 29 | "psutil==6.1.1", 30 | "pydantic==2.10.6", 31 | "pyopenssl>=25.0.0", 32 | "requests==2.32.3", 33 | "rich==13.8.1", 34 | "substrate-interface>=1.7.11", 35 | "torch==2.4.1", 36 | "tqdm>=4.67.1", 37 | "urllib3>=2.3.0", 38 | "uvicorn==0.34.0", 39 | "wandb==0.19.8", 40 | "websocket-client>=1.8.0", 41 | ] 42 | 43 | [dependency-groups] 44 | dev = [ 45 | "black>=24.10.0", 46 | "bpython>=0.25", 47 | "debugpy>=1.8.13", 48 | "ipykernel>=6.29.5", 49 | "nbdev>=2.3.34", 50 | "onnx>=1.17.0", 51 | "pytest>=8.0.0", 52 | "snoop>=0.6.0", 53 | "tach>=0.27.2", 54 | ] 55 | 56 | [tool.setuptools] 57 | package-dir = { "omron" = "neurons" } 58 | packages = ["omron"] 59 | 60 | [tool.setuptools.dynamic] 61 | version = { attr = "omron.__version__" } 62 | 63 | [tool.pytest.ini_options] 64 | testpaths = ["tests"] 65 | python_files = ["test_*.py"] 66 | python_classes = ["Test*"] 67 | python_functions = ["test_*"] 68 | addopts = "-v --tb=short" 69 | pythonpath = [".", "neurons"] 70 | 71 | [tool.uv.sources] 72 | torch = { index = "pytorch" } 73 | 74 | [[tool.uv.index]] 75 | name = "pytorch" 76 | url = "https://download.pytorch.org/whl/cpu" 77 | explicit = true 78 | -------------------------------------------------------------------------------- /setup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eo pipefail 4 | 5 | 6 | NODE_VERSION="20" 7 | INSTALL_PATH="./omron" 8 | if git rev-parse --is-inside-work-tree &>/dev/null; then 9 | INSTALL_PATH="." 10 | fi 11 | 12 | 13 | BREW_PACKAGES=( 14 | "node@${NODE_VERSION}" 15 | "jq" 16 | "aria2" 17 | "pkg-config" 18 | "certifi" 19 | "ca-certificates" 20 | "openssl" 21 | "pipx" 22 | ) 23 | 24 | APT_PACKAGES=( 25 | "jq" 26 | "aria2" 27 | "pkg-config" 28 | "libssl-dev" 29 | "openssl" 30 | "pipx" 31 | ) 32 | 33 | case "$(uname)" in 34 | "Darwin") 35 | if ! command -v brew &>/dev/null; then 36 | /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" 37 | eval "$(/opt/homebrew/bin/brew shellenv)" 38 | fi 39 | 40 | echo "Installing brew packages..." 41 | brew update 42 | for pkg in "${BREW_PACKAGES[@]}"; do 43 | brew install "$pkg" || brew upgrade "$pkg" 44 | done 45 | 46 | brew link --force "node@${NODE_VERSION}" 47 | 48 | npm config set cafile /etc/ssl/cert.pem 49 | ;; 50 | 51 | "Linux") 52 | echo "Installing apt packages..." 53 | sudo apt update 54 | sudo apt install -y "${APT_PACKAGES[@]}" 55 | 56 | curl -fsSL "https://deb.nodesource.com/setup_${NODE_VERSION}.x" | sudo -E bash - 57 | sudo apt install -y nodejs 58 | ;; 59 | 60 | *) 61 | echo "Unsupported OS" 62 | exit 1 63 | ;; 64 | esac 65 | 66 | echo "Checking for SnarkJS..." 67 | local_snarkjs_dir="${HOME}/.snarkjs" 68 | local_snarkjs_path="${local_snarkjs_dir}/node_modules/.bin/snarkjs" 69 | if ! 
[ -x "${local_snarkjs_path}" ]; then
70 |     echo "SnarkJS 0.7.4 not found in local directory. Installing..."
71 |     mkdir -p "${local_snarkjs_dir}"
72 |     npm install --prefix "${local_snarkjs_dir}" snarkjs@0.7.4
73 |     echo "SnarkJS has been installed in the local directory."
74 | fi
75 | 
76 | echo "Installing pm2..."
77 | sudo npm install -g pm2
78 | 
79 | pipx ensurepath
80 | export PATH="$HOME/.local/bin:$PATH"
81 | 
82 | echo "Installing uv..."
83 | pipx install uv
84 | 
85 | echo "Installing btcli..."
86 | "$HOME/.local/bin/uv" tool install --python 3.12 bittensor-cli
87 | 
88 | if [[ ! -d ${INSTALL_PATH} ]]; then
89 |     echo "Cloning omron-subnet repository..."
90 |     if ! git clone https://github.com/inference-labs-inc/omron-subnet.git "${INSTALL_PATH}"; then
91 |         echo "Failed to clone repository. Check your internet connection and try again."
92 |         exit 1
93 |     fi
94 | fi
95 | 
96 | cd "${INSTALL_PATH}" || {
97 |     echo "Failed to change to ${INSTALL_PATH} directory"
98 |     exit 1
99 | }
100 | 
101 | "$HOME/.local/bin/uv" sync --frozen --no-dev
102 | 
103 | echo "
104 | @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
105 | @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
106 | @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
107 | @@@@@@@@@@@%#%@@@@@@@@@@@@@@@@@@
108 | @@@@@@@@*.......*@@@@@@@@@@@@@@@
109 | @@@@@@@+.........+@@@@@@@@@@@@@@
110 | @@@@@@@:.....-#%%+..:=%@@@@@@@@@
111 | @@@@@@@*...:#@@@@=.....=@@@@@@@@
112 | @@@@@@@@#-.#@@@#-.......+@@@@@@@
113 | @@@@@@@@@@*.-:..........:@@@@@@@
114 | @@@@@@@@@@%.............-@@@@@@@
115 | @@@@@@@@@@@*...........:%@@@@@@@
116 | @@@@@@@@@@@@@-........*@@@@@@@@@
117 | @@@@@@@@@@@@@@@%###%%@@@@@@@@@@@
118 | @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
119 | @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
120 | @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
121 | ██████╗ ███╗ ███╗██████╗ ██████╗ ███╗ ██╗
122 | ██╔═══██╗████╗ ████║██╔══██╗██╔═══██╗████╗ ██║
123 | ██║ ██║██╔████╔██║██████╔╝██║ ██║██╔██╗ ██║
124 | ██║ ██║██║╚██╔╝██║██╔══██╗██║ ██║██║╚██╗██║
125 | ╚██████╔╝██║ ╚═╝ ██║██║ ██║╚██████╔╝██║ ╚████║
126 | ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝
127 | ███████╗██╗ ██╗██████╗ ███╗ ██╗███████╗████████╗
128 | ██╔════╝██║ ██║██╔══██╗████╗ ██║██╔════╝╚══██╔══╝
129 | ███████╗██║ ██║██████╔╝██╔██╗ ██║█████╗ ██║
130 | ╚════██║██║ ██║██╔══██╗██║╚██╗██║██╔══╝ ██║
131 | ███████║╚██████╔╝██████╔╝██║ ╚████║███████╗ ██║
132 | ╚══════╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═══╝╚══════╝ ╚═╝
133 | "
134 | echo "🥩 Setup complete! 🥩"
135 | echo "Next steps:"
136 | echo "1. Re-login for PATH changes to take effect, or run 'source ~/.bashrc' or 'source ~/.zshrc'"
137 | echo "2. Check ${INSTALL_PATH}/docs/shared_setup_steps.md to set up your wallet and register on the subnet"
138 | echo "3. cd ${INSTALL_PATH}"
139 | echo "4. make WALLET_NAME=<wallet name> WALLET_HOTKEY=<wallet hotkey>"
140 | 
--------------------------------------------------------------------------------
/sync_model_files.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | # Search for models in the deployment layer
4 | MODEL_DIR="neurons/deployment_layer"
5 | for MODEL_FOLDER in $(find "$MODEL_DIR" -maxdepth 1 -type d -name 'model_*'); do
6 |     # See if the model has metadata attached
7 |     METADATA_FILE="${MODEL_FOLDER}/metadata.json"
8 | 
9 |     if [ ! -f "$METADATA_FILE" ]; then
10 |         echo "Error: Metadata file not found at $METADATA_FILE"
11 |         continue
12 |     fi
13 |     # If the model has a metadata file, check external_files to determine which files are needed
14 |     external_files=$(jq -r '.external_files | to_entries[] | "\(.key) \(.value)"' "$METADATA_FILE")
15 | 
16 |     if [ $? -ne 0 ]; then
17 |         echo "Error: Failed to parse JSON from $METADATA_FILE"
18 |         continue
19 |     fi
20 | 
21 |     while IFS=' ' read -r key url; do
22 |         # If the external file already exists then do nothing
23 |         if [ -f "${MODEL_FOLDER}/${key}" ]; then
24 |             echo "File ${key} already downloaded at ${MODEL_FOLDER}/${key}, skipping..."
25 |             continue
26 |         fi
27 |         # If the file doesn't exist, pull it from the URL specified
28 |         echo "Downloading ${url} to ${MODEL_FOLDER}/${key}..."
29 |         aria2c "${url}" -d "${MODEL_FOLDER}" -o "${key}"
30 |         # If the download fails, echo the error and skip this file
31 |         if [ $? -ne 0 ]; then
32 |             echo "Error: Failed to download ${url} to ${MODEL_FOLDER}/${key}"
33 |             continue
34 |         fi
35 |     done <<< "$external_files"
36 | done