├── .coveragerc ├── .dockerignore ├── .github ├── PULL_REQUEST_TEMPLATE.md ├── dependabot.yml ├── openssl-ci.cnf └── workflows │ ├── build.yml │ ├── code-quality.yml │ ├── nix-ci.yml │ ├── pytest.yml │ └── test-keygen.yml ├── .gitignore ├── .readthedocs.yml ├── AUTHORS.rst ├── CHANGELOG.rst ├── LICENSE.txt ├── README.md ├── alembic.ini ├── code-of-conduct.md ├── deployment ├── __init__.py ├── docker-build │ ├── README.md │ ├── build.sh │ ├── config.yml │ ├── dev │ │ ├── Dockerfile │ │ ├── config.yml │ │ └── docker-compose.yml │ ├── docker-compose.yml │ ├── openssl.cnf.patch │ ├── publish-alpha.sh │ ├── publish.sh │ ├── pyaleph.dockerfile │ └── test │ │ ├── Dockerfile │ │ ├── config.yml │ │ └── docker-compose.yml ├── migrations │ ├── README │ ├── env.py │ ├── script.py.mako │ └── versions │ │ ├── 0001_dbcc4e3ff1e0_initial_migration_to_postgres.py │ │ ├── 0002_7365b4898472_programs.py │ │ ├── 0003_89d74331994c_balances.py │ │ ├── 0004_82e0f9c7a952_error_codes.py │ │ ├── 0005_68fd4bed8a8e_program_versions.py │ │ ├── 0006_7b547d707a2f_exponential_retries.py │ │ ├── 0007_0bfde82697c8_balance_views.py │ │ ├── 0008_eef5f95853bf_trusted_messages.py │ │ ├── 0009_8edf69c47884_file_pin_owner_index.py │ │ ├── 0010_8a5eaab15d40_address_stats_view.py │ │ ├── 0011_b8e019bf7710_split_sync_and_message_chain_sync_.py │ │ ├── 0012_7a7704f044db_indexer_sync_status.py │ │ ├── 0013_7ab62bd0a3b1_messages_post_index.py │ │ ├── 0014_daa92b500049_message_content_in_file_pins.py │ │ ├── 0015_039c56d3b33e_file_size_not_null.py │ │ ├── 0016_77e68941d36c_fix_ipfs_files_size.py │ │ ├── 0017_f9fa39b6bdef_vm_instances.py │ │ ├── 0018_7bcb8e5fe186_fix_vm_cost_view.py │ │ ├── 0019_3bf484f2cc95_tx_hash_in_rejected_messages.py │ │ ├── 0020_88a25ae08ebf_grace_period_file_pins.py │ │ ├── 0021_08602db6c78f_ccn_metric_view.py │ │ ├── 0022_e682fc8f9506_fix_vm_costs_view.py │ │ ├── 0023_add_trusted_execution_fields_to_vms_.py │ │ ├── 0024__update_price_instance.py │ │ ├── 0025_update_vms_tables.py │ │ ├── 0026_d3bba5c2bfa0_fix_join_exclude_vm_without_volume.py │ │ ├── 0027_bafd49315934_add_pending_messages_origin.py │ │ ├── 0028_edb195b0ed62_fix_add_unique_constraint_on_pending_.py │ │ ├── 0029_46f7e55ff55c_fix_uniqueconstraint_should_have_sender_.py │ │ ├── 0030_9e600f404aa1_add_new_error_code_on_db.py │ │ ├── 0031_d8e9852e5775_fix_compute_message_status_on_vm_cost_.py │ │ ├── 0032_a3ef27f0db81_fix_duplicated_forgotten_messages.py │ │ ├── 0033_1c06d0ade60c_calculate_costs_statically.py │ │ └── 0034_8ece21fbeb47_balance_tracker.py ├── samples │ ├── README.md │ ├── docker-compose │ │ ├── README.md │ │ ├── docker-compose.yml │ │ └── sample-config.yml │ └── docker-monitoring │ │ ├── README.md │ │ ├── docker-compose.yml │ │ ├── grafana │ │ ├── dashboard.yaml │ │ ├── dashboards │ │ │ ├── aleph │ │ │ │ └── dashboard.json │ │ │ └── node │ │ │ │ └── dashboard.json │ │ └── prometheus.yaml │ │ ├── prometheus.yml │ │ └── sample-config.yml └── scripts │ ├── extract_requirements.py │ ├── get_config_value.py │ ├── run_aleph_ccn.sh │ ├── run_aleph_ccn_api.sh │ ├── sync_initial_messages.py │ ├── wait-for-it.sh │ └── wait_for_services.sh ├── docs ├── Makefile ├── _static │ └── .gitignore ├── architecture.rst ├── authors.rst ├── changelog.rst ├── conf.py ├── figures │ ├── PyAleph architecture.graphml │ ├── architecture-stack.pdf │ └── architecture-stack.png ├── guides │ ├── index.rst │ ├── install.rst │ ├── private_net.rst │ └── upgrade.rst ├── index.rst ├── license.rst ├── metrics.rst ├── node-synchronisation.rst ├── protocol │ ├── 
authorizations.rst │ ├── index.rst │ ├── messages │ │ ├── aggregate.rst │ │ ├── forget.rst │ │ ├── index.rst │ │ ├── post.rst │ │ ├── program.rst │ │ └── store.rst │ └── payment.rst └── requirements.txt ├── mypy.ini ├── pyproject.toml ├── shell.nix ├── src └── aleph │ ├── __init__.py │ ├── api_entrypoint.py │ ├── cache.py │ ├── chains │ ├── __init__.py │ ├── abc.py │ ├── assets │ │ ├── ethereum_sc.sol │ │ └── ethereum_sc_abi.json │ ├── avalanche.py │ ├── bsc.py │ ├── chain_data_service.py │ ├── common.py │ ├── connector.py │ ├── cosmos.py │ ├── ethereum.py │ ├── evm.py │ ├── indexer_reader.py │ ├── nuls.py │ ├── nuls2.py │ ├── nuls_aleph_sdk.py │ ├── signature_verifier.py │ ├── solana.py │ ├── substrate.py │ └── tezos.py │ ├── cli │ ├── __init__.py │ └── args.py │ ├── commands.py │ ├── config.py │ ├── db │ ├── __init__.py │ ├── accessors │ │ ├── __init__.py │ │ ├── aggregates.py │ │ ├── balances.py │ │ ├── chains.py │ │ ├── cost.py │ │ ├── cron_jobs.py │ │ ├── files.py │ │ ├── messages.py │ │ ├── metrics.py │ │ ├── peers.py │ │ ├── pending_messages.py │ │ ├── pending_txs.py │ │ ├── posts.py │ │ └── vms.py │ ├── connection.py │ └── models │ │ ├── __init__.py │ │ ├── account_costs.py │ │ ├── aggregates.py │ │ ├── balances.py │ │ ├── base.py │ │ ├── chains.py │ │ ├── cron_jobs.py │ │ ├── files.py │ │ ├── messages.py │ │ ├── peers.py │ │ ├── pending_messages.py │ │ ├── pending_txs.py │ │ ├── posts.py │ │ └── vms.py │ ├── exceptions.py │ ├── handlers │ ├── __init__.py │ ├── content │ │ ├── __init__.py │ │ ├── aggregate.py │ │ ├── content_handler.py │ │ ├── forget.py │ │ ├── post.py │ │ ├── store.py │ │ └── vm.py │ └── message_handler.py │ ├── jobs │ ├── __init__.py │ ├── cron │ │ ├── balance_job.py │ │ └── cron_job.py │ ├── fetch_pending_messages.py │ ├── job_utils.py │ ├── process_pending_messages.py │ ├── process_pending_txs.py │ └── reconnect_ipfs.py │ ├── network.py │ ├── permissions.py │ ├── schemas │ ├── __init__.py │ ├── api │ │ ├── __init__.py │ │ ├── accounts.py │ │ ├── costs.py │ │ └── messages.py │ ├── base_messages.py │ ├── chains │ │ ├── __init__.py │ │ ├── indexer_response.py │ │ ├── sync_events.py │ │ ├── tezos_indexer_response.py │ │ └── tx_context.py │ ├── cost_estimation_messages.py │ ├── message_confirmation.py │ ├── message_content.py │ └── pending_messages.py │ ├── services │ ├── __init__.py │ ├── cache │ │ ├── __init__.py │ │ ├── materialized_views.py │ │ └── node_cache.py │ ├── cost.py │ ├── ipfs │ │ ├── __init__.py │ │ ├── common.py │ │ ├── pubsub.py │ │ └── service.py │ ├── keys.py │ ├── p2p │ │ ├── __init__.py │ │ ├── http.py │ │ ├── jobs.py │ │ ├── manager.py │ │ ├── peers.py │ │ ├── protocol.py │ │ └── pubsub.py │ ├── peers │ │ ├── __init__.py │ │ ├── monitor.py │ │ └── publish.py │ ├── storage │ │ ├── __init__.py │ │ ├── engine.py │ │ ├── fileystem_engine.py │ │ └── garbage_collector.py │ └── utils.py │ ├── settings.py │ ├── storage.py │ ├── toolkit │ ├── __init__.py │ ├── aggregates.py │ ├── batch.py │ ├── constants.py │ ├── costs.py │ ├── exceptions.py │ ├── json.py │ ├── libp2p_stubs │ │ ├── README.md │ │ ├── __init__.py │ │ ├── crypto │ │ │ ├── __init__.py │ │ │ ├── keys.py │ │ │ ├── pb │ │ │ │ ├── __init__.py │ │ │ │ ├── crypto.proto │ │ │ │ ├── crypto_pb2.py │ │ │ │ └── crypto_pb2.pyi │ │ │ └── rsa.py │ │ └── peer │ │ │ ├── __init__.py │ │ │ ├── id.py │ │ │ └── peerinfo.py │ ├── logging.py │ ├── monitoring.py │ ├── rabbitmq.py │ ├── range.py │ ├── shield.py │ ├── split.py │ ├── timer.py │ └── timestamp.py │ ├── types │ ├── __init__.py │ ├── chain_sync.py │ ├── 
channel.py │ ├── cost.py │ ├── db_session.py │ ├── files.py │ ├── message_processing_result.py │ ├── message_status.py │ ├── protocol.py │ ├── settings.py │ ├── sort_order.py │ └── vms.py │ ├── utils.py │ └── web │ ├── __init__.py │ ├── controllers │ ├── __init__.py │ ├── accounts.py │ ├── aggregates.py │ ├── app_state_getters.py │ ├── channels.py │ ├── info.py │ ├── ipfs.py │ ├── main.py │ ├── messages.py │ ├── metrics.py │ ├── p2p.py │ ├── posts.py │ ├── prices.py │ ├── programs.py │ ├── routes.py │ ├── storage.py │ ├── utils.py │ └── version.py │ ├── static │ ├── IBM_Plex_Mono │ │ ├── IBMPlexMono-Bold.ttf │ │ └── IBMPlexMono-Light.ttf │ ├── __init__.py │ ├── aleph-cloud-v1.svg │ ├── aleph-cloud-v2.svg │ └── aleph-logo.svg │ └── templates │ ├── __init__.py │ └── index.html └── tests ├── api ├── __init__.py ├── conftest.py ├── fixtures │ ├── fixture_aggregates.json │ ├── fixture_messages.json │ ├── fixture_posts.json │ └── test-metric.json ├── test_aggregates.py ├── test_balance.py ├── test_get_message.py ├── test_list_messages.py ├── test_new_metric.py ├── test_p2p.py ├── test_posts.py ├── test_storage.py ├── test_version.py └── utils │ └── __init__.py ├── balances └── test_balances.py ├── chains ├── test_aleph_indexer.py ├── test_avalanche.py ├── test_chain_data_service.py ├── test_common.py ├── test_confirmation.py ├── test_cosmos.py ├── test_ethereum.py ├── test_evm.py ├── test_nuls2.py ├── test_solana.py ├── test_substrate.py └── test_tezos.py ├── conftest.py ├── db ├── test_accounts.py ├── test_aggregates.py ├── test_chains.py ├── test_cost.py ├── test_error_codes.py ├── test_files.py ├── test_messages.py ├── test_peers.py ├── test_pending_messages_db.py ├── test_pending_txs.py ├── test_posts.py └── test_programs_db.py ├── helpers ├── in_memory_storage_engine.py └── message_test_helpers.py ├── jobs ├── test_balance_job.py ├── test_check_removing_messages.py └── test_cron_job.py ├── message_processing ├── __init__.py ├── conftest.py ├── fixtures │ ├── test-data-aggregates.json │ ├── test-data-forgotten-messages.json │ ├── test-data-pending-messaging.json │ ├── test-data-pending-tx-messages.json │ └── test-data-posts.json ├── load_fixtures.py ├── test_process_aggregates.py ├── test_process_confidential.py ├── test_process_forgets.py ├── test_process_forgotten_messages.py ├── test_process_instances.py ├── test_process_pending_messages.py ├── test_process_pending_txs.py ├── test_process_posts.py ├── test_process_programs.py └── test_process_stores.py ├── permissions └── test_check_sender_authorization.py ├── schemas └── test_pending_messages.py ├── services ├── test_cost_service.py ├── test_garbage_collector.py ├── test_ipfs_service.py ├── test_node_cache.py └── test_utils.py ├── storage ├── test_get_content.py └── test_store_message.py ├── test_network.py ├── toolkit ├── test_batch.py ├── test_ignore_exceptions.py ├── test_json.py ├── test_range.py ├── test_timer.py └── test_timestamp.py └── web └── controllers ├── fixtures └── messages │ └── program.json ├── test_metrics.py ├── test_programs.py └── test_pub_json.py /.coveragerc: -------------------------------------------------------------------------------- 1 | # .coveragerc to control coverage.py 2 | [run] 3 | branch = True 4 | source = aleph 5 | # omit = bad_file.py 6 | 7 | [paths] 8 | source = 9 | src/ 10 | */site-packages/ 11 | 12 | [report] 13 | # Regexes for lines to exclude from consideration 14 | exclude_lines = 15 | # Have to re-enable the standard pragma 16 | pragma: no cover 17 | 18 | # Don't complain about missing 
debug-only code: 19 | def __repr__ 20 | if self\.debug 21 | 22 | # Don't complain if tests don't hit defensive assertion code: 23 | raise AssertionError 24 | raise NotImplementedError 25 | 26 | # Don't complain if non-runnable code isn't run: 27 | if 0: 28 | if __name__ == .__main__.: 29 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | # Temporary and binary files 2 | *~ 3 | *.py[cod] 4 | *.so 5 | *.cfg 6 | !.isort.cfg 7 | !setup.cfg 8 | *.orig 9 | *.log 10 | *.pot 11 | __pycache__/* 12 | .cache/* 13 | .*.swp 14 | */.ipynb_checkpoints/* 15 | 16 | 17 | # data folder 18 | data* 19 | 20 | # Project files 21 | .ropeproject 22 | .project 23 | .pydevproject 24 | .settings 25 | .idea 26 | tags 27 | 28 | # Package files 29 | *.egg 30 | *.eggs/ 31 | .installed.cfg 32 | *.egg-info 33 | 34 | # Unittest and coverage 35 | htmlcov/* 36 | .coverage 37 | .tox 38 | junit.xml 39 | coverage.xml 40 | .pytest_cache/ 41 | 42 | # Build and docs folder/files 43 | build/* 44 | dist/* 45 | sdist/* 46 | docs/api/* 47 | docs/_rst/* 48 | docs/_build/* 49 | cover/* 50 | MANIFEST 51 | .github/ 52 | 53 | # Per-project virtualenvs 54 | .venv*/ 55 | venv*/ 56 | 57 | # User configuration with secrets 58 | /config.yml 59 | node-secret.key 60 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | Explain what problem this PR is resolving 2 | 3 | Related ClickUp or Jira tickets: ALEPH-XXX 4 | 5 | ## Self-proofreading checklist 6 | 7 | - [ ] Is my code clear enough and well documented? 8 | - [ ] Are my files well typed? 9 | - [ ] New translations have been added or updated if new strings have been introduced in the frontend 10 | - [ ] Database migration files are included 11 | - [ ] Are there enough tests? 12 | - [ ] Documentation has been included (for new features) 13 | 14 | ## Changes 15 | 16 | Explain the changes that were made. The idea is not to list all the changes exhaustively (GitHub already provides a full diff), but to help the reviewers better understand: 17 | - which specific file changes go together, e.g. when creating a table in the front-end, there is usually a config file that goes with it 18 | - the reasoning behind some changes, e.g. deleted files because they are now redundant 19 | - the behaviour to expect, e.g. the tooltip has a purple background color because the client likes it that way, or a key in the API response was changed to be consistent with other endpoints 20 | 21 | ## How to test 22 | 23 | Explain how to test your PR. 24 | If a specific config is required, explain it here (account, data entry, ...) 25 | 26 | ## Screenshots / video 27 | 28 | Upload screenshots or videos showing the changes here, if relevant. 29 | 30 | ## Notes 31 | 32 | Things that the reviewers should know: known bugs that are out of the scope of the PR, other trade-offs that were made. 33 | If the PR depends on a PR in another repo, or merges into another PR (instead of 
main), it should also be mentioned here 34 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: 2 3 | 4 | 5 | updates: 6 | - package-ecosystem: "pip" 7 | directory: "/" 8 | open-pull-requests-limit: 10 9 | schedule: 10 | interval: "daily" 11 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Build 3 | 4 | 5 | on: 6 | push: 7 | branches: 8 | - dev 9 | - main 10 | pull_request: 11 | branches: 12 | - "*" 13 | 14 | 15 | jobs: 16 | build: 17 | runs-on: ubuntu-24.04 18 | steps: 19 | # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it 20 | - uses: actions/checkout@v2 21 | 22 | - name: Log in to registry 23 | run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} 24 | --password-stdin 25 | 26 | - name: Download Docker cache image (if available) 27 | run: docker pull ghcr.io/$GITHUB_REPOSITORY/build-cache || true 28 | 29 | - name: Build the Docker image 30 | run: | 31 | git fetch --prune --unshallow --tags 32 | docker build . -t pyaleph-node:${GITHUB_REF##*/} -f deployment/docker-build/pyaleph.dockerfile --cache-from=ghcr.io/$GITHUB_REPOSITORY/build-cache 33 | 34 | - name: Push the image to the cache 35 | # It's not possible to push packages from fork PRs. 36 | if: (github.ref == 'refs/heads/main' || github.event.pull_request.head.repo.full_name 37 | == github.repository) && github.actor != 'dependabot[bot]' 38 | run: |- 39 | docker tag pyaleph-node:${GITHUB_REF##*/} ghcr.io/$GITHUB_REPOSITORY/build-cache 40 | docker push ghcr.io/$GITHUB_REPOSITORY/build-cache 41 | -------------------------------------------------------------------------------- /.github/workflows/code-quality.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Test code quality 3 | 4 | 5 | on: 6 | push: 7 | branches: 8 | - "*" 9 | 10 | 11 | jobs: 12 | code-quality: 13 | runs-on: ubuntu-24.04 14 | 15 | steps: 16 | - uses: actions/checkout@v4 17 | 18 | - name: Install pip and hatch 19 | run: | 20 | sudo apt-get install -y python3-pip 21 | pip3 install hatch hatch-vcs 22 | 23 | - name: Cache dependencies 24 | uses: actions/cache@v4 25 | with: 26 | path: ~/.cache/pip 27 | key: ${{ runner.os }}-code-quality-${{ hashFiles('pyproject.toml') }} 28 | restore-keys: | 29 | ${{ runner.os }}-code-quality-${{ hashFiles('pyproject.toml') }} 30 | 31 | - name: Run Hatch lint 32 | run: hatch run linting:all 33 | -------------------------------------------------------------------------------- /.github/workflows/nix-ci.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Test nix-shell 3 | 4 | 5 | on: 6 | push: 7 | branches: 8 | - '*' 9 | 10 | 11 | jobs: 12 | nix-shell: 13 | continue-on-error: true 14 | strategy: 15 | matrix: 16 | os: [ubuntu-latest] 17 | # os: [ubuntu-latest, macos-latest] 18 | fail-fast: false 19 | runs-on: ${{ matrix.os }} 20 | 21 | steps: 22 | - uses: actions/checkout@v4 23 | with: 24 | # Fetch the whole history for all tags and branches (required for aleph.__version__) 25 | fetch-depth: 0 26 | 27 | - name: Set up Nix 28 | uses: cachix/install-nix-action@v27 29 | with: 30 | # Use channel nixos-24.05 for Linux and nixpkgs-24.05-darwin for macOS 31 | nix_path: 
nixpkgs=channel:${{ matrix.os == 'macos-latest' && 'nixpkgs-24.05-darwin' || 'nixos-24.05' }} 32 | 33 | - name: Disable incompatible dependency of `nuls2` 34 | run: | 35 | sed -i.bak '/py-ed25519-bindings/ s/^[[:space:]]*/# /' pyproject.toml 36 | rm pyproject.toml.bak 37 | 38 | - name: Run tests 39 | run: nix-shell --run "hatch run testing:test" 40 | -------------------------------------------------------------------------------- /.github/workflows/pytest.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Test/Coverage with Python 3 | 4 | 5 | on: 6 | push: 7 | branches: 8 | - dev 9 | - main 10 | pull_request: 11 | branches: 12 | - "*" 13 | 14 | 15 | jobs: 16 | tests: 17 | runs-on: ubuntu-24.04 18 | services: 19 | postgres: 20 | image: postgres:15.1 21 | ports: 22 | - 5432:5432 23 | env: 24 | POSTGRES_USER: aleph 25 | POSTGRES_PASSWORD: decentralize-everything 26 | POSTGRES_DB: aleph 27 | redis: 28 | image: redis:7.0.10 29 | ports: 30 | - 127.0.0.1:6379:6379 31 | 32 | steps: 33 | - uses: actions/checkout@v4 34 | with: 35 | # Fetch the whole history for all tags and branches (required for aleph.__version__) 36 | fetch-depth: 0 37 | 38 | - name: Set up Python 3.12 39 | id: setup-python 40 | uses: actions/setup-python@v2 41 | with: 42 | python-version: 3.12 43 | 44 | - name: Install latest Rust nightly toolchain 45 | uses: actions-rs/toolchain@v1 46 | with: 47 | toolchain: nightly 48 | override: true 49 | 50 | - name: Set rust to nightly 51 | run: | 52 | rustup default nightly # Required to build some dependencies 53 | 54 | - name: Cache dependencies 55 | uses: actions/cache@v4 56 | with: 57 | path: ~/.cache/pip 58 | key: ${{ runner.os }}-pytest-${{ hashFiles('pyproject.toml') }} 59 | restore-keys: | 60 | ${{ runner.os }}-pytest-${{ hashFiles('pyproject.toml') }} 61 | - name: Install needed dependencies 62 | run: | 63 | sudo apt-get update 64 | sudo apt-get install -y libpq-dev libsodium-dev libgmp-dev 65 | - run: | 66 | pip install hatch coverage 67 | 68 | - run: | 69 | sudo cp .github/openssl-ci.cnf /etc/ssl/openssl.cnf 70 | export OPENSSL_CONF=/etc/ssl/openssl.cnf 71 | touch config.yml # Fake config file for alembic 72 | # TODO: determine why ResourceWarning warnings occur in some tests. 73 | 74 | - run: | 75 | hatch run testing:cov 76 | 77 | - uses: codecov/codecov-action@v4.0.1 78 | with: 79 | token: ${{ secrets.CODECOV_TOKEN }} 80 | slug: aleph-im/pyaleph 81 | -------------------------------------------------------------------------------- /.github/workflows/test-keygen.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Test key generation instructions from the README by booting the Docker image 3 | 4 | 5 | on: 6 | push: 7 | branches: 8 | - main 9 | pull_request: 10 | branches: 11 | - "*" 12 | 13 | 14 | jobs: 15 | generate-keys: 16 | runs-on: ubuntu-24.04 17 | steps: 18 | - uses: actions/checkout@v4 19 | 20 | - name: Log in to registry 21 | run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} 22 | --password-stdin 23 | 24 | - name: Download Docker cache image (if available) 25 | run: docker pull ghcr.io/$GITHUB_REPOSITORY/build-cache || true 26 | 27 | - name: Build the Docker image 28 | run: | 29 | git fetch --prune --unshallow --tags 30 | docker build . 
-t alephim/pyaleph-node:${GITHUB_REF##*/} -f deployment/docker-build/pyaleph.dockerfile --cache-from=ghcr.io/$GITHUB_REPOSITORY/build-cache 31 | 32 | - name: Tag the image 33 | run: | 34 | docker tag alephim/pyaleph-node:${GITHUB_REF##*/} ghcr.io/$GITHUB_REPOSITORY/build-cache 35 | 36 | - name: Generate keys 37 | run: | 38 | mkdir keys 39 | docker run --rm --user root --entrypoint "" -v $(pwd)/keys:/opt/pyaleph/keys alephim/pyaleph-node:${GITHUB_REF##*/} chown aleph:aleph /opt/pyaleph/keys 40 | docker run --rm --entrypoint "" -v $(pwd)/keys:/opt/pyaleph/keys alephim/pyaleph-node:${GITHUB_REF##*/} pyaleph --gen-keys --key-dir /opt/pyaleph/keys 41 | 42 | - name: Ensure keys exist 43 | run: |- 44 | ls keys/node-pub.key 45 | ls keys/node-secret.pkcs8.der 46 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Temporary and binary files 2 | *~ 3 | *.py[cod] 4 | *.so 5 | *.cfg 6 | !.isort.cfg 7 | !setup.cfg 8 | *.orig 9 | *.log 10 | *.pot 11 | __pycache__/* 12 | .cache/* 13 | .*.swp 14 | */.ipynb_checkpoints/* 15 | .mypy_cache/* 16 | logfile 17 | 18 | # data folder 19 | data* 20 | 21 | # Project files 22 | .ropeproject 23 | .project 24 | .pydevproject 25 | .settings 26 | .idea 27 | tags 28 | 29 | # Package files 30 | *.egg 31 | *.eggs/ 32 | .installed.cfg 33 | *.egg-info 34 | 35 | # Unittest and coverage 36 | htmlcov/* 37 | .coverage 38 | .tox 39 | junit.xml 40 | coverage.xml 41 | .pytest_cache/ 42 | 43 | # Build and docs folder/files 44 | build/* 45 | dist/* 46 | sdist/* 47 | docs/api/* 48 | docs/_rst/* 49 | docs/_build/* 50 | cover/* 51 | MANIFEST 52 | 53 | # Per-project virtualenvs 54 | venv*/ 55 | 56 | # Secret files 57 | /config.yml 58 | node-secret.key 59 | keys/ 60 | -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # .readthedocs.yml 3 | # Read the Docs configuration file 4 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 5 | 6 | # Required 7 | version: 2 8 | 9 | # Build documentation in the docs/ directory with Sphinx 10 | sphinx: 11 | configuration: docs/conf.py 12 | 13 | # Build documentation with MkDocs 14 | # mkdocs: 15 | # configuration: mkdocs.yml 16 | 17 | # Optionally build your docs in additional formats such as PDF and ePub 18 | formats: all 19 | 20 | 21 | build: 22 | os: ubuntu-22.04 23 | tools: 24 | python: "3.12" 25 | 26 | # Optionally set the version of Python and requirements required to build your docs 27 | python: 28 | install: 29 | - requirements: docs/requirements.txt 30 | -------------------------------------------------------------------------------- /AUTHORS.rst: -------------------------------------------------------------------------------- 1 | ============ 2 | Contributors 3 | ============ 4 | 5 | * Moshe Malawach 6 | * Hugo Herter 7 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2019 Moshe Malawach 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, 
distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /deployment/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/pyaleph/e706cbd42ca217a13716534d0f2887dc0626932b/deployment/__init__.py -------------------------------------------------------------------------------- /deployment/docker-build/README.md: -------------------------------------------------------------------------------- 1 | # Aleph Core Channel Node (CCN) Docker (Beta) 2 | 3 | This directory contains the `Dockerfile` to build and run the CCN in production, 4 | as well as a Docker Compose file for use during development. 5 | 6 | ## Build the Docker image 7 | 8 | You can build the Docker image simply by running: 9 | ```shell script 10 | ./deployment/docker-build/build.sh 11 | ``` 12 | 13 | or by running the Docker build command from the root of the repository: 14 | ```shell script 15 | docker build -t alephim/pyaleph-node:0.5.7 -f deployment/docker-build/pyaleph.dockerfile . 16 | ``` 17 | 18 | ## Configure the CCN 19 | 20 | We provide a template configuration in the file `deployment/docker-build/config.yml`, 21 | which you will want to customize for your system. 22 | 23 | Change the Ethereum API URL to the endpoint you want the CCN to use. 24 | 25 | To run the local dev environment, you will need to set the P2P daemon and IPFS hosts to `127.0.0.1`. 26 | 27 | ### Generate your node's private key 28 | 29 | Please refer to the installation documentation for that: 30 | https://pyaleph.readthedocs.io/en/latest/guides/install.html#node-secret-keys 31 | 32 | ## Start the dev environment 33 | 34 | Please refer to the installation documentation for that: 35 | https://pyaleph.readthedocs.io/en/latest/guides/install.html#run-the-node-with-docker-compose 36 | -------------------------------------------------------------------------------- /deployment/docker-build/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Use this script to build the Docker image of the Core Channel Node 4 | 5 | set -euo pipefail 6 | 7 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 8 | 9 | cd "$SCRIPT_DIR/../.." 10 | 11 | # Use Podman if installed, else use Docker 12 | if hash podman 2> /dev/null 13 | then 14 | DOCKER_COMMAND=podman 15 | else 16 | DOCKER_COMMAND=docker 17 | fi 18 | 19 | $DOCKER_COMMAND build -t pyaleph-node:0.5.7 -f "$SCRIPT_DIR/pyaleph.dockerfile" . 
20 | -------------------------------------------------------------------------------- /deployment/docker-build/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | nuls2: 3 | chain_id: 1 4 | enabled: false 5 | packing_node: false 6 | sync_address: NULSd6HgUkssMi6oSjwEn3puNSijLKnyiRV7H 7 | api_url: https://apiserver.nuls.io/ 8 | explorer_url: https://nuls.world/ 9 | token_contract: NULSd6Hh1FjbnAktH1FFvFnTzfgFnZtgAhYut 10 | 11 | 12 | ethereum: 13 | enabled: false 14 | 15 | # api_url: {{ ALEPH_ETHEREUM_URL }} 16 | chain_id: 1 17 | packing_node: false 18 | sync_contract: "0x166fd4299364B21c7567e163d85D78d2fb2f8Ad5" 19 | start_height: 11474360 20 | token_contract: "0x27702a26126e0B3702af63Ee09aC4d1A084EF628" 21 | token_start_height: 10939074 22 | 23 | 24 | binancechain: 25 | enabled: false 26 | packing_node: false 27 | 28 | 29 | mongodb: 30 | uri: "mongodb://127.0.0.1:27017" 31 | database: aleph 32 | 33 | 34 | storage: 35 | store_files: true 36 | engine: mongodb 37 | 38 | 39 | ipfs: 40 | enabled: true 41 | host: 127.0.0.1 42 | port: 5001 43 | gateway_port: 8080 44 | 45 | 46 | aleph: 47 | queue_topic: ALEPH-TEST 48 | 49 | 50 | p2p: 51 | host: 0.0.0.0 52 | http_port: 4024 53 | port: 4025 54 | control_port: 4030 55 | reconnect_delay: 60 56 | 57 | 58 | sentry: 59 | dsn: 60 | -------------------------------------------------------------------------------- /deployment/docker-build/dev/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:24.04 as base 2 | 3 | ENV DEBIAN_FRONTEND noninteractive 4 | 5 | RUN apt-get update && apt-get -y upgrade && apt-get install -y software-properties-common 6 | RUN add-apt-repository -y ppa:deadsnakes/ppa 7 | 8 | # Runtime + build packages 9 | RUN apt-get update && apt-get -y upgrade && apt-get install -y \ 10 | git \ 11 | libgmp-dev \ 12 | libpq5 \ 13 | python3.12 14 | 15 | FROM base as builder 16 | 17 | RUN openssl version 18 | RUN cat /etc/ssl/openssl.cnf 19 | RUN echo "$OPENSSL_CONF" 20 | 21 | # Build-only packages 22 | RUN apt-get update && apt-get install -y \ 23 | build-essential \ 24 | curl \ 25 | pkg-config \ 26 | python3.12-dev \ 27 | python3.12-venv \ 28 | libpq-dev \ 29 | software-properties-common 30 | 31 | # Install Rust to build Python packages 32 | RUN curl https://sh.rustup.rs > rustup-installer.sh 33 | RUN sh rustup-installer.sh -y 34 | ENV PATH="/root/.cargo/bin:${PATH}" 35 | 36 | # Some packages (py-ed25519-bindings, required by substrate-interface) need the nightly 37 | # Rust toolchain to be built at this time 38 | RUN rustup default nightly 39 | 40 | # Create virtualenv 41 | RUN python3.12 -m venv /opt/venv 42 | 43 | # Install pip 44 | ENV PIP_NO_CACHE_DIR yes 45 | RUN /opt/venv/bin/python3.12 -m pip install --upgrade pip wheel 46 | ENV PATH="/opt/venv/bin:${PATH}" 47 | 48 | WORKDIR /opt/pyaleph 49 | COPY alembic.ini pyproject.toml ./ 50 | COPY LICENSE.txt README.md ./ 51 | COPY deployment/migrations ./deployment/migrations 52 | COPY deployment/scripts ./deployment/scripts 53 | COPY .git ./.git 54 | COPY src ./src 55 | 56 | RUN pip install -e . 57 | RUN pip install hatch 58 | 59 | FROM base 60 | 61 | COPY --from=builder /opt/venv /opt/venv 62 | COPY --from=builder /opt/pyaleph /opt/pyaleph 63 | 64 | RUN apt-get update && apt-get install -y \ 65 | libsodium23 \ 66 | libsodium-dev \ 67 | libgmp-dev 68 | 69 | # OpenSSL 3 disabled some hash algorithms by default. 
They must be reenabled 70 | # by enabling the "legacy" providers in /etc/ssl/openssl.cnf. 71 | COPY ./deployment/docker-build/openssl.cnf.patch /etc/ssl/openssl.cnf.patch 72 | RUN patch /etc/ssl/openssl.cnf /etc/ssl/openssl.cnf.patch 73 | 74 | RUN mkdir /var/lib/pyaleph 75 | 76 | ENV PATH="/opt/venv/bin:${PATH}" 77 | WORKDIR /opt/pyaleph 78 | 79 | RUN hatch build 80 | ENTRYPOINT ["bash", "deployment/scripts/run_aleph_ccn.sh"] 81 | -------------------------------------------------------------------------------- /deployment/docker-build/dev/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | nuls2: 3 | chain_id: 1 4 | enabled: false 5 | packing_node: false 6 | sync_address: NULSd6HgUkssMi6oSjwEn3puNSijLKnyiRV7H 7 | api_url: https://apiserver.nuls.io/ 8 | explorer_url: https://nuls.world/ 9 | token_contract: NULSd6Hh1FjbnAktH1FFvFnTzfgFnZtgAhYut 10 | 11 | 12 | ethereum: 13 | enabled: true 14 | # api_url: {{ ALEPH_ETHEREUM_URL }} 15 | chain_id: 1 16 | packing_node: false 17 | sync_contract: "0x166fd4299364B21c7567e163d85D78d2fb2f8Ad5" 18 | start_height: 21614811 19 | token_contract: "0x27702a26126e0B3702af63Ee09aC4d1A084EF628" 20 | token_start_height: 21614792 21 | 22 | 23 | postgres: 24 | host: postgres 25 | port: 5432 26 | user: aleph 27 | password: decentralize-everything 28 | name: aleph-test 29 | 30 | 31 | storage: 32 | store_files: true 33 | engine: filesystem 34 | folder: /var/lib/pyaleph 35 | 36 | 37 | ipfs: 38 | enabled: true 39 | host: ipfs 40 | port: 5001 41 | gateway_port: 8080 42 | 43 | 44 | aleph: 45 | queue_topic: ALEPH-TEST 46 | 47 | 48 | p2p: 49 | daemon_host: p2p-service 50 | http_port: 4024 51 | port: 4025 52 | control_port: 4030 53 | listen_port: 4031 54 | reconnect_delay: 60 55 | 56 | 57 | rabbitmq: 58 | host: rabbitmq 59 | port: 5672 60 | username: aleph-p2p 61 | password: decentralize-everything 62 | 63 | 64 | sentry: 65 | dsn: "" 66 | -------------------------------------------------------------------------------- /deployment/docker-build/docker-compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Starts all the services used by pyaleph, minus pyaleph itself. This is used for local development. 3 | # Use the docker-compose/docker-compose.yml file for deployment. 4 | 5 | version: '2.2' 6 | 7 | 8 | volumes: 9 | pyaleph-ipfs: 10 | pyaleph-postgres: 11 | 12 | 13 | services: 14 | 15 | rabbitmq: 16 | restart: always 17 | image: rabbitmq:3.11.15-management 18 | networks: 19 | - pyaleph 20 | environment: 21 | RABBITMQ_DEFAULT_USER: aleph-p2p 22 | RABBITMQ_DEFAULT_PASS: change-me! 
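# Dev-only default password; if you change it, the rabbitmq credentials in the config.yml mounted into p2p-service below presumably need to be updated to match (cf. the rabbitmq section of dev/config.yml).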
23 | ports: 24 | - "127.0.0.1:5672:5672" 25 | - "127.0.0.1:15672:15672" 26 | 27 | p2p-service: 28 | restart: always 29 | image: alephim/p2p-service:0.1.3 30 | networks: 31 | - pyaleph 32 | volumes: 33 | - ../../config.yml:/etc/p2p-service/config.yml 34 | - ../../keys/node-secret.pkcs8.der:/etc/p2p-service/node-secret.pkcs8.der 35 | depends_on: 36 | - rabbitmq 37 | environment: 38 | RUST_LOG: info 39 | ports: 40 | - "4025:4025" 41 | - "127.0.0.1:4030:4030" 42 | command: 43 | - "--config" 44 | - "/etc/p2p-service/config.yml" 45 | - "--private-key-file" 46 | - "/etc/p2p-service/node-secret.pkcs8.der" 47 | 48 | ipfs: 49 | restart: always 50 | image: ipfs/kubo:v0.34.1 51 | ports: 52 | - "4001:4001" 53 | - "4001:4001/udp" 54 | - "127.0.0.1:5001:5001" 55 | - "127.0.0.1:8080:8080" 56 | volumes: 57 | - "pyaleph-ipfs:/data/ipfs" 58 | environment: 59 | - IPFS_PROFILE=server 60 | networks: 61 | - pyaleph 62 | command: ["daemon", "--enable-pubsub-experiment", "--enable-gc", "--migrate"] 63 | 64 | postgres: 65 | restart: always 66 | image: postgres:15.1 67 | ports: 68 | - "127.0.0.1:5432:5432" 69 | volumes: 70 | - pyaleph-postgres:/var/lib/postgresql/data 71 | environment: 72 | POSTGRES_USER: aleph 73 | POSTGRES_PASSWORD: decentralize-everything 74 | POSTGRES_DB: aleph 75 | networks: 76 | - pyaleph 77 | shm_size: "2gb" 78 | 79 | redis: 80 | restart: always 81 | image: redis:7.0.10 82 | ports: 83 | - "127.0.0.1:6380:6379" 84 | networks: 85 | - pyaleph 86 | 87 | 88 | networks: 89 | pyaleph: 90 | -------------------------------------------------------------------------------- /deployment/docker-build/openssl.cnf.patch: -------------------------------------------------------------------------------- 1 | 59a60 2 | > legacy = legacy_sect 3 | 73c74,78 4 | < # activate = 1 5 | --- 6 | > activate = 1 7 | > 8 | > [legacy_sect] 9 | > activate = 1 10 | > 11 | -------------------------------------------------------------------------------- /deployment/docker-build/publish-alpha.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Use this script to publish the current Docker image of the CCN on Docker Hub 4 | 5 | set -euo pipefail 6 | 7 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 8 | 9 | cd "$SCRIPT_DIR/../.." 10 | 11 | # Use Podman if installed, else use Docker 12 | if hash podman 2> /dev/null 13 | then 14 | DOCKER_COMMAND=podman 15 | else 16 | DOCKER_COMMAND=docker 17 | fi 18 | 19 | VERSION=$(git describe --tags)-alpha 20 | 21 | $DOCKER_COMMAND tag alephim/pyaleph-node alephim/pyaleph-node:$VERSION 22 | $DOCKER_COMMAND push alephim/pyaleph-node:$VERSION docker.io/alephim/pyaleph-node:$VERSION 23 | echo docker.io/alephim/pyaleph-node:$VERSION 24 | -------------------------------------------------------------------------------- /deployment/docker-build/publish.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Use this script to publish the current Docker image of the CCN on Docker Hub 4 | 5 | set -euo pipefail 6 | 7 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 8 | 9 | cd "$SCRIPT_DIR/../.." 
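# Work from the repository root: the image version pushed below is derived from this repo's tags via git describe.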
10 | 11 | # Use Podman if installed, else use Docker 12 | if hash podman 2> /dev/null 13 | then 14 | DOCKER_COMMAND=podman 15 | else 16 | DOCKER_COMMAND=docker 17 | fi 18 | 19 | VERSION=$(git describe --tags) 20 | 21 | $DOCKER_COMMAND tag alephim/pyaleph-node alephim/pyaleph-node:$VERSION 22 | #$DOCKER_COMMAND push alephim/pyaleph-node:$VERSION docker.io/alephim/pyaleph-node:$VERSION 23 | $DOCKER_COMMAND push alephim/pyaleph-node:$VERSION 24 | echo docker.io/alephim/pyaleph-node:$VERSION 25 | -------------------------------------------------------------------------------- /deployment/docker-build/test/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:24.04 2 | 3 | ENV DEBIAN_FRONTEND noninteractive 4 | 5 | RUN apt-get update && apt-get -y upgrade && apt-get install -y software-properties-common 6 | RUN add-apt-repository -y ppa:deadsnakes/ppa 7 | 8 | # Runtime + build packages 9 | RUN apt-get update && apt-get -y upgrade && apt-get install -y \ 10 | git \ 11 | libgmp-dev \ 12 | libpq5 \ 13 | python3.12 14 | 15 | RUN openssl version 16 | RUN cat /etc/ssl/openssl.cnf 17 | RUN echo "$OPENSSL_CONF" 18 | 19 | # Build-only packages 20 | RUN apt-get update && apt-get install -y \ 21 | build-essential \ 22 | curl \ 23 | pkg-config \ 24 | python3.12-dev \ 25 | python3.12-venv \ 26 | libpq-dev \ 27 | software-properties-common 28 | 29 | # Install Rust to build Python packages 30 | RUN curl https://sh.rustup.rs > rustup-installer.sh 31 | RUN sh rustup-installer.sh -y 32 | ENV PATH="/root/.cargo/bin:${PATH}" 33 | 34 | # Some packages (py-ed25519-bindings, required by substrate-interface) need the nightly 35 | # Rust toolchain to be built at this time 36 | RUN rustup default nightly 37 | 38 | # Create virtualenv 39 | RUN python3.12 -m venv /opt/venv 40 | 41 | # Install pip 42 | ENV PIP_NO_CACHE_DIR yes 43 | RUN /opt/venv/bin/python3.12 -m pip install --upgrade pip wheel 44 | ENV PATH="/opt/venv/bin:${PATH}" 45 | 46 | WORKDIR /opt/pyaleph 47 | COPY alembic.ini pyproject.toml ./ 48 | COPY LICENSE.txt README.md ./ 49 | COPY deployment/migrations ./deployment/migrations 50 | COPY deployment/scripts ./deployment/scripts 51 | COPY .git ./.git 52 | COPY src ./src 53 | 54 | # Install project deps and test deps 55 | RUN pip install -e .[testing,docs] 56 | RUN pip install hatch 57 | 58 | # Install project test deps 59 | RUN apt-get update && apt-get install -y \ 60 | libsodium23 \ 61 | libsodium-dev \ 62 | libgmp-dev \ 63 | postgresql \ 64 | redis \ 65 | curl 66 | 67 | # OpenSSL 3 disabled some hash algorithms by default. They must be reenabled 68 | # by enabling the "legacy" providers in /etc/ssl/openssl.cnf. 
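# (Presumably needed for digests such as RIPEMD-160 that some supported chains rely on; OpenSSL 3 ships these only in its legacy provider.)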
69 | COPY ./deployment/docker-build/openssl.cnf.patch /etc/ssl/openssl.cnf.patch 70 | RUN patch /etc/ssl/openssl.cnf /etc/ssl/openssl.cnf.patch 71 | 72 | RUN mkdir /var/lib/pyaleph 73 | ENV PATH="/opt/venv/bin:${PATH}" 74 | WORKDIR /opt/pyaleph 75 | 76 | RUN hatch build 77 | CMD ["hatch", "run", "testing:test"] 78 | 79 | -------------------------------------------------------------------------------- /deployment/docker-build/test/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | postgres: 3 | host: postgres 4 | port: 5432 5 | user: aleph 6 | password: decentralize-everything 7 | database: aleph 8 | 9 | 10 | redis: 11 | host: redis 12 | port: 6379 13 | -------------------------------------------------------------------------------- /deployment/docker-build/test/docker-compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | volumes: 4 | pyaleph-ipfs: 5 | pyaleph-local-storage: 6 | pyaleph-postgres: 7 | 8 | 9 | services: 10 | pyaleph: 11 | image: localhost/alephim/pyaleph-node-test:build 12 | build: 13 | dockerfile: ./deployment/docker-build/test/Dockerfile 14 | context: ../../.. 15 | volumes: 16 | - pyaleph-local-storage:/var/lib/pyaleph 17 | - ./config.yml:/opt/pyaleph/config.yml 18 | - ../../..:/opt/pyaleph 19 | depends_on: 20 | - postgres 21 | - redis 22 | networks: 23 | - pyaleph 24 | logging: 25 | options: 26 | max-size: 50m 27 | 28 | postgres: 29 | image: postgres:15.1 30 | volumes: 31 | - pyaleph-postgres:/var/lib/postgresql/data 32 | environment: 33 | POSTGRES_USER: aleph 34 | POSTGRES_PASSWORD: decentralize-everything 35 | POSTGRES_DB: aleph 36 | networks: 37 | - pyaleph 38 | shm_size: "2gb" 39 | 40 | redis: 41 | restart: always 42 | image: redis:7.0.10 43 | networks: 44 | - pyaleph 45 | 46 | 47 | networks: 48 | pyaleph: 49 | -------------------------------------------------------------------------------- /deployment/migrations/README: -------------------------------------------------------------------------------- 1 | Generic single-database configuration. -------------------------------------------------------------------------------- /deployment/migrations/script.py.mako: -------------------------------------------------------------------------------- 1 | """${message} 2 | 3 | Revision ID: ${up_revision} 4 | Revises: ${down_revision | comma,n} 5 | Create Date: ${create_date} 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | ${imports if imports else ""} 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = ${repr(up_revision)} 14 | down_revision = ${repr(down_revision)} 15 | branch_labels = ${repr(branch_labels)} 16 | depends_on = ${repr(depends_on)} 17 | 18 | 19 | def upgrade() -> None: 20 | ${upgrades if upgrades else "pass"} 21 | 22 | 23 | def downgrade() -> None: 24 | ${downgrades if downgrades else "pass"} 25 | -------------------------------------------------------------------------------- /deployment/migrations/versions/0003_89d74331994c_balances.py: -------------------------------------------------------------------------------- 1 | """balances 2 | 3 | Revision ID: 89d74331994c 4 | Revises: 7365b4898472 5 | Create Date: 2022-12-29 01:55:43.413214 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 
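# down_revision names the previous migration; Alembic follows this chain of identifiers to order upgrades and downgrades.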
13 | revision = "89d74331994c" 14 | down_revision = "7365b4898472" 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade() -> None: 20 | # ### commands auto generated by Alembic - please adjust! ### 21 | op.create_table( 22 | "balances", 23 | sa.Column("id", sa.BigInteger(), nullable=False), 24 | sa.Column("address", sa.String(), nullable=False), 25 | sa.Column("chain", sa.String(), nullable=False), 26 | sa.Column("dapp", sa.String(), nullable=True), 27 | sa.Column("balance", sa.DECIMAL(), nullable=False), 28 | sa.Column("eth_height", sa.Integer(), nullable=False), 29 | sa.PrimaryKeyConstraint("id"), 30 | ) 31 | 32 | op.create_index(op.f("ix_balances_address"), "balances", ["address"], unique=False) 33 | op.execute( 34 | """ 35 | ALTER TABLE balances ADD CONSTRAINT balances_address_chain_dapp_uindex 36 | UNIQUE NULLS NOT DISTINCT (address, chain, dapp) 37 | """ 38 | ) 39 | 40 | # ### end Alembic commands ### 41 | 42 | 43 | def downgrade() -> None: 44 | # ### commands auto generated by Alembic - please adjust! ### 45 | op.drop_index("ix_balances_address", "balances") 46 | op.drop_constraint("balances_address_chain_dapp_uindex", "balances") 47 | op.drop_table("balances") 48 | # ### end Alembic commands ### 49 | -------------------------------------------------------------------------------- /deployment/migrations/versions/0006_7b547d707a2f_exponential_retries.py: -------------------------------------------------------------------------------- 1 | """exponential retries 2 | 3 | Revision ID: 7b547d707a2f 4 | Revises: 68fd4bed8a8e 5 | Create Date: 2023-01-20 15:55:33.581234 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = "7b547d707a2f" 14 | down_revision = "68fd4bed8a8e" 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade() -> None: 20 | # ### commands auto generated by Alembic - please adjust! ### 21 | op.add_column( 22 | "pending_messages", 23 | sa.Column("next_attempt", sa.TIMESTAMP(timezone=True), nullable=False), 24 | ) 25 | op.drop_index("ix_retries_time", table_name="pending_messages") 26 | op.create_index( 27 | "ix_next_attempt", 28 | "pending_messages", 29 | [sa.text("next_attempt ASC")], 30 | unique=False, 31 | ) 32 | # ### end Alembic commands ### 33 | 34 | 35 | def downgrade() -> None: 36 | # ### commands auto generated by Alembic - please adjust! ### 37 | op.drop_index("ix_next_attempt", table_name="pending_messages") 38 | op.create_index( 39 | "ix_retries_time", "pending_messages", ["retries", "time"], unique=False 40 | ) 41 | op.drop_column("pending_messages", "next_attempt") 42 | # ### end Alembic commands ### 43 | -------------------------------------------------------------------------------- /deployment/migrations/versions/0008_eef5f95853bf_trusted_messages.py: -------------------------------------------------------------------------------- 1 | """trusted messages 2 | 3 | Revision ID: eef5f95853bf 4 | Revises: 0bfde82697c8 5 | Create Date: 2023-01-23 22:10:41.623055 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | # revision identifiers, used by Alembic. 12 | revision = "eef5f95853bf" 13 | down_revision = "0bfde82697c8" 14 | branch_labels = None 15 | depends_on = None 16 | 17 | 18 | def upgrade() -> None: 19 | # ### commands auto generated by Alembic - please adjust! 
### 20 | # 21 | op.alter_column( 22 | "forgotten_messages", "signature", existing_type=sa.VARCHAR(), nullable=True 23 | ) 24 | op.alter_column("messages", "signature", existing_type=sa.VARCHAR(), nullable=True) 25 | op.alter_column( 26 | "pending_messages", "signature", existing_type=sa.VARCHAR(), nullable=True 27 | ) 28 | op.execute( 29 | """ 30 | alter table pending_messages add constraint signature_not_null_if_check_message 31 | CHECK(signature is not null or not check_message) 32 | """ 33 | ) 34 | # ### end Alembic commands ### 35 | 36 | 37 | def downgrade() -> None: 38 | # ### commands auto generated by Alembic - please adjust! ### 39 | op.execute( 40 | "alter table pending_messages drop constraint signature_not_null_if_check_message" 41 | ) 42 | op.alter_column( 43 | "pending_messages", "signature", existing_type=sa.VARCHAR(), nullable=False 44 | ) 45 | op.alter_column("messages", "signature", existing_type=sa.VARCHAR(), nullable=False) 46 | op.alter_column( 47 | "forgotten_messages", "signature", existing_type=sa.VARCHAR(), nullable=False 48 | ) 49 | # ### end Alembic commands ### 50 | -------------------------------------------------------------------------------- /deployment/migrations/versions/0009_8edf69c47884_file_pin_owner_index.py: -------------------------------------------------------------------------------- 1 | """file pin owner index 2 | 3 | Revision ID: 8edf69c47884 4 | Revises: eef5f95853bf 5 | Create Date: 2023-01-25 00:55:29.953070 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | # revision identifiers, used by Alembic. 12 | revision = "8edf69c47884" 13 | down_revision = "eef5f95853bf" 14 | branch_labels = None 15 | depends_on = None 16 | 17 | 18 | def upgrade() -> None: 19 | # ### commands auto generated by Alembic - please adjust! ### 20 | op.create_index( 21 | "ix_file_pins_owner", 22 | "file_pins", 23 | ["owner"], 24 | unique=False, 25 | postgresql_where=sa.text("owner IS NOT NULL"), 26 | postgresql_using="HASH", 27 | ) 28 | # ### end Alembic commands ### 29 | 30 | 31 | def downgrade() -> None: 32 | # ### commands auto generated by Alembic - please adjust! ### 33 | op.drop_index( 34 | "ix_file_pins_owner", 35 | table_name="file_pins", 36 | postgresql_where=sa.text("owner IS NOT NULL"), 37 | postgresql_using="HASH", 38 | ) 39 | # ### end Alembic commands ### 40 | -------------------------------------------------------------------------------- /deployment/migrations/versions/0010_8a5eaab15d40_address_stats_view.py: -------------------------------------------------------------------------------- 1 | """address stats view 2 | 3 | Revision ID: 8a5eaab15d40 4 | Revises: 8edf69c47884 5 | Create Date: 2023-03-06 17:27:14.514803 6 | 7 | """ 8 | from alembic import op 9 | 10 | 11 | # revision identifiers, used by Alembic. 
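# Note: the materialized view created below is a stored snapshot and only reflects new messages once refreshed; the unique index on (address, type) is what makes REFRESH MATERIALIZED VIEW CONCURRENTLY possible.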
12 | revision = '8a5eaab15d40' 13 | down_revision = '8edf69c47884' 14 | branch_labels = None 15 | depends_on = None 16 | 17 | 18 | def upgrade() -> None: 19 | op.execute( 20 | """ 21 | create materialized view address_stats_mat_view as 22 | select sender as address, type, count(*) as nb_messages 23 | from messages 24 | group by sender, type 25 | """) 26 | op.execute("create unique index ix_address_type on address_stats_mat_view(address, type)") 27 | 28 | 29 | def downgrade() -> None: 30 | op.execute("drop materialized view address_stats_mat_view") 31 | -------------------------------------------------------------------------------- /deployment/migrations/versions/0011_b8e019bf7710_split_sync_and_message_chain_sync_.py: -------------------------------------------------------------------------------- 1 | """split sync and message chain sync statuses 2 | 3 | Revision ID: b8e019bf7710 4 | Revises: 8a5eaab15d40 5 | Create Date: 2023-03-08 14:48:26.581627 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = "b8e019bf7710" 14 | down_revision = "8a5eaab15d40" 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade() -> None: 20 | # ### commands auto generated by Alembic - please adjust! ### 21 | 22 | # Add the new type column 23 | op.add_column("chains_sync_status", sa.Column("type", sa.String(), nullable=True)) 24 | # We only support message events on Tezos, every other chain connector fetches sync events 25 | op.execute("update chains_sync_status set type = 'sync' where chain != 'TEZOS'") 26 | op.execute("update chains_sync_status set type = 'message' where chain = 'TEZOS'") 27 | op.alter_column("chains_sync_status", "type", nullable=False) 28 | 29 | # Recreate the primary key 30 | op.drop_constraint("chains_sync_status_pkey", "chains_sync_status", type_="primary") 31 | op.create_primary_key( 32 | "chains_sync_status_pkey", "chains_sync_status", ["chain", "type"] 33 | ) 34 | # ### end Alembic commands ### 35 | 36 | 37 | def downgrade() -> None: 38 | # ### commands auto generated by Alembic - please adjust! ### 39 | op.drop_constraint("chains_sync_status_pkey", "chains_sync_status", type_="primary") 40 | op.drop_column("chains_sync_status", "type") 41 | op.create_primary_key( 42 | "chains_sync_status_pkey", 43 | "chains_sync_status", 44 | ["chain"], 45 | ) 46 | # ### end Alembic commands ### 47 | -------------------------------------------------------------------------------- /deployment/migrations/versions/0012_7a7704f044db_indexer_sync_status.py: -------------------------------------------------------------------------------- 1 | """indexer sync status 2 | 3 | Revision ID: 7a7704f044db 4 | Revises: b8e019bf7710 5 | Create Date: 2023-03-10 12:09:40.409813 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = "7a7704f044db" 14 | down_revision = "b8e019bf7710" 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade() -> None: 20 | # ### commands auto generated by Alembic - please adjust! 
### 21 | op.create_table( 22 | "indexer_sync_status", 23 | sa.Column("chain", sa.String(), nullable=False), 24 | sa.Column("event_type", sa.String(), nullable=False), 25 | sa.Column("start_block_datetime", sa.TIMESTAMP(timezone=True), nullable=False), 26 | sa.Column("end_block_datetime", sa.TIMESTAMP(timezone=True), nullable=False), 27 | sa.Column("start_included", sa.Boolean, nullable=False), 28 | sa.Column("end_included", sa.Boolean, nullable=False), 29 | sa.Column("last_updated", sa.TIMESTAMP(timezone=True), nullable=False), 30 | sa.PrimaryKeyConstraint("chain", "event_type", "start_block_datetime"), 31 | ) 32 | # ### end Alembic commands ### 33 | 34 | 35 | def downgrade() -> None: 36 | # ### commands auto generated by Alembic - please adjust! ### 37 | op.drop_table("indexer_sync_status") 38 | # ### end Alembic commands ### 39 | -------------------------------------------------------------------------------- /deployment/migrations/versions/0013_7ab62bd0a3b1_messages_post_index.py: -------------------------------------------------------------------------------- 1 | """messages post index 2 | 3 | Revision ID: 7ab62bd0a3b1 4 | Revises: 8a5eaab15d40 5 | Create Date: 2023-03-23 12:48:36.687433 6 | 7 | """ 8 | from alembic import op 9 | 10 | 11 | # revision identifiers, used by Alembic. 12 | revision = "7ab62bd0a3b1" 13 | down_revision = "7a7704f044db" 14 | branch_labels = None 15 | depends_on = None 16 | 17 | 18 | def upgrade() -> None: 19 | op.execute( 20 | """ 21 | CREATE INDEX ix_messages_posts_type_tags 22 | ON messages((content->>'type'),(content->'content'->>'tags')) WHERE type = 'POST' 23 | """ 24 | ) 25 | 26 | 27 | def downgrade() -> None: 28 | op.drop_index("ix_messages_posts_type_tags") 29 | -------------------------------------------------------------------------------- /deployment/migrations/versions/0014_daa92b500049_message_content_in_file_pins.py: -------------------------------------------------------------------------------- 1 | """message content in file pins 2 | 3 | Revision ID: daa92b500049 4 | Revises: 7ab62bd0a3b1 5 | Create Date: 2023-04-12 14:33:55.891990 6 | 7 | """ 8 | from alembic import op 9 | 10 | 11 | # revision identifiers, used by Alembic. 12 | revision = "daa92b500049" 13 | down_revision = "7ab62bd0a3b1" 14 | branch_labels = None 15 | depends_on = None 16 | 17 | 18 | def upgrade() -> None: 19 | # We now store a file + file pin object for the content of each non-inline message. 20 | 21 | # The existing unique constraint on item_hash will fail because of some non-inline STORE messages. 
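# Relaxing the key to (item_hash, type) lets the same item hash be pinned once per pin type instead of once overall.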
22 | op.drop_constraint("file_pins_item_hash_key", "file_pins", type_="unique") 23 | op.create_unique_constraint( 24 | "file_pins_item_hash_type_key", "file_pins", ["item_hash", "type"] 25 | ) 26 | 27 | op.execute( 28 | """ 29 | INSERT INTO files(hash, size, type) 30 | SELECT messages.item_hash, messages.size, 'file' 31 | FROM messages WHERE item_type != 'inline' 32 | """ 33 | ) 34 | op.execute( 35 | """ 36 | INSERT INTO file_pins(file_hash, created, type, tx_hash, owner, item_hash, ref) 37 | SELECT messages.item_hash, 38 | to_timestamp((messages.content ->> 'time')::float), 39 | 'content', 40 | null, 41 | messages.sender, 42 | messages.item_hash, 43 | null 44 | FROM messages 45 | WHERE item_type != 'inline' 46 | """ 47 | ) 48 | 49 | 50 | def downgrade() -> None: 51 | op.execute("DELETE FROM file_pins WHERE type = 'content'") 52 | op.execute( 53 | "DELETE FROM files WHERE EXISTS (SELECT 1 FROM messages WHERE messages.item_hash = hash)" 54 | ) 55 | op.drop_constraint("file_pins_item_hash_type_key", "file_pins", type_="unique") 56 | op.create_unique_constraint("file_pins_item_hash_key", "file_pins", ["item_hash"]) 57 | -------------------------------------------------------------------------------- /deployment/migrations/versions/0015_039c56d3b33e_file_size_not_null.py: -------------------------------------------------------------------------------- 1 | """file size not null 2 | 3 | Revision ID: 039c56d3b33e 4 | Revises: daa92b500049 5 | Create Date: 2023-04-13 17:13:01.353182 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = "039c56d3b33e" 14 | down_revision = "daa92b500049" 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade() -> None: 20 | # ### commands auto generated by Alembic - please adjust! ### 21 | op.alter_column("files", "size", existing_type=sa.BIGINT(), nullable=False) 22 | # ### end Alembic commands ### 23 | 24 | 25 | def downgrade() -> None: 26 | # ### commands auto generated by Alembic - please adjust! ### 27 | op.alter_column("files", "size", existing_type=sa.BIGINT(), nullable=True) 28 | # ### end Alembic commands ### 29 | -------------------------------------------------------------------------------- /deployment/migrations/versions/0016_77e68941d36c_fix_ipfs_files_size.py: -------------------------------------------------------------------------------- 1 | """fix ipfs files size 2 | 3 | Revision ID: 77e68941d36c 4 | Revises: 039c56d3b33e 5 | Create Date: 2023-04-25 10:53:44.111572 6 | 7 | """ 8 | import asyncio 9 | import logging 10 | from threading import Thread 11 | 12 | import aioipfs 13 | from alembic import op 14 | from sqlalchemy import select, update 15 | 16 | from aleph.config import get_config 17 | from aleph.db.models import StoredFileDb 18 | from aleph.services.ipfs.common import make_ipfs_client 19 | from aleph.types.files import FileType 20 | 21 | # revision identifiers, used by Alembic. 
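# Note: unlike the schema-only migrations above, this is a data migration: it stats each file against the live IPFS daemon to backfill real sizes, hence the async client and the thread workaround in upgrade() below.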
22 | revision = "77e68941d36c" 23 | down_revision = "039c56d3b33e" 24 | branch_labels = None 25 | depends_on = None 26 | 27 | 28 | logger = logging.getLogger("alembic") 29 | 30 | 31 | async def stat_ipfs(ipfs_client: aioipfs.AsyncIPFS, cid: str): 32 | try: 33 | return await asyncio.wait_for(ipfs_client.files.stat(f"/ipfs/{cid}"), 5) 34 | except asyncio.TimeoutError: 35 | return None 36 | 37 | 38 | async def upgrade_async() -> None: 39 | conn = op.get_bind() 40 | files = conn.execute( 41 | select(StoredFileDb.hash).where( 42 | (StoredFileDb.hash.like("Qm%") | StoredFileDb.hash.like("bafy%")) 43 | & (StoredFileDb.type == FileType.FILE) 44 | ) 45 | ).all() 46 | 47 | config = get_config() 48 | ipfs_client = make_ipfs_client(config) 49 | 50 | for file in files: 51 | stats = await stat_ipfs(ipfs_client, cid=file.hash) 52 | if stats is None: 53 | logger.warning("Could not stat file: %s", file.hash) 54 | continue 55 | op.execute( 56 | update(StoredFileDb) 57 | .where(StoredFileDb.hash == file.hash) 58 | .values(size=stats["Size"]) 59 | ) 60 | 61 | await ipfs_client.close() 62 | 63 | 64 | def upgrade_thread(): 65 | asyncio.run(upgrade_async()) 66 | 67 | 68 | def upgrade() -> None: 69 | # We can reach this point from sync and async code, resulting in errors if an event loop 70 | # is already running when we just try to run the upgrade_async coroutine. The easiest 71 | # solution here is to start another thread and run the migration from there. 72 | thread = Thread(target=upgrade_thread, daemon=True) 73 | thread.start() 74 | thread.join() 75 | 76 | 77 | def downgrade() -> None: 78 | # Don't reset sizes, it's pointless. 79 | pass 80 | -------------------------------------------------------------------------------- /deployment/migrations/versions/0019_3bf484f2cc95_tx_hash_in_rejected_messages.py: -------------------------------------------------------------------------------- 1 | """tx_hash in rejected messages 2 | 3 | Revision ID: 3bf484f2cc95 4 | Revises: 7bcb8e5fe186 5 | Create Date: 2023-07-31 00:08:17.990537 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = "3bf484f2cc95" 14 | down_revision = "7bcb8e5fe186" 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade() -> None: 20 | # ### commands auto generated by Alembic - please adjust! ### 21 | op.add_column("rejected_messages", sa.Column("tx_hash", sa.String(), nullable=True)) 22 | op.create_foreign_key(None, "rejected_messages", "chain_txs", ["tx_hash"], ["hash"]) 23 | # ### end Alembic commands ### 24 | 25 | 26 | def downgrade() -> None: 27 | # ### commands auto generated by Alembic - please adjust! ### 28 | op.drop_constraint( 29 | "rejected_messages_tx_hash_fkey", "rejected_messages", type_="foreignkey" 30 | ) 31 | op.drop_column("rejected_messages", "tx_hash") 32 | # ### end Alembic commands ### 33 | -------------------------------------------------------------------------------- /deployment/migrations/versions/0020_88a25ae08ebf_grace_period_file_pins.py: -------------------------------------------------------------------------------- 1 | """grace period file pins 2 | 3 | Revision ID: 88a25ae08ebf 4 | Revises: 3bf484f2cc95 5 | Create Date: 2023-11-02 22:43:40.223477 6 | 7 | """ 8 | import sqlalchemy as sa 9 | from alembic import op 10 | 11 | # revision identifiers, used by Alembic.
12 | revision = "88a25ae08ebf" 13 | down_revision = "3bf484f2cc95" 14 | branch_labels = None 15 | depends_on = None 16 | 17 | 18 | def upgrade() -> None: 19 | # ### commands auto generated by Alembic - please adjust! ### 20 | op.add_column( 21 | "file_pins", sa.Column("delete_by", sa.TIMESTAMP(timezone=True), nullable=True) 22 | ) 23 | op.create_index( 24 | "ix_file_pins_delete_by", 25 | "file_pins", 26 | ["delete_by"], 27 | unique=False, 28 | postgresql_where=sa.text("delete_by IS NOT NULL"), 29 | ) 30 | # ### end Alembic commands ### 31 | 32 | 33 | def downgrade() -> None: 34 | # ### commands auto generated by Alembic - please adjust! ### 35 | op.drop_index( 36 | "ix_file_pins_delete_by", 37 | table_name="file_pins", 38 | postgresql_where=sa.text("delete_by IS NOT NULL"), 39 | ) 40 | op.drop_column("file_pins", "delete_by") 41 | # ### end Alembic commands ### 42 | -------------------------------------------------------------------------------- /deployment/migrations/versions/0023_add_trusted_execution_fields_to_vms_.py: -------------------------------------------------------------------------------- 1 | """Add trusted execution fields to vms table 2 | 3 | Revision ID: 2543def8f601 4 | Revises: e682fc8f9506 5 | Create Date: 2024-07-02 13:19:10.675168 6 | 7 | """ 8 | 9 | from alembic import op 10 | import sqlalchemy as sa 11 | 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = "2543def8f601" 15 | down_revision = "e682fc8f9506" 16 | branch_labels = None 17 | depends_on = None 18 | 19 | 20 | def upgrade() -> None: 21 | # ### commands auto generated by Alembic - please adjust! ### 22 | op.add_column( 23 | "vms", 24 | sa.Column("environment_trusted_execution_policy", sa.Integer(), nullable=True), 25 | ) 26 | op.add_column( 27 | "vms", 28 | sa.Column("environment_trusted_execution_firmware", sa.String(), nullable=True), 29 | ) 30 | op.add_column( 31 | "vms", 32 | sa.Column("node_hash", sa.String(), nullable=True), 33 | ) 34 | # ### end Alembic commands ### 35 | 36 | 37 | def downgrade() -> None: 38 | # ### commands auto generated by Alembic - please adjust! ### 39 | op.drop_column("vms", "environment_trusted_execution_firmware") 40 | op.drop_column("vms", "environment_trusted_execution_policy") 41 | op.drop_column("vms", "node_hash") 42 | 43 | # ### end Alembic commands ### 44 | -------------------------------------------------------------------------------- /deployment/migrations/versions/0025_update_vms_tables.py: -------------------------------------------------------------------------------- 1 | """ 2 | Update vms tables 3 | 4 | Revision ID: 63b767213bfa 5 | Revises: 8b27064157d7 6 | Create Date: 2024-11-19 14:30:13.877818 7 | 8 | """ 9 | 10 | import asyncio 11 | import json 12 | from threading import Thread 13 | 14 | import sqlalchemy as sa 15 | from alembic import op 16 | from sqlalchemy import column, table, text 17 | 18 | revision = "63b767213bfa" 19 | down_revision = "8b27064157d7" 20 | branch_labels = None 21 | depends_on = None 22 | 23 | 24 | async def update_payment_types() -> None: 25 | """ 26 | Update the `payment_type` column in the `vms` table based on the `messages` table. 
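Rows whose message content lacks a "payment" section default to the "hold" payment type.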
27 | """ 28 | conn = op.get_bind() 29 | 30 | vms_table = table( 31 | "vms", column("item_hash", sa.String), column("payment_type", sa.String) 32 | ) 33 | 34 | query = text( 35 | """ 36 | SELECT 37 | vms.item_hash AS vm_item_hash, 38 | messages.item_content AS message_content 39 | FROM 40 | vms 41 | LEFT JOIN 42 | messages ON vms.item_hash = messages.item_hash 43 | """ 44 | ) 45 | 46 | rows = conn.execute(query).fetchall() 47 | 48 | for row in rows: 49 | vm_item_hash = row["vm_item_hash"] 50 | message_content = row["message_content"] 51 | 52 | payment_type = "hold" 53 | 54 | if message_content: 55 | message_data = json.loads(message_content) 56 | 57 | payment = message_data.get("payment") 58 | if payment: 59 | payment_type = payment.get("type", "hold") 60 | 61 | conn.execute( 62 | vms_table.update() 63 | .where(vms_table.c.item_hash == vm_item_hash) 64 | .values(payment_type=payment_type) 65 | ) 66 | 67 | 68 | def upgrade_thread(): 69 | asyncio.run(update_payment_types()) 70 | 71 | 72 | def upgrade() -> None: 73 | thread = Thread(target=upgrade_thread, daemon=True) 74 | thread.start() 75 | thread.join() 76 | 77 | 78 | def downgrade() -> None: 79 | pass 80 | -------------------------------------------------------------------------------- /deployment/migrations/versions/0027_bafd49315934_add_pending_messages_origin.py: -------------------------------------------------------------------------------- 1 | """add_pending_messages_origin 2 | 3 | Revision ID: bafd49315934 4 | Revises: d3bba5c2bfa0 5 | Create Date: 2025-01-13 15:05:05.309960 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = 'bafd49315934' 14 | down_revision = 'd3bba5c2bfa0' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade() -> None: 20 | op.add_column("pending_messages", sa.Column("origin", sa.String(), nullable=True, default="p2p")) 21 | 22 | 23 | def downgrade() -> None: 24 | op.drop_column("pending_messages", "origin") 25 | -------------------------------------------------------------------------------- /deployment/migrations/versions/0028_edb195b0ed62_fix_add_unique_constraint_on_pending_.py: -------------------------------------------------------------------------------- 1 | """Fix: add Unique Constraint on pending messsage 2 | 3 | Revision ID: edb195b0ed62 4 | Revises: bafd49315934 5 | Create Date: 2025-01-14 12:16:10.920697 6 | 7 | """ 8 | from alembic import op 9 | 10 | 11 | # revision identifiers, used by Alembic. 
12 | revision = 'edb195b0ed62' 13 | down_revision = 'bafd49315934' 14 | branch_labels = None 15 | depends_on = None 16 | 17 | 18 | def upgrade() -> None: 19 | op.execute("DELETE FROM pending_messages a USING pending_messages b WHERE a.id < b.id AND a.item_hash = b.item_hash;") 20 | op.create_unique_constraint('uq_pending_message', 'pending_messages', ['item_hash']) 21 | 22 | 23 | def downgrade() -> None: 24 | op.drop_constraint('uq_pending_message', 'pending_messages', type_='unique') 25 | -------------------------------------------------------------------------------- /deployment/migrations/versions/0029_46f7e55ff55c_fix_uniqueconstraint_should_have_sender_.py: -------------------------------------------------------------------------------- 1 | """Fix: UniqueConstraint should have sender, item_hash and signature 2 | 3 | Revision ID: 46f7e55ff55c 4 | Revises: edb195b0ed62 5 | Create Date: 2025-01-14 17:51:43.357255 6 | 7 | """ 8 | from alembic import op 9 | 10 | 11 | # revision identifiers, used by Alembic. 12 | revision = '46f7e55ff55c' 13 | down_revision = 'edb195b0ed62' 14 | branch_labels = None 15 | depends_on = None 16 | 17 | 18 | def upgrade() -> None: 19 | op.drop_constraint('uq_pending_message', 'pending_messages', type_='unique') 20 | op.create_unique_constraint('uq_pending_message', 'pending_messages', ['sender', 'item_hash', 'signature']) 21 | 22 | 23 | def downgrade() -> None: 24 | op.drop_constraint('uq_pending_message', 'pending_messages', type_='unique') 25 | op.create_unique_constraint('uq_pending_message', 'pending_messages', ['item_hash']) 26 | -------------------------------------------------------------------------------- /deployment/migrations/versions/0030_9e600f404aa1_add_new_error_code_on_db.py: -------------------------------------------------------------------------------- 1 | """add_new_error_code_on_db 2 | 3 | Revision ID: 9e600f404aa1 4 | Revises: 46f7e55ff55c 5 | Create Date: 2025-01-16 13:51:36.699939 6 | 7 | """ 8 | from alembic import op 9 | 10 | 11 | # revision identifiers, used by Alembic. 12 | revision = '9e600f404aa1' 13 | down_revision = '46f7e55ff55c' 14 | branch_labels = None 15 | depends_on = None 16 | 17 | 18 | def upgrade() -> None: 19 | op.execute( 20 | """ 21 | INSERT INTO error_codes(code, description) VALUES 22 | (503, 'Cannot forget a used message') 23 | """ 24 | ) 25 | 26 | 27 | def downgrade() -> None: 28 | op.execute("DELETE FROM error_codes WHERE code = 503") 29 | -------------------------------------------------------------------------------- /deployment/migrations/versions/0034_8ece21fbeb47_balance_tracker.py: -------------------------------------------------------------------------------- 1 | """balance tracker 2 | 3 | Revision ID: 8ece21fbeb47 4 | Revises: 1c06d0ade60c 5 | Create Date: 2025-03-18 09:58:57.469799 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | from sqlalchemy.sql import func 11 | 12 | 13 | # revision identifiers, used by Alembic.
14 | revision = '8ece21fbeb47' 15 | down_revision = '1c06d0ade60c' 16 | branch_labels = None 17 | depends_on = None 18 | 19 | 20 | def upgrade() -> None: 21 | op.add_column( 22 | "balances", sa.Column("last_update", sa.TIMESTAMP( 23 | timezone=True), nullable=False, server_default=func.now(), onupdate=func.now()) 24 | ) 25 | 26 | op.create_table( 27 | "cron_jobs", 28 | sa.Column("id", sa.String(), nullable=False), 29 | # Interval is specified in seconds 30 | sa.Column("interval", sa.Integer(), nullable=False, default=24), 31 | sa.Column("last_run", sa.TIMESTAMP(timezone=True), nullable=False), 32 | sa.PrimaryKeyConstraint("id"), 33 | ) 34 | 35 | op.execute( 36 | """ 37 | INSERT INTO cron_jobs(id, interval, last_run) VALUES ('balance', 3600, '2025-01-01 00:00:00') 38 | """ 39 | ) 40 | 41 | op.execute( 42 | """ 43 | INSERT INTO balances(address, chain, balance, eth_height) 44 | SELECT distinct m.sender, 'ETH', 0, 22196000 FROM messages m 45 | INNER JOIN message_status ms ON m.item_hash = ms.item_hash 46 | LEFT JOIN balances b ON m.sender = b.address 47 | WHERE m."type" = 'STORE' AND ms.status = 'processed' AND b.address is null AND m."time" > '2025-04-04T00:00:00.000Z' 48 | """ 49 | ) 50 | 51 | 52 | 53 | 54 | def downgrade() -> None: 55 | op.drop_column("balances", "last_update") 56 | 57 | op.drop_table("cron_jobs") 58 | 59 | op.execute( 60 | """ 61 | DELETE FROM balances b WHERE b.eth_height = 22196000 62 | """ 63 | ) 64 | 65 | 66 | -------------------------------------------------------------------------------- /deployment/samples/README.md: -------------------------------------------------------------------------------- 1 | # Deployment samples 2 | 3 | This directory provides examples that demonstrate how you can deploy your own Aleph Core Channel Node (CCN). 4 | 5 | * docker-compose: Deploy the CCN in the simplest form using Docker Compose 6 | * docker-monitoring: Deploy the CCN along with Grafana and Prometheus to monitor your node 7 | * native-install: Deploy the CCN on an Ubuntu server and run it manually. 8 | -------------------------------------------------------------------------------- /deployment/samples/docker-compose/README.md: -------------------------------------------------------------------------------- 1 | # Core Channel Node Deployment 2 | 3 | This directory contains the [Docker Compose](https://docs.docker.com/compose/) file 4 | to run an Aleph Node in production using the official Docker images on [Docker Hub](https://hub.docker.com/). 5 | 6 | See the [Docker-Compose documentation on readthedocs.io](https://pyaleph.readthedocs.io/en/latest/guides/docker-compose.html) 7 | for the documentation. 8 | 9 | See [deployment/docker-build](../../docker-build) to build your own image of the Core Channel Node 10 | and run it with Docker-Compose.
11 | -------------------------------------------------------------------------------- /deployment/samples/docker-compose/sample-config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | nuls2: 3 | chain_id: 1 4 | enabled: false 5 | packing_node: false 6 | sync_address: NULSd6HgUkssMi6oSjwEn3puNSijLKnyiRV7H 7 | api_url: https://apiserver.nuls.io/ 8 | explorer_url: https://nuls.world/ 9 | token_contract: NULSd6Hh1FjbnAktH1FFvFnTzfgFnZtgAhYut 10 | 11 | 12 | ethereum: 13 | enabled: true 14 | # api_url: https://mainnet.infura.io/v3/ 15 | chain_id: 1 16 | packing_node: false 17 | sync_contract: "0x166fd4299364B21c7567e163d85D78d2fb2f8Ad5" 18 | start_height: 11474360 19 | token_contract: "0x27702a26126e0B3702af63Ee09aC4d1A084EF628" 20 | token_start_height: 10939074 21 | 22 | 23 | postgres: 24 | host: postgres 25 | 26 | 27 | storage: 28 | store_files: true 29 | engine: filesystem 30 | folder: /var/lib/pyaleph 31 | 32 | 33 | ipfs: 34 | enabled: true 35 | host: ipfs 36 | port: 5001 37 | gateway_port: 8080 38 | 39 | 40 | aleph: 41 | queue_topic: ALEPH-TEST 42 | 43 | 44 | p2p: 45 | daemon_host: p2p-service 46 | http_port: 4024 47 | port: 4025 48 | control_port: 4030 49 | listen_port: 4031 50 | reconnect_delay: 60 51 | peers: 52 | - /dns/api2.aleph.im/tcp/4025/p2p/QmZkurbY2G2hWay59yiTgQNaQxHSNzKZFt2jbnwJhQcKgV 53 | - /dns/api3.aleph.im/tcp/4025/p2p/Qmb5b2ZwJm9pVWrppf3D3iMF1bXbjZhbJTwGvKEBMZNxa2 54 | 55 | 56 | rabbitmq: 57 | host: rabbitmq 58 | port: 5672 59 | username: aleph-p2p 60 | password: change-me! 61 | 62 | 63 | sentry: 64 | dsn: "" 65 | -------------------------------------------------------------------------------- /deployment/samples/docker-monitoring/README.md: -------------------------------------------------------------------------------- 1 | # Aleph Core Channel Node (CCN) deployment with Monitoring 2 | 3 | This directory contains a configuration to run a CCN in production with monitoring. 4 | It is intended as a starting point for node operators interested in easily getting pre-made basic 5 | metrics on their node. 6 | 7 | This directory contains the [Docker Compose](https://docs.docker.com/compose/) file 8 | to run an Aleph Node in production using the official Docker images on [Docker Hub](https://hub.docker.com/) 9 | with performance monitoring using [Prometheus](https://prometheus.io/) and [Grafana](https://grafana.com/). 10 | [Caddy](https://caddyserver.com/) is used as a reverse proxy. 11 | 12 | ### Other links 13 | 14 | See [../docker-compose](../docker-compose) to run a CCN without the monitoring. 15 | 16 | See the [Docker-Compose documentation on readthedocs.io](https://pyaleph.readthedocs.io/en/latest/guides/docker-compose.html) 17 | for the documentation. 18 | 19 | See [../docker-build](../../docker-build) to build your own image of the CCN and run it with Docker-Compose. 20 | 21 | ## Configuration 22 | 23 | ### Password 24 | 25 | Grafana is configured with a default username and an insecure password, 26 | defined in the `docker-compose.yml` file. You are encouraged to change them. 27 | 28 | ### Hostnames 29 | 30 | By default, Grafana will be available on HTTP, port 80.
31 | 32 | For a more secure setup, create a public domain name pointing to the server, 33 | uncomment and edit the command to configure the Caddy reverse proxy in `docker-compose.yml`: 34 | ``` 35 | caddy reverse-proxy --from grafana.aleph-node.example.org --to grafana:3000 36 | ``` 37 | Where `grafana.aleph-node.example.org` should be replaced with your domain name. 38 | 39 | Restart `docker-compose` with `docker-compose up -d`. 40 | Caddy should now expose Grafana using secure HTTPS. 41 | 42 | ### System metrics 43 | 44 | Monitoring the performance of the host system requires installing `prometheus-node-exporter`. 45 | On a Debian/Ubuntu system: 46 | 47 | ```shell 48 | sudo apt-get install prometheus-node-exporter 49 | ``` 50 | 51 | **Security**: The default configuration of `prometheus-node-exporter` publishes the 52 | metrics of the system publicly on HTTP port 9100. You may want to change this 53 | or use a firewall to restrict access to it. 54 | 55 | ## Dashboards 56 | 57 | Two dashboards are provided out of the box: one to monitor the internals of the Aleph Node, 58 | and one to monitor the performance of the host system. 59 | -------------------------------------------------------------------------------- /deployment/samples/docker-monitoring/grafana/dashboard.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: 1 3 | 4 | 5 | providers: 6 | - name: 'aleph.im' 7 | orgId: 1 8 | folder: '' 9 | folderUid: '' 10 | type: file 11 | disableDeletion: false 12 | updateIntervalSeconds: 10 13 | allowUiUpdates: false 14 | options: 15 | path: /etc/grafana/provisioning/dashboards/ 16 | foldersFromFilesStructure: true 17 | -------------------------------------------------------------------------------- /deployment/samples/docker-monitoring/grafana/prometheus.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: 1 3 | 4 | 5 | datasources: 6 | - name: Prometheus 7 | type: prometheus 8 | access: proxy 9 | url: http://prometheus:9090 10 | editable: false 11 | -------------------------------------------------------------------------------- /deployment/samples/docker-monitoring/prometheus.yml: -------------------------------------------------------------------------------- 1 | --- 2 | global: 3 | scrape_interval: 5s 4 | evaluation_interval: 5s 5 | 6 | 7 | scrape_configs: 8 | - job_name: pyaleph 9 | static_configs: 10 | - targets: ['pyaleph-api:4024'] 11 | - job_name: system 12 | static_configs: 13 | - targets: ['172.17.0.1:9100'] 14 | -------------------------------------------------------------------------------- /deployment/samples/docker-monitoring/sample-config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | nuls2: 3 | chain_id: 1 4 | enabled: false 5 | packing_node: false 6 | sync_address: NULSd6HgUkssMi6oSjwEn3puNSijLKnyiRV7H 7 | api_url: https://apiserver.nuls.io/ 8 | explorer_url: https://nuls.world/ 9 | token_contract: NULSd6Hh1FjbnAktH1FFvFnTzfgFnZtgAhYut 10 | 11 | 12 | ethereum: 13 | enabled: true 14 | # api_url: https://mainnet.infura.io/v3/ 15 | chain_id: 1 16 | packing_node: false 17 | sync_contract: "0x166fd4299364B21c7567e163d85D78d2fb2f8Ad5" 18 | start_height: 11474360 19 | token_contract: "0x27702a26126e0B3702af63Ee09aC4d1A084EF628" 20 | token_start_height: 10939074 21 | 22 | 23 | storage: 24 | store_files: true 25 | engine: filesystem 26 | folder: /var/lib/pyaleph 27 | 28 | 29 | ipfs: 30 | enabled: true 31 | host: ipfs
32 | port: 5001 33 | gateway_port: 8080 34 | 35 | 36 | aleph: 37 | queue_topic: ALEPH-TEST 38 | 39 | 40 | p2p: 41 | daemon_host: p2p-service 42 | http_port: 4024 43 | port: 4025 44 | control_port: 4030 45 | reconnect_delay: 60 46 | 47 | 48 | rabbitmq: 49 | host: rabbitmq 50 | port: 5672 51 | username: aleph-p2p 52 | password: change-me! 53 | 54 | 55 | sentry: 56 | dsn: "" 57 | -------------------------------------------------------------------------------- /deployment/scripts/extract_requirements.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from configparser import ConfigParser 3 | 4 | 5 | def cli_parse() -> argparse.Namespace: 6 | parser = argparse.ArgumentParser( 7 | description="Extracts dependencies from setup.cfg into a requirements.txt file." 8 | ) 9 | parser.add_argument( 10 | "config_file", action="store", nargs=1, type=str, help="Path to setup.cfg." 11 | ) 12 | parser.add_argument( 13 | "--output-file", 14 | "-o", 15 | action="store", 16 | default="requirements.txt", 17 | type=str, 18 | help="Path to the requirements file to create.", 19 | ) 20 | return parser.parse_args() 21 | 22 | 23 | def main(args: argparse.Namespace): 24 | config_file = args.config_file 25 | output_file = args.output_file 26 | 27 | parser = ConfigParser() 28 | parser.read(config_file) 29 | 30 | requirements = parser["options"]["install_requires"] 31 | with open(output_file, "w") as f: 32 | f.write(requirements.strip()) 33 | 34 | 35 | if __name__ == "__main__": 36 | main(cli_parse()) 37 | -------------------------------------------------------------------------------- /deployment/scripts/get_config_value.py: -------------------------------------------------------------------------------- 1 | """ 2 | This script reads a configuration value from the CCN config. This enables reading 3 | configuration values from shell scripts without launching the CCN itself. 4 | """ 5 | 6 | import argparse 7 | import sys 8 | from functools import partial 9 | 10 | import configmanager.exceptions 11 | from configmanager import Config 12 | 13 | from aleph.config import get_defaults 14 | 15 | 16 | def cli_parse() -> argparse.Namespace: 17 | parser = argparse.ArgumentParser( 18 | description="Reads the specified CCN configuration key." 
19 | ) 20 | parser.add_argument( 21 | "--config-file", 22 | action="store", 23 | type=str, 24 | required=True, 25 | help="Path to the user configuration file.", 26 | ) 27 | parser.add_argument( 28 | "config_key", 29 | action="store", 30 | type=str, 31 | help="Configuration key to retrieve.", 32 | ) 33 | return parser.parse_args() 34 | 35 | 36 | def load_config(config_file: str) -> Config: 37 | config = Config(schema=get_defaults()) 38 | config.yaml.load(config_file) 39 | return config 40 | 41 | 42 | print_err = partial(print, file=sys.stderr) 43 | 44 | 45 | def main(args: argparse.Namespace): 46 | config_file = args.config_file 47 | config_key = args.config_key 48 | 49 | config = load_config(config_file) 50 | 51 | current_section = config 52 | sections = config_key.split(".") 53 | try: 54 | for section_name in sections: 55 | current_section = getattr(current_section, section_name) 56 | print(current_section.value) 57 | except configmanager.exceptions.NotFound: 58 | print_err(f"Configuration key not found: '{config_key}'.") 59 | sys.exit(-1) 60 | 61 | 62 | if __name__ == "__main__": 63 | main(cli_parse()) 64 | -------------------------------------------------------------------------------- /deployment/scripts/run_aleph_ccn.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Starts an Aleph Core Channel Node. 3 | 4 | set -euo pipefail 5 | 6 | function help() { 7 | pyaleph -h 8 | exit 1 9 | } 10 | 11 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 12 | CONFIG_FILE="/var/pyaleph/config.yml" 13 | 14 | PYALEPH_ARGS=("$@") 15 | 16 | while test $# -gt 0; do 17 | case "$1" in 18 | --help) 19 | help 20 | ;; 21 | --config) 22 | CONFIG_FILE="$2" 23 | shift 24 | ;; 25 | esac 26 | shift 27 | done 28 | 29 | source ${SCRIPT_DIR}/wait_for_services.sh 30 | wait_for_services "${CONFIG_FILE}" 31 | 32 | exec pyaleph "${PYALEPH_ARGS[@]}" 33 | -------------------------------------------------------------------------------- /deployment/scripts/run_aleph_ccn_api.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Starts an Aleph Core Channel Node API server. 
3 | 4 | set -euo pipefail 5 | 6 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 7 | CONFIG_FILE="/var/pyaleph/config.yml" 8 | function help() { echo "Usage: $0 [--config CONFIG_FILE]"; exit 1; } 9 | while test $# -gt 0; do 10 | case "$1" in 11 | --help) 12 | help 13 | ;; 14 | --config) 15 | CONFIG_FILE="$2" 16 | shift 17 | ;; 18 | esac 19 | shift 20 | done 21 | 22 | source ${SCRIPT_DIR}/wait_for_services.sh 23 | wait_for_services "${CONFIG_FILE}" 24 | 25 | NB_WORKERS="${CCN_CONFIG_API_NB_WORKERS:-4}" 26 | PORT=${CCN_CONFIG_API_PORT:-4024} 27 | TIMEOUT="${CCN_CONFIG_API_TIMEOUT:-300}" 28 | 29 | echo "Starting aleph.im CCN API server on port ${PORT} (${NB_WORKERS} workers)" 30 | 31 | exec gunicorn \ 32 | "aleph.api_entrypoint:create_app" \ 33 | --bind 0.0.0.0:${PORT} \ 34 | --worker-class aiohttp.worker.GunicornUVLoopWebWorker \ 35 | --workers ${NB_WORKERS} \ 36 | --timeout ${TIMEOUT} \ 37 | --access-logfile "-" 38 | -------------------------------------------------------------------------------- /deployment/scripts/sync_initial_messages.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | import logging 4 | 5 | logging.basicConfig(level=logging.DEBUG) 6 | logger = logging.getLogger(__name__) 7 | 8 | initial_messages_list = [ 9 | # Diagnostic VMs 10 | "cad11970efe9b7478300fd04d7cc91c646ca0a792b9cc718650f86e1ccfac73e", # Initial program 11 | "3fc0aa9569da840c43e7bd2033c3c580abb46b007527d6d20f2d4e98e867f7af", # Old DiagVM Debian 12 12 | "63faf8b5db1cf8d965e6a464a0cb8062af8e7df131729e48738342d956f29ace", # Current Debian 12 DiagVM 13 | "67705389842a0a1b95eaa408b009741027964edc805997475e95c505d642edd8", # Legacy Diag VM 14 | # Volumes like runtimes, data, code, etc 15 | "6b8618f5b8913c0f582f1a771a154a556ee3fa3437ef3cf91097819910cf383b", # Old Diag VM code volume 16 | "f873715dc2feec3833074bd4b8745363a0e0093746b987b4c8191268883b2463", # Old Diag VM runtime volume 17 | "79f19811f8e843f37ff7535f634b89504da3d8f03e1f0af109d1791cf6add7af", # Diag VM code volume 18 | "63f07193e6ee9d207b7d1fcf8286f9aee34e6f12f101d2ec77c1229f92964696", # Diag VM runtime volume 19 | "a92c81992e885d7a554fa78e255a5802404b7fdde5fbff20a443ccd13020d139", # Legacy Diag VM code volume 20 | "bd79839bf96e595a06da5ac0b6ba51dea6f7e2591bb913deccded04d831d29f4", # Legacy Diag VM runtime volume 21 | ] 22 | 23 | FROM_CCN = "http://api3.aleph.im" 24 | TO_CCN = "http://api2.aleph.im" 25 | PUB_SUB_TOPIC = "ALEPH-TEST" 26 | item_hashes_to_sync = ",".join(initial_messages_list) 27 | 28 | logger.debug(f"Fetching messages from {FROM_CCN}...") 29 | m1 = requests.get(f"{FROM_CCN}/api/v0/messages.json?pagination=50000&hashes={item_hashes_to_sync}") 30 | m1 = m1.json()['messages'] 31 | logger.debug(f"Fetched {len(m1)} messages from {FROM_CCN}") 32 | 33 | logger.debug(f"Fetching messages from {TO_CCN}") 34 | m2 = requests.get(f"{TO_CCN}/api/v0/messages.json?pagination=50000&hashes={item_hashes_to_sync}") 35 | m2 = m2.json()['messages'] 36 | logger.debug(f"Fetched {len(m2)} messages from {TO_CCN}") 37 | 38 | m1_hashes = set(m["item_hash"] for m in m1) 39 | m2_hashes = set(m["item_hash"] for m in m2) 40 | hashes_to_sync = m1_hashes - m2_hashes 41 | messages_to_sync = (m for m in m1 if m["item_hash"] in hashes_to_sync) # Use a generator to avoid memory issues 42 | 43 | logger.info(f"Messages to sync to {TO_CCN}: {len(hashes_to_sync)}") 44 | for message in messages_to_sync: 45 | requests.post(f"{TO_CCN}/api/v0/ipfs/pubsub/pub", 46 | json={"topic": PUB_SUB_TOPIC, "data": json.dumps(message)}) 47 |
-------------------------------------------------------------------------------- /deployment/scripts/wait_for_services.sh: -------------------------------------------------------------------------------- 1 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 2 | 3 | function get_config() { 4 | config_file="$1" 5 | config_key="$2" 6 | config_value=$(python3 "${SCRIPT_DIR}/get_config_value.py" --config-file "${config_file}" "${config_key}") 7 | echo "${config_value}" 8 | } 9 | 10 | function wait_for_it() { 11 | "${SCRIPT_DIR}"/wait-for-it.sh "$@" 12 | } 13 | 14 | function wait_for_services() { 15 | config_file="$1" 16 | 17 | POSTGRES_HOST=$(get_config "${config_file}" postgres.host) 18 | POSTGRES_PORT=$(get_config "${config_file}" postgres.port) 19 | IPFS_HOST=$(get_config "${config_file}" ipfs.host) 20 | IPFS_PORT=$(get_config "${config_file}" ipfs.port) 21 | RABBITMQ_HOST=$(get_config "${config_file}" rabbitmq.host) 22 | RABBITMQ_PORT=$(get_config "${config_file}" rabbitmq.port) 23 | REDIS_HOST=$(get_config "${config_file}" redis.host) 24 | REDIS_PORT=$(get_config "${config_file}" redis.port) 25 | P2P_SERVICE_HOST=$(get_config "${config_file}" p2p.daemon_host) 26 | P2P_SERVICE_CONTROL_PORT=$(get_config "${config_file}" p2p.control_port) 27 | 28 | if [ "$(get_config "${config_file}" ipfs.enabled)" = "True" ]; then 29 | wait_for_it -h "${IPFS_HOST}" -p "${IPFS_PORT}" 30 | fi 31 | 32 | wait_for_it -h "${POSTGRES_HOST}" -p "${POSTGRES_PORT}" 33 | wait_for_it -h "${RABBITMQ_HOST}" -p "${RABBITMQ_PORT}" 34 | wait_for_it -h "${REDIS_HOST}" -p "${REDIS_PORT}" 35 | wait_for_it -h "${P2P_SERVICE_HOST}" -p "${P2P_SERVICE_CONTROL_PORT}" 36 | } 37 | -------------------------------------------------------------------------------- /docs/_static/.gitignore: -------------------------------------------------------------------------------- 1 | # Empty directory 2 | -------------------------------------------------------------------------------- /docs/architecture.rst: -------------------------------------------------------------------------------- 1 | ============ 2 | Architecture 3 | ============ 4 | 5 | .. image:: figures/architecture-stack.* 6 | :width: 100% 7 | -------------------------------------------------------------------------------- /docs/authors.rst: -------------------------------------------------------------------------------- 1 | .. _authors: 2 | .. include:: ../AUTHORS.rst 3 | -------------------------------------------------------------------------------- /docs/changelog.rst: -------------------------------------------------------------------------------- 1 | .. _changes: 2 | ..
include:: ../CHANGELOG.rst 3 | -------------------------------------------------------------------------------- /docs/figures/architecture-stack.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/pyaleph/e706cbd42ca217a13716534d0f2887dc0626932b/docs/figures/architecture-stack.pdf -------------------------------------------------------------------------------- /docs/figures/architecture-stack.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/pyaleph/e706cbd42ca217a13716534d0f2887dc0626932b/docs/figures/architecture-stack.png -------------------------------------------------------------------------------- /docs/guides/index.rst: -------------------------------------------------------------------------------- 1 | ######## 2 | Guides 3 | ######## 4 | 5 | .. toctree:: 6 | :maxdepth: 2 7 | 8 | install 9 | upgrade 10 | private_net 11 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | ======= 2 | pyaleph 3 | ======= 4 | 5 | This is the documentation of **pyaleph**. 6 | **pyaleph** is the reference client node implementation of the Aleph.im protocol. 7 | 8 | .. note:: 9 | 10 | This documentation is under construction. Please get in touch if you find 11 | any issue with the documentation or if you would like to contribute. 12 | 13 | Contents 14 | ======== 15 | 16 | .. toctree:: 17 | :maxdepth: 2 18 | 19 | architecture 20 | guides/index 21 | node-synchronisation 22 | protocol/index 23 | metrics 24 | 25 | License <license> 26 | Authors <authors> 27 | Changelog <changelog> 28 | Module Reference <api/modules> 29 | 30 | 31 | Indices and tables 32 | ================== 33 | 34 | * :ref:`genindex` 35 | * :ref:`modindex` 36 | * :ref:`search` 37 | 38 | .. _toctree: http://www.sphinx-doc.org/en/master/usage/restructuredtext/directives.html 39 | .. _reStructuredText: http://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html 40 | .. _references: http://www.sphinx-doc.org/en/stable/markup/inline.html 41 | .. _Python domain syntax: http://sphinx-doc.org/domains.html#the-python-domain 42 | .. _Sphinx: http://www.sphinx-doc.org/ 43 | .. _Python: http://docs.python.org/ 44 | .. _Numpy: http://docs.scipy.org/doc/numpy 45 | .. _SciPy: http://docs.scipy.org/doc/scipy/reference/ 46 | .. _matplotlib: https://matplotlib.org/contents.html# 47 | .. _Pandas: http://pandas.pydata.org/pandas-docs/stable 48 | .. _Scikit-Learn: http://scikit-learn.org/stable 49 | .. _autodoc: http://www.sphinx-doc.org/en/stable/ext/autodoc.html 50 | .. _Google style: https://github.com/google/styleguide/blob/gh-pages/pyguide.md#38-comments-and-docstrings 51 | .. _NumPy style: https://numpydoc.readthedocs.io/en/latest/format.html 52 | .. _classical style: http://www.sphinx-doc.org/en/stable/domains.html#info-field-lists 53 | -------------------------------------------------------------------------------- /docs/license.rst: -------------------------------------------------------------------------------- 1 | .. _license: 2 | 3 | ======= 4 | License 5 | ======= 6 | 7 | ..
include:: ../LICENSE.txt 8 | -------------------------------------------------------------------------------- /docs/node-synchronisation.rst: -------------------------------------------------------------------------------- 1 | ==================== 2 | Node Synchronisation 3 | ==================== 4 | 5 | After being deployed, a node needs to synchronise the messages from Aleph. 6 | 7 | A few metrics are exposed to monitor this synchronisation, at the URL `/metrics`: 8 | 9 | 1. Total number of messages synchronised: `pyaleph_status_sync_messages_total` 10 | 2. Messages downloaded but not processed yet: `pyaleph_status_sync_pending_messages_total` 11 | 3. Transactions downloaded but not processed yet: `pyaleph_status_sync_pending_txs_total` 12 | 13 | The total number of messages [1] should reach the same value for every node in the Aleph 14 | network. Compare it to other nodes such as https://api2.aleph.im/metrics.json to evaluate 15 | how many messages still need to be synchronised. 16 | 17 | The number of pending messages [2] and transactions [3] should reach a value close to zero 18 | when the node is operating, since messages in these queues should be processed by the node. 19 | 20 | Ethereum height 21 | --------------- 22 | 23 | The Aleph node can look for messages on the Ethereum blockchain. 24 | The metric `pyaleph_status_chain_eth_last_committed_height` indicates the number of the 25 | last block synced by the Aleph Node, to be compared with the number of the last block 26 | on the Ethereum chain behind the URL specified in the configuration (mainnet, rinkeby, ...). 27 | -------------------------------------------------------------------------------- /docs/protocol/authorizations.rst: -------------------------------------------------------------------------------- 1 | ************** 2 | Authorizations 3 | ************** 4 | 5 | Inside most message types there is an "address" field. 6 | This is the address to which the message applies (which address this 7 | aggregate applies to, who posted that item, and so on). 8 | 9 | The client validates that the message sender (the one signing the message) has 10 | the right to publish on this address's behalf. 11 | 12 | 1. obvious case: if the sender == the content address, it is authorized. 13 | 2. the "security" key in the address aggregate has an entry for this address 14 | 15 | Aggregate "security" key 16 | ======================== 17 | 18 | This key is a special case in the Aggregate system. It can only be changed 19 | by sending an AGGREGATE message on the "security" channel. 20 | 21 | For now, only the address itself (sender == content.address) has the right 22 | to send an AGGREGATE message on this channel ("security") with this key ("security"). 23 | This behaviour might change in the future. 24 | 25 | "authorizations" subkey 26 | ----------------------- 27 | 28 | It is an array of objects built like this: 29 | 30 | =============== ======================================================= 31 | address the address to authorize 32 | chain optional. only accept this address on a specific chain 33 | channels optional. authorized channel list 34 | types optional. the authorized message types 35 | post_types optional. specific post types authorized 36 | aggregate_keys optional. specific aggregate keys authorized 37 | =============== ======================================================= 38 | 39 | .. note:: 40 | 41 | If a filter is set, it is exclusive: only matching messages will be accepted.
42 | All filters specified must pass, with one exception: type-specific filters only apply 43 | to their own type (post_types only apply to POST, aggregate_keys only apply to AGGREGATE messages). 44 | 45 | Example:: 46 | 47 | channels: ['blog'] => only posts in this channel will be accepted 48 | types: ['POST'] => only POST will be accepted from this sender 49 | aggregate_keys: ['profile', 'preferences'] => only those keys will be writeable. 50 | -------------------------------------------------------------------------------- /docs/protocol/index.rst: -------------------------------------------------------------------------------- 1 | ######## 2 | Protocol 3 | ######## 4 | 5 | .. toctree:: 6 | :maxdepth: 2 7 | 8 | messages/index 9 | authorizations 10 | payment 11 | -------------------------------------------------------------------------------- /docs/protocol/messages/aggregate.rst: -------------------------------------------------------------------------------- 1 | Aggregates 2 | ========== 3 | 4 | AGGREGATE messages are a global key/value store mechanism. 5 | 6 | Content format 7 | -------------- 8 | 9 | The `content` field of an AGGREGATE message must contain the following fields: 10 | 11 | * `address` [str]: The address to which the aggregate belongs. Reserved for future developments. 12 | * `time` [float]: The epoch timestamp of the message. 13 | * `key` [str]: The user-defined ID of the aggregate. 14 | * `content` [Dict]: The key/value pairs making up the aggregate, as a dictionary. 15 | 16 | Update aggregates 17 | ----------------- 18 | 19 | Users can update aggregates by sending additional AGGREGATE messages with the same content key. 20 | Updates are ordered by their content time field to match the order in which the user sent 21 | the messages originally. 22 | 23 | Retrieve aggregates 24 | ------------------- 25 | 26 | Users can retrieve aggregates by using the `/api/v0/aggregates/{address}.json` endpoint. 27 | Specify the `keys` URL parameter to restrict the response to one or more aggregates. 28 | -------------------------------------------------------------------------------- /docs/protocol/messages/forget.rst: -------------------------------------------------------------------------------- 1 | Forgets 2 | ======= 3 | 4 | FORGET messages are meant to make the Aleph network forget/drop one or more messages 5 | sent previously. 6 | Users can forget any type of message, except for FORGET messages themselves. 7 | 8 | When a FORGET message is received by a node, it will immediately: 9 | * remove the ‘content’ and ‘item_content’ sections of the targeted messages 10 | * add a field ‘removed_by’ that references the processed FORGET message 11 | 12 | In addition, any content related to the forgotten message currently stored in the DB 13 | will be deleted, if no other message points to the same content. For example, a file 14 | stored in local storage will be deleted, or a file pinned in IPFS will be unpinned. 15 | 16 | Content format 17 | -------------- 18 | 19 | The `content` field of a FORGET message must contain the following fields: 20 | 21 | * `address` [str]: The address to which the message belongs. Reserved for future developments. 22 | * `time` [float]: The epoch timestamp of the message. 23 | * `hashes` [List[str]]: The list of message hashes to forget. 24 | * `reason` [Optional[str]]: An optional explanation of why the user wants to forget these hashes. 25 | 26 | Limitations 27 | ----------- 28 | 29 | * At the moment, a user can only forget messages they sent themselves.
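Example
-------

For illustration only, the ``content`` of a FORGET message could look like the following (all values below are hypothetical, not taken from the network):

.. code-block:: python

    {
        "address": "0x720F319A9c3226dCDd7D8C49163D79EDa1084E98",  # hypothetical sender address
        "time": 1652126646.5,
        "hashes": [
            "e3b24727335e34016247c0d37e2b0203bb8c2d76deddafc1700b4cf0e13845c5"  # hypothetical target hash
        ],
        "reason": "no longer needed",
    }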
30 | -------------------------------------------------------------------------------- /docs/protocol/messages/post.rst: -------------------------------------------------------------------------------- 1 | Posts 2 | ===== 3 | 4 | Posts are unique data entries that can be amended later on, like blog posts, comments, events... 5 | Internally, POST messages are similar to STORE messages but differ in that they support amending 6 | and only support JSON content. 7 | 8 | 9 | Content format 10 | -------------- 11 | 12 | The `content` field of a POST message must contain the following fields: 13 | 14 | * `address` [str]: The address to which the post belongs. Reserved for future developments. 15 | * `time` [float]: The epoch timestamp of the message. 16 | * `content` [Dict]: The JSON content of the post. 17 | * `ref` [Optional[str]]: Used for amending. If specified, must be set to the item hash of the original 18 | message that created the post to modify. 19 | * `type` [str]: User-defined content type. 20 | 21 | Amend posts 22 | ----------- 23 | 24 | Users can amend posts by sending additional POST messages referencing the original message. 25 | To do so, the user must send a new POST message with the content `ref` field set to 26 | the item hash of the original POST message. 27 | Note that even if the user amends the message multiple times, the `ref` field must always 28 | reference the original message, not the amendments. 29 | Amendments are applied in the order of the content `time` field. 30 | 31 | Retrieve posts 32 | -------------- 33 | 34 | Users can retrieve posts by using the `/api/v0/posts.json` endpoint. 35 | -------------------------------------------------------------------------------- /docs/protocol/messages/program.rst: -------------------------------------------------------------------------------- 1 | Programs 2 | ======== 3 | 4 | PROGRAM messages create a new application that can then be run on Aleph VMs. 5 | 6 | Content format 7 | -------------- 8 | 9 | The `content` field of a PROGRAM message must contain the following fields: 10 | 11 | .. code-block:: json 12 | 13 | "code": { 14 | "encoding": "plain | zip | tar.gzip", 15 | "entrypoint": "application", 16 | "ref": "str", 17 | 18 | "use_latest": true 19 | }, 20 | "on": { 21 | "http": true, 22 | "cron": "5 4 * * *", 23 | "aleph": [ 24 | {"type": "POST", "channel": "FOUNDATION", "content": {"type": "calculation"}} 25 | ] 26 | }, 27 | "environment": { 28 | "reproducible": true, 29 | "internet": false, 30 | "aleph_api": false 31 | }, 32 | "resources": { 33 | "vcpus": 1, 34 | "memory": 128, 35 | "seconds": 1 36 | }, 37 | "runtime": { 38 | "address": "0x4cB66fDf10971De5c7598072024FFd33482907a5", 39 | "comment": "Aleph Alpine Linux with Python 3.8" 40 | }, 41 | "data": { 42 | "encoding": "tar.gzip", 43 | "mount": "/mnt", 44 | "address": "0xED9d5B040386F394B9ABd34fD59152756b126710" 45 | }, 46 | "export": { 47 | "encoding": "tar.gzip", 48 | "mount": "/mnt" 49 | } 50 | -------------------------------------------------------------------------------- /docs/protocol/messages/store.rst: -------------------------------------------------------------------------------- 1 | Stores 2 | ====== 3 | 4 | STORE messages tell the Aleph network to store data on behalf of the user. 5 | The data can either be pinned to IPFS or stored in the native Aleph storage system depending 6 | on the content item type.
7 | 8 | Content format 9 | -------------- 10 | 11 | The `content` field of a STORE message must contain the following fields: 12 | 13 | * `address` [str]: The address to which the message belongs. Reserved for future developments. 14 | * `time` [float]: The epoch timestamp of the message. 15 | * `item_type` [str]: `storage` or `ipfs`. Determines the network to use to fetch and store the file. 16 | * `item_hash` [str]: Hash of the file to store. Must be a CIDv0 for IPFS, or a SHA256 hash for native storage. 17 | 18 | Retrieve stored content 19 | ----------------------- 20 | 21 | Users can retrieve uploaded files by using the `/api/v0/storage/raw/{hash}` endpoint. 22 | -------------------------------------------------------------------------------- /docs/protocol/payment.rst: -------------------------------------------------------------------------------- 1 | ******* 2 | Payment 3 | ******* 4 | 5 | .. note:: 6 | 7 | Currently being implemented. 8 | 9 | 10 | Storage and message processing payment providers can be installed as modules. 11 | 12 | There will be a "recurring" one added as well in the future. Details need to be decided. 13 | 14 | - `INCOMING_REGISTER` holds the incoming message providers. 15 | They are processed after checking that the message is valid but before signature verification. 16 | If any provider returns true, the message will be processed further. 17 | - `PROCESSED_REGISTER` holds the processed message providers (actually charge the amounts) 18 | 19 | The MVP network (current implementation of Aleph.im network code) has three providers: 20 | 21 | - **ChannelEndorsement**: an address holding Aleph tokens in one of the underlying chains 22 | endorses this channel and incentivizes (X being the Aleph token count): 23 | 24 | - up to X post/aggregates messages (of up to 100kb each) 25 | - up to Xmb of files 26 | 27 | - **PersonalStorage**: an address holding Aleph tokens can itself post (or have someone post on its behalf) 28 | messages with (X being the Aleph token count): 29 | 30 | - up to X POST/AGGREGATES messages (of up to 100kb each) 31 | - up to Xmb of files in STORE messages 32 | 33 | - **Core**: Core channels (dedicated to identity and security) have their messages free 34 | if they belong to the correct types and pass "anti-spam" checks. 35 | 36 | Ideally (not done yet), a garbage collection process will come and clean the data if the checks 37 | don't pass anymore. 38 | 39 | Those amounts, while high, are possible because the MVP network has full replication and doesn't actually "spend" those tokens. 40 | Once the Aleph token is held on the network, and nodes can actually make the addresses pay 41 | for storage, much smaller amounts will be requested in an "open market" fashion.
42 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinxcontrib-plantuml -------------------------------------------------------------------------------- /mypy.ini: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/pyaleph/e706cbd42ca217a13716534d0f2887dc0626932b/mypy.ini -------------------------------------------------------------------------------- /src/aleph/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import subprocess 3 | 4 | from pkg_resources import DistributionNotFound, get_distribution 5 | 6 | 7 | def _get_git_version() -> str: 8 | output = subprocess.check_output(("git", "describe", "--tags")) 9 | return output.decode().strip() 10 | 11 | 12 | try: 13 | # Change here if project is renamed and does not equal the package name 14 | dist_name = __name__ 15 | __version__ = get_distribution(dist_name).version 16 | except DistributionNotFound: 17 | __version__ = _get_git_version() 18 | finally: 19 | del get_distribution, DistributionNotFound 20 | -------------------------------------------------------------------------------- /src/aleph/cache.py: -------------------------------------------------------------------------------- 1 | from collections import defaultdict 2 | 3 | 4 | class Cache: 5 | def __init__(self): 6 | self._cache = defaultdict(dict) 7 | 8 | def get(self, key, namespace): 9 | return self._cache[namespace].get(key) 10 | 11 | def set(self, key, value, namespace): 12 | self._cache[namespace][key] = value 13 | 14 | def exists(self, key, namespace): 15 | return key in self._cache[namespace] 16 | 17 | def delete_namespace(self, namespace): 18 | if namespace in self._cache: 19 | self._cache[namespace] = {} 20 | 21 | def delete(self, key, namespace): 22 | if self.exists(key, namespace): 23 | del self._cache[namespace][key] 24 | 25 | 26 | # simple in memory cache 27 | # we can't use aiocache here because most ORM methods are not async compatible 28 | cache = Cache() 29 | -------------------------------------------------------------------------------- /src/aleph/chains/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/pyaleph/e706cbd42ca217a13716534d0f2887dc0626932b/src/aleph/chains/__init__.py -------------------------------------------------------------------------------- /src/aleph/chains/abc.py: -------------------------------------------------------------------------------- 1 | import abc 2 | 3 | from configmanager import Config 4 | 5 | from aleph.schemas.pending_messages import BasePendingMessage 6 | 7 | 8 | class Verifier(abc.ABC): 9 | @abc.abstractmethod 10 | async def verify_signature(self, message: BasePendingMessage) -> bool: ... 11 | 12 | 13 | class ChainReader(abc.ABC): 14 | @abc.abstractmethod 15 | async def fetcher(self, config: Config): ... 16 | 17 | 18 | class ChainWriter(ChainReader): 19 | @abc.abstractmethod 20 | async def packer(self, config: Config): ...
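# Illustration (not part of the original module): concrete chain connectors
# implement these ABCs. A minimal, hypothetical verifier could look like:
#
#     class AcceptAllVerifier(Verifier):
#         async def verify_signature(self, message: BasePendingMessage) -> bool:
#             return True  # a real verifier checks message.signature against message.sender
#
# Real implementations (e.g. evm.py, solana.py, substrate.py below) recover the
# signer from the message signature and compare it with the declared sender.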
21 | -------------------------------------------------------------------------------- /src/aleph/chains/assets/ethereum_sc.sol: -------------------------------------------------------------------------------- 1 | pragma solidity ^0.5.11; 2 | 3 | contract AlephSync{ 4 | 5 | event SyncEvent(uint256 timestamp, address addr, string message); 6 | event MessageEvent(uint256 timestamp, address addr, string msgtype, string msgcontent); 7 | 8 | function doEmit(string memory message) public { 9 | emit SyncEvent(block.timestamp, msg.sender, message); 10 | } 11 | 12 | function doMessage(string memory msgtype, string memory msgcontent) public { 13 | emit MessageEvent(block.timestamp, msg.sender, msgtype, msgcontent); 14 | } 15 | 16 | } 17 | -------------------------------------------------------------------------------- /src/aleph/chains/assets/ethereum_sc_abi.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "anonymous": false, 4 | "inputs": [ 5 | { 6 | "indexed": false, 7 | "internalType": "uint256", 8 | "name": "timestamp", 9 | "type": "uint256" 10 | }, 11 | { 12 | "indexed": false, 13 | "internalType": "address", 14 | "name": "addr", 15 | "type": "address" 16 | }, 17 | { 18 | "indexed": false, 19 | "internalType": "string", 20 | "name": "msgtype", 21 | "type": "string" 22 | }, 23 | { 24 | "indexed": false, 25 | "internalType": "string", 26 | "name": "msgcontent", 27 | "type": "string" 28 | } 29 | ], 30 | "name": "MessageEvent", 31 | "type": "event" 32 | }, 33 | { 34 | "anonymous": false, 35 | "inputs": [ 36 | { 37 | "indexed": false, 38 | "internalType": "uint256", 39 | "name": "timestamp", 40 | "type": "uint256" 41 | }, 42 | { 43 | "indexed": false, 44 | "internalType": "address", 45 | "name": "addr", 46 | "type": "address" 47 | }, 48 | { 49 | "indexed": false, 50 | "internalType": "string", 51 | "name": "message", 52 | "type": "string" 53 | } 54 | ], 55 | "name": "SyncEvent", 56 | "type": "event" 57 | }, 58 | { 59 | "constant": false, 60 | "inputs": [ 61 | { 62 | "internalType": "string", 63 | "name": "message", 64 | "type": "string" 65 | } 66 | ], 67 | "name": "doEmit", 68 | "outputs": [], 69 | "payable": false, 70 | "stateMutability": "nonpayable", 71 | "type": "function" 72 | }, 73 | { 74 | "constant": false, 75 | "inputs": [ 76 | { 77 | "internalType": "string", 78 | "name": "msgtype", 79 | "type": "string" 80 | }, 81 | { 82 | "internalType": "string", 83 | "name": "msgcontent", 84 | "type": "string" 85 | } 86 | ], 87 | "name": "doMessage", 88 | "outputs": [], 89 | "payable": false, 90 | "stateMutability": "nonpayable", 91 | "type": "function" 92 | } 93 | ] -------------------------------------------------------------------------------- /src/aleph/chains/bsc.py: -------------------------------------------------------------------------------- 1 | from aleph_message.models import Chain 2 | from configmanager import Config 3 | 4 | from aleph.chains.abc import ChainReader 5 | from aleph.chains.chain_data_service import PendingTxPublisher 6 | from aleph.chains.indexer_reader import AlephIndexerReader 7 | from aleph.types.chain_sync import ChainEventType 8 | from aleph.types.db_session import DbSessionFactory 9 | 10 | 11 | class BscConnector(ChainReader): 12 | def __init__( 13 | self, 14 | session_factory: DbSessionFactory, 15 | pending_tx_publisher: PendingTxPublisher, 16 | ): 17 | self.indexer_reader = AlephIndexerReader( 18 | chain=Chain.BSC, 19 | session_factory=session_factory, 20 | pending_tx_publisher=pending_tx_publisher, 21 | ) 22 | 23 | 
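    # Read MESSAGE sync events emitted by the configured BSC sync contract from
    # the Aleph indexer (see AlephIndexerReader.fetcher).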
async def fetcher(self, config: Config): 24 | await self.indexer_reader.fetcher( 25 | indexer_url=config.aleph.indexer_url.value, 26 | smart_contract_address=config.bsc.sync_contract.value, 27 | event_type=ChainEventType.MESSAGE, 28 | ) 29 | -------------------------------------------------------------------------------- /src/aleph/chains/common.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from aleph.schemas.pending_messages import BasePendingMessage 4 | 5 | LOGGER = logging.getLogger("chains.common") 6 | 7 | 8 | def get_verification_buffer(message: BasePendingMessage) -> bytes: 9 | """ 10 | Returns the serialized string that was signed by the user when sending an Aleph message. 11 | """ 12 | buffer = f"{message.chain.value}\n{message.sender}\n{message.type.value}\n{message.item_hash}" 13 | return buffer.encode("utf-8") 14 | -------------------------------------------------------------------------------- /src/aleph/chains/evm.py: -------------------------------------------------------------------------------- 1 | import functools 2 | import logging 3 | 4 | from eth_account import Account 5 | from eth_account.messages import encode_defunct 6 | 7 | from aleph.chains.common import get_verification_buffer 8 | from aleph.schemas.pending_messages import BasePendingMessage 9 | from aleph.utils import run_in_executor 10 | 11 | from .abc import Verifier 12 | 13 | LOGGER = logging.getLogger("chains.evm") 14 | 15 | 16 | class EVMVerifier(Verifier): 17 | async def verify_signature(self, message: BasePendingMessage) -> bool: 18 | """Verifies a signature of a message, return True if verified, false if not""" 19 | 20 | verification = get_verification_buffer(message) 21 | 22 | message_hash = await run_in_executor( 23 | None, functools.partial(encode_defunct, text=verification.decode("utf-8")) 24 | ) 25 | 26 | verified = False 27 | try: 28 | # we assume the signature is a valid string 29 | address = await run_in_executor( 30 | None, 31 | functools.partial( 32 | Account.recover_message, message_hash, signature=message.signature 33 | ), 34 | ) 35 | if address == message.sender: 36 | verified = True 37 | else: 38 | LOGGER.warning( 39 | "Received bad signature from %s for %s" % (address, message.sender) 40 | ) 41 | return False 42 | 43 | except Exception: 44 | LOGGER.exception("Error processing signature for %s" % message.sender) 45 | verified = False 46 | 47 | return verified 48 | -------------------------------------------------------------------------------- /src/aleph/chains/nuls.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import struct 3 | 4 | from aleph.chains.common import get_verification_buffer 5 | from aleph.schemas.pending_messages import BasePendingMessage 6 | from aleph.utils import run_in_executor 7 | 8 | from .abc import Verifier 9 | from .nuls_aleph_sdk import ( 10 | NulsSignature, 11 | address_from_hash, 12 | hash_from_address, 13 | public_key_to_hash, 14 | ) 15 | 16 | LOGGER = logging.getLogger("chains.nuls") 17 | CHAIN_NAME = "NULS" 18 | 19 | 20 | class NulsConnector(Verifier): 21 | async def verify_signature(self, message: BasePendingMessage) -> bool: 22 | """Verifies a signature of a message, return True if verified, false if not""" 23 | 24 | if message.signature is None: 25 | LOGGER.warning("'%s': missing signature.", message.item_hash) 26 | return False 27 | 28 | sig_raw = bytes(bytearray.fromhex(message.signature)) 29 | sig = NulsSignature(sig_raw) 30 | 31 | 
sender_hash = hash_from_address(message.sender) 32 | (sender_chain_id,) = struct.unpack("h", sender_hash[:2]) 33 | 34 | pub_key_hash = public_key_to_hash(sig.pub_key, sender_chain_id) 35 | 36 | address = address_from_hash(pub_key_hash) 37 | if address != message.sender: 38 | LOGGER.warning( 39 | "Received bad signature from %s for %s" % (address, message.sender) 40 | ) 41 | return False 42 | 43 | verification = get_verification_buffer(message) 44 | try: 45 | result = await run_in_executor(None, sig.verify, verification) 46 | except Exception: 47 | LOGGER.exception("NULS Signature verification error") 48 | result = False 49 | 50 | return result 51 | -------------------------------------------------------------------------------- /src/aleph/chains/solana.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | 4 | import base58 5 | from nacl.exceptions import BadSignatureError 6 | from nacl.signing import VerifyKey 7 | 8 | from aleph.chains.common import get_verification_buffer 9 | from aleph.schemas.pending_messages import BasePendingMessage 10 | 11 | from .abc import Verifier 12 | 13 | LOGGER = logging.getLogger("chains.solana") 14 | CHAIN_NAME = "SOL" 15 | 16 | 17 | class SolanaConnector(Verifier): 18 | async def verify_signature(self, message: BasePendingMessage) -> bool: 19 | """Verify the signature of a message. Returns True if the signature is valid, False otherwise.""" 20 | 21 | if message.signature is None: 22 | LOGGER.warning("'%s': missing signature.", message.item_hash) 23 | return False 24 | 25 | try: 26 | signature = json.loads(message.signature) 27 | sigdata = base58.b58decode(signature["signature"]) 28 | public_key = base58.b58decode(signature["publicKey"]) 29 | except ValueError: 30 | LOGGER.warning("Solana signature deserialization error") 31 | return False 32 | 33 | if signature.get("version", 1) != 1: 34 | LOGGER.warning( 35 | "Unsupported signature version %s" % signature.get("version") 36 | ) 37 | return False 38 | 39 | if message.sender != signature["publicKey"]: 40 | LOGGER.warning("Solana signature source error") 41 | return False 42 | 43 | try: 44 | verify_key = VerifyKey(public_key) 45 | verification_buffer = get_verification_buffer(message) 46 | verif = verify_key.verify(verification_buffer, signature=sigdata) 47 | result = verif == verification_buffer 48 | except BadSignatureError: 49 | result = False 50 | except Exception: 51 | LOGGER.exception("Solana Signature verification error") 52 | result = False 53 | 54 | return result 55 | -------------------------------------------------------------------------------- /src/aleph/chains/substrate.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | 4 | from substrateinterface import Keypair 5 | 6 | from aleph.chains.common import get_verification_buffer 7 | from aleph.schemas.pending_messages import BasePendingMessage 8 | 9 | from .abc import Verifier 10 | 11 | LOGGER = logging.getLogger("chains.substrate") 12 | 13 | 14 | class SubstrateConnector(Verifier): 15 | async def verify_signature(self, message: BasePendingMessage) -> bool: 16 | """Verify the signature of a message. Returns True if the signature is valid, False otherwise.""" 17 | 18 | if message.signature is None: 19 | LOGGER.warning("'%s': missing signature.", message.item_hash) 20 | return False 21 | 22 | try: 23 | signature = json.loads(message.signature) 24 | except Exception: 25 | LOGGER.exception("Substrate signature deserialization error") 26 | return False 27 | 28 | try: 
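# The curve defaults to sr25519; other curves are only logged as unsupported
# here, and the keypair.verify call below is not expected to succeed for them.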
29 | if signature.get("curve", "sr25519") != "sr25519": 30 | LOGGER.warning("Unsupported curve %s" % signature.get("curve")) 31 | except Exception: 32 | LOGGER.exception("Substrate signature Key error") 33 | return False 34 | 35 | try: 36 | keypair = Keypair(ss58_address=message.sender) 37 | verif = (get_verification_buffer(message)).decode("utf-8") 38 | result = keypair.verify(verif, signature["data"]) 39 | except Exception: 40 | LOGGER.exception("Substrate Signature verification error") 41 | result = False 42 | 43 | return result 44 | -------------------------------------------------------------------------------- /src/aleph/cli/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/pyaleph/e706cbd42ca217a13716534d0f2887dc0626932b/src/aleph/cli/__init__.py -------------------------------------------------------------------------------- /src/aleph/db/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/pyaleph/e706cbd42ca217a13716534d0f2887dc0626932b/src/aleph/db/__init__.py -------------------------------------------------------------------------------- /src/aleph/db/accessors/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Database access module. 3 | 4 | This module abstracts queries for the rest of the application. The idea is that any query that 5 | is more complex than `select(ModelClass)` or `session.add(model_instance)` should be abstracted 6 | by a function in this module. 7 | 8 | Suggested practices: 9 | * Functions should usually take a `DbSession` parameter instead of a `DbSessionFactory`. 10 | This allows callers to reuse the same session through multiple calls to this module 11 | if they need it. 12 | * It is a good idea to separate the generation of the queries from their execution, 13 | especially for complex queries. This simplifies the audit of the queries generated by the ORM. 14 | * All the functions should be unit tested. 15 | * 1 file in `db.models` = 1 file in `db.accessors`. This makes it easier to find where queries are implemented 16 | for each model. 
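Illustrative example (assuming a `DbSessionFactory` instance named
`session_factory`; the calls below reuse one session across several
accessor functions):

    with session_factory() as session:
        for pending_tx in get_pending_txs(session=session, limit=100):
            delete_pending_tx(session=session, tx_hash=pending_tx.tx_hash)
        session.commit()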
17 | """ 18 | -------------------------------------------------------------------------------- /src/aleph/db/accessors/cron_jobs.py: -------------------------------------------------------------------------------- 1 | import datetime as dt 2 | from typing import List, Optional 3 | 4 | from sqlalchemy import delete, select, update 5 | 6 | from aleph.db.models.cron_jobs import CronJobDb 7 | from aleph.types.db_session import DbSession 8 | 9 | 10 | def get_cron_jobs(session: DbSession) -> List[CronJobDb]: 11 | select_stmt = select(CronJobDb) 12 | 13 | return (session.execute(select_stmt)).scalars().all() 14 | 15 | 16 | def get_cron_job(session: DbSession, id: str) -> Optional[CronJobDb]: 17 | select_stmt = select(CronJobDb).where(CronJobDb.id == id) 18 | 19 | return (session.execute(select_stmt)).scalar_one_or_none() 20 | 21 | 22 | def update_cron_job(session: DbSession, id: str, last_run: dt.datetime) -> None: 23 | update_stmt = update(CronJobDb).values(last_run=last_run).where(CronJobDb.id == id) 24 | 25 | session.execute(update_stmt) 26 | 27 | 28 | def delete_cron_job(session: DbSession, id: str) -> None: 29 | delete_stmt = delete(CronJobDb).where(CronJobDb.id == id) 30 | 31 | session.execute(delete_stmt) 32 | -------------------------------------------------------------------------------- /src/aleph/db/accessors/peers.py: -------------------------------------------------------------------------------- 1 | import datetime as dt 2 | from typing import Optional, Sequence 3 | 4 | from sqlalchemy import select 5 | from sqlalchemy.dialects.postgresql import insert 6 | 7 | from aleph.toolkit.timestamp import utc_now 8 | from aleph.types.db_session import DbSession 9 | 10 | from ..models.peers import PeerDb, PeerType 11 | 12 | 13 | def get_all_addresses_by_peer_type( 14 | session: DbSession, peer_type: PeerType 15 | ) -> Sequence[str]: 16 | select_peers_stmt = select(PeerDb.address).where(PeerDb.peer_type == peer_type) 17 | 18 | addresses = session.execute(select_peers_stmt) 19 | return addresses.scalars().all() 20 | 21 | 22 | def upsert_peer( 23 | session: DbSession, 24 | peer_id: str, 25 | peer_type: PeerType, 26 | address: str, 27 | source: PeerType, 28 | last_seen: Optional[dt.datetime] = None, 29 | ) -> None: 30 | last_seen = last_seen or utc_now() 31 | 32 | upsert_stmt = ( 33 | insert(PeerDb) 34 | .values( 35 | peer_id=peer_id, 36 | address=address, 37 | peer_type=peer_type, 38 | source=source, 39 | last_seen=last_seen, 40 | ) 41 | .on_conflict_do_update( 42 | constraint="peers_pkey", 43 | set_={"address": address, "source": source, "last_seen": last_seen}, 44 | ) 45 | ) 46 | session.execute(upsert_stmt) 47 | -------------------------------------------------------------------------------- /src/aleph/db/accessors/pending_txs.py: -------------------------------------------------------------------------------- 1 | from typing import Iterable, Optional 2 | 3 | from aleph_message.models import Chain 4 | from sqlalchemy import delete, func, select 5 | from sqlalchemy.dialects.postgresql import insert 6 | from sqlalchemy.orm import selectinload 7 | 8 | from aleph.db.models import ChainTxDb, PendingTxDb 9 | from aleph.types.db_session import DbSession 10 | 11 | 12 | def get_pending_tx(session: DbSession, tx_hash: str) -> Optional[PendingTxDb]: 13 | select_stmt = ( 14 | select(PendingTxDb) 15 | .where(PendingTxDb.tx_hash == tx_hash) 16 | .options(selectinload(PendingTxDb.tx)) 17 | ) 18 | return (session.execute(select_stmt)).scalar_one_or_none() 19 | 20 | 21 | def get_pending_txs(session: DbSession, 
limit: int = 200) -> Iterable[PendingTxDb]: 22 | select_stmt = ( 23 | select(PendingTxDb) 24 | .join(ChainTxDb, PendingTxDb.tx_hash == ChainTxDb.hash) 25 | .order_by(ChainTxDb.datetime.asc()) 26 | .limit(limit) 27 | .options(selectinload(PendingTxDb.tx)) 28 | ) 29 | return (session.execute(select_stmt)).scalars() 30 | 31 | 32 | def count_pending_txs(session: DbSession, chain: Optional[Chain] = None) -> int: 33 | select_stmt = select(func.count(PendingTxDb.tx_hash)) 34 | if chain: 35 | select_stmt = select_stmt.join( 36 | ChainTxDb, PendingTxDb.tx_hash == ChainTxDb.hash 37 | ).where(ChainTxDb.chain == chain) 38 | 39 | return (session.execute(select_stmt)).scalar_one() 40 | 41 | 42 | def upsert_pending_tx(session: DbSession, tx_hash: str) -> None: 43 | upsert_stmt = insert(PendingTxDb).values(tx_hash=tx_hash).on_conflict_do_nothing() 44 | session.execute(upsert_stmt) 45 | 46 | 47 | def delete_pending_tx(session: DbSession, tx_hash: str) -> None: 48 | delete_stmt = delete(PendingTxDb).where(PendingTxDb.tx_hash == tx_hash) 49 | session.execute(delete_stmt) 50 | -------------------------------------------------------------------------------- /src/aleph/db/models/__init__.py: -------------------------------------------------------------------------------- 1 | from .aggregates import * # noqa 2 | from .balances import * # noqa 3 | from .base import Base # noqa 4 | from .chains import * # noqa 5 | from .files import * # noqa 6 | from .messages import * # noqa 7 | from .peers import * # noqa 8 | from .pending_messages import * # noqa 9 | from .pending_txs import * # noqa 10 | from .posts import * # noqa 11 | from .vms import * # noqa 12 | -------------------------------------------------------------------------------- /src/aleph/db/models/account_costs.py: -------------------------------------------------------------------------------- 1 | from decimal import Decimal 2 | from typing import Optional 3 | 4 | from aleph_message.models import PaymentType 5 | from sqlalchemy import DECIMAL, BigInteger, Column, ForeignKey, String, UniqueConstraint 6 | from sqlalchemy_utils.types.choice import ChoiceType 7 | 8 | from aleph.types.cost import CostType 9 | 10 | from .base import Base 11 | 12 | 13 | class AccountCostsDb(Base): 14 | __tablename__ = "account_costs" 15 | 16 | id: int = Column(BigInteger, primary_key=True) 17 | owner: str = Column(String, nullable=False, index=True) 18 | # item_hash: str = Column(String, nullable=False) 19 | item_hash: str = Column( 20 | ForeignKey("messages.item_hash", ondelete="CASCADE"), nullable=False 21 | ) 22 | type: CostType = Column(ChoiceType(CostType), nullable=False) 23 | name: str = Column(String, nullable=False) 24 | ref: Optional[str] = Column(String, nullable=True) 25 | payment_type: PaymentType = Column(ChoiceType(PaymentType), nullable=False) 26 | cost_hold: Decimal = Column(DECIMAL, nullable=False) 27 | cost_stream: Decimal = Column(DECIMAL, nullable=False) 28 | 29 | __table_args__ = (UniqueConstraint("owner", "item_hash", "type", "name"),) 30 | -------------------------------------------------------------------------------- /src/aleph/db/models/aggregates.py: -------------------------------------------------------------------------------- 1 | import datetime as dt 2 | from typing import Any 3 | 4 | from sqlalchemy import TIMESTAMP, Boolean, Column, ForeignKey, Index, String 5 | from sqlalchemy.dialects.postgresql import JSONB 6 | from sqlalchemy.orm import relationship 7 | 8 | from .base import Base 9 | 10 | 11 | class AggregateElementDb(Base): 12 | """ 13 | 
The individual message contents that make up an aggregate. 14 | 15 | Aggregates are compacted in the `aggregates` table for use by the API; this table 16 | exists only to keep track of the history of an aggregate and to recompute it in case 17 | messages are received out of order. 18 | """ 19 | 20 | __tablename__ = "aggregate_elements" 21 | 22 | item_hash: str = Column(String, primary_key=True) 23 | key: str = Column(String, nullable=False) 24 | owner: str = Column(String, nullable=False) 25 | content: Any = Column(JSONB, nullable=False) 26 | creation_datetime: dt.datetime = Column(TIMESTAMP(timezone=True), nullable=False) 27 | 28 | __table_args__ = ( 29 | Index("ix_time_desc", creation_datetime.desc()), 30 | Index("ix_key_owner", key, owner), 31 | ) 32 | 33 | 34 | class AggregateDb(Base): 35 | """ 36 | Compacted aggregates, to be served to users. 37 | 38 | Each row of this table contains an aggregate as it stands up to its last revision. 39 | """ 40 | 41 | __tablename__ = "aggregates" 42 | 43 | key: str = Column(String, primary_key=True) 44 | owner: str = Column(String, primary_key=True) 45 | content: Any = Column(JSONB, nullable=False) 46 | creation_datetime: dt.datetime = Column(TIMESTAMP(timezone=True), nullable=False) 47 | last_revision_hash: str = Column( 48 | ForeignKey(AggregateElementDb.item_hash), nullable=False 49 | ) 50 | dirty = Column(Boolean, nullable=False) 51 | 52 | __table_args__ = (Index("ix_aggregates_owner", owner),) 53 | 54 | last_revision: AggregateElementDb = relationship(AggregateElementDb) 55 | -------------------------------------------------------------------------------- /src/aleph/db/models/balances.py: -------------------------------------------------------------------------------- 1 | import datetime as dt 2 | from decimal import Decimal 3 | from typing import Optional 4 | 5 | from aleph_message.models import Chain 6 | from sqlalchemy import ( 7 | DECIMAL, 8 | TIMESTAMP, 9 | BigInteger, 10 | Column, 11 | Integer, 12 | String, 13 | UniqueConstraint, 14 | ) 15 | from sqlalchemy.sql import func 16 | from sqlalchemy_utils.types.choice import ChoiceType 17 | 18 | from .base import Base 19 | 20 | 21 | class AlephBalanceDb(Base): 22 | __tablename__ = "balances" 23 | 24 | id: int = Column(BigInteger, primary_key=True) 25 | 26 | address: str = Column(String, nullable=False, index=True) 27 | chain: Chain = Column(ChoiceType(Chain), nullable=False) 28 | dapp: Optional[str] = Column(String, nullable=True) 29 | eth_height: int = Column(Integer, nullable=False) 30 | balance: Decimal = Column(DECIMAL, nullable=False) 31 | last_update: dt.datetime = Column( 32 | TIMESTAMP(timezone=True), 33 | nullable=False, 34 | server_default=func.now(), 35 | onupdate=func.now(), 36 | ) 37 | 38 | __table_args__ = ( 39 | UniqueConstraint( 40 | "address", "chain", "dapp", name="balances_address_chain_dapp_uindex" 41 | ), 42 | ) 43 | -------------------------------------------------------------------------------- /src/aleph/db/models/cron_jobs.py: -------------------------------------------------------------------------------- 1 | import datetime as dt 2 | 3 | from sqlalchemy import TIMESTAMP, Column, Integer, String 4 | 5 | from .base import Base 6 | 7 | 8 | class CronJobDb(Base): 9 | __tablename__ = "cron_jobs" 10 | 11 | id: str = Column(String, primary_key=True) 12 | # Interval is specified in seconds 13 | interval: int = Column(Integer, nullable=False) 14 | last_run: dt.datetime = Column(TIMESTAMP(timezone=True), nullable=False) 15 | 
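Putting `CronJobDb` together with the accessors in `db/accessors/cron_jobs.py` above, a scheduler loop only needs the interval and the last run time to decide whether a job is due. A minimal sketch (the `session_factory` argument and the scheduling policy are illustrative, not part of the repository):

```python
import datetime as dt

from aleph.db.accessors.cron_jobs import get_cron_jobs, update_cron_job
from aleph.toolkit.timestamp import utc_now


def run_due_jobs(session_factory) -> None:
    # A job is due when its interval (in seconds) has elapsed since last_run.
    with session_factory() as session:
        for job in get_cron_jobs(session):
            if utc_now() >= job.last_run + dt.timedelta(seconds=job.interval):
                # ... run the job itself here ...
                update_cron_job(session, id=job.id, last_run=utc_now())
        session.commit()
```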
-------------------------------------------------------------------------------- /src/aleph/db/models/peers.py: -------------------------------------------------------------------------------- 1 | import datetime as dt 2 | from enum import Enum 3 | 4 | from sqlalchemy import TIMESTAMP, Column, String 5 | from sqlalchemy_utils.types.choice import ChoiceType 6 | 7 | from .base import Base 8 | 9 | 10 | class PeerType(str, Enum): 11 | HTTP = "HTTP" 12 | IPFS = "IPFS" 13 | P2P = "P2P" 14 | 15 | 16 | class PeerDb(Base): 17 | __tablename__ = "peers" 18 | 19 | peer_id = Column(String, primary_key=True) 20 | peer_type: PeerType = Column(ChoiceType(PeerType), primary_key=True) 21 | address = Column(String, nullable=False) 22 | source: PeerType = Column(ChoiceType(PeerType), nullable=False) 23 | last_seen: dt.datetime = Column(TIMESTAMP(timezone=True), nullable=False) 24 | -------------------------------------------------------------------------------- /src/aleph/db/models/pending_txs.py: -------------------------------------------------------------------------------- 1 | from sqlalchemy import Column, ForeignKey 2 | from sqlalchemy.orm import relationship 3 | 4 | from .base import Base 5 | from .chains import ChainTxDb 6 | 7 | 8 | class PendingTxDb(Base): 9 | __tablename__ = "pending_txs" 10 | 11 | tx_hash: str = Column(ForeignKey("chain_txs.hash"), primary_key=True) 12 | 13 | tx: "ChainTxDb" = relationship("ChainTxDb") 14 | -------------------------------------------------------------------------------- /src/aleph/db/models/posts.py: -------------------------------------------------------------------------------- 1 | import datetime as dt 2 | from typing import Any, Optional 3 | 4 | from sqlalchemy import TIMESTAMP, Column, ForeignKey, String 5 | from sqlalchemy.dialects.postgresql import JSONB 6 | 7 | from aleph.types.channel import Channel 8 | 9 | from .base import Base 10 | 11 | 12 | class PostDb(Base): 13 | __tablename__ = "posts" 14 | 15 | item_hash: str = Column(String, primary_key=True) 16 | owner: str = Column(String, nullable=False, index=True) 17 | type: Optional[str] = Column(String, nullable=True, index=True) 18 | ref: Optional[str] = Column(String, nullable=True) 19 | amends: Optional[str] = Column( 20 | ForeignKey("posts.item_hash"), nullable=True, index=True 21 | ) 22 | channel: Optional[Channel] = Column(String, nullable=True) 23 | content: Any = Column(JSONB, nullable=False) 24 | creation_datetime: dt.datetime = Column(TIMESTAMP(timezone=True), nullable=False) 25 | 26 | latest_amend: Optional[str] = Column(String, nullable=True) 27 | -------------------------------------------------------------------------------- /src/aleph/exceptions.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | 4 | class AlephException(Exception): ... 5 | 6 | 7 | class AlephStorageException(AlephException): 8 | """ 9 | Base exception class for all errors related to the storage 10 | and retrieval of Aleph messages. 11 | """ 12 | 13 | ... 14 | 15 | 16 | class InvalidConfigException(AlephException): ... 17 | 18 | 19 | class KeyNotFoundException(AlephException): ... 20 | 21 | 22 | class InvalidContent(AlephStorageException): 23 | """ 24 | The content requested by the user is invalid. Examples: 25 | * its integrity is compromised 26 | * it does not match the Aleph message specification. 27 | """ 28 | 29 | ... 
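# A caller can treat these subclasses differently: InvalidContent is permanent
# (the content itself is bad and can be rejected), while
# ContentCurrentlyUnavailable below signals a transient condition worth retrying.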
30 | 31 | 32 | class ContentCurrentlyUnavailable(AlephStorageException): 33 | """ 34 | The content is currently unavailable, for example because of a 35 | synchronisation issue. 36 | """ 37 | 38 | ... 39 | 40 | 41 | class UnknownHashError(AlephException): ... 42 | -------------------------------------------------------------------------------- /src/aleph/handlers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/pyaleph/e706cbd42ca217a13716534d0f2887dc0626932b/src/aleph/handlers/__init__.py -------------------------------------------------------------------------------- /src/aleph/handlers/content/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/pyaleph/e706cbd42ca217a13716534d0f2887dc0626932b/src/aleph/handlers/content/__init__.py -------------------------------------------------------------------------------- /src/aleph/jobs/__init__.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from multiprocessing import Process 3 | from typing import Coroutine, List 4 | 5 | from aleph.jobs.fetch_pending_messages import fetch_pending_messages_subprocess 6 | from aleph.jobs.process_pending_messages import ( 7 | fetch_and_process_messages_task, 8 | pending_messages_subprocess, 9 | ) 10 | from aleph.jobs.process_pending_txs import handle_txs_task, pending_txs_subprocess 11 | from aleph.jobs.reconnect_ipfs import reconnect_ipfs_job 12 | from aleph.services.ipfs import IpfsService 13 | from aleph.types.db_session import DbSessionFactory 14 | 15 | LOGGER = logging.getLogger("jobs") 16 | 17 | 18 | def start_jobs( 19 | config, 20 | session_factory: DbSessionFactory, 21 | ipfs_service: IpfsService, 22 | use_processes=True, 23 | ) -> List[Coroutine]: 24 | LOGGER.info("starting jobs") 25 | tasks: List[Coroutine] = [] 26 | 27 | if use_processes: 28 | config_values = config.dump_values() 29 | p1 = Process( 30 | target=fetch_pending_messages_subprocess, 31 | args=(config_values,), 32 | ) 33 | p2 = Process( 34 | target=pending_messages_subprocess, 35 | args=(config_values,), 36 | ) 37 | p3 = Process( 38 | target=pending_txs_subprocess, 39 | args=(config_values,), 40 | ) 41 | p1.start() 42 | p2.start() 43 | p3.start() 44 | else: 45 | tasks.append(fetch_and_process_messages_task(config=config)) 46 | tasks.append(handle_txs_task(config)) 47 | 48 | if config.ipfs.enabled.value: 49 | tasks.append( 50 | reconnect_ipfs_job( 51 | config=config, 52 | session_factory=session_factory, 53 | ipfs_service=ipfs_service, 54 | ) 55 | ) 56 | 57 | return tasks 58 | -------------------------------------------------------------------------------- /src/aleph/jobs/reconnect_ipfs.py: -------------------------------------------------------------------------------- 1 | """ 2 | Job in charge of reconnecting to IPFS peers periodically. 
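Peers listed in the node configuration are reconnected first, then peers
discovered through the peers table, skipping the node's own address.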
3 | """ 4 | 5 | import asyncio 6 | import logging 7 | 8 | import aioipfs 9 | from configmanager import Config 10 | 11 | from aleph.db.accessors.peers import get_all_addresses_by_peer_type 12 | from aleph.db.models import PeerType 13 | from aleph.services.ipfs import IpfsService 14 | from aleph.types.db_session import DbSessionFactory 15 | 16 | LOGGER = logging.getLogger("jobs.reconnect_ipfs") 17 | 18 | 19 | async def reconnect_ipfs_job( 20 | config: Config, session_factory: DbSessionFactory, ipfs_service: IpfsService 21 | ): 22 | from aleph.services.utils import get_IP 23 | 24 | my_ip = await get_IP() 25 | await asyncio.sleep(2) 26 | while True: 27 | try: 28 | LOGGER.info("Reconnecting to peers") 29 | for peer in config.ipfs.peers.value: 30 | try: 31 | ret = await ipfs_service.connect(peer) 32 | if "Strings" in ret: 33 | LOGGER.info("\n".join(ret["Strings"])) 34 | except aioipfs.APIError: 35 | LOGGER.warning("Can't reconnect to %s" % peer) 36 | 37 | with session_factory() as session: 38 | peers = get_all_addresses_by_peer_type( 39 | session=session, peer_type=PeerType.IPFS 40 | ) 41 | 42 | for peer in peers: 43 | if peer in config.ipfs.peers.value: 44 | continue 45 | 46 | if my_ip in peer: 47 | continue 48 | 49 | try: 50 | ret = await ipfs_service.connect(peer) 51 | if ret and "Strings" in ret: 52 | LOGGER.info("\n".join(ret["Strings"])) 53 | except aioipfs.APIError: 54 | LOGGER.warning("Can't reconnect to %s" % peer) 55 | 56 | except Exception: 57 | LOGGER.exception("Error reconnecting to peers") 58 | 59 | await asyncio.sleep(config.ipfs.reconnect_delay.value) 60 | -------------------------------------------------------------------------------- /src/aleph/permissions.py: -------------------------------------------------------------------------------- 1 | from aleph_message.models import MessageType 2 | 3 | from aleph.db.accessors.aggregates import get_aggregate_by_key 4 | from aleph.db.models import MessageDb 5 | from aleph.types.db_session import DbSession 6 | 7 | 8 | async def check_sender_authorization(session: DbSession, message: MessageDb) -> bool: 9 | """Checks a content against a message to verify if sender is authorized. 10 | 11 | TODO: implement "security" aggregate key check. 12 | """ 13 | 14 | content = message.parsed_content 15 | 16 | sender = message.sender 17 | address = content.address 18 | 19 | # if sender is the content address, all good. 20 | if sender == address: 21 | return True 22 | 23 | aggregate = get_aggregate_by_key( 24 | session=session, key="security", owner=address 25 | ) # do we need anything else here? 26 | 27 | if not aggregate: 28 | return False 29 | 30 | authorizations = aggregate.content.get("authorizations", []) 31 | 32 | for auth in authorizations: 33 | if auth.get("address", "") != sender: 34 | continue # not applicable, move on. 
35 | 36 | if auth.get("chain") and message.chain != auth.get("chain"): 37 | continue 38 | 39 | channels = auth.get("channels", []) 40 | mtypes = auth.get("types", []) 41 | ptypes = auth.get("post_types", []) 42 | akeys = auth.get("aggregate_keys", []) 43 | 44 | if len(channels) and message.channel not in channels: 45 | continue 46 | 47 | if len(mtypes) and message.type not in mtypes: 48 | continue 49 | 50 | if message.type == MessageType.post: 51 | if len(ptypes) and message.parsed_content.type not in ptypes: 52 | continue 53 | 54 | if message.type == MessageType.aggregate: 55 | if len(akeys) and message.parsed_content.key not in akeys: 56 | continue 57 | 58 | return True 59 | 60 | return False 61 | -------------------------------------------------------------------------------- /src/aleph/schemas/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/pyaleph/e706cbd42ca217a13716534d0f2887dc0626932b/src/aleph/schemas/__init__.py -------------------------------------------------------------------------------- /src/aleph/schemas/api/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/pyaleph/e706cbd42ca217a13716534d0f2887dc0626932b/src/aleph/schemas/api/__init__.py -------------------------------------------------------------------------------- /src/aleph/schemas/api/costs.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | from pydantic import BaseModel, ConfigDict, field_validator 4 | 5 | from aleph.toolkit.costs import format_cost_str 6 | 7 | 8 | class EstimatedCostDetailResponse(BaseModel): 9 | model_config = ConfigDict(from_attributes=True) 10 | 11 | type: str 12 | name: str 13 | cost_hold: str 14 | cost_stream: str 15 | 16 | @field_validator("cost_hold", "cost_stream", mode="before") 17 | def check_format_price(cls, v): 18 | return format_cost_str(v) 19 | 20 | 21 | class EstimatedCostsResponse(BaseModel): 22 | model_config = ConfigDict(from_attributes=True) 23 | 24 | required_tokens: float 25 | payment_type: str 26 | cost: str 27 | detail: List[EstimatedCostDetailResponse] 28 | -------------------------------------------------------------------------------- /src/aleph/schemas/chains/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/pyaleph/e706cbd42ca217a13716534d0f2887dc0626932b/src/aleph/schemas/chains/__init__.py -------------------------------------------------------------------------------- /src/aleph/schemas/chains/indexer_response.py: -------------------------------------------------------------------------------- 1 | """ 2 | Schemas for the generic Aleph message indexer. 3 | """ 4 | 5 | import datetime as dt 6 | from enum import Enum 7 | from typing import Annotated, List, Protocol, Tuple 8 | 9 | from pydantic import BaseModel, BeforeValidator, Field 10 | 11 | 12 | class GenericMessageEvent(Protocol): 13 | @property 14 | def address(self) -> str: ... 15 | @property 16 | def type(self) -> str: ... 17 | @property 18 | def content(self) -> str: ... 19 | @property 20 | def timestamp_seconds(self) -> float: ... 
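# GenericMessageEvent is a structural typing Protocol: MessageEvent below and
# the Tezos MessageEventPayload both satisfy it without inheriting from it,
# keeping the message-processing code indexer-agnostic.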
21 | 22 | 23 | class IndexerBlockchain(str, Enum): 24 | BSC = "bsc" 25 | ETHEREUM = "ethereum" 26 | SOLANA = "solana" 27 | 28 | 29 | class EntityType(str, Enum): 30 | BLOCK = "block" 31 | TRANSACTION = "transaction" 32 | LOG = "log" 33 | STATE = "state" 34 | 35 | 36 | def split_datetime_ranges(v): 37 | if isinstance(v, str): 38 | return v.split("/") 39 | return v 40 | 41 | 42 | DateTimeRange = Annotated[ 43 | Tuple[dt.datetime, dt.datetime], BeforeValidator(split_datetime_ranges) 44 | ] 45 | 46 | 47 | class AccountEntityState(BaseModel): 48 | blockchain: IndexerBlockchain 49 | type: EntityType 50 | indexer: str 51 | account: str 52 | completeHistory: bool 53 | progress: float 54 | pending: List[DateTimeRange] 55 | processed: List[DateTimeRange] 56 | 57 | 58 | class IndexerAccountStateResponseData(BaseModel): 59 | state: List[AccountEntityState] 60 | 61 | 62 | class IndexerAccountStateResponse(BaseModel): 63 | data: IndexerAccountStateResponseData 64 | 65 | 66 | class IndexerEvent(BaseModel): 67 | id: str 68 | timestamp: float 69 | address: str 70 | height: int 71 | transaction: str 72 | 73 | @property 74 | def timestamp_seconds(self) -> float: 75 | return self.timestamp / 1000 76 | 77 | 78 | class MessageEvent(IndexerEvent): 79 | type: str 80 | content: str 81 | 82 | 83 | class SyncEvent(IndexerEvent): 84 | message: str 85 | 86 | 87 | class IndexerEventResponseData(BaseModel): 88 | message_events: List[MessageEvent] = Field( 89 | alias="messageEvents", default_factory=list 90 | ) 91 | sync_events: List[SyncEvent] = Field(alias="syncEvents", default_factory=list) 92 | 93 | 94 | class IndexerEventResponse(BaseModel): 95 | data: IndexerEventResponseData 96 | -------------------------------------------------------------------------------- /src/aleph/schemas/chains/sync_events.py: -------------------------------------------------------------------------------- 1 | import datetime as dt 2 | from typing import Annotated, List, Literal, Optional, Union 3 | 4 | from aleph_message.models import Chain, ItemHash, ItemType, MessageType 5 | from pydantic import BaseModel, ConfigDict, Field, field_validator 6 | 7 | from aleph.types.chain_sync import ChainSyncProtocol 8 | from aleph.types.channel import Channel 9 | 10 | 11 | class OnChainMessage(BaseModel): 12 | model_config = ConfigDict(from_attributes=True) 13 | 14 | sender: str 15 | chain: Chain 16 | signature: Optional[str] = None 17 | type: MessageType 18 | item_content: Optional[str] = None 19 | item_type: ItemType 20 | item_hash: ItemHash 21 | time: float 22 | channel: Optional[Channel] = None 23 | 24 | @field_validator("time", mode="before") 25 | def check_time(cls, v, info): 26 | if isinstance(v, dt.datetime): 27 | return v.timestamp() 28 | 29 | return v 30 | 31 | 32 | class OnChainContent(BaseModel): 33 | messages: List[OnChainMessage] 34 | 35 | 36 | class OnChainSyncEventPayload(BaseModel): 37 | protocol: Literal[ChainSyncProtocol.ON_CHAIN_SYNC] 38 | version: int 39 | content: OnChainContent 40 | 41 | 42 | class OffChainSyncEventPayload(BaseModel): 43 | protocol: Literal[ChainSyncProtocol.OFF_CHAIN_SYNC] 44 | version: int 45 | content: str 46 | 47 | 48 | SyncEventPayload = Annotated[ 49 | Union[OnChainSyncEventPayload, OffChainSyncEventPayload], 50 | Field(discriminator="protocol"), 51 | ] 52 | -------------------------------------------------------------------------------- /src/aleph/schemas/chains/tezos_indexer_response.py: -------------------------------------------------------------------------------- 1 | import datetime as dt 2 | from enum 
import Enum 3 | from typing import Generic, List, TypeVar 4 | 5 | from pydantic import BaseModel, ConfigDict, Field 6 | 7 | PayloadType = TypeVar("PayloadType") 8 | 9 | 10 | class SyncStatus(str, Enum): 11 | SYNCED = "synced" 12 | IN_PROGRESS = "in_progress" 13 | DOWN = "down" 14 | 15 | 16 | class IndexerStatus(BaseModel): 17 | oldest_block: str = Field(alias="oldestBlock") 18 | recent_block: str = Field(alias="recentBlock") 19 | status: SyncStatus 20 | 21 | 22 | class IndexerStats(BaseModel): 23 | total_events: int = Field(alias="totalEvents") 24 | 25 | 26 | class IndexerEvent(BaseModel, Generic[PayloadType]): 27 | source: str 28 | timestamp: dt.datetime 29 | block_level: int = Field(alias="blockLevel") 30 | operation_hash: str = Field(alias="operationHash") 31 | type: str 32 | payload: PayloadType 33 | 34 | 35 | class MessageEventPayload(BaseModel): 36 | model_config = ConfigDict(from_attributes=True, populate_by_name=True) 37 | 38 | timestamp: float 39 | addr: str 40 | message_type: str = Field(alias="msgtype") 41 | message_content: str = Field(alias="msgcontent") 42 | 43 | # The following properties are defined for interoperability with the generic 44 | # MessageEvent class. 45 | @property 46 | def address(self) -> str: 47 | return self.addr 48 | 49 | @property 50 | def type(self) -> str: 51 | return self.message_type 52 | 53 | @property 54 | def content(self) -> str: 55 | return self.message_content 56 | 57 | @property 58 | def timestamp_seconds(self) -> float: 59 | return self.timestamp 60 | 61 | 62 | IndexerMessageEvent = IndexerEvent[MessageEventPayload] 63 | 64 | 65 | IndexerEventType = TypeVar("IndexerEventType", bound=IndexerEvent) 66 | 67 | 68 | class IndexerResponseData(BaseModel, Generic[IndexerEventType]): 69 | index_status: IndexerStatus = Field(alias="indexStatus") 70 | stats: IndexerStats 71 | events: List[IndexerEventType] 72 | 73 | 74 | class IndexerResponse(BaseModel, Generic[IndexerEventType]): 75 | data: IndexerResponseData[IndexerEventType] 76 | -------------------------------------------------------------------------------- /src/aleph/schemas/chains/tx_context.py: -------------------------------------------------------------------------------- 1 | from aleph.schemas.message_confirmation import MessageConfirmation 2 | 3 | 4 | # At the moment, confirmation = chain transaction. This might change, but in the meantime 5 | # having TxContext inherit MessageConfirmation avoids code duplication. 6 | class TxContext(MessageConfirmation): 7 | pass 8 | -------------------------------------------------------------------------------- /src/aleph/schemas/message_confirmation.py: -------------------------------------------------------------------------------- 1 | from aleph_message.models import Chain 2 | from pydantic import BaseModel, Field 3 | 4 | 5 | class MessageConfirmation(BaseModel): 6 | chain: Chain = Field( 7 | ..., description="Chain from which the confirmation was fetched." 8 | ) 9 | height: int = Field( 10 | ..., description="Block in which the confirmation was published." 
11 | ) 12 | hash: str = Field( 13 | ..., 14 | description="Hash of the transaction/block in which the confirmation was published.", 15 | ) 16 | time: float = Field( 17 | ..., 18 | description="Transaction timestamp, in Unix time (number of seconds since epoch).", 19 | ) 20 | publisher: str = Field(..., description="Publisher of the confirmation on chain.") 21 | -------------------------------------------------------------------------------- /src/aleph/schemas/message_content.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from enum import Enum 3 | from typing import Any, Optional, Union 4 | 5 | 6 | class ContentSource(str, Enum): 7 | """ 8 | Defines the source of the content of a message. 9 | 10 | Message content can be fetched from different sources depending on the procedure followed by the user sending 11 | a particular message. This enum determines where the node found the content. 12 | """ 13 | 14 | DB = "DB" 15 | P2P = "P2P" 16 | IPFS = "IPFS" 17 | INLINE = "inline" 18 | 19 | 20 | @dataclass 21 | class StoredContent: 22 | hash: str 23 | source: Optional[ContentSource] 24 | 25 | 26 | @dataclass 27 | class RawContent(StoredContent): 28 | value: bytes 29 | 30 | def __len__(self): 31 | return len(self.value) 32 | 33 | 34 | @dataclass 35 | class MessageContent(StoredContent): 36 | value: Any 37 | raw_value: Union[bytes, str] 38 | -------------------------------------------------------------------------------- /src/aleph/services/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/pyaleph/e706cbd42ca217a13716534d0f2887dc0626932b/src/aleph/services/__init__.py -------------------------------------------------------------------------------- /src/aleph/services/cache/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/pyaleph/e706cbd42ca217a13716534d0f2887dc0626932b/src/aleph/services/cache/__init__.py -------------------------------------------------------------------------------- /src/aleph/services/cache/materialized_views.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | 4 | from aleph.db.accessors.messages import refresh_address_stats_mat_view 5 | from aleph.types.db_session import DbSessionFactory 6 | 7 | LOGGER = logging.getLogger(__name__) 8 | 9 | 10 | async def refresh_cache_materialized_views(session_factory: DbSessionFactory) -> None: 11 | """ 12 | Refresh DB materialized views used as caches, periodically. 13 | 14 | Materialized views are a simple solution to cache expensive DB queries, at the cost 15 | of refreshing them manually once in a while. This background task does exactly that. 16 | Note that materialized views used by the API should support concurrent refreshing 17 | to reduce latency. 
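The loop below refreshes every 10 minutes, trading freshness of the cached
views for reduced database load.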
18 | """ 19 | 20 | while True: 21 | try: 22 | with session_factory() as session: 23 | refresh_address_stats_mat_view(session) 24 | session.commit() 25 | LOGGER.info("Refreshed address stats materialized view") 26 | 27 | except Exception: 28 | LOGGER.exception("Error refreshing cache materialized views") 29 | 30 | await asyncio.sleep(10 * 60) 31 | -------------------------------------------------------------------------------- /src/aleph/services/ipfs/__init__.py: -------------------------------------------------------------------------------- 1 | from .service import IpfsService 2 | 3 | __all__ = ["IpfsService"] 4 | -------------------------------------------------------------------------------- /src/aleph/services/ipfs/common.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import aioipfs 4 | from configmanager import Config 5 | 6 | 7 | async def get_base_url(config): 8 | return "http://{}:{}".format(config.ipfs.host.value, config.ipfs.port.value) 9 | 10 | 11 | def make_ipfs_client(config: Config, timeout: int = 60) -> aioipfs.AsyncIPFS: 12 | host = config.ipfs.host.value 13 | port = config.ipfs.port.value 14 | 15 | return aioipfs.AsyncIPFS( 16 | host=host, 17 | port=port, 18 | read_timeout=timeout, 19 | conns_max=25, 20 | conns_max_per_host=10, 21 | debug=(config.logging.level.value <= logging.DEBUG), 22 | ) 23 | 24 | 25 | def get_cid_version(ipfs_hash: str) -> int: 26 | if ipfs_hash.startswith("Qm") and 44 <= len(ipfs_hash) <= 46: # CIDv0 27 | return 0 28 | 29 | if ipfs_hash.startswith("bafy") and len(ipfs_hash) == 59: # CIDv1 30 | return 1 31 | 32 | raise ValueError(f"Not a IPFS hash: '{ipfs_hash}'.") 33 | -------------------------------------------------------------------------------- /src/aleph/services/ipfs/pubsub.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | 4 | from aleph.toolkit.timestamp import utc_now 5 | from aleph.types.message_status import InvalidMessageException 6 | 7 | from .service import IpfsService 8 | 9 | LOGGER = logging.getLogger(__name__) 10 | 11 | 12 | # TODO: add type hint for message_processor, it currently causes a cyclical import 13 | async def incoming_channel( 14 | ipfs_service: IpfsService, topic: str, message_publisher 15 | ) -> None: 16 | from aleph.network import decode_pubsub_message 17 | 18 | while True: 19 | try: 20 | async for mvalue in ipfs_service.sub(topic): 21 | try: 22 | message_dict = await decode_pubsub_message(mvalue["data"]) 23 | await message_publisher.add_pending_message( 24 | message_dict=message_dict, reception_time=utc_now() 25 | ) 26 | except InvalidMessageException: 27 | LOGGER.warning(f"Invalid message {mvalue}") 28 | except Exception: 29 | LOGGER.exception("Exception in IPFS pubsub, reconnecting in 100 ms...") 30 | await asyncio.sleep(0.1) 31 | -------------------------------------------------------------------------------- /src/aleph/services/keys.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | 3 | from aleph.toolkit.libp2p_stubs.crypto.keys import KeyPair 4 | from aleph.toolkit.libp2p_stubs.crypto.rsa import create_new_key_pair 5 | 6 | 7 | def generate_keypair(print_key: bool) -> KeyPair: 8 | """ 9 | Generates a new key pair for the node. 
10 | """ 11 | key_pair = create_new_key_pair() 12 | if print_key: 13 | # Print the armored key pair for archiving 14 | print(key_pair.private_key.impl.export_key().decode("utf-8")) # type: ignore[attr-defined] 15 | 16 | return key_pair 17 | 18 | 19 | def save_keys(key_pair: KeyPair, key_dir: str) -> None: 20 | """ 21 | Saves the private and public keys to the specified directory. The keys are stored in 2 formats: 22 | - The private key is stored in PKCS8 DER (binary) format for compatibility with the Aleph.im P2P service. 23 | - The public key is stored in PEM format. 24 | 25 | TODO review: do we really need to store the public key? If so, in which format, PEM or DER? 26 | """ 27 | # Create the key directory if it does not exist 28 | if os.path.exists(key_dir): 29 | if not os.path.isdir(key_dir): 30 | raise NotADirectoryError(f"Key directory ({key_dir}) is not a directory") 31 | else: 32 | os.makedirs(key_dir) 33 | 34 | # Save the private and public keys in the key directory, as well as the serialized private key for p2pd. 35 | private_key_path = os.path.join(key_dir, "node-secret.pkcs8.der") 36 | public_key_path = os.path.join(key_dir, "node-pub.key") 37 | 38 | with open(private_key_path, "wb") as key_file: 39 | key_file.write(key_pair.private_key.impl.export_key(format="DER", pkcs=8)) # type: ignore[attr-defined] 40 | 41 | with open(public_key_path, "wb") as key_file: 42 | key_file.write(key_pair.public_key.impl.export_key()) # type: ignore[attr-defined] 43 | -------------------------------------------------------------------------------- /src/aleph/services/p2p/__init__.py: -------------------------------------------------------------------------------- 1 | from typing import Coroutine, List, Tuple 2 | 3 | from aleph_p2p_client import AlephP2PServiceClient, make_p2p_service_client 4 | from configmanager import Config 5 | 6 | from aleph.services.ipfs import IpfsService 7 | from aleph.types.db_session import DbSessionFactory 8 | 9 | from ..cache.node_cache import NodeCache 10 | from .manager import initialize_host 11 | 12 | 13 | async def init_p2p_client(config: Config, service_name: str) -> AlephP2PServiceClient: 14 | p2p_client = await make_p2p_service_client( 15 | service_name=service_name, 16 | mq_host=config.p2p.mq_host.value, 17 | mq_port=config.rabbitmq.port.value, 18 | mq_username=config.rabbitmq.username.value, 19 | mq_password=config.rabbitmq.password.value, 20 | mq_pub_exchange_name=config.rabbitmq.pub_exchange.value, 21 | mq_sub_exchange_name=config.rabbitmq.sub_exchange.value, 22 | http_host=config.p2p.daemon_host.value, 23 | http_port=config.p2p.control_port.value, 24 | ) 25 | 26 | return p2p_client 27 | 28 | 29 | async def init_p2p( 30 | config: Config, 31 | session_factory: DbSessionFactory, 32 | service_name: str, 33 | ipfs_service: IpfsService, 34 | node_cache: NodeCache, 35 | listen: bool = True, 36 | ) -> Tuple[AlephP2PServiceClient, List[Coroutine]]: 37 | 38 | p2p_client = await init_p2p_client(config, service_name) 39 | 40 | port = config.p2p.port.value 41 | tasks = await initialize_host( 42 | config=config, 43 | session_factory=session_factory, 44 | p2p_client=p2p_client, 45 | ipfs_service=ipfs_service, 46 | node_cache=node_cache, 47 | host=config.p2p.daemon_host.value, 48 | port=port, 49 | listen=listen, 50 | ) 51 | 52 | return p2p_client, tasks 53 | -------------------------------------------------------------------------------- /src/aleph/services/p2p/http.py: -------------------------------------------------------------------------------- 1 | """ While our own 
streamer libp2p protocol is still unstable, use direct 2 | HTTP connection to standard rest API. 3 | """ 4 | 5 | import asyncio 6 | import base64 7 | import logging 8 | from random import sample 9 | from typing import List, Optional, Sequence 10 | 11 | import aiohttp 12 | 13 | LOGGER = logging.getLogger("P2P.HTTP") 14 | 15 | SESSIONS = dict() 16 | 17 | 18 | async def api_get_request(base_uri, method, timeout=1): 19 | if timeout not in SESSIONS: 20 | connector = aiohttp.TCPConnector(limit_per_host=5) 21 | SESSIONS[timeout] = aiohttp.ClientSession( 22 | read_timeout=timeout, connector=connector 23 | ) 24 | 25 | uri = f"{base_uri}/api/v0/{method}" 26 | try: 27 | async with SESSIONS[timeout].get(uri) as resp: 28 | if resp.status != 200: 29 | result = None 30 | else: 31 | result = await resp.json() 32 | except ( 33 | TimeoutError, 34 | asyncio.TimeoutError, 35 | ConnectionRefusedError, 36 | aiohttp.ClientError, 37 | OSError, 38 | ): 39 | result = None 40 | except Exception: 41 | LOGGER.exception("Error in retrieval") 42 | result = None 43 | return result 44 | 45 | 46 | async def get_peer_hash_content( 47 | base_uri: str, item_hash: str, timeout: int = 1 48 | ) -> Optional[bytes]: 49 | result = None 50 | item = await api_get_request(base_uri, f"storage/{item_hash}", timeout=timeout) 51 | if item is not None and item["status"] == "success" and item["content"] is not None: 52 | # TODO: IMPORTANT /!\ verify the hash of received data! 53 | return base64.decodebytes(item["content"].encode("utf-8")) 54 | else: 55 | LOGGER.debug(f"can't get hash {item_hash}") 56 | 57 | return result 58 | 59 | 60 | async def request_hash( 61 | api_servers: Sequence[str], item_hash: str, timeout: int = 1 62 | ) -> Optional[bytes]: 63 | uris: List[str] = sample(api_servers, k=len(api_servers)) 64 | 65 | for uri in uris: 66 | content = await get_peer_hash_content(uri, item_hash, timeout=timeout) 67 | if content is not None: 68 | return content 69 | 70 | return None # Nothing found... 71 | -------------------------------------------------------------------------------- /src/aleph/services/p2p/peers.py: -------------------------------------------------------------------------------- 1 | from aleph_p2p_client import AlephP2PServiceClient 2 | from multiaddr import Multiaddr 3 | 4 | from aleph.toolkit.libp2p_stubs.peer.peerinfo import info_from_p2p_addr 5 | 6 | 7 | async def connect_peer(p2p_client: AlephP2PServiceClient, peer_maddr: str) -> None: 8 | """ 9 | Connects to the specified peer. 10 | 11 | :param p2p_client: P2P daemon client. 12 | :param peer_maddr: Fully qualified multi-address of the peer to connect to: 13 | /ip4//tcp//p2p/ 14 | """ 15 | peer_info = info_from_p2p_addr(Multiaddr(peer_maddr)) 16 | peer_id = (await p2p_client.identify()).peer_id 17 | 18 | # Discard attempts to connect to self. 
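# Both ids are compared as strings so that the stubbed PeerID object and the
# peer id reported by the daemon compare reliably.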
19 | if str(peer_info.peer_id) == str(peer_id): 20 | return 21 | 22 | for multiaddr in peer_info.addrs: 23 | await p2p_client.dial(peer_id=str(peer_info.peer_id), multiaddr=str(multiaddr)) 24 | -------------------------------------------------------------------------------- /src/aleph/services/p2p/pubsub.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import Union 3 | 4 | from aleph_p2p_client import AlephP2PServiceClient 5 | 6 | LOGGER = logging.getLogger("P2P.pubsub") 7 | 8 | 9 | async def publish( 10 | p2p_client: AlephP2PServiceClient, 11 | topic: str, 12 | message: Union[bytes, str], 13 | loopback: bool = False, 14 | ) -> None: 15 | """ 16 | Publishes a message on the specified topic. 17 | :param p2p_client: P2P daemon client. 18 | :param topic: Topic on which to send the message. 19 | :param message: The message itself. Can be provided as bytes or as a string. 20 | :param loopback: Whether the message should also be forwarded/processed on this node. 21 | """ 22 | 23 | data = message if isinstance(message, bytes) else message.encode("UTF-8") 24 | await p2p_client.publish(data=data, topic=topic, loopback=loopback) 25 | -------------------------------------------------------------------------------- /src/aleph/services/peers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/pyaleph/e706cbd42ca217a13716534d0f2887dc0626932b/src/aleph/services/peers/__init__.py -------------------------------------------------------------------------------- /src/aleph/services/peers/publish.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import json 3 | import logging 4 | from typing import List, Optional 5 | 6 | from aleph_p2p_client import AlephP2PServiceClient 7 | 8 | from aleph.services.ipfs import IpfsService 9 | 10 | LOGGER = logging.getLogger("peers.publish") 11 | 12 | 13 | async def publish_host( 14 | address: str, 15 | p2p_client: AlephP2PServiceClient, 16 | ipfs_service: IpfsService, 17 | p2p_alive_topic: str, 18 | ipfs_alive_topic: str, 19 | interests: Optional[List[str]] = None, 20 | delay: int = 120, 21 | peer_type: str = "P2P", 22 | use_ipfs: bool = True, 23 | ): 24 | """Publish our multiaddress regularly, saying we are alive.""" 25 | await asyncio.sleep(2) 26 | from aleph import __version__ 27 | 28 | msg = { 29 | "address": address, 30 | "interests": interests, 31 | "peer_type": peer_type, 32 | "version": __version__, 33 | } 34 | msg = json.dumps(msg).encode("utf-8") 35 | while True: 36 | try: 37 | if use_ipfs: 38 | LOGGER.debug("Publishing alive message on ipfs pubsub") 39 | await asyncio.wait_for( 40 | ipfs_service.pub(ipfs_alive_topic, msg.decode("utf-8")), 1 41 | ) 42 | except Exception: 43 | LOGGER.warning("Can't publish alive message on ipfs") 44 | 45 | try: 46 | LOGGER.debug("Publishing alive message on p2p pubsub") 47 | await asyncio.wait_for( 48 | p2p_client.publish(data=msg, topic=p2p_alive_topic), 10 49 | ) 50 | except Exception: 51 | LOGGER.warning("Can't publish alive message on p2p") 52 | 53 | await asyncio.sleep(delay) 54 | -------------------------------------------------------------------------------- /src/aleph/services/storage/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/pyaleph/e706cbd42ca217a13716534d0f2887dc0626932b/src/aleph/services/storage/__init__.py 
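As a usage sketch for the `publish` helper above (the topic name and payload here are illustrative, not values mandated by the repository):

```python
import json

from aleph.services.p2p.pubsub import publish


async def announce(p2p_client) -> None:
    # publish() encodes str payloads to UTF-8 bytes before handing them to the
    # P2P daemon client; loopback=False means this node does not process the
    # message itself.
    payload = json.dumps({"status": "alive"})
    await publish(
        p2p_client=p2p_client, topic="ALEPH-EXAMPLE", message=payload, loopback=False
    )
```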
-------------------------------------------------------------------------------- /src/aleph/services/storage/engine.py: -------------------------------------------------------------------------------- 1 | import abc 2 | from typing import Optional 3 | 4 | 5 | class StorageEngine(abc.ABC): 6 | @abc.abstractmethod 7 | async def read(self, filename: str) -> Optional[bytes]: ... 8 | 9 | @abc.abstractmethod 10 | async def write(self, filename: str, content: bytes): ... 11 | 12 | @abc.abstractmethod 13 | async def delete(self, filename: str): ... 14 | 15 | @abc.abstractmethod 16 | async def exists(self, filename: str) -> bool: ... 17 | -------------------------------------------------------------------------------- /src/aleph/services/storage/fileystem_engine.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from typing import Optional, Union 3 | 4 | from .engine import StorageEngine 5 | 6 | 7 | class FileSystemStorageEngine(StorageEngine): 8 | def __init__(self, folder: Union[Path, str]): 9 | self.folder = folder if isinstance(folder, Path) else Path(folder) 10 | 11 | if self.folder.exists() and not self.folder.is_dir(): 12 | raise ValueError(f"'{self.folder}' exists and is not a directory.") 13 | 14 | self.folder.mkdir(parents=True, exist_ok=True) 15 | 16 | async def read(self, filename: str) -> Optional[bytes]: 17 | file_path = self.folder / filename 18 | 19 | if not file_path.is_file(): 20 | return None 21 | 22 | return file_path.read_bytes() 23 | 24 | async def write(self, filename: str, content: bytes): 25 | file_path = self.folder / filename 26 | file_path.write_bytes(content) 27 | 28 | async def delete(self, filename: str): 29 | file_path = self.folder / filename 30 | file_path.unlink(missing_ok=True) 31 | 32 | async def exists(self, filename: str) -> bool: 33 | file_path = self.folder / filename 34 | return file_path.exists() 35 | -------------------------------------------------------------------------------- /src/aleph/services/utils.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import re 3 | import socket 4 | 5 | import aiohttp 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | IP4_SERVICE_URL = "https://v4.ident.me/" 10 | IP4_SOCKET_ENDPOINT = "8.8.8.8" 11 | 12 | 13 | def is_valid_ip4(ip: str) -> bool: 14 | return bool(re.match(r"\d+\.\d+\.\d+\.\d+", ip)) 15 | 16 | 17 | async def get_ip4_from_service() -> str: 18 | """Get the public IPv4 of this system by calling a third-party service""" 19 | async with aiohttp.ClientSession() as session: 20 | async with session.get(IP4_SERVICE_URL) as resp: 21 | resp.raise_for_status() 22 | ip = await resp.text(encoding="utf-8") 23 | 24 | if is_valid_ip4(ip): 25 | return ip 26 | else: 27 | raise ValueError(f"Response does not match IPv4 format: {ip}") 28 | 29 | 30 | def get_ip4_from_socket() -> str: 31 | """Get the public IPv4 of this system by inspecting a socket connection. 32 | Warning: This returns a local IP address when running behind a NAT, e.g. on Docker. 
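No packet is actually sent: connecting a UDP socket only makes the OS choose
the outgoing interface, whose address is then read back.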
33 | """ 34 | s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) 35 | try: 36 | s.connect((IP4_SOCKET_ENDPOINT, 80)) 37 | return s.getsockname()[0] 38 | finally: 39 | s.close() 40 | 41 | 42 | async def get_IP() -> str: 43 | """Get the public IPv4 of this system.""" 44 | try: 45 | return await get_ip4_from_service() 46 | except Exception: 47 | logging.exception("Error when fetching IPv4 from service") 48 | return get_ip4_from_socket() 49 | -------------------------------------------------------------------------------- /src/aleph/settings.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | 3 | 4 | @dataclass(frozen=True, eq=True) 5 | class Settings: 6 | use_executors: bool = True 7 | 8 | 9 | # Singleton 10 | settings = Settings() 11 | -------------------------------------------------------------------------------- /src/aleph/toolkit/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/pyaleph/e706cbd42ca217a13716534d0f2887dc0626932b/src/aleph/toolkit/__init__.py -------------------------------------------------------------------------------- /src/aleph/toolkit/aggregates.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/pyaleph/e706cbd42ca217a13716534d0f2887dc0626932b/src/aleph/toolkit/aggregates.py -------------------------------------------------------------------------------- /src/aleph/toolkit/batch.py: -------------------------------------------------------------------------------- 1 | from typing import AsyncIterator, List, TypeVar 2 | 3 | T = TypeVar("T") 4 | 5 | 6 | async def async_batch( 7 | async_iterable: AsyncIterator[T], n: int 8 | ) -> AsyncIterator[List[T]]: 9 | batch = [] 10 | async for item in async_iterable: 11 | batch.append(item) 12 | if len(batch) == n: 13 | yield batch 14 | batch = [] 15 | 16 | # Yield the last batch 17 | if batch: 18 | yield batch 19 | -------------------------------------------------------------------------------- /src/aleph/toolkit/costs.py: -------------------------------------------------------------------------------- 1 | import datetime as dt 2 | from decimal import ROUND_FLOOR, Decimal 3 | from typing import Optional 4 | 5 | from aleph.db.models.messages import MessageDb 6 | from aleph.toolkit.constants import ( 7 | PRICE_PRECISION, 8 | STORE_AND_PROGRAM_COST_CUTOFF_HEIGHT, 9 | STORE_AND_PROGRAM_COST_CUTOFF_TIMESTAMP, 10 | ) 11 | from aleph.toolkit.timestamp import timestamp_to_datetime 12 | 13 | 14 | def format_cost(v: Decimal | str, p: int = PRICE_PRECISION) -> Decimal: 15 | return Decimal(v).quantize(Decimal(1) / Decimal(10**p), ROUND_FLOOR) 16 | 17 | 18 | def format_cost_str(v: Decimal | str, p: int = PRICE_PRECISION) -> str: 19 | n = format_cost(v, p) 20 | return "{:.{p}f}".format(n, p=p) 21 | 22 | 23 | def are_store_and_program_free(message: MessageDb) -> bool: 24 | height: Optional[int] = ( 25 | message.confirmations[0].height if len(message.confirmations) > 0 else None 26 | ) 27 | date: dt.datetime = message.time 28 | 29 | if height is not None: 30 | return height < STORE_AND_PROGRAM_COST_CUTOFF_HEIGHT 31 | else: 32 | return date < timestamp_to_datetime(STORE_AND_PROGRAM_COST_CUTOFF_TIMESTAMP) 33 | -------------------------------------------------------------------------------- /src/aleph/toolkit/exceptions.py: -------------------------------------------------------------------------------- 1 | from 
contextlib import contextmanager 2 | from typing import Callable, Optional, Type 3 | 4 | 5 | @contextmanager 6 | def ignore_exceptions( 7 | *exceptions: Type[BaseException], 8 | on_error: Optional[Callable[[BaseException], None]] = None, 9 | ): 10 | try: 11 | yield 12 | except exceptions as e: 13 | if on_error: 14 | on_error(e) 15 | -------------------------------------------------------------------------------- /src/aleph/toolkit/json.py: -------------------------------------------------------------------------------- 1 | """ 2 | An abstraction layer for JSON serialization/deserialization. 3 | Makes swapping between JSON implementations easier. 4 | """ 5 | 6 | import json 7 | from datetime import date, datetime, time 8 | from typing import IO, Any, Union 9 | 10 | import orjson 11 | import pydantic 12 | 13 | # The actual type of serialized JSON as returned by the JSON serializer. 14 | SerializedJson = bytes 15 | 16 | # All the possible types for serialized JSON. This type is useful to force functions 17 | # to handle all possible cases when using serialized JSON as input in order to make 18 | # serializer changes easier. 19 | SerializedJsonInput = Union[bytes, str] 20 | 21 | 22 | # Note: orjson.JSONDecodeError is a subclass of both json.JSONDecodeError and ValueError, 23 | # so callers can catch either when decoding fails. 24 | DecodeError = orjson.JSONDecodeError 25 | 26 | 27 | def load(fp: IO) -> Any: 28 | raise NotImplementedError("orjson does not provide load") 29 | 30 | 31 | def loads(s: Union[bytes, str]) -> Any: 32 | try: 33 | return orjson.loads(s) 34 | except TypeError: 35 | return json.loads(s) 36 | 37 | 38 | def dump(fp: IO, obj: Any) -> None: 39 | raise NotImplementedError("orjson does not provide dump") 40 | 41 | 42 | def extended_json_encoder(obj: Any) -> Any: 43 | """ 44 | Extended JSON encoder for dumping objects that contain pydantic models and datetime objects. 45 | """ 46 | if isinstance(obj, datetime): 47 | return obj.timestamp() 48 | elif isinstance(obj, date): 49 | return obj.toordinal() 50 | elif isinstance(obj, time): 51 | return obj.hour * 3600 + obj.minute * 60 + obj.second + obj.microsecond / 1e6 52 | elif isinstance(obj, pydantic.BaseModel): 53 | return obj.model_dump() 54 | else: 55 | raise TypeError(f"Object of type {type(obj)} is not JSON serializable") 56 | 57 | 58 | def dumps(obj: Any) -> bytes: 59 | try: 60 | return orjson.dumps(obj) 61 | except TypeError: 62 | return json.dumps(obj, default=extended_json_encoder).encode() 63 | -------------------------------------------------------------------------------- /src/aleph/toolkit/libp2p_stubs/README.md: -------------------------------------------------------------------------------- 1 | # Copies of files from libp2p 2 | 3 | The files under this directory/module are all copied directly from [py-libp2p](https://github.com/libp2p/py-libp2p). 4 | py-libp2p is unmaintained at the moment, and its dependency tree makes it (nearly) uninstallable. 5 | As we only require a small set of files from libp2p (basically, the peer ID classes and the public/private key classes), 6 | we are working around this issue by integrating the code we need. 7 | 8 | The libp2p license (dual license Apache + MIT) applies for these files: 9 | * libp2p Apache: https://github.com/libp2p/py-libp2p/blob/master/LICENSE-APACHE 10 | * libp2p MIT: https://github.com/libp2p/py-libp2p/blob/master/LICENSE-MIT 11 | 12 | Do not modify these files manually.
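For reference, here is a minimal sketch of how these stubs are consumed elsewhere in the code base. It uses only the create_new_key_pair, sign and verify entry points defined in crypto/rsa.py below; the payload is an arbitrary example value:

from aleph.toolkit.libp2p_stubs.crypto.rsa import create_new_key_pair

# Generate a fresh 2048-bit RSA key pair and round-trip a signature through it.
key_pair = create_new_key_pair(bits=2048)
signature = key_pair.private_key.sign(b"example payload")
assert key_pair.public_key.verify(b"example payload", signature)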
-------------------------------------------------------------------------------- /src/aleph/toolkit/libp2p_stubs/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/pyaleph/e706cbd42ca217a13716534d0f2887dc0626932b/src/aleph/toolkit/libp2p_stubs/__init__.py -------------------------------------------------------------------------------- /src/aleph/toolkit/libp2p_stubs/crypto/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/pyaleph/e706cbd42ca217a13716534d0f2887dc0626932b/src/aleph/toolkit/libp2p_stubs/crypto/__init__.py -------------------------------------------------------------------------------- /src/aleph/toolkit/libp2p_stubs/crypto/pb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/pyaleph/e706cbd42ca217a13716534d0f2887dc0626932b/src/aleph/toolkit/libp2p_stubs/crypto/pb/__init__.py -------------------------------------------------------------------------------- /src/aleph/toolkit/libp2p_stubs/crypto/pb/crypto.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto2"; 2 | 3 | package crypto.pb; 4 | 5 | enum KeyType { 6 | RSA = 0; 7 | Ed25519 = 1; 8 | Secp256k1 = 2; 9 | ECDSA = 3; 10 | } 11 | 12 | message PublicKey { 13 | required KeyType key_type = 1; 14 | required bytes data = 2; 15 | } 16 | 17 | message PrivateKey { 18 | required KeyType key_type = 1; 19 | required bytes data = 2; 20 | } -------------------------------------------------------------------------------- /src/aleph/toolkit/libp2p_stubs/crypto/pb/crypto_pb2.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Generated by the protocol buffer compiler. DO NOT EDIT! 
3 | # source: crypto.proto 4 | """Generated protocol buffer code.""" 5 | from google.protobuf import descriptor as _descriptor 6 | from google.protobuf import descriptor_pool as _descriptor_pool 7 | from google.protobuf import message as _message 8 | from google.protobuf import reflection as _reflection 9 | from google.protobuf import symbol_database as _symbol_database 10 | from google.protobuf.internal import enum_type_wrapper 11 | 12 | # @@protoc_insertion_point(imports) 13 | 14 | _sym_db = _symbol_database.Default() 15 | 16 | 17 | DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( 18 | b'\n\x0c\x63rypto.proto\x12\tcrypto.pb"?\n\tPublicKey\x12$\n\x08key_type\x18\x01 \x02(\x0e\x32\x12.crypto.pb.KeyType\x12\x0c\n\x04\x64\x61ta\x18\x02 \x02(\x0c"@\n\nPrivateKey\x12$\n\x08key_type\x18\x01 \x02(\x0e\x32\x12.crypto.pb.KeyType\x12\x0c\n\x04\x64\x61ta\x18\x02 \x02(\x0c*9\n\x07KeyType\x12\x07\n\x03RSA\x10\x00\x12\x0b\n\x07\x45\x64\x32\x35\x35\x31\x39\x10\x01\x12\r\n\tSecp256k1\x10\x02\x12\t\n\x05\x45\x43\x44SA\x10\x03' 19 | ) 20 | 21 | _KEYTYPE = DESCRIPTOR.enum_types_by_name["KeyType"] 22 | KeyType = enum_type_wrapper.EnumTypeWrapper(_KEYTYPE) 23 | RSA = 0 24 | Ed25519 = 1 25 | Secp256k1 = 2 26 | ECDSA = 3 27 | 28 | 29 | _PUBLICKEY = DESCRIPTOR.message_types_by_name["PublicKey"] 30 | _PRIVATEKEY = DESCRIPTOR.message_types_by_name["PrivateKey"] 31 | PublicKey = _reflection.GeneratedProtocolMessageType( 32 | "PublicKey", 33 | (_message.Message,), 34 | { 35 | "DESCRIPTOR": _PUBLICKEY, 36 | "__module__": "crypto_pb2", 37 | # @@protoc_insertion_point(class_scope:crypto.pb.PublicKey) 38 | }, 39 | ) 40 | _sym_db.RegisterMessage(PublicKey) 41 | 42 | PrivateKey = _reflection.GeneratedProtocolMessageType( 43 | "PrivateKey", 44 | (_message.Message,), 45 | { 46 | "DESCRIPTOR": _PRIVATEKEY, 47 | "__module__": "crypto_pb2", 48 | # @@protoc_insertion_point(class_scope:crypto.pb.PrivateKey) 49 | }, 50 | ) 51 | _sym_db.RegisterMessage(PrivateKey) 52 | 53 | if not _descriptor._USE_C_DESCRIPTORS: 54 | 55 | DESCRIPTOR._options = None 56 | _KEYTYPE._serialized_start = 158 57 | _KEYTYPE._serialized_end = 215 58 | _PUBLICKEY._serialized_start = 27 59 | _PUBLICKEY._serialized_end = 90 60 | _PRIVATEKEY._serialized_start = 92 61 | _PRIVATEKEY._serialized_end = 156 62 | # @@protoc_insertion_point(module_scope) 63 | -------------------------------------------------------------------------------- /src/aleph/toolkit/libp2p_stubs/crypto/rsa.py: -------------------------------------------------------------------------------- 1 | import Crypto.PublicKey.RSA as RSA 2 | from Crypto.Hash import SHA256 3 | from Crypto.PublicKey.RSA import RsaKey 4 | from Crypto.Signature import pkcs1_15 5 | 6 | from .keys import KeyPair, KeyType, PrivateKey, PublicKey 7 | 8 | 9 | class RSAPublicKey(PublicKey): 10 | def __init__(self, impl: RsaKey) -> None: 11 | self.impl = impl 12 | 13 | def to_bytes(self) -> bytes: 14 | return self.impl.export_key("DER") 15 | 16 | @classmethod 17 | def from_bytes(cls, key_bytes: bytes) -> "RSAPublicKey": 18 | rsakey = RSA.import_key(key_bytes) 19 | return cls(rsakey) 20 | 21 | def get_type(self) -> KeyType: 22 | return KeyType.RSA 23 | 24 | def verify(self, data: bytes, signature: bytes) -> bool: 25 | h = SHA256.new(data) 26 | try: 27 | pkcs1_15.new(self.impl).verify(h, signature) 28 | except (ValueError, TypeError): 29 | return False 30 | return True 31 | 32 | 33 | class RSAPrivateKey(PrivateKey): 34 | def __init__(self, impl: RsaKey) -> None: 35 | self.impl = impl 36 | 37 | @classmethod 38 | def 
new(cls, bits: int = 2048, e: int = 65537) -> "RSAPrivateKey": 39 | private_key_impl = RSA.generate(bits, e=e) 40 | return cls(private_key_impl) 41 | 42 | def to_bytes(self) -> bytes: 43 | return self.impl.export_key("DER") 44 | 45 | def get_type(self) -> KeyType: 46 | return KeyType.RSA 47 | 48 | def sign(self, data: bytes) -> bytes: 49 | h = SHA256.new(data) 50 | return pkcs1_15.new(self.impl).sign(h) 51 | 52 | def get_public_key(self) -> PublicKey: 53 | return RSAPublicKey(self.impl.publickey()) 54 | 55 | 56 | def create_new_key_pair(bits: int = 2048, e: int = 65537) -> KeyPair: 57 | """ 58 | Returns a new RSA keypair with the requested key size (``bits``) and the 59 | given public exponent ``e``. 60 | 61 | Sane defaults are provided for both values. 62 | """ 63 | private_key = RSAPrivateKey.new(bits, e) 64 | public_key = private_key.get_public_key() 65 | return KeyPair(private_key, public_key) 66 | -------------------------------------------------------------------------------- /src/aleph/toolkit/libp2p_stubs/peer/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/pyaleph/e706cbd42ca217a13716534d0f2887dc0626932b/src/aleph/toolkit/libp2p_stubs/peer/__init__.py -------------------------------------------------------------------------------- /src/aleph/toolkit/libp2p_stubs/peer/peerinfo.py: -------------------------------------------------------------------------------- 1 | from typing import Any, List, Sequence 2 | 3 | import multiaddr 4 | 5 | from .id import ID 6 | 7 | 8 | class PeerInfo: 9 | peer_id: ID 10 | addrs: List[multiaddr.Multiaddr] 11 | 12 | def __init__(self, peer_id: ID, addrs: Sequence[multiaddr.Multiaddr]) -> None: 13 | self.peer_id = peer_id 14 | self.addrs = list(addrs) 15 | 16 | def __eq__(self, other: Any) -> bool: 17 | return ( 18 | isinstance(other, PeerInfo) 19 | and self.peer_id == other.peer_id 20 | and self.addrs == other.addrs 21 | ) 22 | 23 | 24 | def info_from_p2p_addr(addr: multiaddr.Multiaddr) -> PeerInfo: 25 | if not addr: 26 | raise InvalidAddrError("`addr` should not be `None`") 27 | 28 | parts = addr.split() 29 | if not parts: 30 | raise InvalidAddrError( 31 | f"`parts`={parts} should at least have a protocol `P_P2P`" 32 | ) 33 | 34 | p2p_part = parts[-1] 35 | last_protocol_code = p2p_part.protocols()[0].code 36 | if last_protocol_code != multiaddr.protocols.P_P2P: 37 | raise InvalidAddrError( 38 | f"The last protocol should be `P_P2P` instead of `{last_protocol_code}`" 39 | ) 40 | 41 | # make sure the /p2p value parses as a peer.ID 42 | peer_id_str: str = p2p_part.value_for_protocol(multiaddr.protocols.P_P2P) 43 | peer_id: ID = ID.from_base58(peer_id_str) 44 | 45 | # we might have received just an / p2p part, which means there's no addr. 46 | if len(parts) > 1: 47 | addr = multiaddr.Multiaddr.join(*parts[:-1]) 48 | 49 | return PeerInfo(peer_id, [addr]) 50 | 51 | 52 | class InvalidAddrError(ValueError): 53 | pass 54 | -------------------------------------------------------------------------------- /src/aleph/toolkit/logging.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import sys 3 | from logging.handlers import RotatingFileHandler 4 | from typing import Any, Dict, Optional 5 | 6 | 7 | def setup_logging( 8 | loglevel: int, 9 | filename: Optional[str] = None, 10 | max_log_file_size: Optional[int] = None, 11 | ) -> None: 12 | """ 13 | Generic logging setup to be used by all processes. 
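A typical call (the path and size are illustrative): setup_logging(logging.INFO, filename="/var/log/pyaleph/ccn.log", max_log_file_size=50 * 1024 * 1024).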
14 | 15 | :param loglevel: Minimum loglevel for emitting messages. 16 | :param filename: Destination file for the logs, if specified. Defaults to stdout. 17 | :param max_log_file_size: Maximum size of the log file. Only applies if filename is specified. 18 | """ 19 | 20 | # Some kwargs fiddling is required because basicConfig does not like it when stream 21 | # and handlers are specified at the same time. 22 | kwargs: Dict[str, Any] 23 | 24 | if filename: 25 | if not max_log_file_size: 26 | raise ValueError( 27 | "When logging to a log file, a maximum log file size must be specified." 28 | ) 29 | 30 | handler = RotatingFileHandler( 31 | filename, maxBytes=max_log_file_size, backupCount=4 32 | ) 33 | kwargs = {"handlers": [handler]} 34 | else: 35 | kwargs = {"stream": sys.stdout} 36 | 37 | logformat = "%(asctime)s [%(levelname)s] %(name)s: %(message)s" 38 | logging.basicConfig( 39 | **kwargs, level=loglevel, format=logformat, datefmt="%Y-%m-%d %H:%M:%S" 40 | ) 41 | -------------------------------------------------------------------------------- /src/aleph/toolkit/monitoring.py: -------------------------------------------------------------------------------- 1 | import sentry_sdk 2 | from configmanager import Config 3 | from sentry_sdk.integrations.aiohttp import AioHttpIntegration 4 | from sentry_sdk.integrations.asyncio import AsyncioIntegration 5 | 6 | 7 | def setup_sentry(config: Config, traces_sample_rate=None): 8 | if dsn := config.sentry.dsn.value: 9 | if traces_sample_rate:  # the caller-provided value only opts in; the effective rate comes from the config 10 | if ( 11 | config_sample_rate := config.sentry.traces_sample_rate.value 12 | ) is not None: 13 | traces_sample_rate = float(config_sample_rate) 14 | else: 15 | traces_sample_rate = None 16 | 17 | sentry_sdk.init( 18 | dsn=dsn, 19 | traces_sample_rate=traces_sample_rate, 20 | ignore_errors=[KeyboardInterrupt], 21 | integrations=[ 22 | AioHttpIntegration(), 23 | AsyncioIntegration(), 24 | ], 25 | ) 26 | -------------------------------------------------------------------------------- /src/aleph/toolkit/rabbitmq.py: -------------------------------------------------------------------------------- 1 | import aio_pika 2 | 3 | 4 | async def make_mq_conn(config) -> aio_pika.abc.AbstractConnection: 5 | mq_conn = await aio_pika.connect_robust( 6 | host=config.p2p.mq_host.value, 7 | port=config.rabbitmq.port.value, 8 | login=config.rabbitmq.username.value, 9 | password=config.rabbitmq.password.value, 10 | ) 11 | return mq_conn 12 | -------------------------------------------------------------------------------- /src/aleph/toolkit/shield.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from functools import wraps 3 | 4 | 5 | def shielded(func): 6 | """ 7 | Protects a coroutine from cancellation.
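Cancellation of the surrounding task still raises CancelledError in the caller, but the wrapped coroutine itself runs to completion in the background (standard asyncio.shield() semantics).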
8 | """ 9 | 10 | @wraps(func) 11 | async def wrapped(*args, **kwargs): 12 | return await asyncio.shield(func(*args, **kwargs)) 13 | 14 | return wrapped 15 | -------------------------------------------------------------------------------- /src/aleph/toolkit/split.py: -------------------------------------------------------------------------------- 1 | from typing import Callable, Iterable, List, Tuple, TypeVar 2 | 3 | T = TypeVar("T") 4 | 5 | 6 | def split_iterable( 7 | iterable: Iterable[T], cond: Callable[[T], bool] 8 | ) -> Tuple[List[T], List[T]]: 9 | matches = [] 10 | others = [] 11 | 12 | for x in iterable: 13 | if cond(x): 14 | matches.append(x) 15 | else: 16 | others.append(x) 17 | 18 | return matches, others 19 | -------------------------------------------------------------------------------- /src/aleph/toolkit/timer.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | 4 | class Timer: 5 | """ 6 | A context manager to measure the time of any operation. 7 | 8 | Usage: 9 | >>> with Timer() as timer: 10 | >>> do_something() 11 | >>> print(f"Did something in {timer.elapsed()} seconds.") 12 | """ 13 | 14 | def __enter__(self): 15 | self.start_time = time.time() 16 | return self 17 | 18 | def __exit__(self, exc_type, exc_val, exc_tb): 19 | self.end_time = time.time() 20 | 21 | def elapsed(self) -> float: 22 | return self.end_time - self.start_time 23 | -------------------------------------------------------------------------------- /src/aleph/toolkit/timestamp.py: -------------------------------------------------------------------------------- 1 | import datetime as dt 2 | from typing import Optional, Union 3 | 4 | import pytz 5 | 6 | 7 | def timestamp_to_datetime(timestamp: float) -> dt.datetime: 8 | """ 9 | Utility function that transforms a UNIX timestamp into a UTC-localized datetime 10 | object. 11 | """ 12 | 13 | return pytz.utc.localize(dt.datetime.utcfromtimestamp(timestamp)) 14 | 15 | 16 | def coerce_to_datetime( 17 | datetime_or_timestamp: Optional[Union[float, dt.datetime]] 18 | ) -> Optional[dt.datetime]: 19 | # None for datetimes or 0 for timestamps results in returning None 20 | if datetime_or_timestamp is None or not datetime_or_timestamp: 21 | return None 22 | 23 | if isinstance(datetime_or_timestamp, dt.datetime): 24 | return datetime_or_timestamp 25 | 26 | return timestamp_to_datetime(datetime_or_timestamp) 27 | 28 | 29 | def utc_now() -> dt.datetime: 30 | """ 31 | Returns the current time as a UTC-localized datetime object. 32 | This differs from datetime.utcnow() because `utcnow()` is not localized. 
33 | """ 34 | return pytz.utc.localize(dt.datetime.utcnow()) 35 | -------------------------------------------------------------------------------- /src/aleph/types/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/pyaleph/e706cbd42ca217a13716534d0f2887dc0626932b/src/aleph/types/__init__.py -------------------------------------------------------------------------------- /src/aleph/types/chain_sync.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | 4 | class ChainSyncProtocol(str, Enum): 5 | # Message sync tx where the messages are in the tx data 6 | ON_CHAIN_SYNC = "aleph" 7 | # Message sync tx where the messages to fetch are in an IPFS hash 8 | OFF_CHAIN_SYNC = "aleph-offchain" 9 | # Messages sent by a smart contract 10 | SMART_CONTRACT = "smart-contract" 11 | 12 | 13 | class ChainEventType(str, Enum): 14 | # Messages sent on-chain using the Aleph smart contract. 15 | MESSAGE = "message" 16 | # Synchronisation messages sent by a CCN to the Aleph smart contract. 17 | SYNC = "sync" 18 | -------------------------------------------------------------------------------- /src/aleph/types/channel.py: -------------------------------------------------------------------------------- 1 | from typing import NewType 2 | 3 | Channel = NewType("Channel", str) 4 | -------------------------------------------------------------------------------- /src/aleph/types/db_session.py: -------------------------------------------------------------------------------- 1 | from typing import AsyncContextManager, Callable, ContextManager 2 | 3 | from sqlalchemy.ext.asyncio import AsyncSession 4 | from sqlalchemy.orm import Session 5 | from typing_extensions import TypeAlias 6 | 7 | DbSession: TypeAlias = Session 8 | DbSessionFactory = Callable[[], ContextManager[DbSession]] 9 | 10 | AsyncDbSession: TypeAlias = AsyncSession 11 | AsyncDbSessionFactory = Callable[[], AsyncContextManager[AsyncDbSession]] 12 | -------------------------------------------------------------------------------- /src/aleph/types/files.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | from typing import NewType 3 | 4 | FileTag = NewType("FileTag", str) 5 | 6 | 7 | class FileType(str, Enum): 8 | FILE = "file" 9 | DIRECTORY = "dir" 10 | -------------------------------------------------------------------------------- /src/aleph/types/protocol.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from enum import Enum 4 | 5 | 6 | class Protocol(str, Enum): 7 | """P2P Protocol""" 8 | 9 | IPFS = "ipfs" 10 | P2P = "p2p" 11 | -------------------------------------------------------------------------------- /src/aleph/types/settings.py: -------------------------------------------------------------------------------- 1 | from typing import List, Union 2 | 3 | from pydantic import BaseModel 4 | 5 | from aleph.db.models import AggregateDb 6 | 7 | 8 | class CompatibleGPU(BaseModel): 9 | name: str 10 | model: str 11 | vendor: str 12 | device_id: str 13 | 14 | 15 | class Settings(BaseModel): 16 | compatible_gpus: List[CompatibleGPU] 17 | community_wallet_address: str 18 | community_wallet_timestamp: int 19 | 20 | @staticmethod 21 | def from_aggregate(aggregate: Union[AggregateDb, dict]): 22 | content = aggregate.content if isinstance(aggregate, AggregateDb) else aggregate 
23 | 24 | community_wallet_address = content.get("community_wallet_address", "") 25 | community_wallet_timestamp = content.get("community_wallet_timestamp", 0) 26 | compatible_gpus = content.get("compatible_gpus", []) 27 | 28 | settings = Settings( 29 | community_wallet_address=community_wallet_address, 30 | community_wallet_timestamp=community_wallet_timestamp, 31 | compatible_gpus=compatible_gpus, 32 | ) 33 | 34 | return settings 35 | -------------------------------------------------------------------------------- /src/aleph/types/sort_order.py: -------------------------------------------------------------------------------- 1 | from enum import Enum, IntEnum 2 | 3 | 4 | class SortOrder(IntEnum): 5 | ASCENDING = 1 6 | DESCENDING = -1 7 | 8 | 9 | class SortBy(str, Enum): 10 | TIME = "time" 11 | TX_TIME = "tx-time" 12 | -------------------------------------------------------------------------------- /src/aleph/types/vms.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | from typing import NewType 3 | 4 | VmVersion = NewType("VmVersion", str) 5 | 6 | 7 | class VmType(str, Enum): 8 | INSTANCE = "instance" 9 | PROGRAM = "program" 10 | 11 | 12 | class CpuArchitecture(str, Enum): 13 | X86_64 = "x86_64" 14 | ARM64 = "arm64" 15 | -------------------------------------------------------------------------------- /src/aleph/web/__init__.py: -------------------------------------------------------------------------------- 1 | import pprint 2 | import time 3 | from datetime import date, datetime, timedelta 4 | 5 | import aiohttp_cors 6 | import aiohttp_jinja2 7 | import jinja2 8 | import pkg_resources 9 | from aiohttp import web 10 | 11 | from aleph.web.controllers.routes import register_routes 12 | 13 | 14 | def init_cors(app: web.Application): 15 | # Configure default CORS settings. 16 | cors = aiohttp_cors.setup( 17 | app, 18 | defaults={ 19 | "*": aiohttp_cors.ResourceOptions( 20 | allow_methods=["GET", "POST"], 21 | allow_credentials=True, 22 | expose_headers="*", 23 | allow_headers="*", 24 | ) 25 | }, 26 | ) 27 | 28 | # Configure CORS on all routes. 
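# socket.io resources are skipped: python-socketio appears to manage CORS itself, so wrapping those routes again would conflict.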
29 | for route in list(app.router.routes()): 30 | if "/socket.io/" not in repr(route.resource): 31 | cors.add(route) 32 | 33 | 34 | def create_aiohttp_app() -> web.Application: 35 | app = web.Application(client_max_size=1024**2 * 64) 36 | 37 | tpl_path = pkg_resources.resource_filename("aleph.web", "templates") 38 | jinja_loader = jinja2.ChoiceLoader( 39 | [ 40 | jinja2.FileSystemLoader(tpl_path), 41 | ] 42 | ) 43 | aiohttp_jinja2.setup(app, loader=jinja_loader) 44 | env = aiohttp_jinja2.get_env(app) 45 | env.globals.update( 46 | { 47 | "app": app, 48 | "date": date, 49 | "datetime": datetime, 50 | "time": time, 51 | "timedelta": timedelta, 52 | "int": int, 53 | "float": float, 54 | "len": len, 55 | "pprint": pprint, 56 | } 57 | ) 58 | 59 | register_routes(app) 60 | 61 | init_cors(app) 62 | 63 | return app 64 | -------------------------------------------------------------------------------- /src/aleph/web/controllers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/pyaleph/e706cbd42ca217a13716534d0f2887dc0626932b/src/aleph/web/controllers/__init__.py -------------------------------------------------------------------------------- /src/aleph/web/controllers/channels.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | from aiocache import SimpleMemoryCache, cached 4 | from aiohttp import web 5 | 6 | from aleph.db.accessors.messages import get_distinct_channels 7 | from aleph.types.channel import Channel 8 | from aleph.types.db_session import DbSession 9 | from aleph.web.controllers.app_state_getters import get_session_factory_from_request 10 | 11 | 12 | @cached(ttl=60 * 120, cache=SimpleMemoryCache, timeout=120) 13 | async def get_channels(session: DbSession) -> List[Channel]: 14 | channels = get_distinct_channels(session) 15 | return list(channels) 16 | 17 | 18 | async def used_channels(request: web.Request) -> web.Response: 19 | """All used channels list 20 | 21 | TODO: do we need pagination? 
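Note: results are cached for two hours (see get_channels above), so a newly used channel may take up to that long to appear here.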
22 | """ 23 | 24 | session_factory = get_session_factory_from_request(request) 25 | 26 | with session_factory() as session: 27 | channels = await get_channels(session) 28 | 29 | response = web.json_response({"channels": channels}) 30 | response.enable_compression() 31 | return response 32 | -------------------------------------------------------------------------------- /src/aleph/web/controllers/info.py: -------------------------------------------------------------------------------- 1 | from aiohttp import web 2 | 3 | from aleph.web.controllers.app_state_getters import get_node_cache_from_request 4 | 5 | 6 | async def public_multiaddress(request): 7 | """Broadcast public node addresses 8 | 9 | According to multiaddr spec https://multiformats.io/multiaddr/ 10 | """ 11 | 12 | node_cache = get_node_cache_from_request(request) 13 | public_addresses = await node_cache.get_public_addresses() 14 | 15 | output = {"node_multi_addresses": public_addresses} 16 | return web.json_response(output) 17 | -------------------------------------------------------------------------------- /src/aleph/web/controllers/programs.py: -------------------------------------------------------------------------------- 1 | from aiohttp import web 2 | from pydantic import BaseModel, ConfigDict, ValidationError 3 | 4 | from aleph.db.accessors.messages import get_programs_triggered_by_messages 5 | from aleph.types.db_session import DbSessionFactory 6 | from aleph.types.sort_order import SortOrder 7 | 8 | 9 | class GetProgramQueryFields(BaseModel): 10 | sort_order: SortOrder = SortOrder.DESCENDING 11 | 12 | model_config = ConfigDict(extra="forbid") 13 | 14 | 15 | async def get_programs_on_message(request: web.Request) -> web.Response: 16 | try: 17 | query = GetProgramQueryFields(**request.query) 18 | except ValidationError as error: 19 | return web.json_response( 20 | data=error.json(), status=web.HTTPBadRequest.status_code 21 | ) 22 | 23 | session_factory: DbSessionFactory = request.app["session_factory"] 24 | 25 | with session_factory() as session: 26 | messages = [ 27 | { 28 | "item_hash": result.item_hash, 29 | "content": { 30 | "on": {"message": result.message_subscriptions}, 31 | }, 32 | } 33 | for result in get_programs_triggered_by_messages( 34 | session=session, sort_order=query.sort_order 35 | ) 36 | ] 37 | 38 | response = web.json_response(data=messages) 39 | response.enable_compression() 40 | return response 41 | -------------------------------------------------------------------------------- /src/aleph/web/controllers/version.py: -------------------------------------------------------------------------------- 1 | from aiohttp import web 2 | 3 | from aleph import __version__ 4 | 5 | 6 | async def version(request): 7 | """Version endpoint.""" 8 | 9 | response = web.json_response({"version": __version__}) 10 | return response 11 | -------------------------------------------------------------------------------- /src/aleph/web/static/IBM_Plex_Mono/IBMPlexMono-Bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/pyaleph/e706cbd42ca217a13716534d0f2887dc0626932b/src/aleph/web/static/IBM_Plex_Mono/IBMPlexMono-Bold.ttf -------------------------------------------------------------------------------- /src/aleph/web/static/IBM_Plex_Mono/IBMPlexMono-Light.ttf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aleph-im/pyaleph/e706cbd42ca217a13716534d0f2887dc0626932b/src/aleph/web/static/IBM_Plex_Mono/IBMPlexMono-Light.ttf -------------------------------------------------------------------------------- /src/aleph/web/static/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/pyaleph/e706cbd42ca217a13716534d0f2887dc0626932b/src/aleph/web/static/__init__.py -------------------------------------------------------------------------------- /src/aleph/web/templates/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/pyaleph/e706cbd42ca217a13716534d0f2887dc0626932b/src/aleph/web/templates/__init__.py -------------------------------------------------------------------------------- /tests/api/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/pyaleph/e706cbd42ca217a13716534d0f2887dc0626932b/tests/api/__init__.py -------------------------------------------------------------------------------- /tests/api/test_version.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from aleph import __version__ 4 | 5 | 6 | @pytest.mark.asyncio 7 | async def test_get_version(ccn_api_client): 8 | response = await ccn_api_client.get("/api/v0/version") 9 | assert response.status == 200, await response.text() 10 | 11 | data = await response.json() 12 | assert data["version"] == __version__ 13 | -------------------------------------------------------------------------------- /tests/api/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Iterable, List 2 | 3 | 4 | def get_messages_by_keys(messages: Iterable[Dict], **keys) -> List[Dict]: 5 | """ 6 | Filters messages based on user-provided keys. 
7 | 8 | Example: 9 | >>> filtered_messages = get_messages_by_keys( 10 | >>> message_list, item_hash="some-hash", channel="MY-CHANNEL" 11 | >>> ) 12 | 13 | """ 14 | return list( 15 | filter( 16 | lambda msg: all(msg[k] == v for k, v in keys.items()), 17 | messages, 18 | ) 19 | ) 20 | -------------------------------------------------------------------------------- /tests/chains/test_aleph_indexer.py: -------------------------------------------------------------------------------- 1 | import datetime as dt 2 | import json 3 | 4 | import pytest 5 | from configmanager import Config 6 | 7 | from aleph.chains.indexer_reader import AlephIndexerClient 8 | from aleph.schemas.chains.indexer_response import EntityType, IndexerBlockchain 9 | from aleph.types.chain_sync import ChainEventType 10 | 11 | 12 | @pytest.fixture 13 | def indexer_client(mock_config: Config): 14 | return AlephIndexerClient(indexer_url=mock_config.aleph.indexer_url.value) 15 | 16 | 17 | @pytest.mark.skip("Indexer client tests are deactivated by default.") 18 | @pytest.mark.asyncio 19 | async def test_aleph_indexer_fetch_account(indexer_client: AlephIndexerClient): 20 | account = "0x166fd4299364b21c7567e163d85d78d2fb2f8ad5" 21 | 22 | async with indexer_client: 23 | response = await indexer_client.fetch_account_state( 24 | blockchain=IndexerBlockchain.ETHEREUM, 25 | accounts=[account], 26 | ) 27 | 28 | assert len(response.data.state) == 1 29 | account_state = response.data.state[0] 30 | 31 | assert account_state.blockchain == IndexerBlockchain.ETHEREUM 32 | assert account_state.type == EntityType.LOG 33 | assert account_state.account == account 34 | 35 | 36 | @pytest.mark.skip("Indexer client tests are deactivated by default.") 37 | @pytest.mark.asyncio 38 | async def test_aleph_indexer_fetch_events(indexer_client: AlephIndexerClient): 39 | async with indexer_client: 40 | response = await indexer_client.fetch_events( 41 | blockchain=IndexerBlockchain.ETHEREUM, 42 | event_type=ChainEventType.SYNC, 43 | datetime_range=( 44 | dt.datetime(2023, 2, 24, 14, 16, 35, tzinfo=dt.timezone.utc), 45 | dt.datetime(2023, 2, 24, 17, 49, 10, tzinfo=dt.timezone.utc), 46 | ), 47 | ) 48 | 49 | assert len(response.data.sync_events) == 1 50 | assert len(response.data.message_events) == 0 51 | 52 | sync_event = response.data.sync_events[0] 53 | 54 | assert ( 55 | sync_event.id 56 | == "ethereum_16698727_0x166fd4299364b21c7567e163d85d78d2fb2f8ad5_52" 57 | ) 58 | assert sync_event.timestamp == 1677248195000 59 | assert sync_event.address == "0x23eC28598DCeB2f7082Cc3a9D670592DfEd6e0dC" 60 | assert sync_event.height == 16698727 61 | 62 | assert json.loads(sync_event.message) == { 63 | "protocol": "aleph-offchain", 64 | "version": 1, 65 | "content": "QmV9tkuBEoSnmSuh7SakL7J33zCuUgDTckA17qyRpz3oDx", 66 | } 67 | -------------------------------------------------------------------------------- /tests/chains/test_avalanche.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from aleph.chains.avalanche import AvalancheConnector 4 | from aleph.schemas.pending_messages import BasePendingMessage, parse_message 5 | 6 | 7 | @pytest.fixture 8 | def avax_message() -> BasePendingMessage: 9 | return parse_message( 10 | { 11 | "item_hash": "3c4d948a22c3d41b7d189555ee4a285cb490ec553f3d135cc3b2f0cfddf5c0f2", 12 | "type": "POST", 13 | "chain": "AVAX", 14 | "sender": "X-avax14x5a42stua94l2vxjcag6c9ftd8ea0y8fltdwv", 15 | "signature": 
"3WRUvPbp7euNQvxuhV2YaFUJHN2Xoo8yku67MTuhfk8bRvDQz6hysQrrkfyKweXSCDNzfjrYzd1PwhGWdTJGZAvuMPiEJvJ", 16 | "item_type": "inline", 17 | "item_content": '{"type":"avalanche","address":"X-avax14x5a42stua94l2vxjcag6c9ftd8ea0y8fltdwv","content":{"body":"This message was posted from the typescript-SDK test suite"},"time":1689163528.372}', 18 | "time": 1689163528.372, 19 | "channel": "TEST", 20 | } 21 | ) 22 | 23 | 24 | @pytest.mark.asyncio 25 | async def test_verify_signature_real(avax_message: BasePendingMessage): 26 | connector = AvalancheConnector() 27 | result = await connector.verify_signature(avax_message) 28 | assert result is True 29 | 30 | 31 | @pytest.mark.asyncio 32 | async def test_verify_signature_bad_base58(avax_message: BasePendingMessage): 33 | connector = AvalancheConnector() 34 | avax_message.signature = "baba" 35 | result = await connector.verify_signature(avax_message) 36 | assert result is False 37 | -------------------------------------------------------------------------------- /tests/chains/test_ethereum.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from configmanager import Config 3 | from web3 import Web3 4 | 5 | from aleph.chains.ethereum import get_contract 6 | 7 | 8 | @pytest.fixture 9 | def web3(): 10 | return Web3() 11 | 12 | 13 | @pytest.mark.asyncio 14 | async def test_get_contract(mock_config: Config, web3: Web3): 15 | contract = await get_contract(config=mock_config, web3=web3) 16 | # The type hint provided by the web3 library is clearly wrong. This is a simple check 17 | # to ensure that we get a proper web3 object. Improve as needed. 18 | assert contract.w3 == web3 19 | -------------------------------------------------------------------------------- /tests/chains/test_evm.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from aleph.chains.evm import EVMVerifier 4 | from aleph.schemas.pending_messages import BasePendingMessage, parse_message 5 | 6 | 7 | @pytest.fixture 8 | def evm_message() -> BasePendingMessage: 9 | return parse_message( 10 | { 11 | "item_hash": "f524a258d87f1771e8538fd4fd91acdcc527c3b7f138fafd6ff89a5fcf97c3b7", 12 | "type": "POST", 13 | "chain": "ETH", 14 | "sender": "0xA07B1214bAe0D5ccAA25449C3149c0aC83658874", 15 | "signature": "0x99efc66c781c889e1f21c680869c832141dcee90189e75e85f570b8b49e72dee0338d77c214ae55bfcb886bbd7bac6dc4dcfda4eb0d2c47ed93d51b36b7259b01c", 16 | "time": 1730410918.092607, 17 | "item_type": "inline", 18 | "item_content": '{"address":"0xA07B1214bAe0D5ccAA25449C3149c0aC83658874","time":1730410918.0924816,"content":{"type":"polygon","address":"0xA07B1214bAe0D5ccAA25449C3149c0aC83658874","content":{"body":"This message was posted from the typescript-SDK test suite"},"time":1689163528.372},"type":"test"}', 19 | "channel": "ALEPH-CLOUDSOLUTIONS", 20 | } 21 | ) 22 | 23 | 24 | @pytest.mark.asyncio 25 | async def test_verify_evm_signature_real(evm_message: BasePendingMessage): 26 | verifier = EVMVerifier() 27 | result = await verifier.verify_signature(evm_message) 28 | assert result is True 29 | 30 | 31 | @pytest.mark.asyncio 32 | async def test_verify_bad_evm_signature(evm_message: BasePendingMessage): 33 | verifier = EVMVerifier() 34 | evm_message.signature = "baba" 35 | result = await verifier.verify_signature(evm_message) 36 | assert result is False 37 | -------------------------------------------------------------------------------- /tests/chains/test_nuls2.py: 
-------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from aleph.chains.nuls2 import Nuls2Verifier 4 | from aleph.schemas.pending_messages import parse_message 5 | 6 | 7 | @pytest.mark.asyncio 8 | async def test_verify_signature_nuls2(mocker): 9 | message_dict = { 10 | "time": 1574266270.022, 11 | "type": "POST", 12 | "chain": "NULS2", 13 | "sender": "NULSd6HgeZVDvQ2pKQLakAsStYvGAT6WVFu9K", 14 | "channel": "MYALEPH", 15 | "content": { 16 | "ref": "43eef54be4a92c65ca24d3f2419414224129b7944ecaefed088897787aed70b4", 17 | "time": 1574266270.022, 18 | "type": "amend", 19 | "address": "NULSd6HgeZVDvQ2pKQLakAsStYvGAT6WVFu9K", 20 | "content": {"body": "test", "title": "Mutsi Test", "private": False}, 21 | }, 22 | "item_hash": "43094c3309791a5aa92ff6e1de337f23242103e1dffdc1941c5b6d4131da3a7e", 23 | "item_type": "inline", 24 | "signature": "HG4dsFDNGfgjKQX1qorGjxYfK8qEoKF0SfnBSNc8KbpCJ9jET58Rrvc8k3yK8XRl7syoT5gMRmoswOdbSCesmxo=", 25 | "item_content": '{"type":"amend","address":"NULSd6HgeZVDvQ2pKQLakAsStYvGAT6WVFu9K","content":{"body":"test","title":"Mutsi Test","private":false},"time":1574266270.022,"ref":"43eef54be4a92c65ca24d3f2419414224129b7944ecaefed088897787aed70b4"}', 26 | } 27 | 28 | verifier = Nuls2Verifier() 29 | message = parse_message(message_dict) 30 | assert await verifier.verify_signature(message) 31 | -------------------------------------------------------------------------------- /tests/db/test_error_codes.py: -------------------------------------------------------------------------------- 1 | from sqlalchemy import select 2 | 3 | from aleph.db.models import ErrorCodeDb 4 | from aleph.types.db_session import DbSessionFactory 5 | from aleph.types.message_status import ErrorCode 6 | 7 | 8 | def test_all_error_codes_mapped_in_db(session_factory: DbSessionFactory): 9 | """ 10 | Check that the ErrorCode enum values are all mapped in the database and vice-versa. 11 | Sanity check for developers. 12 | """ 13 | 14 | with session_factory() as session: 15 | db_error_codes = session.execute(select(ErrorCodeDb)).scalars() 16 | db_error_codes_dict = {e.code: e for e in db_error_codes} 17 | 18 | # All error code enum values must be mapped in the DB 19 | for error_code in ErrorCode: 20 | assert error_code.value in db_error_codes_dict 21 | 22 | # All DB entries must be mapped in the error code enum 23 | for db_error_code in db_error_codes_dict.keys(): 24 | _ = ErrorCode(db_error_code) 25 | -------------------------------------------------------------------------------- /tests/helpers/in_memory_storage_engine.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Optional 2 | 3 | from aleph.services.storage.engine import StorageEngine 4 | 5 | 6 | # TODO: remove duplication between this class and MockStorageEngine 7 | class InMemoryStorageEngine(StorageEngine): 8 | """ 9 | A storage engine that stores files in a dictionary. 
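Intended for tests: contents live only in process memory and are lost when the process exits.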
10 | """ 11 | 12 | def __init__(self, files: Dict[str, bytes]): 13 | self.files = files 14 | 15 | async def read(self, filename: str) -> Optional[bytes]: 16 | try: 17 | return self.files[filename] 18 | except KeyError: 19 | return None 20 | 21 | async def write(self, filename: str, content: bytes): 22 | self.files[filename] = content 23 | 24 | async def delete(self, filename: str): 25 | del self.files[filename] 26 | 27 | async def exists(self, filename: str) -> bool: 28 | return filename in self.files 29 | -------------------------------------------------------------------------------- /tests/jobs/test_cron_job.py: -------------------------------------------------------------------------------- 1 | import datetime as dt 2 | 3 | import pytest 4 | 5 | from aleph.jobs.cron.balance_job import BalanceCronJob 6 | from aleph.jobs.cron.cron_job import CronJob 7 | from aleph.types.db_session import DbSessionFactory 8 | 9 | 10 | @pytest.fixture 11 | def cron_job(session_factory: DbSessionFactory) -> CronJob: 12 | return CronJob( 13 | session_factory=session_factory, 14 | jobs={"balance": BalanceCronJob(session_factory=session_factory)}, 15 | ) 16 | 17 | 18 | @pytest.mark.asyncio 19 | @pytest.mark.parametrize( 20 | "cron_run_datetime", 21 | [ 22 | dt.datetime(2040, 1, 1, tzinfo=dt.timezone.utc), 23 | dt.datetime(2023, 6, 1, tzinfo=dt.timezone.utc), 24 | dt.datetime(2020, 1, 1, tzinfo=dt.timezone.utc), 25 | ], 26 | ) 27 | async def test_balance_job_run( 28 | session_factory: DbSessionFactory, 29 | cron_job: CronJob, 30 | cron_run_datetime: dt.datetime, 31 | ): 32 | with session_factory() as session: 33 | await cron_job.run(now=cron_run_datetime) 34 | session.commit() 35 | -------------------------------------------------------------------------------- /tests/message_processing/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/pyaleph/e706cbd42ca217a13716534d0f2887dc0626932b/tests/message_processing/__init__.py -------------------------------------------------------------------------------- /tests/message_processing/fixtures/test-data-forgotten-messages.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "chain": "ETH", 4 | "sender": "0x696879aE4F6d8DaDD5b8F1cbb1e663B89b08f106", 5 | "type": "POST", 6 | "channel": "INTEGRATION_TESTS", 7 | "signature": "0x271939ae35918d0e90877f2319dd0d9737f8334d52539125743caf2460b3896423ca69b1fc85662443cc0bd4ce91e2fb247d7d8291284d8c431e6962c611c4c31c", 8 | "time": 1665758931.005458, 9 | "item_type": "inline", 10 | "item_content": "{\"address\":\"0x696879aE4F6d8DaDD5b8F1cbb1e663B89b08f106\",\"time\":1665758931.0054002,\"content\":{\"title\":\"My first blog post\",\"body\":\"Ermahgerd, a bleug!\"},\"type\":\"test-post\"}", 11 | "item_hash": "9f02e3b5efdbdc0b487359117ae3af40db654892487feae452689a0b84dc1025" 12 | }, 13 | { 14 | "chain": "ETH", 15 | "channel": "TEST", 16 | "sender": "0x696879aE4F6d8DaDD5b8F1cbb1e663B89b08f106", 17 | "type": "FORGET", 18 | "time": 1652786534.1139255, 19 | "item_type": "inline", 20 | "item_content": "{\"address\":\"0x696879aE4F6d8DaDD5b8F1cbb1e663B89b08f106\",\"time\":1652786534.1138077,\"hashes\":[\"9f02e3b5efdbdc0b487359117ae3af40db654892487feae452689a0b84dc1025\"]}", 21 | "item_hash": "431a0d2f79ecfa859949d2a09f67068ce7ebd4eb777d179ad958be6c79abc66b", 22 | "signature": "0x409cdef65af51d6a508a1fdc56c0baa6d1abac7f539ab5f290e3245c522a4c766b930c4196d9f5d8c8c94a4d36c4b65bf04a2773f058f03803b9b0bca2fd85a51b" 23 | 
}, 24 | { 25 | "chain": "ETH", 26 | "sender": "0x696879aE4F6d8DaDD5b8F1cbb1e663B89b08f106", 27 | "type": "POST", 28 | "channel": "INTEGRATION_TESTS", 29 | "signature": "0x271939ae35918d0e90877f2319dd0d9737f8334d52539125743caf2460b3896423ca69b1fc85662443cc0bd4ce91e2fb247d7d8291284d8c431e6962c611c4c31c", 30 | "time": 1665758931.005458, 31 | "item_type": "inline", 32 | "item_content": "{\"address\":\"0x696879aE4F6d8DaDD5b8F1cbb1e663B89b08f106\",\"time\":1665758931.0054002,\"content\":{\"title\":\"My first blog post\",\"body\":\"Ermahgerd, a bleug!\"},\"type\":\"test-post\"}", 33 | "item_hash": "9f02e3b5efdbdc0b487359117ae3af40db654892487feae452689a0b84dc1025", 34 | "tx_hash": "0xfffd825eff4dfeb229d5fe6cfc7ca7f0a6f692fbd0286a6b08b7d0890cbeeb4a" 35 | } 36 | ] -------------------------------------------------------------------------------- /tests/message_processing/fixtures/test-data-pending-messaging.json: -------------------------------------------------------------------------------- 1 | { 2 | "chain": "ETH", 3 | "sender": "0x696879aE4F6d8DaDD5b8F1cbb1e663B89b08f106", 4 | "type": "POST", 5 | "channel": "INTEGRATION_TESTS", 6 | "signature": "0x271939ae35918d0e90877f2319dd0d9737f8334d52539125743caf2460b3896423ca69b1fc85662443cc0bd4ce91e2fb247d7d8291284d8c431e6962c611c4c31c", 7 | "time": 1665758931.005458, 8 | "item_type": "inline", 9 | "item_content": "{\"address\":\"0x696879aE4F6d8DaDD5b8F1cbb1e663B89b08f106\",\"time\":1665758931.0054002,\"content\":{\"title\":\"My first blog post\",\"body\":\"Ermahgerd, a bleug!\"},\"type\":\"test-post\"}", 10 | "item_hash": "9f02e3b5efdbdc0b487359117ae3af40db654892487feae452689a0b84dc1025" 11 | } 12 | 13 | -------------------------------------------------------------------------------- /tests/message_processing/fixtures/test-data-posts.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "chain": "ETH", 4 | "sender": "0x696879aE4F6d8DaDD5b8F1cbb1e663B89b08f106", 5 | "type": "POST", 6 | "channel": "INTEGRATION_TESTS", 7 | "signature": "0x271939ae35918d0e90877f2319dd0d9737f8334d52539125743caf2460b3896423ca69b1fc85662443cc0bd4ce91e2fb247d7d8291284d8c431e6962c611c4c31c", 8 | "time": 1665758931.005458, 9 | "item_type": "inline", 10 | "item_content": "{\"address\":\"0x696879aE4F6d8DaDD5b8F1cbb1e663B89b08f106\",\"time\":1665758931.0054002,\"content\":{\"title\":\"My first blog post\",\"body\":\"Ermahgerd, a bleug!\"},\"type\":\"test-post\"}", 11 | "item_hash": "9f02e3b5efdbdc0b487359117ae3af40db654892487feae452689a0b84dc1025" 12 | }, 13 | { 14 | "chain": "ETH", 15 | "sender": "0x696879aE4F6d8DaDD5b8F1cbb1e663B89b08f106", 16 | "type": "POST", 17 | "channel": "INTEGRATION_TESTS", 18 | "signature": "0xe0fc46bf4e85b633cade35945390dfd72c02251c44d5fb38753591da298e795000b3461e3802a841c73a9804aa5c4a5c915e9a4785debec2f23ba3bf742609b81b", 19 | "time": 1665759467.291409, 20 | "item_type": "inline", 21 | "item_content": "{\"address\":\"0x696879aE4F6d8DaDD5b8F1cbb1e663B89b08f106\",\"time\":1665759467.2913437,\"content\":{\"title\":\"My first blog post\",\"body\":\"Okay, I was a bit excited. 
Let's do it again.\"},\"ref\":\"9f02e3b5efdbdc0b487359117ae3af40db654892487feae452689a0b84dc1025\",\"type\":\"amend\"}", 22 | "item_hash": "93776ad67063b955869a7fa705ea2987add39486e1ed5951e9842291cf0f566c" 23 | } 24 | ] 25 | -------------------------------------------------------------------------------- /tests/message_processing/load_fixtures.py: -------------------------------------------------------------------------------- 1 | import json 2 | from pathlib import Path 3 | from typing import Dict, List 4 | 5 | 6 | def load_fixture_messages(fixture: str) -> List[Dict]: 7 | fixture_path = Path(__file__).parent / "fixtures" / fixture 8 | 9 | with open(fixture_path) as f: 10 | return json.load(f)["content"]["messages"] 11 | 12 | 13 | def load_fixture_message(fixture: str) -> Dict: 14 | fixture_path = Path(__file__).parent / "fixtures" / fixture 15 | 16 | with open(fixture_path) as f: 17 | return json.load(f) 18 | 19 | 20 | def load_fixture_message_list(fixture: str) -> List[Dict]: 21 | fixture_path = Path(__file__).parent / "fixtures" / fixture 22 | 23 | with open(fixture_path) as f: 24 | return json.load(f) 25 | -------------------------------------------------------------------------------- /tests/message_processing/test_process_pending_messages.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from configmanager import Config 3 | 4 | from aleph.db.models import PendingMessageDb 5 | from aleph.handlers.message_handler import MessagePublisher 6 | from aleph.storage import StorageService 7 | from aleph.toolkit.timestamp import utc_now 8 | from aleph.types.db_session import DbSessionFactory 9 | from aleph.types.message_status import MessageOrigin 10 | 11 | from .load_fixtures import load_fixture_message 12 | 13 | 14 | @pytest.mark.asyncio 15 | async def test_duplicated_pending_message( 16 | mocker, 17 | mock_config: Config, 18 | session_factory: DbSessionFactory, 19 | test_storage_service: StorageService, 20 | ): 21 | message = load_fixture_message("test-data-pending-messaging.json") 22 | 23 | message_publisher = MessagePublisher( 24 | session_factory=session_factory, 25 | storage_service=test_storage_service, 26 | config=mock_config, 27 | pending_message_exchange=mocker.AsyncMock(), 28 | ) 29 | 30 | test1 = await message_publisher.add_pending_message( 31 | message_dict=message, 32 | reception_time=utc_now(), 33 | origin=MessageOrigin.P2P, 34 | ) 35 | assert test1 36 | 37 | test2 = await message_publisher.add_pending_message( 38 | message_dict=message, 39 | reception_time=utc_now(), 40 | origin=MessageOrigin.P2P, 41 | ) 42 | assert test2 43 | 44 | assert test2.content == test1.content 45 | assert test2.reception_time == test1.reception_time 46 | 47 | with session_factory() as session: 48 | pending_messages = session.query(PendingMessageDb).count() 49 | assert pending_messages == 1 50 | -------------------------------------------------------------------------------- /tests/services/test_node_cache.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from aleph.services.cache.node_cache import NodeCache 4 | 5 | 6 | @pytest.mark.asyncio 7 | async def test_get_set(node_cache: NodeCache): 8 | key = "test_get_set" 9 | 10 | await node_cache.set(key, 12) 11 | assert await node_cache.get(key) == b"12" 12 | 13 | await node_cache.set(key, "hello") 14 | assert await node_cache.get(key) == b"hello" 15 | 16 | 17 | @pytest.mark.asyncio 18 | async def test_incr_decr(node_cache: NodeCache): 19 | 
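# Delete the key up front so repeated runs start clean; incr/decr act on Redis's integer representation of the stored value.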
key = "test_incr_decr" 20 | await node_cache.redis_client.delete(key) 21 | 22 | await node_cache.set(key, 42) 23 | await node_cache.incr(key) 24 | assert await node_cache.get(key) == b"43" 25 | 26 | await node_cache.decr(key) 27 | assert await node_cache.get(key) == b"42" 28 | 29 | 30 | @pytest.mark.asyncio 31 | async def test_api_servers_cache(node_cache: NodeCache): 32 | await node_cache.redis_client.delete(node_cache.API_SERVERS_KEY) 33 | 34 | assert await node_cache.get_api_servers() == set() 35 | 36 | api_server_1 = "https://api2.aleph.im" 37 | api_server_2 = "https://api3.aleph.im" 38 | 39 | await node_cache.add_api_server(api_server_2) 40 | assert await node_cache.get_api_servers() == {api_server_2} 41 | assert not await node_cache.has_api_server(api_server_1) 42 | assert await node_cache.has_api_server(api_server_2) 43 | 44 | await node_cache.add_api_server(api_server_1) 45 | assert await node_cache.get_api_servers() == {api_server_1, api_server_2} 46 | assert await node_cache.has_api_server(api_server_1) 47 | assert await node_cache.has_api_server(api_server_2) 48 | 49 | await node_cache.redis_client.delete(node_cache.API_SERVERS_KEY) 50 | -------------------------------------------------------------------------------- /tests/services/test_utils.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from aleph.services.utils import ( 4 | get_IP, 5 | get_ip4_from_service, 6 | get_ip4_from_socket, 7 | is_valid_ip4, 8 | ) 9 | 10 | 11 | def test_is_valid_ip4(): 12 | assert is_valid_ip4("1.2.3.4") 13 | assert is_valid_ip4("123.456.789.123") 14 | assert not is_valid_ip4("") 15 | assert not is_valid_ip4("Hello !") 16 | assert not is_valid_ip4("a.b.c.d") 17 | 18 | 19 | @pytest.mark.asyncio 20 | async def test_get_ip4_from_service(): 21 | ip4 = await get_ip4_from_service() 22 | assert is_valid_ip4(ip4) 23 | 24 | 25 | def test_get_ip4_from_socket(): 26 | ip4 = get_ip4_from_socket() 27 | assert is_valid_ip4(ip4) 28 | 29 | 30 | @pytest.mark.asyncio 31 | async def test_get_IP(): 32 | ip4 = await get_IP() 33 | assert is_valid_ip4(ip4) 34 | -------------------------------------------------------------------------------- /tests/toolkit/test_batch.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from aleph.toolkit.batch import async_batch 4 | 5 | 6 | async def async_range(*args): 7 | for i in range(*args): 8 | yield i 9 | 10 | 11 | @pytest.mark.asyncio 12 | async def test_async_batch(): 13 | # batch with a remainder 14 | batches = [b async for b in async_batch(async_range(0, 10), 3)] 15 | assert batches == [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] 16 | 17 | # iterable divisible by n 18 | batches = [b async for b in async_batch(async_range(0, 4), 2)] 19 | assert batches == [[0, 1], [2, 3]] 20 | 21 | # n = 1 22 | batches = [b async for b in async_batch(async_range(0, 5), 1)] 23 | assert batches == [[0], [1], [2], [3], [4]] 24 | 25 | # n = len(iterable) 26 | batches = [b async for b in async_batch(async_range(0, 7), 7)] 27 | assert batches == [[0, 1, 2, 3, 4, 5, 6]] 28 | -------------------------------------------------------------------------------- /tests/toolkit/test_ignore_exceptions.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from aleph.toolkit.exceptions import ignore_exceptions 4 | 5 | 6 | def test_ignore_one_type_of_exception(): 7 | some_dict = {"a": 1} 8 | 9 | with ignore_exceptions(KeyError): 10 | _ = 
some_dict["b"] 11 | 12 | assert some_dict["a"] == 1 13 | 14 | 15 | def test_ignore_multiple_types_of_exceptions(): 16 | some_dict = {"a": 1} 17 | 18 | with ignore_exceptions(KeyError, AttributeError): 19 | _ = some_dict["b"] 20 | 21 | with ignore_exceptions(KeyError, AttributeError): 22 | _ = some_dict.new_dict_method_from_python5() 23 | 24 | assert some_dict["a"] == 1 25 | 26 | 27 | def test_ignore_no_exception(): 28 | some_dict = {"a": 1} 29 | 30 | with pytest.raises(KeyError): 31 | with ignore_exceptions(): 32 | _ = some_dict["b"] 33 | 34 | 35 | def test_ignore_the_wrong_type_of_exception(): 36 | some_dict = [] 37 | 38 | with pytest.raises(AttributeError): 39 | with ignore_exceptions(IndexError): 40 | _ = some_dict.items() 41 | 42 | 43 | def test_ignore_exception_with_callback(): 44 | some_dict = {"a": 1} 45 | 46 | callback_was_called = False 47 | 48 | def callback(e): 49 | assert isinstance(e, KeyError) 50 | nonlocal callback_was_called 51 | callback_was_called = True 52 | 53 | with ignore_exceptions(KeyError, on_error=callback): 54 | _ = some_dict["b"] 55 | 56 | assert callback_was_called 57 | -------------------------------------------------------------------------------- /tests/toolkit/test_json.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | import pytest 4 | 5 | import aleph.toolkit.json as aleph_json 6 | 7 | 8 | def test_loads(): 9 | """ 10 | A (simplistic) load test, as a sanity check. 11 | """ 12 | 13 | expected = {"1": {"a": "b", "c": "d"}, "2": ["x", "y", "z"], "3": "world"} 14 | serialized_json = json.dumps(expected) 15 | 16 | actual = aleph_json.loads(serialized_json) 17 | assert actual == expected 18 | 19 | 20 | def test_loads_invalid_json(): 21 | s = '{"1": "3"' 22 | with pytest.raises(aleph_json.DecodeError): 23 | _ = aleph_json.loads(s) 24 | 25 | 26 | def test_reject_nans(): 27 | """ 28 | Test that the implementation rejects NaN as it is not part of the official 29 | JSON specification and is unsupported by Postgres. 30 | """ 31 | 32 | serialized_json = '{"1": 1, "2": 2, "3": NaN}' 33 | with pytest.raises(json.decoder.JSONDecodeError): 34 | _ = aleph_json.loads(serialized_json) 35 | 36 | 37 | def test_serialized_json_type(): 38 | """ 39 | Check that the output of dumps is of the announced type. 40 | """ 41 | 42 | expected = {"1": "2", "3": {"4": "5"}} 43 | 44 | serialized_json = aleph_json.dumps(expected) 45 | assert isinstance(serialized_json, aleph_json.SerializedJson) 46 | 47 | actual = json.loads(serialized_json) 48 | assert actual == expected 49 | 50 | 51 | def test_loads_large_ints_json(): 52 | """ 53 | Check that the output of dumps and loads don't raise TypeError errors caused by large ints on orjson library. 
54 | """ 55 | 56 | expected = { 57 | "0x3E1aba4ad853Dd7Aa531aB59F10bd9f4d89aebaF": 498729072221377800000, 58 | "0x525C49BF83Ce3a1AAf425ac1A463537dB68c8bd7": 8059602048250472000000, 59 | "0x7F05Ed9650E48f3E564125EAdCdc0d5E7c2E8DaB": 1991950397951749400000000, 60 | "0xb6e45ADfa0C7D70886bBFC990790d64620F1BAE8": 497997000000000000000000000, 61 | } 62 | 63 | serialized_json = aleph_json.dumps(expected) 64 | assert isinstance(serialized_json, aleph_json.SerializedJson) 65 | 66 | actual = json.loads(serialized_json) 67 | assert actual == expected 68 | -------------------------------------------------------------------------------- /tests/toolkit/test_timer.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | from aleph.toolkit.timer import Timer 4 | 5 | 6 | def test_timer_sleep(): 7 | sleep_duration = 0.5 8 | 9 | with Timer() as t: 10 | time.sleep(sleep_duration) 11 | 12 | assert t.elapsed() >= sleep_duration 13 | -------------------------------------------------------------------------------- /tests/toolkit/test_timestamp.py: -------------------------------------------------------------------------------- 1 | import pytz 2 | 3 | from aleph.toolkit.timestamp import timestamp_to_datetime 4 | 5 | 6 | def test_timestamp_to_datetime(): 7 | t1 = 1675206096.0 # 20230201T00:01:36+01:00 8 | dt1 = timestamp_to_datetime(t1) 9 | 10 | assert dt1.year == 2023 11 | assert dt1.month == 1 12 | assert dt1.day == 31 13 | assert dt1.hour == 23 14 | assert dt1.minute == 1 15 | assert dt1.second == 36 16 | 17 | assert dt1.tzinfo == pytz.utc 18 | -------------------------------------------------------------------------------- /tests/web/controllers/test_programs.py: -------------------------------------------------------------------------------- 1 | import json 2 | from hashlib import sha256 3 | from pathlib import Path 4 | from typing import List, Sequence 5 | 6 | import pytest 7 | import pytest_asyncio 8 | from message_test_helpers import make_validated_message_from_dict 9 | 10 | from aleph.db.models import MessageDb 11 | from aleph.types.db_session import DbSessionFactory 12 | 13 | 14 | @pytest_asyncio.fixture 15 | async def fixture_program_messages( 16 | session_factory: DbSessionFactory, 17 | ) -> List[MessageDb]: 18 | fixtures_file = Path(__file__).parent / "fixtures/messages/program.json" 19 | 20 | with fixtures_file.open() as f: 21 | json_messages = json.load(f) 22 | 23 | # Add item_content and item_hash to messages, modify in place: 24 | messages = [] 25 | for message_dict in json_messages: 26 | if "item_content" not in message_dict: 27 | message_dict["item_content"] = json.dumps(message_dict["content"]) 28 | if "item_hash" not in message_dict: 29 | message_dict["item_hash"] = sha256( 30 | message_dict["item_content"].encode() 31 | ).hexdigest() 32 | 33 | messages.append( 34 | make_validated_message_from_dict( 35 | message_dict=message_dict, 36 | raw_content=json.dumps(message_dict["content"]), 37 | ) 38 | ) 39 | 40 | with session_factory() as session: 41 | session.add_all(messages) 42 | session.commit() 43 | 44 | return messages 45 | 46 | 47 | @pytest.mark.asyncio 48 | async def test_get_programs_on_message( 49 | fixture_program_messages: Sequence[MessageDb], ccn_api_client 50 | ): 51 | response = await ccn_api_client.get("/api/v0/programs/on/message") 52 | assert response.status == 200, await response.text() 53 | 54 | data = await response.json() 55 | expected = { 56 | "item_hash": fixture_program_messages[0].item_hash, 57 | "content": { 58 | "on": 
{"message": fixture_program_messages[0].content["on"]["message"]} 59 | }, 60 | } 61 | 62 | assert data == [expected] 63 | -------------------------------------------------------------------------------- /tests/web/controllers/test_pub_json.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | import pytest 4 | 5 | 6 | @pytest.mark.asyncio 7 | async def test_pub_valid_aleph_message(mock_config, ccn_api_client, mocker): 8 | message_topic = mock_config.aleph.queue_topic.value 9 | 10 | mocker.patch("aleph.web.controllers.p2p.pub_p2p") 11 | 12 | message = { 13 | "chain": "ETH", 14 | "channel": "TEST", 15 | "sender": "0x696879aE4F6d8DaDD5b8F1cbb1e663B89b08f106", 16 | "type": "STORE", 17 | "time": 1652794362.573859, 18 | "item_type": "inline", 19 | "item_content": '{"address":"0x696879aE4F6d8DaDD5b8F1cbb1e663B89b08f106","time":1652794362.5736332,"item_type":"storage","item_hash":"5ccdd7bccfbc5955e2e40166dd0cdea0b093154fd87bc2bea57e7c768cde2f21","mime_type":"text/plain"}', 20 | "item_hash": "f6fc4884e3ec3624bd3f60a3c37abf83a130777086061b1a373e659f2bab4d06", 21 | "signature": "0x7b87c29388a7a452353f9cae8718b66158fb5bdc93f032964226745ee04919092550791b93f79e5ee1981f2d9d6e5ac0cae0d28b68bb63fe0fcbd79015a6f3ea1b", 22 | } 23 | 24 | response = await ccn_api_client.post( 25 | "/api/v0/ipfs/pubsub/pub", 26 | json={"topic": message_topic, "data": json.dumps(message)}, 27 | ) 28 | assert response.status == 200, await response.text() 29 | response_json = await response.json() 30 | assert response_json["status"] == "success" 31 | assert response_json["failed"] == [] 32 | 33 | 34 | @pytest.mark.asyncio 35 | async def test_pub_invalid_aleph_message(mock_config, ccn_api_client, mocker): 36 | message_topic = mock_config.aleph.queue_topic.value 37 | 38 | mocker.patch("aleph.web.controllers.p2p.pub_p2p") 39 | 40 | response = await ccn_api_client.post( 41 | "/api/v0/ipfs/pubsub/pub", 42 | json={ 43 | "topic": message_topic, 44 | "data": json.dumps( 45 | {"header": "this is not an Aleph message at all", "type": "STORE"} 46 | ), 47 | }, 48 | ) 49 | assert response.status == 422, await response.text() 50 | print(await response.text()) 51 | --------------------------------------------------------------------------------