├── .dockerignore ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── PULL_REQUEST_TEMPLATE.md ├── scripts │ └── extract_droplet_ipv4.py └── workflows │ ├── build-deb-package-and-integration-tests.yml │ ├── codeql-analysis.yml │ ├── deploy-main-on-staging.yml │ ├── pr-rating.yml │ ├── test-build-examples.yml │ └── test-using-pytest.yml ├── .gitignore ├── .gitmodules ├── CONFIGURE_CADDY.md ├── LICENSE ├── LICENSE.txt ├── README.md ├── TESTING.md ├── config.json ├── doc ├── INSTALL-Debian-11.md ├── INSTALL-Debian-12.md ├── INSTALL-Ubuntu-20.04.md ├── INSTALL-Ubuntu-22.04.md ├── INSTALL.md ├── confidential.md ├── images │ └── boot_process.drawio.png └── operator_auth.md ├── docker ├── publish_vm_connector.sh ├── publish_vm_supervisor_dev.sh ├── run_vm_connector.sh ├── run_vm_supervisor.sh ├── vm_connector.dockerfile └── vm_supervisor-dev.dockerfile ├── examples ├── Makefile ├── README.md ├── confidential_instance_message_from_aleph.json ├── data │ └── example.json ├── example_confidential_image │ ├── README.md │ ├── build_debian_image.sh │ └── setup_debian_rootfs.sh ├── example_django │ ├── blog │ │ ├── __init__.py │ │ ├── admin.py │ │ ├── apps.py │ │ ├── fixtures │ │ │ └── default_articles.json │ │ ├── forms.py │ │ ├── migrations │ │ │ ├── 0001_initial.py │ │ │ ├── 0002_auto_20210702_1331.py │ │ │ └── __init__.py │ │ ├── models.py │ │ ├── templates │ │ │ └── blog │ │ │ │ ├── article_list.html │ │ │ │ └── comment.html │ │ ├── urls.py │ │ └── views.py │ ├── example_django │ │ ├── __init__.py │ │ ├── asgi.py │ │ ├── settings.py │ │ ├── urls.py │ │ └── wsgi.py │ └── manage.py ├── example_fastapi │ ├── README.md │ └── main.py ├── example_fastapi_1.py ├── example_http_js │ ├── .dockerignore │ ├── Dockerfile │ ├── Makefile │ ├── README.md │ ├── package.json │ └── src │ │ ├── run.sh │ │ └── server.js ├── example_http_rust │ ├── Cargo.toml │ ├── Dockerfile │ ├── Makefile │ ├── README.md │ └── src │ │ └── main.rs ├── example_pip │ ├── main.py │ └── requirements.txt ├── instance_message_from_aleph.json ├── program_message_from_aleph.json ├── qemu_message_from_aleph.json └── volumes │ ├── Dockerfile │ └── build_squashfs.sh ├── kernels ├── build-kernel.sh ├── linux.config └── microvm-kernel-x86_64-5.10.config ├── packaging ├── Makefile ├── aleph-vm │ ├── DEBIAN │ │ ├── conffiles │ │ ├── control │ │ ├── postinst │ │ ├── postrm │ │ ├── preinst │ │ └── prerm │ └── etc │ │ ├── aleph-vm │ │ └── supervisor.env │ │ ├── ipfs │ │ ├── KUBO.md │ │ └── kubo.json │ │ ├── needrestart │ │ └── conf.d │ │ │ └── aleph-vm.conf │ │ └── systemd │ │ └── system │ │ ├── aleph-vm-controller@.service │ │ ├── aleph-vm-supervisor.service │ │ └── ipfs.service ├── debian-12.dockerfile ├── extract_requirements.sh ├── repositories │ ├── bookworm │ │ └── conf │ │ │ └── distributions │ └── jammy │ │ └── conf │ │ └── distributions ├── ubuntu-22.04.dockerfile ├── ubuntu-24.04.dockerfile └── version_from_git.py ├── pyproject.toml ├── runtimes ├── aleph-debian-12-python │ ├── create_disk_image.sh │ ├── init0.sh │ ├── init1.py │ ├── loading.html │ └── update_inits.sh ├── instance-rootfs │ ├── create-debian-12-disk.sh │ ├── create-debian-12-qemu-disk.sh │ ├── create-ubuntu-22-04-disk.sh │ ├── create-ubuntu-22-04-qemu-disk.sh │ └── create-ubuntu-24-04-qemu-disk.sh └── ovmf │ ├── README.md │ ├── build_ovmf.sh │ ├── download_dependencies.sh │ └── patches │ └── edk2 │ └── 0001-Fix-invokation-of-cryptomount-s-for-AMD-SEV.patch ├── src └── aleph │ ├── __init__.py │ └── vm │ ├── __init__.py │ ├── conf.py │ ├── constants.py 
│ ├── controllers │ ├── __init__.py │ ├── __main__.py │ ├── configuration.py │ ├── firecracker │ │ ├── __init__.py │ │ ├── executable.py │ │ ├── instance.py │ │ ├── program.py │ │ ├── snapshot_manager.py │ │ ├── snapshots.py │ │ └── storage.py │ ├── interface.py │ ├── qemu │ │ ├── QEMU.md │ │ ├── __init__.py │ │ ├── client.py │ │ ├── cloudinit.py │ │ └── instance.py │ └── qemu_confidential │ │ ├── __init__.py │ │ └── instance.py │ ├── garbage_collector.py │ ├── guest_api │ ├── __init__.py │ └── __main__.py │ ├── hypervisors │ ├── __init__.py │ ├── firecracker │ │ ├── __init__.py │ │ ├── config.py │ │ └── microvm.py │ ├── qemu │ │ ├── __init__.py │ │ └── qemuvm.py │ └── qemu_confidential │ │ ├── __init__.py │ │ └── qemuvm.py │ ├── models.py │ ├── network │ ├── __init__.py │ ├── firewall.py │ ├── get_interface_ipv4.py │ ├── hostnetwork.py │ ├── interfaces.py │ ├── ipaddresses.py │ ├── ndp_proxy.py │ └── port_availability_checker.py │ ├── orchestrator │ ├── INSTANCES.md │ ├── README.md │ ├── __init__.py │ ├── __main__.py │ ├── alembic.ini │ ├── chain.py │ ├── cli.py │ ├── custom_logs.py │ ├── machine.py │ ├── messages.py │ ├── metrics.py │ ├── migrations │ │ ├── __init__.py │ │ ├── env.py │ │ ├── script.py.mako │ │ └── versions │ │ │ ├── 0001_bbb12a12372e_execution_records.py │ │ │ ├── 0002_5c6ae643c69b_add_gpu_column_to_executions_table.py │ │ │ ├── 2da719d72cea_add_mapped_ports_column.py │ │ │ └── __init__.py │ ├── payment.py │ ├── pubsub.py │ ├── reactor.py │ ├── resources.py │ ├── run.py │ ├── status.py │ ├── supervisor.py │ ├── tasks.py │ ├── utils.py │ ├── views │ │ ├── __init__.py │ │ ├── authentication.py │ │ ├── host_status.py │ │ ├── operator.py │ │ ├── static │ │ │ ├── aleph-cloud-v1.svg │ │ │ ├── aleph-cloud-v2.svg │ │ │ ├── helpers.js │ │ │ ├── lightweight-charts.standalone.production.js │ │ │ └── main.css │ │ └── templates │ │ │ └── index.html │ └── vm │ │ └── __init__.py │ ├── pool.py │ ├── resources.py │ ├── sevclient.py │ ├── storage.py │ ├── systemd.py │ ├── utils │ ├── __init__.py │ ├── aggregate.py │ ├── logs.py │ └── test_helpers.py │ ├── version.py │ └── vm_type.py ├── tests └── supervisor │ ├── test_authentication.py │ ├── test_checkpayment.py │ ├── test_execution.py │ ├── test_firewall.py │ ├── test_gpu_x_vga_support.py │ ├── test_instance.py │ ├── test_interfaces.py │ ├── test_ipv6_allocator.py │ ├── test_log.py │ ├── test_qemu_instance.py │ ├── test_resolvectl_dns_servers.py │ ├── test_resources.py │ ├── test_status.py │ ├── test_utils.py │ ├── test_views.py │ └── views │ ├── test_operator.py │ ├── test_run_code.py │ └── test_view_errors.py ├── tutorials ├── ADVANCED.md ├── README.md ├── REQUIREMENTS.md ├── SERVER.md └── TESTING.md └── vm_connector ├── README.md ├── __init__.py ├── conf.py ├── main.py └── tests └── test_message.json /.dockerignore: -------------------------------------------------------------------------------- 1 | /.idea 2 | **/*.pyc 3 | **/__pycache__ 4 | 5 | **/rootfs/ 6 | **/*.sqlite3 7 | # **/*.squashfs 8 | **/*.bin 9 | **/*.ext4 10 | **/*.zip 11 | **/*.pyz 12 | **/*.rdb 13 | **/*.key 14 | **/data.tgz 15 | /pydantic/ 16 | **/target 17 | /packaging/sevctl/target 18 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and 
concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Desktop (please complete the following information):** 27 | - OS: [e.g. iOS] 28 | - Browser [e.g. chrome, safari] 29 | - Version [e.g. 22] 30 | 31 | **Smartphone (please complete the following information):** 32 | - Device: [e.g. iPhone6] 33 | - OS: [e.g. iOS8.1] 34 | - Browser [e.g. stock browser, safari] 35 | - Version [e.g. 22] 36 | 37 | **Additional context** 38 | Add any other context about the problem here. 39 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | Explain what problem this PR is resolving 2 | 3 | Related ClickUp, GitHub or Jira tickets: ALEPH-XXX 4 | 5 | ## Self-proofreading checklist 6 | 7 | - [ ] The new code is clear, easy to read and well commented. 8 | - [ ] New code does not duplicate the functions of builtin or popular libraries. 9 | - [ ] An LLM was used to review the new code and look for simplifications. 10 | - [ ] New classes and functions contain docstrings explaining what they provide. 11 | - [ ] All new code is covered by relevant tests. 12 | - [ ] Documentation has been updated regarding these changes. 13 | - [ ] Dependency updates in `pyproject.toml` have been mirrored in the Debian package build script `packaging/Makefile` 14 | 15 | ## Changes 16 | 17 | Explain the changes that were made. The idea is not to list exhaustively all the changes made (GitHub already provides a full diff), but to help the reviewers better understand: 18 | - which specific file changes go together, e.g: when creating a table in the front-end, there usually is a config file that goes with it 19 | - the reasoning behind some changes, e.g: deleted files because they are now redundant 20 | - the behaviour to expect, e.g: tooltip has purple background color because the client likes it so, changed a key in the API response to be consistent with other endpoints 21 | 22 | ## How to test 23 | 24 | Explain how to test your PR. 25 | If a specific config is required, explain it here (account, data entry, ...) 26 | 27 | ## Print screen / video 28 | 29 | Upload here screenshots or videos showing the changes if relevant.
30 | 31 | ## Notes 32 | 33 | Things that the reviewers should know: known bugs that are out of the scope of the PR, other trade-offs that were made. 34 | If the PR depends on a PR in another repo, or merges into another PR (instead of main), it should also be mentioned here. 35 | -------------------------------------------------------------------------------- /.github/scripts/extract_droplet_ipv4.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Extract the IP address of a DigitalOcean Droplet 4 | from the JSON returned by `doctl compute droplet get $name --output json` 5 | """ 6 | 7 | import json 8 | import sys 9 | 10 | droplet_raw = sys.stdin.read() 11 | try: 12 | droplet_info = json.loads(droplet_raw) 13 | if not droplet_info: 14 | sys.exit("No droplet info") 15 | if "errors" in droplet_info: 16 | sys.exit(droplet_raw) 17 | if "networks" not in droplet_info[0]: 18 | sys.exit(f"droplet_info[] {droplet_info[0]}") 19 | elif ("v4" not in droplet_info[0]["networks"]) or not droplet_info[0]["networks"]["v4"]: 20 | sys.exit("networks {}".format(droplet_info[0]["networks"])) 21 | else: 22 | print(droplet_info[0]["networks"]["v4"], file=sys.stderr) 23 | for network in droplet_info[0]["networks"]["v4"]: 24 | if network["type"] == "public": 25 | print(network["ip_address"]) # noqa: T201 26 | break 27 | else: 28 | sys.exit("No public ipv4 found") 29 | 30 | except Exception as e: 31 | if not isinstance(e, SystemExit): 32 | print(f"Failed to find ipv4: {e}", file=sys.stderr) # noqa: T201 33 | print(droplet_raw, file=sys.stderr) # noqa: T201 34 | raise 35 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # For most projects, this workflow file will not need changing; you simply need 3 | # to commit it to your repository. 4 | # 5 | # You may wish to alter this file to override the set of languages analyzed, 6 | # or to provide custom queries or build logic. 7 | # 8 | # ******** NOTE ******** 9 | # We have attempted to detect the languages in your repository. Please check 10 | # the `language` matrix defined below to confirm you have the correct set of 11 | # supported CodeQL languages. 12 | # 13 | name: "CodeQL" 14 | 15 | 16 | on: 17 | push: 18 | branches: [main] 19 | pull_request: 20 | # The branches below must be a subset of the branches above 21 | branches: [main] 22 | schedule: 23 | - cron: '15 16 * * 0' 24 | 25 | 26 | jobs: 27 | analyze: 28 | name: Analyze 29 | runs-on: ubuntu-latest 30 | permissions: 31 | actions: read 32 | contents: read 33 | security-events: write 34 | 35 | strategy: 36 | fail-fast: false 37 | matrix: 38 | language: ['python'] 39 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] 40 | # Learn more: 41 | # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed 42 | 43 | steps: 44 | - name: Checkout repository 45 | uses: actions/checkout@v4 46 | 47 | # Initializes the CodeQL tools for scanning. 48 | - name: Initialize CodeQL 49 | uses: github/codeql-action/init@v3 50 | with: 51 | languages: ${{ matrix.language }} 52 | # If you wish to specify custom queries, you can do so here or in a config file. 53 | # By default, queries listed here will override any specified in a config file.
54 | # Prefix the list here with "+" to use these queries and those in the config file. 55 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 56 | 57 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 58 | # If this step fails, then you should remove it and run the build manually (see below) 59 | - name: Autobuild 60 | uses: github/codeql-action/autobuild@v3 61 | 62 | # ℹ️ Command-line programs to run using the OS shell. 63 | # 📚 https://git.io/JvXDl 64 | 65 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 66 | # and modify them (or add more) to build your code if your project 67 | # uses a compiled language 68 | 69 | #- run: | 70 | # make bootstrap 71 | # make release 72 | 73 | - name: Perform CodeQL Analysis 74 | uses: github/codeql-action/analyze@v3 75 | -------------------------------------------------------------------------------- /.github/workflows/pr-rating.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Test PR Difficulty Rating Action 3 | 4 | 5 | permissions: 6 | pull-requests: write 7 | 8 | 9 | on: 10 | pull_request: 11 | types: [opened, reopened, ready_for_review] 12 | 13 | 14 | jobs: 15 | difficulty-rating: 16 | runs-on: ubuntu-latest 17 | if: github.event.pull_request.draft == false 18 | steps: 19 | - name: PR Difficulty Rating 20 | uses: rate-my-pr/difficulty@v1 21 | with: 22 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 23 | LLAMA_URL: ${{ secrets.LLAMA_URL }} 24 | -------------------------------------------------------------------------------- /.github/workflows/test-build-examples.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | name: "Build Examples" 4 | on: push 5 | 6 | 7 | jobs: 8 | build_pip: 9 | name: "Build with Pip requirements" 10 | runs-on: ubuntu-latest 11 | 12 | steps: 13 | - name: Checkout repository 14 | uses: actions/checkout@v4 15 | 16 | - name: Workaround github issue https://github.com/actions/runner-images/issues/7192 17 | run: sudo echo RESET grub-efi/install_devices | sudo debconf-communicate grub-pc 18 | 19 | - run: | 20 | sudo apt-get -y update 21 | sudo apt-get -y upgrade 22 | sudo apt-get -y install python3-pip python3-venv squashfs-tools build-essential python3-nftables 23 | 24 | sudo mkdir /opt/packages 25 | sudo chown $(whoami) /opt/packages 26 | 27 | - run: | 28 | pip3 install hatch 29 | 30 | - run: | 31 | hatch build 32 | 33 | - run: | 34 | ls 35 | pwd 36 | pip3 install -t /opt/packages -r ./examples/example_pip/requirements.txt 37 | mksquashfs /opt/packages packages.squashfs 38 | 39 | # - run: | 40 | # ipfs add packages.squashfs 41 | 42 | # TODO: There is currently no easy way to pass the item_hash from a pin to a new program.
43 | # - run: | 44 | # aleph pin QmQr3dEd6LiFq6JmUJYPLrffy45RGFhPWsxWmzo9zZb7Sy 45 | # 46 | # - run: | 47 | # aleph program ./examples/example_pip main:app 48 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | *.pyc 3 | __pycache__ 4 | 5 | *.sqlite3 6 | *.bin 7 | *.ext4 8 | *.zip 9 | *.pyz 10 | *.tgz 11 | *.rdb 12 | *.key 13 | /pydantic/ 14 | node_modules 15 | *.squashfs 16 | /examples/example_http_rust/target/ 17 | /examples/example_django/static/admin/ 18 | /runtimes/aleph-debian-11-python/rootfs/ 19 | /packaging/aleph-vm/opt/ 20 | /packaging/target/ 21 | /packaging/sevctl/target/ 22 | /packaging/repositories/*/db/ 23 | /packaging/repositories/*/dists/ 24 | /packaging/repositories/*/pool/ 25 | /kernels/linux-*/ 26 | /kernels/linux-*.tar 27 | /kernels/linux-*.tar.sign 28 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/aleph-vm/35ce27f1e51fd1bc86ceea502a81de77c26c5fe3/.gitmodules -------------------------------------------------------------------------------- /CONFIGURE_CADDY.md: -------------------------------------------------------------------------------- 1 | # Caddy Reverse-proxy for Aleph-VM 2 | 3 | A reverse-proxy is required for production use. It allows: 4 | 5 | - A different domain name for each VM function 6 | - Secure connections using HTTPS 7 | - Load balancing between multiple servers 8 | 9 | Using a different domain name for each VM function is important when running web applications, 10 | both for security and usability purposes. 11 | 12 | The VM Supervisor supports using domains in the form `https://identifier.vm.yourdomain.org`, where 13 | _identifier_ is the identifier/hash of the message describing the VM function and `yourdomain.org` 14 | represents your domain name. 15 | 16 | ## 1. Wildcard certificates 17 | 18 | A wildcard certificate is recommended to allow any subdomain of your domain to work. 19 | 20 | You can create one using [Let's Encrypt](https://letsencrypt.org/) and 21 | [Certbot](https://certbot.eff.org/) with the following instructions. 22 | 23 | ```shell 24 | sudo apt install -y certbot 25 | 26 | certbot certonly --manual --email email@yourdomain.org --preferred-challenges dns \ 27 | --server https://acme-v02.api.letsencrypt.org/directory --agree-tos \ 28 | -d 'vm.yourdomain.org,*.vm.yourdomain.org' 29 | ``` 30 | 31 | ## 2. Caddy Server 32 | 33 | In this documentation, we will install the modern [Caddy](https://caddyserver.com/) reverse-proxy. 34 | 35 | Replace `vm.yourdomain.org` with your domain of choice.
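By the end of this section, your `/etc/caddy/Caddyfile` will look roughly like the following. This is a sketch, assuming the orchestrator listens on `127.0.0.1:4020` (the port used by the supervisor elsewhere in this repository) and that you reuse the wildcard certificate from step 1; adapt domains, paths and ports to your setup:

```shell
cat >/etc/caddy/Caddyfile <<EOF
*.vm.yourdomain.org {
    # Reuse the Certbot wildcard certificate instead of letting Caddy
    # manage certificates itself (an assumption made for this sketch)
    tls /etc/letsencrypt/live/vm.yourdomain.org/fullchain.pem /etc/letsencrypt/live/vm.yourdomain.org/privkey.pem
    reverse_proxy http://127.0.0.1:4020
}
EOF
sudo systemctl restart caddy
```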
36 | 37 | To install on Debian/Ubuntu, according to the 38 | [official instructions](https://caddyserver.com/docs/install#debian-ubuntu-raspbian): 39 | ```shell 40 | sudo apt install -y debian-keyring debian-archive-keyring apt-transport-https 41 | curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/gpg.key' | sudo gpg --dearmor -o /usr/share/keyrings/caddy-stable-archive-keyring.gpg 42 | curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt' | sudo tee /etc/apt/sources.list.d/caddy-stable.list 43 | sudo apt update 44 | sudo apt install caddy 45 | ``` 46 | 47 | Then give Caddy access to the certificates generated by Certbot: 48 | ```shell 49 | chmod 750 /etc/letsencrypt/live/ 50 | chmod 750 /etc/letsencrypt/archive/ 51 | chmod 640 /etc/letsencrypt/archive/vm.yourdomain.org/privkey1.pem 52 | chgrp -R caddy /etc/letsencrypt/archive/ 53 | chgrp -R caddy /etc/letsencrypt/live/ 54 | ``` 55 | 56 | Configure Caddy: 57 | ```shell 58 | cat >/etc/caddy/Caddyfile <<EOF -------------------------------------------------------------------------------- /TESTING.md: -------------------------------------------------------------------------------- 55 | ``` 56 | 57 | 58 | Specify `--capture=no` to pytest so it does not capture output. This way you get the full output, including Firecracker logs. 59 | 60 | ## Debugging runtimes 61 | If the error is in the runtime: 62 | Modify the shebang (`#!`) to pass the -v option to Python, which will print all the debugging info: 63 | `#!/usr/bin/python3 -vOO` 64 | 65 | To have these modifications take effect, you need to rebuild the runtime file using `create_disk_image.sh` as _root_: 66 | 67 | ```shell 68 | sudo bash create_disk_image.sh 69 | ``` 70 | 71 | Don't forget to set the print system logs option: `ALEPH_VM_PRINT_SYSTEM_LOGS=1` 72 | 73 | `aleph-debian-12-python` is used in `test_create_execution`. -------------------------------------------------------------------------------- /config.json: -------------------------------------------------------------------------------- 1 | { 2 | "vm_id": 5, 3 | "settings": { 4 | "PRINT_SYSTEM_LOGS": true, 5 | "IPV4_ADDRESS_POOL": "172.16.0.0/12", 6 | "IPV4_NETWORK_PREFIX_LENGTH": 24, 7 | "NETWORK_INTERFACE": "enp5s0", 8 | "IPV6_ALLOCATION_POLICY": "static", 9 | "IPV6_ADDRESS_POOL": "fc00:1:2:3::/64", 10 | "IPV6_SUBNET_PREFIX": 124, 11 | "USE_NDP_PROXY": true, 12 | "IPV6_FORWARDING_ENABLED": false 13 | }, 14 | "vm_configuration": { 15 | "use_jailer": true, 16 | "firecracker_bin_path": "/opt/firecracker/firecracker", 17 | "jailer_bin_path": "/opt/firecracker/jailer", 18 | "config_file_path": "/var/lib/aleph/vm/config.json", 19 | "init_timeout": 30 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /doc/INSTALL-Debian-11.md: -------------------------------------------------------------------------------- 1 | [[https://docs.aleph.im/nodes/compute/installation/debian-11/]] -------------------------------------------------------------------------------- /doc/INSTALL-Debian-12.md: -------------------------------------------------------------------------------- 1 | [[https://docs.aleph.im/nodes/compute/installation/debian-12/]] -------------------------------------------------------------------------------- /doc/INSTALL-Ubuntu-20.04.md: -------------------------------------------------------------------------------- 1 | Moved to [[https://docs.aleph.im/nodes/compute/installation/ubuntu-20.04/]] -------------------------------------------------------------------------------- /doc/INSTALL-Ubuntu-22.04.md: -------------------------------------------------------------------------------- 1 | [[https://docs.aleph.im/nodes/compute/installation/ubuntu-22.04/]]
-------------------------------------------------------------------------------- /doc/INSTALL.md: -------------------------------------------------------------------------------- 1 | # Installing Aleph-VM 2 | see [[https://docs.aleph.im/nodes/compute/]] -------------------------------------------------------------------------------- /doc/images/boot_process.drawio.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/aleph-vm/35ce27f1e51fd1bc86ceea502a81de77c26c5fe3/doc/images/boot_process.drawio.png -------------------------------------------------------------------------------- /docker/publish_vm_connector.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euf -o pipefail 3 | 4 | if hash docker 2> /dev/null 5 | then 6 | DOCKER_COMMAND=docker 7 | else 8 | DOCKER_COMMAND=podman 9 | fi 10 | 11 | #VERSION=$(git describe --tags)-alpha 12 | VERSION=alpha 13 | 14 | $DOCKER_COMMAND build -t alephim/vm-connector -f docker/vm_connector.dockerfile . 15 | 16 | $DOCKER_COMMAND tag alephim/vm-connector alephim/vm-connector:$VERSION 17 | $DOCKER_COMMAND push alephim/vm-connector:$VERSION docker.io/alephim/vm-connector:$VERSION 18 | echo docker.io/alephim/vm-connector:$VERSION 19 | -------------------------------------------------------------------------------- /docker/publish_vm_supervisor_dev.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euf -o pipefail 3 | 4 | if hash docker 2> /dev/null 5 | then 6 | DOCKER_COMMAND=docker 7 | else 8 | DOCKER_COMMAND=podman 9 | fi 10 | 11 | #VERSION=$(git describe --tags)-alpha 12 | VERSION=alpha 13 | 14 | $DOCKER_COMMAND build -t alephim/vm-supervisor-dev -f docker/vm_supervisor-dev.dockerfile . 15 | 16 | $DOCKER_COMMAND tag alephim/vm-supervisor-dev alephim/vm-supervisor-dev:$VERSION 17 | $DOCKER_COMMAND push alephim/vm-supervisor-dev:$VERSION docker.io/alephim/vm-supervisor-dev:$VERSION 18 | echo docker.io/alephim/vm-supervisor-dev:$VERSION 19 | -------------------------------------------------------------------------------- /docker/run_vm_connector.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -euf 4 | 5 | # Use Podman if installed, else use Docker 6 | if hash podman 2> /dev/null 7 | then 8 | DOCKER_COMMAND=podman 9 | else 10 | DOCKER_COMMAND=docker 11 | fi 12 | 13 | $DOCKER_COMMAND build -t aleph-connector -f docker/vm_connector.dockerfile . 14 | 15 | $DOCKER_COMMAND run -ti --rm -p 4021:4021/tcp \ 16 | -v "$(pwd)/kernels:/opt/kernels:ro" \ 17 | -v "$(pwd)/vm_connector:/opt/vm_connector:ro" \ 18 | --name aleph-connector \ 19 | aleph-connector "$@" 20 | -------------------------------------------------------------------------------- /docker/run_vm_supervisor.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -euf 4 | 5 | # Use Podman if installed, else use Docker 6 | if hash podman 2> /dev/null 7 | then 8 | DOCKER_COMMAND=podman 9 | else 10 | DOCKER_COMMAND=docker 11 | fi 12 | 13 | $DOCKER_COMMAND build -t alephim/vm-supervisor-dev -f docker/vm_supervisor-dev.dockerfile .
14 | 15 | $DOCKER_COMMAND run -ti --rm \ 16 | -v "$(pwd)/runtimes/aleph-debian-11-python/rootfs.squashfs:/opt/aleph-vm/runtimes/aleph-debian-11-python/rootfs.squashfs:ro" \ 17 | -v "$(pwd)/examples/volumes/volume-venv.squashfs:/opt/aleph-vm/examples/volumes/volume-venv.squashfs:ro" \ 18 | -v "$(pwd)/vm_supervisor:/opt/aleph-vm/vm_supervisor:ro" \ 19 | -v "$(pwd)/firecracker:/opt/aleph-vm/firecracker:ro" \ 20 | --device /dev/kvm \ 21 | -p 4020:4020 \ 22 | alephim/vm-supervisor-dev "$@" 23 | -------------------------------------------------------------------------------- /docker/vm_connector.dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.9 2 | 3 | RUN apt-get update && apt-get -y upgrade && apt-get install -y \ 4 | libsecp256k1-dev \ 5 | zip \ 6 | && rm -rf /var/lib/apt/lists/* 7 | 8 | RUN pip install fastapi aiofiles uvicorn aleph-client eth-account 9 | 10 | WORKDIR /opt 11 | ENV PYTHONPATH=/opt 12 | EXPOSE 4021 13 | 14 | COPY ./vm_connector /opt/vm_connector 15 | CMD ["uvicorn", "vm_connector.main:app", "--host", "0.0.0.0", "--port", "4021", "--reload"] 16 | -------------------------------------------------------------------------------- /docker/vm_supervisor-dev.dockerfile: -------------------------------------------------------------------------------- 1 | # This is mainly a copy of the installation instructions from [orchestrator/README.md] 2 | 3 | FROM debian:bookworm 4 | 5 | RUN apt-get update && apt-get -y upgrade && apt-get install -y \ 6 | sudo acl curl squashfs-tools git \ 7 | python3 python3-aiohttp python3-alembic python3-msgpack python3-pip python3-aiodns python3-aioredis\ 8 | python3-nftables python3-psutil python3-setproctitle python3-sqlalchemy python3-packaging ndppd nftables \ 9 | && rm -rf /var/lib/apt/lists/* 10 | 11 | RUN useradd jailman 12 | 13 | RUN mkdir /opt/firecracker 14 | RUN chown $(whoami) /opt/firecracker 15 | RUN curl -fsSL https://github.com/firecracker-microvm/firecracker/releases/download/v1.3.3/firecracker-v1.3.3-x86_64.tgz | tar -xz --no-same-owner --directory /opt/firecracker 16 | RUN curl -fsSL -o /opt/firecracker/vmlinux.bin https://s3.amazonaws.com/spec.ccfc.min/img/quickstart_guide/x86_64/kernels/vmlinux.bin 17 | 18 | # Link binaries on version-agnostic paths: 19 | RUN ln /opt/firecracker/release-*/firecracker-v* /opt/firecracker/firecracker 20 | RUN ln /opt/firecracker/release-*/jailer-v* /opt/firecracker/jailer 21 | 22 | RUN pip3 install typing-extensions 'aleph-message~=1.0.1' 23 | 24 | RUN mkdir -p /var/lib/aleph/vm/jailer 25 | 26 | ENV PYTHONPATH /mnt 27 | 28 | # Networking only works in privileged containers 29 | ENV ALEPH_VM_ALLOW_VM_NETWORKING False 30 | ENV ALEPH_VM_NETWORK_INTERFACE "tap0" 31 | # Jailer does not work in Docker containers 32 | ENV ALEPH_VM_USE_JAILER False 33 | # Use fake test data 34 | ENV ALEPH_VM_FAKE_DATA True 35 | # Allow connections from host 36 | ENV ALEPH_VM_SUPERVISOR_HOST "0.0.0.0" 37 | 38 | # Make it easy to enter this command from a shell script 39 | RUN echo "python3 -m vm_supervisor --print-settings --very-verbose --system-logs --profile -f ./examples/example_fastapi" >> /root/.bash_history 40 | 41 | RUN mkdir /opt/aleph-vm/ 42 | COPY ./vm_supervisor /opt/aleph-vm/vm_supervisor 43 | COPY ./firecracker /opt/aleph-vm/firecracker 44 | COPY ./guest_api /opt/aleph-vm/guest_api 45 | COPY ./examples /opt/aleph-vm/examples 46 | COPY ./runtimes /opt/aleph-vm/runtimes 47 | 48 | WORKDIR /opt/aleph-vm 49 | 50 | CMD "bash" 51 | 
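For reference, a typical development loop with the image built from this Dockerfile could look like the sketch below; the inner command mirrors the hint written to `/root/.bash_history` above, and `docker` can be replaced by `podman`:

```shell
docker build -t alephim/vm-supervisor-dev -f docker/vm_supervisor-dev.dockerfile .
docker run -ti --rm --device /dev/kvm -p 4020:4020 alephim/vm-supervisor-dev

# Then, inside the container:
python3 -m vm_supervisor --print-settings --very-verbose --system-logs --profile -f ./examples/example_fastapi
```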
-------------------------------------------------------------------------------- /examples/Makefile: -------------------------------------------------------------------------------- 1 | all: example_fastapi.zip data.tgz 2 | 3 | clean: 4 | rm example_fastapi.zip 5 | rm data.tgz 6 | 7 | example_fastapi.zip: 8 | zip -r example_fastapi.zip example_fastapi 9 | 10 | data.tgz: 11 | tar -cvzf data.tgz data 12 | 13 | example_pip.squashfs: 14 | rm -fr /opt/requirements 15 | pip3 install -t /opt/requirements -r example_pip/requirements.txt 16 | mksquashfs /opt/requirements example_pip.squashfs 17 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | # Creating and running an Aleph Program 2 | 3 | In this example, we will cover how to develop and run a small webapp based 4 | on [FastAPI](https://fastapi.tiangolo.com/). 5 | 6 | ## Initial setup 7 | 8 | Let's start by creating a package for our app: 9 | Create a directory named `example_fastapi` 10 | and an empty file named `__init__.py` within the directory. 11 | ``` 12 | example_fastapi/ 13 | example_fastapi/__init__.py 14 | ``` 15 | 16 | Then copy the example from the FastAPI tutorial into `__init__.py`: 17 | ```python 18 | from typing import Optional 19 | 20 | from fastapi import FastAPI 21 | 22 | app = FastAPI() 23 | 24 | 25 | @app.get("/") 26 | def index(): 27 | return {"Hello": "World"} 28 | 29 | 30 | @app.get("/items/{item_id}") 31 | def read_item(item_id: int, q: Optional[str] = None): 32 | return {"item_id": item_id, "q": q} 33 | ``` 34 | 35 | Install the FastAPI library and Uvicorn: 36 | ```shell 37 | pip install fastapi uvicorn 38 | ``` 39 | 40 | Uvicorn is used to run ASGI compatible web applications, such as the `app` 41 | web application from the example above. You need to give it the name of the 42 | Python module to use and the name of the app: 43 | ```shell 44 | uvicorn example_fastapi:app --reload 45 | ``` 46 | 47 | Then open the app in a web browser on http://localhost:8000 48 | 49 | > Tip: With `--reload`, Uvicorn will automatically reload your code upon changes 50 | 51 | ## Upload on Aleph 52 | 53 | The same `app` we just used with Uvicorn can be used by Aleph to run 54 | the web app, since Aleph attempts to be compatible with 55 | [ASGI](https://asgi.readthedocs.io/ASGI). 56 | 57 | To achieve this, we need to follow these steps: 58 | 59 | ### 1. Create a zip archive containing the app 60 | 61 | ```shell 62 | zip -r example_fastapi.zip example_fastapi 63 | ``` 64 | 65 | ### 2. Store the zip archive on Aleph 66 | 67 | You can use [aleph-client](https://github.com/aleph-im/aleph-client) to achieve this. 68 | See `examples/store.py`. 69 | 70 | ### 3. Create an Aleph message describing how to run your app 71 | 72 | See [this example](https://explorer.aleph.im/address/ETH/0x9319Ad3B7A8E0eE24f2E639c40D8eD124C5520Ba/message/POST/91c83eff3ba23d6b501a2aa3c4364ec235eb8283b6fa8ac20d235642a48791b8). 73 | 74 | In the `code` section, replace the `ref` with the `item_hash` of the message 75 | storing your code. 76 | 77 | Update the `entrypoint` field according to your app if necessary.
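Alternatively, the `aleph` CLI from [aleph-client](https://github.com/aleph-im/aleph-client) can upload the code and publish the program message in one step. A sketch, assuming the package layout above (exact flags may vary between aleph-client versions):

```shell
pip install aleph-client

# Upload the example_fastapi directory and declare the ASGI entrypoint
# as "module:variable", here the app defined in example_fastapi/__init__.py
aleph program upload ./example_fastapi example_fastapi:app
```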
78 | 79 | ## Testing 80 | 81 | Open the HTTP interface of a node running the VM Supervisor: 82 | 83 | http://ip-of-supervisor:4020/vm/{message_hash}/ 84 | -------------------------------------------------------------------------------- /examples/confidential_instance_message_from_aleph.json: -------------------------------------------------------------------------------- 1 | { 2 | "chain": "ETH", 3 | "item_hash": "fake-hash-fake-hash-fake-hash-fake-hash-fake-hash-fake-hash-hash", 4 | "sender": "0x9319Ad3B7A8E0eE24f2E639c40D8eD124C5520Ba", 5 | "type": "INSTANCE", 6 | "channel": "Fun-dApps", 7 | "confirmed": true, 8 | "content": { 9 | "address": "0xE0178501683a4C321cAE8263839F349e0f07dECd", 10 | "allow_amend": false, 11 | "variables": { 12 | "VM_CUSTOM_NUMBER": "32" 13 | }, 14 | "environment": { 15 | "hypervisor": "qemu", 16 | "reproducible": true, 17 | "internet": true, 18 | "aleph_api": true, 19 | "shared_cache": true, 20 | "trusted_execution": { 21 | "firmware": "88978bb4c2ff54400ce5f51c3a109e1af1ab03d1ea4409666917317ac513846b", 22 | "policy": 1 23 | } 24 | }, 25 | "resources": { 26 | "vcpus": 1, 27 | "memory": 512, 28 | "seconds": 30 29 | }, 30 | "rootfs": { 31 | "parent": { 32 | "ref": "549ec451d9b099cad112d4aaa2c00ac40fb6729a92ff252ff22eef0b5c3cb613", 33 | "use_latest": true 34 | }, 35 | "persistence": "host", 36 | "size_mib": 5000 37 | }, 38 | "authorized_keys": [ 39 | "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDj95BHGUx0/z2G/tTrEi8o49i70xvjcEUdSs3j4A33jE7pAphrfRVbuFMgFubcm8n9r5ftd/H8SjjTL4hY9YvWV5ZuMf92GUga3n4wgevvPlBszYZCy/idxFl0vtHYC1CcK9v4tVb9onhDt8FOJkf2m6PmDyvC+6tl6LwoerXTeeiKr5VnTB4KOBkammtFmix3d1X1SZd/cxdwZIHcQ7BNsqBm2w/YzVba6Z4ZnFUelBkQtMQqNs2aV51O1pFFqtZp2mM71D5d8vn9pOtqJ5QmY5IW6NypcyqKJZg5o6QguK5rdXLkc7AWro27BiaHIENl3w0wazp9EDO9zPAGJ6lz olivier@lanius" 40 | ], 41 | "time": 1619017773.8950517 42 | }, 43 | "item_content": "{\"address\":\"0x9319Ad3B7A8E0eE24f2E639c40D8eD124C5520Ba\",\"allow_amend\":false,\"variables\":{\"VM_CUSTOM_NUMBER\":\"32\"},\"environment\":{\"reproducible\":true,\"internet\":true,\"aleph_api\":true,\"shared_cache\":true},\"resources\":{\"vcpus\":1,\"memory\":128,\"seconds\":30},\"rootfs\":{\"parent\":{\"ref\":\"549ec451d9b099cad112d4aaa2c00ac40fb6729a92ff252ff22eef0b5c3cb613\",\"use_latest\":true},\"persistence\":\"host\",\"size_mib\":20000},\"cloud_config\":{\"password\":\"password\",\"chpasswd\":{\"expire\":\"False\"}},\"volumes\":[{\"mount\":\"/opt/venv\",\"ref\":\"5f31b0706f59404fad3d0bff97ef89ddf24da4761608ea0646329362c662ba51\",\"use_latest\":false},{\"comment\":\"Working data persisted on the VM supervisor, not available on other nodes\",\"mount\":\"/var/lib/example\",\"name\":\"data\",\"persistence\":\"host\",\"size_mib\":5}],\"replaces\":\"0x9319Ad3B7A8E0eE24f2E639c40D8eD124C5520Ba\",\"time\":1619017773.8950517}", 44 | "item_type": "inline", 45 | "signature": "0x372da8230552b8c3e65c05b31a0ff3a24666d66c575f8e11019f62579bf48c2b7fe2f0bbe907a2a5bf8050989cdaf8a59ff8a1cbcafcdef0656c54279b4aa0c71b", 46 | "size": 749, 47 | "time": 1619017773.8950577, 48 | "confirmations": [ 49 | { 50 | "chain": "ETH", 51 | "height": 12284734, 52 | "hash": "0x67f2f3cde5e94e70615c92629c70d22dc959a118f46e9411b29659c2fce87cdc" 53 | } 54 | ] 55 | } 56 | -------------------------------------------------------------------------------- /examples/data/example.json: -------------------------------------------------------------------------------- 1 | { 2 | "hello": "world" 3 | } 4 | -------------------------------------------------------------------------------- 
/examples/example_confidential_image/README.md: -------------------------------------------------------------------------------- 1 | # Create an encrypted VM image 2 | These sample scripts create an encrypted VM image suitable to be used for confidential computing. 3 | 4 | They will create an encrypted partition, a boot partition and the necessary initramfs to decrypt the partition. The created image is designed to work in tandem with the custom OVMF found in `runtimes/ovmf`, which can receive the decryption key over a secure channel via QMP and pass it to grub to decrypt the disk. 5 | 6 | You can customise your VM by modifying the `setup_debian_rootfs.sh` script and adding your instructions at the end. This script is run "inside" the VM chroot. For example: add your user, install an SSH key, or install additional software. 7 | 8 | 9 | ## Procedure to create the image 10 | ### Requirements 11 | * guestmount 12 | * parted 13 | * cryptsetup 14 | 15 | On Debian they can be installed via their respective packages: 16 | `apt install guestmount parted cryptsetup` 17 | 18 | ### Procure a Debian image 19 | Your image needs to have cloud-init installed in it for the network setup. It is recommended to start from the genericcloud image. Experiments with using the nocloud image and then installing cloud-init have failed to work. 20 | 21 | ```shell 22 | wget https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-genericcloud-amd64.qcow2 23 | ``` 24 | 25 | ### Extract the root filesystem 26 | To do so, we simply need to mount the image with `guestmount`. 27 | 28 | > Make sure that you stop the VM before exporting the root filesystem. 29 | 30 | ```shell 31 | sudo mkdir -p /mnt/debian 32 | sudo guestmount \ 33 | --format=qcow2 \ 34 | -a ./debian-12-genericcloud-amd64.qcow2 \ 35 | -o allow_other \ 36 | -i /mnt/debian 37 | ``` 38 | 39 | Then, you can simply copy the root file system to any directory, taking care to preserve permissions such as the setuid bit by using the --archive option. 40 | 41 | ```shell 42 | export ROOT_DIR=./extracted 43 | mkdir ${ROOT_DIR} 44 | sudo cp --archive /mnt/debian/* ${ROOT_DIR} 45 | ``` 46 | 47 | Clean up the mount: 48 | ```shell 49 | sudo guestunmount /mnt/debian 50 | sudo rmdir /mnt/debian 51 | ``` 52 | 53 | 54 | Run build_debian_image.sh, which will create the image with the encrypted disk. 55 | > This script will require sudo for certain commands 56 | 57 | The password option is the *secret* passphrase with which the disk will be encrypted; you will need to pass it to launch the VM. 58 | 59 | ```shell 60 | bash ./build_debian_image.sh --rootfs-dir $ROOT_DIR -o ~/destination-image.img --password your-password 61 | ``` 62 | 63 | > Tip: To debug the image creation, pass the `-x` option to bash in front of the script name 64 | 65 | ## To test and further customise your image, you can also boot it inside qemu 66 | ```shell 67 | sudo qemu-system-x86_64 \ 68 | -drive format=raw,file=<your-image> \ 69 | -enable-kvm \ 70 | -m 2048 \ 71 | -nic user,model=virtio \ 72 | -nographic \ 73 | -serial mon:stdio \ 74 | -drive if=pflash,format=raw,unit=0,file=/usr/share/ovmf/OVMF.fd,readonly=on 75 | ``` 76 | 77 | > Once you have entered your password you might have to wait a minute or so for the disk to decrypt and boot.
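You can also verify from the host that the root filesystem really is encrypted. A minimal sketch, assuming the encrypted root is the second partition of the image (adjust the partition index to your layout):

```shell
# Attach the image to a loop device and scan its partition table
LOOP=$(sudo losetup -fP --show ~/destination-image.img)

# Should print a LUKS header with its cipher and key slots
sudo cryptsetup luksDump "${LOOP}p2"

# Detach the loop device when done
sudo losetup -d "$LOOP"
```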
78 | 79 | To exit qemu : press Ctrl a, x and then [Enter] 80 | -------------------------------------------------------------------------------- /examples/example_django/blog/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/aleph-vm/35ce27f1e51fd1bc86ceea502a81de77c26c5fe3/examples/example_django/blog/__init__.py -------------------------------------------------------------------------------- /examples/example_django/blog/admin.py: -------------------------------------------------------------------------------- 1 | from django.contrib import admin 2 | 3 | from .models import Article, Comment 4 | 5 | admin.site.register(Article) 6 | admin.site.register(Comment) 7 | -------------------------------------------------------------------------------- /examples/example_django/blog/apps.py: -------------------------------------------------------------------------------- 1 | from django.apps import AppConfig 2 | 3 | 4 | class BlogConfig(AppConfig): 5 | default_auto_field = "django.db.models.BigAutoField" 6 | name = "blog" 7 | -------------------------------------------------------------------------------- /examples/example_django/blog/fixtures/default_articles.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "model": "blog.article", 4 | "pk": "f115d067-f6c9-4532-a140-40c51f37a1bc", 5 | "fields": { 6 | "date": "2021-07-02T13:33:03Z", 7 | "title": "Something different", 8 | "body": "Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum." 
9 | } 10 | } 11 | ] 12 | -------------------------------------------------------------------------------- /examples/example_django/blog/forms.py: -------------------------------------------------------------------------------- 1 | from django import forms 2 | from django.forms import ModelForm 3 | 4 | from .models import Comment 5 | 6 | 7 | class CommentForm(ModelForm): 8 | class Meta: 9 | model = Comment 10 | fields = ["text", "article"] 11 | widgets = {"article": forms.HiddenInput()} 12 | -------------------------------------------------------------------------------- /examples/example_django/blog/migrations/0001_initial.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 3.2.4 on 2021-07-02 09:35 2 | 3 | import django.db.models.deletion 4 | from django.db import migrations, models 5 | 6 | 7 | class Migration(migrations.Migration): 8 | initial = True 9 | 10 | dependencies = [] 11 | 12 | operations = [ 13 | migrations.CreateModel( 14 | name="Article", 15 | fields=[ 16 | ("date", models.DateTimeField(auto_created=True)), 17 | ("id", models.UUIDField(primary_key=True, serialize=False)), 18 | ( 19 | "title", 20 | models.CharField(help_text="Title of the blog article", max_length=256), 21 | ), 22 | ("body", models.TextField(help_text="Body of the blog article")), 23 | ], 24 | ), 25 | migrations.CreateModel( 26 | name="Comment", 27 | fields=[ 28 | ("date", models.DateTimeField(auto_created=True, auto_now_add=True)), 29 | ("id", models.UUIDField(primary_key=True, serialize=False)), 30 | ("text", models.CharField(max_length=1024)), 31 | ( 32 | "article", 33 | models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="blog.article"), 34 | ), 35 | ], 36 | ), 37 | ] 38 | -------------------------------------------------------------------------------- /examples/example_django/blog/migrations/0002_auto_20210702_1331.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 3.2.4 on 2021-07-02 13:31 2 | 3 | import uuid 4 | 5 | from django.db import migrations, models 6 | 7 | 8 | class Migration(migrations.Migration): 9 | dependencies = [ 10 | ("blog", "0001_initial"), 11 | ] 12 | 13 | operations = [ 14 | migrations.AlterField( 15 | model_name="article", 16 | name="id", 17 | field=models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False), 18 | ), 19 | migrations.AlterField( 20 | model_name="comment", 21 | name="date", 22 | field=models.DateTimeField(auto_now_add=True), 23 | ), 24 | migrations.AlterField( 25 | model_name="comment", 26 | name="id", 27 | field=models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False), 28 | ), 29 | ] 30 | -------------------------------------------------------------------------------- /examples/example_django/blog/migrations/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/aleph-vm/35ce27f1e51fd1bc86ceea502a81de77c26c5fe3/examples/example_django/blog/migrations/__init__.py -------------------------------------------------------------------------------- /examples/example_django/blog/models.py: -------------------------------------------------------------------------------- 1 | import uuid 2 | 3 | from django.db import models 4 | 5 | 6 | class Article(models.Model): 7 | id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) 8 | title = models.CharField(max_length=256, help_text="Title of the 
blog article") 9 | body = models.TextField(help_text="Body of the blog article") 10 | date = models.DateTimeField(auto_created=True) 11 | 12 | def __str__(self): 13 | return f"Blog article '{self.title}'" 14 | 15 | 16 | class Comment(models.Model): 17 | id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) 18 | text = models.CharField(max_length=1024) 19 | article = models.ForeignKey(to=Article, on_delete=models.CASCADE) 20 | date = models.DateTimeField(auto_now_add=True, editable=False) 21 | 22 | def __str__(self): 23 | return f"Comment on {self.article.title}" 24 | -------------------------------------------------------------------------------- /examples/example_django/blog/templates/blog/article_list.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | My Django Blog 5 | 30 | 31 | 32 |

My Django Blog

33 | 34 | {% for article in object_list %} 35 |
36 |

{{ article.title }}

37 |
Published on
38 |

39 | {{ article.body }} 40 |

41 |
42 | {% for comment in article.comment_set.all %} 43 |

{{ comment.text }}

44 | {% endfor %} 45 | 46 |
47 | {% csrf_token %} 48 | {{ form }} 49 | 50 | 51 |
52 |
53 |
54 | {% empty %} 55 |
  • No articles yet.
  • 56 | {% endfor %} 57 | 58 | 61 | 62 | 63 | -------------------------------------------------------------------------------- /examples/example_django/blog/templates/blog/comment.html: -------------------------------------------------------------------------------- 1 |
    2 | {% csrf_token %} 3 | {{ form }} 4 | 5 |
    6 | -------------------------------------------------------------------------------- /examples/example_django/blog/urls.py: -------------------------------------------------------------------------------- 1 | from django.urls import path 2 | from django.views.decorators.csrf import csrf_exempt 3 | 4 | from .views import ArticleListView, CommentFormView, test_view 5 | 6 | urlpatterns = [ 7 | path("", ArticleListView.as_view(), name="article-list"), 8 | path("comment", csrf_exempt(CommentFormView.as_view()), name="comment"), 9 | path("post", csrf_exempt(test_view), name="test-post"), 10 | ] 11 | -------------------------------------------------------------------------------- /examples/example_django/blog/views.py: -------------------------------------------------------------------------------- 1 | from django.http import JsonResponse 2 | from django.views.generic import CreateView, ListView 3 | 4 | from .forms import CommentForm 5 | from .models import Article 6 | 7 | 8 | class ArticleListView(ListView): 9 | model = Article 10 | ordering = "-date" 11 | 12 | extra_context = {"form": CommentForm} 13 | 14 | 15 | class CommentFormView(CreateView): 16 | template_name = "blog/comment.html" 17 | form_class = CommentForm 18 | success_url = "/" 19 | 20 | 21 | def test_view(request): 22 | print(request.POST) 23 | return JsonResponse(request.POST) 24 | -------------------------------------------------------------------------------- /examples/example_django/example_django/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/aleph-vm/35ce27f1e51fd1bc86ceea502a81de77c26c5fe3/examples/example_django/example_django/__init__.py -------------------------------------------------------------------------------- /examples/example_django/example_django/asgi.py: -------------------------------------------------------------------------------- 1 | """ 2 | ASGI config for example_django project. 3 | 4 | It exposes the ASGI callable as a module-level variable named ``application``. 5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/ 8 | """ 9 | 10 | import os 11 | 12 | from django.core.asgi import get_asgi_application 13 | 14 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example_django.settings") 15 | 16 | application = get_asgi_application() 17 | 18 | os.system("/usr/bin/python3 /opt/code/manage.py migrate") 19 | 20 | os.system("/usr/bin/python3 /opt/code/manage.py loaddata /opt/code/blog/fixtures/default_articles.json") 21 | -------------------------------------------------------------------------------- /examples/example_django/example_django/settings.py: -------------------------------------------------------------------------------- 1 | """ 2 | Django settings for example_django project. 3 | 4 | Generated by 'django-admin startproject' using Django 3.2.4. 5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/3.2/topics/settings/ 8 | 9 | For the full list of settings and their values, see 10 | https://docs.djangoproject.com/en/3.2/ref/settings/ 11 | """ 12 | 13 | import os.path 14 | from pathlib import Path 15 | 16 | # Build paths inside the project like this: BASE_DIR / 'subdir'. 
17 | BASE_DIR = Path(__file__).resolve().parent.parent 18 | 19 | 20 | # Quick-start development settings - unsuitable for production 21 | # See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ 22 | 23 | # SECURITY WARNING: keep the secret key used in production secret! 24 | SECRET_KEY = "django-insecure-1r3v1fc$q%sqy)0#bybc4pd##g+!tpm%+4^5opqyu93o0hqk$w" 25 | 26 | # SECURITY WARNING: don't run with debug turned on in production! 27 | DEBUG = True 28 | 29 | ALLOWED_HOSTS = ["127.0.0.1", "vm.demo.okeso.fr", "test.vm.demo.okeso.fr"] 30 | 31 | 32 | # Application definition 33 | 34 | INSTALLED_APPS = [ 35 | "django.contrib.admin", 36 | "django.contrib.auth", 37 | "django.contrib.contenttypes", 38 | "django.contrib.sessions", 39 | "django.contrib.messages", 40 | "django.contrib.staticfiles", 41 | "blog", 42 | ] 43 | 44 | MIDDLEWARE = [ 45 | "django.middleware.security.SecurityMiddleware", 46 | "django.contrib.sessions.middleware.SessionMiddleware", 47 | "django.middleware.common.CommonMiddleware", 48 | "django.middleware.csrf.CsrfViewMiddleware", 49 | "django.contrib.auth.middleware.AuthenticationMiddleware", 50 | "django.contrib.messages.middleware.MessageMiddleware", 51 | "django.middleware.clickjacking.XFrameOptionsMiddleware", 52 | ] 53 | 54 | ROOT_URLCONF = "example_django.urls" 55 | 56 | TEMPLATES = [ 57 | { 58 | "BACKEND": "django.template.backends.django.DjangoTemplates", 59 | "DIRS": [], 60 | "APP_DIRS": True, 61 | "OPTIONS": { 62 | "context_processors": [ 63 | "django.template.context_processors.debug", 64 | "django.template.context_processors.request", 65 | "django.contrib.auth.context_processors.auth", 66 | "django.contrib.messages.context_processors.messages", 67 | ], 68 | }, 69 | }, 70 | ] 71 | 72 | WSGI_APPLICATION = "example_django.wsgi.application" 73 | 74 | 75 | # Database 76 | # https://docs.djangoproject.com/en/3.2/ref/settings/#databases 77 | 78 | if os.path.isdir("/var/lib/sqlite"): 79 | # Inside Aleph VM 80 | DATABASES = { 81 | "default": { 82 | "ENGINE": "django.db.backends.sqlite3", 83 | "NAME": "/var/lib/sqlite/db.sqlite3", 84 | } 85 | } 86 | else: 87 | # On developer setup 88 | DATABASES = { 89 | "default": { 90 | "ENGINE": "django.db.backends.sqlite3", 91 | "NAME": BASE_DIR / "db.sqlite3", 92 | } 93 | } 94 | 95 | 96 | # Password validation 97 | # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators 98 | 99 | AUTH_PASSWORD_VALIDATORS = [ 100 | { 101 | "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator", 102 | }, 103 | { 104 | "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator", 105 | }, 106 | { 107 | "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator", 108 | }, 109 | { 110 | "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator", 111 | }, 112 | ] 113 | 114 | 115 | # Internationalization 116 | # https://docs.djangoproject.com/en/3.2/topics/i18n/ 117 | 118 | LANGUAGE_CODE = "en-us" 119 | 120 | TIME_ZONE = "UTC" 121 | 122 | USE_I18N = True 123 | 124 | USE_L10N = True 125 | 126 | USE_TZ = True 127 | 128 | 129 | # Static files (CSS, JavaScript, Images) 130 | # https://docs.djangoproject.com/en/3.2/howto/static-files/ 131 | 132 | STATIC_URL = "https://ipfs.io/ipfs/QmUhm7UWrGrjoJY5cVZ9ur9PtT7nHzdmXJuNpD8s7VLcJR/" 133 | 134 | STATIC_ROOT = os.path.join(BASE_DIR, "static") 135 | 136 | # Default primary key field type 137 | # https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field 138 | 139 | DEFAULT_AUTO_FIELD = 
"django.db.models.BigAutoField" 140 | -------------------------------------------------------------------------------- /examples/example_django/example_django/urls.py: -------------------------------------------------------------------------------- 1 | """example_django URL Configuration 2 | 3 | The `urlpatterns` list routes URLs to views. For more information please see: 4 | https://docs.djangoproject.com/en/3.2/topics/http/urls/ 5 | Examples: 6 | Function views 7 | 1. Add an import: from my_app import views 8 | 2. Add a URL to urlpatterns: path('', views.home, name='home') 9 | Class-based views 10 | 1. Add an import: from other_app.views import Home 11 | 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') 12 | Including another URLconf 13 | 1. Import the include() function: from django.urls import include, path 14 | 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) 15 | """ 16 | 17 | from django.contrib import admin 18 | from django.urls import include, path 19 | 20 | urlpatterns = [ 21 | path("", include("blog.urls")), 22 | path("admin/", admin.site.urls), 23 | ] 24 | -------------------------------------------------------------------------------- /examples/example_django/example_django/wsgi.py: -------------------------------------------------------------------------------- 1 | """ 2 | WSGI config for example_django project. 3 | 4 | It exposes the WSGI callable as a module-level variable named ``application``. 5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/ 8 | """ 9 | 10 | import os 11 | 12 | from django.core.wsgi import get_wsgi_application 13 | 14 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example_django.settings") 15 | 16 | application = get_wsgi_application() 17 | -------------------------------------------------------------------------------- /examples/example_django/manage.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """Django's command-line utility for administrative tasks.""" 3 | 4 | import os 5 | import sys 6 | 7 | 8 | def main(): 9 | """Run administrative tasks.""" 10 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example_django.settings") 11 | try: 12 | from django.core.management import execute_from_command_line 13 | except ImportError as exc: 14 | raise ImportError( 15 | "Couldn't import Django. Are you sure it's installed and " 16 | "available on your PYTHONPATH environment variable? Did you " 17 | "forget to activate a virtual environment?" 
18 | ) from exc 19 | execute_from_command_line(sys.argv) 20 | 21 | 22 | if __name__ == "__main__": 23 | main() 24 | -------------------------------------------------------------------------------- /examples/example_fastapi/README.md: -------------------------------------------------------------------------------- 1 | Publish using: 2 | 3 | ```shell 4 | aleph program upload ../aleph-vm/examples/example_fastapi main:app \ 5 | --persistent-volume "persistence=host,size_mib=1,mount=/var/lib/example,name=increment-storage,comment=Persistence" 6 | ``` 7 | -------------------------------------------------------------------------------- /examples/example_fastapi_1.py: -------------------------------------------------------------------------------- 1 | from fastapi import FastAPI 2 | 3 | app = FastAPI() 4 | 5 | 6 | @app.get("/") 7 | def read_root(): 8 | return {"Hello": "World"} 9 | 10 | 11 | @app.get("/run/{item_id}") 12 | def read_item(item_id: str, q: str | None = None): 13 | return {"item_id": item_id, "q": q} 14 | 15 | 16 | @app.post("/run/{item_id}") 17 | def read_item_post(item_id: str, q: str | None = None): 18 | return {"item_id_post": item_id, "q": q} 19 | -------------------------------------------------------------------------------- /examples/example_http_js/.dockerignore: -------------------------------------------------------------------------------- 1 | *.zip 2 | *.squashfs 3 | *.key 4 | -------------------------------------------------------------------------------- /examples/example_http_js/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:16-bookworm 2 | 3 | RUN apt-get update && apt-get -y upgrade && apt-get install -y \ 4 | libsecp256k1-dev \ 5 | squashfs-tools \ 6 | python3-pip \ 7 | git \ 8 | && rm -rf /var/lib/apt/lists/* 9 | 10 | RUN pip install aleph-client 11 | 12 | WORKDIR /usr/src/example_http_js 13 | COPY . . 14 | 15 | RUN npm i 16 | -------------------------------------------------------------------------------- /examples/example_http_js/Makefile: -------------------------------------------------------------------------------- 1 | 2 | podman-prepare: 3 | podman build -t aleph-example-js . 4 | 5 | podman-publish: 6 | podman run --rm -ti aleph-example-js make publish 7 | 8 | podman-client: 9 | podman rmi aleph-example-js 10 | 11 | docker-prepare: 12 | docker build -t aleph-example-js . 13 | 14 | docker-publish: 15 | docker run --rm -ti aleph-example-js make publish 16 | 17 | publish: 18 | chmod +x ./src/run.sh 19 | aleph program upload ./src "run.sh" 20 | -------------------------------------------------------------------------------- /examples/example_http_js/README.md: -------------------------------------------------------------------------------- 1 | # Aleph VM JS Example 2 | 3 | A simple example program written in JS that can run in an Aleph VM. 4 | 5 | ## About 6 | 7 | This example is a simple HTTP server listening on port 8080. 8 | It does not depend on third-party libraries. 9 | 10 | Test it on http://localhost:8080. 
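A quick smoke test: start the server with `node src/server.js`, then from another terminal:

```shell
curl http://localhost:8080
# Expected output: Hello, World!
```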
11 | 12 | ## Publish the program 13 | 14 | ### Locally 15 | 16 | ```shell 17 | make publish 18 | ``` 19 | 20 | ### Using Podman 21 | 22 | ```shell 23 | make podman-prepare 24 | make podman-publish 25 | ``` 26 | 27 | ### Using Docker 28 | 29 | ```shell 30 | make docker-prepare 31 | make docker-publish 32 | ``` 33 | -------------------------------------------------------------------------------- /examples/example_http_js/package.json: -------------------------------------------------------------------------------- 1 | {} -------------------------------------------------------------------------------- /examples/example_http_js/src/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -euf 4 | 5 | cd /opt/code 6 | node /opt/code/server.js 7 | -------------------------------------------------------------------------------- /examples/example_http_js/src/server.js: -------------------------------------------------------------------------------- 1 | const http = require('http'); 2 | const requestListener = function (req, res) { 3 | res.writeHead(200, {'Content-Type': 'text/plain'}); 4 | res.end('Hello, World!'); 5 | } 6 | const server = http.createServer(requestListener); 7 | server.listen(8080); 8 | -------------------------------------------------------------------------------- /examples/example_http_rust/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "example_http_rust" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | -------------------------------------------------------------------------------- /examples/example_http_rust/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rust:bookworm 2 | 3 | RUN apt-get update && apt-get -y upgrade && apt-get install -y \ 4 | libsecp256k1-dev \ 5 | python3-pip \ 6 | squashfs-tools \ 7 | && rm -rf /var/lib/apt/lists/* 8 | 9 | RUN pip install aleph-client 10 | 11 | WORKDIR /usr/src/example_http_rust 12 | COPY . . 13 | 14 | RUN cargo install --path . 15 | -------------------------------------------------------------------------------- /examples/example_http_rust/Makefile: -------------------------------------------------------------------------------- 1 | 2 | podman-prepare: 3 | podman build -t aleph-example-rust . 4 | 5 | podman-publish: 6 | podman run --rm -ti aleph-example-rust make publish 7 | 8 | docker-prepare: 9 | docker build -t aleph-example-rust . 10 | 11 | docker-publish: 12 | docker run --rm -ti aleph-example-rust make publish 13 | 14 | publish: 15 | cargo build --release 16 | mkdir -p ./dist 17 | cp target/release/example_http_rust ./dist/ 18 | aleph program upload ./dist example_http_rust 19 | -------------------------------------------------------------------------------- /examples/example_http_rust/README.md: -------------------------------------------------------------------------------- 1 | # Aleph VM Rust Example 2 | 3 | A simple example program written in Rust that 4 | can run in an Aleph VM. 5 | 6 | ## About 7 | 8 | This example is a simple HTTP server listening on port 8080. 9 | It does not depend on third-party libraries. 10 | 11 | Test it on http://localhost:8080. 
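For a quick local check outside any VM (assuming a local Rust toolchain), build and run the server with Cargo, then query it; per `src/main.rs` below, the response body starts with `OKIDOK`.

```shell
cargo run &
curl -i http://localhost:8080
# The response body starts with: OKIDOK
```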
12 | 13 | ## Publish the program 14 | 15 | ### Locally 16 | 17 | ```shell 18 | make publish 19 | ``` 20 | 21 | ### Using Podman 22 | 23 | ```shell 24 | make podman-prepare 25 | make podman-publish 26 | ``` 27 | 28 | ### Using Docker 29 | 30 | ```shell 31 | make docker-prepare 32 | make docker-publish 33 | ``` 34 | -------------------------------------------------------------------------------- /examples/example_http_rust/src/main.rs: -------------------------------------------------------------------------------- 1 | use std::io::prelude::*; 2 | use std::net::TcpListener; 3 | use std::net::TcpStream; 4 | 5 | fn main() { 6 | 7 | let listener = TcpListener::bind("0.0.0.0:8080").unwrap(); 8 | println!("Running on 0.0.0.0:8080"); 9 | for stream in listener.incoming() { 10 | let stream = stream.unwrap(); 11 | handle_connection(stream); 12 | } 13 | } 14 | 15 | 16 | fn handle_connection(mut stream: TcpStream) { 17 | println!("handling connection"); 18 | 19 | const MSG: &str = "helloworld"; 20 | let msg = MSG.as_bytes(); 21 | 22 | let response = format!("{:x?}", msg); 23 | 24 | let mut buffer = [0; 1024]; 25 | 26 | stream.read(&mut buffer).unwrap(); 27 | 28 | let response = format!("HTTP/1.1 200 OK\nContent-Type: text/plain\n\nOKIDOK\n{}", response); 29 | 30 | stream.write(response.as_bytes()).unwrap(); 31 | stream.flush().unwrap(); 32 | } 33 | -------------------------------------------------------------------------------- /examples/example_pip/main.py: -------------------------------------------------------------------------------- 1 | import pandas 2 | from fastapi import FastAPI, Response 3 | 4 | app = FastAPI() 5 | 6 | 7 | @app.get("/") 8 | async def root(): 9 | data = range(10) 10 | df = pandas.DataFrame(data) 11 | return Response(content=df.to_html(), media_type="text/html") 12 | -------------------------------------------------------------------------------- /examples/example_pip/requirements.txt: -------------------------------------------------------------------------------- 1 | pandas 2 | -------------------------------------------------------------------------------- /examples/instance_message_from_aleph.json: -------------------------------------------------------------------------------- 1 | { 2 | "chain": "ETH", 3 | "item_hash": "fake-hash-fake-hash-fake-hash-fake-hash-fake-hash-fake-hash-hash", 4 | "sender": "0x9319Ad3B7A8E0eE24f2E639c40D8eD124C5520Ba", 5 | "type": "INSTANCE", 6 | "channel": "Fun-dApps", 7 | "confirmed": true, 8 | "content": { 9 | "address": "0x9319Ad3B7A8E0eE24f2E639c40D8eD124C5520Ba", 10 | "allow_amend": false, 11 | "variables": { 12 | "VM_CUSTOM_NUMBER": "32" 13 | }, 14 | "environment": { 15 | "reproducible": true, 16 | "internet": true, 17 | "aleph_api": true, 18 | "shared_cache": true 19 | }, 20 | "resources": { 21 | "vcpus": 1, 22 | "memory": 512, 23 | "seconds": 30 24 | }, 25 | "rootfs": { 26 | "parent": { 27 | "ref": "549ec451d9b099cad112d4aaa2c00ac40fb6729a92ff252ff22eef0b5c3cb613", 28 | "use_latest": true 29 | }, 30 | "persistence": "host", 31 | "size_mib": 5000 32 | }, 33 | "authorized_keys": [ 34 | "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHlGJRaIv/EzNT0eNqNB5DiGEbii28Fb2zCjuO/bMu7y amolinsdiaz@gmail.com" 35 | ], 36 | "volumes": [ 37 | { 38 | "mount": "/opt/venv", 39 | "ref": "5f31b0706f59404fad3d0bff97ef89ddf24da4761608ea0646329362c662ba51", 40 | "use_latest": false 41 | } 42 | ], 43 | "replaces": "0x9319Ad3B7A8E0eE24f2E639c40D8eD124C5520Ba", 44 | "time": 1619017773.8950517 45 | }, 46 | "item_content":
"{\"address\":\"0x9319Ad3B7A8E0eE24f2E639c40D8eD124C5520Ba\",\"allow_amend\":false,\"variables\":{\"VM_CUSTOM_NUMBER\":\"32\"},\"environment\":{\"reproducible\":true,\"internet\":true,\"aleph_api\":true,\"shared_cache\":true},\"resources\":{\"vcpus\":1,\"memory\":128,\"seconds\":30},\"rootfs\":{\"parent\":{\"ref\":\"549ec451d9b099cad112d4aaa2c00ac40fb6729a92ff252ff22eef0b5c3cb613\",\"use_latest\":true},\"persistence\":\"host\",\"size_mib\":20000},\"cloud_config\":{\"password\":\"password\",\"chpasswd\":{\"expire\":\"False\"}},\"volumes\":[{\"mount\":\"/opt/venv\",\"ref\":\"5f31b0706f59404fad3d0bff97ef89ddf24da4761608ea0646329362c662ba51\",\"use_latest\":false},{\"comment\":\"Working data persisted on the VM supervisor, not available on other nodes\",\"mount\":\"/var/lib/example\",\"name\":\"data\",\"persistence\":\"host\",\"size_mib\":5}],\"replaces\":\"0x9319Ad3B7A8E0eE24f2E639c40D8eD124C5520Ba\",\"time\":1619017773.8950517}", 47 | "item_type": "inline", 48 | "signature": "0x372da8230552b8c3e65c05b31a0ff3a24666d66c575f8e11019f62579bf48c2b7fe2f0bbe907a2a5bf8050989cdaf8a59ff8a1cbcafcdef0656c54279b4aa0c71b", 49 | "size": 749, 50 | "time": 1619017773.8950577, 51 | "confirmations": [ 52 | { 53 | "chain": "ETH", 54 | "height": 12284734, 55 | "hash": "0x67f2f3cde5e94e70615c92629c70d22dc959a118f46e9411b29659c2fce87cdc" 56 | } 57 | ] 58 | } 59 | -------------------------------------------------------------------------------- /examples/qemu_message_from_aleph.json: -------------------------------------------------------------------------------- 1 | { 2 | "chain": "ETH", 3 | "item_hash": "fake-hash-fake-hash-fake-hash-fake-hash-fake-hash-fake-hash-hash", 4 | "sender": "0x9319Ad3B7A8E0eE24f2E639c40D8eD124C5520Ba", 5 | "type": "INSTANCE", 6 | "channel": "Fun-dApps", 7 | "confirmed": true, 8 | "content": { 9 | "address": "0x9319Ad3B7A8E0eE24f2E639c40D8eD124C5520Ba", 10 | "allow_amend": false, 11 | "variables": { 12 | "VM_CUSTOM_NUMBER": "32" 13 | }, 14 | "environment": { 15 | "reproducible": true, 16 | "internet": true, 17 | "aleph_api": true, 18 | "shared_cache": true, 19 | "hypervisor": "qemu" 20 | }, 21 | "resources": { 22 | "vcpus": 1, 23 | "memory": 512, 24 | "seconds": 30 25 | }, 26 | "rootfs": { 27 | "parent": { 28 | "ref": "549ec451d9b099cad112d4aaa2c00ac40fb6729a92ff252ff22eef0b5c3cb613", 29 | "use_latest": false 30 | }, 31 | "persistence": "host", 32 | "size_mib": 5000 33 | }, 34 | "authorized_keys": [ 35 | "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDj95BHGUx0/z2G/tTrEi8o49i70xvjcEUdSs3j4A33jE7pAphrfRVbuFMgFubcm8n9r5ftd/H8SjjTL4hY9YvWV5ZuMf92GUga3n4wgevvPlBszYZCy/idxFl0vtHYC1CcK9v4tVb9onhDt8FOJkf2m6PmDyvC+6tl6LwoerXTeeiKr5VnTB4KOBkammtFmix3d1X1SZd/cxdwZIHcQ7BNsqBm2w/YzVba6Z4ZnFUelBkQtMQqNs2aV51O1pFFqtZp2mM71D5d8vn9pOtqJ5QmY5IW6NypcyqKJZg5o6QguK5rdXLkc7AWro27BiaHIENl3w0wazp9EDO9zPAGJ6lz olivier@lanius" 36 | ], 37 | "volumes": [ 38 | { 39 | "mount": "/opt/venv", 40 | "ref": "5f31b0706f59404fad3d0bff97ef89ddf24da4761608ea0646329362c662ba51", 41 | "use_latest": false 42 | }, 43 | { 44 | "comment": "Working data persisted on the VM supervisor, not available on other nodes", 45 | "mount": "/var/lib/example", 46 | "name": "data", 47 | "persistence": "host", 48 | "size_mib": 5 49 | } 50 | ], 51 | "replaces": "0x9319Ad3B7A8E0eE24f2E639c40D8eD124C5520Ba", 52 | "time": 1619017773.8950517 53 | }, 54 | "item_content": 
"{\"address\":\"0x9319Ad3B7A8E0eE24f2E639c40D8eD124C5520Ba\",\"allow_amend\":false,\"variables\":{\"VM_CUSTOM_NUMBER\":\"32\"},\"environment\":{\"reproducible\":true,\"internet\":true,\"aleph_api\":true,\"shared_cache\":true},\"resources\":{\"vcpus\":1,\"memory\":128,\"seconds\":30},\"rootfs\":{\"parent\":{\"ref\":\"549ec451d9b099cad112d4aaa2c00ac40fb6729a92ff252ff22eef0b5c3cb613\",\"use_latest\":true},\"persistence\":\"host\",\"size_mib\":20000},\"cloud_config\":{\"password\":\"password\",\"chpasswd\":{\"expire\":\"False\"}},\"volumes\":[{\"mount\":\"/opt/venv\",\"ref\":\"5f31b0706f59404fad3d0bff97ef89ddf24da4761608ea0646329362c662ba51\",\"use_latest\":false},{\"comment\":\"Working data persisted on the VM supervisor, not available on other nodes\",\"mount\":\"/var/lib/example\",\"name\":\"data\",\"persistence\":\"host\",\"size_mib\":5}],\"replaces\":\"0x9319Ad3B7A8E0eE24f2E639c40D8eD124C5520Ba\",\"time\":1619017773.8950517}", 55 | "item_type": "inline", 56 | "signature": "0x372da8230552b8c3e65c05b31a0ff3a24666d66c575f8e11019f62579bf48c2b7fe2f0bbe907a2a5bf8050989cdaf8a59ff8a1cbcafcdef0656c54279b4aa0c71b", 57 | "size": 749, 58 | "time": 1619017773.8950577, 59 | "confirmations": [ 60 | { 61 | "chain": "ETH", 62 | "height": 12284734, 63 | "hash": "0x67f2f3cde5e94e70615c92629c70d22dc959a118f46e9411b29659c2fce87cdc" 64 | } 65 | ] 66 | } 67 | -------------------------------------------------------------------------------- /examples/volumes/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:bookworm 2 | 3 | RUN apt-get update && apt-get -y upgrade && apt-get install -y \ 4 | python3-venv \ 5 | squashfs-tools \ 6 | && rm -rf /var/lib/apt/lists/* 7 | 8 | RUN python3 -m venv /opt/venv 9 | RUN /opt/venv/bin/pip install 'aleph-message~=1.0.1' 10 | 11 | CMD mksquashfs /opt/venv /mnt/volume-venv.squashfs 12 | -------------------------------------------------------------------------------- /examples/volumes/build_squashfs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -euf 4 | 5 | # Use Podman if installed, else use Docker 6 | if hash podman 2> /dev/null 7 | then 8 | DOCKER_COMMAND=podman 9 | else 10 | DOCKER_COMMAND=docker 11 | fi 12 | 13 | $DOCKER_COMMAND build -t aleph-vm-build-squashfs . 
14 | $DOCKER_COMMAND run --rm -v "$(pwd)":/mnt aleph-vm-build-squashfs 15 | -------------------------------------------------------------------------------- /kernels/build-kernel.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euf -o pipefail 4 | 5 | # apt install ncurses-dev flex bison bc 6 | 7 | rm -fr linux-5.10.197 linux-5.10.197.tar linux-5.10.197.tar.sign linux-5.10.197.tar.xz 8 | 9 | 10 | curl -OL "https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.197.tar.xz" 11 | curl -OL "https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.197.tar.sign" 12 | unxz linux-5.10.197.tar.xz 13 | 14 | gpg --locate-keys torvalds@kernel.org gregkh@kernel.org 15 | gpg --verify linux-5.10.197.tar.sign linux-5.10.197.tar 16 | 17 | tar -xvf linux-5.10.197.tar 18 | 19 | cp microvm-kernel-x86_64-5.10.config linux-5.10.197/.config 20 | 21 | cd linux-5.10.197/ 22 | make menuconfig 23 | 24 | make -j$(nproc) vmlinux 25 | 26 | # Copy the updated config locally for documentation (the script is inside linux-5.10.197/ at this point) 27 | cp .config ../linux.config 28 | -------------------------------------------------------------------------------- /packaging/aleph-vm/DEBIAN/conffiles: -------------------------------------------------------------------------------- 1 | /etc/aleph-vm/supervisor.env 2 | -------------------------------------------------------------------------------- /packaging/aleph-vm/DEBIAN/control: -------------------------------------------------------------------------------- 1 | Package: aleph-vm 2 | Version: 0.1.8 3 | Architecture: all 4 | Maintainer: Aleph.im 5 | Description: Aleph.im VM execution engine 6 | Depends: python3,python3-pip,python3-aiohttp,python3-msgpack,python3-aiodns,python3-alembic,python3-sqlalchemy,python3-setproctitle,redis,python3-aioredis,python3-psutil,sudo,acl,curl,systemd-container,squashfs-tools,debootstrap,python3-packaging,python3-cpuinfo,python3-nftables,python3-jsonschema,cloud-image-utils,ndppd,python3-yaml,python3-dotenv,python3-schedule,qemu-system-x86,qemu-utils,python3-systemd,python3-dbus,btrfs-progs,nftables,lshw,python3-jwcrypto,python3-netifaces 7 | Section: aleph-im 8 | Priority: Extra 9 | -------------------------------------------------------------------------------- /packaging/aleph-vm/DEBIAN/postinst: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euf -o pipefail 3 | 4 | if ! id -u jailman > /dev/null 2>&1; then 5 | useradd jailman 6 | fi 7 | 8 | rm -fr /srv/jailer # Upgrade from < 0.1.11 9 | rm -fr /tmp/aleph # Upgrade from < 0.1.11 10 | mkdir -p /var/lib/aleph/vm/jailer 11 | 12 | # Create the IPFS directory if it does not exist 13 | if [ ! -d "/var/lib/ipfs" ]; then 14 | mkdir -p /var/lib/ipfs 15 | # Set appropriate permissions if needed 16 | chown ipfs:ipfs /var/lib/ipfs 17 | fi 18 | 19 | # Systemd is absent from containers 20 | if !
[[ -v container ]]; then 21 | systemctl daemon-reload 22 | systemctl enable ipfs.service 23 | systemctl restart ipfs.service 24 | systemctl enable aleph-vm-supervisor.service 25 | systemctl restart aleph-vm-supervisor.service 26 | fi 27 | -------------------------------------------------------------------------------- /packaging/aleph-vm/DEBIAN/postrm: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euf -o pipefail 3 | 4 | rm -fr /srv/jailer # Upgrade from < 0.1.11 5 | rm -fr /tmp/aleph/ # Upgrade from < 0.1.11 6 | rm -fr /var/lib/aleph/vm/jailer 7 | 8 | if [ "$1" = "purge" ]; then 9 | # Remove the directory when the package is purged 10 | rm -rf /var/lib/ipfs 11 | fi 12 | 13 | systemctl daemon-reload 14 | -------------------------------------------------------------------------------- /packaging/aleph-vm/DEBIAN/preinst: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -uf -o pipefail 3 | 4 | # Documentation: https://www.debian.org/doc/debian-policy/ch-maintainerscripts.html 5 | 6 | # Systemd is absent from containers 7 | if ! [[ -v container ]]; then 8 | # Stop the service during an upgrade. 9 | # The service does not exist during a new install, so this command will fail; that is okay 10 | systemctl stop aleph-vm-supervisor.service 11 | fi 12 | 13 | set -e 14 | 15 | # We will not delete this user on uninstall since there may be files owned by that user in /var/lib/ipfs 16 | addgroup --system ipfs 17 | adduser --system --ingroup ipfs ipfs 18 | -------------------------------------------------------------------------------- /packaging/aleph-vm/DEBIAN/prerm: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euf -o pipefail 3 | 4 | systemctl disable aleph-vm-supervisor.service 5 | systemctl stop aleph-vm-supervisor.service 6 | -------------------------------------------------------------------------------- /packaging/aleph-vm/etc/aleph-vm/supervisor.env: -------------------------------------------------------------------------------- 1 | # System logs make boot ~2x slower 2 | ALEPH_VM_PRINT_SYSTEM_LOGS=False 3 | ALEPH_VM_DOMAIN_NAME=vm.example.org 4 | ALEPH_VM_PAYMENT_RECEIVER_ADDRESS= 5 | -------------------------------------------------------------------------------- /packaging/aleph-vm/etc/ipfs/KUBO.md: -------------------------------------------------------------------------------- 1 | The IP range `86.84.0.0/16`, managed by `KPN Internet`, is filtered out due to 2 | an abuse letter sent to a node operator by Hetzner regarding "an attack" from the node. 3 | The content of this "attack" appears to be legitimate IPFS traffic 4 | (TCP packets from port 4001 to port 4001 and UDP packets from port 4001 to port 46024).
5 | -------------------------------------------------------------------------------- /packaging/aleph-vm/etc/ipfs/kubo.json: -------------------------------------------------------------------------------- 1 | { 2 | "AutoNAT": { 3 | "ServiceMode": "disabled" 4 | }, 5 | "AddrFilters": [ 6 | "/ip4/86.84.0.0/ipcidr/16" 7 | ], 8 | "Reprovider": { 9 | "Strategy": "roots" 10 | }, 11 | "Swarm": { 12 | "EnableHolePunching":false, 13 | "RelayService": { 14 | "Enabled": false 15 | } 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /packaging/aleph-vm/etc/needrestart/conf.d/aleph-vm.conf: -------------------------------------------------------------------------------- 1 | # Do not restart Aleph Network Services 2 | $nrconf{override_rc}{qr(^aleph-vm-supervisor)} = 0; 3 | $nrconf{override_rc}{qr(^aleph-vm-controller@.*\.service$)} = 0; 4 | -------------------------------------------------------------------------------- /packaging/aleph-vm/etc/systemd/system/aleph-vm-controller@.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Aleph VM %i Controller 3 | After=network.target 4 | 5 | [Service] 6 | Type=simple 7 | RestartSec=5s 8 | PrivateTmp=yes 9 | NoNewPrivileges=true 10 | WorkingDirectory=/opt/aleph-vm 11 | Environment=PYTHONPATH=/opt/aleph-vm/:$PYTHONPATH 12 | ExecStart=/usr/bin/python3 -m aleph.vm.controllers --config=/var/lib/aleph/vm/%i-controller.json 13 | Restart=on-failure 14 | # KillMode=mixed is used so initially only the Python controller process receives the SIGTERM signal. 15 | # The controller catches it and sends a QEMU command to shut down the Guest VM, allowing it to clean up 16 | # properly and avoid disk corruption. 17 | # After 30s (TimeoutStopSec), if the process is still running, both the controller and subprocesses receive SIGKILL. 18 | KillMode=mixed 19 | TimeoutStopSec=30 20 | 21 | [Install] 22 | WantedBy=multi-user.target 23 | -------------------------------------------------------------------------------- /packaging/aleph-vm/etc/systemd/system/aleph-vm-supervisor.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Aleph.im VM execution engine 3 | After=network.target ipfs.service 4 | Wants=ipfs.service 5 | 6 | [Service] 7 | User=0 8 | Group=0 9 | WorkingDirectory=/opt/aleph-vm 10 | Environment=PYTHONPATH=/opt/aleph-vm/:$PYTHONPATH 11 | Environment=PYTHONDONTWRITEBYTECODE="enabled" 12 | EnvironmentFile=/etc/aleph-vm/supervisor.env 13 | ExecStart=python3 -m aleph.vm.orchestrator --print-settings 14 | Restart=always 15 | RestartSec=10s 16 | 17 | [Install] 18 | WantedBy=multi-user.target 19 | -------------------------------------------------------------------------------- /packaging/aleph-vm/etc/systemd/system/ipfs.service: -------------------------------------------------------------------------------- 1 | # Source: https://github.com/ipfs/kubo/blob/master/misc/systemd/ipfs-hardened.service 2 | 3 | # This file will be overwritten on package upgrades, avoid customizations here. 4 | # 5 | # To make persistent changes, create a file at 6 | # "/etc/systemd/system/ipfs.service.d/overwrite.conf" with 7 | # `systemctl edit ipfs.service`. That file will be parsed after this 8 | # file has been parsed.
9 | # 10 | # To overwrite a variable, like ExecStart, you have to specify it once 11 | # blank and a second time with a new value, like: 12 | # ExecStart= 13 | # ExecStart=/usr/bin/ipfs daemon --flag1 --flag2 14 | # 15 | # For more info about custom unit files see systemd.unit(5). 16 | 17 | # This service file enables systemd-hardening features compatible with IPFS, 18 | # while breaking compatibility with the fuse-mount function. Use this one only 19 | # if you don't need the fuse-mount functionality. 20 | 21 | [Unit] 22 | Description=InterPlanetary File System (IPFS) daemon 23 | Documentation=https://docs.ipfs.tech/ 24 | After=network.target 25 | 26 | [Service] 27 | # hardening 28 | ReadOnlyPaths="/opt/kubo/" "/etc/ipfs" 29 | ReadWritePaths="/var/lib/ipfs/" 30 | NoNewPrivileges=true 31 | ProtectSystem=strict 32 | ProtectKernelTunables=true 33 | ProtectKernelModules=true 34 | ProtectKernelLogs=true 35 | PrivateDevices=true 36 | DevicePolicy=closed 37 | ProtectControlGroups=true 38 | RestrictAddressFamilies=AF_UNIX AF_INET AF_INET6 AF_NETLINK 39 | ProtectHostname=true 40 | PrivateTmp=true 41 | ProtectClock=true 42 | LockPersonality=true 43 | RestrictNamespaces=true 44 | RestrictRealtime=true 45 | MemoryDenyWriteExecute=true 46 | SystemCallArchitectures=native 47 | SystemCallFilter=@system-service 48 | SystemCallFilter=~@privileged 49 | ProtectHome=true 50 | RemoveIPC=true 51 | RestrictSUIDSGID=true 52 | CapabilityBoundingSet=CAP_NET_BIND_SERVICE 53 | 54 | # enable for 1-1024 port listening 55 | #AmbientCapabilities=CAP_NET_BIND_SERVICE 56 | # enable to specify a custom path; see docs/environment-variables.md for further documentation 57 | #Environment=IPFS_PATH=/custom/ipfs/path 58 | # enable to specify a higher limit for open files/connections 59 | #LimitNOFILE=1000000 60 | 61 | # Avoid a permission denied error when running `lstat /home/ipfs/.config/ipfs/denylists` 62 | # due to checking $XDG_CONFIG_HOME/ipfs/denylists/ 63 | Environment=XDG_CONFIG_HOME=/etc 64 | 65 | # Don't use swap 66 | MemorySwapMax=0 67 | 68 | # Don't timeout on startup. Opening the IPFS repo can take a long time in some cases (e.g., when 69 | # badger is recovering) and migrations can delay startup. 70 | # 71 | # Ideally, we'd be a bit smarter about this but there's no good way to do that without hooking 72 | # systemd dependencies deeper into go-ipfs.
73 | TimeoutStartSec=infinity 74 | 75 | Type=notify 76 | User=ipfs 77 | Group=ipfs 78 | Environment=IPFS_PATH="/var/lib/ipfs" 79 | ExecStart=/opt/kubo/ipfs daemon --init --migrate --init-profile=server --config-file /etc/ipfs/kubo.json 80 | Restart=on-failure 81 | KillSignal=SIGINT 82 | 83 | [Install] 84 | WantedBy=default.target 85 | -------------------------------------------------------------------------------- /packaging/debian-12.dockerfile: -------------------------------------------------------------------------------- 1 | FROM rust:1.79.0-bookworm 2 | 3 | RUN apt-get update && apt-get -y upgrade && apt-get install -y \ 4 | make \ 5 | git \ 6 | curl \ 7 | sudo \ 8 | python3-pip \ 9 | python3-venv \ 10 | && rm -rf /var/lib/apt/lists/* 11 | 12 | WORKDIR /opt 13 | COPY ../src/aleph ./src/aleph 14 | COPY ../packaging ./packaging 15 | COPY ../kernels ./kernels 16 | 17 | COPY ../examples/ ./examples 18 | -------------------------------------------------------------------------------- /packaging/extract_requirements.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euf -o pipefail 3 | 4 | export DEBIAN_FRONTEND=noninteractive 5 | 6 | apt update 7 | apt --yes install /opt/packaging/target/aleph-vm.deb 8 | pip freeze > "$1" 9 | -------------------------------------------------------------------------------- /packaging/repositories/bookworm/conf/distributions: -------------------------------------------------------------------------------- 1 | Origin: Aleph-IM 2 | Label: aleph-im 3 | Suite: stable 4 | Codename: bookworm 5 | Version: 3.0 6 | Architectures: amd64 source 7 | Components: contrib 8 | #UDebComponents: main 9 | Description: Aleph-im packages 10 | SignWith: yes 11 | #DebOverride: override 12 | #UDebOverride: override 13 | #DscOverride: srcoverride 14 | -------------------------------------------------------------------------------- /packaging/repositories/jammy/conf/distributions: -------------------------------------------------------------------------------- 1 | Origin: Aleph-IM 2 | Label: aleph-im 3 | Suite: stable 4 | Codename: jammy 5 | Version: 3.0 6 | Architectures: amd64 source 7 | Components: contrib 8 | #UDebComponents: main 9 | Description: Aleph-im packages 10 | SignWith: yes 11 | #DebOverride: override 12 | #UDebOverride: override 13 | #DscOverride: srcoverride 14 | -------------------------------------------------------------------------------- /packaging/ubuntu-22.04.dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:22.04 2 | 3 | RUN apt-get update && apt-get -y upgrade && apt-get install -y \ 4 | make \ 5 | git \ 6 | curl \ 7 | sudo \ 8 | python3-pip \ 9 | python3-venv \ 10 | cargo \ 11 | && rm -rf /var/lib/apt/lists/* 12 | 13 | WORKDIR /opt 14 | COPY ../src/aleph ./src/aleph 15 | COPY ../packaging ./packaging 16 | COPY ../kernels ./kernels 17 | 18 | COPY ../examples/ ./examples 19 | -------------------------------------------------------------------------------- /packaging/ubuntu-24.04.dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:24.04 2 | 3 | RUN apt-get update && apt-get -y upgrade && apt-get install -y \ 4 | make \ 5 | git \ 6 | curl \ 7 | sudo \ 8 | python3-pip \ 9 | python3-venv \ 10 | cargo \ 11 | && rm -rf /var/lib/apt/lists/* 12 | 13 | WORKDIR /opt 14 | COPY ../src/aleph ./src/aleph 15 | COPY ../packaging ./packaging 16 | COPY ../kernels ./kernels 17 | 18 | COPY ../examples/ 
./examples 19 | -------------------------------------------------------------------------------- /packaging/version_from_git.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | """ 4 | Set the version number of a package based on the current repository: 5 | 6 | Use the tag if one is available for the current commit. 7 | Else default to the short commit id, prefixed by the name of the current branch. 8 | 9 | Pass the path of the target file to edit as an argument. 10 | """ 11 | 12 | import os.path 13 | import re 14 | import subprocess 15 | import sys 16 | 17 | script_path, *args, format_, target_file_path = sys.argv 18 | 19 | for arg in args: 20 | if arg not in ("--inplace", "--stdout"): 21 | print( 22 | "Usage: version_from_git.py [OPTION...] FORMAT TARGET_FILE_PATH\n\n" 23 | "set the version number of a Debian package based on the current git commit\n\n" 24 | "supported formats are 'deb', 'setup.py' and '__version__'\n\n" 25 | " --help print this message\n" 26 | " --inplace edit file in place\n" 27 | " --stdout print the result on stdout\n" 28 | ) 29 | sys.exit(1) 30 | 31 | if not os.path.isfile(target_file_path): 32 | print(f"No such file: '{target_file_path}'") 33 | sys.exit(2) 34 | 35 | 36 | def get_git_version(): 37 | output = subprocess.check_output(("git", "describe", "--tags")) 38 | return output.decode().strip() 39 | 40 | 41 | version = get_git_version() 42 | 43 | with open(target_file_path) as target_file: 44 | target_content = target_file.read() 45 | 46 | if format_ == "deb": 47 | updated_content = re.sub(r"(Version:)\s*(.*)", f"\\1 {version}", target_content) 48 | elif format_ == "setup.py": 49 | updated_content = re.sub(r"(version)\s*=(.*)'", f"\\1='{version}'", target_content) 50 | elif format_ == "__version__": 51 | updated_content = re.sub(r"(__version__)\s*(.*)", f"\\1 = '{version}'", target_content) 52 | else: 53 | print(f"Format must be 'deb', 'setup.py' or '__version__', not '{format_}'") 54 | sys.exit(3) 55 | 56 | if "--inplace" in args: 57 | with open(target_file_path, "w") as target_file: 58 | target_file.write(updated_content) 59 | 60 | if "--stdout" in args: 61 | print(updated_content) 62 | -------------------------------------------------------------------------------- /runtimes/aleph-debian-12-python/create_disk_image.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | rm ./rootfs.squashfs 4 | 5 | set -euf 6 | 7 | rm -fr ./rootfs 8 | mkdir ./rootfs 9 | 10 | debootstrap --variant=minbase bookworm ./rootfs http://deb.debian.org/debian/ 11 | 12 | chroot ./rootfs /bin/sh <<EOT 34 | echo "en_US.UTF-8 UTF-8" > /etc/locale.gen 35 | locale-gen en_US.UTF-8 36 | 37 | echo "Pip installing aleph-sdk-python" 38 | mkdir -p /opt/aleph/libs 39 | # Pin the protobuf dependency version to avoid CI errors, as version 5.29.0 has a compilation issue.
40 | pip3 install --target /opt/aleph/libs 'aleph-sdk-python==2.0.0' 'aleph-message~=1.0.1' 'fastapi~=0.109.2' 'protobuf==5.28.3' 41 | 42 | # Compile Python code to bytecode for faster execution 43 | # -o 2 is needed to compile with optimization level 2, matching how init1.py is launched ("python -OO"); 44 | # otherwise the compiled files are not used 45 | python3 -m compileall -o 2 -f /usr/local/lib/python3.11 46 | python3 -m compileall -o 2 -f /opt/aleph/libs 47 | 48 | echo "PubkeyAuthentication yes" >> /etc/ssh/sshd_config 49 | echo "PasswordAuthentication no" >> /etc/ssh/sshd_config 50 | echo "ChallengeResponseAuthentication no" >> /etc/ssh/sshd_config 51 | echo "PermitRootLogin yes" >> /etc/ssh/sshd_config 52 | 53 | mkdir -p /overlay 54 | 55 | # Set up a login terminal on the serial console (ttyS0): 56 | ln -s agetty /etc/init.d/agetty.ttyS0 57 | echo ttyS0 > /etc/securetty 58 | EOT 59 | 60 | cat <<EOT > ./rootfs/etc/inittab 61 | # /etc/inittab 62 | 63 | ::sysinit:/sbin/init sysinit 64 | ::sysinit:/sbin/init boot 65 | ::wait:/sbin/init default 66 | 67 | # Set up a couple of getty's 68 | tty1::respawn:/sbin/getty 38400 tty1 69 | tty2::respawn:/sbin/getty 38400 tty2 70 | tty3::respawn:/sbin/getty 38400 tty3 71 | tty4::respawn:/sbin/getty 38400 tty4 72 | tty5::respawn:/sbin/getty 38400 tty5 73 | tty6::respawn:/sbin/getty 38400 tty6 74 | 75 | # Put a getty on the serial port 76 | ttyS0::respawn:/sbin/getty -L ttyS0 115200 vt100 77 | 78 | # Stuff to do for the 3-finger salute 79 | ::ctrlaltdel:/sbin/reboot 80 | 81 | # Stuff to do before rebooting 82 | ::shutdown:/sbin/init shutdown 83 | EOT 84 | 85 | # Reduce size 86 | rm -fr ./rootfs/root/.cache 87 | rm -fr ./rootfs/var/cache 88 | mkdir -p ./rootfs/var/cache/apt/archives/partial 89 | rm -fr ./rootfs/usr/share/doc 90 | rm -fr ./rootfs/usr/share/man 91 | rm -fr ./rootfs/var/lib/apt/lists/ 92 | 93 | # Custom init 94 | cp ./init0.sh ./rootfs/sbin/init 95 | cp ./init1.py ./rootfs/root/init1.py 96 | cp ./loading.html ./rootfs/root/loading.html 97 | chmod +x ./rootfs/sbin/init 98 | chmod +x ./rootfs/root/init1.py 99 | 100 | mksquashfs ./rootfs/ ./rootfs.squashfs 101 | -------------------------------------------------------------------------------- /runtimes/aleph-debian-12-python/init0.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -euf 4 | 5 | mount -t proc proc /proc -o nosuid,noexec,nodev 6 | 7 | log() { 8 | echo "$(awk '{print $1}' /proc/uptime)" '|S' "$@" 9 | } 10 | log "init0.sh is launching" 11 | 12 | # Switch root from read-only ext4 to read-write overlay 13 | mkdir -p /overlay 14 | /bin/mount -t tmpfs -o noatime,mode=0755 tmpfs /overlay 15 | mkdir -p /overlay/root /overlay/work 16 | /bin/mount -o noatime,lowerdir=/,upperdir=/overlay/root,workdir=/overlay/work -t overlay "overlayfs:/overlay/root" /mnt 17 | mkdir -p /mnt/rom 18 | pivot_root /mnt /mnt/rom 19 | 20 | mount --move /rom/proc /proc 21 | mount --move /rom/dev /dev 22 | 23 | mkdir -p /dev/pts 24 | mkdir -p /dev/shm 25 | 26 | mount -t sysfs sys /sys -o nosuid,noexec,nodev 27 | mount -t tmpfs run /run -o mode=0755,nosuid,nodev 28 | #mount -t devtmpfs dev /dev -o mode=0755,nosuid 29 | mount -t devpts devpts /dev/pts -o mode=0620,gid=5,nosuid,noexec 30 | mount -t tmpfs shm /dev/shm -o mode=1777,nosuid,nodev 31 | 32 | # Required by Docker 33 | cgroupfs-mount 34 | update-alternatives --set iptables /usr/sbin/iptables-legacy 35 | update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy 36 | 37 | # Enable the following to force the storage
driver used by Docker. 38 | # See https://docs.docker.com/storage/storagedriver/select-storage-driver/ 39 | #echo '{\n"storage-driver": "overlay2"\n}\n' > /etc/docker/daemon.json 40 | 41 | # List block devices 42 | lsblk 43 | 44 | #cat /proc/sys/kernel/random/entropy_avail 45 | 46 | # TODO: Move to init1 47 | mkdir -p /run/sshd 48 | /usr/sbin/sshd & 49 | log "SSH UP" 50 | 51 | log "Setup socat" 52 | socat UNIX-LISTEN:/tmp/socat-socket,fork,reuseaddr VSOCK-CONNECT:2:53 & 53 | log "Socat ready" 54 | 55 | export PYTHONPATH=/opt/aleph/libs 56 | 57 | # Replace this script with the manager 58 | exec /root/init1.py 59 | -------------------------------------------------------------------------------- /runtimes/aleph-debian-12-python/update_inits.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | rm ./rootfs.squashfs 4 | 5 | set -euf 6 | 7 | cp ./init0.sh ./rootfs/sbin/init 8 | cp ./init1.py ./rootfs/root/init1.py 9 | chmod +x ./rootfs/sbin/init 10 | chmod +x ./rootfs/root/init1.py 11 | 12 | mksquashfs ./rootfs/ ./rootfs.squashfs 13 | 14 | echo "OK" 15 | -------------------------------------------------------------------------------- /runtimes/instance-rootfs/create-debian-12-disk.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euf 4 | 5 | # Variables 6 | ROOTFS_FILE="./debian-12.btrfs" 7 | MOUNT_ORIGIN_DIR="/mnt/debian" 8 | MOUNT_DIR="/mnt/vm" 9 | IMAGE_URL="https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-genericcloud-amd64.tar.xz" 10 | IMAGE_NAME="debian-12-genericcloud.tar.xz" 11 | IMAGE_RAW_NAME="disk.raw" 12 | 13 | # Cleanup previous run 14 | umount "$MOUNT_ORIGIN_DIR" || true 15 | umount "$MOUNT_DIR" || true 16 | rm -f "$ROOTFS_FILE" 17 | 18 | # Prepare directories 19 | mkdir -p "$MOUNT_ORIGIN_DIR" 20 | mkdir -p "$MOUNT_DIR" 21 | 22 | # Download Debian image 23 | echo "Downloading Debian 12 image" 24 | curl -L "$IMAGE_URL" -o "$IMAGE_NAME" 25 | 26 | # Allocate 1GB rootfs.btrfs file 27 | echo "Allocate 1GB $ROOTFS_FILE file" 28 | fallocate -l 1G "$ROOTFS_FILE" 29 | mkfs.btrfs -m single --label root "$ROOTFS_FILE" 30 | mount "$ROOTFS_FILE" "$MOUNT_DIR" 31 | 32 | # Extract Debian image 33 | echo "Extracting Debian 12 image" 34 | tar xvf "$IMAGE_NAME" 35 | 36 | # Mount first partition of Debian Image 37 | LOOPDISK=$(losetup --find --show $IMAGE_RAW_NAME) 38 | partx -u "$LOOPDISK" 39 | mount "$LOOPDISK"p1 "$MOUNT_ORIGIN_DIR" 40 | 41 | # Remove the boot partition entry from fstab, since that partition is not copied 42 | sed -i '$d' "$MOUNT_ORIGIN_DIR"/etc/fstab 43 | 44 | # Copy Debian image to rootfs 45 | echo "Copying Debian 12 image to $ROOTFS_FILE file" 46 | cp -vap "$MOUNT_ORIGIN_DIR/."
"$MOUNT_DIR" 47 | 48 | # Cleanup and unmount 49 | umount "$MOUNT_ORIGIN_DIR" 50 | partx -d "$LOOPDISK" 51 | losetup -d "$LOOPDISK" 52 | umount "$MOUNT_DIR" 53 | rm "$IMAGE_RAW_NAME" 54 | rm "$IMAGE_NAME" 55 | -------------------------------------------------------------------------------- /runtimes/instance-rootfs/create-debian-12-qemu-disk.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euf 4 | 5 | # Variables 6 | ROOTFS_FILENAME="./rootfs.img" 7 | IMAGE_URL="https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-genericcloud-amd64.qcow2" 8 | IMAGE_NAME="debian-12-genericcloud-amd64.qcow2" 9 | 10 | # Cleanup previous run 11 | rm -f "$ROOTFS_FILENAME" 12 | 13 | # Download Ubuntu image 14 | echo "Downloading Debian 12 image" 15 | curl -L "$IMAGE_URL" -o "$IMAGE_NAME" 16 | 17 | # Rename final file 18 | mv "$IMAGE_NAME" "$ROOTFS_FILENAME" 19 | -------------------------------------------------------------------------------- /runtimes/instance-rootfs/create-ubuntu-22-04-disk.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euf 4 | 5 | # Variables 6 | ROOTFS_FILE="./ubuntu-22-04.btrfs" 7 | ROOTFS_DIR="./rootfs" 8 | MOUNT_DIR="/mnt/vm" 9 | IMAGE_URL="https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64-root.tar.xz" 10 | IMAGE_NAME="jammy-server-cloudimg-root.tar.xz" 11 | 12 | # Cleanup previous run 13 | umount "$MOUNT_DIR" || true 14 | rm -f "$ROOTFS_FILE" 15 | rm -rf "$ROOTFS_DIR" 16 | 17 | # Prepare directories 18 | mkdir -p "$MOUNT_DIR" 19 | mkdir -p "$ROOTFS_DIR" 20 | 21 | # Download Ubuntu image 22 | echo "Downloading Ubuntu 22.04 image" 23 | curl -L "$IMAGE_URL" -o "$IMAGE_NAME" 24 | 25 | # Allocate 1,4 GB rootfs.btrfs file 26 | echo "Allocate 1,4 GB $ROOTFS_FILE file" 27 | fallocate -l 1400M "$ROOTFS_FILE" 28 | mkfs.btrfs "$ROOTFS_FILE" 29 | mount "$ROOTFS_FILE" "$MOUNT_DIR" 30 | 31 | # Extract Ubuntu image to rootfs 32 | echo "Extracting Ubuntu 22.04 image" 33 | tar xvf "$IMAGE_NAME" -C "$MOUNT_DIR" 34 | 35 | # Cleanup and unmount 36 | umount "$MOUNT_DIR" 37 | rm -rf "$ROOTFS_DIR" 38 | rm "$IMAGE_NAME" 39 | -------------------------------------------------------------------------------- /runtimes/instance-rootfs/create-ubuntu-22-04-qemu-disk.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euf 4 | 5 | # Variables 6 | ROOTFS_FILENAME="./rootfs.img" 7 | IMAGE_URL="https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64-disk-kvm.img" 8 | IMAGE_NAME="jammy-server-cloudimg-amd64-disk-kvm.img" 9 | 10 | # Cleanup previous run 11 | rm -f "$ROOTFS_FILENAME" 12 | 13 | # Download Ubuntu image 14 | echo "Downloading Ubuntu 22.04 image" 15 | curl -L "$IMAGE_URL" -o "$IMAGE_NAME" 16 | 17 | # Rename final file 18 | mv "$IMAGE_NAME" "$ROOTFS_FILENAME" 19 | -------------------------------------------------------------------------------- /runtimes/instance-rootfs/create-ubuntu-24-04-qemu-disk.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euf 4 | 5 | # Variables 6 | ROOTFS_FILENAME="./rootfs.img" 7 | IMAGE_URL="https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img" 8 | IMAGE_NAME="noble-server-cloudimg-amd64.img" 9 | 10 | # Cleanup previous run 11 | rm -f "$ROOTFS_FILENAME" 12 | 13 | # Download Ubuntu image 14 | echo "Downloading Ubuntu 24.04 image" 15 | curl -L "$IMAGE_URL" -o 
"$IMAGE_NAME" 16 | 17 | # Rename final file 18 | mv "$IMAGE_NAME" "$ROOTFS_FILENAME" 19 | -------------------------------------------------------------------------------- /runtimes/ovmf/README.md: -------------------------------------------------------------------------------- 1 | # OVMF build for Confidential VMs 2 | 3 | The files in this directory build a version of OVMF able to store SEV secrets 4 | in a physical memory region that will then be accessible by Grub. The final OVMF image 5 | also include Grub in order to measure OVMF+Grub before loading secrets inside 6 | the VM. 7 | 8 | This process relies on the patch sets produced by James Bottomley: 9 | https://listman.redhat.com/archives/edk2-devel-archive/2020-November/msg01247.html 10 | 11 | ## Build instructions 12 | 13 | As this requires a patched version of Grub, it is advised to build both tools inside a container. 14 | 15 | 16 | e.g using podman 17 | ``` 18 | # Clone grub and edk2, and apply the patches 19 | bash ./download_dependencies.sh 20 | podman run -v ./build_ovmf.sh:/opt/build_ovmf.sh -v ./downloads:/opt/downloads\ 21 | ubuntu:22.04 bash /opt/download_dependencies.sh 22 | # The OVMF.fd file will be in `downloads/edk2/Build/AmdSev/RELEASE_GCC5/FV/OVMF.fd 23 | cp downloads/edk2/Build/AmdSev/RELEASE_GCC5/FV/OVMF.fd confidential-OVMF.fd 24 | ``` 25 | -------------------------------------------------------------------------------- /runtimes/ovmf/build_ovmf.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | # Script to build OVMF + Grub for confidential computing. The resulting image will be 3 | # a single firmware image containing OVMF and Grub so that the entirety of the unencrypted 4 | # boot code can be measured before feeding secrets to the VM. 5 | 6 | set -eo pipefail 7 | 8 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 9 | 10 | GRUB_DIR="${SCRIPT_DIR}/downloads/grub" 11 | EDK2_DIR="${SCRIPT_DIR}/downloads/edk2" 12 | 13 | if [ ! -d "${GRUB_DIR}" ]; then 14 | echo "Grub directory not found: ${GRUB_DIR}" >&2 15 | fi 16 | 17 | if [ ! -d "${EDK2_DIR}" ]; then 18 | echo "EDK2 directory not found: ${EDK2_DIR}" >&2 19 | fi 20 | 21 | apt-get update 22 | # Packages for Grub 23 | apt-get install -y autoconf autopoint binutils bison flex gcc gettext git make pkg-config python3 python-is-python3 24 | # Packages for OVMF (there are some duplicates with Grub, kept for documentation) 25 | apt-get install -y bison build-essential dosfstools flex iasl libgmp3-dev libmpfr-dev mtools nasm subversion texinfo uuid-dev 26 | 27 | cd $GRUB_DIR 28 | ./bootstrap 29 | ./configure --prefix /usr/ --with-platform=efi --target=x86_64 30 | make 31 | make install 32 | 33 | # Build OVMF 34 | cd $EDK2_DIR 35 | OvmfPkg/build.sh -b RELEASE -p OvmfPkg/AmdSev/AmdSevX64.dsc 36 | -------------------------------------------------------------------------------- /runtimes/ovmf/download_dependencies.sh: -------------------------------------------------------------------------------- 1 | #! 
/bin/bash 2 | 3 | set -eo pipefail 4 | 5 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 6 | DOWNLOAD_DIR="${SCRIPT_DIR}/downloads" 7 | PATCH_DIR="${SCRIPT_DIR}/patches" 8 | 9 | GRUB_GIT_REPOSITORY="https://github.com/aleph-im/grub.git" 10 | GRUB_COMMIT="aleph/efi-secrets" 11 | GRUB_DIR="${DOWNLOAD_DIR}/grub" 12 | 13 | EDK2_GIT_REPOSITORY="https://github.com/tianocore/edk2.git" 14 | EDK2_COMMIT="edk2-stable202205" 15 | EDK2_DIR="${DOWNLOAD_DIR}/edk2" 16 | 17 | # Download Grub 18 | git clone --depth 1 --branch "${GRUB_COMMIT}" ${GRUB_GIT_REPOSITORY} "${GRUB_DIR}" 19 | 20 | # Download EDK2 (=OVMF) 21 | git clone --recurse-submodules "${EDK2_GIT_REPOSITORY}" "${EDK2_DIR}" 22 | 23 | 24 | 25 | 26 | # Apply patches to EDK2 27 | EDK2_PATCH_DIR="${PATCH_DIR}/edk2" 28 | pushd "${EDK2_DIR}" > /dev/null 29 | git checkout "${EDK2_COMMIT}" 30 | git submodule update 31 | # Default user is needed by git am. only set it for the repo if not set already 32 | if ! git config user.name > /dev/null; then 33 | git config --local user.name "Your Name" 34 | fi 35 | if ! git config user.email > /dev/null; then 36 | git config --local user.email "you@example.com" 37 | fi 38 | git am --ignore-space-change --ignore-whitespace "${EDK2_PATCH_DIR}/0001-Fix-invokation-of-cryptomount-s-for-AMD-SEV.patch" 39 | popd > /dev/null 40 | -------------------------------------------------------------------------------- /runtimes/ovmf/patches/edk2/0001-Fix-invokation-of-cryptomount-s-for-AMD-SEV.patch: -------------------------------------------------------------------------------- 1 | From b3f1d358cc4098fb59a778d5340018a4e73ff87f Mon Sep 17 00:00:00 2001 2 | From: Olivier Desenfans 3 | Date: Thu, 30 Jun 2022 10:38:18 +0200 4 | Subject: [PATCH] Fix invokation of cryptomount -s for AMD SEV 5 | 6 | The current implementation targeted the first version of James 7 | Bottomley's Grub patches. These patches have since been updated 8 | to move the secret loading part from a dedicated command to 9 | a secret-finding module that must be invoked with 10 | 11 | cryptomount -s MOD 12 | 13 | Fixed the name of the Grub module which was renamed from sevsecret 14 | to efisecret. 15 | --- 16 | OvmfPkg/AmdSev/Grub/grub.cfg | 10 ++-------- 17 | OvmfPkg/AmdSev/Grub/grub.sh | 2 +- 18 | 2 files changed, 3 insertions(+), 9 deletions(-) 19 | 20 | diff --git a/OvmfPkg/AmdSev/Grub/grub.cfg b/OvmfPkg/AmdSev/Grub/grub.cfg 21 | index 17be94277a..331baf798c 100644 22 | --- a/OvmfPkg/AmdSev/Grub/grub.cfg 23 | +++ b/OvmfPkg/AmdSev/Grub/grub.cfg 24 | @@ -10,16 +10,10 @@ 25 | ## 26 | 27 | echo "Entering grub config" 28 | -sevsecret 29 | +cryptomount -s efisecret 30 | if [ $? -ne 0 ]; then 31 | - echo "Failed to locate anything in the SEV secret area, prompting for password" 32 | + echo "Failed to mount root securely, retrying with password prompt" 33 | cryptomount -a 34 | -else 35 | - cryptomount -s 36 | - if [ $? 
-ne 0 ]; then 37 | - echo "Failed to mount root securely, retrying with password prompt" 38 | - cryptomount -a 39 | - fi 40 | fi 41 | set root= 42 | for f in (crypto*); do 43 | diff --git a/OvmfPkg/AmdSev/Grub/grub.sh b/OvmfPkg/AmdSev/Grub/grub.sh 44 | index 99807d7291..abec80d7da 100644 45 | --- a/OvmfPkg/AmdSev/Grub/grub.sh 46 | +++ b/OvmfPkg/AmdSev/Grub/grub.sh 47 | @@ -44,7 +44,7 @@ GRUB_MODULES=" 48 | linux 49 | linuxefi 50 | reboot 51 | - sevsecret 52 | + efisecret 53 | " 54 | basedir=$(dirname -- "$0") 55 | 56 | -- 57 | 2.25.1 58 | 59 | -------------------------------------------------------------------------------- /src/aleph/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/aleph-vm/35ce27f1e51fd1bc86ceea502a81de77c26c5fe3/src/aleph/__init__.py -------------------------------------------------------------------------------- /src/aleph/vm/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/aleph-vm/35ce27f1e51fd1bc86ceea502a81de77c26c5fe3/src/aleph/vm/__init__.py -------------------------------------------------------------------------------- /src/aleph/vm/constants.py: -------------------------------------------------------------------------------- 1 | KiB = 1024 2 | MiB = 1024 * 1024 3 | GiB = 1024 * 1024 * 1024 4 | HOUR = 60 * 60 5 | MINUTE = 60 6 | -------------------------------------------------------------------------------- /src/aleph/vm/controllers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/aleph-vm/35ce27f1e51fd1bc86ceea502a81de77c26c5fe3/src/aleph/vm/controllers/__init__.py -------------------------------------------------------------------------------- /src/aleph/vm/controllers/configuration.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from enum import Enum 3 | from pathlib import Path 4 | 5 | from pydantic import BaseModel 6 | 7 | from aleph.vm.conf import Settings, settings 8 | 9 | logger = logging.getLogger(__name__) 10 | 11 | 12 | class VMConfiguration(BaseModel): 13 | use_jailer: bool 14 | firecracker_bin_path: Path 15 | jailer_bin_path: Path 16 | config_file_path: Path 17 | init_timeout: float 18 | 19 | 20 | class QemuVMHostVolume(BaseModel): 21 | mount: str 22 | path_on_host: Path 23 | read_only: bool 24 | 25 | 26 | class QemuGPU(BaseModel): 27 | pci_host: str 28 | supports_x_vga: bool = True # Default to True for backward compatibility 29 | 30 | 31 | class QemuVMConfiguration(BaseModel): 32 | qemu_bin_path: str 33 | cloud_init_drive_path: str | None = None 34 | image_path: str 35 | monitor_socket_path: Path 36 | qmp_socket_path: Path 37 | vcpu_count: int 38 | mem_size_mb: int 39 | interface_name: str | None = None 40 | host_volumes: list[QemuVMHostVolume] 41 | gpus: list[QemuGPU] 42 | 43 | 44 | class QemuConfidentialVMConfiguration(BaseModel): 45 | qemu_bin_path: str 46 | cloud_init_drive_path: str | None = None 47 | image_path: str 48 | monitor_socket_path: Path 49 | qmp_socket_path: Path 50 | vcpu_count: int 51 | mem_size_mb: int 52 | interface_name: str | None = None 53 | host_volumes: list[QemuVMHostVolume] 54 | gpus: list[QemuGPU] 55 | ovmf_path: Path 56 | sev_session_file: Path 57 | sev_dh_cert_file: Path 58 | sev_policy: int 59 | 60 | 61 | class HypervisorType(str, Enum): 62 | qemu = "qemu" 63 | firecracker = "firecracker" 64 | 65 | 
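# Illustrative sketch (not part of the original file): constructing the QEMU
# variant above with its required fields; all paths and values here are
# hypothetical.
#
#     QemuVMConfiguration(
#         qemu_bin_path="/usr/bin/qemu-system-x86_64",
#         image_path="/var/lib/aleph/vm/volumes/rootfs.img",
#         monitor_socket_path=Path("/run/aleph-vm/vm-monitor.sock"),
#         qmp_socket_path=Path("/run/aleph-vm/vm-qmp.sock"),
#         vcpu_count=1,
#         mem_size_mb=512,
#         host_volumes=[],
#         gpus=[],
#     )
#
# The Configuration model below wraps one of these variants together with the
# HypervisorType that should run it.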
66 | class Configuration(BaseModel): 67 | vm_id: int 68 | vm_hash: str 69 | settings: Settings 70 | vm_configuration: QemuConfidentialVMConfiguration | QemuVMConfiguration | VMConfiguration 71 | hypervisor: HypervisorType = HypervisorType.firecracker 72 | 73 | 74 | def save_controller_configuration(vm_hash: str, configuration: Configuration) -> Path: 75 | """Save VM configuration to be used by the controller service""" 76 | config_file_path = Path(f"{settings.EXECUTION_ROOT}/{vm_hash}-controller.json") 77 | with config_file_path.open("w") as controller_config_file: 78 | controller_config_file.write( 79 | configuration.model_dump_json( 80 | by_alias=True, exclude_none=True, indent=4, exclude={"settings": {"USE_DEVELOPER_SSH_KEYS"}} 81 | ) 82 | ) 83 | config_file_path.chmod(0o644) 84 | return config_file_path 85 | -------------------------------------------------------------------------------- /src/aleph/vm/controllers/firecracker/__init__.py: -------------------------------------------------------------------------------- 1 | from .instance import AlephFirecrackerInstance 2 | from .program import AlephFirecrackerProgram 3 | 4 | __all__ = ("AlephFirecrackerProgram", "AlephFirecrackerInstance") 5 | -------------------------------------------------------------------------------- /src/aleph/vm/controllers/firecracker/snapshot_manager.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | import threading 4 | from time import sleep 5 | 6 | from aleph_message.models import ItemHash 7 | from schedule import Job, Scheduler 8 | 9 | from aleph.vm.conf import settings 10 | 11 | from .executable import AlephFirecrackerExecutable 12 | from .snapshots import CompressedDiskVolumeSnapshot 13 | 14 | logger = logging.getLogger(__name__) 15 | 16 | 17 | def wrap_async_snapshot(vm): 18 | asyncio.run(do_vm_snapshot(vm)) 19 | 20 | 21 | def run_threaded_snapshot(vm): 22 | job_thread = threading.Thread(target=wrap_async_snapshot, args=(vm,)) 23 | job_thread.start() 24 | 25 | 26 | async def do_vm_snapshot(vm: AlephFirecrackerExecutable) -> CompressedDiskVolumeSnapshot: 27 | try: 28 | logger.debug(f"Starting new snapshot for VM {vm.vm_hash}") 29 | assert vm, "VM execution not set" 30 | 31 | snapshot = await vm.create_snapshot() 32 | await snapshot.upload() 33 | 34 | logger.debug(f"New snapshot for VM {vm.vm_hash} created at {snapshot.path}") 35 | return snapshot 36 | except ValueError as error: 37 | msg = "Something failed while taking a snapshot" 38 | raise ValueError(msg) from error 39 | 40 | 41 | def infinite_run_scheduler_jobs(scheduler: Scheduler) -> None: 42 | while True: 43 | scheduler.run_pending() 44 | sleep(1) 45 | 46 | 47 | class SnapshotExecution: 48 | vm_hash: ItemHash 49 | execution: AlephFirecrackerExecutable 50 | frequency: int 51 | _scheduler: Scheduler 52 | _job: Job 53 | 54 | def __init__( 55 | self, 56 | scheduler: Scheduler, 57 | vm_hash: ItemHash, 58 | execution: AlephFirecrackerExecutable, 59 | frequency: int, 60 | ): 61 | self.vm_hash = vm_hash 62 | self.execution = execution 63 | self.frequency = frequency 64 | self._scheduler = scheduler 65 | 66 | async def start(self) -> None: 67 | logger.debug(f"Starting snapshots for VM {self.vm_hash} every {self.frequency} minutes") 68 | job = self._scheduler.every(self.frequency).minutes.do(run_threaded_snapshot, self.execution) 69 | self._job = job 70 | 71 | async def stop(self) -> None: 72 | logger.debug(f"Stopping snapshots for VM {self.vm_hash}") 73 |
self._scheduler.cancel_job(self._job) 74 | 75 | 76 | class SnapshotManager: 77 | """ 78 | Manage VM snapshots. 79 | """ 80 | 81 | executions: dict[ItemHash, SnapshotExecution] 82 | _scheduler: Scheduler 83 | 84 | def __init__(self): 85 | self.executions = {} 86 | self._scheduler = Scheduler() 87 | 88 | def run_in_thread(self) -> None: 89 | job_thread = threading.Thread( 90 | target=infinite_run_scheduler_jobs, 91 | args=[self._scheduler], 92 | daemon=True, 93 | name="SnapshotManager", 94 | ) 95 | job_thread.start() 96 | 97 | async def start_for(self, vm: AlephFirecrackerExecutable, frequency: int | None = None) -> None: 98 | if not vm.support_snapshot: 99 | msg = "Snapshots are not implemented for programs." 100 | raise NotImplementedError(msg) 101 | 102 | default_frequency = frequency or settings.SNAPSHOT_FREQUENCY 103 | 104 | vm_hash = vm.vm_hash 105 | snapshot_execution = SnapshotExecution( 106 | scheduler=self._scheduler, 107 | vm_hash=vm_hash, 108 | execution=vm, 109 | frequency=default_frequency, 110 | ) 111 | self.executions[vm_hash] = snapshot_execution 112 | await snapshot_execution.start() 113 | 114 | async def stop_for(self, vm_hash: ItemHash) -> None: 115 | try: 116 | snapshot_execution = self.executions.pop(vm_hash) 117 | except KeyError: 118 | logger.warning("Could not find snapshot task for instance %s", vm_hash) 119 | return 120 | 121 | await snapshot_execution.stop() 122 | 123 | async def stop_all(self) -> None: 124 | # Iterate over a copy of the keys: stop_for() pops entries from the dict 125 | await asyncio.gather(*(self.stop_for(vm_hash) for vm_hash in list(self.executions))) 126 | -------------------------------------------------------------------------------- /src/aleph/vm/controllers/firecracker/snapshots.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from pathlib import Path 3 | 4 | from aleph_message.models import ItemHash 5 | 6 | from aleph.vm.conf import SnapshotCompressionAlgorithm 7 | from aleph.vm.storage import compress_volume_snapshot, create_volume_snapshot 8 | 9 | logger = logging.getLogger(__name__) 10 | 11 | 12 | class DiskVolumeFile: 13 | path: Path 14 | size: int 15 | 16 | def __init__(self, path: Path): 17 | self.path = path 18 | self.size = path.stat().st_size 19 | 20 | 21 | class CompressedDiskVolumeSnapshot(DiskVolumeFile): 22 | algorithm: SnapshotCompressionAlgorithm 23 | 24 | def __init__(self, path: Path, algorithm: SnapshotCompressionAlgorithm): 25 | super().__init__(path=path) 26 | self.algorithm = algorithm 27 | 28 | def delete(self) -> None: 29 | self.path.unlink(missing_ok=True) 30 | 31 | async def upload(self) -> ItemHash: 32 | # TODO: Upload snapshots to Aleph Network 33 | pass 34 | 35 | 36 | class DiskVolumeSnapshot(DiskVolumeFile): 37 | compressed: CompressedDiskVolumeSnapshot | None = None 38 | 39 | def delete(self) -> None: 40 | if self.compressed: 41 | self.compressed.delete() 42 | 43 | self.path.unlink(missing_ok=True) 44 | 45 | async def compress(self, algorithm: SnapshotCompressionAlgorithm) -> CompressedDiskVolumeSnapshot: 46 | compressed_snapshot = await compress_volume_snapshot(self.path, algorithm) 47 | compressed = CompressedDiskVolumeSnapshot(path=compressed_snapshot, algorithm=algorithm) 48 | self.compressed = compressed 49 | return compressed 50 | 51 | 52 | class DiskVolume(DiskVolumeFile): 53 | async def take_snapshot(self) -> DiskVolumeSnapshot: 54 | snapshot = await create_volume_snapshot(self.path) 55 | return DiskVolumeSnapshot(snapshot) 56 | --------------------------------------------------------------------------------
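Taken together, the classes above form a small pipeline: `DiskVolume` wraps a disk file, `take_snapshot()` produces a `DiskVolumeSnapshot`, and `compress()` wraps the result in a `CompressedDiskVolumeSnapshot`. A minimal usage sketch (not part of the repository) follows; the volume path and the `SnapshotCompressionAlgorithm` member name are assumptions, since the enum's members are not shown in this excerpt.

```python
import asyncio
from pathlib import Path

from aleph.vm.conf import SnapshotCompressionAlgorithm
from aleph.vm.controllers.firecracker.snapshots import DiskVolume


async def snapshot_and_compress(volume_path: Path) -> None:
    volume = DiskVolume(volume_path)  # records the file's path and size
    snapshot = await volume.take_snapshot()  # delegates to create_volume_snapshot()
    # The member name "gz" is an assumption about SnapshotCompressionAlgorithm
    compressed = await snapshot.compress(SnapshotCompressionAlgorithm.gz)
    print(f"{snapshot.path} ({snapshot.size} bytes) -> {compressed.path}")


# Hypothetical volume path:
# asyncio.run(snapshot_and_compress(Path("/var/lib/aleph/vm/volumes/example.ext4")))
```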
/src/aleph/vm/controllers/firecracker/storage.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/aleph-vm/35ce27f1e51fd1bc86ceea502a81de77c26c5fe3/src/aleph/vm/controllers/firecracker/storage.py -------------------------------------------------------------------------------- /src/aleph/vm/controllers/interface.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | from abc import ABC 4 | from asyncio.subprocess import Process 5 | from collections.abc import Callable, Coroutine 6 | from typing import Any 7 | 8 | from aleph_message.models import ItemHash 9 | from aleph_message.models.execution.environment import MachineResources 10 | 11 | from aleph.vm.controllers.firecracker.snapshots import CompressedDiskVolumeSnapshot 12 | from aleph.vm.network.interfaces import TapInterface 13 | from aleph.vm.utils.logs import get_past_vm_logs, make_logs_queue 14 | 15 | logger = logging.getLogger(__name__) 16 | 17 | 18 | class AlephVmControllerInterface(ABC): 19 | log_queues: list[asyncio.Queue] = [] 20 | _queue_cancellers: dict[asyncio.Queue, Callable] = {} 21 | 22 | vm_id: int 23 | """id in the VMPool, attributed at execution""" 24 | vm_hash: ItemHash 25 | """identifier for the VM definition, linked to an Aleph Message""" 26 | resources: Any 27 | """local resource for the machine""" 28 | enable_console: bool 29 | enable_networking: bool 30 | """enable networking for this VM""" 31 | hardware_resources: MachineResources 32 | support_snapshot: bool 33 | """Does this controller support snapshotting""" 34 | guest_api_process: Process | None = None 35 | tap_interface: TapInterface | None = None 36 | """Network interface used for this VM""" 37 | 38 | def get_ip(self) -> str | None: 39 | if self.tap_interface: 40 | return self.tap_interface.guest_ip.with_prefixlen 41 | return None 42 | 43 | def get_ip_route(self) -> str | None: 44 | if self.tap_interface: 45 | return str(self.tap_interface.host_ip).split("/", 1)[0] 46 | return None 47 | 48 | def get_ipv6(self) -> str | None: 49 | if self.tap_interface: 50 | return self.tap_interface.guest_ipv6.with_prefixlen 51 | return None 52 | 53 | def get_ipv6_gateway(self) -> str | None: 54 | if self.tap_interface: 55 | return str(self.tap_interface.host_ipv6.ip) 56 | return None 57 | 58 | def to_dict(self): 59 | """Dict representation of the virtual machine. Used to record resource usage and for JSON serialization.""" 60 | raise NotImplementedError() 61 | 62 | async def setup(self): 63 | """Configuration done before the VM process is started""" 64 | raise NotImplementedError() 65 | 66 | async def start(self): 67 | """Start the VM process""" 68 | raise NotImplementedError() 69 | 70 | async def wait_for_init(self) -> None: 71 | """Wait for the init process of the virtual machine to be ready. 
72 | May be empty.""" 73 | pass 74 | 75 | async def configure(self) -> None: 76 | """Configuration done after the VM process is started""" 77 | raise NotImplementedError() 78 | 79 | async def load_configuration(self) -> None: 80 | """Load configuration just after the VM process is started""" 81 | raise NotImplementedError() 82 | 83 | async def start_guest_api(self): 84 | raise NotImplementedError() 85 | 86 | async def stop_guest_api(self): 87 | raise NotImplementedError() 88 | 89 | async def teardown(self) -> Coroutine: 90 | raise NotImplementedError() 91 | 92 | async def create_snapshot(self) -> CompressedDiskVolumeSnapshot: 93 | """Must be implemented if self.support_snapshot is True""" 94 | raise NotImplementedError() 95 | 96 | def get_log_queue(self) -> asyncio.Queue: 97 | queue, canceller = make_logs_queue(self._journal_stdout_name, self._journal_stderr_name) 98 | self._queue_cancellers[queue] = canceller 99 | # Limit the number of queues per VM 100 | if len(self.log_queues) > 20: 101 | logger.warning("Too many log queues, dropping the oldest one") 102 | self.unregister_queue(self.log_queues[0]) 103 | self.log_queues.append(queue) 104 | return queue 105 | 106 | def unregister_queue(self, queue: asyncio.Queue) -> None: 107 | if queue in self.log_queues: 108 | self._queue_cancellers[queue]() 109 | del self._queue_cancellers[queue] 110 | self.log_queues.remove(queue) 111 | while not queue.empty(): # drain any pending log entries 112 | queue.get_nowait() 113 | 114 | @property 115 | def _journal_stdout_name(self) -> str: 116 | return f"vm-{self.vm_hash}-stdout" 117 | 118 | @property 119 | def _journal_stderr_name(self) -> str: 120 | return f"vm-{self.vm_hash}-stderr" 121 | 122 | def past_logs(self): 123 | yield from get_past_vm_logs(self._journal_stdout_name, self._journal_stderr_name) 124 | -------------------------------------------------------------------------------- /src/aleph/vm/controllers/qemu/__init__.py: -------------------------------------------------------------------------------- 1 | from .instance import AlephQemuInstance 2 | 3 | __all__ = ("AlephQemuInstance",) 4 | -------------------------------------------------------------------------------- /src/aleph/vm/controllers/qemu/client.py: -------------------------------------------------------------------------------- 1 | import qmp 2 | from pydantic import BaseModel 3 | 4 | 5 | class VmSevInfo(BaseModel): 6 | enabled: bool 7 | api_major: int 8 | api_minor: int 9 | build_id: int 10 | policy: int 11 | state: str 12 | handle: int 13 | 14 | 15 | class QemuVmClient: 16 | def __init__(self, vm): 17 | self.vm = vm 18 | if not (vm.qmp_socket_path and vm.qmp_socket_path.exists()): 19 | msg = "VM is not running" 20 | raise Exception(msg) 21 | client = qmp.QEMUMonitorProtocol(str(vm.qmp_socket_path)) 22 | client.connect() 23 | 24 | # qmp_client = qmp.QEMUMonitorProtocol(address=("localhost", vm.qmp_port)) 25 | self.qmp_client = client 26 | 27 | def __enter__(self): 28 | return self 29 | 30 | def __exit__(self, exc_type, exc_val, exc_tb): 31 | self.close() 32 | 33 | def close(self) -> None: 34 | self.qmp_client.close() 35 | 36 | def query_sev_info(self) -> VmSevInfo: 37 | caps = self.qmp_client.command("query-sev") 38 | return VmSevInfo( 39 | enabled=caps["enabled"], 40 | api_major=caps["api-major"], 41 | api_minor=caps["api-minor"], 42 | handle=caps["handle"], 43 | state=caps["state"], 44 | build_id=caps["build-id"], 45 | policy=caps["policy"], 46 | ) 47 | 48 | def query_launch_measure(self) -> str: 49 | measure = self.qmp_client.command("query-sev-launch-measure") 50 | return
measure["data"] 51 | 52 | def inject_secret(self, packet_header: str, secret: str) -> None: 53 | """ 54 | Injects the secret in the SEV secret area. 55 | 56 | :param packet_header: The packet header, as a base64 string. 57 | :param secret: The encoded secret, as a base64 string. 58 | """ 59 | 60 | self.qmp_client.command( 61 | "sev-inject-launch-secret", 62 | **{"packet-header": packet_header, "secret": secret}, 63 | ) 64 | 65 | def continue_execution(self) -> None: 66 | """ 67 | Resumes the execution of the VM. 68 | """ 69 | self.qmp_client.command("cont") 70 | 71 | def query_status(self) -> None: 72 | """ 73 | Get running status. 74 | """ 75 | # {'status': 'prelaunch', 'singlestep': False, 'running': False} 76 | return self.qmp_client.command("query-status") 77 | -------------------------------------------------------------------------------- /src/aleph/vm/controllers/qemu_confidential/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/aleph-vm/35ce27f1e51fd1bc86ceea502a81de77c26c5fe3/src/aleph/vm/controllers/qemu_confidential/__init__.py -------------------------------------------------------------------------------- /src/aleph/vm/guest_api/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/aleph-vm/35ce27f1e51fd1bc86ceea502a81de77c26c5fe3/src/aleph/vm/guest_api/__init__.py -------------------------------------------------------------------------------- /src/aleph/vm/hypervisors/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/aleph-vm/35ce27f1e51fd1bc86ceea502a81de77c26c5fe3/src/aleph/vm/hypervisors/__init__.py -------------------------------------------------------------------------------- /src/aleph/vm/hypervisors/firecracker/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/aleph-vm/35ce27f1e51fd1bc86ceea502a81de77c26c5fe3/src/aleph/vm/hypervisors/firecracker/__init__.py -------------------------------------------------------------------------------- /src/aleph/vm/hypervisors/firecracker/config.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | from pydantic import BaseModel, ConfigDict, PositiveInt 4 | 5 | VSOCK_PATH = "/tmp/v.sock" 6 | 7 | 8 | class BootSource(BaseModel): 9 | kernel_image_path: Path = Path("vmlinux.bin") 10 | boot_args: str = "console=ttyS0 reboot=k panic=1 pci=off ro noapic nomodules random.trust_cpu=on" 11 | 12 | @staticmethod 13 | def args(enable_console: bool = True, writable: bool = False): 14 | default = "reboot=k panic=1 pci=off noapic nomodules random.trust_cpu=on" 15 | if writable: 16 | default = default + " rw" 17 | else: 18 | default = default + " ro" 19 | if enable_console: 20 | return "console=ttyS0 " + default 21 | else: 22 | return default 23 | 24 | 25 | class Drive(BaseModel): 26 | drive_id: str = "rootfs" 27 | path_on_host: Path = Path("./runtimes/aleph-alpine-3.13-python/rootfs.ext4") 28 | is_root_device: bool = True 29 | is_read_only: bool = True 30 | 31 | 32 | class MachineConfig(BaseModel): 33 | vcpu_count: PositiveInt = 1 34 | mem_size_mib: PositiveInt = 128 35 | smt: bool = False 36 | 37 | 38 | class Vsock(BaseModel): 39 | vsock_id: str = "1" 40 | guest_cid: PositiveInt = 3 41 | uds_path: str = VSOCK_PATH 42 | 43 | 44 | class 
NetworkInterface(BaseModel): 45 | iface_id: str = "eth0" 46 | guest_mac: str = "AA:FC:00:00:00:01" 47 | host_dev_name: str 48 | 49 | 50 | class FirecrackerConfig(BaseModel): 51 | boot_source: BootSource 52 | drives: list[Drive] 53 | machine_config: MachineConfig 54 | vsock: Vsock | None = None 55 | network_interfaces: list[NetworkInterface] | None = None 56 | 57 | model_config = ConfigDict(populate_by_name=True, alias_generator=lambda field_name: field_name.replace("_", "-")) 58 | -------------------------------------------------------------------------------- /src/aleph/vm/hypervisors/qemu/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/aleph-vm/35ce27f1e51fd1bc86ceea502a81de77c26c5fe3/src/aleph/vm/hypervisors/qemu/__init__.py -------------------------------------------------------------------------------- /src/aleph/vm/hypervisors/qemu_confidential/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/aleph-vm/35ce27f1e51fd1bc86ceea502a81de77c26c5fe3/src/aleph/vm/hypervisors/qemu_confidential/__init__.py -------------------------------------------------------------------------------- /src/aleph/vm/network/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/aleph-vm/35ce27f1e51fd1bc86ceea502a81de77c26c5fe3/src/aleph/vm/network/__init__.py -------------------------------------------------------------------------------- /src/aleph/vm/network/get_interface_ipv4.py: -------------------------------------------------------------------------------- 1 | import ipaddress 2 | 3 | import netifaces 4 | 5 | 6 | def get_interface_ipv4(interface_name: str) -> str: 7 | """ 8 | Get the main IPv4 address from a network interface. 
9 | 10 | Args: 11 | interface_name: Name of the network interface (e.g., 'eth0', 'wlan0') 12 | 13 | Returns: 14 | str: The IPv4 address of the interface 15 | 16 | Raises: 17 | ValueError: If the interface doesn't exist or has no IPv4 address 18 | """ 19 | try: 20 | # Check if the interface exists 21 | if interface_name not in netifaces.interfaces(): 22 | raise ValueError(f"Interface {interface_name} does not exist") 23 | 24 | # Get addresses for the interface 25 | addrs = netifaces.ifaddresses(interface_name) 26 | 27 | # Check for IPv4 addresses (AF_INET is IPv4) 28 | if netifaces.AF_INET not in addrs: 29 | raise ValueError(f"No IPv4 address found for interface {interface_name}") 30 | 31 | # Get the first IPv4 address 32 | ipv4_info = addrs[netifaces.AF_INET][0] 33 | ipv4_addr = ipv4_info["addr"] 34 | 35 | # Validate that it's a proper IPv4 address 36 | ipaddress.IPv4Address(ipv4_addr) 37 | 38 | return ipv4_addr 39 | 40 | except Exception as e: 41 | raise ValueError(f"Error getting IPv4 address: {str(e)}") 42 | -------------------------------------------------------------------------------- /src/aleph/vm/network/ipaddresses.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Iterator 2 | from ipaddress import IPv4Interface, IPv4Network 3 | 4 | 5 | class IPv4NetworkWithInterfaces(IPv4Network): 6 | def hosts(self) -> Iterator[IPv4Interface]: 7 | network = int(self.network_address) 8 | broadcast = int(self.broadcast_address) 9 | for x in range(network + 1, broadcast): 10 | yield IPv4Interface((x, self.prefixlen)) 11 | 12 | def __getitem__(self, n) -> IPv4Interface: 13 | network = int(self.network_address) 14 | broadcast = int(self.broadcast_address) 15 | if n >= 0: 16 | if network + n > broadcast: 17 | msg = "address out of range" 18 | raise IndexError(msg) 19 | return IPv4Interface((network + n, self.prefixlen)) 20 | else: 21 | n += 1 22 | if broadcast + n < network: 23 | msg = "address out of range" 24 | raise IndexError(msg) 25 | return IPv4Interface((broadcast + n, self.prefixlen)) 26 | -------------------------------------------------------------------------------- /src/aleph/vm/network/ndp_proxy.py: -------------------------------------------------------------------------------- 1 | """ 2 | Neighbourhood Discovery Proxy (NDP) functionalities. 3 | 4 | Some cloud providers do not route the whole advertised IPv6 address range to servers, but instead 5 | only route one address. They will issue NDP requests to the network to determine if the other 6 | addresses in the range are used. This means that our server (be it the hypervisor or the VMs) 7 | has to answer to these requests to make the VMs routable. 8 | 9 | To achieve this, we use ndppd. Each time an update is required, we overwrite /etc/ndppd.conf 10 | and restart the service. 
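For illustration, with a single proxied VM the generated /etc/ndppd.conf would look
roughly like this (the interface names and the address range are example values):

    proxy eth0 {
        rule 2001:db8:100::/64 {
            iface tap0
        }
    }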
11 | """ 12 | 13 | import logging 14 | from dataclasses import dataclass 15 | from ipaddress import IPv6Network 16 | from pathlib import Path 17 | from subprocess import CalledProcessError 18 | 19 | from aleph.vm.utils import run_in_subprocess 20 | 21 | logger = logging.getLogger(__name__) 22 | 23 | 24 | @dataclass 25 | class NdpRule: 26 | address_range: IPv6Network 27 | 28 | 29 | class NdpProxy: 30 | def __init__(self, host_network_interface: str): 31 | self.host_network_interface = host_network_interface 32 | self.interface_address_range_mapping: dict[str, IPv6Network] = {} 33 | 34 | @staticmethod 35 | async def _restart_ndppd(): 36 | logger.debug("Restarting ndppd") 37 | try: 38 | await run_in_subprocess(["systemctl", "restart", "ndppd"]) 39 | except CalledProcessError as error: 40 | logger.error("Failed to restart ndppd: %s", error) 41 | # We do not raise the error here, since this should not crash the entire system 42 | 43 | async def _update_ndppd_conf(self): 44 | config = f"proxy {self.host_network_interface} {{\n" 45 | for interface, address_range in self.interface_address_range_mapping.items(): 46 | config += f" rule {address_range} {{\n iface {interface}\n }}\n" 47 | config += "}\n" 48 | Path("/etc/ndppd.conf").write_text(config) 49 | await self._restart_ndppd() 50 | 51 | async def add_range(self, interface: str, address_range: IPv6Network, update_service: bool = True): 52 | logger.debug("Proxying range %s -> %s", address_range, interface) 53 | self.interface_address_range_mapping[interface] = address_range 54 | if update_service: 55 | await self._update_ndppd_conf() 56 | 57 | async def delete_range(self, interface: str, update_service: bool = True): 58 | try: 59 | address_range = self.interface_address_range_mapping.pop(interface) 60 | logger.debug("Deactivated proxying for %s (%s)", interface, address_range) 61 | except KeyError: 62 | return 63 | 64 | if update_service: 65 | await self._update_ndppd_conf() 66 | -------------------------------------------------------------------------------- /src/aleph/vm/network/port_availability_checker.py: -------------------------------------------------------------------------------- 1 | import socket 2 | from typing import Optional 3 | 4 | from aleph.vm.network.firewall import check_nftables_redirections 5 | 6 | MIN_DYNAMIC_PORT = 24000 7 | MAX_PORT = 65535 8 | 9 | 10 | def get_available_host_port(start_port: Optional[int] = None) -> int: 11 | """Find an available port on the host system. 12 | 13 | Args: 14 | start_port: Optional starting port number. 
If not provided, starts from MIN_DYNAMIC_PORT 15 | 16 | Returns: 17 | An available port number 18 | 19 | Raises: 20 | RuntimeError: If no ports are available in the valid range 21 | """ 22 | start_port = start_port if start_port and start_port >= MIN_DYNAMIC_PORT else MIN_DYNAMIC_PORT 23 | for port in range(start_port, MAX_PORT): 24 | try: 25 | # check if there is already a redirect to that port 26 | if check_nftables_redirections(port): 27 | continue 28 | # Try both TCP and UDP on all interfaces 29 | with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as tcp_sock: 30 | tcp_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) 31 | tcp_sock.bind(("0.0.0.0", port)) 32 | tcp_sock.listen(1) 33 | 34 | with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as udp_sock: 35 | udp_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) 36 | udp_sock.bind(("0.0.0.0", port)) 37 | 38 | return port 39 | 40 | except OSError: # socket.error is an alias of OSError 41 | pass 42 | 43 | raise RuntimeError(f"No available ports found in range {MIN_DYNAMIC_PORT}-{MAX_PORT}") 44 | -------------------------------------------------------------------------------- /src/aleph/vm/orchestrator/INSTANCES.md: -------------------------------------------------------------------------------- 1 | 2 | # Instance Messages 3 | 4 | Support for Instance messages in the aleph-message repository was added in this PR: 5 | https://github.com/aleph-im/aleph-message/pull/48 6 | 7 | ## Changes added 8 | 9 | ### Aleph message repository 10 | 11 | I added a new type of message called `InstanceMessage`, with the changes that we designed for VM instances. 12 | The content of this message is a new type called `InstanceContent`, which replaces the `runtime` field with `rootfs`: instead of 13 | an immutable volume, the root filesystem becomes a persistent volume with a new field called `parent`, the 14 | item hash of the base filesystem of the VM. We will create a .ext4 file of the requested volume size and **"attach"** the base filesystem 15 | to it. 16 | 17 | Note that this filesystem must be in **.ext4** format and cannot be a **squashfs** 18 | file, because we map it as a block device inside the machine. 19 | 20 | I also added a union type of Instance and Program messages called `ExecutableMessage`, and a new 21 | `ExecutableContent` type as the union of the Instance and Program content types. 22 | 23 | ### Aleph VM repository 24 | 25 | I have created a function called `create_devmapper` in _**vm_supervisor/storage.py**_. This method creates a 26 | dev-mapper device based on the parent reference (a rough sketch follows below). I followed 27 | [this](https://community.aleph.im/t/deploying-mutable-vm-instances-on-aleph/56/2) implementation. 28 | 29 | In the _**firecracker/microvm.py**_ file I added the `mount_rootfs` method to mount the block device when the 30 | jailer is used, and to assign the correct permissions. When the VM goes down, all these configurations are cleared in the 31 | `teardown` process. Since linking a block device into a chroot does not work, I had to use a workaround: copy all 32 | the "dm-*" block devices into the chroot and mount the entire `/dev/mapper` folder inside it. I did not 33 | find a better solution. 34 | 35 | I also added support for running a writable root filesystem in Firecracker, bypassing all the parts that read and 36 | use the **_"code"_** properties, such as the encoding or the entrypoint.
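A rough sketch of what this dev-mapper setup does (device names, sizes and paths here are illustrative; this is not the actual implementation in `vm_supervisor/storage.py`):

```python
import subprocess


def create_devmapper_sketch(base_image: str, overlay_file: str, name: str, size_bytes: int) -> str:
    """Expose a read-only base image plus a writable overlay as one block device."""
    sectors = size_bytes // 512  # device-mapper tables are expressed in 512-byte sectors
    base = subprocess.run(
        ["losetup", "--find", "--show", "--read-only", base_image],
        capture_output=True, text=True, check=True,
    ).stdout.strip()
    overlay = subprocess.run(
        ["losetup", "--find", "--show", overlay_file],
        capture_output=True, text=True, check=True,
    ).stdout.strip()
    # dm-snapshot target: reads fall through to the base device, writes go to the overlay
    table = f"0 {sectors} snapshot {base} {overlay} P 8"
    subprocess.run(["dmsetup", "create", name, "--table", table], check=True)
    return f"/dev/mapper/{name}"
```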
37 | 38 | A new instance message example has been added in **_examples/instance_message_from_aleph.json_**. 39 | 40 | ### Current status 41 | 42 | Now the Dev-mapper device works well, Firecracker loads it in write state, but we need to fix 2 things: 43 | - Route the requests from the CRN to the Firecracker VM on any port, not only using the 8080. 44 | - ~~- Use the entire hard disk inside VM, because now only detects the size of the rootfs.~~(Done) 45 | -------------------------------------------------------------------------------- /src/aleph/vm/orchestrator/__init__.py: -------------------------------------------------------------------------------- 1 | from aleph.vm.version import __version__ 2 | 3 | __all__ = ("__version__",) 4 | -------------------------------------------------------------------------------- /src/aleph/vm/orchestrator/__main__.py: -------------------------------------------------------------------------------- 1 | from .cli import main 2 | 3 | if __name__ == "__main__": 4 | main() 5 | -------------------------------------------------------------------------------- /src/aleph/vm/orchestrator/alembic.ini: -------------------------------------------------------------------------------- 1 | # A generic, single database configuration. 2 | 3 | [alembic] 4 | # path to migration scripts 5 | ;script_location = orchestrator/migrations 6 | script_location = migrations 7 | 8 | # template used to generate migration file names; The default value is %%(rev)s_%%(slug)s 9 | # Uncomment the line below if you want the files to be prepended with date and time 10 | # see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file 11 | # for all available tokens 12 | # file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s 13 | 14 | # sys.path path, will be prepended to sys.path if present. 15 | # defaults to the current working directory. 16 | prepend_sys_path = . 17 | 18 | # timezone to use when rendering the date within the migration file 19 | # as well as the filename. 20 | # If specified, requires the python-dateutil library that can be 21 | # installed by adding `alembic[tz]` to the pip requirements 22 | # string value is passed to dateutil.tz.gettz() 23 | # leave blank for localtime 24 | # timezone = 25 | 26 | # max length of characters to apply to the 27 | # "slug" field 28 | # truncate_slug_length = 40 29 | 30 | # set to 'true' to run the environment during 31 | # the 'revision' command, regardless of autogenerate 32 | # revision_environment = false 33 | 34 | # set to 'true' to allow .pyc and .pyo files without 35 | # a source .py file to be detected as revisions in the 36 | # versions/ directory 37 | # sourceless = false 38 | 39 | # version location specification; This defaults 40 | # to migrations/versions. When using multiple version 41 | # directories, initial revisions must be specified with --version-path. 42 | # The path separator used here should be the separator specified by "version_path_separator" below. 43 | # version_locations = %(here)s/bar:%(here)s/bat:migrations/versions 44 | 45 | # version path separator; As mentioned above, this is the character used to split 46 | # version_locations. The default within new alembic.ini files is "os", which uses os.pathsep. 47 | # If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas. 
48 | # Valid values for version_path_separator are: 49 | # 50 | # version_path_separator = : 51 | # version_path_separator = ; 52 | # version_path_separator = space 53 | version_path_separator = os # Use os.pathsep. Default configuration used for new projects. 54 | 55 | # the output encoding used when revision files 56 | # are written from script.py.mako 57 | # output_encoding = utf-8 58 | 59 | [post_write_hooks] 60 | # post_write_hooks defines scripts or Python functions that are run 61 | # on newly generated revision scripts. See the documentation for further 62 | # detail and examples 63 | 64 | # format using "black" - use the console_scripts runner, against the "black" entrypoint 65 | # hooks = black 66 | # black.type = console_scripts 67 | # black.entrypoint = black 68 | # black.options = -l 79 REVISION_SCRIPT_FILENAME 69 | 70 | # Logging configuration 71 | [loggers] 72 | keys = root,sqlalchemy,alembic 73 | 74 | [handlers] 75 | keys = console 76 | 77 | [formatters] 78 | keys = generic 79 | 80 | [logger_root] 81 | level = WARN 82 | handlers = console 83 | qualname = 84 | 85 | [logger_sqlalchemy] 86 | level = WARN 87 | handlers = 88 | qualname = sqlalchemy.engine 89 | 90 | [logger_alembic] 91 | level = INFO 92 | handlers = 93 | qualname = alembic 94 | 95 | [handler_console] 96 | class = StreamHandler 97 | args = (sys.stderr,) 98 | level = NOTSET 99 | formatter = generic 100 | 101 | [formatter_generic] 102 | format = %(levelname)-5.5s [%(name)s] %(message)s 103 | datefmt = %H:%M:%S 104 | -------------------------------------------------------------------------------- /src/aleph/vm/orchestrator/chain.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from aleph_message.models import Chain 4 | from pydantic import BaseModel, model_validator 5 | 6 | logger = logging.getLogger(__name__) 7 | 8 | 9 | class ChainInfo(BaseModel): 10 | """ 11 | A chain information. 12 | """ 13 | 14 | chain_id: int 15 | rpc: str 16 | standard_token: str | None = None 17 | super_token: str | None = None 18 | testnet: bool = False 19 | active: bool = True 20 | 21 | @property 22 | def token(self) -> str | None: 23 | return self.super_token or self.standard_token 24 | 25 | @model_validator(mode="before") 26 | @classmethod 27 | def check_tokens(cls, values): 28 | if not values.get("standard_token") and not values.get("super_token"): 29 | msg = "At least one of standard_token or super_token must be provided." 
30 | raise ValueError(msg) 31 | return values 32 | 33 | 34 | STREAM_CHAINS: dict[Chain | str, ChainInfo] = { 35 | # TESTNETS 36 | "SEPOLIA": ChainInfo( 37 | chain_id=11155111, 38 | rpc="https://eth-sepolia.public.blastapi.io", 39 | standard_token="0xc4bf5cbdabe595361438f8c6a187bdc330539c60", 40 | super_token="0x22064a21fee226d8ffb8818e7627d5ff6d0fc33a", 41 | active=False, 42 | testnet=True, 43 | ), 44 | # MAINNETS 45 | Chain.ETH: ChainInfo( 46 | chain_id=1, 47 | rpc="https://eth-mainnet.public.blastapi.io", 48 | standard_token="0x27702a26126e0B3702af63Ee09aC4d1A084EF628", 49 | active=False, 50 | ), 51 | Chain.AVAX: ChainInfo( 52 | chain_id=43114, 53 | rpc="https://api.avax.network/ext/bc/C/rpc", 54 | super_token="0xc0Fbc4967259786C743361a5885ef49380473dCF", 55 | ), 56 | Chain.BASE: ChainInfo( 57 | chain_id=8453, 58 | rpc="https://base-mainnet.public.blastapi.io", 59 | super_token="0xc0Fbc4967259786C743361a5885ef49380473dCF", 60 | ), 61 | } 62 | 63 | 64 | class InvalidChainError(ValueError): 65 | pass 66 | 67 | 68 | def get_chain(chain: str) -> ChainInfo: 69 | try: 70 | return STREAM_CHAINS[chain] 71 | except KeyError: 72 | msg = f"Unknown chain id for chain {chain}" 73 | raise InvalidChainError(msg) 74 | -------------------------------------------------------------------------------- /src/aleph/vm/orchestrator/custom_logs.py: -------------------------------------------------------------------------------- 1 | import contextlib 2 | import logging 3 | from contextvars import ContextVar 4 | 5 | from aleph_message.models import ItemHash 6 | 7 | from aleph.vm.models import VmExecution 8 | 9 | ctx_current_execution: ContextVar[VmExecution | None] = ContextVar("current_execution") 10 | ctx_current_execution_hash: ContextVar[ItemHash | None] = ContextVar("current_execution_hash") 11 | 12 | 13 | @contextlib.contextmanager 14 | def set_vm_for_logging(vm_hash): 15 | token = ctx_current_execution_hash.set(vm_hash) 16 | try: 17 | yield 18 | finally: 19 | ctx_current_execution_hash.reset(token) 20 | 21 | 22 | class InjectingFilter(logging.Filter): 23 | """ 24 | A filter which injects context-specific information into logs 25 | """ 26 | 27 | def filter(self, record): 28 | vm_hash = ctx_current_execution_hash.get(None) 29 | if not vm_hash: 30 | vm_execution: VmExecution | None = ctx_current_execution.get(None) 31 | if vm_execution: 32 | vm_hash = vm_execution.vm_hash 33 | 34 | if not vm_hash: 35 | return False 36 | 37 | record.vm_hash = vm_hash 38 | return True 39 | 40 | 41 | def setup_handlers(args, log_format): 42 | # Set up two custom handler, one that will add the VM information if present and the other print if not 43 | execution_handler = logging.StreamHandler() 44 | execution_handler.addFilter(InjectingFilter()) 45 | execution_handler.setFormatter( 46 | logging.Formatter("%(asctime)s | %(levelname)s %(name)s:%(lineno)s | {%(vm_hash)s} %(message)s ") 47 | ) 48 | non_execution_handler = logging.StreamHandler() 49 | non_execution_handler.addFilter(lambda x: ctx_current_execution_hash.get(None) is None) 50 | non_execution_handler.setFormatter( 51 | logging.Formatter("%(asctime)s | %(levelname)s %(name)s:%(lineno)s | %(message)s ") 52 | ) 53 | return [non_execution_handler, execution_handler] 54 | -------------------------------------------------------------------------------- /src/aleph/vm/orchestrator/machine.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import json 3 | import re 4 | import shutil 5 | 6 | import psutil 7 | 8 | from 
aleph.vm.utils import run_in_subprocess 9 | 10 | 11 | async def get_hardware_info(): 12 | lshw_path = shutil.which("lshw") 13 | assert lshw_path, "lshw not found in PATH. apt install lshw." 14 | lshw_output = await run_in_subprocess([lshw_path, "-sanitize", "-json"]) 15 | data = json.loads(lshw_output) 16 | 17 | hw_info = {"cpu": None, "memory": None} 18 | 19 | for hw in data["children"][0]["children"]: 20 | if hw["id"] == "cpu": 21 | hw_info["cpu"] = hw 22 | elif hw["class"] == "memory" and hw["id"] == "memory": 23 | hw_info["memory"] = hw 24 | 25 | return hw_info 26 | 27 | 28 | def get_cpu_info(hw): 29 | cpu_info = hw["cpu"] 30 | 31 | if "x86_64" in cpu_info["capabilities"] or "x86-64" in cpu_info["capabilities"]: 32 | architecture = "x86_64" 33 | elif "arm64" in cpu_info["capabilities"] or "arm-64" in cpu_info["capabilities"]: 34 | architecture = "arm64" 35 | else: 36 | architecture = None 37 | 38 | vendor = cpu_info["vendor"] 39 | # lshw vendor implementation => https://github.com/lyonel/lshw/blob/15e4ca64647ad119b69be63274e5de2696d3934f/src/core/cpuinfo.cc#L308 40 | 41 | if "Intel Corp" in vendor: 42 | vendor = "GenuineIntel" 43 | elif "Advanced Micro Devices [AMD]" in vendor: 44 | vendor = "AuthenticAMD" 45 | 46 | return { 47 | "architecture": architecture, 48 | "vendor": vendor, 49 | "model": cpu_info["product"], 50 | "frequency": cpu_info["capacity"], 51 | "count": psutil.cpu_count(), 52 | } 53 | 54 | 55 | def get_memory_info(hw): 56 | mem_info = hw["memory"] 57 | 58 | memory_type = "" 59 | memory_clock = "" 60 | for bank in mem_info["children"]: 61 | memory_clock = bank.get("clock") 62 | if "description" in bank: 63 | matched = re.search("(DDR[2-6])", bank["description"]) 64 | if matched: 65 | memory_type = matched.group(0) 66 | break 67 | else: 68 | pass 69 | 70 | return { 71 | "size": mem_info["size"], 72 | "units": mem_info["units"], 73 | "type": memory_type, 74 | "clock": memory_clock, 75 | "clock_units": "Hz" if memory_clock is not None else "", 76 | } 77 | -------------------------------------------------------------------------------- /src/aleph/vm/orchestrator/messages.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import copy 3 | 4 | from aiohttp import ClientConnectorError, ClientResponseError, ClientSession 5 | from aiohttp.web_exceptions import HTTPNotFound, HTTPServiceUnavailable 6 | from aleph_message.models import ExecutableMessage, ItemHash, MessageType 7 | from aleph_message.status import MessageStatus 8 | 9 | from aleph.vm.conf import settings 10 | from aleph.vm.storage import get_latest_amend, get_message 11 | 12 | 13 | async def try_get_message(ref: str) -> ExecutableMessage: 14 | """Get the message or raise an aiohttp HTTP error""" 15 | try: 16 | return await get_message(ref) 17 | except ClientConnectorError as error: 18 | raise HTTPServiceUnavailable(reason="Aleph Connector unavailable") from error 19 | except ClientResponseError as error: 20 | if error.status == HTTPNotFound.status_code: 21 | raise HTTPNotFound(reason="Hash not found", text=f"Hash not found: {ref}") from error 22 | else: 23 | raise 24 | 25 | 26 | async def get_latest_ref(item_hash: str) -> str: 27 | try: 28 | return await get_latest_amend(item_hash) 29 | except ClientConnectorError as error: 30 | raise HTTPServiceUnavailable(reason="Aleph Connector unavailable") from error 31 | except ClientResponseError as error: 32 | if error.status == HTTPNotFound.status_code: 33 | raise HTTPNotFound(reason="Hash not found", text=f"Hash not 
found: {item_hash}") from error 34 | else: 35 | raise 36 | 37 | 38 | async def update_with_latest_ref(obj): 39 | """ 40 | Update the reference `ref` inplace if a newer version is available. 41 | 42 | Useful to update references in parallel with asyncio.gather. 43 | """ 44 | if hasattr(obj, "use_latest") and obj.use_latest: 45 | obj.ref = await get_latest_ref(obj.ref) 46 | else: 47 | return obj 48 | 49 | 50 | async def update_message(message: ExecutableMessage): 51 | if message.type == MessageType.program: 52 | # Load amends 53 | await asyncio.gather( 54 | update_with_latest_ref(message.content.runtime), 55 | update_with_latest_ref(message.content.code), 56 | update_with_latest_ref(message.content.data), 57 | *(update_with_latest_ref(volume) for volume in (message.content.volumes or [])), 58 | ) 59 | else: 60 | assert message.type == MessageType.instance 61 | await asyncio.gather( 62 | update_with_latest_ref(message.content.rootfs.parent), 63 | *(update_with_latest_ref(volume) for volume in (message.content.volumes or [])), 64 | ) 65 | 66 | 67 | async def load_updated_message( 68 | ref: ItemHash, 69 | ) -> tuple[ExecutableMessage, ExecutableMessage]: 70 | original_message = await try_get_message(ref) 71 | message = copy.deepcopy(original_message) 72 | await update_message(message) 73 | return message, original_message 74 | 75 | 76 | async def get_message_status(item_hash: ItemHash) -> MessageStatus: 77 | """ 78 | Fetch the status of an execution from the reference API server. 79 | We use a normal API call to the CCN instead to use the connector because we want to get the updated status of the 80 | message and bypass the messages cache. 81 | """ 82 | async with ClientSession() as session: 83 | url = f"{settings.API_SERVER}/api/v0/messages/{item_hash}" 84 | resp = await session.get(url) 85 | # Raise an error if the request failed 86 | resp.raise_for_status() 87 | 88 | resp_data = await resp.json() 89 | return resp_data["status"] 90 | -------------------------------------------------------------------------------- /src/aleph/vm/orchestrator/metrics.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from collections.abc import Iterable 3 | from pathlib import Path 4 | from typing import Any 5 | from uuid import UUID 6 | 7 | from sqlalchemy import ( 8 | JSON, 9 | Boolean, 10 | Column, 11 | DateTime, 12 | Float, 13 | Integer, 14 | String, 15 | delete, 16 | select, 17 | ) 18 | from sqlalchemy.ext.asyncio import ( 19 | AsyncEngine, 20 | AsyncSession, 21 | async_sessionmaker, 22 | create_async_engine, 23 | ) 24 | 25 | try: 26 | from sqlalchemy.orm import declarative_base 27 | except ImportError: 28 | from sqlalchemy.ext.declarative import declarative_base 29 | 30 | from aleph.vm.conf import make_db_url, settings 31 | 32 | AsyncSessionMaker: async_sessionmaker[AsyncSession] 33 | 34 | logger = logging.getLogger(__name__) 35 | 36 | Base: Any = declarative_base() 37 | 38 | 39 | def setup_engine(): 40 | global AsyncSessionMaker 41 | engine = create_async_engine(make_db_url(), echo=False) 42 | AsyncSessionMaker = async_sessionmaker(engine, expire_on_commit=False, class_=AsyncSession) 43 | return engine 44 | 45 | 46 | async def create_tables(engine: AsyncEngine): 47 | async with engine.begin() as conn: 48 | await conn.run_sync(Base.metadata.create_all) 49 | 50 | 51 | class ExecutionRecord(Base): 52 | __tablename__ = "executions" 53 | 54 | uuid = Column(String, primary_key=True) 55 | vm_hash = Column(String, nullable=False) 56 | vm_id = 
Column(Integer, nullable=True) 57 | 58 | time_defined = Column(DateTime, nullable=False) 59 | time_prepared = Column(DateTime) 60 | time_started = Column(DateTime) 61 | time_stopping = Column(DateTime) 62 | 63 | cpu_time_user = Column(Float, nullable=True) 64 | cpu_time_system = Column(Float, nullable=True) 65 | 66 | io_read_count = Column(Integer, nullable=True) 67 | io_write_count = Column(Integer, nullable=True) 68 | io_read_bytes = Column(Integer, nullable=True) 69 | io_write_bytes = Column(Integer, nullable=True) 70 | 71 | vcpus = Column(Integer, nullable=False) 72 | memory = Column(Integer, nullable=False) 73 | network_tap = Column(String, nullable=True) 74 | 75 | message = Column(JSON, nullable=True) 76 | original_message = Column(JSON, nullable=True) 77 | persistent = Column(Boolean, nullable=True) 78 | 79 | gpus = Column(JSON, nullable=True) 80 | mapped_ports = Column(JSON, nullable=True) 81 | 82 | def __repr__(self): 83 | return f"" 84 | 85 | def to_dict(self): 86 | return {c.name: getattr(self, c.name) for c in self.__table__.c} 87 | 88 | 89 | async def save_execution_data(execution_uuid: UUID, execution_data: str): 90 | """Save the execution data in a file on disk""" 91 | directory = Path(settings.EXECUTION_LOG_DIRECTORY) 92 | directory.mkdir(exist_ok=True) 93 | (directory / f"{execution_uuid}.json").write_text(execution_data) 94 | 95 | 96 | async def save_record(record: ExecutionRecord): 97 | """Record the resource usage in database""" 98 | async with AsyncSessionMaker() as session: # Use AsyncSession in a context manager 99 | session.add(record) 100 | await session.commit() # Use await for commit 101 | 102 | 103 | async def delete_record(execution_uuid: str): 104 | """Delete the resource usage in database""" 105 | async with AsyncSessionMaker() as session: 106 | try: 107 | statement = delete(ExecutionRecord).where(ExecutionRecord.uuid == execution_uuid) 108 | await session.execute(statement) 109 | await session.commit() 110 | finally: 111 | await session.close() 112 | 113 | 114 | async def get_execution_records() -> Iterable[ExecutionRecord]: 115 | """Get the execution records from the database.""" 116 | async with AsyncSessionMaker() as session: # Use AsyncSession in a context manager 117 | result = await session.execute(select(ExecutionRecord)) # Use execute for querying 118 | executions = result.scalars().all() 119 | await session.commit() 120 | return executions 121 | 122 | 123 | async def get_last_record_for_vm(vm_hash) -> ExecutionRecord | None: 124 | """Get the execution records from the database.""" 125 | async with AsyncSessionMaker() as session: # Use AsyncSession in a context manager 126 | result = await session.execute( 127 | select(ExecutionRecord).where(ExecutionRecord.vm_hash == vm_hash).limit(1) 128 | ) # Use execute for querying 129 | return result.scalar() 130 | -------------------------------------------------------------------------------- /src/aleph/vm/orchestrator/migrations/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/aleph-vm/35ce27f1e51fd1bc86ceea502a81de77c26c5fe3/src/aleph/vm/orchestrator/migrations/__init__.py -------------------------------------------------------------------------------- /src/aleph/vm/orchestrator/migrations/env.py: -------------------------------------------------------------------------------- 1 | from alembic import context 2 | from sqlalchemy import create_engine 3 | 4 | from aleph.vm.conf import make_sync_db_url 5 | 6 | # 
Auto-generate migrations 7 | from aleph.vm.orchestrator.metrics import Base 8 | 9 | # # this is the Alembic Config object, which provides 10 | # # access to the values within the .ini file in use. 11 | # config = context.config 12 | # 13 | # # Interpret the config file for Python logging. 14 | # # This line sets up loggers basically. 15 | # if config.config_file_name is not None: 16 | # fileConfig(config.config_file_name) 17 | 18 | 19 | target_metadata = Base.metadata 20 | 21 | # other values from the config, defined by the needs of env.py, 22 | # can be acquired: 23 | # my_important_option = config.get_main_option("my_important_option") 24 | # ... etc. 25 | 26 | 27 | def run_migrations_offline() -> None: 28 | """Run migrations in 'offline' mode. 29 | 30 | This configures the context with just a URL 31 | and not an Engine, though an Engine is acceptable 32 | here as well. By skipping the Engine creation 33 | we don't even need a DBAPI to be available. 34 | 35 | Calls to context.execute() here emit the given string to the 36 | script output. 37 | 38 | """ 39 | url = make_sync_db_url() 40 | 41 | context.configure( 42 | url=url, 43 | target_metadata=target_metadata, 44 | literal_binds=True, 45 | dialect_opts={"paramstyle": "named"}, 46 | ) 47 | 48 | with context.begin_transaction(): 49 | context.run_migrations() 50 | 51 | 52 | def run_migrations_online() -> None: 53 | """Run migrations in 'online' mode. 54 | 55 | In this scenario we need to create an Engine 56 | and associate a connection with the context. 57 | 58 | """ 59 | connectable = create_engine(make_sync_db_url()) 60 | with connectable.connect() as connection: 61 | context.configure(connection=connection, target_metadata=target_metadata) 62 | 63 | with context.begin_transaction(): 64 | context.run_migrations() 65 | 66 | 67 | if context.is_offline_mode(): 68 | run_migrations_offline() 69 | else: 70 | run_migrations_online() 71 | -------------------------------------------------------------------------------- /src/aleph/vm/orchestrator/migrations/script.py.mako: -------------------------------------------------------------------------------- 1 | """${message} 2 | 3 | Revision ID: ${up_revision} 4 | Revises: ${down_revision | comma,n} 5 | Create Date: ${create_date} 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | ${imports if imports else ""} 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = ${repr(up_revision)} 14 | down_revision = ${repr(down_revision)} 15 | branch_labels = ${repr(branch_labels)} 16 | depends_on = ${repr(depends_on)} 17 | 18 | 19 | def upgrade() -> None: 20 | ${upgrades if upgrades else "pass"} 21 | 22 | 23 | def downgrade() -> None: 24 | ${downgrades if downgrades else "pass"} 25 | -------------------------------------------------------------------------------- /src/aleph/vm/orchestrator/migrations/versions/0001_bbb12a12372e_execution_records.py: -------------------------------------------------------------------------------- 1 | """execution records 2 | 3 | Revision ID: bbb12a12372e 4 | Revises: 5 | Create Date: 2022-09-28 18:52:16.431200 6 | 7 | """ 8 | 9 | import sqlalchemy as sa 10 | from alembic import op 11 | 12 | # revision identifiers, used by Alembic. 
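# These revision scripts are applied in order by Alembic. With the alembic.ini shown
# earlier, a CRN database can be brought up to date with something like
# `alembic upgrade head`, run from the orchestrator directory since script_location
# is relative. The inspector checks below make each migration safe to re-run on
# nodes where parts of the schema already exist.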
13 | from sqlalchemy import create_engine 14 | from sqlalchemy.engine import reflection 15 | 16 | from aleph.vm.conf import make_db_url 17 | 18 | revision = "bbb12a12372e" 19 | down_revision = None 20 | branch_labels = None 21 | depends_on = None 22 | 23 | 24 | def upgrade() -> None: 25 | engine = create_engine(make_db_url()) 26 | inspector = reflection.Inspector.from_engine(engine) 27 | 28 | # The table already exists on most CRNs. 29 | tables = inspector.get_table_names() 30 | if "records" not in tables: 31 | op.create_table( 32 | "records", 33 | sa.Column("uuid", sa.String(), nullable=False), 34 | sa.Column("vm_hash", sa.String(), nullable=False), 35 | sa.Column("time_defined", sa.DateTime(), nullable=False), 36 | sa.Column("time_prepared", sa.DateTime(), nullable=True), 37 | sa.Column("time_started", sa.DateTime(), nullable=True), 38 | sa.Column("time_stopping", sa.DateTime(), nullable=True), 39 | sa.Column("cpu_time_user", sa.Float(), nullable=True), 40 | sa.Column("cpu_time_system", sa.Float(), nullable=True), 41 | sa.Column("io_read_count", sa.Integer(), nullable=True), 42 | sa.Column("io_write_count", sa.Integer(), nullable=True), 43 | sa.Column("io_read_bytes", sa.Integer(), nullable=True), 44 | sa.Column("io_write_bytes", sa.Integer(), nullable=True), 45 | sa.Column("vcpus", sa.Integer(), nullable=False), 46 | sa.Column("memory", sa.Integer(), nullable=False), 47 | sa.Column("network_tap", sa.String(), nullable=True), 48 | sa.PrimaryKeyConstraint("uuid"), 49 | ) 50 | 51 | # Support intermediate versions that have the records table 52 | # but without the network_tap column 53 | records_columns = [column["name"] for column in inspector.get_columns("records")] 54 | if "network_tap" not in records_columns: 55 | op.add_column("records", sa.Column("network_tap", sa.String(), nullable=True)) 56 | 57 | # ### end Alembic commands ### 58 | 59 | 60 | def downgrade() -> None: 61 | # ### commands auto generated by Alembic - please adjust! ### 62 | op.drop_table("records") 63 | # ### end Alembic commands ### 64 | -------------------------------------------------------------------------------- /src/aleph/vm/orchestrator/migrations/versions/0002_5c6ae643c69b_add_gpu_column_to_executions_table.py: -------------------------------------------------------------------------------- 1 | """add gpu table 2 | 3 | Revision ID: 5c6ae643c69b 4 | Revises: bbb12a12372e 5 | Create Date: 2024-12-09 19:40:19.279735 6 | 7 | """ 8 | 9 | import sqlalchemy as sa 10 | from alembic import op 11 | 12 | # revision identifiers, used by Alembic. 13 | from sqlalchemy import create_engine 14 | from sqlalchemy.engine import reflection 15 | 16 | from aleph.vm.conf import make_db_url 17 | 18 | revision = "5c6ae643c69b" 19 | down_revision = "bbb12a12372e" 20 | branch_labels = None 21 | depends_on = None 22 | 23 | 24 | def upgrade() -> None: 25 | engine = create_engine(make_db_url()) 26 | inspector = reflection.Inspector.from_engine(engine) 27 | 28 | # The table already exists on most CRNs. 
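# Inspect the live schema first and only add the "gpus" column when it is missing,
# so re-running this migration on an already-migrated node is a no-op.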
29 | tables = inspector.get_table_names() 30 | if "executions" in tables: 31 | columns = inspector.get_columns("executions") 32 | column_names = [c["name"] for c in columns] 33 | if "gpus" not in column_names: 34 | op.add_column("executions", sa.Column("gpus", sa.JSON(), nullable=True)) 35 | 36 | 37 | def downgrade() -> None: 38 | op.drop_column("executions", "gpus") 39 | -------------------------------------------------------------------------------- /src/aleph/vm/orchestrator/migrations/versions/2da719d72cea_add_mapped_ports_column.py: -------------------------------------------------------------------------------- 1 | """add mapped_ports column 2 | 3 | Revision ID: 2da719d72cea 4 | Revises: 5c6ae643c69b 5 | Create Date: 2025-05-29 23:20:42.801850 6 | 7 | """ 8 | 9 | import sqlalchemy as sa 10 | from alembic import op 11 | 12 | # revision identifiers, used by Alembic. 13 | from sqlalchemy import create_engine 14 | from sqlalchemy.engine import reflection 15 | 16 | from aleph.vm.conf import make_db_url 17 | 18 | # revision identifiers, used by Alembic. 19 | revision = "2da719d72cea" 20 | down_revision = "5c6ae643c69b" 21 | branch_labels = None 22 | depends_on = None 23 | 24 | 25 | def upgrade() -> None: 26 | engine = create_engine(make_db_url()) 27 | inspector = reflection.Inspector.from_engine(engine) 28 | 29 | # The table already exists on most CRNs. 30 | tables = inspector.get_table_names() 31 | if "executions" in tables: 32 | columns = inspector.get_columns("executions") 33 | column_names = [c["name"] for c in columns] 34 | if "mapped_ports" not in column_names: 35 | op.add_column("executions", sa.Column("mapped_ports", sa.JSON(), nullable=True)) 36 | 37 | 38 | def downgrade() -> None: 39 | op.drop_column("executions", "mapped_ports") 40 | -------------------------------------------------------------------------------- /src/aleph/vm/orchestrator/migrations/versions/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/aleph-vm/35ce27f1e51fd1bc86ceea502a81de77c26c5fe3/src/aleph/vm/orchestrator/migrations/versions/__init__.py -------------------------------------------------------------------------------- /src/aleph/vm/orchestrator/pubsub.py: -------------------------------------------------------------------------------- 1 | """ 2 | Small async PubSub implementation. 3 | Used to trigger VM shutdown on updates. 
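Sketch of the intended flow (names are illustrative): a task watching an execution
awaits PubSub.msubscribe(vm_hash, content_ref), while the message processor calls
await pubsub.publish(vm_hash, updated_message); the publish wakes the watcher,
which can then stop the outdated VM.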
4 | """ 5 | 6 | import asyncio 7 | import logging 8 | from collections.abc import Hashable 9 | 10 | from aleph_message.models import AlephMessage, ChainRef, ItemHash 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | 15 | class PubSub: 16 | subscribers: dict[Hashable, set[asyncio.Queue[set]]] 17 | 18 | def __init__(self): 19 | self.subscribers = {} 20 | 21 | async def subscribe(self, key): 22 | queue: asyncio.Queue[AlephMessage] = asyncio.Queue() 23 | self.subscribers.setdefault(key, set()).add(queue) 24 | await queue.get() 25 | 26 | # Cleanup: remove the queue from the subscribers 27 | subscriber = self.subscribers.get(key) 28 | if subscriber: 29 | subscriber.discard(queue) 30 | # Remove keys with no remaining queue 31 | if not self.subscribers.get(key): 32 | self.subscribers.pop(key) 33 | 34 | async def msubscribe(self, *keys): 35 | """Subscribe to multiple keys""" 36 | keys = tuple(key for key in keys if key is not None) 37 | logger.debug(f"msubscribe({keys})") 38 | 39 | queue: asyncio.Queue[AlephMessage] = asyncio.Queue() 40 | 41 | # Register the queue on all keys 42 | for key in keys: 43 | self.subscribers.setdefault(key, set()).add(queue) 44 | 45 | # Wait for any subscription 46 | await queue.get() 47 | 48 | # Cleanup: remove the queue from the subscribers 49 | for key in keys: 50 | for subscriber in list(self.subscribers.values()): 51 | subscriber.discard(queue) 52 | # Remove keys with no remaining queue (empty set remaining) 53 | if self.subscribers.get(key) == set(): 54 | self.subscribers.pop(key) 55 | 56 | async def publish(self, key: ItemHash | str | ChainRef, value: AlephMessage): 57 | for queue in self.subscribers.get(key, ()): 58 | await queue.put(value) 59 | -------------------------------------------------------------------------------- /src/aleph/vm/orchestrator/reactor.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from collections.abc import Coroutine 3 | 4 | from aleph_message.models import AlephMessage 5 | from aleph_message.models.execution.environment import Subscription 6 | 7 | from aleph.vm.pool import VmPool 8 | from aleph.vm.utils import create_task_log_exceptions 9 | 10 | from .pubsub import PubSub 11 | from .run import run_code_on_event 12 | 13 | logger = logging.getLogger(__name__) 14 | 15 | 16 | def is_equal_or_includes(value, compare_to) -> bool: 17 | if isinstance(value, str): 18 | return value == compare_to 19 | elif isinstance(value, dict): 20 | for subkey, subvalue in value.items(): 21 | if not hasattr(compare_to, subkey): 22 | return False 23 | if not is_equal_or_includes(subvalue, getattr(compare_to, subkey)): 24 | return False 25 | return True 26 | else: 27 | msg = "Unsupported value" 28 | raise ValueError(msg) 29 | 30 | 31 | def subscription_matches(subscription: Subscription, message: AlephMessage) -> bool: 32 | if not subscription: 33 | # Require at least one value to match 34 | return False 35 | for key, value in subscription.dict().items(): 36 | if not is_equal_or_includes(value, getattr(message, key)): 37 | return False 38 | return True 39 | 40 | 41 | class Reactor: 42 | pubsub: PubSub 43 | pool: VmPool 44 | listeners: list[AlephMessage] 45 | 46 | def __init__(self, pubsub: PubSub, pool: VmPool): 47 | self.pubsub = pubsub 48 | self.pool = pool 49 | self.listeners = [] 50 | 51 | async def trigger(self, message: AlephMessage): 52 | coroutines: list[Coroutine] = [] 53 | 54 | for listener in self.listeners: 55 | if not listener.content.on.message: 56 | logger.warning( 57 | r"Program 
with no subscription was registered in reactor listeners: {listener.item_hash}" 58 | ) 59 | continue 60 | 61 | for subscription in listener.content.on.message: 62 | if subscription_matches(subscription, message): 63 | vm_hash = listener.item_hash 64 | event = message.model_dump_json() 65 | # Register the listener in the list of coroutines to run asynchronously: 66 | coroutines.append(run_code_on_event(vm_hash, event, self.pubsub, pool=self.pool)) 67 | break 68 | 69 | # Call all listeners asynchronously from the event loop: 70 | for coroutine in coroutines: 71 | create_task_log_exceptions(coroutine) 72 | 73 | def register(self, message: AlephMessage): 74 | if message.content.on.message: 75 | self.listeners.append(message) 76 | else: 77 | logger.debug(f"Program with no subscription cannot be registered in reactor listeners: {message.item_hash}") 78 | -------------------------------------------------------------------------------- /src/aleph/vm/orchestrator/utils.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime, timedelta, timezone 2 | from decimal import ROUND_FLOOR, Decimal 3 | from logging import getLogger 4 | from typing import Any, TypedDict 5 | 6 | import aiohttp 7 | 8 | from aleph.vm.conf import settings 9 | 10 | logger = getLogger(__name__) 11 | 12 | 13 | class AggregateSettingsDict(TypedDict): 14 | compatible_gpus: list[Any] 15 | community_wallet_address: str 16 | community_wallet_timestamp: int 17 | 18 | 19 | LAST_AGGREGATE_SETTINGS: AggregateSettingsDict | None = None 20 | LAST_AGGREGATE_SETTINGS_FETCHED_AT: datetime | None = None 21 | PRICE_PRECISION = 18 # Price precision 22 | 23 | 24 | async def fetch_aggregate_settings() -> AggregateSettingsDict | None: 25 | """ 26 | Get the settings Aggregate dict from the PyAleph API Aggregate. 
27 | 28 | API Endpoint: 29 | GET /api/v0/aggregates/{address}.json?keys=settings 30 | 31 | For more details, see the PyAleph API documentation: 32 | https://github.com/aleph-im/pyaleph/blob/master/src/aleph/web/controllers/routes.py#L62 33 | """ 34 | 35 | async with aiohttp.ClientSession() as session: 36 | url = f"{settings.API_SERVER}/api/v0/aggregates/{settings.SETTINGS_AGGREGATE_ADDRESS}.json?keys=settings" 37 | logger.info(f"Fetching settings aggregate from {url}") 38 | resp = await session.get(url) 39 | 40 | # Raise an error if the request failed 41 | resp.raise_for_status() 42 | 43 | resp_data = await resp.json() 44 | return resp_data["data"]["settings"] 45 | 46 | 47 | async def update_aggregate_settings(): 48 | global LAST_AGGREGATE_SETTINGS # noqa: PLW0603 49 | global LAST_AGGREGATE_SETTINGS_FETCHED_AT # noqa: PLW0603 50 | 51 | if ( 52 | not LAST_AGGREGATE_SETTINGS 53 | or LAST_AGGREGATE_SETTINGS_FETCHED_AT 54 | and datetime.now(tz=timezone.utc) - LAST_AGGREGATE_SETTINGS_FETCHED_AT > timedelta(minutes=1) 55 | ): 56 | try: 57 | aggregate = await fetch_aggregate_settings() 58 | LAST_AGGREGATE_SETTINGS = aggregate 59 | LAST_AGGREGATE_SETTINGS_FETCHED_AT = datetime.now(tz=timezone.utc) 60 | 61 | except Exception: 62 | logger.exception("Failed to fetch aggregate settings") 63 | 64 | 65 | async def get_aggregate_settings() -> AggregateSettingsDict | None: 66 | """The settings aggregate is a special aggregate used to share some common settings for VM setup 67 | 68 | Ensure the cached version is up to date and return it""" 69 | await update_aggregate_settings() 70 | 71 | if not LAST_AGGREGATE_SETTINGS: 72 | logger.error("No setting aggregate") 73 | return LAST_AGGREGATE_SETTINGS 74 | 75 | 76 | async def get_community_wallet_address() -> str | None: 77 | setting_aggr = await get_aggregate_settings() 78 | return setting_aggr and setting_aggr.get("community_wallet_address") 79 | 80 | 81 | async def get_community_wallet_start() -> datetime: 82 | """Community wallet start time. 83 | 84 | After this timestamp. 
81 | async def get_community_wallet_start() -> datetime:
82 |     """Return the community wallet start time.
83 | 
84 |     After this timestamp, new PAYG executions must include a payment to the community wallet."""
85 |     setting_aggr = await get_aggregate_settings()
86 |     if setting_aggr is None or "community_wallet_timestamp" not in setting_aggr:
87 |         return datetime.now(tz=timezone.utc)
88 |     timestamp = setting_aggr["community_wallet_timestamp"]
89 |     start_datetime = datetime.fromtimestamp(timestamp, tz=timezone.utc)
90 |     return start_datetime
91 | 
92 | 
93 | async def is_after_community_wallet_start(dt: datetime | None = None) -> bool:
94 |     """Check whether the given datetime (default: now) is past the community wallet start time."""
95 |     if not dt:
96 |         dt = datetime.now(tz=timezone.utc)
97 |     start_dt = await get_community_wallet_start()
98 |     return dt > start_dt
99 | 
100 | 
101 | def format_cost(v: Decimal | str, p: int = PRICE_PRECISION) -> Decimal:
102 |     return Decimal(v).quantize(Decimal(1) / Decimal(10**p), ROUND_FLOOR)
103 | 
104 | 
105 | def get_compatible_gpus() -> list[Any]:
106 |     if not LAST_AGGREGATE_SETTINGS:
107 |         return []
108 |     return LAST_AGGREGATE_SETTINGS["compatible_gpus"]
109 | 
-------------------------------------------------------------------------------- /src/aleph/vm/orchestrator/views/host_status.py: --------------------------------------------------------------------------------
1 | import logging
2 | import socket
3 | from collections.abc import Awaitable, Callable
4 | from typing import Any
5 | 
6 | import aiohttp
7 | 
8 | from aleph.vm.conf import settings
9 | 
10 | logger = logging.getLogger(__name__)
11 | 
12 | 
13 | def return_false_on_timeout(func: Callable[..., Awaitable[Any]]) -> Callable[..., Awaitable[bool]]:
14 |     async def wrapper(*args: Any, **kwargs: Any) -> bool:
15 |         try:
16 |             return await func(*args, **kwargs)
17 |         except TimeoutError:
18 |             logger.warning(f"Timeout while checking {func.__name__}")
19 |             return False
20 | 
21 |     return wrapper
22 | 
23 | 
24 | async def check_ip_connectivity(url: str, socket_family: socket.AddressFamily = socket.AF_UNSPEC) -> bool:
25 |     timeout = aiohttp.ClientTimeout(total=5)
26 |     async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(family=socket_family), timeout=timeout) as session:
27 |         try:
28 |             async with session.get(url) as resp:
29 |                 # We expect the Quad9 endpoints to return a 404 error, but other endpoints may return a 200
30 |                 if resp.status not in (200, 404):
31 |                     resp.raise_for_status()
32 |                 return True
33 |         except aiohttp.ClientConnectorError:
34 |             return False
35 | 
36 | 
37 | @return_false_on_timeout
38 | async def check_host_egress_ipv4() -> bool:
39 |     """Check if the host has IPv4 connectivity."""
40 |     return await check_ip_connectivity(settings.CONNECTIVITY_IPV4_URL)
41 | 
42 | 
43 | @return_false_on_timeout
44 | async def check_host_egress_ipv6() -> bool:
45 |     """Check if the host has IPv6 connectivity."""
46 |     return await check_ip_connectivity(settings.CONNECTIVITY_IPV6_URL)
47 | 
48 | 
49 | async def resolve_dns(hostname: str) -> tuple[str | None, str | None]:
50 |     """Resolve a hostname to an IPv4 and an IPv6 address."""
51 |     ipv4: str | None = None
52 |     ipv6: str | None = None
53 | 
54 |     info = socket.getaddrinfo(hostname, 80, proto=socket.IPPROTO_TCP)
55 |     if not info:
56 |         logger.error("DNS resolution failed")
57 | 
58 |     # Iterate over the results to find the IPv4 and IPv6 addresses; they may not all be present.
59 | # The function returns a list of 5-tuples with the following structure: 60 | # (family, type, proto, canonname, sockaddr) 61 | for info_tuple in info: 62 | if info_tuple[0] == socket.AF_INET: 63 | ipv4 = info_tuple[4][0] 64 | elif info_tuple[0] == socket.AF_INET6: 65 | ipv6 = info_tuple[4][0] 66 | 67 | if ipv4 and not ipv6: 68 | logger.warning(f"DNS resolution for {hostname} returned only an IPv4 address") 69 | elif ipv6 and not ipv4: 70 | logger.warning(f"DNS resolution for {hostname} returned only an IPv6 address") 71 | 72 | return ipv4, ipv6 73 | 74 | 75 | async def check_dns_ipv4() -> bool: 76 | """Check if DNS resolution is working via IPv4.""" 77 | ipv4, _ = await resolve_dns(settings.CONNECTIVITY_DNS_HOSTNAME) 78 | return bool(ipv4) 79 | 80 | 81 | async def check_dns_ipv6() -> bool: 82 | """Check if DNS resolution is working via IPv6.""" 83 | _, ipv6 = await resolve_dns(settings.CONNECTIVITY_DNS_HOSTNAME) 84 | return bool(ipv6) 85 | 86 | 87 | async def check_domain_resolution_ipv4() -> bool: 88 | """Check if the host's hostname resolves to an IPv4 address.""" 89 | ipv4, _ = await resolve_dns(settings.DOMAIN_NAME) 90 | return bool(ipv4) 91 | 92 | 93 | async def check_domain_resolution_ipv6() -> bool: 94 | """Check if the host's hostname resolves to an IPv6 address.""" 95 | _, ipv6 = await resolve_dns(settings.DOMAIN_NAME) 96 | return bool(ipv6) 97 | 98 | 99 | @return_false_on_timeout 100 | async def check_domain_ipv4() -> bool: 101 | """Check if the host's hostname is accessible via IPv4.""" 102 | return await check_ip_connectivity(settings.DOMAIN_NAME, socket.AF_INET) 103 | 104 | 105 | @return_false_on_timeout 106 | async def check_domain_ipv6() -> bool: 107 | """Check if the host's hostname is accessible via IPv6.""" 108 | return await check_ip_connectivity(settings.DOMAIN_NAME, socket.AF_INET6) 109 | -------------------------------------------------------------------------------- /src/aleph/vm/orchestrator/views/static/aleph-cloud-v2.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /src/aleph/vm/orchestrator/views/static/main.css: -------------------------------------------------------------------------------- 1 | body { 2 | font-family: IBM Plex Regular, monospace; 3 | white-space: normal; 4 | margin: auto; 5 | max-width: 800px; 6 | } 7 | 8 | details { 9 | margin-top: 30px; 10 | } 11 | 12 | main { 13 | width: 90vw; 14 | margin: 2vh auto; 15 | max-width: 800px; 16 | } 17 | 18 | progress { 19 | width: 100%; 20 | height: 0.5em; 21 | } 22 | 23 | .virtualization-wrapper { 24 | height: 30px; 25 | display: flex; 26 | align-items: end; 27 | } 28 | 29 | #loader-container { 30 | text-align: center; 31 | padding: 20px; 32 | height: 80px; 33 | vertical-align: center; 34 | } 35 | 36 | .loader { 37 | display: inline-block; 38 | width: 5px; 39 | height: 20px; 40 | margin: 0.3px; 41 | background: #207AC9; 42 | } 43 | 44 | @keyframes move { 45 | 0% { 46 | height: 10px; 47 | } 48 | 49 | 50% { 50 | height: 5px; 51 | } 52 | 53 | 100% { 54 | height: 10px; 55 | } 56 | } 57 | 58 | @keyframes move2 { 59 | 0% { 60 | height: 5px; 61 | } 62 | 63 | 50% { 64 | height: 10px; 65 | } 66 | 67 | 100% { 68 | height: 5px; 69 | } 70 | } 71 | 72 | #loader-one { 73 | animation-name: move; 74 | animation-duration: 1s; 75 | animation-iteration-count: infinite; 76 | } 77 | 78 | #loader-two { 79 | 
animation-name: move2; 80 | animation-duration: 1s; 81 | animation-iteration-count: infinite; 82 | } 83 | 84 | #loader-three { 85 | animation-name: move; 86 | animation-duration: 1s; 87 | animation-iteration-count: infinite; 88 | } 89 | 90 | #chart { 91 | width: 100%; 92 | height: 300px; 93 | } 94 | 95 | .flex { 96 | display: flex; 97 | justify-content: space-between; 98 | } 99 | 100 | #chart-wrapper{ 101 | display: none; 102 | } 103 | 104 | footer{ 105 | font-size: 70%; 106 | opacity: .75; 107 | } 108 | -------------------------------------------------------------------------------- /src/aleph/vm/orchestrator/vm/__init__.py: -------------------------------------------------------------------------------- 1 | from aleph.vm.controllers.firecracker import ( 2 | AlephFirecrackerInstance, 3 | AlephFirecrackerProgram, 4 | ) 5 | 6 | __all__ = ( 7 | "AlephFirecrackerProgram", 8 | "AlephFirecrackerInstance", 9 | ) 10 | -------------------------------------------------------------------------------- /src/aleph/vm/sevclient.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | from aleph.vm.utils import run_in_subprocess 4 | 5 | 6 | class SevClient: 7 | sev_dir: Path 8 | sev_ctl_executable: Path 9 | certificates_dir: Path 10 | certificates_archive: Path 11 | 12 | def __init__(self, sev_dir: Path, sev_ctl_executable: Path): 13 | self.sev_dir = sev_dir 14 | self.sev_ctl_executable = sev_ctl_executable 15 | self.certificates_dir = sev_dir / "platform" 16 | self.certificates_dir.mkdir(exist_ok=True, parents=True) 17 | self.certificates_archive = self.certificates_dir / "certs_export.cert" 18 | 19 | async def sev_ctl_cmd(self, *args) -> bytes: 20 | """Run a command of the 'sevctl' tool.""" 21 | return await run_in_subprocess( 22 | [str(self.sev_ctl_executable), *args], 23 | check=True, 24 | ) 25 | 26 | async def get_certificates(self) -> Path: 27 | if not self.certificates_archive.is_file(): 28 | _ = await self.sev_ctl_cmd("export", str(self.certificates_archive)) 29 | 30 | return self.certificates_archive 31 | -------------------------------------------------------------------------------- /src/aleph/vm/systemd.py: -------------------------------------------------------------------------------- 1 | """ 2 | async SystemD Manager implementation. 3 | """ 4 | 5 | import logging 6 | 7 | import dbus 8 | from dbus import DBusException, SystemBus 9 | from dbus.proxies import Interface 10 | 11 | logger = logging.getLogger(__name__) 12 | 13 | 14 | class SystemDManager: 15 | """SystemD Manager class. 16 | 17 | Used to manage the systemd services on the host on Linux. 
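    Calls go through the `org.freedesktop.systemd1` D-Bus API via python-dbus,
    so each method below performs a synchronous (blocking) D-Bus call.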
18 | """ 19 | 20 | bus: SystemBus 21 | manager: Interface 22 | 23 | def __init__(self): 24 | self.bus = dbus.SystemBus() 25 | systemd = self.bus.get_object("org.freedesktop.systemd1", "/org/freedesktop/systemd1") 26 | self.manager = dbus.Interface(systemd, "org.freedesktop.systemd1.Manager") 27 | 28 | def stop_and_disable(self, service: str) -> None: 29 | if self.is_service_active(service): 30 | self.stop(service) 31 | if self.is_service_enabled(service): 32 | self.disable(service) 33 | 34 | def enable(self, service: str) -> None: 35 | self.manager.EnableUnitFiles([service], False, True) 36 | logger.debug(f"Enabled {service} service") 37 | 38 | def start(self, service: str) -> None: 39 | self.manager.StartUnit(service, "replace") 40 | logger.debug(f"Started {service} service") 41 | 42 | def stop(self, service: str) -> None: 43 | self.manager.StopUnit(service, "replace") 44 | logger.debug(f"Stopped {service} service") 45 | 46 | def restart(self, service: str) -> None: 47 | self.manager.RestartUnit(service, "replace") 48 | logger.debug(f"Restarted {service} service") 49 | 50 | def disable(self, service: str) -> None: 51 | self.manager.DisableUnitFiles([service], False) 52 | logger.debug(f"Disabled {service} service") 53 | 54 | def is_service_enabled(self, service: str) -> bool: 55 | try: 56 | return self.manager.GetUnitFileState(service) == "enabled" 57 | except DBusException as error: 58 | logger.error(error) 59 | return False 60 | 61 | def is_service_active(self, service: str) -> bool: 62 | try: 63 | if not self.is_service_enabled(service): 64 | return False 65 | unit_path = self.manager.GetUnit(service) 66 | systemd_service = self.bus.get_object("org.freedesktop.systemd1", object_path=unit_path) 67 | unit = dbus.Interface(systemd_service, "org.freedesktop.systemd1.Unit") 68 | unit_properties = dbus.Interface(unit, "org.freedesktop.DBus.Properties") 69 | active_state = unit_properties.Get("org.freedesktop.systemd1.Unit", "ActiveState") 70 | return active_state == "active" 71 | except DBusException as error: 72 | logger.error(error) 73 | return False 74 | 75 | async def enable_and_start(self, service: str) -> None: 76 | if not self.is_service_enabled(service): 77 | self.enable(service) 78 | if not self.is_service_active(service): 79 | self.start(service) 80 | -------------------------------------------------------------------------------- /src/aleph/vm/utils/aggregate.py: -------------------------------------------------------------------------------- 1 | from logging import getLogger 2 | 3 | import aiohttp 4 | 5 | from aleph.vm.conf import settings 6 | 7 | logger = getLogger(__name__) 8 | 9 | 10 | async def get_user_aggregate(addr: str, keys_arg: list[str]) -> dict: 11 | """ 12 | Get the settings Aggregate dict from the PyAleph API Aggregate. 
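    In practice this helper is generic: `keys_arg` is joined into the `keys`
    query parameter, so any aggregate keys can be fetched for the address.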
13 | 14 | API Endpoint: 15 | GET /api/v0/aggregates/{address}.json?keys=settings 16 | 17 | For more details, see the PyAleph API documentation: 18 | https://github.com/aleph-im/pyaleph/blob/master/src/aleph/web/controllers/routes.py#L62 19 | """ 20 | 21 | async with aiohttp.ClientSession() as session: 22 | url = f"{settings.API_SERVER}/api/v0/aggregates/{addr}.json" 23 | logger.info(f"Fetching aggregate from {url}") 24 | resp = await session.get(url, params={"keys": ",".join(keys_arg)}) 25 | # No aggregate for the user 26 | if resp.status == 404: 27 | return {} 28 | # Raise an error if the request failed 29 | 30 | resp.raise_for_status() 31 | 32 | resp_data = await resp.json() 33 | return resp_data["data"] or {} 34 | 35 | 36 | async def get_user_settings(addr: str, key) -> dict: 37 | aggregate = await get_user_aggregate(addr, [key]) 38 | return aggregate.get(key, {}) 39 | -------------------------------------------------------------------------------- /src/aleph/vm/utils/logs.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | from collections.abc import Callable, Generator 4 | from datetime import datetime, timedelta 5 | from typing import TypedDict 6 | 7 | from systemd import journal 8 | 9 | logger = logging.getLogger(__name__) 10 | 11 | 12 | class EntryDict(TypedDict): 13 | SYSLOG_IDENTIFIER: str 14 | MESSAGE: str 15 | __REALTIME_TIMESTAMP: datetime 16 | 17 | 18 | def make_logs_queue(stdout_identifier, stderr_identifier, skip_past=False) -> tuple[asyncio.Queue, Callable[[], None]]: 19 | """Create a queue which streams the logs for the process. 20 | 21 | @param stdout_identifier: journald identifier for process stdout 22 | @param stderr_identifier: journald identifier for process stderr 23 | @param skip_past: Skip past history. 24 | @return: queue and function to cancel the queue. 25 | 26 | The consumer is required to call the queue cancel function when it's done consuming the queue. 27 | 28 | Works by creating a journald reader, and using `add_reader` to call a callback when 29 | data is available for reading. 30 | In the callback we check the message type and fill the queue accordingly 31 | 32 | For more information refer to the sd-journal(3) manpage 33 | and systemd.journal module documentation. 
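    Each queue entry is a `(log_type, message)` tuple, where `log_type` is
    "stdout" or "stderr" depending on which identifier matched.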
34 | """ 35 | journal_reader = journal.Reader() 36 | journal_reader.add_match(SYSLOG_IDENTIFIER=stdout_identifier) 37 | journal_reader.add_match(SYSLOG_IDENTIFIER=stderr_identifier) 38 | queue: asyncio.Queue = asyncio.Queue(maxsize=5) 39 | tasks: list[asyncio.Task] = [] 40 | 41 | loop = asyncio.get_event_loop() 42 | 43 | async def process_messages() -> None: 44 | """Enqueue all the available log entries, wait if queue is full, then wait for new message via add_reader""" 45 | # Remove reader so we don't get called again while processing 46 | loop.remove_reader(journal_reader.fileno()) 47 | entry: EntryDict 48 | for entry in journal_reader: 49 | log_type = "stdout" if entry["SYSLOG_IDENTIFIER"] == stdout_identifier else "stderr" 50 | msg = entry["MESSAGE"] 51 | # will wait if queue is full 52 | await queue.put((log_type, msg)) 53 | journal_reader.process() # reset fd status 54 | journal_reader.process() # reset fd status 55 | # Call _ready_for_read read when entries are readable again, this is non-blocking 56 | loop.add_reader(journal_reader.fileno(), _ready_for_read) 57 | 58 | def _ready_for_read() -> None: 59 | # wrapper around process_messages as add_reader don't take an async func 60 | task = loop.create_task(process_messages(), name=f"process_messages-queue-{id(queue)}") 61 | tasks.append(task) 62 | task.add_done_callback(tasks.remove) 63 | 64 | if skip_past: 65 | # seek_tail doesn't work see https://github.com/systemd/systemd/issues/17662 66 | journal_reader.seek_realtime(datetime.now() - timedelta(seconds=10)) 67 | 68 | _ready_for_read() 69 | 70 | def do_cancel(): 71 | logger.info(f"cancelling queue and reader {journal_reader}") 72 | loop.remove_reader(journal_reader.fileno()) 73 | for task in tasks: 74 | task.cancel() 75 | journal_reader.close() 76 | 77 | return queue, do_cancel 78 | 79 | 80 | def get_past_vm_logs(stdout_identifier, stderr_identifier) -> Generator[EntryDict, None, None]: 81 | """Get existing log for the VM identifiers. 82 | 83 | @param stdout_identifier: journald identifier for process stdout 84 | @param stderr_identifier: journald identifier for process stderr 85 | @return: an iterator of log entry 86 | 87 | Works by creating a journald reader, and using `add_reader` to call a callback when 88 | data is available for reading. 89 | 90 | For more information refer to the sd-journal(3) manpage 91 | and systemd.journal module documentation. 92 | """ 93 | r = journal.Reader() 94 | r.add_match(SYSLOG_IDENTIFIER=stdout_identifier) 95 | r.add_match(SYSLOG_IDENTIFIER=stderr_identifier) 96 | 97 | r.seek_head() 98 | yield from r 99 | -------------------------------------------------------------------------------- /src/aleph/vm/utils/test_helpers.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import json 3 | 4 | import eth_account.messages 5 | import pytest 6 | from eth_account.datastructures import SignedMessage 7 | from eth_account.signers.local import LocalAccount 8 | from jwcrypto import jwk 9 | from jwcrypto.jwa import JWA 10 | 11 | 12 | @pytest.fixture 13 | def patch_datetime_now(monkeypatch): 14 | """Fixture for patching the datetime.now() and datetime.utcnow() methods 15 | to return a fixed datetime object. 16 | This fixture creates a subclass of `datetime.datetime` called `mydatetime`, 17 | which overrides the `now()` and `utcnow()` class methods to return a fixed 18 | datetime object specified by `FAKE_TIME`. 
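    Once applied, both methods return 2010-12-25 17:05:55 (`FAKE_TIME`)
    regardless of the wall clock.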
19 | """ 20 | 21 | class MockDateTime(datetime.datetime): 22 | FAKE_TIME = datetime.datetime(2010, 12, 25, 17, 5, 55) 23 | 24 | @classmethod 25 | def now(cls, tz=None, *args, **kwargs): 26 | return cls.FAKE_TIME.replace(tzinfo=tz) 27 | 28 | @classmethod 29 | def utcnow(cls, *args, **kwargs): 30 | return cls.FAKE_TIME 31 | 32 | monkeypatch.setattr(datetime, "datetime", MockDateTime) 33 | return MockDateTime 34 | 35 | 36 | async def generate_signer_and_signed_headers_for_operation( 37 | patch_datetime_now, operation_payload: dict 38 | ) -> tuple[LocalAccount, dict]: 39 | """Generate a temporary eth_account for testing and sign the operation with it""" 40 | account = eth_account.Account() 41 | signer_account = account.create() 42 | key = jwk.JWK.generate( 43 | kty="EC", 44 | crv="P-256", 45 | # key_ops=["verify"], 46 | ) 47 | pubkey = { 48 | "pubkey": json.loads(key.export_public()), 49 | "alg": "ECDSA", 50 | "domain": "localhost", 51 | "address": signer_account.address, 52 | "expires": (patch_datetime_now.FAKE_TIME + datetime.timedelta(days=1)).isoformat() + "Z", 53 | } 54 | pubkey_payload = json.dumps(pubkey).encode("utf-8").hex() 55 | signable_message = eth_account.messages.encode_defunct(hexstr=pubkey_payload) 56 | signed_message: SignedMessage = signer_account.sign_message(signable_message) 57 | pubkey_signature = to_0x_hex(signed_message.signature) 58 | pubkey_signature_header = json.dumps( 59 | { 60 | "payload": pubkey_payload, 61 | "signature": pubkey_signature, 62 | } 63 | ) 64 | payload_as_bytes = json.dumps(operation_payload).encode("utf-8") 65 | 66 | payload_signature = JWA.signing_alg("ES256").sign(key, payload_as_bytes) 67 | headers = { 68 | "X-SignedPubKey": pubkey_signature_header, 69 | "X-SignedOperation": json.dumps( 70 | { 71 | "payload": payload_as_bytes.hex(), 72 | "signature": payload_signature.hex(), 73 | } 74 | ), 75 | } 76 | return signer_account, headers 77 | 78 | 79 | def to_0x_hex(b: bytes) -> str: 80 | """ 81 | Convert the bytes to a 0x-prefixed hex string 82 | """ 83 | 84 | # force this for compat between different hexbytes versions which behave differenty 85 | # and conflict with other package don't allow us to have the version we want 86 | return "0x" + bytes.hex(b) 87 | -------------------------------------------------------------------------------- /src/aleph/vm/version.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from subprocess import STDOUT, CalledProcessError, check_output 3 | 4 | logger = logging.getLogger(__name__) 5 | 6 | 7 | def get_version_from_git() -> str | None: 8 | try: 9 | return check_output(("git", "describe", "--tags"), stderr=STDOUT).strip().decode() 10 | except FileNotFoundError: 11 | logger.warning("version: git not found") 12 | return None 13 | except CalledProcessError as err: 14 | logger.info("version: git description not available: %s", err.output.decode().strip()) 15 | return None 16 | 17 | 18 | def get_version_from_apt() -> str | None: 19 | try: 20 | import apt 21 | 22 | return apt.Cache().get("aleph-vm").installed.version 23 | except (ImportError, AttributeError): 24 | logger.warning("apt version not available") 25 | return None 26 | 27 | 28 | def get_version() -> str | None: 29 | return get_version_from_git() or get_version_from_apt() 30 | 31 | 32 | # The version number is hardcoded in the following line when packaging the software 33 | __version__ = get_version() or "version-unavailable" 34 | 
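# A sketch of the resolution order implemented above (values illustrative):
#
#   >>> from aleph.vm.version import get_version, __version__
#   >>> get_version()  # e.g. "1.4.0-5-gabc1234" from `git describe --tags`,
#   ...                # else the installed `aleph-vm` apt package version,
#   ...                # else None
#   >>> __version__    # "version-unavailable" when no source is available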
-------------------------------------------------------------------------------- /src/aleph/vm/vm_type.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | from aleph_message.models import ExecutableContent, InstanceContent, ProgramContent 4 | 5 | 6 | class VmType(Enum): 7 | microvm = 1 8 | persistent_program = 2 9 | instance = 3 10 | 11 | @staticmethod 12 | def from_message_content(content: ExecutableContent) -> "VmType": 13 | if isinstance(content, InstanceContent): 14 | return VmType.instance 15 | 16 | elif isinstance(content, ProgramContent): 17 | if content.on.persistent: 18 | return VmType.persistent_program 19 | return VmType.microvm 20 | 21 | msg = f"Unexpected message content type: {type(content)}" 22 | raise TypeError(msg) 23 | -------------------------------------------------------------------------------- /tests/supervisor/test_interfaces.py: -------------------------------------------------------------------------------- 1 | from ipaddress import IPv4Interface 2 | from subprocess import run 3 | 4 | import pytest 5 | from pyroute2 import IPRoute 6 | 7 | from aleph.vm.network.interfaces import ( 8 | MissingInterfaceError, 9 | add_ip_address, 10 | create_tap_interface, 11 | delete_tap_interface, 12 | set_link_up, 13 | ) 14 | 15 | 16 | def test_create_tap_interface(): 17 | """Test the creation of a TAP interface and related error handling.""" 18 | test_device_name = "test_tap" 19 | try: 20 | with IPRoute() as ipr: 21 | create_tap_interface(ipr, test_device_name) 22 | # Check that the interface was created 23 | assert run(["ip", "link", "show", test_device_name], check=False).returncode == 0 24 | # Create the interface a second time, which should be ignored 25 | create_tap_interface(ipr, test_device_name) 26 | finally: 27 | run(["ip", "tuntap", "del", test_device_name, "mode", "tap"], check=False) 28 | 29 | 30 | def test_add_ip_address(): 31 | """Test the addition of an IP address to an interface.""" 32 | test_device_name = "test_tap" 33 | test_ipv4 = IPv4Interface(("10.10.10.10", 24)) 34 | try: 35 | with IPRoute() as ipr: 36 | # We need an interface to add an address to 37 | create_tap_interface(ipr, test_device_name) 38 | # Add an IP address to the interface 39 | add_ip_address(ipr, test_device_name, test_ipv4) 40 | # Check that the address was added 41 | assert run(["ip", "address", "show", test_device_name], check=False).returncode == 0 42 | # Add the same address again, which should be ignored 43 | add_ip_address(ipr, test_device_name, test_ipv4) 44 | finally: 45 | # Delete the interface, ignoring any errors 46 | run(["ip", "tuntap", "del", test_device_name, "mode", "tap"], check=False) 47 | 48 | # Without an interface, the function should raise an error 49 | with pytest.raises(MissingInterfaceError): 50 | add_ip_address(IPRoute(), test_device_name, test_ipv4) 51 | 52 | 53 | def test_link_up_down(): 54 | """Test the addition of an IP address to an interface.""" 55 | test_device_name = "test_tap" 56 | try: 57 | with IPRoute() as ipr: 58 | # We need an interface to set the link up 59 | create_tap_interface(ipr, test_device_name) 60 | 61 | set_link_up(ipr, test_device_name) 62 | # Check that the interface is up 63 | assert run(["ip", "link", "show", test_device_name], check=False).returncode == 0 64 | # Delete the interface 65 | delete_tap_interface(ipr, test_device_name) 66 | # Check that the interface is down 67 | assert run(["ip", "link", "show", test_device_name], check=False).returncode != 0 68 | finally: 69 | 
# Delete the interface, ignoring any errors 70 | run(["ip", "tuntap", "del", test_device_name, "mode", "tap"], check=False) 71 | -------------------------------------------------------------------------------- /tests/supervisor/test_ipv6_allocator.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from aleph.vm.network.hostnetwork import StaticIPv6Allocator 4 | from aleph.vm.vm_type import VmType 5 | 6 | # Avoid failures linked to settings when initializing the global VmPool object 7 | os.environ["ALEPH_VM_ALLOW_VM_NETWORKING"] = "False" 8 | 9 | from ipaddress import IPv6Network 10 | 11 | from aleph_message.models import ItemHash 12 | 13 | 14 | def test_static_ipv6_allocator(): 15 | allocator = StaticIPv6Allocator(ipv6_range=IPv6Network("1111:2222:3333:4444::/64"), subnet_prefix=124) 16 | ip_subnet = allocator.allocate_vm_ipv6_subnet( 17 | vm_id=3, 18 | vm_hash=ItemHash("8920215b2e961a4d4c59a8ceb2803af53f91530ff53d6704273ab4d380bc6446"), 19 | vm_type=VmType.microvm, 20 | ) 21 | assert ip_subnet == IPv6Network("1111:2222:3333:4444:0001:8920:215b:2e90/124") 22 | -------------------------------------------------------------------------------- /tests/supervisor/test_log.py: -------------------------------------------------------------------------------- 1 | from asyncio import QueueEmpty 2 | 3 | from aleph.vm.utils.logs import make_logs_queue 4 | 5 | 6 | def test_make_logs_queue(): 7 | stdout_identifier = "test_stdout" 8 | stderr_identifier = "test_stderr" 9 | queue, do_cancel = make_logs_queue(stdout_identifier, stderr_identifier) 10 | import pytest 11 | 12 | with pytest.raises(QueueEmpty): 13 | while queue.get_nowait(): 14 | queue.task_done() 15 | do_cancel() 16 | -------------------------------------------------------------------------------- /tests/supervisor/test_resolvectl_dns_servers.py: -------------------------------------------------------------------------------- 1 | # Avoid failures linked to nftables when initializing the global VmPool object 2 | import os 3 | from unittest import mock 4 | 5 | from aleph.vm.conf import resolvectl_dns_servers 6 | 7 | os.environ["ALEPH_VM_ALLOW_VM_NETWORKING"] = "False" 8 | 9 | 10 | def test_resolvectl(): 11 | with mock.patch( 12 | "aleph.vm.conf.check_output", 13 | return_value="Link 2 (eth0): 109.88.203.3 62.197.111.140\n", 14 | ): 15 | servers = {"109.88.203.3", "62.197.111.140"} 16 | 17 | dns_servers = set(resolvectl_dns_servers("eth0")) 18 | assert dns_servers == servers 19 | 20 | 21 | def test_resolvectl_ipv6(): 22 | with mock.patch( 23 | "aleph.vm.conf.check_output", 24 | return_value="Link 2 (eth0): 109.88.203.3 62.197.111.140 2a02:2788:fff0:7::3\n 2a02:2788:fff0:5::140\n", 25 | ): 26 | ipv4_servers = {"109.88.203.3", "62.197.111.140"} 27 | ipv6_servers = {"2a02:2788:fff0:7::3", "2a02:2788:fff0:5::140"} 28 | 29 | dns_servers = set(resolvectl_dns_servers("eth0")) 30 | assert dns_servers == ipv4_servers | ipv6_servers 31 | -------------------------------------------------------------------------------- /tests/supervisor/test_resources.py: -------------------------------------------------------------------------------- 1 | from unittest import mock 2 | 3 | from aleph.vm.resources import get_gpu_devices 4 | 5 | 6 | def mock_is_kernel_enabled_gpu(pci_host: str) -> bool: 7 | value = True if pci_host == "01:00.0" else False 8 | return value 9 | 10 | 11 | def test_get_gpu_devices(): 12 | class DevicesReturn: 13 | stdout: str = ( 14 | '00:1f.0 "ISA bridge [0601]" "Intel Corporation [8086]" 
"Device [7a06]" -r11 -p00 "ASUSTeK Computer Inc. [1043]" "Device [8882]"' 15 | '\n00:1f.4 "SMBus [0c05]" "Intel Corporation [8086]" "Raptor Lake-S PCH SMBus Controller [7a23]" -r11 -p00 "ASUSTeK Computer Inc. [1043]" "Device [8882]"' 16 | '\n00:1f.5 "Serial bus controller [0c80]" "Intel Corporation [8086]" "Raptor Lake SPI (flash) Controller [7a24]" -r11 -p00 "ASUSTeK Computer Inc. [1043]" "Device [8882]"' 17 | '\n01:00.0 "VGA compatible controller [0300]" "NVIDIA Corporation [10de]" "AD104GL [RTX 4000 SFF Ada Generation] [27b0]" -ra1 -p00 "NVIDIA Corporation [10de]" "AD104GL [RTX 4000 SFF Ada Generation] [16fa]"' 18 | '\n01:00.1 "Audio device [0403]" "NVIDIA Corporation [10de]" "Device [22bc]" -ra1 -p00 "NVIDIA Corporation [10de]" "Device [16fa]"' 19 | '\n02:00.0 "Non-Volatile memory controller [0108]" "Samsung Electronics Co Ltd [144d]" "NVMe SSD Controller PM9A1/PM9A3/980PRO [a80a]" -p02 "Samsung Electronics Co Ltd [144d]" "NVMe SSD Controller PM9A1/PM9A3/980PRO [aa0a]"' 20 | ) 21 | 22 | with mock.patch( 23 | "subprocess.run", 24 | return_value=DevicesReturn(), 25 | ): 26 | with mock.patch( 27 | "aleph.vm.resources.is_kernel_enabled_gpu", 28 | wraps=mock_is_kernel_enabled_gpu, 29 | ): 30 | expected_gpu_devices = get_gpu_devices() 31 | 32 | assert expected_gpu_devices[0].vendor == "NVIDIA" 33 | assert expected_gpu_devices[0].device_name == "AD104GL [RTX 4000 SFF Ada Generation]" 34 | assert expected_gpu_devices[0].device_class == "0300" 35 | assert expected_gpu_devices[0].pci_host == "01:00.0" 36 | assert expected_gpu_devices[0].device_id == "10de:27b0" 37 | -------------------------------------------------------------------------------- /tests/supervisor/test_status.py: -------------------------------------------------------------------------------- 1 | from unittest.mock import AsyncMock, MagicMock, Mock 2 | 3 | import pytest 4 | from aleph_message.models import ItemHash 5 | 6 | from aleph.vm.orchestrator.status import check_internet 7 | 8 | 9 | @pytest.mark.asyncio 10 | async def test_check_internet_wrong_result_code(): 11 | vm_id = ItemHash("cafecafecafecafecafecafecafecafecafecafecafecafecafecafecafecafe") 12 | 13 | mock_session = Mock() 14 | mock_session.get = MagicMock() 15 | 16 | mock_session.get.return_value.__aenter__.return_value.json = AsyncMock( 17 | return_value={"result": 200, "headers": {"Server": "nginx"}} 18 | ) 19 | assert await check_internet(mock_session, vm_id) is True 20 | 21 | mock_session.get.return_value.__aenter__.return_value.json = AsyncMock( 22 | return_value={"result": 400, "headers": {"Server": "nginx"}} 23 | ) 24 | assert await check_internet(mock_session, vm_id) is False 25 | -------------------------------------------------------------------------------- /tests/supervisor/test_utils.py: -------------------------------------------------------------------------------- 1 | from unittest import mock 2 | 3 | from aleph.vm.utils import ( 4 | check_amd_sev_es_supported, 5 | check_amd_sev_snp_supported, 6 | check_amd_sev_supported, 7 | check_system_module, 8 | ) 9 | 10 | 11 | def test_check_system_module_enabled(): 12 | with mock.patch( 13 | "pathlib.Path.exists", 14 | return_value=True, 15 | ): 16 | expected_value = "Y" 17 | with mock.patch( 18 | "aleph.vm.utils.Path.open", 19 | mock.mock_open(read_data=expected_value), 20 | ): 21 | output = check_system_module("kvm_amd/parameters/sev_enp") 22 | assert output == expected_value 23 | 24 | assert check_amd_sev_supported() is True 25 | assert check_amd_sev_es_supported() is True 26 | assert 
check_amd_sev_snp_supported() is True 27 | 28 | with mock.patch( 29 | "aleph.vm.utils.Path.open", 30 | mock.mock_open(read_data="N"), 31 | ): 32 | output = check_system_module("kvm_amd/parameters/sev_enp") 33 | assert output == "N" 34 | 35 | assert check_amd_sev_supported() is False 36 | assert check_amd_sev_es_supported() is False 37 | assert check_amd_sev_snp_supported() is False 38 | -------------------------------------------------------------------------------- /tests/supervisor/views/test_run_code.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from aiohttp import ClientResponseError, web 3 | from aiohttp.test_utils import make_mocked_request 4 | from aiohttp.web_exceptions import HTTPBadRequest 5 | from aleph_message.exceptions import UnknownHashError 6 | from aleph_message.models import ItemHash 7 | 8 | from aleph.vm.conf import settings 9 | from aleph.vm.orchestrator.views import run_code_from_path 10 | 11 | 12 | @pytest.mark.asyncio 13 | async def test_run_code_from_invalid_path(aiohttp_client): 14 | """ 15 | Test that the run_code_from_path endpoint raises the right 16 | error on invalid paths. 17 | """ 18 | item_hash = "invalid-item-hash" 19 | with pytest.raises(UnknownHashError): 20 | assert ItemHash(item_hash).is_storage(item_hash) 21 | 22 | app = web.Application() 23 | 24 | app.router.add_route("*", "/vm/{ref}{suffix:.*}", run_code_from_path) 25 | client = await aiohttp_client(app) 26 | 27 | invalid_hash_request: web.Request = make_mocked_request( 28 | "GET", 29 | "/vm/" + item_hash, 30 | match_info={ 31 | "ref": item_hash, 32 | "suffix": "/some/suffix", 33 | }, 34 | headers={"Host": settings.DOMAIN_NAME}, 35 | app=app, 36 | ) 37 | with pytest.raises(HTTPBadRequest): 38 | await run_code_from_path(invalid_hash_request) 39 | 40 | # Calling the view from an HTTP client should result in a Bad Request error. 
41 |     resp = await client.get("/vm/" + item_hash + "/some/suffix")
42 |     assert resp.status == HTTPBadRequest.status_code
43 |     text = await resp.text()
44 |     assert text == f"Invalid message reference: {item_hash}"
45 |     with pytest.raises(ClientResponseError):
46 |         resp.raise_for_status()
47 | 
-------------------------------------------------------------------------------- /tests/supervisor/views/test_view_errors.py: --------------------------------------------------------------------------------
1 | import pytest
2 | from aiohttp.test_utils import TestClient
3 | 
4 | from aleph.vm.orchestrator.supervisor import setup_webapp
5 | 
6 | 
7 | @pytest.mark.asyncio
8 | async def test_json_404_about(aiohttp_client, mocker):
9 |     app = setup_webapp(pool=None)
10 |     client: TestClient = await aiohttp_client(app)
11 |     response = await client.get(
12 |         "/about/non_existing_path",
13 |     )
14 |     assert response.status == 404
15 |     assert response.content_type == "application/json"
16 |     assert await response.json() == {"error": "404: Not Found"}
17 | 
18 | 
19 | @pytest.mark.asyncio
20 | async def test_json_err_allocation_notify(aiohttp_client, mocker):
21 |     app = setup_webapp(pool=None)
22 |     client: TestClient = await aiohttp_client(app)
23 |     response = await client.post("/control/allocation/notify", data="invalid_json")
24 |     assert response.status == 400
25 |     assert response.content_type == "application/json"
26 |     assert await response.json() == {"error": "Body is not valid JSON"}
27 | 
-------------------------------------------------------------------------------- /tutorials/REQUIREMENTS.md: --------------------------------------------------------------------------------
1 | # Tutorial: Adding Python libraries to an Aleph VM
2 | 
3 | ## 0.a Setup your environment (Debian/Ubuntu Linux)
4 | ```shell
5 | sudo apt install python3-pip python3-venv squashfs-tools
6 | ```
7 | 
8 | ```shell
9 | pip3 install aleph-client
10 | ```
11 | 
12 | ## 0.b Quick install (macOS using Vagrant)
13 | 
14 | To start running aleph-vm on a Mac, you first have to initialize a virtual machine.
15 | 
16 | ### Install VirtualBox
17 | You will need VirtualBox, a free and open-source hosted hypervisor (or virtual machine manager), for the next step.
18 | 
19 | You can download and install it from the [official VirtualBox website](https://www.virtualbox.org/).
20 | 
21 | ### Install Vagrant
22 | Vagrant is an open-source software product for building and maintaining portable virtual software development environments based on VirtualBox.
23 | 
24 | Run the following command to install it (first make sure [Homebrew](https://brew.sh) is installed on your Mac):
25 | 
26 | ```shell
27 | brew install vagrant
28 | ```
29 | 
30 | Once Vagrant is installed, go to your working repository and initialize Vagrant:
31 | 
32 | ```shell
33 | vagrant init boxomatic/debian-11
34 | ```
35 | 
36 | A `Vagrantfile` (in Ruby) will be created; you can consult it if you wish.
37 | 
38 | Now, to instantiate a new virtual machine, run the following command:
39 | 
40 | ```shell
41 | vagrant up
42 | ```
43 | 
44 | If this does not work, open System Preferences > Security and Privacy and allow the "System software from developer" option at the bottom of the window.
45 | 
46 | Once the command is done, your virtual machine will be booted and ready!
47 | 
48 | ### Set the Vagrantfile configuration
49 | 
50 | Open the `Vagrantfile` and add the following port-forwarding rule to the configuration:
51 | 
52 | ```ruby
53 | config.vm.network "forwarded_port", guest: 8000, host: 8000
54 | ```
55 | 
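With the port forwarded, reload the virtual machine so the change takes effect, then open a shell inside it (standard Vagrant commands, shown here as a sketch; adapt as needed for your setup):

```shell
vagrant reload   # restart the VM and apply the Vagrantfile changes
vagrant ssh      # open a shell inside the virtual machine
```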
56 | ## 1. Install the packages in a directory
57 | 
58 | ```shell
59 | pip install -t /opt/packages -r requirements.txt
60 | ```
61 | 
62 | ```shell
63 | mksquashfs /opt/packages packages.squashfs
64 | ```
65 | 
66 | 
67 | ## 2. Upload the packages
68 | 
69 | ### 2.a. Without IPFS (small size)
70 | 
71 | ```shell
72 | aleph upload packages.squashfs
73 | ```
74 | 
75 | ### 2.b. With IPFS
76 | ```shell
77 | /opt/go-ipfs/ipfs daemon
78 | ```
79 | 
80 | ```shell
81 | ipfs add packages.squashfs
82 | ```
83 | > added QmWWX6BaaRkRSr2iNdwH5e29ACPg2nCHHXTRTfuBmVm3Ga venv.squashfs
84 | 
85 | ```shell
86 | aleph pin QmWWX6BaaRkRSr2iNdwH5e29ACPg2nCHHXTRTfuBmVm3Ga
87 | ```
88 | 
89 | ## 3. Create your program
90 | 
91 | ```shell
92 | aleph program upload ./my-program main:app
93 | ```
94 | 
95 | Press Enter at the following prompt to use the default runtime:
96 | ```
97 | Ref of runtime ? [bd79839bf96e595a06da5ac0b6ba51dea6f7e2591bb913deccded04d831d29f4]
98 | ```
99 | 
100 | Press `Y` to add extra volumes to your program:
101 | ```
102 | Add volume ? [y/N] Y
103 | Description: Python Packages
104 | Mount: /opt/packages
105 | Ref: 61f43ab261060ff94838dc94313a70cdb939a5fc6c99924b96d55dcc2c108d03
106 | Use latest version ? [Y/n]
107 | ```
108 | 
109 | Finally, press Enter to skip adding more volumes.
110 | ```
111 | Add volume ? [y/N]
112 | ```
-------------------------------------------------------------------------------- /tutorials/SERVER.md: --------------------------------------------------------------------------------
1 | # Tutorial: Creating a non-Python program on Aleph-VM
2 | 
3 | > This tutorial follows up the first tutorial [Creating and hosting a program on Aleph-VM](./README.md).
4 | 
5 | ## 0. Welcome
6 | 
7 | In this second tutorial, we will guide you on how to run programs written in any programming language on Aleph Virtual Machines.
8 | 
9 | In addition to running Python programs using ASGI as covered in the first tutorial,
10 | Aleph VMs also support any program that listens for HTTP requests on port 8080.
11 | 
12 | This can be used to run existing programs on Aleph VMs, or to use other programming languages to write programs and run them on Aleph-VM.
13 | 
14 | ### What we will cover
15 | 
16 | Since Python is the only language currently supported, in this tutorial we will cover two other languages: [Rust](https://www.rust-lang.org/) and Javascript ([NodeJS](https://nodejs.org/)).
17 | 
18 | ## 1. Rust
19 | 
20 | In this first section, you will run a program written in Rust on an Aleph VM.
21 | 
22 | ### 1.a. Requirements
23 | 
24 | You need a Rust compiler. You can install one using the [official Install Rust guide](https://www.rust-lang.org/tools/install)
25 | or via your favourite package manager:
26 | 
27 |     $ sudo apt install rustc cargo
28 | 
29 | ### 1.b.
Writing a Rust program 30 | 31 | Let's use a very simple HTTP server inspired by the [Building a Single-Threaded Web Server](https://doc.rust-lang.org/book/ch20-01-single-threaded.html) 32 | section of The Rust Programming Language Book: 33 | 34 | ```shell 35 | $ cargo new example_http_rust 36 | Created binary (application) `example_http_rust` project 37 | $ cd example_http_rust 38 | ``` 39 | 40 | Filename: `src/main.rs` 41 | ```rust 42 | use std::io::prelude::*; 43 | use std::net::TcpListener; 44 | use std::net::TcpStream; 45 | 46 | fn main() { 47 | 48 | let listener = TcpListener::bind("0.0.0.0:8080").unwrap(); 49 | println!("Running on 0.0.0.0:8080"); 50 | for stream in listener.incoming() { 51 | let stream = stream.unwrap(); 52 | handle_connection(stream); 53 | } 54 | } 55 | 56 | fn handle_connection(mut stream: TcpStream) { 57 | println!("handling connection"); 58 | 59 | const MSG: &str = "helloworld"; 60 | let msg = MSG.as_bytes(); 61 | 62 | let response = format!("{:x?}", msg); 63 | 64 | let mut buffer = [0; 1024]; 65 | 66 | stream.read(&mut buffer).unwrap(); 67 | 68 | let response = format!("HTTP/1.1 200 OK\n\nOKIDOK\n{}", response); 69 | 70 | stream.write(response.as_bytes()).unwrap(); 71 | stream.flush().unwrap(); 72 | } 73 | ``` 74 | 75 | ```shell 76 | cargo run 77 | ``` 78 | 79 | Open http://127.0.0.1:8080 in your browser to test your new server. 80 | 81 | ### 1.c. Publishing a Rust program 82 | 83 | Compile your program: 84 | ```shell 85 | cargo build --release 86 | ``` 87 | 88 | Publish it on Aleph using the same procedure as with the Python example, except the entrypoint refers to the name of the binary to execute. 89 | 90 | ```shell 91 | aleph program upload ./target/release/example_http_rust example_http_rust 92 | ``` 93 | 94 | If your program takes some arguments, pass them in the entrypoint by using quotes: `"example_http_rust --help`. 95 | 96 | ℹ️ If you get the error `Invalid zip archive`, you are probably missing the Squashfs user tool `mksquashfs`. In that case, first create the squashfs archive and then upload it using `aleph program upload ./target/release/example_http_rust.squashfs example_http_rust` 97 | -------------------------------------------------------------------------------- /tutorials/TESTING.md: -------------------------------------------------------------------------------- 1 | # Testing your VMs locally 2 | 3 | You can test your VM locally without uploading each version on the Aleph network. 4 | 5 | To do this, you'll want to use the `--fake-data-program` or `-f` argument of the VM Supervisor. 6 | 7 | ## 0. Build the required squashfs volumes 8 | 9 | Build or download the required squashfs volumes: 10 | 11 | ```shell 12 | cd ./runtimes/aleph-debian-11-python/ 13 | sudo bash ./create_disk_image.sh 14 | 15 | cd ../.. 16 | ``` 17 | > ℹ️ This does not work in a container since debootstrap requires mounting volumes. 18 | 19 | This will create a local runtime root filesystem in `./runtimes/aleph-debian-11-python/rootfs.squashfs`. 20 | 21 | ```shell 22 | cd ./examples/volumes/ 23 | bash ./build_squashfs.sh 24 | 25 | cd ../.. 26 | ``` 27 | This will create a local example read-only volume named `./example/volumes/volume-venv.squashfs`. 28 | 29 | ## 1. 
In a Docker container 30 | 31 | Run the developer image, mounting the two generated volumes: 32 | ```shell 33 | docker run -ti --rm \ 34 | -v "$(pwd)/runtimes/aleph-debian-11-python/rootfs.squashfs:/opt/aleph-vm/runtimes/aleph-debian-11-python/rootfs.squashfs:ro" \ 35 | -v "$(pwd)/examples/volumes/volume-venv.squashfs:/opt/aleph-vm/examples/volumes/volume-venv.squashfs:ro" \ 36 | --device /dev/kvm \ 37 | -p 4020:4020 \ 38 | docker.io/alephim/vm-supervisor-dev 39 | ``` 40 | 41 | Or launch this command using: 42 | ```shell 43 | bash ./docker/run_vm_supervisor.sh 44 | ``` 45 | 46 | 47 | Within the container, run the supervisor with fake data: 48 | ```shell 49 | python3 -m orchestrator --print-settings --very-verbose --system-logs --fake-data-program ./examples/example_fastapi 50 | ``` 51 | 52 | > ℹ️ The command is in your .bash_history, press key up to skip typing it. 53 | 54 | ## 2. On your system 55 | 56 | ### 2.a. Install the system requirements 57 | 58 | See [../vm_supervisor/README.md](../src/aleph/vm/orchestrator/README.md) to install the system requirements. 59 | 60 | ### 2.b. Run the supervisor with fake data: 61 | 62 | ```shell 63 | python3 -m orchestrator --print-settings --very-verbose --system-logs --fake-data-program ./examples/example_fastapi 64 | ``` 65 | 66 | -------------------------------------------------------------------------------- /vm_connector/README.md: -------------------------------------------------------------------------------- 1 | # VM Connector 2 | 3 | Service to schedule the execution of Aleph VM functions 4 | for the [Aleph.im](https://aleph.im/) project and assist 5 | [VM Supervisors](../src/aleph/vm/orchestrator) with operations related 6 | to the Aleph network. 7 | 8 | ## 1. Supported platforms 9 | 10 | We support running the VM Connector in a Docker container, on 11 | [platforms supported by Docker](https://docs.docker.com/engine/install/#supported-platforms). 12 | 13 | ## 2. Installation 14 | 15 | ### 2.a. Install Docker 16 | 17 | On a Debian/Ubuntu system: 18 | ```shell 19 | apt update 20 | apt install -y docker.io 21 | ``` 22 | 23 | ### 2.b. Pull the Docker image 24 | 25 | ```shell 26 | docker pull alephim/vm-connector:alpha 27 | ``` 28 | 29 | ## 3. Running 30 | 31 | Run the Docker image 32 | ```shell 33 | docker run -d -p 4021:4021/tcp --restart=always --name vm-connector alephim/vm-connector:alpha 34 | ``` 35 | 36 | ## 4. Configuration 37 | 38 | The VM Supervisor can be configured using environment variables: 39 | 40 | `API_SERVER` should point to your Aleph Node. 
41 | Defaults to https://official.aleph.cloud 42 | 43 | `IPFS_SERVER` should point to your IPFS Gateway, defaults to https://ipfs.aleph.im/ipfs 44 | -------------------------------------------------------------------------------- /vm_connector/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleph-im/aleph-vm/35ce27f1e51fd1bc86ceea502a81de77c26c5fe3/vm_connector/__init__.py -------------------------------------------------------------------------------- /vm_connector/conf.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import NewType 3 | 4 | from pydantic import BaseSettings 5 | 6 | logger = logging.getLogger(__name__) 7 | 8 | Url = NewType("Url", str) 9 | 10 | 11 | class ConnectorSettings(BaseSettings): 12 | API_SERVER: Url = Url("https://official.aleph.cloud") 13 | IPFS_SERVER: Url = Url("https://ipfs.aleph.im/ipfs") 14 | OFFLINE_TEST_MODE: bool = False 15 | 16 | def update(self, **kwargs): 17 | for key, value in kwargs.items(): 18 | if key != key.upper(): 19 | logger.warning(f"Setting {key} is not uppercase") 20 | if hasattr(self, key): 21 | setattr(self, key, value) 22 | else: 23 | raise ValueError(f"Unknown setting '{key}'") 24 | 25 | def display(self) -> str: 26 | return "\n".join( 27 | f"{annotation:<17} = {getattr(self, annotation)}" for annotation, value in self.__annotations__.items() 28 | ) 29 | 30 | class Config: 31 | env_prefix = "ALEPH_" 32 | case_sensitive = False 33 | env_file = ".env" 34 | 35 | 36 | # Settings singleton 37 | settings = ConnectorSettings() 38 | -------------------------------------------------------------------------------- /vm_connector/tests/test_message.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "vm-function", 3 | "address": "0x99eC8dc3831F67359a18420449fb028D5dCAa60f", 4 | "content": { 5 | "code": { 6 | "encoding": "zip", 7 | "entrypoint": "example_fastapi:app", 8 | "ref": "7eb2eca2378ea8855336ed76c8b26219f1cb90234d04441de9cf8cb1c649d003", 9 | "latest_amend": true 10 | }, 11 | "on": { 12 | "http": true 13 | }, 14 | "environment":{ 15 | "reproducible": true, 16 | "internet": false, 17 | "aleph_api": false 18 | }, 19 | "resources": { 20 | "vcpus": 1, 21 | "memory": 128, 22 | "seconds": 1 23 | }, 24 | "runtime": { 25 | "ref": "5f31b0706f59404fad3d0bff97ef89ddf24da4761608ea0646329362c662ba51", 26 | "latest_amend": true, 27 | "comment": "Aleph Alpine Linux with Python 3.8" 28 | }, 29 | "data": { 30 | "encoding": "tar.gzip", 31 | "mount": "/mnt", 32 | "ref": "7eb2eca2378ea8855336ed76c8b26219f1cb90234d04441de9cf8cb1c649d003", 33 | "latest_amend": true 34 | }, 35 | "export": { 36 | "encoding": "tar.gzip", 37 | "mount": "/mnt" 38 | } 39 | }, 40 | "time": 1618386753.856 41 | } 42 | --------------------------------------------------------------------------------
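A minimal sketch (assuming it is run from the repository root; field names are taken from the JSON above) of loading and inspecting this example message:

```python
import json
from pathlib import Path

# Load the example execution message shipped with the VM Connector tests.
message = json.loads(Path("vm_connector/tests/test_message.json").read_text())

assert message["type"] == "vm-function"
print(message["content"]["code"]["entrypoint"])  # "example_fastapi:app"
print(message["content"]["runtime"]["ref"])      # runtime root filesystem reference
```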