├── .dockerignore ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── PULL_REQUEST_TEMPLATE.md └── workflows │ ├── bin-package-18.04.yaml │ ├── bin-package-no-tag.yaml │ ├── bin-package.yaml │ ├── bins.yaml │ ├── codeql-analysis.yml │ ├── grid-deploy.yaml │ ├── publish-bootstrap.yaml │ ├── publish.yaml │ ├── test-bootstrap.yaml │ ├── test.yaml │ ├── zos-update-worker-main.yml │ └── zos-update-worker-release.yml ├── .gitignore ├── .golangci.yml ├── CODEOWNERS ├── CONTRIBUTING.md ├── DEVELOPMENT.md ├── LICENSE ├── README.md ├── VALUES.md ├── bins ├── README.md ├── bins-extra.sh ├── distfiles │ └── .keep ├── packages │ ├── basesystem │ │ └── basesystem.sh │ ├── cloudconsole │ │ └── cloudconsole.sh │ ├── cloudhypervisor │ │ └── cloudhypervisor.sh │ ├── containerd │ │ ├── containerd.sh │ │ └── files │ │ │ ├── config.toml │ │ │ └── containerd.yaml │ ├── corex │ │ └── corex.sh │ ├── cpubench │ │ └── cpubench.sh │ ├── hdparm │ │ └── hdparm.sh │ ├── iperf │ │ └── iperf.sh │ ├── mdadm │ │ └── mdadm.sh │ ├── misc │ │ ├── misc.sh │ │ ├── readme.md │ │ └── root │ │ │ └── usr │ │ │ └── share │ │ │ ├── btrfs │ │ │ └── volstat.sh │ │ │ └── udhcp │ │ │ └── probe.script │ ├── mycelium │ │ └── mycelium.sh │ ├── nnc │ │ └── nnc.sh │ ├── qsfs │ │ └── qsfs.sh │ ├── rfs │ │ └── rfs.sh │ ├── runc │ │ └── runc.sh │ ├── shimlogs │ │ └── shimlogs.sh │ ├── tailstream │ │ └── tailstream.sh │ ├── tpm │ │ └── tpm.sh │ ├── traefik │ │ └── traefik.sh │ ├── vector │ │ ├── files │ │ │ ├── vector.yaml │ │ │ └── zinit-vector.yaml │ │ └── vector.sh │ ├── virtiofsd │ │ └── virtiofsd.sh │ ├── virtwhat │ │ └── virtwhat.sh │ └── yggdrasil │ │ └── yggdrasil.sh ├── releases │ └── .keep └── workdir │ └── .keep ├── bootstrap ├── Makefile ├── bootstrap │ ├── .gitignore │ ├── Cargo.lock │ ├── Cargo.toml │ ├── Makefile │ ├── README.md │ └── src │ │ ├── bootstrap.rs │ │ ├── config.rs │ │ ├── hub.rs │ │ ├── kparams.rs │ │ ├── main.rs │ │ ├── workdir.rs │ │ ├── zfs.rs │ │ └── zinit.rs ├── etc │ └── zinit │ │ ├── bootstrap.yaml │ │ └── internet.yaml └── usr │ └── share │ └── udhcp │ └── probe.script ├── client └── README.md ├── cmds ├── Makefile ├── identityd │ ├── main.go │ ├── monitor.go │ └── ssh.go ├── internet │ ├── README.md │ └── main.go ├── modules │ ├── api_gateway │ │ └── main.go │ ├── contd │ │ └── main.go │ ├── flistd │ │ └── main.go │ ├── gateway │ │ └── main.go │ ├── networkd │ │ ├── main.go │ │ └── nft.go │ ├── noded │ │ ├── main.go │ │ └── public.go │ ├── powerd │ │ └── main.go │ ├── provisiond │ │ ├── cap.go │ │ ├── events.go │ │ ├── main.go │ │ ├── migration.go │ │ ├── reporter.go │ │ ├── swagger.go │ │ └── swagger │ │ │ ├── favicon-16x16.png │ │ │ ├── favicon-32x32.png │ │ │ ├── index.html │ │ │ ├── oauth2-redirect.html │ │ │ ├── swagger-ui-bundle.js │ │ │ ├── swagger-ui-es-bundle-core.js │ │ │ ├── swagger-ui-es-bundle.js │ │ │ ├── swagger-ui-standalone-preset.js │ │ │ ├── swagger-ui.css │ │ │ ├── swagger-ui.js │ │ │ └── zos-api.yml │ ├── qsfsd │ │ └── main.go │ ├── storaged │ │ └── main.go │ ├── vmd │ │ └── main.go │ ├── zbusdebug │ │ └── main.go │ ├── zlf │ │ ├── README.md │ │ └── main.go │ └── zui │ │ ├── disk.go │ │ ├── header.go │ │ ├── main.go │ │ ├── mem.go │ │ ├── net.go │ │ ├── prov.go │ │ └── service.go └── zos │ └── main.go ├── etc ├── sysctl.conf └── zinit │ ├── api-gateway.yaml │ ├── boot.yaml │ ├── contd.yaml │ ├── flistd.yaml │ ├── gateway.yaml │ ├── identityd.yaml │ ├── init │ └── node-ready.sh │ ├── networkd.yaml │ ├── node-ready.yaml │ ├── noded.yaml │ ├── powerd.yaml │ ├── 
provisiond.yaml │ ├── qsfsd.yaml │ ├── quiet.yaml │ ├── redis.yaml │ ├── storaged.yaml │ ├── sysctl.yaml │ ├── ttylog.yaml │ ├── vmd.yaml │ └── zui.yaml ├── go.mod ├── go.sum ├── qemu ├── Makefile ├── README.md ├── net.sh ├── overlay ├── overlay.normal │ ├── .zero-os-debug │ ├── bin │ │ ├── api-gateway │ │ ├── contd │ │ ├── flistd │ │ ├── gateway │ │ ├── identityd │ │ ├── internet │ │ ├── networkd │ │ ├── noded │ │ ├── powerd │ │ ├── provisiond │ │ ├── qsfsd │ │ ├── storaged │ │ ├── vmd │ │ ├── zbusdebug │ │ └── zui │ └── etc │ │ ├── sysctl.conf │ │ └── zinit │ │ ├── api-gateway.yaml │ │ ├── boot.yaml │ │ ├── bootstrap.yaml │ │ ├── contd.yaml │ │ ├── flistd.yaml │ │ ├── gateway.yaml │ │ ├── identityd.yaml │ │ ├── init │ │ └── node-ready.sh │ │ ├── internet.yaml │ │ ├── networkd.yaml │ │ ├── node-ready.yaml │ │ ├── noded.yaml │ │ ├── powerd.yaml │ │ ├── provisiond.yaml │ │ ├── qsfsd.yaml │ │ ├── quiet.yaml │ │ ├── redis.yaml │ │ ├── storaged.yaml │ │ ├── sysctl.yaml │ │ ├── ttylog.yaml │ │ ├── vmd.yaml │ │ └── zui.yaml ├── revert-vfio.sh ├── setup-vfio.sh ├── vm.sh ├── vm_gpu.sh └── walkthrough.md ├── scripts ├── collect.sh ├── create_image.sh ├── debug_image.md ├── debug_image.sh └── install_deps.sh ├── specs ├── Kubernetes.md ├── container │ ├── design.planuml │ └── readme.md ├── grid3 │ ├── contract.md │ ├── png │ │ ├── grid3-overlay.png │ │ └── sequence.png │ ├── readme.md │ └── uml │ │ ├── rmb.wsd │ │ └── sequence.wsd ├── init.md ├── ipc.md ├── k8s_ha.jpg ├── network │ ├── Gateway_Container.md │ ├── HIDDEN-PUBLIC.dia │ ├── HIDDEN-PUBLIC.md │ ├── HIDDEN-PUBLIC.png │ ├── Nodes.svg │ ├── Public_Provisioning_for_ExitNRs.md │ ├── Requirements.md │ ├── Routing_Firewalling.md │ ├── create-public-in-farm.md │ ├── datastructs.md │ ├── readme.md │ ├── setup │ │ ├── nftables.conf │ │ ├── setupwgtest.sh │ │ ├── vrftests.sh │ │ ├── wg1.conf │ │ ├── wg1.priv │ │ ├── wg1.pub │ │ ├── wg2.conf │ │ ├── wg2.priv │ │ ├── wg2.pub │ │ ├── wg3.conf │ │ ├── wg3.priv │ │ ├── wg3.pub │ │ ├── wg4.conf │ │ ├── wg4.priv │ │ ├── wg4.pub │ │ ├── wg5.conf │ │ ├── wg5.priv │ │ └── wg5.pub │ └── tnodb.wsd ├── readme.md ├── storage │ └── readme.md ├── upgrade.md └── zbus.md └── tools └── zos-update-worker ├── .gitignore ├── .goreleaser.yaml ├── Makefile ├── README.md ├── Taskfile.yml ├── cmd └── worker.go ├── go.mod ├── go.sum ├── internal ├── update_worker.go └── worker_test.go └── main.go /.dockerignore: -------------------------------------------------------------------------------- 1 | .github 2 | assets 3 | bin 4 | bins 5 | bootstrap 6 | client 7 | cmds 8 | docs 9 | etc 10 | out 11 | papers 12 | qemu 13 | scripts 14 | specs -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | ## Describe the bug 11 | 12 | A clear and concise description of what the bug is. 13 | 14 | ## To Reproduce 15 | 16 | Steps to reproduce the behavior: 17 | 18 | 1. Go to '...' 19 | 2. Click on '....' 20 | 3. Scroll down to '....' 21 | 4. See error 22 | 23 | ## Expected behavior 24 | 25 | A clear and concise description of what you expected to happen. 26 | 27 | ## Screenshots 28 | 29 | If applicable, add screenshots to help explain your problem. 
30 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | ## Is your feature request related to a problem? Please describe 11 | 12 | A clear and concise description of what the problem is. Ex. I'm always frustrated when \[...] 13 | 14 | ## Describe the solution you'd like 15 | 16 | A clear and concise description of what you want to happen. 17 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ### Description 2 | 3 | Describe the changes introduced by this PR and what does it affect 4 | 5 | ### Changes 6 | 7 | List of changes this PR includes 8 | 9 | ### Related Issues 10 | 11 | List of related issues 12 | 13 | ### Checklist 14 | 15 | - [ ] Tests included 16 | - [ ] Build pass 17 | - [ ] Documentation 18 | - [ ] Code format and docstring 19 | -------------------------------------------------------------------------------- /.github/workflows/bin-package-18.04.yaml: -------------------------------------------------------------------------------- 1 | # Builds a single runtime package, this 2 | # is similar to bin-package but always uses ubuntu:18.04 to 3 | # build the package. 4 | name: Build Extra Binary 5 | 6 | on: 7 | workflow_call: 8 | inputs: 9 | package: 10 | description: "package to build" 11 | required: true 12 | type: string 13 | secrets: 14 | token: 15 | required: true 16 | jobs: 17 | builder: 18 | name: builder 19 | runs-on: ubuntu-latest 20 | container: ubuntu:18.04 21 | steps: 22 | - name: Checkout code into the Go module directory 23 | uses: actions/checkout@v1 24 | - name: Set tag of build 25 | id: tag 26 | run: | 27 | ref="${{ github.ref }}" 28 | if [ "${{ github.ref_type }}" = "tag" ]; then 29 | echo "reference=${ref#refs/tags/}" >> $GITHUB_OUTPUT 30 | else 31 | export reference="${{ github.sha }}" 32 | # in container we have do to this is bash because 33 | # the container default sh which has no support for substring 34 | bash -c 'echo "reference=${reference:0:7}" >> $GITHUB_OUTPUT' 35 | fi 36 | - name: Setup basesystem 37 | run: | 38 | cd bins 39 | ./bins-extra.sh --package basesystem 40 | - name: Build package (${{ inputs.package }}) 41 | id: package 42 | run: | 43 | cd bins 44 | ./bins-extra.sh --package ${{ inputs.package }} 45 | - name: Publish flist (tf-autobuilder, ${{ steps.package.outputs.name }}) 46 | if: success() 47 | uses: threefoldtech/publish-flist@master 48 | with: 49 | token: ${{ secrets.token }} 50 | action: publish 51 | user: tf-autobuilder 52 | root: bins/releases/${{ inputs.package }} 53 | name: ${{ steps.package.outputs.name }}.flist 54 | - name: Tagging 55 | uses: threefoldtech/publish-flist@master 56 | if: success() && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/v')) 57 | with: 58 | token: ${{ secrets.token }} 59 | action: tag 60 | user: tf-autobuilder 61 | name: ${{ steps.tag.outputs.reference }}/${{ inputs.package }}.flist 62 | target: tf-autobuilder/${{ steps.package.outputs.name }}.flist 63 | -------------------------------------------------------------------------------- /.github/workflows/bin-package-no-tag.yaml: 
-------------------------------------------------------------------------------- 1 | # Builds a single runtime package, this 2 | # is similar to `bin-package.yaml` but never 3 | # tags the built binary. It means any package 4 | # built with this never becomes part of zos installation 5 | name: Build Extra Binary (no tagging) 6 | 7 | on: 8 | workflow_call: 9 | inputs: 10 | package: 11 | description: "package to build" 12 | required: true 13 | type: string 14 | secrets: 15 | token: 16 | required: true 17 | jobs: 18 | builder: 19 | name: builder 20 | runs-on: ubuntu-latest 21 | container: ubuntu:20.04 22 | steps: 23 | - name: Checkout code into the Go module directory 24 | uses: actions/checkout@v1 25 | - name: Setup basesystem 26 | run: | 27 | cd bins 28 | ./bins-extra.sh --package basesystem 29 | - name: Build package (${{ inputs.package }}) 30 | id: package 31 | run: | 32 | cd bins 33 | ./bins-extra.sh --package ${{ inputs.package }} 34 | - name: Publish flist (tf-autobuilder, ${{ steps.package.outputs.name }}) 35 | if: success() 36 | uses: threefoldtech/publish-flist@master 37 | with: 38 | token: ${{ secrets.token }} 39 | action: publish 40 | user: tf-autobuilder 41 | root: bins/releases/${{ inputs.package }} 42 | name: ${{ steps.package.outputs.name }}.flist 43 | -------------------------------------------------------------------------------- /.github/workflows/bin-package.yaml: -------------------------------------------------------------------------------- 1 | # Builds a single runtime package, this 2 | # workflow is only invoked from the `bins.yaml` file 3 | # to build the configured packages 4 | # 5 | # the built binary is ALWAYS published to tf-autobuilder 6 | # and then tagged with a certain tag. this can be the version 7 | # of the release or the `short sha` of the head, if on main 8 | # branch 9 | name: Build Extra Binary 10 | 11 | on: 12 | workflow_call: 13 | inputs: 14 | package: 15 | description: "package to build" 16 | required: true 17 | type: string 18 | secrets: 19 | token: 20 | required: true 21 | jobs: 22 | builder: 23 | name: builder 24 | runs-on: ubuntu-latest 25 | container: ubuntu:20.04 26 | steps: 27 | - name: Checkout code into the Go module directory 28 | uses: actions/checkout@v1 29 | # the tag step only extract the correct version 30 | # tag to use. 
this is the short sha in case of 31 | # a branch, or the tag name in case of "release" 32 | # the value is then stored as `reference` 33 | # and then accessed later in the workflow 34 | - name: Set tag of build 35 | id: tag 36 | shell: bash 37 | run: | 38 | ref="${{ github.ref }}" 39 | if [ "${{ github.ref_type }}" = "tag" ]; then 40 | echo "reference=${ref#refs/tags/}" >> $GITHUB_OUTPUT 41 | else 42 | reference="${{ github.sha }}" 43 | echo "reference=${reference:0:7}" >> $GITHUB_OUTPUT 44 | fi 45 | - name: Setup basesystem 46 | shell: bash 47 | run: | 48 | apt update 49 | cd bins 50 | ./bins-extra.sh --package basesystem 51 | - name: Build package (${{ inputs.package }}) 52 | id: package 53 | shell: bash 54 | run: | 55 | cd bins 56 | ./bins-extra.sh --package ${{ inputs.package }} 57 | - name: Publish flist (tf-autobuilder, ${{ steps.package.outputs.name }}) 58 | if: success() && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/v')) 59 | uses: threefoldtech/publish-flist@master 60 | with: 61 | token: ${{ secrets.token }} 62 | action: publish 63 | user: tf-autobuilder 64 | root: bins/releases/${{ inputs.package }} 65 | name: ${{ steps.package.outputs.name }}.flist 66 | - name: Tagging 67 | uses: threefoldtech/publish-flist@master 68 | if: success() && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/v')) 69 | with: 70 | token: ${{ secrets.token }} 71 | action: tag 72 | user: tf-autobuilder 73 | name: ${{ steps.tag.outputs.reference }}/${{ inputs.package }}.flist 74 | target: tf-autobuilder/${{ steps.package.outputs.name }}.flist 75 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | name: "CodeQL" 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | pull_request: 7 | # The branches below must be a subset of the branches above 8 | branches: [main] 9 | schedule: 10 | - cron: "0 11 * * 3" 11 | 12 | jobs: 13 | analyze: 14 | name: Analyze 15 | runs-on: ubuntu-latest 16 | 17 | strategy: 18 | fail-fast: false 19 | matrix: 20 | # Override automatic language detection by changing the below list 21 | # Supported options are ['csharp', 'cpp', 'go', 'java', 'javascript', 'python'] 22 | language: ["go"] 23 | # Learn more... 24 | # https://docs.github.com/en/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#overriding-automatic-language-detection 25 | 26 | steps: 27 | - name: Checkout repository 28 | uses: actions/checkout@v2 29 | with: 30 | # We must fetch at least the immediate parents so that if this is 31 | # a pull request then we can checkout the head. 32 | fetch-depth: 2 33 | 34 | # If this run was triggered by a pull request event, then checkout 35 | # the head of the pull request instead of the merge commit. 36 | - run: git checkout HEAD^2 37 | if: ${{ github.event_name == 'pull_request' }} 38 | 39 | # Initializes the CodeQL tools for scanning. 40 | - name: Initialize CodeQL 41 | uses: github/codeql-action/init@v1 42 | with: 43 | languages: ${{ matrix.language }} 44 | 45 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 46 | # If this step fails, then you should remove it and run the build manually (see below) 47 | - name: Autobuild 48 | uses: github/codeql-action/autobuild@v1 49 | 50 | # ℹ️ Command-line programs to run using the OS shell. 
51 | # 📚 https://git.io/JvXDl 52 | 53 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 54 | # and modify them (or add more) to build your code if your project 55 | # uses a compiled language 56 | 57 | #- run: | 58 | # make bootstrap 59 | # make release 60 | 61 | - name: Perform CodeQL Analysis 62 | uses: github/codeql-action/analyze@v1 63 | -------------------------------------------------------------------------------- /.github/workflows/grid-deploy.yaml: -------------------------------------------------------------------------------- 1 | name: Deploy 2 | 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | grid: 7 | description: "grid to deploy" 8 | required: true 9 | default: "qa" 10 | type: choice 11 | options: 12 | - qa 13 | - testing 14 | - production 15 | version: 16 | description: "version to release" 17 | required: true 18 | type: string 19 | jobs: 20 | link-release: 21 | name: linking 22 | runs-on: ubuntu-latest 23 | steps: 24 | - name: Symlink flist (development) 25 | uses: threefoldtech/publish-flist@master 26 | with: 27 | token: ${{ secrets.HUB_JWT }} 28 | action: crosslink 29 | user: tf-zos 30 | name: zos:${{ github.event.inputs.grid }}-3:latest.flist 31 | target: tf-autobuilder/zos:${{ github.event.inputs.version }}.flist 32 | -------------------------------------------------------------------------------- /.github/workflows/publish-bootstrap.yaml: -------------------------------------------------------------------------------- 1 | name: Publish Bootstrap 2 | on: 3 | push: 4 | paths: 5 | - "bootstrap/bootstrap/**" 6 | - ".github/workflows/publish-bootstrap.yaml" 7 | 8 | jobs: 9 | bootstrap: 10 | name: Building 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Checkout code 14 | uses: actions/checkout@master 15 | - name: Prepare musl 16 | run: | 17 | sudo apt-get update 18 | sudo apt-get install -y musl musl-tools 19 | - name: Setup toolchain 20 | uses: dtolnay/rust-toolchain@stable 21 | with: 22 | targets: x86_64-unknown-linux-musl 23 | - name: Build bootstrap 24 | run: make release 25 | working-directory: bootstrap/bootstrap 26 | - name: Collecting files 27 | run: | 28 | mkdir -p ${{ github.workspace }}/archive/sbin/ 29 | cp -a bootstrap/bootstrap/target/x86_64-unknown-linux-musl/release/bootstrap ${{ github.workspace }}/archive/sbin/ 30 | - name: Set name of the development build 31 | id: setname 32 | run: | 33 | echo "build=bootstrap-v$(date +%y%m%d.%-H%M%S.0)-dev.flist" >> $GITHUB_OUTPUT 34 | - name: Publish flist (${{ steps.setname.outputs.build }}) 35 | if: success() 36 | uses: threefoldtech/publish-flist@master 37 | with: 38 | action: publish 39 | user: tf-autobuilder 40 | root: archive 41 | token: ${{ secrets.HUB_JWT }} 42 | name: ${{ steps.setname.outputs.build }} 43 | - name: Symlink flist (development) 44 | if: success() 45 | uses: threefoldtech/publish-flist@master 46 | with: 47 | action: symlink 48 | user: tf-autobuilder 49 | token: ${{ secrets.HUB_JWT }} 50 | name: ${{ steps.setname.outputs.build }} 51 | target: bootstrap:development.flist 52 | - name: Symlink flist (release) 53 | if: success() && github.ref == 'refs/heads/main' 54 | uses: threefoldtech/publish-flist@master 55 | with: 56 | action: symlink 57 | user: tf-autobuilder 58 | token: ${{ secrets.HUB_JWT }} 59 | name: ${{ steps.setname.outputs.build }} 60 | target: bootstrap:latest.flist 61 | -------------------------------------------------------------------------------- /.github/workflows/publish.yaml: 
-------------------------------------------------------------------------------- 1 | # the publish workflow builds zos binaries 2 | # and tag them correctly 3 | # the build is triggered when pushing to any 4 | # branch, but the tagging of the build only happens 5 | # either on main or when a tag is created 6 | name: Release 7 | on: 8 | push: 9 | branches: 10 | - "*" 11 | tags: 12 | - "v*" 13 | jobs: 14 | build: 15 | name: Build and upload 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Set up Go 1.21 19 | uses: actions/setup-go@v1 20 | with: 21 | go-version: 1.21 22 | id: go 23 | 24 | - name: Checkout code into the Go module directory 25 | uses: actions/checkout@v1 26 | 27 | - name: Build binaries 28 | run: | 29 | cd cmds 30 | make 31 | env: 32 | GO111MODULE: on 33 | # the tag step only extract the correct version 34 | # tag to use. this is the short sha in case of 35 | # a branch, or the tag name in case of "release" 36 | # the value is then stored as `reference` 37 | # and then accessed later in the workflow 38 | - name: Set tag of build 39 | id: tag 40 | run: | 41 | ref="${{ github.ref }}" 42 | if [ "${{ github.ref_type }}" = "tag" ]; then 43 | echo "reference=${ref#refs/tags/}" >> $GITHUB_OUTPUT 44 | else 45 | reference="${{ github.sha }}" 46 | echo "reference=${reference:0:7}" >> $GITHUB_OUTPUT 47 | fi 48 | 49 | - name: Set version of build 50 | id: version 51 | run: | 52 | echo "version=v$(date +%y%m%d.%-H%M%S.0)" >> $GITHUB_OUTPUT 53 | 54 | - name: Collecting files 55 | run: | 56 | scripts/collect.sh ${{ github.workspace }}/archive 57 | 58 | - name: Publish flist (zos:${{ steps.version.outputs.version }}.flist) 59 | if: success() 60 | uses: threefoldtech/publish-flist@master 61 | with: 62 | token: ${{ secrets.HUB_JWT }} 63 | action: publish 64 | user: tf-autobuilder 65 | root: archive 66 | name: zos:${{ steps.version.outputs.version }}.flist 67 | 68 | # we tag only if and only if we merged to main 69 | # in that case the tag will be the short sha. 
70 | # or if we tagged a certain version and that 71 | # will use the tag value (has to start with v) 72 | - name: Tagging 73 | uses: threefoldtech/publish-flist@master 74 | if: success() && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/v')) 75 | with: 76 | token: ${{ secrets.HUB_JWT }} 77 | action: tag 78 | user: tf-autobuilder 79 | name: ${{ steps.tag.outputs.reference }}/zos.flist 80 | target: tf-autobuilder/zos:${{ steps.version.outputs.version }}.flist 81 | 82 | # only for main branch (devnet) 83 | # this basically releases this build to devnet 84 | - name: Cross tagging (development) 85 | if: success() && github.ref == 'refs/heads/main' 86 | uses: threefoldtech/publish-flist@master 87 | with: 88 | token: ${{ secrets.HUB_JWT }} 89 | action: crosstag 90 | user: tf-zos 91 | name: development 92 | target: tf-autobuilder/${{ steps.tag.outputs.reference }} 93 | -------------------------------------------------------------------------------- /.github/workflows/test-bootstrap.yaml: -------------------------------------------------------------------------------- 1 | name: Bootstrap Tests 2 | on: 3 | push: 4 | paths: 5 | - "bootstrap/bootstrap/**" 6 | - ".github/workflows/test-bootstrap.yaml" 7 | jobs: 8 | bootstrap: 9 | name: Running Bootstrap Tests 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Checkout code 13 | uses: actions/checkout@master 14 | - name: Prepare musl 15 | run: | 16 | sudo apt-get update 17 | sudo apt-get install -y musl musl-tools 18 | - name: Setup toolchain 19 | uses: dtolnay/rust-toolchain@stable 20 | with: 21 | targets: x86_64-unknown-linux-musl 22 | - name: Test bootstrap 23 | run: make test 24 | working-directory: bootstrap/bootstrap 25 | - name: Build bootstrap 26 | run: make release 27 | working-directory: bootstrap/bootstrap 28 | -------------------------------------------------------------------------------- /.github/workflows/test.yaml: -------------------------------------------------------------------------------- 1 | name: Tests and Coverage 2 | on: [push] 3 | 4 | jobs: 5 | daemons: 6 | name: Running Daemon Tests 7 | runs-on: ubuntu-latest 8 | steps: 9 | - name: Set up Go 1.21 10 | uses: actions/setup-go@v1 11 | with: 12 | go-version: 1.21 13 | id: go 14 | 15 | - name: Prepare dependencies 16 | run: | 17 | sudo apt-get update 18 | sudo apt-get install -y libjansson-dev libhiredis-dev 19 | 20 | - name: Checkout code into the Go module directory 21 | uses: actions/checkout@v4 22 | 23 | # - name: Get dependencies 24 | # run: | 25 | # cd zos/pkg 26 | # make getdeps 27 | # env: 28 | # GO111MODULE: on 29 | 30 | # - name: Run tests 31 | # run: | 32 | # cd zos/pkg 33 | # make testrace 34 | # env: 35 | # GO111MODULE: on 36 | 37 | - name: Build binaries 38 | run: | 39 | cd cmds 40 | make 41 | env: 42 | GO111MODULE: on 43 | -------------------------------------------------------------------------------- /.github/workflows/zos-update-worker-main.yml: -------------------------------------------------------------------------------- 1 | name: Zos Update Worker 2 | 3 | defaults: 4 | run: 5 | working-directory: tools/zos-update-worker 6 | 7 | on: 8 | push: 9 | paths: 10 | - tools/zos-update-worker/** 11 | pull_request: 12 | paths: 13 | - tools/zos-update-worker/** 14 | 15 | jobs: 16 | Explore-Packge: 17 | runs-on: ubuntu-latest 18 | steps: 19 | - name: Check out repository code 20 | uses: actions/checkout@v3 21 | 22 | - name: Install GO 23 | uses: actions/setup-go@v3 24 | with: 25 | go-version: 1.19 26 | 27 | - name: golangci-lint 28 | uses: 
golangci/golangci-lint-action@v3 29 | with: 30 | args: --timeout 3m --verbose 31 | working-directory: tools/zos-update-worker 32 | 33 | - name: staticcheck 34 | uses: dominikh/staticcheck-action@v1.3.0 35 | with: 36 | version: "2022.1.3" 37 | working-directory: tools/zos-update-worker 38 | env: 39 | GO111MODULE: on 40 | 41 | - name: gofmt 42 | uses: Jerome1337/gofmt-action@v1.0.5 43 | with: 44 | gofmt-flags: "-l -d" 45 | gofmt-path: "tools/zos-update-worker" 46 | 47 | - name: Test 48 | run: go test -v ./... 49 | 50 | 51 | -------------------------------------------------------------------------------- /.github/workflows/zos-update-worker-release.yml: -------------------------------------------------------------------------------- 1 | name: Update Worker Release 2 | 3 | defaults: 4 | run: 5 | working-directory: tools/zos-update-worker 6 | 7 | on: 8 | push: 9 | tags: 10 | - "v*" 11 | 12 | jobs: 13 | zos-update-worker: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Check out repository code 17 | uses: actions/checkout@v3 18 | 19 | - name: Install GO 20 | uses: actions/setup-go@v3 21 | with: 22 | go-version: 1.19 23 | 24 | - name: Build zos update worker 25 | run: | 26 | make build 27 | 28 | - name: Get release 29 | id: get_release 30 | uses: bruceadams/get-release@v1.2.3 31 | env: 32 | GITHUB_TOKEN: ${{ github.token }} 33 | 34 | - name: Upload Release Asset for zos update worker 35 | id: upload-release-asset-worker 36 | uses: actions/upload-release-asset@v1 37 | env: 38 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 39 | with: 40 | upload_url: ${{ steps.get_release.outputs.upload_url }} 41 | asset_path: tools/zos-update-worker/bin/zos-update-worker 42 | asset_name: zos-update-worker 43 | asset_content_type: application/x-pie-executable 44 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # editors 2 | .vscode/ 3 | out/ 4 | /bin/ 5 | 6 | # tools binaries 7 | tools/bcdb_mock/bcdb_mock 8 | tools/tfuser/tfuser 9 | tools/tffarmer/tffarmer 10 | tools/schemac/schemac 11 | tools/updatectl/updatectl 12 | tools/bcdb_mock/allocations.json 13 | tools/bcdb_mock/farms.json 14 | tools/bcdb_mock/nodes.json 15 | tools/bcdb_mock/reservations.json 16 | 17 | qemu/node* 18 | qemu/*.efi 19 | .ssh 20 | 21 | # c object 22 | *.o 23 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | linters: 2 | enable: 3 | - errcheck 4 | - gofmt 5 | - govet 6 | - ineffassign 7 | - unconvert 8 | - staticcheck 9 | - gocyclo 10 | enable-all: false 11 | linters-settings: 12 | gocyclo: 13 | # Minimal code complexity to report. 14 | # Default: 30 (but we recommend 10-20) 15 | min-complexity: 100 16 | run: 17 | timeout: 20m 18 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @muhamadazmy @Omarabdul3ziz @ashraffouda @rawdaGastan @xmonader @Eslam-Nawara 2 | -------------------------------------------------------------------------------- /DEVELOPMENT.md: -------------------------------------------------------------------------------- 1 | # Development 2 | 3 | This is a draft guide to help people develop zos itself. It will give you also some tips and tricks to debug you changes, test new features 4 | and even test flists. 
5 | 6 | ## Starting your own virtual zos 7 | 8 | Please follow instructions under [qemu](qemu/README.md) on how to run your own zos in a VM 9 | 10 | ## Updating the code inside zos 11 | 12 | When you modify the code in the repo, build and start zos it's gonna use all zos binaries from `bin/` directory. If you need to change the code again you have 2 options:\ 13 | 14 | - Either restart the node 15 | - or `scp` the new binary to replace the one on the vm 16 | - depends on what you replacing you might need to start the service manually first. You also need 17 | to restart the service after replacing the binary with `zinit restart ` 18 | 19 | ## Logs 20 | 21 | - All the node logs can be inspected with `zinit log` 22 | - If you have no access to zos node you still can inspect the logs on `https://mon.grid.tf/` 23 | - You will need to request access to this. You can request access from OPS 24 | - If you are debugging a VM. You have also multiple ways to inspect the VM logs 25 | - If you can ssh to the zos node, inspect the logs under `/var/cache/modules/vmd/logs` 26 | - You also need to try out the console of your VM it can give some valuable information 27 | - If above still didn't give you enough information to debug the issue I would recommend trying the same flist on a node that you have access to so you have better control/visibility 28 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # 0-OS ![Tests](https://github.com/threefoldtech/zos/workflows/Tests%20and%20Coverage/badge.svg) [![Go Report Card](https://goreportcard.com/badge/github.com/threefoldtech/zos)](https://goreportcard.com/report/github.com/threefoldtech/zos) 2 | 3 | 0-OS is an autonomous operating system design to expose raw compute, storage and network capacity. 4 | 5 | This repository host the V2 of 0-OS which is a complete rewrite from scratch. If you want to know about the history and decision that motivated the creation of the V2, you can read [this article](https://github.com/threefoldtech/zosbase/blob/main/docs/internals/history/readme.md) 6 | 7 | 0-OS is mainly used to run node on the Threefold Grid. 8 | Head to and to learn more about Threefold and the grid. 9 | 10 | ## Documentation 11 | 12 | Start exploring the code base by first checking the [documentation](https://github.com/threefoldtech/zosbase/tree/main/docs) and [specification documents](/specs). 13 | 14 | An [FAQ](https://github.com/threefoldtech/zosbase/blob/main/docs/faq/readme.md) is also available for all the common questions. 15 | 16 | ## Setting up your development environment 17 | 18 | If you want to contribute read the [contribution guideline](CONTRIBUTING.md) and the documentation to setup your [development environment](qemu/README.md) 19 | 20 | ## Grid Networks 21 | 22 | 0-OS is deployed on 3 different "flavor" of network: 23 | 24 | - **production network**: Released of stable version. Used to run the real grid with real money. Cannot be reset ever. Only stable and battle tested feature reach this level. You can find the [dashboard here](https://dashboard.grid.tf/) 25 | - **test network**: Mostly stable features that need to be tested at scale, allow preview and test of new features. Always the latest and greatest. This network can be reset sometimes, but should be relatively stable. 
You can find the [dashboard here](https://dashboard.test.grid.tf/) 26 | - **QA network**: Mostly unstable features that need to be tested internally, allow preview and test of new features. Can be behind development. This network can be reset sometimes, but should be relatively stable. You can find the [dashboard here](https://dashboard.qa.grid.tf/) 27 | - **dev network**: ephemeral network only setup to develop and test new features. Can be created and reset at anytime. You can find the [dashboard here](https://dashboard.dev.grid.tf/) 28 | 29 | Learn more about the different network by reading the [upgrade documentation](https://github.com/threefoldtech/zosbase/blob/main/docs/internals/identity/upgrade.md#philosophy) 30 | 31 | ### Provisioning of workloads 32 | 33 | ZOS does not expose an interface, instead of wait for reservation to happen on a trusted 34 | source, and once this reservation is available, the node will actually apply it to reality. You can start reading about [provisioning](https://github.com/threefoldtech/zosbase/tree/main/docs/internals/provision) in this document. 35 | 36 | ## Owners 37 | 38 | [@maxux](https://github.com/maxux) [@muhamadazmy](https://github.com/muhamadazmy) [@delandtj](https://github.com/delandtj) [@leesmet](https://github.com/leesmet) 39 | 40 | ## Community 41 | 42 | If you have some questions or just want to hang out, you can find us on: 43 | 44 | - telegram: 45 | - Matrix: #zero-os:matrix.org 46 | -------------------------------------------------------------------------------- /VALUES.md: -------------------------------------------------------------------------------- 1 | # Team Values 2 | 3 | We want to make sure every member has a shared understanding of the goals and 4 | values we hold as a team: 5 | 6 | - Optimize for the **overall project**, not your own area or feature 7 | - A shortcut for one individual can mean a lot of extra work or disruption for 8 | the rest of the team. 9 | 10 | - Our repos (at least the master branch) should always be in release shape: **Always Green** 11 | - This lets us move faster in the mid and long term 12 | - This implies investments in build/test infrastructure to have fast, reliable 13 | tests to ensure that we can release at any time. 14 | - Extra discipline may require more work by individuals to keep the build in 15 | good state, but less work overall for the team. 16 | 17 | - Be **specific**, **respectful** and **courteous** 18 | - Disagreements are expected, but don't use broad 19 | generalizations, exaggerations, or judgement words that can be taken 20 | personally. Consider other people’s perspectives. Empathize with our users. Focus on the specific 21 | issue at hand, and remember that we all care about the project, first and 22 | foremost. 23 | - [GitHub issues](https://github.com/threefoldtech/zos/issues/new), 24 | document comments, or meetings are often better and higher bandwidth ways to 25 | communicate complex and nuanced design issues, as opposed to protracted 26 | heated live chats. 27 | - Be mindful of the terminology you are using, it may not be the same as 28 | someone else's and cause misunderstanding. To promote clear and precise 29 | communication, please define the terms you are using in context. 30 | 31 | - Raising issues is great, suggesting solutions is even better 32 | - Think of a proposed alternative and improvement rather than just what you 33 | perceive as wrong. 
34 | - If you have no immediate solution even after thinking about it - if 35 | something does seem significant, do raise it to someone that may be able to 36 | also think of solutions or to the group (don’t stay frustrated! Feel safe 37 | in bringing up issues) 38 | - Avoid rehashing old issues that have been already resolved/decided (unless 39 | you have new insights or information) 40 | 41 | - Be productive and **happy**, and most importantly, have _fun_ :-) 42 | -------------------------------------------------------------------------------- /bins/README.md: -------------------------------------------------------------------------------- 1 | # Extra Binaries 2 | 3 | Zero-OS image comes with minimal software suite shipped and some software required 4 | on runtime are downloaded via flist later, when the system booted. 5 | 6 | This directory contains a build script suite to build theses runtime dependencies. 7 | 8 | ## Requirement 9 | 10 | The whole build system is intended to be used inside a Docker container using `ubuntu:18.04` image 11 | like the initramfs build script. This can be used inside Github Actions Steps. 12 | 13 | ## Package system 14 | 15 | The build system use a simple mechanism which build different « packages ». Each packages 16 | are defined on the package directory. A package is a directory, the directory name is the package name. 17 | Inside this directory, you need a bash script called with the package name. 18 | 19 | Eg: `packages/mysoftware/mysoftware.sh` 20 | 21 | You can get an extra directory next to your bash script, called `files` and put inside some files 22 | you need during build phase. 23 | 24 | To build your package, you can invoke the build script like this: `bash bins-extra.sh --package mysoftware`. 25 | 26 | The build script takes care to set up an environment easy to use inside your package script. 27 | You have different variables setup you can use: 28 | 29 | - `FILESDIR`: the path where your files next to your build script are located 30 | - `WORKDIR`: a temporary directory you can use to build your software 31 | - `ROOTDIR`: root directory you can use to put your final files to packages 32 | - `DISTDIR`: a temporary directory where you can download stuff (like source code, etc.) 33 | 34 | Your build script have to get a function called `build_${pkgname}` eg: `build_mysoftware`. 35 | This function have to deal with everything you want and put final files into `ROOTDIR`. When this 36 | is done, the build script will ensure all the shared libraries needed by your binaries will be 37 | available on `ROOTDIR`. 38 | 39 | Take a look on existing build script to see how it works. 
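For instance, a minimal package script for a hypothetical `mysoftware` static binary could look like the sketch below. The package name, download URL and checksum are placeholders, and the `download_file` and `github_name` helpers are assumed to be the same ones the existing package scripts already call from the `bins-extra.sh` environment:

```bash
# packages/mysoftware/mysoftware.sh -- hypothetical sketch, not a real package
MYSOFTWARE_VERSION="1.0.0"
MYSOFTWARE_CHECKSUM="00000000000000000000000000000000"  # md5 of the downloaded file
MYSOFTWARE_LINK="https://example.com/mysoftware-${MYSOFTWARE_VERSION}-amd64-static"

download_mysoftware() {
    # download_file fetches the file into DISTDIR and verifies its checksum
    download_file ${MYSOFTWARE_LINK} ${MYSOFTWARE_CHECKSUM} mysoftware-${MYSOFTWARE_VERSION}
}

prepare_mysoftware() {
    echo "[+] prepare mysoftware"
    # github_name sets the name used when the resulting flist is published
    github_name "mysoftware-${MYSOFTWARE_VERSION}"
}

install_mysoftware() {
    echo "[+] install mysoftware"
    # everything that must end up in the final flist goes under ROOTDIR
    mkdir -p "${ROOTDIR}/usr/bin"
    cp -av ${DISTDIR}/mysoftware-${MYSOFTWARE_VERSION} "${ROOTDIR}/usr/bin/mysoftware"
    chmod +x "${ROOTDIR}/usr/bin/mysoftware"
}

# entry point invoked by the build system: build_<package name>
build_mysoftware() {
    pushd "${DISTDIR}"

    download_mysoftware
    prepare_mysoftware
    install_mysoftware

    popd
}
```

This mirrors the pattern used by existing packages such as `corex` or `iperf`, and would be built with `bash bins-extra.sh --package mysoftware` as described above.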
Script won't look foreign for you if you know 40 | a how ebuild scripts works for Gentoo :) 41 | -------------------------------------------------------------------------------- /bins/distfiles/.keep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/threefoldtech/zos/0199ee83ff9918918f18fd25fd7ba1cdc1c7abce/bins/distfiles/.keep -------------------------------------------------------------------------------- /bins/packages/basesystem/basesystem.sh: -------------------------------------------------------------------------------- 1 | build_basesystem() { 2 | apt-get update 3 | apt-get install -y curl wget build-essential autoconf unzip 4 | } 5 | 6 | -------------------------------------------------------------------------------- /bins/packages/cloudconsole/cloudconsole.sh: -------------------------------------------------------------------------------- 1 | CLOUDCONSOLE_VERSION="v0.1.2" 2 | CLOUDCONSOLE_CHECKSUM="ae22f069ec74d74b25ebbbddbc390fa2" 3 | CLOUDCONSOLE_LINK="https://github.com/threefoldtech/cloud-console/releases/download/${CLOUDCONSOLE_VERSION}/cloud-console" 4 | 5 | download_cloudconsole() { 6 | echo "downloading cloud-console" 7 | download_file ${CLOUDCONSOLE_LINK} ${CLOUDCONSOLE_CHECKSUM} cloud-console-${CLOUDCONSOLE_VERSION} 8 | } 9 | 10 | 11 | prepare_cloudconsole() { 12 | echo "[+] prepare cloud-console" 13 | github_name "cloud-console-${CLOUDCONSOLE_VERSION}" 14 | } 15 | 16 | install_cloudconsole() { 17 | echo "[+] install cloud-console" 18 | 19 | mkdir -p "${ROOTDIR}/usr/bin" 20 | 21 | cp ${DISTDIR}/cloud-console-${CLOUDCONSOLE_VERSION} ${ROOTDIR}/usr/bin/cloud-console 22 | chmod +x ${ROOTDIR}/usr/bin/* 23 | } 24 | 25 | build_cloudconsole() { 26 | pushd "${DISTDIR}" 27 | 28 | download_cloudconsole 29 | prepare_cloudconsole 30 | install_cloudconsole 31 | 32 | popd 33 | } 34 | -------------------------------------------------------------------------------- /bins/packages/cloudhypervisor/cloudhypervisor.sh: -------------------------------------------------------------------------------- 1 | CLOUDHYPERVISOR_VERSION="39.0" 2 | CLOUDHYPERVISOR_CHECKSUM="c6f0f32b8ed6e68e0f9ddff805d912c7" 3 | CLOUDHYPERVISOR_LINK="https://github.com/cloud-hypervisor/cloud-hypervisor/releases/download/v${CLOUDHYPERVISOR_VERSION}/cloud-hypervisor-static" 4 | 5 | 6 | download_cloudhypervisor() { 7 | echo "down" 8 | download_file ${CLOUDHYPERVISOR_LINK} ${CLOUDHYPERVISOR_CHECKSUM} cloud-hypervisor-${CLOUDHYPERVISOR_VERSION} 9 | } 10 | 11 | 12 | prepare_cloudhypervisor() { 13 | echo "[+] prepare cloud-hypervisor" 14 | github_name "cloud-hypervisor-${CLOUDHYPERVISOR_VERSION}" 15 | } 16 | 17 | install_cloudhypervisor() { 18 | echo "[+] install cloud-hypervisor" 19 | 20 | mkdir -p "${ROOTDIR}/usr/bin" 21 | 22 | cp ${DISTDIR}/cloud-hypervisor-${CLOUDHYPERVISOR_VERSION} ${ROOTDIR}/usr/bin/cloud-hypervisor 23 | chmod +x ${ROOTDIR}/usr/bin/* 24 | } 25 | 26 | build_cloudhypervisor() { 27 | pushd "${DISTDIR}" 28 | 29 | download_cloudhypervisor 30 | prepare_cloudhypervisor 31 | install_cloudhypervisor 32 | 33 | popd 34 | } 35 | -------------------------------------------------------------------------------- /bins/packages/containerd/containerd.sh: -------------------------------------------------------------------------------- 1 | CONTAINERD_VERSION="1.6.18" 2 | CONTAINERD_CHECKSUM="1ac525600fe7ba6ef76cf8a833153768" 3 | CONTAINERD_LINK="https://github.com/containerd/containerd/archive/refs/tags/v${CONTAINERD_VERSION}.tar.gz" 4 | 5 | 
dependencies_containerd() { 6 | export DEBIAN_FRONTEND=noninteractive 7 | export DEBCONF_NONINTERACTIVE_SEEN=true 8 | 9 | 10 | apt-get install -y btrfs-progs libbtrfs-dev libseccomp-dev build-essential pkg-config 11 | 12 | if [ -z $GOPATH ] || [ ! -z $CI ]; then 13 | if command -v go > /dev/null && [ ! -z $CI ]; then 14 | export GOPATH=$(go env GOPATH) 15 | else 16 | curl -L https://go.dev/dl/go1.20.1.linux-amd64.tar.gz > /tmp/go1.20.1.linux-amd64.tar.gz 17 | tar -C /usr/local -xzf /tmp/go1.20.1.linux-amd64.tar.gz 18 | mkdir -p /gopath 19 | 20 | export PATH=/usr/local/go/bin:$PATH 21 | fi 22 | fi 23 | 24 | CONTAINERD_HOME="/src/github.com/containerd" 25 | } 26 | 27 | download_containerd() { 28 | download_file ${CONTAINERD_LINK} ${CONTAINERD_CHECKSUM} containerd-${CONTAINERD_VERSION}.tar.gz 29 | } 30 | 31 | extract_containerd() { 32 | mkdir -p ${CONTAINERD_HOME} 33 | rm -rf ${CONTAINERD_HOME}/containerd 34 | 35 | pushd ${CONTAINERD_HOME} 36 | 37 | echo "[+] extracting: containerd-${CONTAINERD_VERSION}" 38 | tar -xf ${DISTDIR}/containerd-${CONTAINERD_VERSION}.tar.gz -C . 39 | mv containerd-${CONTAINERD_VERSION} containerd 40 | 41 | popd 42 | } 43 | 44 | prepare_containerd() { 45 | echo "[+] prepare containerd" 46 | github_name "containerd-${CONTAINERD_VERSION}" 47 | } 48 | 49 | compile_containerd() { 50 | echo "[+] compiling containerd" 51 | make CGO_CFLAGS=-I${ROOTDIR}/usr/include 52 | } 53 | 54 | install_containerd() { 55 | echo "[+] install containerd" 56 | 57 | mkdir -p "${ROOTDIR}/usr/bin" 58 | mkdir -p "${ROOTDIR}/etc/containerd" 59 | mkdir -p "${ROOTDIR}/etc/zinit" 60 | 61 | cp -av bin/* "${ROOTDIR}/usr/bin/" 62 | cp -av ${FILESDIR}/config.toml "${ROOTDIR}/etc/containerd/" 63 | cp -av ${FILESDIR}/containerd.yaml "${ROOTDIR}/etc/zinit/" 64 | } 65 | 66 | build_containerd() { 67 | pushd "${DISTDIR}" 68 | 69 | dependencies_containerd 70 | download_containerd 71 | extract_containerd 72 | 73 | popd 74 | pushd ${CONTAINERD_HOME}/containerd 75 | 76 | prepare_containerd 77 | compile_containerd 78 | install_containerd 79 | 80 | popd 81 | } 82 | -------------------------------------------------------------------------------- /bins/packages/containerd/files/config.toml: -------------------------------------------------------------------------------- 1 | root = "/var/lib/containerd" 2 | state = "/run/containerd" 3 | oom_score = 0 4 | disabled_plugins = ["cri"] 5 | 6 | [grpc] 7 | address = "/run/containerd/containerd.sock" 8 | uid = 0 9 | gid = 0 10 | max_recv_message_size = 16777216 11 | max_send_message_size = 16777216 12 | 13 | [debug] 14 | address = "" 15 | uid = 0 16 | gid = 0 17 | level = "debug" 18 | 19 | [metrics] 20 | address = "" 21 | grpc_histogram = false 22 | 23 | [cgroup] 24 | path = "" 25 | 26 | [plugins] 27 | [plugins.diff-service] 28 | default = ["walking"] 29 | [plugins.linux] 30 | shim = "containerd-shim" 31 | runtime = "runc" 32 | runtime_root = "" 33 | no_shim = false 34 | shim_debug = false 35 | [plugins.opt] 36 | path = "/opt/containerd" 37 | [plugins.restart] 38 | interval = "10s" 39 | [plugins.scheduler] 40 | pause_threshold = 0.02 41 | deletion_threshold = 0 42 | mutation_threshold = 100 43 | schedule_delay = "0s" 44 | startup_delay = "100ms" 45 | 46 | -------------------------------------------------------------------------------- /bins/packages/containerd/files/containerd.yaml: -------------------------------------------------------------------------------- 1 | exec: containerd 2 | test: ctr version 3 | after: 4 | - node-ready 5 | 6 | 
-------------------------------------------------------------------------------- /bins/packages/corex/corex.sh: -------------------------------------------------------------------------------- 1 | COREX_VERSION="2.1.4" 2 | COREX_CHECKSUM="6427daa42cf93ba65d0377e29ea57148" 3 | COREX_LINK="https://github.com/threefoldtech/corex/releases/download/${COREX_VERSION}/corex-${COREX_VERSION}-amd64-linux-static" 4 | 5 | 6 | download_corex() { 7 | download_file ${COREX_LINK} ${COREX_CHECKSUM} corex-${COREX_VERSION} 8 | } 9 | 10 | prepare_corex() { 11 | echo "[+] prepare corex" 12 | github_name "corex-${COREX_VERSION}" 13 | } 14 | 15 | install_corex() { 16 | echo "[+] install corex" 17 | 18 | mkdir -p "${ROOTDIR}/usr/bin" 19 | cp -av corex-${COREX_VERSION} "${ROOTDIR}/usr/bin/corex" 20 | chmod +x "${ROOTDIR}/usr/bin/corex" 21 | } 22 | 23 | build_corex() { 24 | pushd "${DISTDIR}" 25 | 26 | download_corex 27 | prepare_corex 28 | install_corex 29 | 30 | popd 31 | } 32 | -------------------------------------------------------------------------------- /bins/packages/cpubench/cpubench.sh: -------------------------------------------------------------------------------- 1 | CPU_BENCHMARK_VERSION="v0.1" 2 | CPU_BENCHMARK_CHECKSUM="25891eb15ec0b1bb8d745a8af3907895" 3 | CPU_BENCHMARK_LINK="https://github.com/threefoldtech/cpu-benchmark-simple/releases/download/${CPU_BENCHMARK_VERSION}/grid-cpubench-simple-0.1-linux-amd64-static" 4 | 5 | download_cpubench() { 6 | echo "downloading cpubench" 7 | download_file ${CPU_BENCHMARK_LINK} ${CPU_BENCHMARK_CHECKSUM} cpubench 8 | } 9 | 10 | 11 | prepare_cpubench() { 12 | echo "[+] prepare cpubench" 13 | github_name "cpubench-${CPU_BENCHMARK_VERSION}" 14 | } 15 | 16 | install_cpubench() { 17 | echo "[+] install cpubench" 18 | mkdir -p "${ROOTDIR}/usr/bin" 19 | 20 | cp ${DISTDIR}/cpubench ${ROOTDIR}/usr/bin/cpubench 21 | chmod +x ${ROOTDIR}/usr/bin/cpubench 22 | } 23 | 24 | build_cpubench() { 25 | pushd "${DISTDIR}" 26 | 27 | download_cpubench 28 | prepare_cpubench 29 | install_cpubench 30 | 31 | popd 32 | } 33 | -------------------------------------------------------------------------------- /bins/packages/hdparm/hdparm.sh: -------------------------------------------------------------------------------- 1 | HDPARM_VERSION="9.58" 2 | HDPARM_CHECKSUM="4652c49cf096a64683c05f54b4fa4679" 3 | HDPARM_LINK="https://sourceforge.net/projects/hdparm/files/hdparm/hdparm-9.58.tar.gz/download" 4 | 5 | download_hdparm() { 6 | download_file ${HDPARM_LINK} ${HDPARM_CHECKSUM} hdparm-${HDPARM_VERSION}.tar.gz 7 | } 8 | 9 | extract_hdparm() { 10 | echo "[+] extracting hdparm" 11 | 12 | rm -rf ${WORKDIR}/* 13 | tar -xf ${DISTDIR}/hdparm-${HDPARM_VERSION}.tar.gz -C ${WORKDIR} 14 | } 15 | 16 | prepare_hdparm() { 17 | echo "[+] prepare hdparm tools" 18 | github_name "hdparm-${HDPARM_VERSION}" 19 | } 20 | 21 | compile_hdparm() { 22 | echo "[+] compile hdparm" 23 | pwd 24 | make ${MAKEOPTS} install 25 | 26 | echo "[+] building hdparm images" 27 | } 28 | 29 | install_hdparm() { 30 | echo "[+] install hdparm" 31 | 32 | mkdir -p "${ROOTDIR}/sbin/" 33 | cp hdparm ${ROOTDIR}/sbin/ 34 | 35 | echo "[+] install hdparm images" 36 | 37 | } 38 | 39 | build_hdparm() { 40 | pushd "${DISTDIR}" 41 | 42 | download_hdparm 43 | extract_hdparm 44 | 45 | popd 46 | pushd ${WORKDIR}/hdparm-${HDPARM_VERSION} 47 | 48 | prepare_hdparm 49 | compile_hdparm 50 | install_hdparm 51 | 52 | popd 53 | } 54 | 55 | -------------------------------------------------------------------------------- /bins/packages/iperf/iperf.sh: 
-------------------------------------------------------------------------------- 1 | IPERF_VERSION="3.14" 2 | IPERF_CHECKSUM="9ef3769b601d79250bb1593b0146dbcb" 3 | IPERF_LINK="https://github.com/userdocs/iperf3-static/releases/download/${IPERF_VERSION}/iperf3-amd64" 4 | 5 | download_iperf() { 6 | echo "downloading iperf" 7 | download_file ${IPERF_LINK} ${IPERF_CHECKSUM} iperf-${IPERF_VERSION} 8 | } 9 | 10 | 11 | prepare_iperf() { 12 | echo "[+] prepare iperf" 13 | github_name "iperf-${IPERF_VERSION}" 14 | } 15 | 16 | install_iperf() { 17 | echo "[+] install iperf" 18 | 19 | mkdir -p "${ROOTDIR}/usr/bin" 20 | 21 | cp ${DISTDIR}/iperf-${IPERF_VERSION} ${ROOTDIR}/usr/bin/iperf 22 | chmod +x ${ROOTDIR}/usr/bin/* 23 | } 24 | 25 | build_iperf() { 26 | pushd "${DISTDIR}" 27 | 28 | download_iperf 29 | prepare_iperf 30 | install_iperf 31 | 32 | popd 33 | } 34 | -------------------------------------------------------------------------------- /bins/packages/mdadm/mdadm.sh: -------------------------------------------------------------------------------- 1 | MDADM_VERSION="4.3" 2 | MDADM_LINK="https://kernel.googlesource.com/pub/scm/utils/mdadm/mdadm" 3 | 4 | download_mdadm() { 5 | download_git $MDADM_LINK "mdadm-$MDADM_VERSION" 6 | } 7 | 8 | prepare_mdadm() { 9 | echo "[+] prepare mdadm" 10 | github_name "mdadm-${MDADM_VERSION}" 11 | } 12 | 13 | compile_mdadm() { 14 | echo "[+] compiling mdadm" 15 | make 16 | } 17 | 18 | install_mdadm() { 19 | echo "[+] installing mdadm" 20 | mkdir -p "${ROOTDIR}/bin" 21 | cp mdadm "${ROOTDIR}/bin/mdadm" 22 | } 23 | 24 | build_mdadm() { 25 | apt-get install -y \ 26 | build-essential \ 27 | git \ 28 | libudev-dev 29 | 30 | pushd "${WORKDIR}" 31 | 32 | download_mdadm 33 | prepare_mdadm 34 | 35 | pushd "mdadm" 36 | compile_mdadm 37 | install_mdadm 38 | popd 39 | 40 | popd 41 | 42 | } 43 | -------------------------------------------------------------------------------- /bins/packages/misc/misc.sh: -------------------------------------------------------------------------------- 1 | MISC_VERSION="1" 2 | 3 | prepare_misc() { 4 | echo "[+] prepare 0-fs" 5 | github_name "misc-${MISC_VERSION}" 6 | } 7 | 8 | install_misc() { 9 | echo "[+] install misc" 10 | 11 | mkdir -p "${ROOTDIR}/" 12 | cp -av $1/packages/misc/root/* "${ROOTDIR}/" 13 | } 14 | 15 | build_misc() { 16 | base=$(pwd) 17 | pushd "${DISTDIR}" 18 | 19 | prepare_misc 20 | install_misc $base 21 | 22 | popd 23 | } 24 | -------------------------------------------------------------------------------- /bins/packages/misc/readme.md: -------------------------------------------------------------------------------- 1 | # The package 2 | 3 | this package include miscellaneous files that don't really fit a certain package 4 | 5 | ## Files 6 | 7 | ### `prop.script` 8 | 9 | this file is a **HARD LINK** to the file with the same name at `/bootstrap/usr/share/udhcp/` this file exists in two places 10 | because 11 | 12 | - It needs to be part of the 0-initramfs (zos kernel) because it's needed to exist before the node has internet connection 13 | and the bootstrap directory is what is included during the kernel build 14 | - But it also need to be in a package because that any node that is booted with an older build of the kernel must also get the same 15 | file 16 | 17 | I am not sure if the hard-link is maintained in a github repo (I assume not) so any changes to the prop.script must be maintained in 18 | the two copies 19 | 20 | ### `volstat.sh` 21 | 22 | this file is used by virtiofsd to fetch the real state of the shard directory 
(total size and available space) based on btrfs subvolume quota information 23 | -------------------------------------------------------------------------------- /bins/packages/misc/root/usr/share/btrfs/volstat.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | vol=$1 5 | 6 | # The given volume can either be an overlay mount 7 | # or an actual btrfs subvol. 8 | # in case of an overlay we need to extract the upperdir path 9 | if options=$(findmnt $vol -t overlay -r -n -o OPTIONS); then 10 | # extract the upperdir path 11 | vol=$(echo $options | cut -d ',' -f 4 | cut -d '=' -f 2) 12 | vol=${vol%"/rw"} 13 | fi 14 | 15 | if ! info=$(btrfs subvol show $vol 2>/dev/null); then 16 | echo "invalid btrfs volume '$vol'" 17 | exit 1 18 | fi 19 | 20 | # now we hopefully sure we have a path to an actual btrfs subvol 21 | # we then try to extract the quota information 22 | id=$(echo "$info" | grep 'Subvolume ID:'| cut -f 4) 23 | output=$(btrfs qgroup show --raw -r $vol| grep "^0/$id") 24 | 25 | size=$(echo $output | cut -d ' ' -f 4) 26 | used=$(echo $output | cut -d ' ' -f 3) 27 | 28 | echo $size $(( $size - $used )) 29 | -------------------------------------------------------------------------------- /bins/packages/misc/root/usr/share/udhcp/probe.script: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # this prope script can work as a udhcpc script 4 | # that does not configure the interface but instead 5 | # prints out the result from the bound operation 6 | # in a json object. 7 | # this is usually used as 8 | # udhcpc -qf --now -s -i inf 9 | 10 | stage=$1 11 | 12 | # we only handle bound process 13 | if [ $stage != "bound" ]; then 14 | exit 0 15 | fi 16 | 17 | # the idea is that we print out 18 | # all config received in a json 19 | # object so others can read that 20 | # out 21 | cat < /dev/null && [ ! -z $CI ]; then 13 | export GOPATH=$(go env GOPATH) 14 | else 15 | curl -L https://go.dev/dl/go1.20.1.linux-amd64.tar.gz > /tmp/go1.20.1.linux-amd64.tar.gz 16 | tar -C /usr/local -xzf /tmp/go1.20.1.linux-amd64.tar.gz 17 | mkdir -p /gopath 18 | 19 | export PATH=/usr/local/go/bin:$PATH 20 | fi 21 | fi 22 | 23 | RUNC_HOME="/src/github.com/opencontainers" 24 | } 25 | 26 | download_runc() { 27 | download_file ${RUNC_LINK} ${RUNC_CHECKSUM} runc-${RUNC_VERSION}.tar.gz 28 | } 29 | 30 | extract_runc() { 31 | mkdir -p ${RUNC_HOME} 32 | rm -rf ${RUNC_HOME}/runc 33 | 34 | pushd ${RUNC_HOME} 35 | 36 | echo "[+] extracting: runc-${RUNC_VERSION}" 37 | tar -xf ${DISTDIR}/runc-${RUNC_VERSION}.tar.gz -C . 
38 | mv runc-${RUNC_VERSION} runc 39 | 40 | popd 41 | } 42 | 43 | prepare_runc() { 44 | echo "[+] prepare runc" 45 | github_name "runc-${RUNC_VERSION}" 46 | } 47 | 48 | compile_runc() { 49 | echo "[+] compiling runc" 50 | make BUILDTAGS='seccomp' 51 | } 52 | 53 | install_runc() { 54 | echo "[+] install runc" 55 | mkdir -p "${ROOTDIR}/usr/bin" 56 | cp -av runc "${ROOTDIR}/usr/bin/" 57 | } 58 | 59 | build_runc() { 60 | pushd "${DISTDIR}" 61 | 62 | dependencies_runc 63 | download_runc 64 | extract_runc 65 | 66 | popd 67 | pushd ${RUNC_HOME}/runc 68 | 69 | prepare_runc 70 | compile_runc 71 | install_runc 72 | 73 | popd 74 | } 75 | -------------------------------------------------------------------------------- /bins/packages/shimlogs/shimlogs.sh: -------------------------------------------------------------------------------- 1 | SHIMLOGS_VERSION="0.3" 2 | SHIMLOGS_CHECKSUM="f2b3ceaca8abe09fe6b96b694569d0a3" 3 | SHIMLOGS_LINK="https://github.com/threefoldtech/shim-logs/archive/v${SHIMLOGS_VERSION}.tar.gz" 4 | 5 | dependencies_shimlogs() { 6 | apt-get install -y libjansson-dev libhiredis-dev build-essential 7 | } 8 | 9 | download_shimlogs() { 10 | download_file ${SHIMLOGS_LINK} ${SHIMLOGS_CHECKSUM} shim-logs-${SHIMLOGS_VERSION}.tar.gz 11 | } 12 | 13 | extract_shimlogs() { 14 | tar -xf ${DISTDIR}/shim-logs-${SHIMLOGS_VERSION}.tar.gz -C ${WORKDIR} 15 | } 16 | 17 | prepare_shimlogs() { 18 | echo "[+] prepare shim-logs" 19 | github_name "shim-logs-${SHIMLOGS_VERSION}" 20 | } 21 | 22 | compile_shimlogs() { 23 | echo "[+] compile shim-logs" 24 | make 25 | } 26 | 27 | install_shimlogs() { 28 | echo "[+] install shim-logs" 29 | 30 | mkdir -p "${ROOTDIR}/bin" 31 | 32 | cp shim-logs ${ROOTDIR}/bin/shim-logs 33 | chmod +x ${ROOTDIR}/bin/shim-logs 34 | } 35 | 36 | build_shimlogs() { 37 | pushd "${DISTDIR}" 38 | 39 | dependencies_shimlogs 40 | download_shimlogs 41 | extract_shimlogs 42 | 43 | popd 44 | pushd ${WORKDIR}/shim-logs-${SHIMLOGS_VERSION} 45 | 46 | prepare_shimlogs 47 | compile_shimlogs 48 | install_shimlogs 49 | 50 | popd 51 | } 52 | 53 | -------------------------------------------------------------------------------- /bins/packages/tailstream/tailstream.sh: -------------------------------------------------------------------------------- 1 | TAILSTREAM_VERSION="0.1.6" 2 | TAILSTREAM_CHECKSUM="adac25b1a6bbdd6a207156167bd323aa" 3 | TAILSTREAM_LINK="https://github.com/threefoldtech/tailstream/releases/download/v${TAILSTREAM_VERSION}/tailstream" 4 | 5 | download_tailstream() { 6 | download_file ${TAILSTREAM_LINK} ${TAILSTREAM_CHECKSUM} tailstream-${TAILSTREAM_VERSION} 7 | } 8 | 9 | prepare_tailstream() { 10 | echo "[+] prepare tailstream" 11 | github_name "tailstream-${TAILSTREAM_VERSION}" 12 | } 13 | 14 | install_tailstream() { 15 | echo "[+] install tailstream" 16 | 17 | mkdir -p "${ROOTDIR}/bin" 18 | cp -av tailstream-${TAILSTREAM_VERSION} "${ROOTDIR}/bin/tailstream" 19 | chmod +x "${ROOTDIR}/bin/tailstream" 20 | } 21 | 22 | build_tailstream() { 23 | pushd "${DISTDIR}" 24 | 25 | download_tailstream 26 | prepare_tailstream 27 | install_tailstream 28 | 29 | popd 30 | } 31 | -------------------------------------------------------------------------------- /bins/packages/tpm/tpm.sh: -------------------------------------------------------------------------------- 1 | TPM_VERSION="5.2" 2 | TPM_CHECKSUM="0057615ef43b9322d4577fc3bde0e8d6" 3 | TPM_LINK="https://github.com/tpm2-software/tpm2-tools/releases/download/${TPM_VERSION}/tpm2-tools-${TPM_VERSION}.tar.gz" 4 | 5 | TSS_VERSION="3.2.0" 6 | 
TSS_CHECKSUM="0d60d0df3fd0daae66881a3022281323" 7 | TSS_LINK="https://github.com/tpm2-software/tpm2-tss/releases/download/${TSS_VERSION}/tpm2-tss-${TSS_VERSION}.tar.gz" 8 | 9 | download_tss() { 10 | download_file $TSS_LINK $TSS_CHECKSUM 11 | } 12 | 13 | extract_tss() { 14 | if [ ! -d "tpm2-tss-${TSS_VERSION}" ]; then 15 | echo "[+] extracting: tpm2-tss-${TSS_VERSION}" 16 | tar -xf ${DISTDIR}/tpm2-tss-${TSS_VERSION}.tar.gz -C ${WORKDIR} 17 | fi 18 | } 19 | 20 | prepare_tss() { 21 | echo "[+] preparing tpm-tss" 22 | apt -y update 23 | apt -y install \ 24 | autoconf-archive \ 25 | libcmocka0 \ 26 | libcmocka-dev \ 27 | procps \ 28 | iproute2 \ 29 | build-essential \ 30 | git \ 31 | pkg-config \ 32 | gcc \ 33 | libtool \ 34 | automake \ 35 | libssl-dev \ 36 | uthash-dev \ 37 | autoconf \ 38 | doxygen \ 39 | libjson-c-dev \ 40 | libini-config-dev \ 41 | libcurl4-openssl-dev \ 42 | libltdl-dev 43 | 44 | ./configure --prefix=/usr 45 | } 46 | 47 | compile_tss() { 48 | echo "[+] compiling tpm-tss" 49 | make ${MAKEOPTS} 50 | } 51 | 52 | install_tss() { 53 | echo "[+] installing tpm-tss" 54 | make DESTDIR="${ROOTDIR}" install 55 | } 56 | 57 | download_tpm() { 58 | download_file $TPM_LINK $TPM_CHECKSUM 59 | } 60 | 61 | extract_tpm() { 62 | if [ ! -d "tpm2-tools-${TPM_VERSION}" ]; then 63 | echo "[+] extracting: tpm2-tools-${TPM_VERSION}" 64 | tar -xf ${DISTDIR}/tpm2-tools-${TPM_VERSION}.tar.gz -C ${WORKDIR} 65 | fi 66 | } 67 | 68 | prepare_tpm() { 69 | echo "[+] preparing tpm" 70 | github_name "tpm-${TPM_VERSION}" 71 | ./configure --prefix=/usr 72 | } 73 | 74 | compile_tpm() { 75 | echo "[+] compiling tpm" 76 | make ${MAKEOPTS} 77 | } 78 | 79 | install_tpm() { 80 | echo "[+] installing tpm" 81 | make DESTDIR="${ROOTDIR}" install 82 | } 83 | 84 | build_tpm() { 85 | pushd "${DISTDIR}" 86 | download_tss 87 | extract_tss 88 | popd 89 | 90 | pushd "${WORKDIR}/tpm2-tss-${TSS_VERSION}" 91 | 92 | prepare_tss 93 | compile_tss 94 | install_tss 95 | 96 | popd 97 | 98 | pushd "${DISTDIR}" 99 | download_tpm 100 | extract_tpm 101 | popd 102 | 103 | pushd "${WORKDIR}/tpm2-tools-${TPM_VERSION}" 104 | 105 | export PKG_CONFIG_PATH="${ROOTDIR}/usr/lib/pkgconfig/" 106 | export CFLAGS="-I${ROOTDIR}/usr/include" 107 | export LDFLAGS="-L${ROOTDIR}/usr/lib" 108 | 109 | prepare_tpm 110 | compile_tpm 111 | install_tpm 112 | 113 | unset PKG_CONFIG_PATH 114 | unset CFLAGS 115 | unset LDFLAGS 116 | 117 | popd 118 | 119 | clean_up 120 | } 121 | 122 | clean_up(){ 123 | pwd 124 | pushd releases/tpm 125 | rm -rf lib/*.a 126 | rm -rf lib/*.la 127 | rm -rf etc/init.d 128 | rm -rf usr/lib/*.a 129 | rm -rf usr/lib/*.la 130 | rm -rf usr/share/doc 131 | rm -rf usr/share/gtk-doc 132 | rm -rf usr/share/man 133 | rm -rf usr/share/locale 134 | rm -rf usr/share/info 135 | rm -rf usr/share/bash-completion 136 | rm -rf usr/lib/pkgconfig 137 | rm -rf usr/include 138 | popd 139 | } -------------------------------------------------------------------------------- /bins/packages/traefik/traefik.sh: -------------------------------------------------------------------------------- 1 | TRAEFIK_VERSION="2.9.9" 2 | TRAEFIK_CHECKSUM="83d21fc65ac5f68df985c4dca523e590" 3 | TRAEFIK_LINK="https://github.com/traefik/traefik/releases/download/v${TRAEFIK_VERSION}/traefik_v${TRAEFIK_VERSION}_linux_amd64.tar.gz" 4 | TRAEFIK_PACKAGE="vector.tar.gz" 5 | 6 | download_traefik() { 7 | echo "download traefik" 8 | download_file ${TRAEFIK_LINK} ${TRAEFIK_CHECKSUM} ${TRAEFIK_PACKAGE} 9 | } 10 | 11 | extract_traefik() { 12 | tar -xf ${DISTDIR}/${TRAEFIK_PACKAGE} -C ${WORKDIR} 
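# the tarball ships a single prebuilt traefik binary; install_traefik later copies it from ${WORKDIR} into ${ROOTDIR}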
13 | } 14 | 15 | prepare_traefik() { 16 | echo "[+] prepare traefik" 17 | github_name "traefik-${TRAEFIK_VERSION}" 18 | } 19 | 20 | install_traefik() { 21 | echo "[+] install traefik" 22 | 23 | mkdir -p "${ROOTDIR}" 24 | 25 | cp ${WORKDIR}/traefik ${ROOTDIR}/ 26 | chmod +x ${ROOTDIR}/* 27 | strip ${ROOTDIR}/* 28 | } 29 | 30 | build_traefik() { 31 | pushd "${DISTDIR}" 32 | download_traefik 33 | extract_traefik 34 | popd 35 | 36 | prepare_traefik 37 | install_traefik 38 | } 39 | -------------------------------------------------------------------------------- /bins/packages/vector/files/vector.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sources: 3 | zinit: 4 | type: exec 5 | mode: streaming 6 | command: 7 | - zinit 8 | - log 9 | host: 10 | type: host_metrics 11 | network: 12 | devices: 13 | excludes: 14 | - b-* 15 | - br-* 16 | - dumdum 17 | - mydumdum 18 | - dummy* 19 | - tozos* 20 | filesystem: 21 | mountpoints: 22 | excludes: 23 | - /var/cache 24 | - /var/cache/* 25 | - /var/run/* 26 | - /dev 27 | - /dev/* 28 | - /sys 29 | - /sys/* 30 | - /proc 31 | - /proc/* 32 | transforms: 33 | metrics: 34 | type: remap 35 | inputs: 36 | - host 37 | source: |- 38 | tags = { 39 | "node": get_env_var("NODE") ?? "unknown", 40 | "network": get_env_var("NETWORK") ?? "unknown", 41 | "farm": get_env_var("FARM") ?? "unknown", 42 | } 43 | 44 | .tags = merge!(.tags, tags) 45 | del(.tags.host) 46 | logs: 47 | type: remap 48 | inputs: 49 | - zinit 50 | source: |- 51 | structured, err = parse_regex(.message, r'\[(?P\+|\-)\] (?P[^:]+):') 52 | if err == null { 53 | . = merge(., structured) 54 | } 55 | 56 | level, err = parse_regex(.message, r'(?Pdebug|info|error|warn|fatal|panic)') 57 | if err != null { 58 | .level = "info" 59 | } else { 60 | . = merge(., level) 61 | } 62 | 63 | if .output == "+" { 64 | .stream = "stdout" 65 | } else { 66 | .stream = "stderr" 67 | } 68 | 69 | .node = get_env_var("NODE") ?? "unknown" 70 | .network = get_env_var("NETWORK") ?? "unknown" 71 | .farm = get_env_var("FARM") ?? 
"unknown" 72 | 73 | del(.output) 74 | del(.command) 75 | del(.host) 76 | del(.source_type) 77 | del(.pid) 78 | 79 | sinks: 80 | # out: 81 | # inputs: 82 | # - metrics 83 | # type: console 84 | # encoding: 85 | # codec: "json" 86 | # TODO: enable once we have vector installed 87 | vector: 88 | type: vector 89 | inputs: 90 | - metrics 91 | address: agg.grid.tf:24901 92 | compression: true 93 | 94 | loki: 95 | inputs: 96 | - logs 97 | type: loki 98 | labels: 99 | node: "{{ node }}" 100 | level: "{{ level }}" 101 | module: "{{ module }}" 102 | network: "{{ network }}" 103 | stream: "{{ stream }}" 104 | farm: "{{ farm }}" 105 | endpoint: http://loki.grid.tf:3100 106 | encoding: 107 | codec: text 108 | compression: snappy 109 | healthcheck: 110 | enabled: false 111 | -------------------------------------------------------------------------------- /bins/packages/vector/files/zinit-vector.yaml: -------------------------------------------------------------------------------- 1 | # pkill -HUP is for older version of zinit that doesn't start process 2 | # in its own process group 3 | exec: | 4 | sh -c ' 5 | set -e 6 | 7 | pkill vector || true 8 | 9 | export NODE=$(identityd -address) 10 | export FARM=$(identityd -farm) 11 | export NETWORK=$(identityd -net) 12 | 13 | if [ "${NODE}" = "" ]; then 14 | echo "cannot get node id" 15 | exit 1 16 | fi 17 | 18 | if [ "${FARM}" = "" ]; then 19 | echo "cannot get farm id" 20 | exit 1 21 | fi 22 | 23 | if [ "${NETWORK}" = "" ]; then 24 | echo "cannot get farm id" 25 | exit 1 26 | fi 27 | 28 | exec vector -c /etc/vector/vector.yaml 29 | ' 30 | log: stdout 31 | after: 32 | - identityd 33 | -------------------------------------------------------------------------------- /bins/packages/vector/vector.sh: -------------------------------------------------------------------------------- 1 | VECTOR_VERSION="0.25.1" 2 | VECTOR_CHECKSUM="07bcae774d8f6dc5f34a5f4f7bafd313" 3 | VECTOR_LINK="https://github.com/vectordotdev/vector/releases/download/v${VECTOR_VERSION}/vector-${VECTOR_VERSION}-x86_64-unknown-linux-musl.tar.gz" 4 | VECTOR_PACKAGE="vector.tar.gz" 5 | 6 | download_vector() { 7 | download_file ${VECTOR_LINK} ${VECTOR_CHECKSUM} ${VECTOR_PACKAGE} 8 | } 9 | 10 | extract_vector() { 11 | tar -xf ${DISTDIR}/${VECTOR_PACKAGE} -C ${WORKDIR} 12 | } 13 | 14 | prepare_vector() { 15 | echo "[+] prepare vector" 16 | github_name "vector-${VECTOR_VERSION}" 17 | } 18 | 19 | compile_vector() { 20 | echo "[+] compile vector" 21 | } 22 | 23 | install_vector() { 24 | echo "[+] install vector" 25 | 26 | mkdir -p "${ROOTDIR}/usr/bin" 27 | mkdir -p "${ROOTDIR}/etc/zinit" 28 | mkdir -p "${ROOTDIR}/etc/vector" 29 | 30 | cp ${WORKDIR}/vector-x86_64-unknown-linux-musl/bin/vector ${ROOTDIR}/usr/bin/vector 31 | 32 | cp ${FILESDIR}/zinit-vector.yaml ${ROOTDIR}/etc/zinit/vector.yaml 33 | cp ${FILESDIR}/vector.yaml ${ROOTDIR}/etc/vector/ 34 | 35 | chmod +x ${ROOTDIR}/usr/bin/* 36 | } 37 | 38 | build_vector() { 39 | pushd "${DISTDIR}" 40 | 41 | download_vector 42 | extract_vector 43 | 44 | popd 45 | pushd ${WORKDIR} 46 | 47 | prepare_vector 48 | compile_vector 49 | install_vector 50 | 51 | popd 52 | } 53 | -------------------------------------------------------------------------------- /bins/packages/virtiofsd/virtiofsd.sh: -------------------------------------------------------------------------------- 1 | VIRTIOFSD_VERSION="v1.10.2" 2 | VIRTIOFSD_CHECKSUM="df1ed186ee84843605137758e2aa6e80" 3 | 
VIRTIOFSD_LINK="https://gitlab.com/muhamad.azmy/virtiofsd/-/jobs/6547244336/artifacts/download?file_type=archive" 4 | 5 | download_virtiofsd() { 6 | download_file ${VIRTIOFSD_LINK} ${VIRTIOFSD_CHECKSUM} virtiofsd-${VIRTIOFSD_VERSION}.zip 7 | } 8 | 9 | prepare_virtiofsd() { 10 | echo "[+] prepare virtiofsd" 11 | github_name "virtiofsd-${VIRTIOFSD_VERSION}" 12 | } 13 | 14 | install_virtiofsd() { 15 | echo "[+] install virtiofsd" 16 | 17 | mkdir -p "${ROOTDIR}/bin" 18 | unzip -p virtiofsd-${VIRTIOFSD_VERSION} > "${ROOTDIR}/bin/virtiofsd-rs" 19 | chmod +x "${ROOTDIR}/bin/virtiofsd-rs" 20 | } 21 | 22 | build_virtiofsd() { 23 | pushd "${DISTDIR}" 24 | 25 | download_virtiofsd 26 | prepare_virtiofsd 27 | install_virtiofsd 28 | 29 | popd 30 | } 31 | -------------------------------------------------------------------------------- /bins/packages/virtwhat/virtwhat.sh: -------------------------------------------------------------------------------- 1 | # VIRTWHAT_Git 2 | VIRTWHAT_VERSION="d163be0" 3 | VIRTWHAT_CHECKSUM="d84c9a51e2869fbe949a83adf7f80b56" 4 | VIRTWHAT_LINK="http://git.annexia.org/?p=virt-what.git;a=snapshot;h=${VIRTWHAT_VERSION};sf=tgz" 5 | 6 | download_virtwhat() { 7 | download_file ${VIRTWHAT_LINK} ${VIRTWHAT_CHECKSUM} "virt-what-${VIRTWHAT_VERSION}.tar.gz" 8 | } 9 | 10 | extract_virtwhat() { 11 | tar -xf ${DISTDIR}/virt-what-${VIRTWHAT_VERSION}.tar.gz -C ${WORKDIR} 12 | } 13 | 14 | prepare_virtwhat() { 15 | echo "[+] prepare virt-what" 16 | github_name "virtwhat-${VIRTWHAT_VERSION}" 17 | } 18 | 19 | compile_virtwhat() { 20 | echo "[+] compile virt-what" 21 | pushd ${WORKDIR}/virt-what-${VIRTWHAT_VERSION} 22 | autoreconf -i 23 | autoconf 24 | ./configure 25 | make 26 | popd 27 | } 28 | 29 | install_virtwhat() { 30 | echo "[+] install virt-what" 31 | 32 | mkdir -p "${ROOTDIR}/usr/bin" 33 | 34 | 35 | cp ${WORKDIR}/virt-what-${VIRTWHAT_VERSION}/virt-what ${ROOTDIR}/usr/bin/virt-what 36 | cp ${WORKDIR}/virt-what-${VIRTWHAT_VERSION}/virt-what-cpuid-helper ${ROOTDIR}/usr/bin/virt-what-cpuid-helper 37 | 38 | chmod +x ${ROOTDIR}/usr/bin/virt-what 39 | chmod +x ${ROOTDIR}/usr/bin/virt-what-cpuid-helper 40 | } 41 | 42 | build_virtwhat() { 43 | pushd "${DISTDIR}" 44 | 45 | download_virtwhat 46 | extract_virtwhat 47 | 48 | popd 49 | pushd ${WORKDIR} 50 | 51 | prepare_virtwhat 52 | compile_virtwhat 53 | install_virtwhat 54 | 55 | popd 56 | } 57 | 58 | -------------------------------------------------------------------------------- /bins/packages/yggdrasil/yggdrasil.sh: -------------------------------------------------------------------------------- 1 | YGGDRASIL_VERSION="0.5.4" 2 | YGGDRASIL_CHECKSUM="77ecd0750b884eba9fdb7ee936290629" 3 | YGGDRASIL_LINK="https://github.com/yggdrasil-network/yggdrasil-go/releases/download/v${YGGDRASIL_VERSION}/yggdrasil-${YGGDRASIL_VERSION}-amd64.deb" 4 | 5 | download_yggdrasil() { 6 | download_file ${YGGDRASIL_LINK} ${YGGDRASIL_CHECKSUM} yggdrasil-v${YGGDRASIL_VERSION}.deb 7 | } 8 | 9 | extract_yggdrasil() { 10 | apt-get install ./yggdrasil-v${YGGDRASIL_VERSION}.deb 11 | } 12 | 13 | prepare_yggdrasil() { 14 | echo "[+] prepare yggdrasil" 15 | github_name "yggdrasil-${YGGDRASIL_VERSION}" 16 | } 17 | 18 | install_yggdrasil() { 19 | echo "[+] install yggdrasil" 20 | 21 | mkdir -p "${ROOTDIR}/usr/bin" 22 | mkdir -p "${ROOTDIR}/etc/yggdrasil" 23 | mkdir -p "${ROOTDIR}/etc/zinit" 24 | 25 | cp -av $(which yggdrasil) "${ROOTDIR}/usr/bin/" 26 | cp -av $(which yggdrasilctl) "${ROOTDIR}/usr/bin/" 27 | } 28 | 29 | build_yggdrasil() { 30 | pushd "${DISTDIR}" 31 | 
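# fetch the upstream .deb, install it inside the build container (extract step),
# then copy the yggdrasil binaries it puts on PATH into ${ROOTDIR} (install step)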
download_yggdrasil 32 | extract_yggdrasil 33 | popd 34 | 35 | prepare_yggdrasil 36 | install_yggdrasil 37 | } 38 | -------------------------------------------------------------------------------- /bins/releases/.keep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/threefoldtech/zos/0199ee83ff9918918f18fd25fd7ba1cdc1c7abce/bins/releases/.keep -------------------------------------------------------------------------------- /bins/workdir/.keep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/threefoldtech/zos/0199ee83ff9918918f18fd25fd7ba1cdc1c7abce/bins/workdir/.keep -------------------------------------------------------------------------------- /bootstrap/Makefile: -------------------------------------------------------------------------------- 1 | build: 2 | cd ../cmds && make internet 3 | cd bootstrap && make release 4 | 5 | install: build 6 | mkdir -p ${ROOT}/etc/zinit/ 7 | mkdir -p ${ROOT}/bin 8 | mkdir -p ${ROOT}/sbin 9 | 10 | # install interent 11 | cp ../bin/internet ${ROOT}/bin 12 | 13 | # install bootstrap 14 | cp -a etc ${ROOT} 15 | cp -a usr ${ROOT} 16 | cp bootstrap/target/x86_64-unknown-linux-musl/release/bootstrap ${ROOT}/sbin/ 17 | -------------------------------------------------------------------------------- /bootstrap/bootstrap/.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | **/*.rs.bk -------------------------------------------------------------------------------- /bootstrap/bootstrap/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "bootstrap" 3 | version = "0.1.0" 4 | authors = ["Muhamad Azmy "] 5 | edition = "2018" 6 | build = false 7 | 8 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 9 | 10 | [dependencies] 11 | retry = "0.5" 12 | shlex = "0.1" 13 | anyhow = "1.0" 14 | reqwest = { version = "0.11", features = ["blocking", "json"]} 15 | serde = { version = "1.0", features = ["derive"] } 16 | serde_json = "1.0" 17 | log = "0.4" 18 | simple_logger = "1.3" 19 | nix = "0.15" 20 | openssl-sys = "*" 21 | walkdir = "2" 22 | clap = "2.33" 23 | exec = "0.3" 24 | 25 | [features] 26 | # Force openssl-sys to staticly link in the openssl library. Necessary when 27 | # cross compiling to x86_64-unknown-linux-musl. 28 | vendored = ["openssl-sys/vendored"] 29 | -------------------------------------------------------------------------------- /bootstrap/bootstrap/Makefile: -------------------------------------------------------------------------------- 1 | 2 | release: prepare 3 | cargo build --release --target=x86_64-unknown-linux-musl --features vendored 4 | 5 | debug: prepare 6 | cargo build --target=x86_64-unknown-linux-musl --features vendored 7 | 8 | test: prepare 9 | cargo test --target=x86_64-unknown-linux-musl --features vendored 10 | 11 | prepare: 12 | rustup target add x86_64-unknown-linux-musl 13 | -------------------------------------------------------------------------------- /bootstrap/bootstrap/README.md: -------------------------------------------------------------------------------- 1 | # Bootstrap 2 | 3 | Bootstrap is a multi stage strategy to bring the node to a final working 4 | state with latest released version of everything! 
5 | 6 | ## Stages 7 | 8 | To make sure it runs everything with the correct version, `bootstrap` 9 | performs a multi-stage bootstrap. Currently there are only two stages: 10 | 11 | - Update self (bootstrap binary itself) 12 | - Update software 13 | - Core utils and daemons 14 | - ZOS daemons 15 | - Start daemons 16 | 17 | ## How it works 18 | 19 | - Bootstrap is used by [0-initramfs](https://github.com/threefoldtech/0-initramfs/blob/development-zos-v3/packages/modules.sh) to basically add `internet` and `bootstrap` services to the base image 20 | - After the internet service is fully started, bootstrap starts downloading the flists needed for the zos node to work properly 21 | - As described above, bootstrap runs in two stages: 22 | - The first stage updates bootstrap itself; it is done like that to avoid re-building the image if we only changed the bootstrap code. This update is basically done from the `tf-autobuilder` repo on the [hub/tf-autobuilder](https://hub.grid.tf/tf-autobuilder) by downloading the latest bootstrap flist 23 | - In the second stage bootstrap downloads the flists for that environment. Bootstrap cares about the `runmode` argument that we pass during the start of the node. For example, if we passed `runmode=dev` it will get the tag `development` under [hub/tf-zos](https://hub.grid.tf/tf-zos). Each tag is linked to a sub-directory where all flists for this environment exist, to be downloaded and installed on the node 24 | 25 | ## Testing in Developer setup 26 | 27 | To test bootstrap changes on a local dev-setup you need to do the following: 28 | 29 | - under zos/qemu `cp -r overlay.normal overlay.custom` 30 | - build the `bootstrap` bin 31 | - copy the `bootstrap` bin to overlay.custom/sbin/ 32 | - remove the dir `overlay.custom/bin` 33 | - remove all files under `overlay.custom/etc/zinit/` 34 | - add the file overlay.custom/etc/zinit/bootstrap.yaml with the following content 35 | 36 | ``` 37 | exec: bootstrap -d 38 | oneshot: true 39 | after: 40 | - internet 41 | ``` 42 | 43 | - remove the overlay link under `qemu/overlay` 44 | - create a new link pointing to overlay.custom under zos/qemu: `ln -s overlay.custom overlay` 45 | - boot your vm as normal 46 | -------------------------------------------------------------------------------- /bootstrap/bootstrap/src/kparams.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Error; 2 | use shlex::split; 3 | use std::collections::HashMap; 4 | use std::fs; 5 | 6 | const PARAM_FILE: &str = "/proc/cmdline"; 7 | type Params = HashMap<String, Option<String>>; 8 | 9 | pub fn params() -> Result<Params, Error> { 10 | let bytes = fs::read(PARAM_FILE)?; 11 | parse(&bytes) 12 | } 13 | 14 | fn parse(bytes: &[u8]) -> Result<Params, Error> { 15 | let args = match split(std::str::from_utf8(bytes)?) { 16 | Some(args) => args, 17 | None => bail!("failed to parse kernel params"), 18 | }; 19 | 20 | let mut map = Params::new(); 21 | 22 | for arg in args { 23 | let parts: Vec<&str> = arg.splitn(2, '=').collect(); 24 | let key = String::from(parts[0]); 25 | let value = match parts.len() { 26 | 1 => None, 27 | _ => Some(String::from(parts[1])), 28 | }; 29 | 30 | map.insert(key, value); 31 | } 32 | 33 | Ok(map) 34 | } 35 | 36 | #[cfg(test)] 37 | mod tests { 38 | // Note this useful idiom: importing names from outer (for mod tests) scope.
39 | use super::*; 40 | 41 | #[test] 42 | fn test_parse() -> Result<(), Error> { 43 | let input: &str = "initrd=initramfs-linux.img version=v3 root=UUID=10f9e7bb-ba63-4fbd-a95e-c78b5496cfbe rootflags=subvol=root rw b43.allhwsupport=1"; 44 | let result = parse(input.as_bytes())?; 45 | assert_eq!(result.len(), 6); 46 | assert_eq!(result["rw"], None); 47 | assert_eq!(result["version"], Some(String::from("v3"))); 48 | assert_eq!(result["rootflags"], Some(String::from("subvol=root"))); 49 | Ok(()) 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /bootstrap/bootstrap/src/main.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate anyhow; 3 | #[macro_use] 4 | extern crate log; 5 | 6 | mod bootstrap; 7 | mod config; 8 | mod hub; 9 | mod kparams; 10 | mod workdir; 11 | mod zfs; 12 | mod zinit; 13 | 14 | use anyhow::Result; 15 | use config::Config; 16 | 17 | fn app() -> Result<()> { 18 | let config = Config::current()?; 19 | 20 | let level = if config.debug { 21 | log::LevelFilter::Debug 22 | } else { 23 | log::LevelFilter::Info 24 | }; 25 | 26 | simple_logger::SimpleLogger::new() 27 | .with_utc_timestamps() 28 | .with_level(level) 29 | .init() 30 | .unwrap(); 31 | 32 | // configure available stage 33 | let stages: Vec Result<()>> = vec![ 34 | // self update 35 | |cfg| -> Result<()> { 36 | if cfg.debug { 37 | // if debug is set, do not upgrade self. 38 | return Ok(()); 39 | } 40 | bootstrap::update(cfg) 41 | }, 42 | // install all system binaries 43 | |cfg| -> Result<()> { bootstrap::install(cfg) }, 44 | ]; 45 | 46 | let index = config.stage as usize - 1; 47 | 48 | if index >= stages.len() { 49 | bail!( 50 | "unknown stage '{}' only {} stage(s) are supported", 51 | config.stage, 52 | stages.len() 53 | ); 54 | } 55 | 56 | info!("running stage {}/{}", config.stage, stages.len()); 57 | stages[index](&config)?; 58 | 59 | // Why we run stages in different "processes" (hence using exec) 60 | // the point is that will allow the stages to do changes to the 61 | // bootstrap binary. It means an old image with an old version of 62 | // bootstrap will still be able to run latest code. Because always 63 | // the first stage is to update self. 64 | let next = config.stage as usize + 1; 65 | if next <= stages.len() { 66 | debug!("spawning stage: {}", next); 67 | let bin: Vec = std::env::args().take(1).collect(); 68 | let mut cmd = exec::Command::new(&bin[0]); 69 | let cmd = cmd.arg("-s").arg(format!("{}", next)); 70 | let cmd = if config.debug { cmd.arg("-d") } else { cmd }; 71 | 72 | //this call will never return unless something is wrong. 
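// exec() replaces the current process image with the next stage binary; it only
// returns an error value when the exec itself failed, which is why we bail! here.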
73 | bail!("{}", cmd.exec()); 74 | } 75 | 76 | Ok(()) 77 | } 78 | 79 | fn main() { 80 | let code = match app() { 81 | Ok(_) => 0, 82 | Err(err) => { 83 | eprintln!("{}", err); 84 | 1 85 | } 86 | }; 87 | 88 | std::process::exit(code); 89 | } 90 | -------------------------------------------------------------------------------- /bootstrap/bootstrap/src/workdir.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use std::env; 3 | use std::path::PathBuf; 4 | 5 | pub struct WorkDir { 6 | path: PathBuf, 7 | old: PathBuf, 8 | } 9 | 10 | impl WorkDir { 11 | fn new(path: T) -> Result 12 | where 13 | T: Into, 14 | { 15 | let path = path.into(); 16 | let wd = WorkDir { 17 | path: path, 18 | old: env::current_dir()?, 19 | }; 20 | debug!("creating: {:?}", wd.path); 21 | match std::fs::create_dir(&wd.path) { 22 | Err(e) => { 23 | if e.kind() != std::io::ErrorKind::AlreadyExists { 24 | bail!("{}", e); 25 | } 26 | } 27 | _ => {} 28 | } 29 | debug!("mounting tmpfs"); 30 | nix::mount::mount( 31 | Some("none"), // This should be set to None but for some reason the compiles complaines 32 | &wd.path, 33 | Some("tmpfs"), 34 | nix::mount::MsFlags::empty(), 35 | Some("size=512M"), 36 | )?; 37 | 38 | env::set_current_dir(&wd.path)?; 39 | Ok(wd) 40 | } 41 | 42 | pub fn run(path: T, f: F) -> Result 43 | where 44 | T: Into, 45 | F: FnOnce() -> O, 46 | { 47 | let _wd = WorkDir::new(path)?; 48 | 49 | Ok(f()) 50 | } 51 | } 52 | 53 | impl Drop for WorkDir { 54 | fn drop(&mut self) { 55 | match env::set_current_dir(&self.old) { 56 | Err(e) => { 57 | error!("failed change directory to: {}", e); 58 | } 59 | _ => {} 60 | } 61 | match nix::mount::umount2(&self.path, nix::mount::MntFlags::MNT_FORCE) { 62 | Err(e) => { 63 | error!("failed to unmount workdir: {}", e); 64 | } 65 | _ => {} 66 | } 67 | match std::fs::remove_dir_all(&self.path) { 68 | Err(e) => { 69 | error!("failed to delete workdir: {}", e); 70 | } 71 | _ => {} 72 | } 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /bootstrap/bootstrap/src/zinit.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use std::process::Command; 3 | 4 | /// monitor service via name 5 | pub fn monitor(name: T) -> Result<()> 6 | where 7 | T: AsRef, 8 | { 9 | let output = Command::new("zinit") 10 | .arg("monitor") 11 | .arg(name.as_ref()) 12 | .output()?; 13 | if output.status.success() { 14 | return Ok(()); 15 | } 16 | bail!( 17 | "failed to monitor service '{}': {:?}", 18 | name.as_ref(), 19 | output 20 | ); 21 | } 22 | -------------------------------------------------------------------------------- /bootstrap/etc/zinit/bootstrap.yaml: -------------------------------------------------------------------------------- 1 | exec: /sbin/bootstrap 2 | oneshot: true 3 | log: stdout # to see the logs on terminal 4 | after: 5 | - internet -------------------------------------------------------------------------------- /bootstrap/etc/zinit/internet.yaml: -------------------------------------------------------------------------------- 1 | exec: internet 2 | oneshot: true 3 | log: stdout 4 | after: 5 | - udev-trigger 6 | -------------------------------------------------------------------------------- /bootstrap/usr/share/udhcp/probe.script: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # this prope script can work as a udhcpc script 4 | # that does not configure the 
interface but instead 5 | # prints out the result from the bound operation 6 | # in a json object. 7 | # this is usually used as 8 | # udhcpc -qf --now -s -i inf 9 | 10 | stage=$1 11 | 12 | # we only handle bound process 13 | if [ $stage != "bound" ]; then 14 | exit 0 15 | fi 16 | 17 | # the idea is that we print out 18 | # all config received in a json 19 | # object so others can read that 20 | # out 21 | cat < /dev/null | tail -n1`" && echo "*") 5 | version = github.com/threefoldtech/zos/pkg/version 6 | ldflags = '-w -s -X $(version).Branch=$(branch) -X $(version).Revision=$(revision) -X $(version).Dirty=$(dirty) -extldflags "-static"' 7 | 8 | all: identityd internet zos 9 | strip $(OUT)/* 10 | 11 | .PHONY: output clean identityd internet zos 12 | 13 | output: 14 | mkdir -p $(OUT) 15 | 16 | identityd: 17 | cd identityd && CGO_ENABLED=0 GOOS=linux go build -ldflags $(ldflags) -o $(OUT)/identityd 18 | 19 | internet: output 20 | cd internet && CGO_ENABLED=0 GOOS=linux go build -ldflags $(ldflags) -o $(OUT)/internet 21 | 22 | zos: output 23 | cd zos && CGO_ENABLED=0 GOOS=linux go build -ldflags $(ldflags) -o $(OUT)/zos 24 | -------------------------------------------------------------------------------- /cmds/identityd/monitor.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/blang/semver" 8 | "github.com/threefoldtech/zosbase/pkg" 9 | ) 10 | 11 | type monitorStream struct { 12 | duration time.Duration 13 | version semver.Version 14 | } 15 | 16 | var _ pkg.VersionMonitor = (*monitorStream)(nil) 17 | 18 | // newVersionMonitor creates a new instance of version monitor 19 | func newVersionMonitor(d time.Duration, version semver.Version) *monitorStream { 20 | return &monitorStream{ 21 | duration: d, 22 | version: version, 23 | } 24 | } 25 | 26 | func (m *monitorStream) GetVersion() semver.Version { 27 | return m.version 28 | } 29 | 30 | func (m *monitorStream) Version(ctx context.Context) <-chan semver.Version { 31 | ch := make(chan semver.Version) 32 | go func() { 33 | defer close(ch) 34 | ch <- m.version 35 | 36 | for { 37 | ch <- m.version 38 | time.Sleep(m.duration) 39 | } 40 | }() 41 | 42 | return ch 43 | } 44 | -------------------------------------------------------------------------------- /cmds/identityd/ssh.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "net/http" 7 | "os" 8 | "path/filepath" 9 | "slices" 10 | "time" 11 | 12 | "github.com/cenkalti/backoff" 13 | "github.com/hashicorp/go-retryablehttp" 14 | "github.com/rs/zerolog/log" 15 | "github.com/threefoldtech/zosbase/pkg" 16 | "github.com/threefoldtech/zosbase/pkg/environment" 17 | "github.com/threefoldtech/zosbase/pkg/kernel" 18 | ) 19 | 20 | var ( 21 | mainNetFarms = []pkg.FarmID{ 22 | 1, 79, 77, 76, 23 | } 24 | ) 25 | 26 | func manageSSHKeys() error { 27 | extraUser, addUser := kernel.GetParams().GetOne("ssh-user") 28 | 29 | authorizedKeysPath := filepath.Join("/", "root", ".ssh", "authorized_keys") 30 | err := os.Remove(authorizedKeysPath) 31 | if err != nil && !os.IsNotExist(err) { 32 | return fmt.Errorf("failed to deleted authorized_keys file: %+w", err) 33 | } 34 | 35 | env := environment.MustGet() 36 | config, err := environment.GetConfig() 37 | if err != nil { 38 | return err 39 | } 40 | 41 | authorizedUsers := config.Users.Authorized 42 | 43 | if env.RunningMode == environment.RunningMain { 44 | // we don't support 
adding the user passed as ssh-user on mainnet 45 | addUser = false 46 | } 47 | 48 | // if we are in mainnet but one of the managed farms we will use the user list from testnet 49 | // instead 50 | if env.RunningMode == environment.RunningMain && slices.Contains(mainNetFarms, env.FarmID) { 51 | // that's only if main config has no configured users 52 | if len(authorizedUsers) == 0 { 53 | config, err = environment.GetConfigForMode(environment.RunningTest) 54 | if err != nil { 55 | return err 56 | } 57 | 58 | authorizedUsers = config.Users.Authorized 59 | } 60 | } 61 | 62 | // check if we will add the extra user 63 | if addUser { 64 | authorizedUsers = append(authorizedUsers, extraUser) 65 | } 66 | 67 | file, err := os.OpenFile(authorizedKeysPath, os.O_RDWR|os.O_CREATE, 0644) 68 | if err != nil { 69 | return fmt.Errorf("failed to open authorized_keys file: %w", err) 70 | } 71 | 72 | defer file.Close() 73 | 74 | for _, user := range authorizedUsers { 75 | fetchKey := func() error { 76 | res, err := retryablehttp.Get(fmt.Sprintf("https://github.com/%s.keys", user)) 77 | 78 | if err != nil { 79 | return fmt.Errorf("failed to fetch user keys: %w", err) 80 | } 81 | 82 | if res.StatusCode == http.StatusNotFound { 83 | return backoff.Permanent(fmt.Errorf("failed to get user keys for user (%s): keys not found", user)) 84 | } 85 | 86 | if res.StatusCode != http.StatusOK { 87 | return fmt.Errorf("failed to get user keys for user (%s) with status code %d", user, res.StatusCode) 88 | } 89 | 90 | _, err = io.Copy(file, res.Body) 91 | return err 92 | } 93 | 94 | log.Info().Str("user", user).Msg("fetching user ssh keys") 95 | err = backoff.Retry(fetchKey, backoff.WithMaxRetries(backoff.NewConstantBackOff(time.Second), 3)) 96 | if err != nil { 97 | // skip the user if we failed to load the keys multiple times; 98 | // this means the username is not correct and it needs to be skipped 99 | log.Error().Str("user", user).Err(err).Msg("failed to retrieve user keys") 100 | } 101 | } 102 | 103 | return nil 104 | } 105 | -------------------------------------------------------------------------------- /cmds/internet/README.md: -------------------------------------------------------------------------------- 1 | # Internet 2 | 3 | The internet module is responsible for connecting the zos node to the internet. 4 | 5 | ## How it works 6 | 7 | The internet module bootstraps the private zos network as follows: 8 | 9 | - Find a physical interface that can get an IPv4 over DHCP, or use `priv vlan` if configured as a kernel param. 10 | 11 | - Create a bridge called `zos` and attach the interface to it. 12 | 13 | - Start a DHCP daemon after the bridge and interface are brought UP to get an IP. 14 | 15 | - Test the internet connection by trying to connect to some addresses `"bootstrap.grid.tf:http", "hub.grid.tf:http"` 16 | 17 | ## Build 18 | 19 | The internet binary is built as part of the build process of the zos base image as follows: 20 | 21 | - The `0-initramfs` first installs bootstrap. 22 | 23 | - The installation builds and copies the internet binary to the initramfs root bin along with the other bootstrap binaries.
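As a rough illustration of the reachability test mentioned under "How it works", the sketch below dials the same well-known grid endpoints over TCP. The helper name and the timeout are illustrative assumptions, not the module's actual implementation:

```go
package main

import (
	"fmt"
	"net"
	"time"
)

// canReachInternet returns true as soon as one of the well known grid
// endpoints accepts a TCP connection.
func canReachInternet() bool {
	endpoints := []string{"bootstrap.grid.tf:http", "hub.grid.tf:http"}
	for _, addr := range endpoints {
		conn, err := net.DialTimeout("tcp", addr, 5*time.Second)
		if err != nil {
			continue
		}
		conn.Close()
		return true
	}
	return false
}

func main() {
	fmt.Println("internet reachable:", canReachInternet())
}
```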
24 | -------------------------------------------------------------------------------- /cmds/modules/api_gateway/main.go: -------------------------------------------------------------------------------- 1 | package apigateway 2 | 3 | import ( 4 | "context" 5 | "crypto/ed25519" 6 | "encoding/hex" 7 | "fmt" 8 | 9 | "github.com/cenkalti/backoff/v3" 10 | "github.com/rs/zerolog/log" 11 | substrate "github.com/threefoldtech/tfchain/clients/tfchain-client-go" 12 | "github.com/threefoldtech/tfgrid-sdk-go/rmb-sdk-go/peer" 13 | "github.com/threefoldtech/zbus" 14 | "github.com/threefoldtech/zosbase/pkg/environment" 15 | "github.com/threefoldtech/zosbase/pkg/stubs" 16 | substrategw "github.com/threefoldtech/zosbase/pkg/substrate_gateway" 17 | "github.com/threefoldtech/zosbase/pkg/utils" 18 | zosapi "github.com/threefoldtech/zosbase/pkg/zos_api" 19 | "github.com/urfave/cli/v2" 20 | ) 21 | 22 | const module = "api-gateway" 23 | 24 | // Module entry point 25 | var Module cli.Command = cli.Command{ 26 | Name: module, 27 | Usage: "handles outgoing chain calls and incoming rmb calls", 28 | Flags: []cli.Flag{ 29 | &cli.StringFlag{ 30 | Name: "broker", 31 | Usage: "connection string to the message `BROKER`", 32 | Value: "unix:///var/run/redis.sock", 33 | }, 34 | &cli.UintFlag{ 35 | Name: "workers", 36 | Usage: "number of workers `N`", 37 | Value: 1, 38 | }, 39 | }, 40 | Action: action, 41 | } 42 | 43 | func action(cli *cli.Context) error { 44 | var ( 45 | msgBrokerCon string = cli.String("broker") 46 | workerNr uint = cli.Uint("workers") 47 | ) 48 | 49 | server, err := zbus.NewRedisServer(module, msgBrokerCon, workerNr) 50 | if err != nil { 51 | return fmt.Errorf("fail to connect to message broker server: %w", err) 52 | } 53 | redis, err := zbus.NewRedisClient(msgBrokerCon) 54 | if err != nil { 55 | return fmt.Errorf("fail to connect to message broker server: %w", err) 56 | } 57 | idStub := stubs.NewIdentityManagerStub(redis) 58 | 59 | sk := ed25519.PrivateKey(idStub.PrivateKey(cli.Context)) 60 | id, err := substrate.NewIdentityFromEd25519Key(sk) 61 | log.Info().Str("address", id.Address()).Msg("node address") 62 | if err != nil { 63 | return err 64 | } 65 | 66 | manager, err := environment.GetSubstrate() 67 | if err != nil { 68 | return fmt.Errorf("failed to create substrate manager: %w", err) 69 | } 70 | 71 | router := peer.NewRouter() 72 | gw, err := substrategw.NewSubstrateGateway(manager, id) 73 | if err != nil { 74 | return fmt.Errorf("failed to create api gateway: %w", err) 75 | } 76 | 77 | server.Register(zbus.ObjectID{Name: "api-gateway", Version: "0.0.1"}, gw) 78 | 79 | ctx, _ := utils.WithSignal(context.Background()) 80 | utils.OnDone(ctx, func(_ error) { 81 | log.Info().Msg("shutting down") 82 | }) 83 | 84 | go func() { 85 | for { 86 | if err := server.Run(ctx); err != nil && err != context.Canceled { 87 | log.Error().Err(err).Msg("unexpected error") 88 | continue 89 | } 90 | 91 | break 92 | } 93 | }() 94 | 95 | api, err := zosapi.NewZosAPI(manager, redis, msgBrokerCon) 96 | if err != nil { 97 | return fmt.Errorf("failed to create zos api: %w", err) 98 | } 99 | api.SetupRoutes(router) 100 | 101 | pair, err := id.KeyPair() 102 | if err != nil { 103 | return err 104 | } 105 | 106 | bo := backoff.NewExponentialBackOff() 107 | bo.MaxElapsedTime = 0 108 | backoff.Retry(func() error { 109 | _, err = peer.NewPeer( 110 | ctx, 111 | hex.EncodeToString(pair.Seed()), 112 | manager, 113 | router.Serve, 114 | peer.WithKeyType(peer.KeyTypeEd25519), 115 | peer.WithRelay(environment.GetRelaysURLs()...), 116 | 
peer.WithInMemoryExpiration(6*60*60), // 6 hours 117 | ) 118 | if err != nil { 119 | return fmt.Errorf("failed to start a new rmb peer: %w", err) 120 | } 121 | 122 | return nil 123 | }, bo) 124 | 125 | log.Info(). 126 | Str("broker", msgBrokerCon). 127 | Uint("worker nr", workerNr). 128 | Msg("starting api-gateway module") 129 | 130 | // block forever 131 | <-ctx.Done() 132 | return nil 133 | } 134 | -------------------------------------------------------------------------------- /cmds/modules/contd/main.go: -------------------------------------------------------------------------------- 1 | package contd 2 | 3 | import ( 4 | "context" 5 | "os" 6 | "os/exec" 7 | "time" 8 | 9 | "github.com/cenkalti/backoff/v3" 10 | "github.com/pkg/errors" 11 | "github.com/rs/zerolog/log" 12 | "github.com/urfave/cli/v2" 13 | 14 | "github.com/threefoldtech/zbus" 15 | "github.com/threefoldtech/zosbase/pkg/container" 16 | "github.com/threefoldtech/zosbase/pkg/utils" 17 | ) 18 | 19 | const module = "container" 20 | 21 | // Module is contd entry point 22 | var Module cli.Command = cli.Command{ 23 | Name: "contd", 24 | Usage: "handles container creation", 25 | Flags: []cli.Flag{ 26 | &cli.StringFlag{ 27 | Name: "root", 28 | Usage: "`ROOT` working directory of the module", 29 | Value: "/var/cache/modules/contd", 30 | }, 31 | &cli.StringFlag{ 32 | Name: "broker", 33 | Usage: "connection string to the message `BROKER`", 34 | Value: "unix:///var/run/redis.sock", 35 | }, 36 | &cli.StringFlag{ 37 | Name: "containerd", 38 | Usage: "connection string to containerd `CONTAINERD`", 39 | Value: "/run/containerd/containerd.sock", 40 | }, 41 | &cli.UintFlag{ 42 | Name: "workers", 43 | Usage: "number of workers `N`", 44 | Value: 1, 45 | }, 46 | }, 47 | Action: action, 48 | } 49 | 50 | func action(cli *cli.Context) error { 51 | var ( 52 | moduleRoot string = cli.String("root") 53 | msgBrokerCon string = cli.String("broker") 54 | workerNr uint = cli.Uint("workers") 55 | containerdCon string = cli.String("containerd") 56 | ) 57 | 58 | // wait for shim-logs to be available before starting 59 | log.Info().Msg("wait for shim-logs binary to be available") 60 | bo := backoff.NewExponentialBackOff() 61 | bo.MaxElapsedTime = 0 //forever 62 | _ = backoff.RetryNotify(func() error { 63 | _, err := exec.LookPath("shim-logs") 64 | return err 65 | // return fmt.Errorf("wait forever") 66 | }, bo, func(err error, d time.Duration) { 67 | log.Warn().Err(err).Msgf("shim-logs binary not found, retrying in %s", d.String()) 68 | }) 69 | 70 | if err := os.MkdirAll(moduleRoot, 0750); err != nil { 71 | return errors.Wrap(err, "fail to create module root") 72 | } 73 | 74 | server, err := zbus.NewRedisServer(module, msgBrokerCon, workerNr) 75 | if err != nil { 76 | return errors.Wrap(err, "fail to connect to message broker server") 77 | } 78 | 79 | client, err := zbus.NewRedisClient(msgBrokerCon) 80 | if err != nil { 81 | return errors.Wrap(err, "fail to connect to message broker server") 82 | } 83 | 84 | containerd := container.New(client, moduleRoot, containerdCon) 85 | 86 | server.Register(zbus.ObjectID{Name: module, Version: "0.0.1"}, containerd) 87 | 88 | log.Info(). 89 | Str("broker", msgBrokerCon). 90 | Uint("worker nr", workerNr).
91 | Msg("starting containerd module") 92 | 93 | ctx, _ := utils.WithSignal(context.Background()) 94 | utils.OnDone(ctx, func(_ error) { 95 | log.Info().Msg("shutting down") 96 | }) 97 | 98 | // start watching for events 99 | go containerd.Watch(ctx) 100 | 101 | if err := server.Run(ctx); err != nil && err != context.Canceled { 102 | return errors.Wrap(err, "unexpected error") 103 | } 104 | 105 | return nil 106 | } 107 | -------------------------------------------------------------------------------- /cmds/modules/flistd/main.go: -------------------------------------------------------------------------------- 1 | package flistd 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/pkg/errors" 8 | "github.com/threefoldtech/zosbase/pkg/stubs" 9 | "github.com/threefoldtech/zosbase/pkg/utils" 10 | "github.com/urfave/cli/v2" 11 | 12 | "github.com/rs/zerolog/log" 13 | 14 | "github.com/threefoldtech/zbus" 15 | "github.com/threefoldtech/zosbase/pkg/flist" 16 | ) 17 | 18 | const ( 19 | module = "flist" 20 | 21 | cacheAge = time.Hour * 24 * 90 // 90 days 22 | cacheCleanup = time.Hour * 24 23 | ) 24 | 25 | // Module is entry point for module 26 | var Module cli.Command = cli.Command{ 27 | Name: "flistd", 28 | Usage: "handles mounting of flists", 29 | Flags: []cli.Flag{ 30 | &cli.StringFlag{ 31 | Name: "root", 32 | Usage: "`ROOT` working directory of the module", 33 | Value: "/var/cache/modules/flistd", 34 | }, 35 | &cli.StringFlag{ 36 | Name: "broker", 37 | Usage: "connection string to the message `BROKER`", 38 | Value: "unix:///var/run/redis.sock", 39 | }, 40 | &cli.UintFlag{ 41 | Name: "workers", 42 | Usage: "number of workers `N`", 43 | Value: 1, 44 | }, 45 | }, 46 | Action: action, 47 | } 48 | 49 | func action(cli *cli.Context) error { 50 | var ( 51 | moduleRoot string = cli.String("root") 52 | msgBrokerCon string = cli.String("broker") 53 | workerNr uint = cli.Uint("workers") 54 | ) 55 | 56 | redis, err := zbus.NewRedisClient(msgBrokerCon) 57 | if err != nil { 58 | return errors.Wrap(err, "fail to connect to message broker server") 59 | } 60 | 61 | storage := stubs.NewStorageModuleStub(redis) 62 | 63 | server, err := zbus.NewRedisServer(module, msgBrokerCon, workerNr) 64 | if err != nil { 65 | return errors.Wrap(err, "fail to connect to message broker server") 66 | } 67 | 68 | mod := flist.New(moduleRoot, storage) 69 | server.Register(zbus.ObjectID{Name: module, Version: "0.0.1"}, mod) 70 | 71 | ctx, _ := utils.WithSignal(context.Background()) 72 | 73 | if cleaner, ok := mod.(flist.Cleaner); ok { 74 | // go cleaner.MountsCleaner(ctx, cacheCleanup) 75 | go cleaner.CacheCleaner(ctx, cacheCleanup, cacheAge) 76 | } 77 | 78 | log.Info(). 79 | Str("broker", msgBrokerCon). 80 | Uint("worker nr", workerNr). 
81 | Msg("starting flist module") 82 | 83 | utils.OnDone(ctx, func(_ error) { 84 | log.Info().Msg("shutting down") 85 | }) 86 | 87 | if err := server.Run(ctx); err != nil && err != context.Canceled { 88 | return errors.Wrap(err, "unexpected error") 89 | } 90 | 91 | return nil 92 | } 93 | -------------------------------------------------------------------------------- /cmds/modules/gateway/main.go: -------------------------------------------------------------------------------- 1 | package gateway 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/pkg/errors" 7 | "github.com/threefoldtech/zosbase/pkg/gateway" 8 | "github.com/threefoldtech/zosbase/pkg/utils" 9 | "github.com/urfave/cli/v2" 10 | 11 | "github.com/rs/zerolog/log" 12 | 13 | "github.com/threefoldtech/zbus" 14 | ) 15 | 16 | const ( 17 | module = "gateway" 18 | ) 19 | 20 | // Module is entry point for module 21 | var Module cli.Command = cli.Command{ 22 | Name: "gateway", 23 | Usage: "manage web gateway proxy", 24 | Flags: []cli.Flag{ 25 | &cli.StringFlag{ 26 | Name: "root", 27 | Usage: "`ROOT` working directory of the module", 28 | Value: "/var/cache/modules/gateway", 29 | }, 30 | &cli.StringFlag{ 31 | Name: "broker", 32 | Usage: "connection string to the message `BROKER`", 33 | Value: "unix:///var/run/redis.sock", 34 | }, 35 | &cli.UintFlag{ 36 | Name: "workers", 37 | Usage: "number of workers `N`", 38 | Value: 1, 39 | }, 40 | }, 41 | Action: action, 42 | } 43 | 44 | func action(cli *cli.Context) error { 45 | var ( 46 | moduleRoot string = cli.String("root") 47 | msgBrokerCon string = cli.String("broker") 48 | workerNr uint = cli.Uint("workers") 49 | ) 50 | 51 | server, err := zbus.NewRedisServer(module, msgBrokerCon, workerNr) 52 | if err != nil { 53 | return errors.Wrap(err, "fail to connect to message broker server") 54 | } 55 | 56 | client, err := zbus.NewRedisClient(msgBrokerCon) 57 | if err != nil { 58 | return errors.Wrap(err, "failed to connect to zbus broker") 59 | } 60 | 61 | mod, err := gateway.New(cli.Context, client, moduleRoot) 62 | if err != nil { 63 | return errors.Wrap(err, "failed to construct gateway object") 64 | } 65 | server.Register(zbus.ObjectID{Name: "manager", Version: "0.0.1"}, mod) 66 | 67 | ctx, cancel := utils.WithSignal(context.Background()) 68 | defer cancel() 69 | 70 | log.Info(). 71 | Str("broker", msgBrokerCon). 72 | Uint("worker nr", workerNr). 
73 | Msg("starting gateway module") 74 | 75 | utils.OnDone(ctx, func(_ error) { 76 | log.Info().Msg("shutting down") 77 | }) 78 | 79 | if err := server.Run(ctx); err != nil && err != context.Canceled { 80 | return errors.Wrap(err, "unexpected error") 81 | } 82 | 83 | return nil 84 | } 85 | -------------------------------------------------------------------------------- /cmds/modules/networkd/nft.go: -------------------------------------------------------------------------------- 1 | package networkd 2 | 3 | import ( 4 | "context" 5 | "os/exec" 6 | 7 | "github.com/pkg/errors" 8 | "github.com/rs/zerolog/log" 9 | ) 10 | 11 | func ensureHostFw(ctx context.Context) error { 12 | log.Info().Msg("ensuring existing host nft rules") 13 | 14 | cmd := exec.CommandContext(ctx, "/bin/sh", "-c", 15 | ` 16 | nft 'add table inet filter' 17 | nft 'add table arp filter' 18 | nft 'add table bridge filter' 19 | 20 | # duo to a bug we had we need to make sure those chains are 21 | # deleted and then recreated later 22 | nft 'delete chain inet filter input' 23 | nft 'delete chain inet filter forward' 24 | nft 'delete chain inet filter output' 25 | 26 | nft 'delete chain bridge filter input' 27 | nft 'delete chain bridge filter forward' 28 | nft 'delete chain bridge filter output' 29 | 30 | nft 'delete chain arp filter input' 31 | nft 'delete chain arp filter output' 32 | 33 | # recreate chains correctly 34 | nft 'add chain inet filter input { type filter hook input priority filter; policy accept; }' 35 | nft 'add chain inet filter forward { type filter hook forward priority filter; policy accept; }' 36 | nft 'add chain inet filter output { type filter hook output priority filter; policy accept; }' 37 | nft 'add chain inet filter prerouting { type filter hook prerouting priority filter; policy accept; }' 38 | 39 | nft 'add chain arp filter input { type filter hook input priority filter; policy accept; }' 40 | nft 'add chain arp filter output { type filter hook output priority filter; policy accept; }' 41 | 42 | nft 'add chain bridge filter input { type filter hook input priority filter; policy accept; }' 43 | nft 'add chain bridge filter forward { type filter hook forward priority filter; policy accept; }' 44 | nft 'add chain bridge filter prerouting { type filter hook prerouting priority filter; policy accept; }' 45 | nft 'add chain bridge filter postrouting { type filter hook postrouting priority filter; policy accept; }' 46 | nft 'add chain bridge filter output { type filter hook output priority filter; policy accept; }' 47 | 48 | nft 'flush chain bridge filter forward' 49 | nft 'flush chain inet filter forward' 50 | nft 'flush chain inet filter prerouting' 51 | 52 | # drop smtp traffic for hidden nodes 53 | nft 'add rule inet filter prerouting iifname "b-*" tcp dport {25, 587, 465} reject with icmp type admin-prohibited' 54 | `) 55 | 56 | if err := cmd.Run(); err != nil { 57 | return errors.Wrap(err, "could not set up host nft rules") 58 | } 59 | 60 | return nil 61 | } 62 | -------------------------------------------------------------------------------- /cmds/modules/noded/public.go: -------------------------------------------------------------------------------- 1 | package noded 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/pkg/errors" 8 | "github.com/rs/zerolog/log" 9 | substrate "github.com/threefoldtech/tfchain/clients/tfchain-client-go" 10 | "github.com/threefoldtech/zbus" 11 | "github.com/threefoldtech/zosbase/pkg" 12 | "github.com/threefoldtech/zosbase/pkg/events" 13 | 
"github.com/threefoldtech/zosbase/pkg/stubs" 14 | ) 15 | 16 | func setPublicConfig(ctx context.Context, cl zbus.Client, cfg *substrate.PublicConfig) error { 17 | log.Info().Msg("setting node public config") 18 | netMgr := stubs.NewNetworkerStub(cl) 19 | 20 | if cfg == nil { 21 | return netMgr.UnsetPublicConfig(ctx) 22 | } 23 | 24 | pub, err := pkg.PublicConfigFrom(*cfg) 25 | if err != nil { 26 | return errors.Wrap(err, "failed to create public config from setup") 27 | } 28 | 29 | return netMgr.SetPublicConfig(ctx, pub) 30 | } 31 | 32 | // public sets and watches changes to public config on chain and tries to apply the provided setup 33 | func public(ctx context.Context, nodeID uint32, cl zbus.Client, events *events.RedisConsumer) error { 34 | ch, err := events.PublicConfig(ctx) 35 | if err != nil { 36 | return errors.Wrap(err, "failed to subscribe to node events") 37 | } 38 | 39 | substrateGateway := stubs.NewSubstrateGatewayStub(cl) 40 | 41 | reapply: 42 | for { 43 | node, err := substrateGateway.GetNode(ctx, nodeID) 44 | if err != nil { 45 | return errors.Wrap(err, "failed to get node public config") 46 | } 47 | 48 | var cfg *substrate.PublicConfig 49 | if node.PublicConfig.HasValue { 50 | cfg = &node.PublicConfig.AsValue 51 | } 52 | 53 | if err := setPublicConfig(ctx, cl, cfg); err != nil { 54 | return errors.Wrap(err, "failed to set public config (reapply)") 55 | } 56 | 57 | for { 58 | select { 59 | case <-ctx.Done(): 60 | return nil 61 | case event := <-ch: 62 | log.Info().Msgf("got a public config update: %+v", event.PublicConfig) 63 | var cfg *substrate.PublicConfig 64 | if event.PublicConfig.HasValue { 65 | cfg = &event.PublicConfig.AsValue 66 | } 67 | if err := setPublicConfig(ctx, cl, cfg); err != nil { 68 | return errors.Wrap(err, "failed to set public config") 69 | } 70 | case <-time.After(2 * time.Hour): 71 | // last resort, if none of the events 72 | // was received, it will be a good idea to just 73 | // check every 2 hours for changes. 74 | continue reapply 75 | } 76 | } 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /cmds/modules/provisiond/cap.go: -------------------------------------------------------------------------------- 1 | package provisiond 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/cenkalti/backoff/v3" 8 | "github.com/centrifuge/go-substrate-rpc-client/v4/types" 9 | "github.com/rs/zerolog/log" 10 | substrate "github.com/threefoldtech/tfchain/clients/tfchain-client-go" 11 | "github.com/threefoldtech/zosbase/pkg/gridtypes" 12 | "github.com/threefoldtech/zosbase/pkg/provision" 13 | "github.com/threefoldtech/zosbase/pkg/stubs" 14 | ) 15 | 16 | type DeploymentID struct { 17 | Twin uint32 18 | Contract uint64 19 | } 20 | 21 | type CapacitySetter struct { 22 | substrateGateway *stubs.SubstrateGatewayStub 23 | ch chan DeploymentID 24 | storage provision.Storage 25 | } 26 | 27 | func NewCapacitySetter(substrateGateway *stubs.SubstrateGatewayStub, storage provision.Storage) CapacitySetter { 28 | return CapacitySetter{ 29 | substrateGateway: substrateGateway, 30 | storage: storage, 31 | ch: make(chan DeploymentID, 215), 32 | } 33 | } 34 | 35 | func (c *CapacitySetter) Callback(twin uint32, contract uint64, delete bool) { 36 | // we don't set capacity on the grid on deletion 37 | if delete { 38 | return 39 | } 40 | 41 | // we just push it to channel so we can return as soon 42 | // as possible. The channel should have enough capacity 43 | // to accept enough active contracts. 
44 | c.ch <- DeploymentID{Twin: twin, Contract: contract} 45 | } 46 | 47 | func (c *CapacitySetter) setWithClient(deployments ...gridtypes.Deployment) error { 48 | caps := make([]substrate.ContractResources, 0, len(deployments)) 49 | for _, deployment := range deployments { 50 | var total gridtypes.Capacity 51 | for i := range deployment.Workloads { 52 | wl := &deployment.Workloads[i] 53 | if wl.Result.State.IsOkay() { 54 | cap, err := wl.Capacity() 55 | if err != nil { 56 | log.Error().Err(err).Str("workload", wl.Name.String()). 57 | Msg("failed to compute capacity consumption for workload") 58 | continue 59 | } 60 | 61 | total.Add(&cap) 62 | } 63 | } 64 | cap := substrate.ContractResources{ 65 | ContractID: types.U64(deployment.ContractID), 66 | Used: substrate.Resources{ 67 | HRU: types.U64(total.HRU), 68 | SRU: types.U64(total.SRU), 69 | CRU: types.U64(total.CRU), 70 | MRU: types.U64(total.MRU), 71 | }, 72 | } 73 | 74 | log.Debug(). 75 | Uint64("contract", deployment.ContractID). 76 | Uint("sru", uint(cap.Used.SRU)). 77 | Uint("hru", uint(cap.Used.HRU)). 78 | Uint("mru", uint(cap.Used.MRU)). 79 | Uint("cru", uint(cap.Used.CRU)). 80 | Msg("reporting contract usage") 81 | 82 | caps = append(caps, cap) 83 | } 84 | 85 | bo := backoff.WithMaxRetries( 86 | backoff.NewConstantBackOff(6*time.Second), 87 | 4, 88 | ) 89 | 90 | return backoff.RetryNotify(func() error { 91 | return c.substrateGateway.SetContractConsumption(context.Background(), caps...) 92 | }, bo, func(err error, d time.Duration) { 93 | log.Error().Err(err).Dur("retry-in", d).Msg("failed to set contract consumption") 94 | }) 95 | } 96 | 97 | func (c *CapacitySetter) Set(deployment ...gridtypes.Deployment) error { 98 | if len(deployment) == 0 { 99 | return nil 100 | } 101 | 102 | return c.setWithClient(deployment...) 
103 | } 104 | 105 | func (c *CapacitySetter) Run(ctx context.Context) error { 106 | for { 107 | var id DeploymentID 108 | select { 109 | case <-ctx.Done(): 110 | return nil 111 | case id = <-c.ch: 112 | } 113 | 114 | log := log.With().Uint32("twin", id.Twin).Uint64("contract", id.Contract).Logger() 115 | 116 | deployment, err := c.storage.Get(id.Twin, id.Contract) 117 | if err != nil { 118 | log.Error().Err(err).Msg("failed to get deployment") 119 | continue 120 | } 121 | 122 | if err := c.Set(deployment); err != nil { 123 | log.Error().Err(err).Msg("failed to set contract usage") 124 | } 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /cmds/modules/provisiond/swagger.go: -------------------------------------------------------------------------------- 1 | package provisiond 2 | 3 | import ( 4 | "embed" 5 | "io/fs" 6 | "path/filepath" 7 | 8 | "github.com/rs/zerolog/log" 9 | ) 10 | 11 | //go:embed swagger 12 | var static embed.FS 13 | 14 | var swaggerFs fs.FS = &fsWithPrefix{ 15 | FS: static, 16 | prefix: "swagger", 17 | } 18 | 19 | type fsWithPrefix struct { 20 | fs.FS 21 | prefix string 22 | } 23 | 24 | func (f *fsWithPrefix) Open(name string) (fs.File, error) { 25 | newName := filepath.Join(f.prefix, name) 26 | file, err := f.FS.Open(newName) 27 | if err != nil { 28 | log.Error().Err(err).Msg("failed to open file") 29 | } 30 | return file, err 31 | } 32 | -------------------------------------------------------------------------------- /cmds/modules/provisiond/swagger/favicon-16x16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/threefoldtech/zos/0199ee83ff9918918f18fd25fd7ba1cdc1c7abce/cmds/modules/provisiond/swagger/favicon-16x16.png -------------------------------------------------------------------------------- /cmds/modules/provisiond/swagger/favicon-32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/threefoldtech/zos/0199ee83ff9918918f18fd25fd7ba1cdc1c7abce/cmds/modules/provisiond/swagger/favicon-32x32.png -------------------------------------------------------------------------------- /cmds/modules/provisiond/swagger/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | Swagger UI 8 | 9 | 10 | 11 | 29 | 30 | 31 | 32 |
33 | 34 | 35 | 36 | 57 | 58 | 59 | 60 | -------------------------------------------------------------------------------- /cmds/modules/provisiond/swagger/oauth2-redirect.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Swagger UI: OAuth2 Redirect 5 | 6 | 7 | 74 | 75 | 76 | -------------------------------------------------------------------------------- /cmds/modules/qsfsd/main.go: -------------------------------------------------------------------------------- 1 | package qsfsd 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/pkg/errors" 7 | "github.com/threefoldtech/zosbase/pkg/qsfsd" 8 | "github.com/threefoldtech/zosbase/pkg/utils" 9 | "github.com/urfave/cli/v2" 10 | 11 | "github.com/rs/zerolog/log" 12 | 13 | "github.com/threefoldtech/zbus" 14 | ) 15 | 16 | const ( 17 | module = "qsfsd" 18 | ) 19 | 20 | // Module is entry point for module 21 | var Module cli.Command = cli.Command{ 22 | Name: "qsfsd", 23 | Usage: "manage qsfsd", 24 | Flags: []cli.Flag{ 25 | &cli.StringFlag{ 26 | Name: "root", 27 | Usage: "`ROOT` working directory of the module", 28 | Value: "/var/cache/modules/qsfsd", 29 | }, 30 | &cli.StringFlag{ 31 | Name: "broker", 32 | Usage: "connection string to the message `BROKER`", 33 | Value: "unix:///var/run/redis.sock", 34 | }, 35 | &cli.UintFlag{ 36 | Name: "workers", 37 | Usage: "number of workers `N`", 38 | Value: 1, 39 | }, 40 | }, 41 | Action: action, 42 | } 43 | 44 | func action(cli *cli.Context) error { 45 | var ( 46 | moduleRoot string = cli.String("root") 47 | msgBrokerCon string = cli.String("broker") 48 | workerNr uint = cli.Uint("workers") 49 | ) 50 | 51 | server, err := zbus.NewRedisServer(module, msgBrokerCon, workerNr) 52 | if err != nil { 53 | return errors.Wrap(err, "fail to connect to message broker server") 54 | } 55 | 56 | client, err := zbus.NewRedisClient(msgBrokerCon) 57 | if err != nil { 58 | return errors.Wrap(err, "failed to connect to zbus broker") 59 | } 60 | 61 | ctx, cancel := utils.WithSignal(cli.Context) 62 | defer cancel() 63 | 64 | mod, err := qsfsd.New(ctx, client, moduleRoot) 65 | if err != nil { 66 | return errors.Wrap(err, "failed to construct qsfsd object") 67 | } 68 | 69 | server.Register(zbus.ObjectID{Name: "manager", Version: "0.0.1"}, mod) 70 | log.Info(). 71 | Str("broker", msgBrokerCon). 72 | Uint("worker nr", workerNr). 
73 | Msg("starting qsfsd module") 74 | 75 | utils.OnDone(ctx, func(_ error) { 76 | log.Info().Msg("shutting down") 77 | }) 78 | 79 | if err := server.Run(ctx); err != nil && err != context.Canceled { 80 | return errors.Wrap(err, "unexpected error") 81 | } 82 | 83 | return nil 84 | } 85 | -------------------------------------------------------------------------------- /cmds/modules/storaged/main.go: -------------------------------------------------------------------------------- 1 | package storaged 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/pkg/errors" 7 | "github.com/rs/zerolog/log" 8 | "github.com/urfave/cli/v2" 9 | 10 | "github.com/threefoldtech/zbus" 11 | "github.com/threefoldtech/zosbase/pkg/storage" 12 | "github.com/threefoldtech/zosbase/pkg/utils" 13 | ) 14 | 15 | const ( 16 | redisSocket = "unix:///var/run/redis.sock" 17 | module = "storage" 18 | ) 19 | 20 | // Module is module entry point 21 | var Module cli.Command = cli.Command{ 22 | Name: "storaged", 23 | Usage: "handles and manages disks and volumes creation", 24 | Flags: []cli.Flag{ 25 | &cli.StringFlag{ 26 | Name: "broker", 27 | Usage: "connection string to the message `BROKER`", 28 | Value: "unix:///var/run/redis.sock", 29 | }, 30 | &cli.UintFlag{ 31 | Name: "workers", 32 | Usage: "number of workers `N`", 33 | Value: 1, 34 | }, 35 | }, 36 | Action: action, 37 | } 38 | 39 | func action(cli *cli.Context) error { 40 | var ( 41 | msgBrokerCon string = cli.String("broker") 42 | workerNr uint = cli.Uint("workers") 43 | ) 44 | 45 | storageModule, err := storage.New(cli.Context) 46 | if err != nil { 47 | return errors.Wrap(err, "failed to initialize storage module") 48 | } 49 | 50 | log.Info().Msg("storage initialization complete") 51 | server, err := zbus.NewRedisServer(module, msgBrokerCon, workerNr) 52 | if err != nil { 53 | return errors.Wrap(err, "fail to connect to message broker server") 54 | } 55 | 56 | server.Register(zbus.ObjectID{Name: "storage", Version: "0.0.1"}, storageModule) 57 | 58 | log.Info(). 59 | Str("broker", msgBrokerCon). 60 | Uint("worker nr", workerNr). 61 | Msg("starting storaged module") 62 | 63 | ctx, _ := utils.WithSignal(context.Background()) 64 | utils.OnDone(ctx, func(_ error) { 65 | log.Info().Msg("shutting down") 66 | }) 67 | 68 | if err := server.Run(ctx); err != nil && err != context.Canceled { 69 | return errors.Wrap(err, "unexpected error") 70 | } 71 | 72 | return nil 73 | } 74 | -------------------------------------------------------------------------------- /cmds/modules/zbusdebug/main.go: -------------------------------------------------------------------------------- 1 | package zbusdebug 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | "time" 8 | 9 | "github.com/pkg/errors" 10 | "github.com/threefoldtech/zbus" 11 | "github.com/urfave/cli/v2" 12 | "gopkg.in/yaml.v2" 13 | 14 | "github.com/rs/zerolog/log" 15 | ) 16 | 17 | var ( 18 | // PossibleModules is a list of all know zos modules. the modules must match 19 | // the module name declared by the server. 
Hence, we collect them here for 20 | // validation 21 | PossibleModules = map[string]struct{}{ 22 | "storage": {}, 23 | "node": {}, 24 | "identityd": {}, 25 | "vmd": {}, 26 | "flist": {}, 27 | "network": {}, 28 | "container": {}, 29 | "provision": {}, 30 | "gateway": {}, 31 | "qsfsd": {}, 32 | } 33 | 34 | //Module entry point 35 | Module cli.Command = cli.Command{ 36 | Name: "zbusdebug", 37 | Usage: "show status summery for running zbus modules", 38 | Flags: []cli.Flag{ 39 | &cli.StringFlag{ 40 | Name: "broker", 41 | Value: "unix:///var/run/redis.sock", 42 | Usage: "connection string to the message `BROKER`", 43 | }, 44 | &cli.StringFlag{ 45 | Name: "module", 46 | Usage: "debug specific `MODULE`", 47 | }, 48 | }, 49 | Action: action, 50 | } 51 | ) 52 | 53 | func action(cli *cli.Context) error { 54 | var ( 55 | msgBrokerCon string = cli.String("broker") 56 | module string = cli.String("module") 57 | ) 58 | 59 | cl, err := zbus.NewRedisClient(msgBrokerCon) 60 | if err != nil { 61 | return errors.Wrap(err, "failed to initialize zbus client") 62 | } 63 | 64 | var debug []string 65 | if module != "" { 66 | _, ok := PossibleModules[module] 67 | if !ok { 68 | return fmt.Errorf("unknown module") 69 | } 70 | 71 | debug = append(debug, module) 72 | } else { 73 | for module := range PossibleModules { 74 | debug = append(debug, module) 75 | } 76 | } 77 | parent := context.Background() 78 | for _, module := range debug { 79 | if err := printModuleStatus(parent, cl, module); err != nil { 80 | log.Error().Str("module", module).Err(err).Msg("failed to get status for module") 81 | if len(debug) == 1 { 82 | return err 83 | } 84 | } 85 | } 86 | 87 | return nil 88 | } 89 | 90 | func printModuleStatus(ctx context.Context, cl zbus.Client, module string) error { 91 | fmt.Println("## Status for ", module) 92 | ctx, cancel := context.WithTimeout(ctx, 3*time.Second) 93 | defer cancel() 94 | status, err := cl.Status(ctx, module) 95 | if err != nil { 96 | return err 97 | } 98 | 99 | enc := yaml.NewEncoder(os.Stdout) 100 | defer enc.Close() 101 | 102 | enc.Encode(status) 103 | fmt.Println() 104 | return nil 105 | } 106 | -------------------------------------------------------------------------------- /cmds/modules/zlf/README.md: -------------------------------------------------------------------------------- 1 | # DEPRECATED 2 | 3 | # zlf 4 | Zero-OS Logs Forwarder reads logs from local unix socket and forward them to a remote redis server 5 | 6 | # Usage 7 | Usage of ./zlf: 8 | -channel string 9 | redis logs channel name (default "zinit-logs") 10 | -host string 11 | redis host (default "localhost") 12 | -logs string 13 | zinit unix socket (default "/var/run/log.sock") 14 | -port int 15 | redis port (default 6379) 16 | -------------------------------------------------------------------------------- /cmds/modules/zlf/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "io" 7 | "net" 8 | "strings" 9 | 10 | "github.com/go-redis/redis" 11 | "github.com/rs/zerolog/log" 12 | "github.com/threefoldtech/zosbase/pkg/app" 13 | ) 14 | 15 | func reader(c io.Reader, r *redis.Client, channel string) { 16 | buf := make([]byte, 1024) 17 | for { 18 | n, err := c.Read(buf[:]) 19 | if err != nil { 20 | return 21 | } 22 | 23 | logline := strings.TrimSpace(string(buf[0:n])) 24 | 25 | if r.Publish(channel, logline).Err() != nil { 26 | log.Fatal().Err(err).Msg("error while publishing log line") 27 | } 28 | } 29 | } 30 | 31 | func main() { 32 | 
app.Initialize() 33 | 34 | lUnix := flag.String("logs", "/var/run/log.sock", "zinit unix socket") 35 | rChan := flag.String("channel", "zinit-logs", "redis logs channel name") 36 | rHost := flag.String("host", "localhost", "redis host") 37 | rPort := flag.Int("port", 6379, "redis port") 38 | 39 | flag.Parse() 40 | 41 | fmt.Printf("[+] opening logs: %s\n", *lUnix) 42 | 43 | // connect to local logs 44 | c, err := net.Dial("unix", *lUnix) 45 | if err != nil { 46 | log.Fatal().Err(err).Msg("cannot dial server") 47 | } 48 | 49 | fmt.Printf("[+] connecting redis: %s:%d\n", *rHost, *rPort) 50 | 51 | // connect to redis server 52 | client := redis.NewClient(&redis.Options{ 53 | Addr: fmt.Sprintf("%s:%d", *rHost, *rPort), 54 | Password: "", 55 | DB: 0, 56 | }) 57 | 58 | if _, err := client.Ping().Result(); err != nil { 59 | log.Fatal().Err(err).Msg("cannot ping server") 60 | } 61 | 62 | fmt.Printf("[+] forwarding logs to channel: %s\n", *rChan) 63 | 64 | reader(c, client, *rChan) 65 | } 66 | -------------------------------------------------------------------------------- /cmds/modules/zui/disk.go: -------------------------------------------------------------------------------- 1 | package zui 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "sort" 7 | 8 | ui "github.com/gizak/termui/v3" 9 | "github.com/gizak/termui/v3/widgets" 10 | "github.com/pkg/errors" 11 | "github.com/threefoldtech/zbus" 12 | "github.com/threefoldtech/zosbase/pkg/stubs" 13 | ) 14 | 15 | func diskRender(client zbus.Client, grid *ui.Grid, render *signalFlag) error { 16 | const ( 17 | mega = 1024 * 1024 18 | ) 19 | 20 | pools := widgets.NewTable() 21 | pools.Title = "Storage Pools" 22 | pools.RowSeparator = false 23 | pools.TextAlignment = ui.AlignCenter 24 | pools.Rows = [][]string{ 25 | {"POOL", "TOTAL", "USED"}, 26 | } 27 | 28 | grid.Set( 29 | ui.NewRow(1.0, 30 | ui.NewCol(1, pools), 31 | ), 32 | ) 33 | 34 | ctx := context.Background() 35 | 36 | monitor := stubs.NewStorageModuleStub(client) 37 | stats, err := monitor.Monitor(ctx) 38 | if err != nil { 39 | return errors.Wrap(err, "failed to start net monitor stream") 40 | } 41 | 42 | var keys []string 43 | 44 | go func() { 45 | for s := range stats { 46 | if len(keys) != len(s) { 47 | for key := range s { 48 | keys = append(keys, key) 49 | } 50 | sort.Strings(keys) 51 | } 52 | 53 | rows := pools.Rows[:1] 54 | 55 | for _, key := range keys { 56 | pool := s[key] 57 | rows = append(rows, 58 | []string{ 59 | key, 60 | fmt.Sprintf("%d MB", pool.Total/mega), 61 | fmt.Sprintf("%0.00f%%", 100.0*(float64(pool.Used)/float64(pool.Total))), 62 | }, 63 | ) 64 | } 65 | 66 | pools.Rows = rows 67 | render.Signal() 68 | } 69 | }() 70 | 71 | return nil 72 | } 73 | -------------------------------------------------------------------------------- /cmds/modules/zui/header.go: -------------------------------------------------------------------------------- 1 | package zui 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "strings" 7 | "syscall" 8 | "unsafe" 9 | 10 | "github.com/gizak/termui/v3/widgets" 11 | "github.com/pkg/errors" 12 | 13 | "github.com/rs/zerolog/log" 14 | "github.com/threefoldtech/zbus" 15 | "github.com/threefoldtech/zosbase/pkg/app" 16 | "github.com/threefoldtech/zosbase/pkg/environment" 17 | "github.com/threefoldtech/zosbase/pkg/registrar" 18 | "github.com/threefoldtech/zosbase/pkg/stubs" 19 | ) 20 | 21 | func green(s string) string { 22 | return fmt.Sprintf("[%s](fg:green)", s) 23 | } 24 | 25 | func red(s string) string { 26 | return fmt.Sprintf("[%s](fg:red)", s) 27 | } 28 | 29 | func 
isInProgressError(err error) bool { 30 | return strings.Contains(err.Error(), registrar.ErrInProgress.Error()) 31 | } 32 | 33 | // func headerRenderer(c zbus.Client, h *widgets.Paragraph, r *Flag) error { 34 | func headerRenderer(ctx context.Context, c zbus.Client, h *widgets.Paragraph, r *signalFlag) error { 35 | env, err := environment.Get() 36 | if err != nil { 37 | return err 38 | } 39 | 40 | identity := stubs.NewIdentityManagerStub(c) 41 | registrar := stubs.NewRegistrarStub(c) 42 | 43 | h.Text = "\n Fetching realtime node information... please wait." 44 | 45 | s := " Welcome to [Zero-OS](fg:yellow), [ThreeFold](fg:blue) Autonomous Operating System\n" + 46 | "\n" + 47 | " This is node %s (farmer %s)\n" + 48 | " running Zero-OS version [%s](fg:blue) (mode [%s](fg:cyan))\n" + 49 | " kernel: %s\n" + 50 | " cache disk: %s" 51 | 52 | host := stubs.NewVersionMonitorStub(c) 53 | ch, err := host.Version(ctx) 54 | if err != nil { 55 | return errors.Wrap(err, "failed to start update stream for version") 56 | } 57 | 58 | go func() { 59 | registrarLable := "registrar" 60 | zui := stubs.NewZUIStub(c) 61 | 62 | // empty out zui errors for registrar 63 | if zuiErr := zui.PushErrors(ctx, registrarLable, []string{}); zuiErr != nil { 64 | log.Info().Err(zuiErr).Send() 65 | } 66 | 67 | farmID, _ := identity.FarmID(ctx) 68 | for version := range ch { 69 | var name string 70 | var nodeID string 71 | var farm string 72 | if name, err = identity.Farm(ctx); err != nil { 73 | farm = red(fmt.Sprintf("%d: %s", farmID, err.Error())) 74 | } else { 75 | farm = green(fmt.Sprintf("%d: %s", farmID, name)) 76 | } 77 | 78 | if node, err := registrar.NodeID(ctx); err != nil { 79 | if isInProgressError(err) { 80 | nodeID = green(err.Error()) 81 | } else { 82 | nodeID = red(fmt.Sprintf("%d (unregistered)", node)) 83 | if zuiErr := zui.PushErrors(ctx, registrarLable, []string{err.Error()}); zuiErr != nil { 84 | log.Info().Err(zuiErr).Send() 85 | } 86 | } 87 | } else { 88 | nodeID = green(fmt.Sprint(node)) 89 | } 90 | 91 | cache := green("OK") 92 | if app.CheckFlag(app.LimitedCache) { 93 | cache = red("no ssd disks detected, running on hdd-only mode") 94 | } else if app.CheckFlag(app.ReadonlyCache) { 95 | cache = red("cache is read-only") 96 | } 97 | 98 | var utsname syscall.Utsname 99 | var uname string 100 | if err := syscall.Uname(&utsname); err != nil { 101 | uname = red(err.Error()) 102 | } else { 103 | uname = green(string(unsafe.Slice((*byte)(unsafe.Pointer(&utsname.Release)), len(utsname.Release)))) 104 | } 105 | 106 | h.Text = fmt.Sprintf(s, nodeID, farm, version.String(), env.RunningMode.String(), uname, cache) 107 | r.Signal() 108 | } 109 | }() 110 | 111 | return nil 112 | } 113 | -------------------------------------------------------------------------------- /cmds/modules/zui/mem.go: -------------------------------------------------------------------------------- 1 | package zui 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | ui "github.com/gizak/termui/v3" 8 | "github.com/gizak/termui/v3/widgets" 9 | "github.com/pkg/errors" 10 | "github.com/threefoldtech/zbus" 11 | "github.com/threefoldtech/zosbase/pkg/stubs" 12 | ) 13 | 14 | func memRender(client zbus.Client, grid *ui.Grid, render *signalFlag) error { 15 | const ( 16 | mega = 1024 * 1024 17 | ) 18 | 19 | percent := widgets.NewGauge() 20 | percent.Percent = 0 21 | percent.BarColor = ui.ColorGreen 22 | percent.Title = "Memory Percent" 23 | 24 | total := widgets.NewParagraph() 25 | total.Title = "Memory" 26 | 27 | grid.Set( 28 | ui.NewRow(1, 29 | 
ui.NewCol(1./2, percent), 30 | ui.NewCol(1./2, total), 31 | ), 32 | ) 33 | 34 | monitor := stubs.NewSystemMonitorStub(client) 35 | stream, err := monitor.Memory(context.Background()) 36 | if err != nil { 37 | return errors.Wrap(err, "failed to start mem monitor stream") 38 | } 39 | 40 | go func() { 41 | for point := range stream { 42 | percent.Percent = int(point.UsedPercent) 43 | if point.UsedPercent < 50 { 44 | percent.BarColor = ui.ColorGreen 45 | } else if point.UsedPercent >= 50 && point.UsedPercent < 90 { 46 | percent.BarColor = ui.ColorMagenta 47 | } else if point.UsedPercent > 90 { 48 | percent.BarColor = ui.ColorRed 49 | } 50 | 51 | total.Text = fmt.Sprintf("Total: %d MB, Used: %d MB, Free: %d MB", point.Total/mega, point.Used/mega, point.Free/mega) 52 | render.Signal() 53 | } 54 | }() 55 | 56 | return nil 57 | } 58 | -------------------------------------------------------------------------------- /cmds/modules/zui/net.go: -------------------------------------------------------------------------------- 1 | package zui 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net" 7 | "strings" 8 | 9 | ui "github.com/gizak/termui/v3" 10 | "github.com/gizak/termui/v3/widgets" 11 | _ "github.com/pkg/errors" 12 | "github.com/threefoldtech/zbus" 13 | "github.com/threefoldtech/zosbase/pkg" 14 | "github.com/threefoldtech/zosbase/pkg/network/types" 15 | "github.com/threefoldtech/zosbase/pkg/stubs" 16 | ) 17 | 18 | func addressRender(ctx context.Context, table *widgets.Table, client zbus.Client, render *signalFlag) error { 19 | table.Title = "Network" 20 | table.FillRow = true 21 | table.RowSeparator = false 22 | 23 | table.Rows = [][]string{ 24 | {"ZOS", loading}, 25 | {"DMZ", loading}, 26 | {"YGG", loading}, 27 | {"PUB", loading}, 28 | {"DUL", loading}, 29 | } 30 | 31 | stub := stubs.NewNetworkerStub(client) 32 | zos, err := stub.ZOSAddresses(ctx) 33 | if err != nil { 34 | return err 35 | } 36 | 37 | dmz, err := stub.DMZAddresses(ctx) 38 | if err != nil { 39 | return err 40 | } 41 | 42 | ygg, err := stub.YggAddresses(ctx) 43 | if err != nil { 44 | return err 45 | } 46 | 47 | pub, err := stub.PublicAddresses(ctx) 48 | if err != nil { 49 | return err 50 | } 51 | 52 | toString := func(al pkg.NetlinkAddresses) string { 53 | var buf strings.Builder 54 | for _, a := range al { 55 | if a.IP == nil || len(a.IP) == 0 { 56 | continue 57 | } 58 | 59 | if buf.Len() > 0 { 60 | buf.WriteString(", ") 61 | } 62 | 63 | buf.WriteString(a.String()) 64 | } 65 | 66 | return buf.String() 67 | } 68 | 69 | a, err := stub.Interfaces(ctx, types.DefaultBridge, "") 70 | if err != nil { 71 | return err 72 | } 73 | 74 | table.Rows[0][1] = toString(a.Interfaces[types.DefaultBridge].IPs) 75 | 76 | go func() { 77 | for { 78 | render.Signal() 79 | table.ColumnWidths = []int{6, table.Size().X - 9} 80 | select { 81 | case a := <-zos: 82 | table.Rows[0][1] = toString(a) 83 | case a := <-dmz: 84 | table.Rows[1][1] = toString(a) 85 | case a := <-ygg: 86 | table.Rows[2][1] = toString(a) 87 | case a := <-pub: 88 | str := "no public config" 89 | if a.HasPublicConfig { 90 | str = toString([]net.IPNet{a.IPv4.IPNet, a.IPv6.IPNet}) 91 | } 92 | table.Rows[3][1] = str 93 | } 94 | 95 | exit, err := stub.GetPublicExitDevice(ctx) 96 | dual := exit.String() 97 | if err != nil { 98 | dual = fmt.Sprintf("error: %s", err) 99 | } 100 | 101 | table.Rows[4][1] = dual 102 | } 103 | }() 104 | 105 | return nil 106 | } 107 | 108 | func netRender(client zbus.Client, grid *ui.Grid, render *signalFlag) error { 109 | addresses := widgets.NewTable() 110 | 111 | 
grid.Set( 112 | ui.NewRow(1, 113 | ui.NewCol(1, addresses), 114 | ), 115 | ) 116 | ctx := context.Background() 117 | 118 | if err := addressRender(ctx, addresses, client, render); err != nil { 119 | return err 120 | } 121 | 122 | return nil 123 | } 124 | -------------------------------------------------------------------------------- /cmds/modules/zui/prov.go: -------------------------------------------------------------------------------- 1 | package zui 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | ui "github.com/gizak/termui/v3" 8 | "github.com/gizak/termui/v3/widgets" 9 | "github.com/pkg/errors" 10 | "github.com/threefoldtech/zbus" 11 | "github.com/threefoldtech/zosbase/pkg/gridtypes" 12 | "github.com/threefoldtech/zosbase/pkg/stubs" 13 | ) 14 | 15 | const ( 16 | gig = 1024 * 1024 * 1024.0 17 | mb = 1024 * 1024.0 18 | loading = "Loading..." 19 | ) 20 | 21 | func resourcesRender(client zbus.Client, grid *ui.Grid, render *signalFlag) error { 22 | prov := widgets.NewTable() 23 | usage := widgets.NewTable() 24 | prov.FillRow = true 25 | 26 | grid.Set( 27 | ui.NewRow(1.0, 28 | ui.NewCol(.6, prov), 29 | ui.NewCol(.4, usage), 30 | ), 31 | ) 32 | 33 | if err := provRender(client, render, prov); err != nil { 34 | return errors.Wrap(err, "failed to render system provisioned resources") 35 | } 36 | 37 | if err := usageRender(client, render, usage); err != nil { 38 | return errors.Wrap(err, "failed to render system resources usage") 39 | } 40 | 41 | return nil 42 | } 43 | 44 | func provRender(client zbus.Client, render *signalFlag, prov *widgets.Table) error { 45 | prov.Title = "System Resources" 46 | prov.RowSeparator = false 47 | 48 | prov.Rows = [][]string{ 49 | {"", "Total", "Reserved"}, 50 | {"CRU", loading, loading}, 51 | {"Memory", loading, loading}, 52 | {"SSD", loading, loading}, 53 | {"HDD", loading, loading}, 54 | {"IPv4", loading, loading}, 55 | } 56 | 57 | monitor := stubs.NewStatisticsStub(client) 58 | 59 | total := monitor.Total(context.Background()) 60 | assignTotalResources(prov, total) 61 | render.Signal() 62 | 63 | reserved, err := monitor.ReservedStream(context.Background()) 64 | if err != nil { 65 | return errors.Wrap(err, "failed to start net monitor stream") 66 | } 67 | 68 | go func() { 69 | for counter := range reserved { 70 | rows := prov.Rows 71 | rows[1][2] = fmt.Sprint(counter.CRU) 72 | rows[2][2] = fmt.Sprintf("%0.00f GB", float64(counter.MRU)/gig) 73 | rows[3][2] = fmt.Sprintf("%0.00f GB", float64(counter.SRU)/gig) 74 | rows[4][2] = fmt.Sprintf("%0.00f GB", float64(counter.HRU)/gig) 75 | rows[5][2] = fmt.Sprint(counter.IPV4U) 76 | 77 | render.Signal() 78 | } 79 | }() 80 | 81 | return nil 82 | } 83 | 84 | func usageRender(client zbus.Client, render *signalFlag, usage *widgets.Table) error { 85 | usage.Title = "Usage" 86 | usage.RowSeparator = false 87 | usage.FillRow = true 88 | 89 | usage.Rows = [][]string{ 90 | {"CPU", loading}, 91 | {"Memory", loading}, 92 | } 93 | 94 | sysMonitor := stubs.NewSystemMonitorStub(client) 95 | cpuStream, err := sysMonitor.CPU(context.Background()) 96 | if err != nil { 97 | return errors.Wrap(err, "failed to start cpu monitor stream") 98 | } 99 | 100 | go func() { 101 | for point := range cpuStream { 102 | usage.Rows[0][1] = fmt.Sprintf("%0.00f%%", point.Percent) 103 | render.Signal() 104 | } 105 | }() 106 | 107 | memStream, err := sysMonitor.Memory(context.Background()) 108 | if err != nil { 109 | return errors.Wrap(err, "failed to start mem monitor stream") 110 | } 111 | 112 | go func() { 113 | for point := range memStream { 114 | 
usage.Rows[1][1] = fmt.Sprintf("%0.00f MB", float64(point.Used/mb)) 115 | render.Signal() 116 | } 117 | }() 118 | 119 | return nil 120 | } 121 | 122 | func assignTotalResources(prov *widgets.Table, total gridtypes.Capacity) { 123 | rows := prov.Rows 124 | rows[1][1] = fmt.Sprint(total.CRU) 125 | rows[2][1] = fmt.Sprintf("%0.00f GB", float64(total.MRU)/gig) 126 | rows[3][1] = fmt.Sprintf("%0.00f GB", float64(total.SRU)/gig) 127 | rows[4][1] = fmt.Sprintf("%0.00f GB", float64(total.HRU)/gig) 128 | rows[5][1] = fmt.Sprint(total.IPV4U) 129 | } 130 | -------------------------------------------------------------------------------- /cmds/zos/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | 8 | "github.com/rs/zerolog" 9 | "github.com/rs/zerolog/log" 10 | apigateway "github.com/threefoldtech/zos/cmds/modules/api_gateway" 11 | "github.com/threefoldtech/zos/cmds/modules/contd" 12 | "github.com/threefoldtech/zos/cmds/modules/flistd" 13 | "github.com/threefoldtech/zos/cmds/modules/gateway" 14 | "github.com/threefoldtech/zos/cmds/modules/networkd" 15 | "github.com/threefoldtech/zos/cmds/modules/noded" 16 | "github.com/threefoldtech/zos/cmds/modules/powerd" 17 | "github.com/threefoldtech/zos/cmds/modules/provisiond" 18 | "github.com/threefoldtech/zos/cmds/modules/qsfsd" 19 | "github.com/threefoldtech/zos/cmds/modules/storaged" 20 | "github.com/threefoldtech/zos/cmds/modules/vmd" 21 | "github.com/threefoldtech/zos/cmds/modules/zbusdebug" 22 | "github.com/threefoldtech/zos/cmds/modules/zui" 23 | "github.com/threefoldtech/zosbase/pkg/app" 24 | "github.com/threefoldtech/zosbase/pkg/version" 25 | "github.com/urfave/cli/v2" 26 | ) 27 | 28 | func main() { 29 | app.Initialize() 30 | 31 | exe := cli.App{ 32 | Name: "ZOS", 33 | Version: version.Current().String(), 34 | 35 | Flags: []cli.Flag{ 36 | &cli.BoolFlag{ 37 | Name: "list", 38 | Hidden: true, 39 | Usage: "print all available clients names", 40 | }, 41 | &cli.BoolFlag{ 42 | Name: "debug", 43 | Aliases: []string{"d"}, 44 | Usage: "force debug level", 45 | }, 46 | }, 47 | Commands: []*cli.Command{ 48 | &zui.Module, 49 | &storaged.Module, 50 | &flistd.Module, 51 | &contd.Module, 52 | &vmd.Module, 53 | &noded.Module, 54 | &networkd.Module, 55 | &provisiond.Module, 56 | &zbusdebug.Module, 57 | &gateway.Module, 58 | &qsfsd.Module, 59 | &powerd.Module, 60 | &apigateway.Module, 61 | }, 62 | Before: func(c *cli.Context) error { 63 | if c.Bool("debug") { 64 | zerolog.SetGlobalLevel(zerolog.DebugLevel) 65 | log.Debug().Msg("setting log level to debug") 66 | } 67 | return nil 68 | }, 69 | Action: func(c *cli.Context) error { 70 | if !c.Bool("list") { 71 | cli.ShowAppHelpAndExit(c, 0) 72 | } 73 | // this hidden flag (complete) is used to list 74 | // all available modules names to automate building of 75 | // symlinks 76 | for _, cmd := range c.App.VisibleCommands() { 77 | if cmd.Name == "help" { 78 | continue 79 | } 80 | fmt.Println(cmd.Name) 81 | } 82 | return nil 83 | }, 84 | } 85 | 86 | cli.VersionPrinter = func(c *cli.Context) { 87 | fmt.Println(c.App.Version) 88 | } 89 | name := filepath.Base(os.Args[0]) 90 | args := os.Args 91 | for _, cmd := range exe.Commands { 92 | if cmd.Name == name { 93 | args = make([]string, 0, len(os.Args)+1) 94 | // this converts /bin/name to 'zos 95 | args = append(args, "bin", name) 96 | args = append(args, os.Args[1:]...) 
97 | break 98 | } 99 | } 100 | 101 | if err := exe.Run(args); err != nil { 102 | log.Fatal().Err(err).Msg("exiting") 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /etc/sysctl.conf: -------------------------------------------------------------------------------- 1 | fs.inotify.max_user_instances = 8192 2 | fs.inotify.max_user_watches = 524288 3 | -------------------------------------------------------------------------------- /etc/zinit/api-gateway.yaml: -------------------------------------------------------------------------------- 1 | exec: api-gateway --broker unix:///var/run/redis.sock 2 | after: 3 | - boot 4 | - identityd 5 | -------------------------------------------------------------------------------- /etc/zinit/boot.yaml: -------------------------------------------------------------------------------- 1 | # boot is a pseudo boot stage to make sure that 2 | # storaged and networkd are running before any 3 | # of the other modules 4 | 5 | exec: "true" 6 | oneshot: true 7 | after: 8 | # list of all services that must be running before moving on 9 | - node-ready 10 | - storaged 11 | - internet -------------------------------------------------------------------------------- /etc/zinit/contd.yaml: -------------------------------------------------------------------------------- 1 | exec: contd --broker unix:///var/run/redis.sock --root /var/cache/modules/contd 2 | after: 3 | - containerd 4 | - boot 5 | -------------------------------------------------------------------------------- /etc/zinit/flistd.yaml: -------------------------------------------------------------------------------- 1 | exec: flistd --broker unix:///var/run/redis.sock --root /var/cache/modules/flistd 2 | after: 3 | - boot 4 | # identityd is added to make sure all binaries are up to date 5 | - identityd 6 | -------------------------------------------------------------------------------- /etc/zinit/gateway.yaml: -------------------------------------------------------------------------------- 1 | exec: gateway --broker unix:///var/run/redis.sock --root /var/cache/modules/gateway 2 | after: 3 | - boot 4 | - networkd 5 | -------------------------------------------------------------------------------- /etc/zinit/identityd.yaml: -------------------------------------------------------------------------------- 1 | exec: identityd 2 | test: zbusdebug --module identityd 3 | after: 4 | - boot 5 | -------------------------------------------------------------------------------- /etc/zinit/init/node-ready.sh: -------------------------------------------------------------------------------- 1 | #!env sh 2 | 3 | # This file has some initialization steps that should be executed after 4 | # the node basic services are loaded (as defined by 0-initramfs), but before 5 | # the rest of the system is booted. 
6 | 7 | 8 | setup_loopback() { 9 | ip link set lo up 10 | } 11 | 12 | disable_overlay() { 13 | rmmod ata_piix 14 | rmmod pata_acpi 15 | rmmod ata_generic 16 | rmmod libata 17 | partprobe 18 | } 19 | 20 | main() { 21 | # bring the loop back interface up 22 | setup_loopback 23 | 24 | # does not expose qemu overlay to the system 25 | disable_overlay 26 | 27 | exit 0 28 | } 29 | 30 | main 31 | -------------------------------------------------------------------------------- /etc/zinit/networkd.yaml: -------------------------------------------------------------------------------- 1 | exec: networkd --broker unix:///var/run/redis.sock --root /var/cache/modules/networkd 2 | test: zbusdebug --module network 3 | after: 4 | - boot 5 | -------------------------------------------------------------------------------- /etc/zinit/node-ready.yaml: -------------------------------------------------------------------------------- 1 | # ready is a pseudo boot stage to mark that all 2 | # zinit unit files provided by the base image 3 | # have been booted correctly 4 | 5 | exec: sh /etc/zinit/init/node-ready.sh 6 | oneshot: true 7 | after: 8 | # list of all services that must be running before moving on 9 | # note, these services are defined by the 0-initramfs repo 10 | - local-modprobe 11 | - udev-trigger 12 | - redis 13 | - haveged 14 | - cgroup 15 | -------------------------------------------------------------------------------- /etc/zinit/noded.yaml: -------------------------------------------------------------------------------- 1 | exec: ip netns exec ndmz noded --broker unix:///var/run/redis.sock 2 | after: 3 | - boot 4 | - networkd 5 | -------------------------------------------------------------------------------- /etc/zinit/powerd.yaml: -------------------------------------------------------------------------------- 1 | exec: powerd --broker unix://var/run/redis.sock 2 | after: 3 | - boot 4 | - noded 5 | -------------------------------------------------------------------------------- /etc/zinit/provisiond.yaml: -------------------------------------------------------------------------------- 1 | # provisind runs inside ndmz. 
the ndmz has rules to accept connection to 2 | # provisiond address :2021 3 | exec: provisiond --broker unix:///var/run/redis.sock --root /var/cache/modules/provisiond 4 | after: 5 | - boot 6 | - flistd 7 | - contd 8 | - networkd 9 | -------------------------------------------------------------------------------- /etc/zinit/qsfsd.yaml: -------------------------------------------------------------------------------- 1 | exec: qsfsd --broker unix:///var/run/redis.sock --root /var/cache/modules/qsfsd 2 | after: 3 | - boot 4 | - contd -------------------------------------------------------------------------------- /etc/zinit/quiet.yaml: -------------------------------------------------------------------------------- 1 | # makes sure kernel does not print to console 2 | # it would be better if we can just force the kernel 3 | # not to write to the active tty 4 | exec: dmesg -n crit 5 | oneshot: true 6 | -------------------------------------------------------------------------------- /etc/zinit/redis.yaml: -------------------------------------------------------------------------------- 1 | exec: redis-server --port 0 --unixsocket /var/run/redis.sock -------------------------------------------------------------------------------- /etc/zinit/storaged.yaml: -------------------------------------------------------------------------------- 1 | exec: storaged --broker unix:///var/run/redis.sock 2 | # we only consider the storaged is running only if the /var/cache is mounted 3 | test: mountpoint /var/cache 4 | after: 5 | - node-ready 6 | -------------------------------------------------------------------------------- /etc/zinit/sysctl.yaml: -------------------------------------------------------------------------------- 1 | exec: sysctl -p 2 | oneshot: true 3 | -------------------------------------------------------------------------------- /etc/zinit/ttylog.yaml: -------------------------------------------------------------------------------- 1 | exec: sh -c 'zinit log > /dev/tty2' 2 | log: stdout 3 | -------------------------------------------------------------------------------- /etc/zinit/vmd.yaml: -------------------------------------------------------------------------------- 1 | exec: vmd --broker unix:///var/run/redis.sock 2 | after: 3 | - boot 4 | - networkd 5 | -------------------------------------------------------------------------------- /etc/zinit/zui.yaml: -------------------------------------------------------------------------------- 1 | # This script is trying to be compatible with zinit version 0.2.5 and the 0.2.1 2 | # in version 0.2.1 processes are not started in it's own sessions hence, the openvt is required 3 | # openvt fails in version 0.2.5 because the process is running in it's own session, hence 4 | # instead a redirection is used. 5 | exec: | 6 | sh -c ' 7 | pkill zui 8 | 9 | if ! 
openvt -s -c 3 -w -- zui -broker unix:///var/run/redis.sock; then 10 | exec zui -broker unix:///var/run/redis.sock <> /dev/tty3 >&0 2>&1 11 | fi 12 | ' 13 | after: 14 | - node-ready 15 | - quiet 16 | -------------------------------------------------------------------------------- /qemu/Makefile: -------------------------------------------------------------------------------- 1 | all: start 2 | 3 | prepare: kernel 4 | 5 | zinit: 6 | @echo "copy zinit into overlay" 7 | cp $(shell which zinit) overlay/sbin/zinit 8 | 9 | kernel: 10 | @echo "Download 0-OS kernel" 11 | wget https://bootstrap.grid.tf/kernel/net/dev.efi 12 | 13 | start: 14 | bash vm.sh -n node1 -c "runmode=dev farmer_id=$(FARMERID)" 15 | test: 16 | bash vm.sh -n node1 -c "runmode=test farmer_id=$(FARMERID)" 17 | 18 | auth: 19 | @echo "Copying your public ssh to machine rootfs" 20 | mkdir -p overlay/root/.ssh 21 | cp ~/.ssh/id_rsa.pub overlay/root/.ssh/authorized_keys 22 | 23 | net: 24 | @echo "Creating a virtual natted network" 25 | bash ./net.sh 26 | 27 | run: 28 | @echo "Running your node" 29 | sudo ./vm.sh -g -n node-01 -c "farmer_id=$(id) version=v3 printk.devmsg=on runmode=dev nomodeset ssh-user=$(user)" 30 | 31 | run-gpu: 32 | @echo "Running your node" 33 | sudo ./vm_gpu.sh -g -n node-01 -c "farmer_id=$(id) version=v3 printk.devmsg=on runmode=dev nomodeset ssh-user=$(user)" -------------------------------------------------------------------------------- /qemu/net.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This is the same as the first case at qemu/README.md in a single script 4 | 5 | sudo ip link add zos0 type bridge 6 | sudo ip link set zos0 up 7 | 8 | sudo ip addr add 192.168.123.1/24 dev zos0 9 | md5=$(echo $USER| md5sum ) 10 | ULA=${md5:0:2}:${md5:2:4}:${md5:6:4} 11 | sudo ip addr add fd${ULA}::1/64 dev zos0 12 | # you might want to add fe80::1/64 13 | sudo ip addr add fe80::1/64 dev zos0 14 | 15 | sudo iptables -t nat -I POSTROUTING -s 192.168.123.0/24 -j MASQUERADE 16 | sudo ip6tables -t nat -I POSTROUTING -s fd${ULA}::/64 -j MASQUERADE 17 | sudo iptables -t filter -I FORWARD --source 192.168.123.0/24 -j ACCEPT 18 | sudo iptables -t filter -I FORWARD --destination 192.168.123.0/24 -j ACCEPT 19 | sudo sysctl -w net.ipv4.ip_forward=1 20 | 21 | sudo dnsmasq --strict-order \ 22 | --except-interface=lo \ 23 | --interface=zos0 \ 24 | --bind-interfaces \ 25 | --dhcp-range=192.168.123.20,192.168.123.50 \ 26 | --dhcp-range=::1000,::1fff,constructor:zos0,ra-stateless,12h \ 27 | --conf-file="" \ 28 | --pid-file=/var/run/qemu-dnsmasq-zos0.pid \ 29 | --dhcp-leasefile=/var/run/qemu-dnsmasq-zos0.leases \ 30 | --dhcp-no-override 31 | -------------------------------------------------------------------------------- /qemu/overlay: -------------------------------------------------------------------------------- 1 | overlay.normal -------------------------------------------------------------------------------- /qemu/overlay.normal/.zero-os-debug: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/threefoldtech/zos/0199ee83ff9918918f18fd25fd7ba1cdc1c7abce/qemu/overlay.normal/.zero-os-debug -------------------------------------------------------------------------------- /qemu/overlay.normal/bin/api-gateway: -------------------------------------------------------------------------------- 1 | ../../../bin/zos -------------------------------------------------------------------------------- 
/qemu/overlay.normal/bin/contd: -------------------------------------------------------------------------------- 1 | ../../../bin/zos -------------------------------------------------------------------------------- /qemu/overlay.normal/bin/flistd: -------------------------------------------------------------------------------- 1 | ../../../bin/zos -------------------------------------------------------------------------------- /qemu/overlay.normal/bin/gateway: -------------------------------------------------------------------------------- 1 | ../../../bin/zos -------------------------------------------------------------------------------- /qemu/overlay.normal/bin/identityd: -------------------------------------------------------------------------------- 1 | ../../../bin/identityd -------------------------------------------------------------------------------- /qemu/overlay.normal/bin/internet: -------------------------------------------------------------------------------- 1 | ../../../bin/internet -------------------------------------------------------------------------------- /qemu/overlay.normal/bin/networkd: -------------------------------------------------------------------------------- 1 | ../../../bin/zos -------------------------------------------------------------------------------- /qemu/overlay.normal/bin/noded: -------------------------------------------------------------------------------- 1 | ../../../bin/zos -------------------------------------------------------------------------------- /qemu/overlay.normal/bin/powerd: -------------------------------------------------------------------------------- 1 | ../../../bin/zos -------------------------------------------------------------------------------- /qemu/overlay.normal/bin/provisiond: -------------------------------------------------------------------------------- 1 | ../../../bin/zos -------------------------------------------------------------------------------- /qemu/overlay.normal/bin/qsfsd: -------------------------------------------------------------------------------- 1 | ../../../bin/zos -------------------------------------------------------------------------------- /qemu/overlay.normal/bin/storaged: -------------------------------------------------------------------------------- 1 | ../../../bin/zos -------------------------------------------------------------------------------- /qemu/overlay.normal/bin/vmd: -------------------------------------------------------------------------------- 1 | ../../../bin/zos -------------------------------------------------------------------------------- /qemu/overlay.normal/bin/zbusdebug: -------------------------------------------------------------------------------- 1 | ../../../bin/zos -------------------------------------------------------------------------------- /qemu/overlay.normal/bin/zui: -------------------------------------------------------------------------------- 1 | ../../../bin/zos -------------------------------------------------------------------------------- /qemu/overlay.normal/etc/sysctl.conf: -------------------------------------------------------------------------------- 1 | ../../../etc/sysctl.conf -------------------------------------------------------------------------------- /qemu/overlay.normal/etc/zinit/api-gateway.yaml: -------------------------------------------------------------------------------- 1 | ../../../../etc/zinit/api-gateway.yaml -------------------------------------------------------------------------------- 
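Every `bin/*` entry in this overlay is a relative symlink back to the single multi-call `zos` binary: `cmds/zos/main.go` rewrites `os.Args` based on `filepath.Base(os.Args[0])` so that `/bin/storaged` behaves like `zos storaged`, and `scripts/collect.sh` generates the same symlinks when packaging a release archive. The snippet below is a minimal, hypothetical sketch of that dispatch pattern in isolation — it is not the actual zos code, and the module names in the switch are just examples:

```go
// multicall.go — a self-contained illustration of the symlink-dispatch trick;
// NOT the zos source, only the same pattern in miniature.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// The binary inspects the name it was invoked through. With symlinks such as
	// `ln -s multicall storaged`, argv[0] carries the module name to dispatch on.
	switch name := filepath.Base(os.Args[0]); name {
	case "storaged", "networkd", "provisiond", "vmd":
		fmt.Printf("dispatching to module %q with args %v\n", name, os.Args[1:])
	default:
		fmt.Printf("%s: invoke through a module symlink, e.g. ./storaged\n", name)
	}
}
```

Building this file once and symlinking it under several module names reproduces the behaviour the overlay relies on: one binary on disk, many apparent daemons.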
/qemu/overlay.normal/etc/zinit/boot.yaml: -------------------------------------------------------------------------------- 1 | ../../../../etc/zinit/boot.yaml -------------------------------------------------------------------------------- /qemu/overlay.normal/etc/zinit/bootstrap.yaml: -------------------------------------------------------------------------------- 1 | # Override the default bootstrap procedure 2 | # embedded in the image if using overlay 3 | exec: "true" 4 | oneshot: true 5 | after: 6 | - internet 7 | -------------------------------------------------------------------------------- /qemu/overlay.normal/etc/zinit/contd.yaml: -------------------------------------------------------------------------------- 1 | ../../../../etc/zinit/contd.yaml -------------------------------------------------------------------------------- /qemu/overlay.normal/etc/zinit/flistd.yaml: -------------------------------------------------------------------------------- 1 | ../../../../etc/zinit/flistd.yaml -------------------------------------------------------------------------------- /qemu/overlay.normal/etc/zinit/gateway.yaml: -------------------------------------------------------------------------------- 1 | ../../../../etc/zinit/gateway.yaml -------------------------------------------------------------------------------- /qemu/overlay.normal/etc/zinit/identityd.yaml: -------------------------------------------------------------------------------- 1 | exec: identityd -d 2 | test: zbusdebug --module identityd 3 | after: 4 | - boot 5 | -------------------------------------------------------------------------------- /qemu/overlay.normal/etc/zinit/init/node-ready.sh: -------------------------------------------------------------------------------- 1 | ../../../../../etc/zinit/init/node-ready.sh -------------------------------------------------------------------------------- /qemu/overlay.normal/etc/zinit/internet.yaml: -------------------------------------------------------------------------------- 1 | ../../../../bootstrap/etc/zinit/internet.yaml -------------------------------------------------------------------------------- /qemu/overlay.normal/etc/zinit/networkd.yaml: -------------------------------------------------------------------------------- 1 | ../../../../etc/zinit/networkd.yaml -------------------------------------------------------------------------------- /qemu/overlay.normal/etc/zinit/node-ready.yaml: -------------------------------------------------------------------------------- 1 | ../../../../etc/zinit/node-ready.yaml -------------------------------------------------------------------------------- /qemu/overlay.normal/etc/zinit/noded.yaml: -------------------------------------------------------------------------------- 1 | ../../../../etc/zinit/noded.yaml -------------------------------------------------------------------------------- /qemu/overlay.normal/etc/zinit/powerd.yaml: -------------------------------------------------------------------------------- 1 | ../../../../etc/zinit/powerd.yaml -------------------------------------------------------------------------------- /qemu/overlay.normal/etc/zinit/provisiond.yaml: -------------------------------------------------------------------------------- 1 | ../../../../etc/zinit/provisiond.yaml -------------------------------------------------------------------------------- /qemu/overlay.normal/etc/zinit/qsfsd.yaml: -------------------------------------------------------------------------------- 1 | ../../../../etc/zinit/qsfsd.yaml 
-------------------------------------------------------------------------------- /qemu/overlay.normal/etc/zinit/quiet.yaml: -------------------------------------------------------------------------------- 1 | ../../../../etc/zinit/quiet.yaml -------------------------------------------------------------------------------- /qemu/overlay.normal/etc/zinit/redis.yaml: -------------------------------------------------------------------------------- 1 | exec: redis-server --protected-mode no --unixsocket /var/run/redis.sock 2 | -------------------------------------------------------------------------------- /qemu/overlay.normal/etc/zinit/storaged.yaml: -------------------------------------------------------------------------------- 1 | ../../../../etc/zinit/storaged.yaml -------------------------------------------------------------------------------- /qemu/overlay.normal/etc/zinit/sysctl.yaml: -------------------------------------------------------------------------------- 1 | ../../../../etc/zinit/sysctl.yaml -------------------------------------------------------------------------------- /qemu/overlay.normal/etc/zinit/ttylog.yaml: -------------------------------------------------------------------------------- 1 | ../../../../etc/zinit/ttylog.yaml -------------------------------------------------------------------------------- /qemu/overlay.normal/etc/zinit/vmd.yaml: -------------------------------------------------------------------------------- 1 | ../../../../etc/zinit/vmd.yaml -------------------------------------------------------------------------------- /qemu/overlay.normal/etc/zinit/zui.yaml: -------------------------------------------------------------------------------- 1 | ../../../../etc/zinit/zui.yaml -------------------------------------------------------------------------------- /qemu/revert-vfio.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | echo "[1/5] Reverting GRUB parameters..." 6 | 7 | # Clean VFIO-related parameters from GRUB_CMDLINE_LINUX_DEFAULT 8 | sudo sed -i 's/\s*vfio-pci.ids=[^" ]*//g' /etc/default/grub 9 | sudo sed -i 's/\s*intel_iommu=on//g' /etc/default/grub 10 | sudo sed -i 's/\s*iommu=pt//g' /etc/default/grub 11 | 12 | echo "[2/5] Removing VFIO initramfs modules..." 13 | sudo rm -f /etc/initramfs-tools/modules 14 | 15 | echo "[3/5] Removing NVIDIA blacklist..." 16 | sudo rm -f /etc/modprobe.d/blacklist-nvidia.conf 17 | 18 | echo "[4/5] Regenerating initramfs..." 19 | sudo update-initramfs -u 20 | 21 | echo "[5/5] Updating GRUB..." 22 | sudo update-grub 23 | 24 | echo "✅ Reverted VFIO GPU passthrough configuration. Reboot to re-enable NVIDIA on host." 25 | -------------------------------------------------------------------------------- /qemu/setup-vfio.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | echo "[1/5] Updating GRUB boot parameters..." 6 | 7 | # Backup and update GRUB 8 | # Update vfio-pci.ids with your GPU IDs 9 | sudo sed -i.bak '/^GRUB_CMDLINE_LINUX_DEFAULT=/ s/"$/ intel_iommu=on iommu=pt vfio-pci.ids=10de:2560,10de:228e"/' /etc/default/grub 10 | 11 | echo "[2/5] Creating vfio.conf for initramfs..." 12 | echo -e "vfio\nvfio_iommu_type1\nvfio_pci\nvfio_virqfd" | sudo tee /etc/initramfs-tools/modules 13 | 14 | echo "[3/5] Blacklisting NVIDIA drivers..." 15 | cat < /dev/null; then 74 | echo "VM $name is already running" 75 | exit 1 76 | fi 77 | 78 | tpmargs="" 79 | if [[ $tpm -eq "1" ]]; then 80 | if ! 
command -v swtpm &> /dev/null; then 81 | echo "tpm option require `swtpm` please install first" 82 | exit 1 83 | fi 84 | pkill swtpm 85 | tpm_dir="$vmdir/tpm" 86 | tpm_socket="$vmdir/swtpm.sock" 87 | mkdir -p $tpm_dir 88 | rm $tpm_socket &> /dev/null || true 89 | # runs in the backgroun 90 | swtpm \ 91 | socket --tpm2 \ 92 | --tpmstate dir=$tpm_dir \ 93 | --ctrl type=unixio,path=$vmdir/swtpm.sock \ 94 | --log level=20 &> tpm.logs & 95 | 96 | while [ ! -S "$tpm_socket" ]; do 97 | echo "waiting for tpm" 98 | sleep 1s 99 | done 100 | sleep 1s 101 | tpmargs="-chardev socket,id=chrtpm,path=${tpm_socket} -tpmdev emulator,id=tpm0,chardev=chrtpm -device tpm-tis,tpmdev=tpm0" 102 | fi 103 | 104 | echo "boot $image" 105 | 106 | qemu-system-x86_64 -kernel $image \ 107 | -m $(( mem * 1024 )) \ 108 | -enable-kvm \ 109 | -cpu host,host-phys-bits \ 110 | -smp $smp \ 111 | -uuid $uuid \ 112 | -netdev bridge,id=zos0,br=${bridge} -device virtio-net-pci,netdev=zos0,mac="${basemac}1" \ 113 | -drive file=fat:rw:$basepath/overlay,format=raw \ 114 | -append "${cmdline}" \ 115 | -drive file=$vmdir/vda.qcow2,if=virtio -drive file=$vmdir/vdb.qcow2,if=virtio \ 116 | -drive file=$vmdir/vdc.qcow2,if=virtio -drive file=$vmdir/vdd.qcow2,if=virtio \ 117 | -drive file=$vmdir/vde.qcow2,if=virtio \ 118 | -serial null -serial mon:stdio \ 119 | ${graphics} \ 120 | ${tpmargs} \ 121 | ; 122 | -------------------------------------------------------------------------------- /qemu/vm_gpu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | debug=0 5 | reset=0 6 | image=zos.efi 7 | kernelargs="" 8 | bridge=zos0 9 | graphics="-nographic -nodefaults" 10 | smp=1 11 | mem=3 12 | tpm=0 13 | 14 | usage() { 15 | cat < /dev/null; then 74 | echo "VM $name is already running" 75 | exit 1 76 | fi 77 | 78 | tpmargs="" 79 | if [[ $tpm -eq "1" ]]; then 80 | if ! command -v swtpm &> /dev/null; then 81 | echo "tpm option require `swtpm` please install first" 82 | exit 1 83 | fi 84 | pkill swtpm 85 | tpm_dir="$vmdir/tpm" 86 | tpm_socket="$vmdir/swtpm.sock" 87 | mkdir -p $tpm_dir 88 | rm $tpm_socket &> /dev/null || true 89 | # runs in the backgroun 90 | swtpm \ 91 | socket --tpm2 \ 92 | --tpmstate dir=$tpm_dir \ 93 | --ctrl type=unixio,path=$vmdir/swtpm.sock \ 94 | --log level=20 &> tpm.logs & 95 | 96 | while [ ! 
-S "$tpm_socket" ]; do 97 | echo "waiting for tpm" 98 | sleep 1s 99 | done 100 | sleep 1s 101 | tpmargs="-chardev socket,id=chrtpm,path=${tpm_socket} -tpmdev emulator,id=tpm0,chardev=chrtpm -device tpm-tis,tpmdev=tpm0" 102 | fi 103 | 104 | echo "boot $image" 105 | 106 | qemu-system-x86_64 -kernel $image \ 107 | -m $(( mem * 1024 )) \ 108 | -enable-kvm \ 109 | -cpu host,host-phys-bits \ 110 | -smp $smp \ 111 | -uuid $uuid \ 112 | -netdev bridge,id=zos0,br=${bridge} -device virtio-net-pci,netdev=zos0,mac="${basemac}1" \ 113 | -drive file=fat:rw:$basepath/overlay,format=raw \ 114 | -append "${cmdline}" \ 115 | -drive file=$vmdir/vda.qcow2,if=virtio -drive file=$vmdir/vdb.qcow2,if=virtio \ 116 | -drive file=$vmdir/vdc.qcow2,if=virtio -drive file=$vmdir/vdd.qcow2,if=virtio \ 117 | -drive file=$vmdir/vde.qcow2,if=virtio \ 118 | -serial null -serial mon:stdio \ 119 | ${graphics} \ 120 | ${tpmargs} \ 121 | -device pci-bridge,chassis_nr=1,id=pcie.1 \ 122 | -device vfio-pci,host=01:00.0,bus=pcie.1,addr=00.0,multifunction=on \ 123 | -device vfio-pci,host=01:00.1,bus=pcie.1,addr=00.1 124 | ; 125 | -------------------------------------------------------------------------------- /qemu/walkthrough.md: -------------------------------------------------------------------------------- 1 | # 0-OS Walkthrough 2 | 3 | ## Create your identity 4 | 5 | In order to identify yourself, you'll need a unique key. 6 | You can easily generate that key with `tfuser` tool. 7 | 8 | ``` 9 | tfuser id -o /tmp/demo-user.seed 10 | ``` 11 | 12 | ## Register your Farm 13 | 14 | In order to group your nodes and identify them to be your, you need 15 | to add them into your farm. But first, you need to create your farm. 16 | 17 | In order to create your farm, you'll need your seed you just created. 18 | This is the only way to idenfity and know you're the owner of the farm. 19 | 20 | ``` 21 | tffarm farm register --seed /tmp/demo-user.seed MyNewFarm 22 | ``` 23 | 24 | ## Start your node 25 | 26 | Start your VM with the `farmer_id` kernel argument. 27 | If you're using the makefile, just do: `make FARMERID=.... 
start` 28 | -------------------------------------------------------------------------------- /scripts/collect.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | # This file is used by CI to build an archive with 5 | # all the binaries, and config files for flist building 6 | 7 | archive=$1 8 | 9 | if [ -z "${archive}" ]; then 10 | echo "missing argument" >&2 11 | exit 1 12 | fi 13 | 14 | mkdir -p ${archive}/bin ${archive}/etc 15 | cp bin/* ${archive}/bin/ 16 | for sub in $(bin/zos --list); do 17 | ln -s zos ${archive}/bin/${sub} 18 | done 19 | cp -r etc/* ${archive}/etc/ 20 | -------------------------------------------------------------------------------- /scripts/create_image.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mkdir $1 4 | sudo debootstrap jammy $1 http://archive.ubuntu.com/ubuntu 5 | 6 | sudo arch-chroot $1 /bin/bash < /etc/resolv.conf 11 | 12 | apt-get update -y 13 | apt-get install -y cloud-init openssh-server curl 14 | cloud-init clean 15 | 16 | apt-get install -y linux-modules-extra-5.15.0-25-generic 17 | echo 'fs-virtiofs' >> /etc/initramfs-tools/modules 18 | update-initramfs -c -k all 19 | 20 | EOF -------------------------------------------------------------------------------- /scripts/install_deps.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | CHV_VERSION="v39.0" 5 | CHV_URL="https://github.com/cloud-hypervisor/cloud-hypervisor/releases/download/${CHV_VERSION}/cloud-hypervisor" 6 | RUSTUP_URL="https://sh.rustup.rs" 7 | VIRTIOFSD_REPO="https://gitlab.com/muhamad.azmy/virtiofsd/-/jobs/6547244336/artifacts/download?file_type=archive" 8 | RFS_VERSION="v1.1.1" 9 | RFS_URL="https://github.com/threefoldtech/rfs/releases/download/${RFS_VERSION}/rfs" 10 | 11 | install_chv() { 12 | echo "Installing cloud-hypervisor ${CHV_VERSION} ..." 13 | wget -q ${CHV_URL} -O /usr/local/bin/cloud-hypervisor 14 | chmod +x /usr/local/bin/cloud-hypervisor 15 | setcap cap_sys_admin,cap_dac_override+eip /usr/local/bin/cloud-hypervisor 16 | } 17 | 18 | 19 | install_virtiofsd() { 20 | echo "Installing virtiofsd ..." 21 | 22 | # specially needed for virtiofsd bin 23 | apt -y update 24 | apt -y install libseccomp-dev libcap-ng-dev 25 | 26 | curl -L -k -o "/tmp/virtiofsd-rs.zip" ${VIRTIOFSD_REPO} 27 | unzip -p /tmp/virtiofsd-rs.zip > /usr/local/bin/virtiofsd 28 | chmod +x /usr/local/bin/virtiofsd 29 | setcap cap_sys_admin,cap_dac_override+eip /usr/local/bin/virtiofsd 30 | } 31 | 32 | install_rfs() { 33 | echo "Installing rfs ${RFS_VERSION} ..." 34 | wget -q ${RFS_URL} 35 | chmod +x rfs 36 | mv ./rfs /usr/local/bin/rfs1 37 | } 38 | 39 | main() { 40 | # must run as superuser 41 | if [ $(id -u) != "0" ]; then 42 | echo "You must be the superuser to run this script" >&2 43 | exit 1 44 | fi 45 | 46 | TEMP_DIR=$(mktemp -d) 47 | pushd $TEMP_DIR 48 | 49 | # install cloud-hypervisor 50 | if ! command -v cloud-hypervisor &>/dev/null; then 51 | install_chv 52 | fi 53 | 54 | # install virtiofsd 55 | if ! command -v virtiofsd &>/dev/null; then 56 | 57 | install_virtiofsd 58 | fi 59 | 60 | # install rfs 61 | if ! command -v rfs1 &>/dev/null; then 62 | install_rfs 63 | fi 64 | 65 | # install mcopy/mkdosfs needed to create cidata image. 
comment if you a have the image 66 | apt -y install dosfstools mtools fuse 67 | 68 | # install screen for managing multiple servers 69 | # NOTE: rust bins like virtiofsd miss the logs, runs on a screen session to workaround that 70 | apt -y install screen 71 | 72 | popd 73 | rm -rf $TEMP_DIR 74 | } 75 | 76 | main -------------------------------------------------------------------------------- /specs/container/design.planuml: -------------------------------------------------------------------------------- 1 | @startuml 2 | 3 | Title Container module flow 4 | 5 | actor tftech 6 | actor reservation 7 | control module 8 | control shim 9 | entity runc 10 | 11 | 12 | == Create container == 13 | reservation -> module: reservation request\n to deploy a container 14 | activate module 15 | module -> shim: Start shim process 16 | activate shim 17 | module -> shim: Create container 18 | shim -> runc: Create container 19 | activate runc 20 | runc -> container: Create container 21 | activate container 22 | runc --> shim: container created 23 | deactivate runc 24 | shim --> module: container created 25 | 26 | == Start container == 27 | module -> shim: Start container 28 | shim -> runc: Start container 29 | activate runc 30 | runc -> container: start container 31 | container -> runc: container started 32 | deactivate runc 33 | 34 | == Upgrade module == 35 | tftech -> module: publish upgrade 36 | deactivate module 37 | module -> module: restart/upgrade 38 | note left: container module is restarted,\n but the container itself is untouched 39 | activate module 40 | 41 | == Stop container == 42 | module -> shim: stop container 43 | shim -> runc: stop container 44 | runc -> container: stop container 45 | deactivate container 46 | container --> runc: container stopped 47 | runc --> shim: container stopped 48 | shim --> module: container stopped 49 | 50 | module -> shim: kill shim 51 | deactivate shim 52 | deactivate module 53 | 54 | @enduml -------------------------------------------------------------------------------- /specs/container/readme.md: -------------------------------------------------------------------------------- 1 | # Container module 2 | 3 | This module is responsible to manage containerized applications. 4 | Its only focus is about starting a process with the proper isolation. So there is no notion of storage or networking in this module, this is handled by the layer above. 5 | 6 | The container runtime used will be compatible with the [OCI specification](https://github.com/opencontainers/runtime-spec), but which one in particular is still to be decided. 7 | 8 | ## Design 9 | 10 | One of the requirements of the container module is that any container running on 0-OS should not be affected by an upgrade/restart of any 0-OS module. 11 | In order to do that, we need to have shim process that keeps the file descriptor of the container open in case of a restart of the container module. The shim process is going to be the one responsible to talk to the OCI runtime to manage the container itself. 12 | 13 | Next is a simplify version of the lifetime flow a container: 14 | 15 | ![flow](../../assets/Container_module_flow.png) 16 | 17 | ## Implementation 18 | 19 | Most of the work to implement such a system as already been done by other. Mainly containerd has some very nice libraries around 20 | shim and runc. 
21 | 22 | Here is a list of links of interest: 23 | - [containerd](https://github.com/containerd/containerd) 24 | - specifically the runtime package: https://github.com/containerd/containerd/tree/master/runtime 25 | - [containerd client example](https://github.com/containerd/containerd/blob/master/docs/getting-started.md) 26 | - [list of projects that have integrated containerd](https://github.com/containerd/containerd/blob/master/ADOPTERS.md) 27 | - [firecracker shim design](https://github.com/firecracker-microvm/firecracker-containerd/blob/master/docs/shim-design.md) 28 | 29 | 30 | ## Module interface 31 | 32 | ```go 33 | type ContainerID string 34 | 35 | type NetworkInfo struct{ 36 | // Currently a container can only join one (and only one) 37 | // network namespace that has to be predefined on the node 38 | // for the container tenant 39 | 40 | // Containers don't need to know anything about bridges, 41 | // IPs, or wireguard, since all of this is only known by the network 42 | // resource, which is out of the scope of this module 43 | Namespace string 44 | } 45 | 46 | type MountInfo struct { 47 | Source string // source of the mount point on the host 48 | Target string // target of mount inside the container 49 | Type string // mount type 50 | Options []string // mount options 51 | } 52 | 53 | type ContainerInfo struct { 54 | ID ContainerID 55 | // Container info 56 | Name string 57 | Flist string 58 | Tags []string 59 | Network NetworkInfo 60 | Mounts []MountInfo 61 | 62 | // NOTE: 63 | // Port forwards are not defined by the container. They can be defined 64 | // by the Network namespace resource. BUT ideally, no port forwards 65 | // will ever be needed since everything is going to be routing based. 66 | } 67 | 68 | 69 | type ContainerModule interface { 70 | // Run creates and starts a container on the node. It auto starts the command line 71 | // defined by `entrypoint` 72 | Run(ns string, name string, flist string, tags, env []string, network NetworkInfo, 73 | mounts []MountInfo, entrypoint string) (ContainerID, error) 74 | 75 | // Inspect returns information about the container, given its container id 76 | Inspect(ns string, id ContainerID) (ContainerInfo, error) 77 | Delete(ns string, id ContainerID) error 78 | } 79 | ``` 80 | 81 | Currently, the container module only exposes a single entity (container) which you can only create or delete as is. There 82 | is no exposure to the underlying processes or tasks running inside the container. This is only to keep things as simple 83 | as possible, until it is necessary to expose these internals. 84 | 85 | ## Logs 86 | Container stdin/stderr is written to `/var/log//.log` -------------------------------------------------------------------------------- /specs/grid3/contract.md: -------------------------------------------------------------------------------- 1 | # Deployment contract 2 | The deployment contract is a contract between: 3 | - User (owner of deployment) 4 | - Node (Zos node) 5 | 6 | The contract must satisfy the following requirements: 7 | 8 | - The user must be able to "reconstruct" his deployment. He (at any time) should be able to read the history of his deployments, and reconstruct the full setup from the blockchain (substrate) 9 | - The user information in the contract must be private to the user: even though the blockchain is public, only the user can read the "content" of this contract. 10 | - The node must be able to validate the contract 11 | 12 | 13 | # Proposal #1 14 | This assumes the following constraints: 15 | - Nodes work solo.
A node is only concerned about itself, and doesn't know or care about other nodes. This is how they are implemented right now; it simplifies the node's life and makes it much easier to manage. A complex multi node setup is orchestrated by an external tool on the client side. 16 | - A single contract is between a **single** user and a **single** node. A multi node setup is orchestrated by the user, and the user needs to create multiple contracts, one for each node involved in the deployment (this is to simplify the setup, a user can then read all his contracts and reconstruct his setup when needed) 17 | - _OPTIONAL_: A single contract for multiple nodes can probably be implemented but will make implementing validation way more complex. 18 | 19 | ## Implementation 20 | - Deployment: a description of the entire deployment on a single node (network, volumes, vms, public_ips etc...). Please check the deployment structure [here](../../pkg/gridtypes/deployment.go) 21 | 22 | - The user then creates a contract as follows: 23 | ```js 24 | contract = { 25 | // address is the node address. 26 | address: "" 27 | // data is the encrypted deployment body. This encrypts the deployment with the **USER** public key. So only the user can read this data later on (or any other key that he keeps safe). 28 | // this data part is read only by the user and can actually hold any information to help him reconstruct his deployment or can be left empty. 29 | data: encrypted(deployment) // optional 30 | // hash: is the deployment's predictable hash. The node must use the same method to calculate the challenge (bytes) to compute this same hash. 31 | // used for validating the deployment from the node side. 32 | hash: hash(deployment) 33 | // ips: number of ips that need to be reserved by the contract and used by the deployment 34 | ips: 0 35 | } 36 | ``` 37 | - After contract creation is successful, the user sends the **FULL** deployment to the node plus the contract ID. In this case, the contract ID is assumed to be the deployment's unique ID. 38 | - The node will then get the contract object from the chain 39 | - Validation of the node address, that it matches _this_ node. 40 | - Validation of the twin (user) signature 41 | - The node will recompute the hash and compare it against the contract hash as a sort of validation. 42 | - If validation is successful, the deployment is applied. 43 | - The node starts reporting capacity consumption to the blockchain; the contract can then bill the user. 44 | -------------------------------------------------------------------------------- /specs/grid3/png/grid3-overlay.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/threefoldtech/zos/0199ee83ff9918918f18fd25fd7ba1cdc1c7abce/specs/grid3/png/grid3-overlay.png -------------------------------------------------------------------------------- /specs/grid3/png/sequence.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/threefoldtech/zos/0199ee83ff9918918f18fd25fd7ba1cdc1c7abce/specs/grid3/png/sequence.png -------------------------------------------------------------------------------- /specs/grid3/uml/rmb.wsd: -------------------------------------------------------------------------------- 1 | @startuml 2 | actor Client as cl 3 | participant RMB as rmb 4 | participant Node as zos 5 | 6 | cl --> rmb: Create an RMB message \n(cmd: zos.deployment.deploy, dat: base64(json(deployment))) 7 | rmb -> zos: Decode payload(dat).
\nSo the node receives a deployment object 8 | zos -> zos: Verify hash against contract hash\nVerify signature of the twin 9 | zos -> rmb: Accepted Or Error 10 | rmb --> cl: Response from the node 11 | group If valid 12 | zos -> zos: Deploy 13 | end group 14 | ... 15 | cl --> rmb: Get Deployment (cmd: zos.deployment.get) 16 | rmb -> zos: Call get method 17 | zos -> rmb: Get Response 18 | rmb --> cl: Response 19 | @enduml 20 | -------------------------------------------------------------------------------- /specs/grid3/uml/sequence.wsd: -------------------------------------------------------------------------------- 1 | @startuml 2 | actor User as user 3 | participant "Grid DB" as db 4 | entity "Node" as node 5 | 6 | user -> db: Create a reservation contract 7 | db -> user: Gets contract ID 8 | user --> node: contract id + deployment 9 | note right 10 | Dotted lines are messages sent over RMB 11 | end note 12 | node -> db: Get and validate contract\nand read deployment data 13 | node --> user: Response with Accepted or Error 14 | group if accepted 15 | node -> node: Apply Deployment 16 | user --> node: Poll on Deployment status and information 17 | ... 18 | node -> db: consumption reports every X amount of minutes 19 | end group 20 | db -> user: Billing based on node reports 21 | @enduml 22 | -------------------------------------------------------------------------------- /specs/init.md: -------------------------------------------------------------------------------- 1 | # PID 1 (init) 2 | The init process is very important to the system, and this process specifically cannot be restarted. Hence it also can't be live updated. 3 | This means PID 1 in zos must do only minimal tasks, as follows: 4 | 5 | - Start configured system processes 6 | - udev, syslogd, klogd, redis, haveged, etc... 7 | - API, and separate modules 8 | - init must make sure all these services are always up and running, re-spawning them if needed 9 | - networking? (maybe this should be delegated to a separate module) 10 | - shutdown, and reboot 11 | 12 | # Available options on the market 13 | - [`runit`](http://smarden.org/runit/) (very light and configurable) 14 | - `systemd` (too much) 15 | - `ignite` (written in rust, pretty immature and no active development) 16 | - [finit](https://github.com/troglobit/finit) : http://troglobit.com/finit.html 17 | - Build our own pid 1 in rust, use ignite as a base (or as a reference) 18 | 19 | # Discussions 20 | After some internal discussion, runit might not be the best option due to how it was built and its purpose (mainly running in containers). 21 | We are strongly leaning toward using our own init based on ignite. 22 | 23 | # Implementation proposal 24 | - Once the init process starts it loads all service [configurations](#configuration) 25 | - Configuration is analysed for dependency cycles, to avoid blocking 26 | - Once configuration is validated, a `job` thread is started for each defined service 27 | - The `job` thread will check the dependency state reported by the other service threads; once they are all `ready` it will spawn 28 | its own service and make sure it is always running by re-spawning it (if needed). A `oneshot` service will never re-spawn.
29 | - A service status can be one of the following 30 | - running 31 | - ok (only `oneshot` can be in this state) 32 | - error (a `job` cannot start because one of its dependencies failed or the binary does not exist) 33 | - re-spawn (process is being re-spawned) 34 | - All service logs are written directly to the kernel ring-buffer 35 | - Optionally, later on one of the daemons can be responsible for reading the logs and pushing them somewhere else. 36 | - Once a service updates its status, other `waiting` threads (that depend on this one) are freed to start. 37 | 38 | ## Controlling 39 | A unix socket interface (a named pipe, maybe) can be used to control the init process as follows: 40 | - Shutdown, Reboot: 41 | - the manager will set the global runlevel to shutdown and ask individual services to die. 42 | - once each service exits, its monitor thread will not re-spawn it due to the global runtime state 43 | - once all services are down, a shutdown (or reboot) is performed. 44 | - Status inquiry 45 | - List all configured services and their status. 46 | - Sync to config 47 | - A new service can be added to the configurations directory, then init will be asked to re-scan: a new service MUST be scheduled to run, a deleted service MUST be stopped. Changed services 48 | are not going to re-spawn. 49 | - Reload 50 | - Reload is given a specific service name, to reload the config in case the parameters or the env list have changed. Reload is NOT done automatically with `sync`; it must be explicitly called for certain services. 51 | - Signal a service by name. 52 | 53 | # Configuration 54 | Each service must have a configuration file that defines how a service should start. 55 | 56 | ```yaml 57 | exec: full command line goes here 58 | oneshot: true/false # default to false 59 | after: # optional 60 | - dependency 1 61 | - dependency 2 62 | environ: # optional 63 | KEY-1: VALUE-1 64 | KEY-2: VALUE-2 65 | ``` 66 | -------------------------------------------------------------------------------- /specs/ipc.md: -------------------------------------------------------------------------------- 1 | # Inter-process communication 2 | According to the quick architecture discussion session we will have sub-processes that 3 | are responsible for certain tasks, for example `networking` or `vm`. The api should be 4 | able to communicate with the individual components to `pull` a solution together by combining functionality 5 | from the separate components. 6 | 7 | A sort of secured inter-process communication is needed, where the api can reach the different components 8 | and also handle different signals when an event requires attention. For example, a `network` manager needs 9 | to free up a virtual network device when the VM that uses it exits. 10 | 11 | We did some research into the best inter-process bus solution we could have, but unfortunately the available solutions didn't 12 | fit precisely (for example DBus and gRPC). Hence we built our own solution, [zbus](zbus.md) 13 | 14 | ## Overview 15 | The API will receive a DSL that describes a certain service. For example a container.
While the DSL is not specified yet, we can have this 16 | pseudo DSL script: 17 | ```yaml 18 | - container: 19 | name: container-1 20 | image: image/id 21 | require: 22 | storage: 23 | # storage specs 24 | network: 25 | # network specs 26 | ``` 27 | 28 | The api will use the bus to find out who implements the `network` api, do the required calls on the network component (over zbus), 29 | then just pass the results to the container component, to do its part. 30 | 31 | In this scenario, the API is a DSL interpreter and a broker that can ask the proper components for an object. -------------------------------------------------------------------------------- /specs/k8s_ha.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/threefoldtech/zos/0199ee83ff9918918f18fd25fd7ba1cdc1c7abce/specs/k8s_ha.jpg -------------------------------------------------------------------------------- /specs/network/HIDDEN-PUBLIC.dia: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/threefoldtech/zos/0199ee83ff9918918f18fd25fd7ba1cdc1c7abce/specs/network/HIDDEN-PUBLIC.dia -------------------------------------------------------------------------------- /specs/network/HIDDEN-PUBLIC.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/threefoldtech/zos/0199ee83ff9918918f18fd25fd7ba1cdc1c7abce/specs/network/HIDDEN-PUBLIC.png -------------------------------------------------------------------------------- /specs/network/create-public-in-farm.md: -------------------------------------------------------------------------------- 1 | ### Node has public access through another path than its 0-boot path 2 | 3 | A Farmer that can provide ExitNodes will need to register these ExitNodes on the Grid as such, where: 4 | - He registers the IPv6 allocation that he received 5 | - He registers the IPv4 subnet 6 | - Specifies which started and self-registered nodes are effectively ExitNodes 7 | - Has his switches and router configured to forward the correct Prefixes/Subnets to the environment 8 | 9 | The Registry will hand out 10 | 11 | ``` 12 | public obj: 13 | allocation: 14 | IPv4: 15 | IPv4 addr/mask 16 | IPv4 gateway 17 | IPv6: 18 | IPv6 addr/mask 19 | IPv6 gateway 20 | 21 | get public obj from Registry 22 | 23 | 24 | 25 | 26 | ``` 27 | -------------------------------------------------------------------------------- /specs/network/readme.md: -------------------------------------------------------------------------------- 1 | # Network module 2 | 3 | The network module should be responsible for the following: 4 | 5 | - Maintain node connectivity to its networks; similar to `NetworkManager`, it will try to automatically bring the node networking up 6 | - The network module should work out of the box, without any configuration. Extra configurations (API driven or from an external config store) MUST be honored to fine tune networking, for example setting up static IPs, or ignoring NICs in the automated bootstrap. 7 | - The network module should provide the following features: 8 | - creation of any number of private network spaces per user 9 | - A user network space should be able to route traffic in the space between containers and VMs, and to the outside. 10 | - Allow configuring a "fast" network when available. This is used for farms that have GB NICs between nodes of the farm. Support for bonding, ...
11 | ## [Some considerations](Requirements.md) (read first) 12 | 13 | ## Interface 14 | 15 | Some definitions first : (nomenclature) 16 | 17 | - A network, Tenant Network, TN 18 | A Tenant Network is all the network resources (netresource, NR) that a user (tenant) has bought and for which a transaction exists. 19 | That means: 20 | - in every node there is a watcher that verifies if the TN has a new version 21 | - for every new version of that network, a node applies the configuration of the NR, that is: 22 | - update the NR container/vrf 23 | - update the wireguard configuration 24 | - update eventual IPv{6,4?} necessary service (dnsmasq/radvd...) 25 | 26 | - A Network Resource (netresource, NR) 27 | A netresource is a single entity in a node that contains: 28 | - a routing namespace (or a VRF, tbd) like a kubernetes pause container, that holds interconnects to other NR of a network, can have a bridge attached for connecting standalone containers/vms. 29 | - services to run that network (IPv6 is not plug and play all the way) 30 | - contains routes to other NRs throug which wg/bridge/vxlan/IPSec GRE 31 | - contains the watcher for new versions of the TN 32 | 33 | 34 | ## Implementation 35 | 36 | [A preliminary layout of how a `Network` should look like](datastructs.md) 37 | 38 | ## Research 39 | So far the choice of technologies to research on: 40 | 41 | - For inter node communication: [wireguard](https://www.wireguard.com/) 42 | 43 | 44 | 45 | 46 | -------------------------------------------------------------------------------- /specs/network/setup/nftables.conf: -------------------------------------------------------------------------------- 1 | table inet filter { 2 | chain input { 3 | type filter hook input priority 0; policy accept; 4 | } 5 | 6 | chain forward { 7 | type filter hook forward priority 0; policy accept; 8 | } 9 | 10 | chain output { 11 | type filter hook output priority 0; policy accept; 12 | } 13 | } 14 | table ip nat { 15 | chain prerouting { 16 | type nat hook prerouting priority -100; policy accept; 17 | } 18 | 19 | chain input { 20 | type nat hook input priority 100; policy accept; 21 | } 22 | 23 | chain output { 24 | type nat hook output priority -100; policy accept; 25 | } 26 | 27 | chain postrouting { 28 | type nat hook postrouting priority 100; policy accept; 29 | } 30 | } 31 | table ip raw { 32 | chain prerouting { 33 | type filter hook prerouting priority -300; policy accept; 34 | iif "oz1" ct zone set 1 35 | iif "oz2" ct zone set 2 36 | iif "oz3" ct zone set 3 37 | iif "oz4" ct zone set 4 38 | iif "oz5" ct zone set 5 39 | iif "ivrf" ct zone set 65535 40 | } 41 | 42 | chain output { 43 | type filter hook output priority -300; policy accept; 44 | oif "oz1" ct zone set 1 45 | oif "oz2" ct zone set 2 46 | oif "oz3" ct zone set 3 47 | oif "oz4" ct zone set 4 48 | oif "oz5" ct zone set 5 49 | oif "ivrf" ct zone set 65535 50 | } 51 | } 52 | table ip mangle { 53 | chain output { 54 | type route hook output priority -150; policy accept; 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /specs/network/setup/vrftests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # setup connection namespaces with a dummy and add an ip 4 | NUM=5 5 | 6 | function prepare(){ 7 | for i in $(seq 1 $NUM) ; do 8 | # ExitPoint and their IPv4 9 | ip netns add z${i} 10 | ip -n z${i} link set lo up 11 | ip -n z${i} link add zone${i} type dummy 12 | ip -n z${i} link set zone${i} 
up 13 | ip -n z${i} addr add 10.10.0.1/24 dev zone${i} 14 | done 15 | 16 | # a public IPv4 in a Nat container 17 | # 18 | ip netns add vrf 19 | 20 | ip link add ivrf type veth peer name ovrf 21 | ip link set ivrf netns vrf 22 | ip -n vrf link set ivrf up 23 | ip -n vrf link set lo up 24 | ip -n vrf addr add 172.18.0.254/24 dev ivrf 25 | ip -n vrf link add cvrf type dummy 26 | 27 | for i in $(seq 1 $NUM) ; do 28 | ip link add oz${i} type veth peer name iz${i} 29 | ip link set iz${i} netns z${i} 30 | ip -n z${i} link set iz${i} up 31 | ip -n z${i} addr add 172.16.0.1/24 dev iz${i} 32 | ip -n z${i} route add default via 172.16.0.254 33 | ip link set oz${i} netns vrf 34 | ip -n vrf link set oz${i} up 35 | done 36 | 37 | ip link set ovrf up 38 | } 39 | 40 | function delete(){ 41 | for i in $(seq 1 $NUM) ; do 42 | ip netns del z${i} 43 | ip link del oz${i} 44 | done 45 | ip netns del vrf 46 | ip link del ovrf 47 | 48 | } 49 | 50 | -------------------------------------------------------------------------------- /specs/network/setup/wg1.conf: -------------------------------------------------------------------------------- 1 | # WG1 2 | [Interface] 3 | ListenPort = 16001 4 | PrivateKey = ECyTpsjBKXKHF9OmmuBA8v/ic1xKgFOUZA65a4rb83c= 5 | 6 | # Config for --- WG2 --- 7 | [Peer] 8 | PublicKey = sYUXTbxtrXPEy5/xB+yWjL+lvIvpiIYTY7gzObNtvRE= 9 | Endpoint = 127.0.0.1:16002 10 | AllowedIPs = fe80::2,192.168.255.2,2001:1:1:2::/64 11 | PersistentKeepalive = 20 12 | 13 | # Config for --- WG3 --- 14 | [Peer] 15 | PublicKey = tPt7YzziTIV5Q9EYH4Yu9mGtSHJPUbRQZXIEI1LCV3s= 16 | Endpoint = 127.0.0.1:16003 17 | AllowedIPs = fe80::3,192.168.255.3,2001:1:1:3::/64 18 | PersistentKeepalive = 20 19 | 20 | # Config for --- WG4 --- 21 | [Peer] 22 | PublicKey = hGaw31Qjj/eJ7LVvhtpXVCXEA9LDamw0S0XT+yoIHgM= 23 | Endpoint = 127.0.0.1:16004 24 | AllowedIPs = fe80::4,192.168.255.4,2001:1:1:4::/64 25 | PersistentKeepalive = 20 26 | 27 | # Config for --- WG5 --- 28 | [Peer] 29 | PublicKey = EaoN8h1qAbQP4YoV1TpSHp5nQpWx3t8FVT4v/ECu0AA= 30 | Endpoint = 127.0.0.1:16005 31 | AllowedIPs = fe80::5,192.168.255.5,2001:1:1:5::/64 32 | PersistentKeepalive = 20 33 | -------------------------------------------------------------------------------- /specs/network/setup/wg1.priv: -------------------------------------------------------------------------------- 1 | ECyTpsjBKXKHF9OmmuBA8v/ic1xKgFOUZA65a4rb83c= 2 | -------------------------------------------------------------------------------- /specs/network/setup/wg1.pub: -------------------------------------------------------------------------------- 1 | Fugz7M+CdxIfe+FgMpvrqMvztiDVsZ59mOi2wYU4QkQ= 2 | -------------------------------------------------------------------------------- /specs/network/setup/wg2.conf: -------------------------------------------------------------------------------- 1 | # WG2 2 | [Interface] 3 | ListenPort = 16002 4 | PrivateKey = 4N8UnDx0f/h4+VpxDJ6/uwcS/wZl9MafBMlApy7//H4= 5 | 6 | # Config for --- WG1 --- 7 | [Peer] 8 | PublicKey = XQD2K4pcSjkwxvjSZjYqksTQaD8ANBZoufJ6AKShImw= 9 | Endpoint = 127.0.0.1:16001 10 | AllowedIPs = fe80::1,192.168.255.1,2001:1:1:1::/64,::/0,0.0.0.0/0 11 | PersistentKeepalive = 20 12 | 13 | # Config for --- WG3 --- 14 | [Peer] 15 | PublicKey = hB3qTy67wAPe9NTrM8JpZ/9yFX2hws3bRlqEeQiTs3c= 16 | Endpoint = 127.0.0.1:16003 17 | AllowedIPs = fe80::3,192.168.255.3,2001:1:1:3::/64 18 | PersistentKeepalive = 20 19 | 20 | # Config for --- WG4 --- 21 | [Peer] 22 | PublicKey = Wkj5433521OMxELS0Pj/jxYIiNTto36FvjQZuEqcIlU= 23 | Endpoint = 127.0.0.1:16004 24 
| AllowedIPs = fe80::4,192.168.255.4,2001:1:1:4::/64 25 | PersistentKeepalive = 20 26 | 27 | # Config for --- WG5 --- 28 | [Peer] 29 | PublicKey = KwjGRCcuxm1x/iHRlT1C6HBQqdhkespBXhCKj6XPjAA= 30 | Endpoint = 127.0.0.1:16005 31 | AllowedIPs = fe80::5,192.168.255.5,2001:1:1:5::/64 32 | PersistentKeepalive = 20 33 | -------------------------------------------------------------------------------- /specs/network/setup/wg2.priv: -------------------------------------------------------------------------------- 1 | 4N8UnDx0f/h4+VpxDJ6/uwcS/wZl9MafBMlApy7//H4= 2 | -------------------------------------------------------------------------------- /specs/network/setup/wg2.pub: -------------------------------------------------------------------------------- 1 | ZV3Ej3srYb0pcmxIYOp+LupX/Q/hTWGvtWADXgOfXFU= 2 | -------------------------------------------------------------------------------- /specs/network/setup/wg3.conf: -------------------------------------------------------------------------------- 1 | # WG3 2 | [Interface] 3 | ListenPort = 16003 4 | PrivateKey = 6NDgyX1auPlCdbpSdMIGiUh36om3j2X1L+a7XBDbDGo= 5 | 6 | # Config for --- WG1 --- 7 | [Peer] 8 | PublicKey = XQD2K4pcSjkwxvjSZjYqksTQaD8ANBZoufJ6AKShImw= 9 | Endpoint = 127.0.0.1:16001 10 | AllowedIPs = fe80::1,192.168.255.1,2001:1:1:1::/64,::/0,0.0.0.0/0 11 | PersistentKeepalive = 20 12 | 13 | # Config for --- WG2 --- 14 | [Peer] 15 | PublicKey = ZV3Ej3srYb0pcmxIYOp+LupX/Q/hTWGvtWADXgOfXFU= 16 | Endpoint = 127.0.0.1:16002 17 | AllowedIPs = fe80::2,192.168.255.2,2001:1:1:2::/64 18 | PersistentKeepalive = 20 19 | 20 | # Config for --- WG4 --- 21 | [Peer] 22 | PublicKey = Wkj5433521OMxELS0Pj/jxYIiNTto36FvjQZuEqcIlU= 23 | Endpoint = 127.0.0.1:16004 24 | AllowedIPs = fe80::4,192.168.255.4,2001:1:1:4::/64 25 | PersistentKeepalive = 20 26 | 27 | # Config for --- WG5 --- 28 | [Peer] 29 | PublicKey = KwjGRCcuxm1x/iHRlT1C6HBQqdhkespBXhCKj6XPjAA= 30 | Endpoint = 127.0.0.1:16005 31 | AllowedIPs = fe80::5,192.168.255.5,2001:1:1:5::/64 32 | PersistentKeepalive = 20 33 | -------------------------------------------------------------------------------- /specs/network/setup/wg3.priv: -------------------------------------------------------------------------------- 1 | 6NDgyX1auPlCdbpSdMIGiUh36om3j2X1L+a7XBDbDGo= 2 | -------------------------------------------------------------------------------- /specs/network/setup/wg3.pub: -------------------------------------------------------------------------------- 1 | hB3qTy67wAPe9NTrM8JpZ/9yFX2hws3bRlqEeQiTs3c= 2 | -------------------------------------------------------------------------------- /specs/network/setup/wg4.conf: -------------------------------------------------------------------------------- 1 | # WG4 2 | [Interface] 3 | ListenPort = 16004 4 | PrivateKey = oJ0CkMSuOO1TDMUk36LEEx0W8fmj0CLX1YA5ljQeWHk= 5 | 6 | # Config for --- WG1 --- 7 | [Peer] 8 | PublicKey = XQD2K4pcSjkwxvjSZjYqksTQaD8ANBZoufJ6AKShImw= 9 | Endpoint = 127.0.0.1:16001 10 | AllowedIPs = fe80::1,192.168.255.1,2001:1:1:1::/64,::/0,0.0.0.0/0 11 | PersistentKeepalive = 20 12 | 13 | # Config for --- WG2 --- 14 | [Peer] 15 | PublicKey = ZV3Ej3srYb0pcmxIYOp+LupX/Q/hTWGvtWADXgOfXFU= 16 | Endpoint = 127.0.0.1:16002 17 | AllowedIPs = fe80::2,192.168.255.2,2001:1:1:2::/64 18 | PersistentKeepalive = 20 19 | 20 | # Config for --- WG3 --- 21 | [Peer] 22 | PublicKey = hB3qTy67wAPe9NTrM8JpZ/9yFX2hws3bRlqEeQiTs3c= 23 | Endpoint = 127.0.0.1:16003 24 | AllowedIPs = fe80::3,192.168.255.3,2001:1:1:3::/64 25 | PersistentKeepalive = 20 26 | 27 | # Config for --- WG5 --- 
28 | [Peer] 29 | PublicKey = KwjGRCcuxm1x/iHRlT1C6HBQqdhkespBXhCKj6XPjAA= 30 | Endpoint = 127.0.0.1:16005 31 | AllowedIPs = fe80::5,192.168.255.5,2001:1:1:5::/64 32 | PersistentKeepalive = 20 33 | -------------------------------------------------------------------------------- /specs/network/setup/wg4.priv: -------------------------------------------------------------------------------- 1 | oJ0CkMSuOO1TDMUk36LEEx0W8fmj0CLX1YA5ljQeWHk= 2 | -------------------------------------------------------------------------------- /specs/network/setup/wg4.pub: -------------------------------------------------------------------------------- 1 | Wkj5433521OMxELS0Pj/jxYIiNTto36FvjQZuEqcIlU= 2 | -------------------------------------------------------------------------------- /specs/network/setup/wg5.conf: -------------------------------------------------------------------------------- 1 | # WG5 2 | [Interface] 3 | ListenPort = 16005 4 | PrivateKey = UL6+x20KAsty+iDNLXLS4E4XqB4lay9is44QTcln8Es= 5 | 6 | # Config for --- WG1 --- 7 | [Peer] 8 | PublicKey = XQD2K4pcSjkwxvjSZjYqksTQaD8ANBZoufJ6AKShImw= 9 | Endpoint = 127.0.0.1:16001 10 | AllowedIPs = fe80::1,192.168.255.1,2001:1:1:1::/64,::/0,0.0.0.0/0 11 | PersistentKeepalive = 20 12 | 13 | # Config for --- WG2 --- 14 | [Peer] 15 | PublicKey = ZV3Ej3srYb0pcmxIYOp+LupX/Q/hTWGvtWADXgOfXFU= 16 | Endpoint = 127.0.0.1:16002 17 | AllowedIPs = fe80::2,192.168.255.2,2001:1:1:2::/64 18 | PersistentKeepalive = 20 19 | 20 | # Config for --- WG3 --- 21 | [Peer] 22 | PublicKey = hB3qTy67wAPe9NTrM8JpZ/9yFX2hws3bRlqEeQiTs3c= 23 | Endpoint = 127.0.0.1:16003 24 | AllowedIPs = fe80::3,192.168.255.3,2001:1:1:3::/64 25 | PersistentKeepalive = 20 26 | 27 | # Config for --- WG4 --- 28 | [Peer] 29 | PublicKey = Wkj5433521OMxELS0Pj/jxYIiNTto36FvjQZuEqcIlU= 30 | Endpoint = 127.0.0.1:16004 31 | AllowedIPs = fe80::4,192.168.255.4,2001:1:1:4::/64 32 | PersistentKeepalive = 20 33 | -------------------------------------------------------------------------------- /specs/network/setup/wg5.priv: -------------------------------------------------------------------------------- 1 | UL6+x20KAsty+iDNLXLS4E4XqB4lay9is44QTcln8Es= 2 | -------------------------------------------------------------------------------- /specs/network/setup/wg5.pub: -------------------------------------------------------------------------------- 1 | KwjGRCcuxm1x/iHRlT1C6HBQqdhkespBXhCKj6XPjAA= 2 | -------------------------------------------------------------------------------- /specs/network/tnodb.wsd: -------------------------------------------------------------------------------- 1 | @startuml 2 | title Tenant Network Object db 3 | start 4 | :Incoming request for\nNR in Network Transaction; 5 | if (Network Transaction in\nBCDB/Blockchain?) then (yes) 6 | if (TNo exists?) 
then (yes) 7 | :request prefix from Farmer Allocation; 8 | else (no) 9 | :create TNo; 10 | :request prefix from Farmer; 11 | if (NoFree or\nUnreachable) then (Error) 12 | stop 13 | else (Ok) 14 | endif 15 | endif 16 | :Update TNo; 17 | :save TNo; 18 | :reply TNo; 19 | else (no) 20 | :Error, no network transaction; 21 | stop 22 | endif 23 | end 24 | @enduml -------------------------------------------------------------------------------- /specs/readme.md: -------------------------------------------------------------------------------- 1 | # Overview 2 | - [PID 1](init.md) 3 | - [IPC](ipc.md) 4 | - [provisioning](provisioning) 5 | - Components 6 | - [storage](storage/readme.md) 7 | - [flist](flist/readme.md) 8 | - [container](container/readme.md) 9 | - [network](network/readme.md) 10 | -------------------------------------------------------------------------------- /specs/zbus.md: -------------------------------------------------------------------------------- 1 | # Requirements 2 | - A component can register one or more `objects` 3 | - Each object can have a global name, which clients can use to call `methods` on the registered objects 4 | - Object interfaces need to explicitly set a version. Multiple versions of the same interface can be running at the same time 5 | - A client needs to specify the object name and interface version. A Proxy object can be used to call the remote methods 6 | - Support mocked proxies to allow local unit tests without the need for the remote object 7 | 8 | ## Suggested message brokers 9 | - Redis 10 | - Disque 11 | - Rabbitmq 12 | 13 | 14 | ## Overview 15 | ![ipc overview](../assets/ipc.png) 16 | 17 | ## POC 18 | Please check [zbus](https://github.com/threefoldtech/zbus) for a proof of concept 19 | 20 | ## Example 21 | 22 | Server code 23 | ```go 24 | // server code should be something like that 25 | type Service struct{} 26 | 27 | func (s *Service) MyMethod(a int, b string) (string, error) { 28 | // do something 29 | return "hello", nil 30 | } 31 | 32 | func main() { 33 | server := zbus.New() // config ? 34 | var s Service 35 | server.Register("my-service", "1.0", s) 36 | 37 | server.Run() 38 | } 39 | ``` 40 | 41 | Client code 42 | ```go 43 | 44 | func main() { 45 | client := zbus.Client() // config? 46 | 47 | // client is a low level client; we should have some stubs on top of that which hide the call 48 | 49 | c := ServiceStub{client} 50 | 51 | res, err := c.MyMethod(10, "hello") 52 | } 53 | ``` -------------------------------------------------------------------------------- /tools/zos-update-worker/.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, built with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | 14 | # Dependency directories (remove the comment below to include it) 15 | # vendor/ 16 | 17 | tf-autobuilder/ 18 | tf-zos/ 19 | bin/ 20 | coverage/ 21 | 22 | dist/ 23 | -------------------------------------------------------------------------------- /tools/zos-update-worker/.goreleaser.yaml: -------------------------------------------------------------------------------- 1 | # This is an example .goreleaser.yml file with some sensible defaults. 2 | # Make sure to check the documentation at https://goreleaser.com 3 | before: 4 | hooks: 5 | # You may remove this if you don't use go modules.
6 | - go mod tidy 7 | # you may remove this if you don't need go generate 8 | - go generate ./... 9 | builds: 10 | - env: 11 | - CGO_ENABLED=0 12 | goos: 13 | - linux 14 | - darwin 15 | archives: 16 | - replacements: 17 | darwin: Darwin 18 | linux: Linux 19 | 386: i386 20 | amd64: x86_64 21 | checksum: 22 | name_template: 'checksums.txt' 23 | snapshot: 24 | name_template: "{{ incpatch .Version }}-next" 25 | changelog: 26 | sort: asc 27 | filters: 28 | exclude: 29 | - '^docs:' 30 | - '^test:' 31 | 32 | # modelines, feel free to remove those if you don't want/use them: 33 | # yaml-language-server: $schema=https://goreleaser.com/static/schema.json 34 | # vim: set ts=2 sw=2 tw=0 fo=cnqoj 35 | -------------------------------------------------------------------------------- /tools/zos-update-worker/Makefile: -------------------------------------------------------------------------------- 1 | OUT=$(shell realpath -m bin) 2 | GOPATH=$(shell go env GOPATH) 3 | branch=$(shell git symbolic-ref -q --short HEAD || git describe --tags --exact-match) 4 | revision=$(shell git rev-parse HEAD) 5 | dirty=$(shell test -n "`git diff --shortstat 2> /dev/null | tail -n1`" && echo "*") 6 | ldflags='-w -s -X $(version).Branch=$(branch) -X $(version).Revision=$(revision) -X $(version).Dirty=$(dirty)' 7 | 8 | all: getdeps test 9 | 10 | getdeps: 11 | @echo "Installing golangci-lint" && go get github.com/golangci/golangci-lint/cmd/golangci-lint && go install github.com/golangci/golangci-lint/cmd/golangci-lint 12 | go mod tidy 13 | 14 | lint: 15 | @echo "Running $@" 16 | golangci-lint run -c ../../.golangci.yml 17 | 18 | test: lint 19 | go test -v -vet=off ./... 20 | 21 | benchmarks: 22 | go test -v -vet=off ./... -bench=. -count 1 -benchtime=10s -benchmem -run=^# 23 | 24 | coverage: clean 25 | mkdir coverage 26 | go test -v -vet=off ./... -coverprofile=coverage/coverage.out 27 | go tool cover -html=coverage/coverage.out -o coverage/coverage.html 28 | 29 | testrace: lint 30 | go test -v -race -vet=off ./... 
31 | 32 | run: 33 | go run main.go 34 | 35 | build: 36 | go build -o bin/zos-update-worker main.go 37 | 38 | clean: 39 | rm ./coverage -rf 40 | rm ./bin -rf 41 | -------------------------------------------------------------------------------- /tools/zos-update-worker/README.md: -------------------------------------------------------------------------------- 1 | # zos-update-worker 2 | 3 | A worker to get the version set on the chain with the substrate-client with a specific interval (for example: 10 mins) for mainnet, testnet, and qanet 4 | 5 | ## How to use 6 | 7 | - Get the binary 8 | 9 | > Download the latest from the [releases page](https://github.com/threefoldtech/zos/releases) 10 | 11 | - Run the worker 12 | 13 | After downloading the binary 14 | 15 | ```bash 16 | sudo cp zos-update-worker /usr/local/bin 17 | zos-update-worker 18 | ``` 19 | 20 | - you can run the command with: 21 | 22 | ```bash 23 | zos-update-worker --src=tf-autobuilder --dst=tf-zos --interval=10 --main-url=wss://tfchain.grid.tf/ws --main-url=wss://tfchain.grid.tf/ws --test-url=wss://tfchain.test.grid.tf/ws --test-url=wss://tfchain.test.grid.tf/ws --qa-url=wss://tfchain.qa.grid.tf/ws --qa-url=wss://tfchain.qa.grid.tf/ws 24 | ``` 25 | 26 | ## Test 27 | 28 | ```bash 29 | make test 30 | ``` 31 | 32 | ## Coverage 33 | 34 | ```bash 35 | make coverage 36 | ``` 37 | 38 | ## Substrate URLs 39 | 40 | ```go 41 | SUBSTRATE_URLS := map[string][]string{ 42 | "qa": {"wss://tfchain.qa.grid.tf/ws"}, 43 | "testing": {"wss://tfchain.test.grid.tf/ws"}, 44 | "production": {"wss://tfchain.grid.tf/ws"}, 45 | } 46 | ``` 47 | -------------------------------------------------------------------------------- /tools/zos-update-worker/Taskfile.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | tasks: 4 | build: 5 | desc: Build the app 6 | cmds: 7 | - GOFLAGS=-mod=mod go build -o bin/zos-update-worker main.go 8 | 9 | run: 10 | desc: Run the app 11 | cmds: 12 | - GOFLAGS=-mod=mod go run main.go 13 | 14 | test: 15 | desc: Test the app 16 | cmds: 17 | - go test -v -vet=off ./... 18 | 19 | benchmarks: 20 | desc: Test the app benchmarks 21 | cmds: 22 | - go test -v -vet=off ./... -bench=. -count 1 -benchtime=10s -benchmem -run=^# 23 | 24 | coverage: 25 | desc: Test the app coverages 26 | cmds: 27 | - rm ./coverage -rf 28 | - mkdir coverage 29 | - go test -v -vet=off ./... -coverprofile=coverage/coverage.out 30 | - go tool cover -html=coverage/coverage.out -o coverage/coverage.html 31 | 32 | deps: 33 | desc: install deps 34 | cmds: 35 | - go get -u golang.org/x/lint/golint 36 | - go get -u github.com/fzipp/gocyclo/cmd/gocyclo 37 | - go get -u github.com/remyoudompheng/go-misc/deadcode 38 | - go get -u github.com/client9/misspell/cmd/misspell 39 | 40 | verifiers: 41 | desc: Run verifiers 42 | cmds: 43 | - gofmt -d . 44 | - $(go env GOPATH)/bin/golangci-lint run 45 | - $(go env GOPATH)/bin/gocyclo -over 100 . 46 | - $(go env GOPATH)/bin/deadcode -test $(shell go list ./...) || true 47 | - $(go env GOPATH)/bin/misspell -i monitord -error `find .` 48 | - go run honnef.co/go/tools/cmd/staticcheck -- ./... 
49 | 50 | clean: 51 | desc: Remove all coverage and bin files 52 | cmds: 53 | - rm ./coverage -rf 54 | - rm ./bin -rf 55 | -------------------------------------------------------------------------------- /tools/zos-update-worker/cmd/worker.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright © 2022 NAME HERE 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | package cmd 17 | 18 | import ( 19 | "fmt" 20 | "os" 21 | "time" 22 | 23 | "github.com/rs/zerolog" 24 | "github.com/rs/zerolog/log" 25 | 26 | "github.com/spf13/cobra" 27 | "github.com/threefoldtech/zosbase/tools/zos-update-version/internal" 28 | ) 29 | 30 | var rootCmd = &cobra.Command{ 31 | Use: "zos-update-version", 32 | Short: "A worker to update the version of zos", 33 | RunE: func(cmd *cobra.Command, args []string) error { 34 | if ok, _ := cmd.Flags().GetBool("debug"); ok { 35 | zerolog.SetGlobalLevel(zerolog.DebugLevel) 36 | } else { 37 | zerolog.SetGlobalLevel(zerolog.InfoLevel) 38 | } 39 | 40 | src, err := cmd.Flags().GetString("src") 41 | if err != nil { 42 | return err 43 | } 44 | 45 | dst, err := cmd.Flags().GetString("dst") 46 | if err != nil { 47 | return err 48 | } 49 | 50 | params := internal.Params{} 51 | interval, err := cmd.Flags().GetInt("interval") 52 | if err != nil { 53 | return err 54 | } 55 | params.Interval = time.Duration(interval) * time.Minute 56 | 57 | production, err := cmd.Flags().GetStringSlice("main-url") 58 | if err != nil { 59 | return err 60 | } 61 | if len(production) > 0 { 62 | params.MainUrls = production 63 | } 64 | 65 | test, err := cmd.Flags().GetStringSlice("test-url") 66 | if err != nil { 67 | return err 68 | } 69 | if len(test) > 0 { 70 | params.TestUrls = test 71 | } 72 | 73 | qa, err := cmd.Flags().GetStringSlice("qa-url") 74 | if err != nil { 75 | return err 76 | } 77 | if len(qa) > 0 { 78 | params.QAUrls = qa 79 | } 80 | 81 | worker, err := internal.NewWorker(src, dst, params) 82 | if err != nil { 83 | return err 84 | } 85 | worker.UpdateWithInterval(cmd.Context()) 86 | return nil 87 | }, 88 | } 89 | 90 | func Execute() { 91 | if err := rootCmd.Execute(); err != nil { 92 | fmt.Println(err) 93 | os.Exit(1) 94 | } 95 | } 96 | 97 | func init() { 98 | 99 | log.Logger = log.Output(zerolog.NewConsoleWriter()) 100 | 101 | cobra.OnInitialize() 102 | 103 | rootCmd.Flags().StringP("src", "s", "tf-autobuilder", "Enter your source directory") 104 | rootCmd.Flags().StringP("dst", "d", "tf-zos", "Enter your destination directory") 105 | rootCmd.Flags().IntP("interval", "i", 10, "Enter the interval between each update") 106 | rootCmd.Flags().Bool("debug", false, "enable debug logging") 107 | rootCmd.Flags().StringSliceP("main-url", "m", []string{}, "Enter your mainnet substrate urls") 108 | rootCmd.Flags().StringSliceP("test-url", "t", []string{}, "Enter your testnet substrate urls") 109 | rootCmd.Flags().StringSliceP("qa-url", "q", []string{}, "Enter your qanet substrate urls") 110 | } 111 | 
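For reference, here is a minimal sketch of how the flags above map onto `internal.Params` and the worker loop when driven from code inside this module (the `internal` package cannot be imported from outside it). The directories and substrate URLs are just the defaults shown in the README, not a prescribed setup.

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/threefoldtech/zosbase/tools/zos-update-version/internal"
)

func main() {
	// interval and per-network substrate URLs, normally filled from the CLI flags
	params := internal.Params{
		Interval: 10 * time.Minute,
		MainUrls: []string{"wss://tfchain.grid.tf/ws"},
		TestUrls: []string{"wss://tfchain.test.grid.tf/ws"},
		QAUrls:   []string{"wss://tfchain.qa.grid.tf/ws"},
	}

	// source and destination directories (the README defaults)
	worker, err := internal.NewWorker("tf-autobuilder", "tf-zos", params)
	if err != nil {
		log.Fatal(err)
	}

	// blocks, re-checking the on-chain version every interval
	worker.UpdateWithInterval(context.Background())
}
```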
-------------------------------------------------------------------------------- /tools/zos-update-worker/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/threefoldtech/zosbase/tools/zos-update-version 2 | 3 | go 1.21 4 | 5 | require ( 6 | github.com/rs/zerolog v1.28.0 7 | github.com/spf13/cobra v1.6.1 8 | github.com/threefoldtech/substrate-client v0.1.5 9 | ) 10 | 11 | require ( 12 | github.com/ChainSafe/go-schnorrkel v1.0.0 // indirect 13 | github.com/cenkalti/backoff v2.2.1+incompatible 14 | github.com/centrifuge/go-substrate-rpc-client/v4 v4.0.5 // indirect 15 | github.com/cosmos/go-bip39 v1.0.0 // indirect 16 | github.com/deckarep/golang-set v1.8.0 // indirect 17 | github.com/decred/base58 v1.0.3 // indirect 18 | github.com/decred/dcrd/crypto/blake256 v1.0.0 // indirect 19 | github.com/ethereum/go-ethereum v1.10.17 // indirect 20 | github.com/go-stack/stack v1.8.1 // indirect 21 | github.com/gorilla/websocket v1.5.0 // indirect 22 | github.com/gtank/merlin v0.1.1 // indirect 23 | github.com/gtank/ristretto255 v0.1.2 // indirect 24 | github.com/inconshreveable/mousetrap v1.0.1 // indirect 25 | github.com/jbenet/go-base58 v0.0.0-20150317085156-6237cf65f3a6 // indirect 26 | github.com/mattn/go-colorable v0.1.12 // indirect 27 | github.com/mattn/go-isatty v0.0.14 // indirect 28 | github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 // indirect 29 | github.com/pierrec/xxHash v0.1.5 // indirect 30 | github.com/pkg/errors v0.9.1 // indirect 31 | github.com/rs/cors v1.8.2 // indirect 32 | github.com/spf13/pflag v1.0.5 // indirect 33 | github.com/vedhavyas/go-subkey v1.0.3 // indirect 34 | golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871 // indirect 35 | golang.org/x/sys v0.13.0 // indirect 36 | gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect 37 | ) 38 | 39 | replace github.com/centrifuge/go-substrate-rpc-client/v4 v4.0.5 => github.com/threefoldtech/go-substrate-rpc-client/v4 v4.0.6-0.20220927094755-0f0d22c73cc7 40 | -------------------------------------------------------------------------------- /tools/zos-update-worker/internal/worker_test.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | "time" 7 | ) 8 | 9 | func TestWorker(t *testing.T) { 10 | testDir := t.TempDir() 11 | 12 | params := Params{ 13 | Interval: 1 * time.Second, 14 | QAUrls: []string{"wss://tfchain.qa.grid.tf/ws"}, 15 | TestUrls: []string{"wss://tfchain.test.grid.tf/ws"}, 16 | MainUrls: []string{"wss://tfchain.grid.tf/ws"}, 17 | } 18 | src := testDir + "/tf-autobuilder" 19 | dst := testDir + "/tf-zos" 20 | 21 | err := os.Mkdir(src, os.ModePerm) 22 | if err != nil { 23 | t.Error(err) 24 | } 25 | 26 | err = os.Mkdir(dst, os.ModePerm) 27 | if err != nil { 28 | t.Error(err) 29 | } 30 | 31 | worker, err := NewWorker(src, dst, params) 32 | if err != nil { 33 | t.Error(err) 34 | } 35 | 36 | t.Run("test_no_src_qa", func(t *testing.T) { 37 | err := worker.updateZosVersion("qa", worker.substrate["qa"]) 38 | if err == nil { 39 | t.Errorf("update zos should fail") 40 | } 41 | }) 42 | 43 | t.Run("test_no_src_test", func(t *testing.T) { 44 | _, err := os.Create(src + "/zos:v3.4.0-qa1.flist") 45 | if err != nil { 46 | t.Error(err) 47 | } 48 | 49 | err = worker.updateZosVersion("testing", worker.substrate["testing"]) 50 | if err == nil { 51 | t.Errorf("update zos should fail for test, %v", err) 52 | } 53 | }) 54 | 55 | 
t.Run("test_no_src_main", func(t *testing.T) { 56 | _, err = os.Create(src + "/zos:v3.1.1-rc2.flist") 57 | if err != nil { 58 | t.Error(err) 59 | } 60 | 61 | err = worker.updateZosVersion("production", worker.substrate["production"]) 62 | if err == nil { 63 | t.Errorf("update zos should fail for main, %v", err) 64 | } 65 | }) 66 | 67 | t.Run("test_params_wrong_url", func(t *testing.T) { 68 | params.QAUrls = []string{"wss://tfchain.qa1.grid.tf/ws"} 69 | 70 | worker, err = NewWorker(src, dst, params) 71 | if err != nil { 72 | t.Error(err) 73 | } 74 | err := worker.updateZosVersion("qa", worker.substrate["qa"]) 75 | if err == nil { 76 | t.Errorf("update zos should fail") 77 | } 78 | }) 79 | } 80 | -------------------------------------------------------------------------------- /tools/zos-update-worker/main.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright © 2022 NAME HERE 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | package main 17 | 18 | import ( 19 | "github.com/threefoldtech/zosbase/tools/zos-update-version/cmd" 20 | ) 21 | 22 | func main() { 23 | cmd.Execute() 24 | } 25 | --------------------------------------------------------------------------------