├── .github
├── CODEOWNERS
├── PULL_REQUEST_TEMPLATE.md
├── dependabot.yml
├── labeler.yml
└── workflows
│ ├── auto-add-labels.yml
│ ├── auto-close.yml
│ ├── auto-update.yml
│ ├── post-merge-edge-node-container.yml
│ ├── post-merge-edge-node-simulator.yml
│ ├── post-merge-vm-provisioning.yml
│ └── pre-merge.yml
├── .gitignore
├── .markdownlint.yml
├── .tool-versions
├── CODE_OF_CONDUCT.md
├── CONTRIBUTORS.md
├── LICENSE
├── LICENSES
└── Apache-2.0.txt
├── Makefile
├── README.md
├── REUSE.toml
├── SECURITY.md
├── VERSION
├── common.mk
├── edge-node-container
├── .dockerignore
├── .gitignore
├── .golangci.yml
├── .markdownlintignore
├── .trivyignore
├── Dockerfile
├── Dockerfile.utils
├── EN_MANIFEST_VERSION
├── LICENSES
│ └── Apache-2.0.txt
├── Makefile
├── README.md
├── REUSE.toml
├── VERSION
├── chart
│ ├── .helmignore
│ ├── Chart.yaml
│ ├── bma_values.yaml
│ ├── templates
│ │ ├── _helpers.tpl
│ │ ├── envvars.yaml
│ │ ├── serviceaccount.yaml
│ │ └── statefulset.yaml
│ └── values.yaml
├── cmd
│ └── main.go
├── docs
│ ├── cluster-dns.md
│ └── internals.md
├── entrypoint.sh
├── go.mod
├── go.sum
├── requirements.txt
├── scripts
│ ├── _utils.sh
│ ├── agents.service
│ ├── agents.sh
│ ├── agents_env.tpl
│ ├── cluster-dns
│ │ ├── cluster-dns.service
│ │ ├── cluster-dns.sh
│ │ ├── env
│ │ └── env.tpl
│ ├── dmiTemplate
│ │ ├── dmi
│ │ └── dmi-dump
│ ├── fake-uuid.sh
│ ├── onboard.service
│ ├── onboard.sh
│ ├── proxy.tpl
│ └── sudoers.txt
├── trivy.yaml
└── utils
│ └── bma_utils.py
├── edge-node-simulator
├── .dockerignore
├── .gitignore
├── .golangci.yml
├── .markdownlintignore
├── Dockerfile
├── LICENSES
│ └── Apache-2.0.txt
├── Makefile
├── README.md
├── REUSE.toml
├── VERSION
├── api
│ ├── buf.lock
│ ├── buf.yaml
│ └── ensim
│ │ └── v1
│ │ └── ensim.proto
├── buf.gen.yaml
├── buf.work.yaml
├── charts
│ ├── .helmignore
│ ├── Chart.yaml
│ ├── templates
│ │ ├── _helpers.tpl
│ │ ├── deployment.yaml
│ │ ├── envvars.yaml
│ │ ├── service.yaml
│ │ └── serviceaccount.yaml
│ └── values.yaml
├── cmd
│ └── ensim
│ │ ├── client
│ │ └── main.go
│ │ └── server
│ │ └── main.go
├── docs
│ └── internals.md
├── go.mod
├── go.sum
├── pkg
│ ├── api
│ │ └── ensim
│ │ │ └── v1
│ │ │ ├── ensim.pb.go
│ │ │ ├── ensim.pb.validate.go
│ │ │ └── ensim_grpc.pb.go
│ ├── en
│ │ ├── agents
│ │ │ ├── agents.go
│ │ │ ├── pua.go
│ │ │ └── pua_test.go
│ │ ├── defs
│ │ │ └── defs.go
│ │ ├── keycloak
│ │ │ ├── comms.go
│ │ │ ├── jwt.go
│ │ │ ├── keycloak.go
│ │ │ ├── tenant.go
│ │ │ └── token.go
│ │ ├── onboard
│ │ │ ├── artifacts.go
│ │ │ ├── artifacts_test.go
│ │ │ ├── north.go
│ │ │ ├── onboard.go
│ │ │ ├── proto
│ │ │ │ ├── README.md
│ │ │ │ ├── doc.go
│ │ │ │ ├── workflow.pb.go
│ │ │ │ ├── workflow.proto
│ │ │ │ ├── workflow
│ │ │ │ │ └── v2
│ │ │ │ │ │ ├── mock.go
│ │ │ │ │ │ ├── workflow.pb.go
│ │ │ │ │ │ ├── workflow.proto
│ │ │ │ │ │ └── workflow_grpc.pb.go
│ │ │ │ └── workflow_grpc.pb.go
│ │ │ ├── south.go
│ │ │ └── tinker.go
│ │ └── utils
│ │ │ ├── utils.go
│ │ │ └── utils_test.go
│ └── sim
│ │ ├── cfg.go
│ │ ├── cli.go
│ │ ├── client.go
│ │ ├── defs.go
│ │ ├── edgenode.go
│ │ ├── manager.go
│ │ ├── northbound.go
│ │ ├── store.go
│ │ ├── utils.go
│ │ └── watcher.go
├── requirements.txt
└── test
│ ├── README.md
│ ├── ensim
│ ├── smoke_test.go
│ └── stats_test.go
│ ├── flags
│ └── flags.go
│ ├── infra
│ ├── cleanup_test.go
│ ├── common_test.go
│ ├── day0_test.go
│ ├── day1_test.go
│ ├── day2_test.go
│ └── e2e_test.go
│ ├── mt
│ └── mt_test.go
│ └── utils
│ ├── api.go
│ ├── examples.go
│ ├── kc.go
│ ├── mt.go
│ └── utils.go
├── pico
├── .editorconfig
├── .gitignore
├── .tool-versions
├── .tool-versions.license
├── Makefile
├── README.md
├── VERSION
├── VERSION.license
├── modules
│ ├── common
│ │ ├── main.tf
│ │ ├── terraform.tf
│ │ └── variables.tf
│ ├── pico-vm-kubevirt
│ │ ├── .terraform.lock.hcl
│ │ ├── README.md
│ │ ├── main.tf
│ │ ├── outputs.tf
│ │ ├── terraform.tf
│ │ └── variables.tf
│ ├── pico-vm-libvirt
│ │ ├── .terraform.lock.hcl
│ │ ├── customize_domain.xsl.tftpl
│ │ ├── customize_domain.xsl.tftpl.license
│ │ ├── main.tf
│ │ ├── outputs.tf
│ │ ├── terraform.tf
│ │ └── variables.tf
│ └── pico-vm-proxmox
│ │ ├── .terraform.lock.hcl
│ │ ├── main.tf
│ │ ├── outputs.tf
│ │ ├── terraform.tf
│ │ └── variables.tf
└── static
│ ├── node_details.png
│ └── node_details.png.license
├── placeholder.txt
├── requirements.txt
├── tools
├── helmbuild.sh
├── helmlint.sh
├── helmpush.sh
└── requirements.txt
├── version.mk
└── vm-provisioning
├── LICENSES
└── Apache-2.0.txt
├── Makefile
├── README.md
├── REUSE.toml
├── Show_vms_data.ps1
├── VERSION
├── ansible
├── calculate_max_vms.yml
├── create_vms.yml
├── install_ansible.sh
├── install_vm_dependencies.yml
├── inventory.yml
├── secret.yml
├── show_vms_data.yml
└── ssh_key_setup.yml
├── certs
└── Full_server.crt
├── config
├── docs
├── 01-vm-prov-design.md
├── 02-ven-design.png
├── 03-NW-Diagram1.png
├── 04-NW-Diagram2.png
├── Ansible-calculate-max-vms.png
├── Ansible-nio-flow-flag.png
├── Ansible-ssh-dir.png
├── Ansible-ssh-key-setup.png
├── Ansible-user-logs.png
├── Microvisor_Provision.png
├── Security_feature.png
├── UbuntuOS_Provision.png
└── provider_config.png
├── images
├── arch_simplified.jpg
├── kvm_check.png
├── rate_limit_argo.png
└── vm_arch.png
├── install_packages.sh
├── requirements.txt
├── scripts
├── ci_network_bridge.sh
├── ci_setup_dnsmasq.sh
├── common_vars.sh
├── create_new_user.sh
├── create_vm.sh
├── destroy_vm.sh
├── host_status_check.sh
├── io_configs.sh
├── network_file_backup_restore.sh
├── nio_configs.sh
├── nio_flow_host_config.sh
├── nio_flow_validation.sh
├── remove_all_packages.sh
├── show_host-status.sh
├── socket_login.exp
├── update_provider_defaultos.sh
└── vm_network_cleanup.sh
├── templates
├── Vagrantfile
└── orch_network.xml
└── tools
├── yaml-syntax-check.py
└── yaml_validator
├── README.md
├── Rx.py
├── yaml_schemas.py
└── yaml_validator.py
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: 2025 Intel Corporation
2 | #
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | * @Andrea-Campanella @cjnolan @damiankopyto @daniele-moro @ikyrycho @jkossak @ktaube26 @niket-intc @osinstom @Paiadhithi @PalashGoelIntel @pierventre @raphaelvrosa @rranjan3 @soniabha-intc @sunil-parida @tmatenko @vthiruveedula @zdw @Ram-srini
6 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 |
13 |
14 | ### Description
15 |
16 | Please include a summary of the changes and the related issue. List any dependencies that are required for this change.
17 |
18 | Fixes # (issue)
19 |
20 | ### Any Newly Introduced Dependencies
21 |
22 | Please describe any newly introduced 3rd party dependencies in this change. List their name, license information and how they are used in the project.
23 |
24 | ### How Has This Been Tested?
25 |
26 | Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce them. Please also list any relevant details of your test configuration.
27 |
28 | ### Checklist:
29 |
30 | - [ ] I agree to use the APACHE-2.0 license for my code changes
31 | - [ ] I have not introduced any 3rd party dependency changes
32 | - [ ] I have performed a self-review of my code
33 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | ---
5 | version: 2
6 | updates:
7 | - package-ecosystem: "gomod"
8 | directories:
9 | - "/edge-node-simulator"
10 | - "/edge-node-container"
11 | schedule:
12 | interval: daily
13 | open-pull-requests-limit: 3
14 | commit-message:
15 | prefix: "[gomod] "
16 | groups:
17 | dependencies:
18 |           patterns:
19 |             - "*"
20 |           exclude-patterns: # Internal dependencies are updated in separate PRs.
21 |             - "*open-edge-platform*"
22 | internal-dependencies:
23 | patterns:
24 | - "*open-edge-platform*"
25 | - package-ecosystem: "github-actions"
26 | directories:
27 | - "/" # this enables searching only in /.github/workflows directory
28 | schedule:
29 | interval: daily
30 | open-pull-requests-limit: 10
31 | commit-message:
32 | prefix: "[gha] "
33 |
--------------------------------------------------------------------------------
/.github/labeler.yml:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 | ---
4 |
5 | CI 🤖:
6 | - changed-files:
7 | - any-glob-to-any-file: [".github/**", "tools/**"]
8 |
9 | root:
10 | - changed-files:
11 | - any-glob-to-any-file: "*"
12 |
13 | edge-node-simulator:
14 | - changed-files:
15 | - any-glob-to-any-file: "edge-node-simulator/**"
16 |
17 | edge-node-container:
18 | - changed-files:
19 | - any-glob-to-any-file: "edge-node-container/**"
20 |
21 | vm-provisioning:
22 | - changed-files:
23 | - any-glob-to-any-file: "vm-provisioning/**"
24 |
--------------------------------------------------------------------------------
/.github/workflows/auto-add-labels.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | name: Labeler
6 |
7 | on:
8 | pull_request:
9 |
10 | concurrency:
11 | group: ${{ github.workflow }}-${{ github.ref }}
12 | cancel-in-progress: true
13 |
14 | permissions: {}
15 |
16 | jobs:
17 | label:
18 | runs-on: ubuntu-latest
19 | permissions:
20 | contents: read
21 | issues: write
22 | pull-requests: write
23 | steps:
24 | - uses: actions/labeler@8558fd74291d67161a8a78ce36a881fa63b766a9 # v5.0.0
25 | with:
26 | repo-token: ${{ secrets.SYS_ORCH_GITHUB }}
27 |
--------------------------------------------------------------------------------
/.github/workflows/auto-close.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | name: Stale Pull Requests
6 |
7 | # After 30 days of no activity on a PR, the PR should be marked as stale,
8 | # a comment made on the PR informing the author of the new status,
9 | # and the PR closed after a further 15 days of inactivity once it has been marked stale.
10 | on:
11 | schedule:
12 | - cron: '30 1 * * *' # run every day
13 | workflow_dispatch: {}
14 |
15 | permissions: {}
16 |
17 | jobs:
18 | stale-auto-close:
19 | runs-on: ubuntu-latest
20 | permissions:
21 | pull-requests: write
22 | issues: write
23 | steps:
24 | - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0
25 | with:
26 | repo-token: ${{ secrets.GITHUB_TOKEN }}
27 |           stale-pr-message: 'This pull request is stale because it has been open for 30 days with no activity. Comment on or update the PR to avoid it being closed after 15 days.'
28 | days-before-pr-stale: 30
29 | days-before-pr-close: 15
30 | remove-pr-stale-when-updated: 'true'
31 | close-pr-message: 'This pull request was automatically closed due to inactivity'
32 |
--------------------------------------------------------------------------------
/.github/workflows/auto-update.yml:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | ---
5 |
6 | name: Auto Update PR
7 |
8 | # On push to the main branch and support branches, update any branches that are out of date
9 | # and have auto-merge enabled. If the branch is currently out of date with the base branch,
10 | # it must be first manually updated and then will be kept up to date on future runs.
11 | on:
12 | push:
13 | branches:
14 | - main
15 | - release-*
16 |
17 | permissions: {}
18 |
19 | concurrency:
20 | group: ${{ github.workflow }}-${{ github.ref }}
21 | cancel-in-progress: true
22 |
23 | jobs:
24 | update-pull-requests:
25 | permissions:
26 | contents: read
27 | pull-requests: write
28 | runs-on: ubuntu-latest
29 |
30 | steps:
31 | - name: Checkout repository
32 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
33 | with:
34 | persist-credentials: false
35 |
36 | - name: Update pull requests
37 | uses: open-edge-platform/orch-ci/.github/actions/pr_updater@f341738d975c38b2b91f25d405baeb2d39bf2ddb # 0.1.14
38 | with:
39 | github_token: ${{ secrets.SYS_ORCH_GITHUB }}
40 |
--------------------------------------------------------------------------------
/.github/workflows/post-merge-edge-node-container.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | name: Post-Merge edge-node-container
6 |
7 | on:
8 | push:
9 | branches:
10 | - main
11 | - release-*
12 | paths:
13 | - 'edge-node-container/**'
14 | workflow_dispatch:
15 |
16 | permissions: {}
17 |
18 | jobs:
19 | post-merge-pipeline:
20 | permissions:
21 | contents: read
22 | security-events: write
23 | id-token: write
24 | uses: open-edge-platform/orch-ci/.github/workflows/post-merge.yml@3bdd409ccf738472c6e1547d14628b51c70dbe99 # v0.1.21
25 | with:
26 | run_version_check: true
27 | run_dep_version_check: true
28 | run_build: true
29 | run_docker_build: true
30 | run_docker_push: true
31 | run_helm_build: true
32 | run_helm_push: true
33 | run_version_tag: true
34 | prefix_tag_separator: "/"
35 | project_folder: "edge-node-container"
36 | secrets:
37 | SYS_ORCH_GITHUB: ${{ secrets.SYS_ORCH_GITHUB }}
38 | COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
39 | COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
40 | NO_AUTH_ECR_PUSH_USERNAME: ${{ secrets.NO_AUTH_ECR_PUSH_USERNAME }}
41 | NO_AUTH_ECR_PUSH_PASSWD: ${{ secrets.NO_AUTH_ECR_PUSH_PASSWD }}
42 | MSTEAMS_WEBHOOK: ${{ secrets.TEAMS_WEBHOOK }}
43 |
--------------------------------------------------------------------------------
/.github/workflows/post-merge-edge-node-simulator.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | name: Post-Merge edge-node-simulator
6 |
7 | on:
8 | push:
9 | branches:
10 | - main
11 | - release-*
12 | paths:
13 | - 'edge-node-simulator/**'
14 | workflow_dispatch:
15 |
16 | permissions: {}
17 |
18 | jobs:
19 | post-merge-pipeline:
20 | permissions:
21 | contents: read
22 | security-events: write
23 | id-token: write
24 | uses: open-edge-platform/orch-ci/.github/workflows/post-merge.yml@3bdd409ccf738472c6e1547d14628b51c70dbe99 # v0.1.21
25 | with:
26 | run_version_check: true
27 | run_dep_version_check: true
28 | run_build: true
29 | run_docker_build: true
30 | run_docker_push: true
31 | run_helm_build: true
32 | run_helm_push: true
33 | run_version_tag: true
34 | prefix_tag_separator: "/"
35 | project_folder: "edge-node-simulator"
36 | secrets:
37 | SYS_ORCH_GITHUB: ${{ secrets.SYS_ORCH_GITHUB }}
38 | COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
39 | COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
40 | NO_AUTH_ECR_PUSH_USERNAME: ${{ secrets.NO_AUTH_ECR_PUSH_USERNAME }}
41 | NO_AUTH_ECR_PUSH_PASSWD: ${{ secrets.NO_AUTH_ECR_PUSH_PASSWD }}
42 | MSTEAMS_WEBHOOK: ${{ secrets.TEAMS_WEBHOOK }}
43 |
--------------------------------------------------------------------------------
/.github/workflows/post-merge-vm-provisioning.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | name: Post-Merge VEN vm-provisioning
6 |
7 | on:
8 | push:
9 | branches:
10 | - main
11 | - release-*
12 | paths:
13 | - 'vm-provisioning/**'
14 | workflow_dispatch:
15 |
16 | permissions: {}
17 |
18 | jobs:
19 | post-merge-pipeline:
20 | permissions:
21 | contents: read
22 | deployments: write
23 | uses: open-edge-platform/orch-ci/.github/workflows/post-merge.yml@3bdd409ccf738472c6e1547d14628b51c70dbe99 # v0.1.21
24 | with:
25 | run_version_check: true
26 | run_dep_version_check: true
27 | run_build: false
28 | run_docker_build: false
29 | run_docker_push: false
30 | run_helm_build: false
31 | run_helm_push: false
32 | run_version_tag: true
33 | prefix_tag_separator: "/"
34 | project_folder: "vm-provisioning"
35 | secrets:
36 | SYS_ORCH_GITHUB: ${{ secrets.SYS_ORCH_GITHUB }}
37 | COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
38 | COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
39 | NO_AUTH_ECR_PUSH_USERNAME: ${{ secrets.NO_AUTH_ECR_PUSH_USERNAME }}
40 | NO_AUTH_ECR_PUSH_PASSWD: ${{ secrets.NO_AUTH_ECR_PUSH_PASSWD }}
41 | MSTEAMS_WEBHOOK: ${{ secrets.TEAMS_WEBHOOK }}
42 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | *.swp
5 | venv*
6 | out/*
7 | .vscode
8 | .DS_Store
9 | .idea/
10 | ## From https://github.com/github/gitignore/blob/main/Go.gitignore
11 |
12 | # Binaries for programs and plugins
13 | *.exe
14 | *.exe~
15 | *.dll
16 | *.so
17 |
18 |
--------------------------------------------------------------------------------
/.markdownlint.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | default: true
6 | MD004:
7 | style: dash
8 | MD010:
9 | # Code blocks may have hard tabs.
10 | code_blocks: false
11 | MD013:
12 | line_length: 120 # Max line length checking.
13 | code_blocks: false
14 | MD025:
15 | # Ignore the front matter title. Pages still need a top level header (#).
16 | front_matter_title: ""
17 | MD029:
18 | style: ordered
19 | MD033:
20 | allowed_elements:
21 | - ref # allow hugo relative reference links
22 |     - br # allow mermaid to create new lines
23 | - a # allow anchors created by protoc-gen-doc and similar tools
24 | - img # allow images - needed if you need to define a size
25 | - span # allow span tags - needed if you need to highlight text in color
26 |
--------------------------------------------------------------------------------
/.tool-versions:
--------------------------------------------------------------------------------
1 | golangci-lint 1.63.4
2 | golang 1.24.1
3 | yq 4.34.2
4 | oras 1.2.0
5 | kind 0.17.0
6 | helm 3.11.1
7 | jq 1.6
8 | ginkgo 2.23.3
9 | gomega 1.36.2
10 |
--------------------------------------------------------------------------------
/CONTRIBUTORS.md:
--------------------------------------------------------------------------------
1 | # Contributors
2 |
3 | ## Special thanks to all the people who have helped this project so far
4 |
5 | - [daniele-moro](https://github.com/daniele-moro)
6 | - [lodhirax](https://github.com/lodhirax)
7 | - [jokuniew](https://github.com/jokuniew)
8 | - [PalashGoelIntel](https://github.com/PalashGoelIntel)
9 | - [Ram-srini](https://github.com/Ram-srini)
10 | - [raphaelvrosa](https://github.com/raphaelvrosa)
11 | - [sunil-parida](https://github.com/sunil-parida)
12 | - [svoonnax](https://github.com/svoonnax)
13 | - [sybacharan](https://github.com/sybacharan)
14 | - [teone](https://github.com/teone)
15 | - [tmatenko](https://github.com/tmatenko)
16 | - [vthiruveedula](https://github.com/vthiruveedula)
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | SUBPROJECTS := vm-provisioning edge-node-simulator edge-node-container
5 |
6 | .DEFAULT_GOAL := help
7 | .PHONY: all clean clean-all help lint test build license
8 |
9 | all: lint mdlint build
10 | 	@# Help: Runs lint, mdlint, and build stages for all subprojects
11 |
12 |
13 | #### Python venv Target ####
14 | VENV_DIR := venv_ven
15 |
16 | $(VENV_DIR): requirements.txt ## Create Python venv
17 | python3 -m venv $@ ;\
18 | set +u; . ./$@/bin/activate; set -u ;\
19 | python -m pip install --upgrade pip ;\
20 | python -m pip install -r requirements.txt
21 |
22 | dependency-check: $(VENV_DIR)
23 |
24 | license:
25 | @echo "---LICENSE CHECK---"
26 | @for dir in $(SUBPROJECTS); do $(MAKE) -C $$dir license; done
27 | @echo "---END LICENSE CHECK---"
28 |
29 | lint:
30 | @# Help: Runs lint stage in all subprojects
31 | @echo "---MAKEFILE LINT---"
32 | @for dir in $(SUBPROJECTS); do $(MAKE) -C $$dir lint; done
33 | @echo "---END MAKEFILE LINT---"
34 |
35 | build:
36 | @# Help: Runs build stage in all subprojects
37 | @echo "---MAKEFILE BUILD---"
38 | for dir in $(SUBPROJECTS); do $(MAKE) -C $$dir build; done
39 | @echo "---END MAKEFILE Build---"
40 |
41 | mdlint:
42 | @echo "---MAKEFILE LINT README---"
43 | @for dir in $(SUBPROJECTS); do $(MAKE) -C $$dir mdlint; done
44 | @echo "---END MAKEFILE LINT README---"
45 |
46 | clean:
47 | @# Help: Runs clean stage in all subprojects
48 | @echo "---MAKEFILE CLEAN---"
49 | for dir in $(SUBPROJECTS); do $(MAKE) -C $$dir clean; done
50 | @echo "---END MAKEFILE CLEAN---"
51 |
52 | clean-all:
53 | @# Help: Runs clean-all stage in all subprojects
54 | @echo "---MAKEFILE CLEAN-ALL---"
55 | for dir in $(SUBPROJECTS); do $(MAKE) -C $$dir clean-all; done
56 | @echo "---END MAKEFILE CLEAN-ALL---"
57 |
58 | help:
59 | @printf "%-20s %s\n" "Target" "Description"
60 | @printf "%-20s %s\n" "------" "-----------"
61 | @make -pqR : 2>/dev/null \
62 | | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' \
63 | | sort \
64 | | egrep -v -e '^[^[:alnum:]]' -e '^$@$$' \
65 | | xargs -I _ sh -c 'printf "%-20s " _; make _ -nB | (grep -i "^# Help:" || echo "") | tail -1 | sed "s/^# Help: //g"'
66 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Virtual Edge Node
2 |
3 | [](https://opensource.org/licenses/Apache-2.0)
4 |
5 |
6 | [](https://scorecard.dev/viewer/?uri=github.com/open-edge-platform/virtual-edge-node)
7 |
8 | ## Overview
9 |
10 | The Virtual Edge Node (VEN) is designed to streamline the onboarding and provisioning of virtual machines, as well as
11 | the deployment, management, and testing of edge computing applications. It offers a virtualized platform that replicates
12 | the functionality of physical edge devices, enabling developers and testers to simulate real-world scenarios without
13 | requiring actual hardware.
14 |
15 | **Note: This repository is intended for Edge Developers' testing environments and is not meant for production
16 | use cases or deployment on live systems.**
17 |
18 | The repository supports Day 0 provisioning of the Virtual Edge Nodes for the Edge Manageability Framework and includes:
19 |
20 | - [**VM-Provisioning**](vm-provisioning/): provides a set of scripts, templates, and configurations designed to streamline
21 | and automate the initial setup and deployment of virtual machines (VMs) during the Day 0 provisioning phase on an Edge
22 | Orchestrator. It utilizes Vagrant and libvirt APIs to ensure efficient and smooth VM provisioning.
23 | - [**Edge Node in a Container**](edge-node-container/): contains an emulated version of an edge node in a container,
24 | for testing purposes only.
25 | - [**Edge Node Simulator**](edge-node-simulator/): contains a simulator for edge nodes with the Infrastructure Manager,
26 | for testing purposes only.
27 |
28 | Read more about Virtual Edge Node in the [User Guide][user-guide-url].
29 |
30 | Navigate through the folders to get started, develop, and contribute to Virtual Edge Node.
31 |
32 | ## Contribute
33 |
34 | To learn how to contribute to the project, see the [Contributor's
35 | Guide](https://docs.openedgeplatform.intel.com/edge-manage-docs/main/developer_guide/contributor_guide/index.html).
36 |
37 | ## Community and Support
38 |
39 | To learn more about the project, its community, and governance, visit
40 | the [Edge Orchestrator Community](https://docs.openedgeplatform.intel.com/edge-manage-docs/main/index.html).
41 |
42 | For support, start with [Troubleshooting](https://docs.openedgeplatform.intel.com/edge-manage-docs/main/developer_guide/troubleshooting/index.html).
43 |
44 | ## License
45 |
46 | Each component of the Virtual Edge Node is licensed under [Apache 2.0][apache-license].
47 |
48 | Last Updated Date: April 15, 2025
49 |
50 | [apache-license]: https://www.apache.org/licenses/LICENSE-2.0
51 |
52 | [user-guide-url]: https://docs.openedgeplatform.intel.com/edge-manage-docs/main/developer_guide/virtual_edge_node/index.html
53 |
--------------------------------------------------------------------------------
/REUSE.toml:
--------------------------------------------------------------------------------
1 | version = 1
2 |
3 | [[annotations]]
4 | path = ["**/VERSION", ".tool-versions", "**.md", "vm-provisioning/**", "edge-node-container/**", "edge-node-simulator/**"]
5 | precedence = "aggregate"
6 | SPDX-FileCopyrightText = "2025 Intel Corporation"
7 | SPDX-License-Identifier = "Apache-2.0"
8 |
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Security Policy
2 | Intel is committed to rapidly addressing security vulnerabilities affecting our customers and providing clear guidance on the solution, impact, severity and mitigation.
3 |
4 | ## Reporting a Vulnerability
5 | Please report any security vulnerabilities in this project utilizing the guidelines [here](https://www.intel.com/content/www/us/en/security-center/vulnerability-handling-guidelines.html).
6 |
7 | ## About [Virtual Edge Node Provisioning (VEN)](./vm-provisioning)
8 |
9 | - VEN is not part of EMF (Edge Manageability Framework).
10 | - VEN is developed and released for testing purposes only.
11 | - VEN is used by the Continuous Integration pipeline of EMF to
12 | run integration tests in the scope of onboarding and provisioning
13 | of Edge Nodes.
14 | - Developers can use the Virtual Edge Node to create and manage VMs
15 |   that mirror production environments without the need for physical hardware.
16 | - VEN is useful for developers and testers who need to simulate and test
17 | the onboarding and provisioning processes of virtual environments using
18 | development code.
19 | - It provides a set of scripts, templates, and configurations to deploy VENs
20 |   on an Edge Orchestrator.
21 |
22 | ## About [Edge Node in a Container (ENiC)](./edge-node-container)
23 |
24 | - ENiC is not part of EMF (Edge Manageability Framework);
25 | - ENiC is developed and released for testing purposes only;
26 | - ENiC can be used to validate EMF features, in infrastructure,
27 | cluster and application scopes;
28 | - ENiC is used by the Continuous Integration pipeline of EMF to
29 | run integration tests in the scope of infrastructure, cluster,
30 | application and UI domains;
31 | - ENiC does not contain external (management/control) interfaces,
32 | it only communicates with an EMF orchestrator for the sole purpose
33 | of testing its functionalities;
34 | - ENiC performs the onboarding/provisioning process of an actual edge node
35 | using simulated calls, which exercise the same interfaces as an actual
36 | edge node does in the EMF orchestrator;
37 | - ENiC enables the execution of Bare Metal Agents in the same manner as an
38 | actual edge node, i.e., BMAs are installed (from their .deb packages),
39 | configured and executed as systemd services;
40 | - ENiC requires its container to run in privileged mode and with root
41 | user because it needs to install the BMAs after it is initiated,
42 | and some BMAs require access to the “dmidecode” tool,
43 | used to retrieve the system UUID and Serial Number;
44 | - Further hardening of ENiC is going to be performed to reduce the
45 |   privileges/capabilities of the container and possibly avoid executing
46 |   it as the root user. This requires further investigation.
47 |
--------------------------------------------------------------------------------
/VERSION:
--------------------------------------------------------------------------------
1 | 1.0.0
2 |
--------------------------------------------------------------------------------
/edge-node-container/.dockerignore:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | assets
5 | docs
6 | utils
7 | venv
8 |
9 | pip_requirements.txt
10 | *.hcl
11 | *.md
12 | .git
13 | .github
14 | .idea
15 | .reuse
16 | .gitignore
17 | *.tgz
18 | ena-manifest.yaml
19 |
--------------------------------------------------------------------------------
/edge-node-container/.gitignore:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | *.swp
5 | *.test
6 | .DS_Store
7 | .idea
8 | .vscode
9 | artifacts
10 | go.work
11 | out/*
12 | build/*
13 | vendor
14 | venv*
15 | *.crt
16 | *.pem
17 | out
18 | enic-*.tgz
19 | orch-ca.crt
20 | ena-manifest.yaml
21 |
--------------------------------------------------------------------------------
/edge-node-container/.markdownlintignore:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | out/
5 | vendor/
6 | venv_enic/
7 |
--------------------------------------------------------------------------------
/edge-node-container/.trivyignore:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | # Requires update of golang-jwt/jwt, will be done post-3.0
5 | CVE-2025-30204
6 |
7 | # Dockerfile issues, will be addressed, when possible, post-3.0
8 | AVD-DS-0002
9 | AVD-DS-0029
10 |
11 | # Helm chart issues, will be addressed post-3.0
12 | AVD-KSV-0109
13 | AVD-KSV-0014
14 | AVD-KSV-0017
15 | AVD-KSV-0118
16 |
17 | # enic-utils issues, will be addressed post-3.0
18 | CVE-2025-31115
19 | CVE-2024-56406
20 |
--------------------------------------------------------------------------------
/edge-node-container/Dockerfile.utils:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | FROM debian:12.9-slim@sha256:12c396bd585df7ec21d5679bb6a83d4878bc4415ce926c9e5ea6426d23c60bdc
5 |
6 | ARG REPO_URL
7 | ARG VERSION
8 | ARG REVISION
9 | ARG BUILD_DATE
10 |
11 | LABEL org.opencontainers.image.version=${VERSION} \
12 | org.opencontainers.image.source=${REPO_URL} \
13 | org.opencontainers.image.revision=${REVISION} \
14 | org.opencontainers.image.created=${BUILD_DATE}
15 |
16 | # Install base dependencies
17 | RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
18 | --no-install-recommends \
19 | curl \
20 | uuid-runtime \
21 | dmidecode \
22 | gettext-base \
23 | jq \
24 | lsb-release \
25 | sudo && \
26 | rm -rf /var/lib/apt/lists/*
27 |
28 | WORKDIR /opt
29 | RUN mkdir -p enic/bin
30 |
31 | # Copy fake-uuid.sh and dmiTemplate files to enic/bin
32 | COPY ./scripts/fake-uuid.sh ./enic/bin
33 | COPY ./scripts/dmiTemplate ./enic/bin/dmiTemplate
34 |
35 | ENV PATH="${PATH}:/opt/enic/bin"
36 |
37 | ENTRYPOINT [""]
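
# Example build invocation (a sketch; the tag and build-arg values below are hypothetical):
#   docker build -f Dockerfile.utils \
#     --build-arg REPO_URL=https://github.com/open-edge-platform/virtual-edge-node \
#     --build-arg VERSION=0.7.2-dev --build-arg REVISION=local \
#     --build-arg BUILD_DATE="$(date -u +%Y-%m-%dT%H:%M:%SZ)" \
#     -t enic-utils:local .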
--------------------------------------------------------------------------------
/edge-node-container/EN_MANIFEST_VERSION:
--------------------------------------------------------------------------------
1 | 1.1.5
2 |
--------------------------------------------------------------------------------
/edge-node-container/REUSE.toml:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | version = 1
5 |
6 | [[annotations]]
7 | path = [
8 | "VERSION",
9 | "go.sum",
10 | "**.md",
11 | "docs/*",
12 | "EN_MANIFEST_VERSION",
13 | "scripts/dmiTemplate/*",
14 | "chart/bma_values.yaml",
15 | ]
16 |
17 | precedence = "aggregate"
18 | SPDX-FileCopyrightText = "(C) 2025 Intel Corporation"
19 | SPDX-License-Identifier = "Apache-2.0"
20 |
--------------------------------------------------------------------------------
/edge-node-container/VERSION:
--------------------------------------------------------------------------------
1 | 0.7.2-dev
2 |
--------------------------------------------------------------------------------
/edge-node-container/chart/.helmignore:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | # Patterns to ignore when building packages.
5 | # This supports shell glob matching, relative path matching, and
6 | # negation (prefixed with !). Only one pattern per line.
7 | .DS_Store
8 | # Common VCS dirs
9 | .git/
10 | .gitignore
11 | .bzr/
12 | .bzrignore
13 | .hg/
14 | .hgignore
15 | .svn/
16 | # Common backup files
17 | *.swp
18 | *.bak
19 | *.tmp
20 | *.orig
21 | *~
22 | # Various IDEs
23 | .project
24 | .idea/
25 | *.tmproj
26 | .vscode/
27 |
--------------------------------------------------------------------------------
/edge-node-container/chart/Chart.yaml:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 | ---
4 | apiVersion: v2
5 | name: enic
6 | description: Edge Node in a Container
7 | type: application
8 | version: 0.7.2-dev
9 | appVersion: "0.7.2-dev"
10 | home: edge-orchestrator.intel.com
11 | maintainers:
12 | - name: Edge Infrastructure Manager Team
13 |
--------------------------------------------------------------------------------
/edge-node-container/chart/bma_values.yaml:
--------------------------------------------------------------------------------
1 | caddy_version: 2.7.6
2 | cluster_agent_version: 1.7.3
3 | hardware_discovery_agent_version: 1.7.1
4 | inbc_program_version: 4.2.8.6-1
5 | inbm_cloudadapter_agent_version: 4.2.8.6-1
6 | inbm_configuration_agent_version: 4.2.8.6-1
7 | inbm_diagnostic_agent_version: 4.2.8.6-1
8 | inbm_dispatcher_agent_version: 4.2.8.6-1
9 | inbm_telemetry_agent_version: 4.2.8.6-1
10 | mqtt_version: 4.2.8.6-1
11 | node_agent_version: 1.7.2
12 | platform_observability_agent_version: 1.8.0
13 | platform_telemetry_agent_version: 1.4.0
14 | platform_update_agent_version: 1.5.2
15 | tpm_provision_version: 4.2.8.6-1
16 | trtl_version: 4.2.8.6-1
17 |
--------------------------------------------------------------------------------
/edge-node-container/chart/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 | {{/*
4 | Expand the name of the chart.
5 | */}}
6 | {{- define "enic.name" -}}
7 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
8 | {{- end }}
9 |
10 | {{/*
11 | Create a default fully qualified app name.
12 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
13 | If release name contains chart name it will be used as a full name.
14 | */}}
15 | {{- define "enic.fullname" -}}
16 | {{- if .Values.fullnameOverride }}
17 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
18 | {{- else }}
19 | {{- $name := default .Chart.Name .Values.nameOverride }}
20 | {{- if contains $name .Release.Name }}
21 | {{- .Release.Name | trunc 63 | trimSuffix "-" }}
22 | {{- else }}
23 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
24 | {{- end }}
25 | {{- end }}
26 | {{- end }}
27 |
28 | {{/*
29 | Create chart name and version as used by the chart label.
30 | */}}
31 | {{- define "enic.chart" -}}
32 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
33 | {{- end }}
34 |
35 | {{/*
36 | Common labels
37 | */}}
38 | {{- define "enic.labels" -}}
39 | helm.sh/chart: {{ include "enic.chart" . }}
40 | {{ include "enic.selectorLabels" . }}
41 | {{- if .Chart.AppVersion }}
42 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
43 | {{- end }}
44 | app.kubernetes.io/managed-by: {{ .Release.Service }}
45 | {{- end }}
46 |
47 | {{/*
48 | Selector labels
49 | */}}
50 | {{- define "enic.selectorLabels" -}}
51 | app.kubernetes.io/name: {{ include "enic.name" . }}
52 | app.kubernetes.io/instance: {{ .Release.Name }}
53 | {{- end }}
54 |
55 | {{/*
56 | Create the name of the service account to use
57 | */}}
58 | {{- define "enic.serviceAccountName" -}}
59 | {{- if .Values.serviceAccount.create }}
60 | {{- default (include "enic.fullname" .) .Values.serviceAccount.name }}
61 | {{- else }}
62 | {{- default "default" .Values.serviceAccount.name }}
63 | {{- end }}
64 | {{- end }}
65 |
--------------------------------------------------------------------------------
/edge-node-container/chart/templates/envvars.yaml:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | # Note that the agent values come from bma_values.yaml;
5 | # no default values are defined for them.
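# To inspect the resolved versions, one can render just this template (a sketch, assuming the chart
# directory is ./chart relative to edge-node-container):
#   helm template enic ./chart --show-only templates/envvars.yaml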
6 | ---
7 | apiVersion: v1
8 | kind: ConfigMap
9 | metadata:
10 | name: env-vars
11 | data:
12 | {{- if .Values.proxy.enabled }}
13 | HTTP_PROXY: "{{ .Values.proxy.http_proxy }}"
14 | HTTPS_PROXY: "{{ .Values.proxy.https_proxy }}"
15 | NO_PROXY: "{{ .Values.proxy.no_proxy }}"
16 | http_proxy: "{{ .Values.proxy.http_proxy }}"
17 | https_proxy: "{{ .Values.proxy.https_proxy }}"
18 | no_proxy: "{{ .Values.proxy.no_proxy }}"
19 | {{- end }}
20 | PATH: "{{ .Values.param.path}}"
21 | DEBUG: "{{ .Values.param.debug }}"
22 | _ORCH_IP_: "{{ .Values.param.orch_ip }}"
23 | _ORCH_FQDN_: "{{ .Values.param.orch_fqdn }}"
24 | _ORCH_USER_: "{{ .Values.param.orchUser }}"
25 | _ORCH_PASS_: "{{ .Values.param.orchPass }}"
26 | _ORCH_ORG_: "{{ .Values.param.orchOrg }}"
27 | _ORCH_PROJECT_: "{{ .Values.param.orchProject }}"
28 | _ENABLE_NIO_: "{{ .Values.param.enableNIO }}"
29 | _OAM_SERVER_ADDRESS_: "{{ .Values.param.oamServerAddress }}"
30 | _NODE_AGENT_VERSION_: "{{ .Values.bma_versions.node_agent_version }}"
31 | _CLUSTER_AGENT_VERSION_: "{{ .Values.bma_versions.cluster_agent_version }}"
32 | _HDA_AGENT_VERSION_: "{{ .Values.bma_versions.hardware_discovery_agent_version }}"
33 | _POA_AGENT_VERSION_: "{{ .Values.bma_versions.platform_observability_agent_version }}"
34 | _TRTL_VERSION_: "{{ .Values.bma_versions.trtl_version }}"
35 | _INBM_CLOUDADAPTER_AGENT_VERSION_: "{{ .Values.bma_versions.inbm_cloudadapter_agent_version }}"
36 | _INBM_DISPATCHER_AGENT_VERSION_: "{{ .Values.bma_versions.inbm_dispatcher_agent_version }}"
37 | _INBM_CONFIGURATION_AGENT_VERSION_: "{{ .Values.bma_versions.inbm_configuration_agent_version }}"
38 | _INBM_TELEMETRY_AGENT_VERSION_: "{{ .Values.bma_versions.inbm_telemetry_agent_version }}"
39 | _INBM_DIAGNOSTIC_AGENT_VERSION_: "{{ .Values.bma_versions.inbm_diagnostic_agent_version }}"
40 | _INBM_DISPATCHER_AGENT_VERSION_: "{{ .Values.bma_versions.inbm_dispatcher_agent_version }}"
41 | _INBC_PROGRAM_VERSION_: "{{ .Values.bma_versions.inbc_program_version }}"
42 | _MQTT_VERSION_: "{{ .Values.bma_versions.mqtt_version }}"
43 | _TPM_PROVISION_VERSION_: "{{ .Values.bma_versions.tpm_provision_version }}"
44 | _PLATFORM_UPDATE_AGENT_VERSION_: "{{ .Values.bma_versions.platform_update_agent_version }}"
45 | _PLATFORM_TELEMETRY_AGENT_VERSION_: "{{ .Values.bma_versions.platform_telemetry_agent_version }}"
46 | _CADDY_VERSION_: "{{ .Values.bma_versions.caddy_version }}"
47 |
--------------------------------------------------------------------------------
/edge-node-container/chart/templates/serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 | ---
4 | {{- if .Values.serviceAccount.create }}
5 | apiVersion: v1
6 | kind: ServiceAccount
7 | metadata:
8 | name: {{ include "enic.serviceAccountName" . }}
9 | labels:
10 | {{- include "enic.labels" . | nindent 4 }}
11 | {{- with .Values.serviceAccount.annotations }}
12 | annotations:
13 | {{- toYaml . | nindent 4 }}
14 | {{- end }}
15 | {{- end }}
16 |
--------------------------------------------------------------------------------
/edge-node-container/chart/values.yaml:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | # Default values for edge-node-in-a-container (enic).
5 | # This is a YAML-formatted file.
6 | # Declare variables to be passed into your templates.
7 | ---
8 | replicaCount: 1
9 | global:
10 | registry:
11 | name: null
12 | imagePullSecrets:
13 | - name: ""
14 | images:
15 | enic:
16 | repository: infra/enic
17 | pullPolicy: IfNotPresent
18 | # Image specific takes precedence
19 | # registry:
20 | # name: null
21 | # imagePullSecrets:
22 | # - name: ""
23 | utils:
24 | repository: infra/enic-utils
25 | pullPolicy: IfNotPresent
26 | # Image specific takes precedence
27 | # registry:
28 | # name: null
29 | # imagePullSecrets:
30 | # - name: ""
31 | nameOverride: ""
32 | fullnameOverride: "enic"
33 | serviceAccount:
34 | # Specifies whether a service account should be created
35 | create: true
36 | # Annotations to add to the service account
37 | annotations: {}
38 | # The name of the service account to use.
39 | # If not set and create is true, a name is generated using the fullname template
40 | name: ""
41 | podAnnotations: {}
42 | podSecurityContext: {}
43 | # podSecurityContext:
44 | # seccompProfile:
45 | # type: RuntimeDefault
46 | securityContext:
47 | privileged: true
48 | autoscaling:
49 | enabled: false
50 | minReplicas: 1
51 | maxReplicas: 100
52 | targetCPUUtilizationPercentage: 80
53 | # targetMemoryUtilizationPercentage: 80
54 | nodeSelector: {}
55 | storage:
56 | storageClassName: standard
57 | # storageClassName: openebs-hostpath
58 | tolerations: []
59 | affinity: {}
60 | tlsSecretName: "tls-orch"
61 | param:
62 | debug: true
63 | orch_fqdn: "kind.internal"
64 | orch_ip: ""
65 | path: "/var/lib/rancher/rke2/bin:/root/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
66 | orchUser: ""
67 | orchPass: ""
68 | orchProject: "sample-project"
69 | orchOrg: "sample-org"
70 | oamServerAddress: "0.0.0.0:5991"
71 | enableNIO: false
72 | resources:
73 | requests:
74 | memory: "4Gi"
75 | cpu: 2
76 | limits:
77 | memory: "12Gi"
78 | cpu: 6
79 | proxy:
80 | enabled: false
81 | http_proxy: ""
82 | https_proxy: ""
83 | no_proxy: ""
84 | # BMA Versions - Autogenerated, do not manually edit, use `make apply-version` or `make bma_versions`
85 | bma_versions:
86 | caddy_version: 2.7.6
87 | cluster_agent_version: 1.7.3
88 | hardware_discovery_agent_version: 1.7.1
89 | inbc_program_version: 4.2.8.6-1
90 | inbm_cloudadapter_agent_version: 4.2.8.6-1
91 | inbm_configuration_agent_version: 4.2.8.6-1
92 | inbm_diagnostic_agent_version: 4.2.8.6-1
93 | inbm_dispatcher_agent_version: 4.2.8.6-1
94 | inbm_telemetry_agent_version: 4.2.8.6-1
95 | mqtt_version: 4.2.8.6-1
96 | node_agent_version: 1.7.2
97 | platform_observability_agent_version: 1.8.0
98 | platform_telemetry_agent_version: 1.4.0
99 | platform_update_agent_version: 1.5.2
100 | tpm_provision_version: 4.2.8.6-1
101 | trtl_version: 4.2.8.6-1
102 |
--------------------------------------------------------------------------------
/edge-node-container/docs/cluster-dns.md:
--------------------------------------------------------------------------------
1 | # How to use cluster-dns
2 |
3 | 1. Configure the env file and copy it to /etc/cluster-dns/env.
4 |    a. CLUSTER_IP is the IP address of the Orchestrator.
5 |    b. CLUSTER_FQDN is the domain name suffix of the Orchestrator.
6 | 2. Copy cluster-dns.sh to /usr/bin/code-cluster-dns.sh.
7 | 3. Copy cluster-dns.service to /etc/systemd/system.
8 | 4. Run `systemctl enable cluster-dns`.
9 | 5. Run `systemctl start cluster-dns` (see the command sketch below).
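
A minimal command sketch of the steps above (assuming you run it from edge-node-container/scripts/cluster-dns and use the paths listed):

```bash
sudo mkdir -p /etc/cluster-dns
sudo cp env /etc/cluster-dns/env                      # set CLUSTER_IP and CLUSTER_FQDN first
sudo cp cluster-dns.sh /usr/bin/code-cluster-dns.sh
sudo chmod +x /usr/bin/code-cluster-dns.sh
sudo cp cluster-dns.service /etc/systemd/system/
sudo systemctl daemon-reload
sudo systemctl enable cluster-dns
sudo systemctl start cluster-dns
```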
10 |
11 | ## The functionality of cluster-dns
12 |
13 | The cluster-dns service patches the host-alias field of the cattle
14 | deployment, since you cannot connect to a private Orchestrator without
15 | any hosts entries.
16 |
17 | If the Orchestrator is public and registered on a DNS server, you do not
18 | need to use this script.
19 |
--------------------------------------------------------------------------------
/edge-node-container/entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
4 | # SPDX-License-Identifier: Apache-2.0
5 |
6 | # Script Name: entrypoint.sh
7 | # Description: This script is the entrypoint of ENiC
8 |
9 | set -xeo pipefail
10 |
11 | # Setup the default environment variables for systemd
12 | mkdir -p /etc/systemd/system.conf.d/
13 | tee /etc/systemd/system.conf.d/myenvironment.conf << END
14 | [Manager]
15 | DefaultEnvironment=$(while read -r Line; do echo -n "$Line " ; done < <(env))
16 | END
17 |
18 | # Start systemd
19 | if [ "$DEPLOY_TYPE" = "ENIVM" ]; then
20 | uid=$(id -u)
21 | export XDG_RUNTIME_DIR=/run/user/$uid
22 | exec /lib/systemd/systemd --user >/dev/null &
23 | else
24 | exec /lib/systemd/systemd
25 | fi
26 |
--------------------------------------------------------------------------------
/edge-node-container/requirements.txt:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | # lint yaml
5 | yamllint~=1.35.1
6 |
7 | # license check
8 | reuse~=5.0.2
9 |
10 | # utils
11 | certifi==2023.7.22
12 | charset-normalizer==3.3.0
13 | idna==3.4
14 | PyYAML==6.0.1
15 | requests==2.31.0
16 | urllib3==2.0.6
17 |
--------------------------------------------------------------------------------
/edge-node-container/scripts/_utils.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
4 | # SPDX-License-Identifier: Apache-2.0
5 |
6 | # contains functions shared across files
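# Assumed usage: the other ENiC scripts source this file and then call configureEnvironment, e.g.:
#   source ./_utils.sh && configureEnvironment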
7 |
8 | configureEnvironment() {
9 | echo "Configure Environment"
10 | set +e
11 |   # NOTE that the service will restart indefinitely until it finishes the config
12 | if [ ! -f /opt/enic/bin/agents_env.sh ]; then
13 | set -e
14 | echo "Generate configuration files"
15 | agents_env=$(envsubst < /etc/agents_env.tpl)
16 | echo "${agents_env}" > /opt/enic/bin/agents_env.sh
17 | fi
18 | set -e
19 | # shellcheck disable=SC1091
20 | source /opt/enic/bin/agents_env.sh
21 | }
22 |
--------------------------------------------------------------------------------
/edge-node-container/scripts/agents.service:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | [Unit]
5 | Description=Service running edge node provisioning
6 | After=network-online.target containerd.service
7 | Wants=network-online.target
8 | ConditionPathExists=!/var/edge_node/edge_node_provisioned
9 |
10 | [Service]
11 | User=root
12 | Restart=on-failure
13 | RestartSec=5s
14 | WorkingDirectory=/opt/enic/bin
15 | EnvironmentFile=/etc/environment
16 |
17 | ExecStart=/bin/bash /opt/enic/bin/agents.sh
18 | # Platform Update Agent makes use of bindfs which needs to be running for binding required files.
19 | # lpagent for PUA runs it but if lpke service becomes inactive the binding is lost. So, it needs
20 | # to be alive even after a successful execution/exit.
21 | RemainAfterExit=true
22 |
23 | [Install]
24 | WantedBy=multi-user.target
25 |
--------------------------------------------------------------------------------
/edge-node-container/scripts/agents_env.tpl:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | export ORCH_C_URL=cluster-orch-node.${_ORCH_FQDN_}:443
5 | export ORCH_I_URL=infra-node.${_ORCH_FQDN_}:443
6 | export ORCH_N_L_OBS=logs-node.${_ORCH_FQDN_}
7 | export ORCH_N_L_OBS_PORT=443
8 | export ORCH_N_M_OBS=metrics-node.${_ORCH_FQDN_}
9 | export ORCH_N_M_OBS_PORT=443
10 | export ORCH_I_MM_URL=update-node.${_ORCH_FQDN_}:443
11 | export ORCH_I_TM_URL=telemetry-node.${_ORCH_FQDN_}:443
12 | export ORCH_TOKEN_URL=keycloak.${_ORCH_FQDN_}
13 | export RS_TOKEN_URL=release.${_ORCH_FQDN_}
14 | export RS_TYPE=no-auth
15 | export APT_SOURCE_URL=files-rs.edgeorchestration.intel.com
16 | export APT_SOURCE_REPO_ROOT=files-edge-orch
17 | export APT_SOURCE_PROXY_PORT=60444
18 | # Agents version
19 | export NODE_AGENT_VERSION=${_NODE_AGENT_VERSION_}
20 | export CLUSTER_AGENT_VERSION=${_CLUSTER_AGENT_VERSION_}
21 | export HDA_AGENT_VERSION=${_HDA_AGENT_VERSION_}
22 | export POA_AGENT_VERSION=${_POA_AGENT_VERSION_}
23 | export TRTL_VERSION=${_TRTL_VERSION_}
24 | export INBM_CLOUDADAPTER_AGENT_VERSION=${_INBM_CLOUDADAPTER_AGENT_VERSION_}
25 | export INBM_DISPATCHER_AGENT_VERSION=${_INBM_DISPATCHER_AGENT_VERSION_}
26 | export INBM_CONFIGURATION_AGENT_VERSION=${_INBM_CONFIGURATION_AGENT_VERSION_}
27 | export INBM_TELEMETRY_AGENT_VERSION=${_INBM_TELEMETRY_AGENT_VERSION_}
28 | export INBM_DIAGNOSTIC_AGENT_VERSION=${_INBM_DIAGNOSTIC_AGENT_VERSION_}
29 | export INBC_PROGRAM_VERSION=${_INBC_PROGRAM_VERSION_}
30 | export MQTT_VERSION=${_MQTT_VERSION_}
31 | export TPM_PROVISION_VERSION=${_TPM_PROVISION_VERSION_}
32 | export PLATFORM_UPDATE_AGENT_VERSION=${_PLATFORM_UPDATE_AGENT_VERSION_}
33 | export PLATFORM_TELEMETRY_AGENT_VERSION=${_PLATFORM_TELEMETRY_AGENT_VERSION_}
34 | export CADDY_VERSION=${_CADDY_VERSION_}
35 |
--------------------------------------------------------------------------------
/edge-node-container/scripts/cluster-dns/cluster-dns.service:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | [Unit]
5 | Description=cluster-dns
6 | After=network.target
7 |
8 | [Service]
9 | Type=simple
10 | EnvironmentFile=/etc/cluster-dns/env
11 | ExecStart=/usr/bin/cluster-dns service
12 | User=root
13 | Restart=on-failure
14 | RestartSec=5s
15 |
16 | [Install]
17 | WantedBy=multi-user.target
18 |
--------------------------------------------------------------------------------
/edge-node-container/scripts/cluster-dns/env:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | CLUSTER_IP=
5 | CLUSTER_FQDN=kind.internal
6 | http_proxy=
7 | https_proxy=
8 | no_proxy=
9 |
--------------------------------------------------------------------------------
/edge-node-container/scripts/cluster-dns/env.tpl:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | CLUSTER_IP=${_ORCH_IP_}
5 | CLUSTER_FQDN=${_ORCH_FQDN_}
6 | http_proxy=${HTTP_PROXY}
7 | https_proxy=${HTTPS_PROXY}
8 | no_proxy=${NO_PROXY}
9 |
--------------------------------------------------------------------------------
/edge-node-container/scripts/dmiTemplate/dmi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-edge-platform/virtual-edge-node/e14cf1632d88da9bb8fa3f499c4c025d7b634efd/edge-node-container/scripts/dmiTemplate/dmi
--------------------------------------------------------------------------------
/edge-node-container/scripts/dmiTemplate/dmi-dump:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-edge-platform/virtual-edge-node/e14cf1632d88da9bb8fa3f499c4c025d7b634efd/edge-node-container/scripts/dmiTemplate/dmi-dump
--------------------------------------------------------------------------------
/edge-node-container/scripts/fake-uuid.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
4 | # SPDX-License-Identifier: Apache-2.0
5 |
6 | # Script Name: fake-uuid.sh
7 | # Description: This script creates a folder as the output directory ($3).
8 | # Inside the created folder, the script copies the dmi/dmi-dump reference template files ($1 and $2)
9 | #               and overwrites them using the provided uuid ($4); it also writes in this folder a new file named
10 | # uuid containing the provided uuid ($4).
11 | # Then, these files (dmi/uuid) can be imported as specific volume targets in a docker container,
12 | # so that they can fake the source of information for the dmidecode tool.
13 | # If a uuid ($4) is not specified (e.g., ""), then the script generates a new uuid using the uuidgen tool.
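#
# Example invocation (run from the scripts/ directory; the output path is hypothetical, an empty uuid triggers generation):
#   ./fake-uuid.sh ./dmiTemplate/dmi ./dmiTemplate/dmi-dump /tmp/enic-dmi ""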
14 |
15 | dmi=$1 # Stores the path to the dmi file template.
16 | dmiDump=$2 # Stores the path to the dmi-dump file template.
17 | outDir=$3 # Stores the path to the output of this script, the folder to output dmi/dmi-dump/uuid files.
18 | tId=$4 # Input uuid to be used to generate new dmi/dmi-dump/uuid files.
19 |
20 | # Checks if uuid variable is set and not empty.
21 | if [[ -n "${tId}" ]];
22 | then
23 | echo "uuid ${tId}";
24 | else
25 | tId=$(uuidgen)
26 | echo "uuid is unset, generated ${tId}";
27 | fi
28 |
29 | # Validates uuid format.
30 | pattern='^\{?[A-Z0-9a-z]{8}-[A-Z0-9a-z]{4}-[A-Z0-9a-z]{4}-[A-Z0-9a-z]{4}-[A-Z0-9a-z]{12}\}?$'
31 | if [[ "$tId" =~ $pattern ]]; then
32 | echo "valid uuid format";
33 | else
34 | echo "invalid uuid format";
35 | exit 1;
36 | fi
37 |
38 | # Creates output folder to store dmi/dmi-dump/uuid files.
39 | mkdir -p "${outDir}"
40 |
41 | # Copy template dmi/dmi-dump files to output folder.
42 | cp "${dmi}" "${outDir}"
43 | cp "${dmiDump}" "${outDir}"
44 |
45 | dmiOut=${outDir}/$(basename "${dmi}")
46 | dmiDumpOut=${outDir}/$(basename "${dmiDump}")
47 |
48 | # Overwrites dmi/dmi-dump files with input/provided uuid.
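# The seek offsets used below (74 for the UUID bytes, 120 for the serial number, and 32 when copying
# into the dump) are assumed to match the layout of the bundled dmi/dmi-dump reference templates.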
49 | tmpId=$(echo "${tId}" | sed -ne 's@-@@gp'|sed 's@\([0-9a-zA-Z]\{2\}\)@\\x\1@g')
50 | echo -ne "${tmpId}" | dd of="${dmiOut}" bs=1 seek=74 conv=notrunc
51 | serial=$(echo "${tId}" |awk -F- '{print $1$2}')
52 | echo -ne "${serial}" | dd of="${dmiOut}" bs=1 seek=120 conv=notrunc
53 | dd if="${dmiOut}" of="${dmiDumpOut}" bs=1 seek=32 count=256 conv=notrunc
54 |
55 | # Outputs the overwritten uuid and sn into a file.
56 | uuid=$(dmidecode -s system-uuid --from-dump "${dmiDumpOut}")
57 | echo "${uuid}"
58 | echo "${uuid}" > "${outDir}"/uuid
59 | sn=$(dmidecode -s system-serial-number --from-dump "${dmiDumpOut}")
60 | echo "${sn}"
61 | echo "${sn}" > "${outDir}"/sn
--------------------------------------------------------------------------------
/edge-node-container/scripts/onboard.service:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | [Unit]
5 | Description=Service running edge node provisioning
6 | After=network-online.target containerd.service
7 | Wants=network-online.target
8 | ConditionPathExists=!/var/edge_node/edge_node_onboarded
9 |
10 | [Service]
11 | User=root
12 | Restart=on-failure
13 | RestartSec=5s
14 | WorkingDirectory=/opt/enic/bin
15 | EnvironmentFile=/etc/environment
16 |
17 | ExecStart=/bin/bash /opt/enic/bin/onboard.sh
18 | RemainAfterExit=true
19 |
20 | [Install]
21 | WantedBy=multi-user.target
22 |
--------------------------------------------------------------------------------
/edge-node-container/scripts/onboard.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
4 | # SPDX-License-Identifier: Apache-2.0
5 |
6 | # Script Name: onboard.sh
7 | # Description: This script is meant to run as a systemd service
8 | # and is used to onboard/provision enic.
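# It expects the following variables to be present in the environment
# (e.g., via /etc/environment, which onboard.service loads):
# _ORCH_FQDN_, _ORCH_USER_, _ORCH_PASS_, _ORCH_PROJECT_, _OAM_SERVER_ADDRESS_, _ENABLE_NIO_.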
9 |
10 | set -xeo pipefail
11 |
12 | function onboard-provision() {
13 | BINARY_PATH="/opt/enic/bin/enic"
14 | echo "EdgeNode setup using golang scripts/binary ${BINARY_PATH}"
15 | $BINARY_PATH -globalLogLevel="debug" -orchFQDN="${_ORCH_FQDN_}" -orchCAPath="/usr/local/share/ca-certificates/ca.crt" -baseFolder="/etc/intel_edge_node" -onbUser="${_ORCH_USER_}" -onbPass="${_ORCH_PASS_}" -projectID="${_ORCH_PROJECT_}" -oamServerAddress="${_OAM_SERVER_ADDRESS_}" -enableNIO="${_ENABLE_NIO_}"
16 | }
17 |
18 | echo "Onboard/Provision"
19 | onboard-provision
20 |
21 | touch /var/edge_node/edge_node_onboarded
22 |
--------------------------------------------------------------------------------
/edge-node-container/scripts/proxy.tpl:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | http_proxy=${HTTP_PROXY}
5 | HTTP_PROXY=${HTTP_PROXY}
6 | https_proxy=${HTTPS_PROXY}
7 | HTTPS_PROXY=${HTTPS_PROXY}
8 | no_proxy=${NO_PROXY}
9 | NO_PROXY=${NO_PROXY}
10 |
--------------------------------------------------------------------------------
/edge-node-container/scripts/sudoers.txt:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: 2025 Intel Corporation
2 | #
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | Defaults env_keep += "http_proxy https_proxy no_proxy HTTP_PROXY HTTPS_PROXY NO_PROXY"
--------------------------------------------------------------------------------
/edge-node-container/trivy.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | ignorefile: edge-node-container/.trivyignore
6 |
--------------------------------------------------------------------------------
/edge-node-container/utils/bma_utils.py:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | import yaml
5 | import sys
6 | import requests
7 | import argparse
8 | import os
9 | import json
10 | import subprocess
11 |
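# This script reads an agents manifest (YAML with a 'packages' list of name/version
# entries) from stdin, pulls the corresponding .deb packages with oras into
# ../bma_packages, and writes the package versions into chart/bma_values.yaml.
# Example invocation (illustrative manifest path):
#   python3 utils/bma_utils.py < manifest.yaml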
12 | def parse(string):
13 | data = yaml.safe_load(string)
14 | packages = data['packages']
15 | v = [{'package': d['name'], 'version': d['version']} for d in packages]
16 | print(v)
17 | return v
18 |
19 | def download(bmas):
20 | full_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "bma_packages")
21 | print(bmas)
22 | for deb in bmas:
23 | # oras pull debs from OCI repositories
24 | filename = "{package}:{version}".format(**deb)
25 | command = f'oras pull "registry-rs.edgeorchestration.intel.com/edge-orch/en/deb/{filename}" -o {full_path}'
26 | subprocess.run(command, shell=True, check=True)
28 |
29 | def bma_values(bmas):
30 | full_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "chart", "bma_values.yaml")
31 | values = {}
32 | for deb in bmas:
33 | key = f"{deb['package'].replace('-', '_')}_version"
34 | values[key] = deb['version']
35 | values["caddy_version"] = "2.7.6"
36 | with open(full_path, 'w') as ymlfile:
37 | dumpdata = yaml.dump(values)
38 | ymlfile.write(dumpdata)
39 |
40 | if __name__ == "__main__":
41 | parser = argparse.ArgumentParser()
42 | args = parser.parse_args()
43 |
44 | bmas = parse(sys.stdin.read())
45 | download(bmas)
46 | bma_values(bmas)
47 |
--------------------------------------------------------------------------------
/edge-node-simulator/.dockerignore:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | *.license
5 | *.md
6 | *.yaml
7 | *.yml
8 | .git
9 | .github
10 | .idea
11 | .reuse
12 | .gitignore
13 | .golangci.yml
14 | Jenkinsfile
15 | LICENSES
16 | api
17 | test
18 | venv_*
19 | *.tgz
20 |
--------------------------------------------------------------------------------
/edge-node-simulator/.gitignore:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | *.swp
5 | *.test
6 | .DS_Store
7 | .idea
8 | .vscode
9 | artifacts
10 | go.work
11 | out/*
12 | build/*
13 | vendor
14 | venv*
15 | *.crt
16 | *.pem
17 | out
18 | *.tgz
19 |
--------------------------------------------------------------------------------
/edge-node-simulator/.markdownlintignore:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | out/
5 | vendor/
6 | venv_ensim/
7 |
--------------------------------------------------------------------------------
/edge-node-simulator/Dockerfile:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | FROM golang:1.24.1-alpine3.21 AS build
5 |
6 | ENV GO111MODULE=on
7 | ARG MAKE_TARGET=go-build
8 | # Arguments used to stamp the binary
9 | ARG REPO_URL
10 | ARG VERSION
11 | ARG REVISION
12 | ARG BUILD_DATE
13 |
14 | COPY Makefile go.mod go.sum /go/src/github.com/open-edge-platform/virtual-edge-node/edge-node-simulator/
15 | COPY version.mk common.mk /go/src/github.com/open-edge-platform/virtual-edge-node/
16 | COPY cmd/ /go/src/github.com/open-edge-platform/virtual-edge-node/edge-node-simulator/cmd/
17 | COPY pkg/ /go/src/github.com/open-edge-platform/virtual-edge-node/edge-node-simulator/pkg/
18 | COPY vendor/ /go/src/github.com/open-edge-platform/virtual-edge-node/edge-node-simulator/vendor/
19 |
20 | RUN apk add --no-cache make=4.4.1-r2
21 |
22 | WORKDIR /go/src/github.com/open-edge-platform/virtual-edge-node/edge-node-simulator
23 | RUN CGO_ENABLED=0 DOCKER_LABEL_REPO_URL=${REPO_URL} DOCKER_LABEL_VERSION=${VERSION} DOCKER_LABEL_REVISION=${REVISION} DOCKER_LABEL_BUILD_DATE=${BUILD_DATE} make ${MAKE_TARGET}
24 |
25 | FROM alpine:3.21@sha256:a8560b36e8b8210634f77d9f7f9efd7ffa463e380b75e2e74aff4511df3ef88c
26 |
27 | ARG REPO_URL
28 | ARG VERSION
29 | ARG REVISION
30 | ARG BUILD_DATE
31 |
32 | LABEL org.opencontainers.image.version=${VERSION} \
33 | org.opencontainers.image.source=${REPO_URL} \
34 | org.opencontainers.image.revision=${REVISION} \
35 | org.opencontainers.image.created=${BUILD_DATE}
36 |
37 | USER nobody
38 |
39 | COPY --from=build /go/src/github.com/open-edge-platform/virtual-edge-node/edge-node-simulator/out/ensim/server/main /usr/local/bin/ensim-server
40 | COPY --from=build /go/src/github.com/open-edge-platform/virtual-edge-node/edge-node-simulator/out/ensim/client/main /usr/local/bin/ensim-client
41 |
42 | ENTRYPOINT ["ensim-server"]
43 |
--------------------------------------------------------------------------------
/edge-node-simulator/REUSE.toml:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | version = 1
5 |
6 | [[annotations]]
7 | path = [
8 | "VERSION",
9 | "go.sum",
10 | "**.md",
11 | "api/buf.lock",
12 | "pkg/api/ensim/**.pb*.go",
13 | "pkg/en/onboard/proto/**",
14 | "docs/*",
15 | ]
16 |
17 | precedence = "aggregate"
18 | SPDX-FileCopyrightText = "(C) 2025 Intel Corporation"
19 | SPDX-License-Identifier = "Apache-2.0"
20 |
--------------------------------------------------------------------------------
/edge-node-simulator/VERSION:
--------------------------------------------------------------------------------
1 | 0.6.4
2 |
--------------------------------------------------------------------------------
/edge-node-simulator/api/buf.lock:
--------------------------------------------------------------------------------
1 | # Generated by buf. DO NOT EDIT.
2 | version: v1
3 | deps:
4 | - remote: buf.build
5 | owner: bufbuild
6 | repository: protovalidate
7 | commit: d39267d9df8f4053bbac6b956a23169f
8 | digest: shake256:bfb2a6f67179429b8fec30ddd03fc42538cc4d2d207e3c89e325de75fd3716335be06114666d2dd0a099eaf37cbc685c5d1b1a347537af74017e780280e64a9c
9 | - remote: buf.build
10 | owner: googleapis
11 | repository: googleapis
12 | commit: 546238c53f7340c6a2a6099fb863bc1b
13 | digest: shake256:8d75c12f391e392b24c076d05117b47aeddb090add99c70247a8f4389b906a65f61a933c68e54ed8b73a050b967b6b712ba194348b67c3ab3ee26cc2cb25852c
14 |
--------------------------------------------------------------------------------
/edge-node-simulator/api/buf.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | version: v1
6 | deps:
7 | - buf.build/bufbuild/protovalidate
8 | - buf.build/googleapis/googleapis
9 | breaking:
10 | use:
11 | - FILE
12 | lint:
13 | use:
14 | - STANDARD
15 | - ENUM_FIRST_VALUE_ZERO
16 | - COMMENT_MESSAGE
17 | - COMMENT_RPC
18 | - COMMENT_SERVICE
19 | rpc_allow_google_protobuf_empty_responses: true
20 | ignore_only:
21 | # In accordance with AIP-131 we return un-wrapped resource messages on CRUD RPCs.
22 | RPC_RESPONSE_STANDARD_NAME:
23 | - ensim/v1/ensim.proto
24 | RPC_REQUEST_RESPONSE_UNIQUE:
25 | - ensim/v1/ensim.proto
26 |
--------------------------------------------------------------------------------
/edge-node-simulator/buf.gen.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | # buf.gen.yaml
6 | # docs: https://docs.buf.build/configuration/v1/buf-gen-yaml
7 |
8 | version: v1
9 |
10 | plugins:
11 |
12 | # go - https://pkg.go.dev/google.golang.org/protobuf
13 | - plugin: buf.build/protocolbuffers/go:v1.33.0
14 | out: pkg/api
15 | opt:
16 | - paths=source_relative
17 |
18 | # go grpc - https://pkg.go.dev/google.golang.org/grpc
19 | - plugin: buf.build/grpc/go:v1.3.0
20 | out: pkg/api
21 | opt:
22 | - paths=source_relative
23 | - require_unimplemented_servers=false
24 |
25 | # validation:
26 | # - https://github.com/bufbuild/protovalidate
27 | # - https://github.com/bufbuild/protovalidate-go
28 | - plugin: buf.build/bufbuild/validate-go:v1.0.1
29 | out: pkg/api
30 | opt:
31 | - paths=source_relative
32 |
--------------------------------------------------------------------------------
/edge-node-simulator/buf.work.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | # buf.work.yaml
6 | # docs: https://docs.buf.build/configuration/v1/buf-work-yaml
7 | # Only defines one directory, but required so that buf.yaml deps list will work
8 |
9 | version: v1
10 | directories:
11 | - api
12 |
--------------------------------------------------------------------------------
/edge-node-simulator/charts/.helmignore:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | # Patterns to ignore when building packages.
5 | # This supports shell glob matching, relative path matching, and
6 | # negation (prefixed with !). Only one pattern per line.
7 | .DS_Store
8 | # Common VCS dirs
9 | .git/
10 | .gitignore
11 | .bzr/
12 | .bzrignore
13 | .hg/
14 | .hgignore
15 | .svn/
16 | # Common backup files
17 | *.swp
18 | *.bak
19 | *.tmp
20 | *.orig
21 | *~
22 | # Various IDEs
23 | .project
24 | .idea/
25 | *.tmproj
26 | .vscode/
27 | # Local automation
28 | .appref.yaml
29 |
--------------------------------------------------------------------------------
/edge-node-simulator/charts/Chart.yaml:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 | ---
4 | apiVersion: v2
5 | name: ensim
6 | description: Edge Infrastructure Manager - Edge Node Simulator
7 | type: application
8 | version: "0.6.4"
9 | appVersion: "0.6.4"
10 | home: edge-orchestrator.intel.com
11 | maintainers:
12 | - name: Edge Infrastructure Manager Team
13 |
--------------------------------------------------------------------------------
/edge-node-simulator/charts/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 | {{/*
4 | Expand the name of the chart.
5 | */}}
6 | {{- define "ensim.name" -}}
7 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
8 | {{- end }}
9 |
10 | {{/*
11 | Create a default fully qualified app name.
12 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
13 | If release name contains chart name it will be used as a full name.
14 | */}}
15 | {{- define "ensim.fullname" -}}
16 | {{- if .Values.fullnameOverride }}
17 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
18 | {{- else }}
19 | {{- $name := default .Chart.Name .Values.nameOverride }}
20 | {{- if contains $name .Release.Name }}
21 | {{- .Release.Name | trunc 63 | trimSuffix "-" }}
22 | {{- else }}
23 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
24 | {{- end }}
25 | {{- end }}
26 | {{- end }}
27 |
28 | {{/*
29 | Create chart name and version as used by the chart label.
30 | */}}
31 | {{- define "ensim.chart" -}}
32 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
33 | {{- end }}
34 |
35 | {{/*
36 | Common labels
37 | */}}
38 | {{- define "ensim.labels" -}}
39 | helm.sh/chart: {{ include "ensim.chart" . }}
40 | {{ include "ensim.selectorLabels" . }}
41 | {{- if .Chart.AppVersion }}
42 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
43 | {{- end }}
44 | app.kubernetes.io/managed-by: {{ .Release.Service }}
45 | {{- end }}
46 |
47 | {{/*
48 | Selector labels
49 | */}}
50 | {{- define "ensim.selectorLabels" -}}
51 | app.kubernetes.io/name: {{ include "ensim.name" . }}
52 | app.kubernetes.io/instance: {{ .Release.Name }}
53 | {{- end }}
54 |
55 | {{/*
56 | Create the name of the service account to use
57 | */}}
58 | {{- define "ensim.serviceAccountName" -}}
59 | {{- if .Values.serviceAccount.create }}
60 | {{- default (include "ensim.fullname" .) .Values.serviceAccount.name }}
61 | {{- else }}
62 | {{- default "default" .Values.serviceAccount.name }}
63 | {{- end }}
64 | {{- end }}
65 |
--------------------------------------------------------------------------------
/edge-node-simulator/charts/templates/envvars.yaml:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | # Note that the agent values are coming from the bma_values.yaml
5 | # no default values are defined for them
6 | ---
7 | apiVersion: v1
8 | kind: ConfigMap
9 | metadata:
10 | name: env-vars-{{ include "ensim.fullname" . }}
11 | data:
12 | {{- if .Values.proxy.enabled }}
13 | HTTP_PROXY: "{{ .Values.proxy.http_proxy }}"
14 | HTTPS_PROXY: "{{ .Values.proxy.https_proxy }}"
15 | NO_PROXY: "{{ .Values.proxy.no_proxy }}"
16 | http_proxy: "{{ .Values.proxy.http_proxy }}"
17 | https_proxy: "{{ .Values.proxy.https_proxy }}"
18 | no_proxy: "{{ .Values.proxy.no_proxy }}"
19 | {{- end }}
20 |
--------------------------------------------------------------------------------
/edge-node-simulator/charts/templates/service.yaml:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | ---
5 | apiVersion: v1
6 | kind: Service
7 | metadata:
8 | name: {{ template "ensim.fullname" . }}
9 | labels:
10 | {{- include "ensim.labels" . | nindent 4 }}
11 | spec:
12 | type: ClusterIP
13 | selector:
14 | {{- include "ensim.selectorLabels" . | nindent 4 }}
15 | ports:
16 | - name: grpc
17 | targetPort: grpc
18 | protocol: TCP
19 | port: {{ .Values.service.port }}
20 |
21 | {{- if .Values.service.nodePort.enabled }}
22 | ---
23 | apiVersion: v1
24 | kind: Service
25 | metadata:
26 | name: {{ template "ensim.fullname" . }}-nodeport
27 | labels:
28 | {{- include "ensim.labels" . | nindent 4 }}
29 | spec:
30 | type: NodePort
31 | selector:
32 | name: {{ template "ensim.fullname" . }}-nodeport
33 | app: ensim
34 | resource: {{ template "ensim.fullname" . }}
35 | {{- include "ensim.selectorLabels" . | nindent 4 }}
36 | ports:
37 | - name: grpc
38 | port: {{ .Values.service.port }}
39 | nodePort: {{ .Values.service.nodePort.port }}
40 | protocol: TCP
41 | {{- end }}
42 |
--------------------------------------------------------------------------------
/edge-node-simulator/charts/templates/serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 | ---
4 | {{- if .Values.serviceAccount.create }}
5 | apiVersion: v1
6 | kind: ServiceAccount
7 | metadata:
8 | name: {{ include "ensim.serviceAccountName" . }}
9 | labels:
10 | {{- include "ensim.labels" . | nindent 4 }}
11 | {{- with .Values.serviceAccount.annotations }}
12 | annotations:
13 | {{- toYaml . | nindent 4 }}
14 | {{- end }}
15 | {{- end }}
16 |
--------------------------------------------------------------------------------
/edge-node-simulator/charts/values.yaml:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | # Default values for edge-node-simulator (ensim).
5 | # This is a YAML-formatted file.
6 | # Declare variables to be passed into your templates.
7 | ---
8 | replicaCount: 1
9 | global:
10 | registry:
11 | name: null
12 | imagePullSecrets:
13 | - name: ""
14 |
15 | image:
16 | repository: infra/ensim
17 | pullPolicy: IfNotPresent
18 | # Image specific takes precedence
19 | # registry:
20 | # name: null
21 | # imagePullSecrets:
22 | # - name: ""
23 |
24 | nameOverride: ""
25 | fullnameOverride: "ensim"
26 |
27 | serviceAccount:
28 | # Specifies whether a service account should be created
29 | create: true
30 | # Annotations to add to the service account
31 | annotations: {}
32 | # The name of the service account to use.
33 | # If not set and create is true, a name is generated using the fullname template
34 | name: ""
35 | podAnnotations: {}
36 |
37 | podSecurityContext:
38 | seccompProfile:
39 | type: RuntimeDefault
40 |
41 | securityContext:
42 | capabilities:
43 | drop:
44 | - ALL
45 | readOnlyRootFilesystem: true
46 | allowPrivilegeEscalation: false
47 | runAsNonRoot: true
48 | runAsUser: 65534
49 |
50 | autoscaling:
51 | enabled: false
52 | minReplicas: 1
53 | maxReplicas: 100
54 | targetCPUUtilizationPercentage: 80
55 | # targetMemoryUtilizationPercentage: 80
56 | nodeSelector: {}
57 | storage: {}
58 | tolerations: []
59 | affinity: {}
60 | tlsSecretName: "tls-orch"
61 |
62 | configArgs:
63 | server:
64 | oamServerAddress: "0.0.0.0:2379"
65 | globalLogLevel: "info"
66 | gRPCPort: 3196
67 | orchCAPath: "/usr/local/share/ca-certificates/orch-ca.crt"
68 | orchFQDN: "kind.internal"
69 | orchIP: ""
70 |
71 | service:
72 | port: 3196
73 | nodePort:
74 | enabled: false
75 | port: 31961
76 |
77 | resources:
78 | requests:
79 | memory: "4Gi"
80 | cpu: "2"
81 | limits:
82 | memory: "16Gi"
83 | cpu: 8
84 | proxy:
85 | enabled: false
86 | http_proxy: ""
87 | https_proxy: ""
88 | no_proxy: ""
89 |
--------------------------------------------------------------------------------
/edge-node-simulator/cmd/ensim/client/main.go:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | package main
5 |
6 | import (
7 | "context"
8 | "flag"
9 | "os"
10 | "os/signal"
11 | "syscall"
12 |
13 | "github.com/open-edge-platform/infra-core/inventory/v2/pkg/logging"
14 | ensim "github.com/open-edge-platform/virtual-edge-node/edge-node-simulator/pkg/sim"
15 | )
16 |
17 | var zlog = logging.GetLogger("ensim.client")
18 |
19 | var (
20 | errFatal error
21 | sigChan = make(chan os.Signal, 1) // channel to handle any interrupt signals
22 |
23 | addressSimulator = flag.String(
24 | "addressSimulator",
25 | "",
26 | "gRPC address (ip:port) of the Edge Node simulator",
27 | )
28 | orchProject = flag.String(
29 | "project",
30 | "",
31 | "default project",
32 | )
33 | orchEdgeOnboardPasswd = flag.String(
34 | "onbPass",
35 | "", // update
36 | "default password of orch keycloak onboard username",
37 | )
38 | orchEdgeOnboardUser = flag.String(
39 | "onbUser",
40 | "", // update
41 | "default onboard username of orch keycloak",
42 | )
43 | orchEdgeAPIPasswd = flag.String(
44 | "apiPass",
45 | "", // update
46 | "default password of orch keycloak API username",
47 | )
48 | orchEdgeAPIUser = flag.String(
49 | "apiUser",
50 | "", // update
51 | "default API username of orch keycloak",
52 | )
53 | enableNIO = flag.Bool(
54 | "enableNIO",
55 | false,
56 | "enables edge node NIO by default",
57 | )
58 | enableTeardown = flag.Bool(
59 | "enableTeardown",
60 | true,
61 | "enables edge node Teardown (removal from InfrastructureManager on delete) by default",
62 | )
63 | )
64 |
65 | func main() {
66 | defer func() {
67 | if errFatal != nil {
68 | zlog.Fatal().Err(errFatal).Msg("failed to start Edge Node simulator client")
69 | }
70 | }()
71 | zlog.Info().Msg("Edge Node Simulator Client")
72 | flag.Parse()
73 | ctx, cancel := context.WithCancel(context.Background())
74 |
75 | client, err := ensim.NewClient(ctx, *addressSimulator)
76 | if err != nil {
77 | zlog.Err(err).Msg("failed to create Edge Node sim client")
78 | errFatal = err
79 | return
80 | }
81 | defer client.Close()
82 |
83 | cliCfg := &ensim.CliCfg{
84 | Project: *orchProject,
85 | OnboardUsername: *orchEdgeOnboardUser,
86 | OnboardPassword: *orchEdgeOnboardPasswd,
87 | APIUsername: *orchEdgeAPIUser,
88 | APIPassword: *orchEdgeAPIPasswd,
89 | EnableNIO: *enableNIO,
90 | EnableTeardown: *enableTeardown,
91 | }
92 | c := ensim.NewCli(ctx, client, cliCfg)
93 | _, err = c.PromptRoot()
94 | if err != nil {
95 | zlog.Err(err).Msg("failed to start simulator client CLI")
96 | errFatal = err
97 | return
98 | }
99 |
100 | signal.Notify(sigChan,
101 | syscall.SIGHUP,
102 | syscall.SIGINT,
103 | syscall.SIGTERM,
104 | syscall.SIGQUIT,
105 | )
106 | s := <-sigChan
107 | zlog.Info().Msgf("Exiting Edge Node simulator client: received signal %s", s)
108 | cancel()
109 | }
110 |
--------------------------------------------------------------------------------
/edge-node-simulator/cmd/ensim/server/main.go:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | package main
5 |
6 | import (
7 | "os"
8 | "os/signal"
9 | "sync"
10 | "syscall"
11 |
12 | "github.com/open-edge-platform/infra-core/inventory/v2/pkg/logging"
13 | "github.com/open-edge-platform/infra-core/inventory/v2/pkg/oam"
14 | _ "github.com/open-edge-platform/infra-core/inventory/v2/pkg/perf"
15 | ensim "github.com/open-edge-platform/virtual-edge-node/edge-node-simulator/pkg/sim"
16 | )
17 |
18 | var zlog = logging.GetLogger("ensim.server")
19 |
20 | var (
21 | errFatal error
22 | wg = sync.WaitGroup{} // waitgroup so main will wait for all go routines to exit cleanly
23 | readyChan = make(chan bool, 1) // channel to signal the readiness.
24 | termChan = make(chan bool, 1) // channel to signal termination of main process.
25 | sigChan = make(chan os.Signal, 1) // channel to handle any interrupt signals
26 | )
27 |
28 | func setOAM(oamServerAddr string, termChan, readyChan chan bool, wg *sync.WaitGroup) {
29 | if oamServerAddr != "" {
30 | // Add oam grpc server
31 | wg.Add(1)
32 | go func() {
33 | // Disable tracing by default
34 | if err := oam.StartOamGrpcServer(termChan, readyChan, wg,
35 | oamServerAddr, false); err != nil {
36 | zlog.Fatal().Err(err).Msg("failed to start oam grpc server")
37 | }
38 | }()
39 | }
40 | }
41 |
42 | func main() {
43 | zlog.Info().Msg("Edge Node Simulator")
44 |
45 | defer func() {
46 | if errFatal != nil {
47 | zlog.Fatal().Err(errFatal).Msg("failed to start Edge Node simulator")
48 | }
49 | }()
50 |
51 | cfg, err := ensim.Cfg()
52 | if err != nil {
53 | zlog.Err(err).Msg("failed to get config")
54 | errFatal = err
55 | return
56 | }
57 | setOAM(cfg.OamServerAddr, termChan, readyChan, &wg)
58 |
59 | mngr, err := ensim.NewManager(cfg)
60 | if err != nil {
61 | zlog.Err(err).Msg("failed to create manager")
62 | errFatal = err
63 | return
64 | }
65 |
66 | err = mngr.Start()
67 | if err != nil {
68 | zlog.Err(err).Msg("failed to start manager")
69 | errFatal = err
70 | return
71 | }
72 | readyChan <- true
73 |
74 | signal.Notify(sigChan,
75 | syscall.SIGHUP,
76 | syscall.SIGINT,
77 | syscall.SIGTERM,
78 | syscall.SIGQUIT,
79 | )
80 | s := <-sigChan
81 | zlog.Info().Msgf("Exiting Edge Node simulator: received signal %s", s)
82 | mngr.Stop()
83 | close(termChan)
84 |
85 | // wait until agents / oam server / teardown terminates.
86 | wg.Wait()
87 | }
88 |
--------------------------------------------------------------------------------
/edge-node-simulator/pkg/en/agents/pua_test.go:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | package agents_test
5 |
6 | import (
7 | "sync"
8 | "testing"
9 | "time"
10 |
11 | "github.com/stretchr/testify/assert"
12 |
13 | "github.com/open-edge-platform/infra-core/inventory/v2/pkg/logging"
14 | pb "github.com/open-edge-platform/infra-managers/maintenance/pkg/api/maintmgr/v1"
15 | "github.com/open-edge-platform/virtual-edge-node/edge-node-simulator/pkg/en/agents"
16 | utils_test "github.com/open-edge-platform/virtual-edge-node/edge-node-simulator/test/utils"
17 | )
18 |
19 | var zlog = logging.GetLogger("agents_test")
20 |
21 | func Test_PuaSched_Single(t *testing.T) {
22 | zlog.Info().Msg("Test_PuaSched_Single Started")
23 |
24 | wg := sync.WaitGroup{}
25 | termChan := make(chan bool)
26 | stateChan := make(chan pb.UpdateStatus_StatusType, 1)
27 | respChan := make(chan *pb.PlatformUpdateStatusResponse)
28 | pua := agents.NewPUA(stateChan)
29 | pua.Handle(&wg, termChan, respChan)
30 |
31 | currentUpdateStatus := pua.State()
32 | assert.Equal(t, pb.UpdateStatus_STATUS_TYPE_UP_TO_DATE, currentUpdateStatus)
33 |
34 | respUpdate := &pb.PlatformUpdateStatusResponse{
35 | UpdateSchedule: &pb.UpdateSchedule{
36 | SingleSchedule: &utils_test.MmSingleSchedule1,
37 | },
38 | UpdateSource: &pb.UpdateSource{
39 | KernelCommand: utils_test.OSResource.GetKernelCommand(),
40 | OsRepoUrl: utils_test.OSResource.GetImageUrl(),
41 | CustomRepos: utils_test.OSResource.GetUpdateSources(),
42 | },
43 | InstalledPackages: utils_test.OSResource.GetInstalledPackages(),
44 | }
45 | respChan <- respUpdate
46 |
47 | time.Sleep(time.Second * 2)
48 | gotState := <-stateChan
49 | zlog.Info().Msgf("Updated PUA state %v", gotState)
50 | currentUpdateStatus = pua.State()
51 | assert.Equal(t, pb.UpdateStatus_STATUS_TYPE_STARTED, currentUpdateStatus)
52 |
53 | time.Sleep(time.Second * 5)
54 | gotState = <-stateChan
55 | zlog.Info().Msgf("Updated PUA state %v", gotState)
56 | currentUpdateStatus = pua.State()
57 | assert.Equal(t, pb.UpdateStatus_STATUS_TYPE_UPDATED, currentUpdateStatus)
58 |
59 | termChan <- true
60 | wg.Wait()
61 | zlog.Info().Msg("Test_PuaSched_Single Finished")
62 | }
63 |
64 | func Test_PuaSched_Repeated(t *testing.T) {
65 | zlog.Info().Msg("Test_PuaSched_Repeated Started")
66 |
67 | wg := sync.WaitGroup{}
68 | termChan := make(chan bool)
69 | stateChan := make(chan pb.UpdateStatus_StatusType, 1)
70 | respChan := make(chan *pb.PlatformUpdateStatusResponse)
71 | pua := agents.NewPUA(stateChan)
72 | pua.Handle(&wg, termChan, respChan)
73 |
74 | currentUpdateStatus := pua.State()
75 | assert.Equal(t, pb.UpdateStatus_STATUS_TYPE_UP_TO_DATE, currentUpdateStatus)
76 |
77 | respUpdate := &pb.PlatformUpdateStatusResponse{
78 | UpdateSchedule: &pb.UpdateSchedule{
79 | RepeatedSchedules: utils_test.MmRepeatedSchedule1,
80 | },
81 | UpdateSource: &pb.UpdateSource{
82 | KernelCommand: utils_test.OSResource.GetKernelCommand(),
83 | OsRepoUrl: utils_test.OSResource.GetImageUrl(),
84 | CustomRepos: utils_test.OSResource.GetUpdateSources(),
85 | },
86 | InstalledPackages: utils_test.OSResource.GetInstalledPackages(),
87 | }
88 | respChan <- respUpdate
89 |
90 | time.Sleep(time.Second * 2)
91 | gotState := <-stateChan
92 | zlog.Info().Msgf("Updated PUA state %v", gotState)
93 | currentUpdateStatus = pua.State()
94 | assert.Equal(t, pb.UpdateStatus_STATUS_TYPE_UPDATED, currentUpdateStatus)
95 |
96 | termChan <- true
97 | wg.Wait()
98 | zlog.Info().Msg("Test_PuaSched_Repeated Finished")
99 | }
100 |
--------------------------------------------------------------------------------
/edge-node-simulator/pkg/en/defs/defs.go:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | package defs
5 |
6 | var (
7 | // Default values of configuration parameters.
8 | OrchUser = "" // update
9 | OrchPasswd = "" // update
10 | OrchKcClientID = "system-client"
11 | OrchProject = "sample-project"
12 | OrchOrg = "intel"
13 | DefaultCAPath = "/usr/local/share/ca-certificates/orch-ca.crt"
14 | DefaultoamServerAddr = "0.0.0.0:2379"
15 |
16 | DefaultFQDN = "kind.internal"
17 | DefaultBaseFolder = "/etc/intel_edge_node"
18 | )
19 |
20 | //nolint:gosec // These are not credentials
21 | var (
22 | ENClientFolder = "/client-credentials"
23 | ENClientIDPath = "/client-credentials/client_id"
24 | ENClientSecretPath = "/client-credentials/client_secret"
25 | ENClientTokenPath = "/client-credentials/access_token"
26 | ENClientNamePath = "/client-credentials/client_name"
27 | ENTenantIDPath = "/tenantId"
28 | )
29 |
30 | var TokenFolders = []string{
31 | "/tokens/node-agent",
32 | "/tokens/hd-agent",
33 | "/tokens/cluster-agent",
34 | "/tokens/platform-update-agent",
35 | "/tokens/platform-observability-agent",
36 | "/tokens/platform-telemetry-agent",
37 | "/tokens/prometheus",
38 | "/tokens/license-agent",
39 | }
40 |
41 | //nolint:gosec // These are not credentials
42 | var (
43 | NodeAgentTokenPath = "/tokens/node-agent/access_token"
44 | UpdateAgentTokenPath = "/tokens/platform-update-agent/access_token"
45 | HDAgentTokenPath = "/tokens/hd-agent/access_token"
46 | TelemetryAgentTokenPath = "/tokens/platform-telemetry-agent/access_token"
47 | LicenseAgentTokenPath = "/tokens/license-agent/access_token"
48 | )
49 |
50 | type Settings struct {
51 | CertCAPath string
52 | CertCA string
53 | OrchFQDN string
54 | ENGUID string
55 | ENSerial string
56 | EdgeAPIUser string
57 | EdgeAPIPass string
58 | EdgeOnboardUser string
59 | EdgeOnboardPass string
60 | RunAgents bool
61 | SetupTeardown bool
62 | OamServerAddr string
63 | BaseFolder string
64 | AutoProvision bool
65 | Project string
66 | Org string
67 | MACAddress string
68 | ENiC bool
69 | EnableDownloads bool
70 | URLFilesRS string
71 | TinkerActionsVersion string
72 | AgentsManifestVersion string
73 | TiberOSVersion string
74 | }
75 |
--------------------------------------------------------------------------------
/edge-node-simulator/pkg/en/keycloak/jwt.go:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | package keycloak
5 |
6 | import (
7 | "os"
8 | "time"
9 |
10 | "github.com/golang-jwt/jwt/v5"
11 | )
12 |
13 | const (
14 | // SharedSecretKey environment variable name for shared secret key for signing a token.
15 | SharedSecretKey = "SHARED_SECRET_KEY"
16 | secretKey = "randomSecretKey"
17 | writeRole = "infra-manager-core-write-role"
18 | readRole = "infra-manager-core-read-role"
19 | enReadWriteRole = "node-agent-readwrite-role"
20 | )
21 |
22 | func WithTenantID(tenantID string) Option {
23 | return func(o *Options) {
24 | o.tenantIDs = append(o.tenantIDs, tenantID)
25 | }
26 | }
27 |
28 | type Options struct {
29 | tenantIDs []string
30 | }
31 |
32 | type Option func(*Options)
33 |
34 | // parseOptions parses the given list of Option into an Options.
35 | func parseOptions(options ...Option) *Options {
36 | opts := &Options{}
37 | for _, option := range options {
38 | option(opts)
39 | }
40 | return opts
41 | }
42 |
43 | // CreateJWT returns random signing key and JWT token (HS256 encoded) in a string with both roles, read and write.
44 | // Only 1 token can persist in the system (otherwise, env variable holding secret key would be re-written).
45 | func CreateJWT() (string, string, error) {
46 | claims := &jwt.MapClaims{
47 | "iss": "https://keycloak.kind.internal/realms/master",
48 | "exp": time.Now().Add(time.Hour).Unix(),
49 | "typ": "Bearer",
50 | "realm_access": map[string]interface{}{
51 | "roles": []string{
52 | writeRole,
53 | readRole,
54 | },
55 | },
56 | }
57 |
58 | return CreateJWTWithClaims(claims)
59 | }
60 |
61 | // CreateENJWT returns random signing key and JWT token (HS256 encoded) in a string with EN's read-write role.
62 | // Only 1 token can persist in the system (otherwise, env variable holding secret key would be re-written).
63 | func CreateENJWT(opts ...Option) (string, string, error) {
64 | options := parseOptions(opts...)
65 | roles := []string{
66 | "default-roles-master",
67 | "release-service-access-token-read-role",
68 | }
69 |
70 | if len(options.tenantIDs) > 0 {
71 | for _, tID := range options.tenantIDs {
72 | roles = append(roles, tID+"_"+enReadWriteRole)
73 | }
74 | } else {
75 | roles = append(roles, enReadWriteRole)
76 | }
77 | claims := &jwt.MapClaims{
78 | "iss": "https://keycloak.kind.internal/realms/master",
79 | "exp": time.Now().Add(time.Hour).Unix(),
80 | "typ": "Bearer",
81 | "realm_access": map[string]interface{}{
82 | "roles": roles,
83 | },
84 | }
85 |
86 | return CreateJWTWithClaims(claims)
87 | }
88 |
89 | // CreateJWTWithClaims returns random signing key and JWT token (HS256 encoded) in a string with defined claims.
90 | func CreateJWTWithClaims(claims *jwt.MapClaims) (string, string, error) {
91 | os.Setenv(SharedSecretKey, secretKey)
92 | token := jwt.NewWithClaims(
93 | jwt.SigningMethodHS256,
94 | claims)
95 | jwtStr, err := token.SignedString([]byte(secretKey))
96 | if err != nil {
97 | return "", "", err
98 | }
99 | return secretKey, jwtStr, nil
100 | }
101 |
--------------------------------------------------------------------------------
/edge-node-simulator/pkg/en/keycloak/tenant.go:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | package keycloak
5 |
6 | import (
7 | "errors"
8 | "fmt"
9 | "os"
10 | "path/filepath"
11 | "regexp"
12 | "slices"
13 | "strings"
14 |
15 | "github.com/golang-jwt/jwt/v5"
16 | )
17 |
18 | const (
19 | tenantIDRoleSeparator = "_"
20 | uuidPattern = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$"
21 | tenantIDPath = "/tenantId"
22 | tenantPerm = 0o640
23 | )
24 |
25 | var uuidRegex = regexp.MustCompile(uuidPattern)
26 |
27 | type TokenClaims struct {
28 | jwt.RegisteredClaims
29 | RealmAccess RealmAccess `json:"realm_access"`
30 | }
31 |
32 | type RealmAccess struct {
33 | Roles []string `json:"roles"`
34 | }
35 |
36 | // Creates tenant ID file, if it already exists returns nil.
37 | func CreateTenantID(confAuth *ConfigAuth, token string) error {
38 | tenantIDFilepath := filepath.Join(confAuth.ClientCredsPath, tenantIDPath)
39 | return createTenantID(tenantIDFilepath, token)
40 | }
41 |
42 | func createTenantID(path, token string) error {
43 | file, err := CreateExcl(path, tenantPerm)
44 | if errors.Is(err, os.ErrExist) {
45 | return nil
46 | } else if err != nil {
47 | return err
48 | }
49 | defer file.Close()
50 |
51 | tenantID, err := getTenantID(token)
52 | if err != nil {
53 | removeFile(file.Name())
54 | return err
55 | }
56 |
57 | _, err = file.WriteString("TENANT_ID=" + tenantID)
58 | if err != nil {
59 | removeFile(file.Name())
60 | return err
61 | }
62 |
63 | return nil
64 | }
65 |
66 | func getTenantID(token string) (string, error) {
67 | parser := &jwt.Parser{}
68 | t, _, err := parser.ParseUnverified(token, &TokenClaims{})
69 | if err != nil {
70 | return "", err
71 | }
72 |
73 | claims, ok := t.Claims.(*TokenClaims)
74 | if !ok {
75 | return "", fmt.Errorf("unknown claims type")
76 | }
77 |
78 | var tenantIDs []string
79 | for _, role := range claims.RealmAccess.Roles {
80 | if strings.Contains(role, tenantIDRoleSeparator) {
81 | roleTID := strings.Split(role, tenantIDRoleSeparator)[0]
82 | if !uuidRegex.MatchString(roleTID) {
83 | continue
84 | }
85 |
86 | if !slices.Contains(tenantIDs, roleTID) {
87 | tenantIDs = append(tenantIDs, roleTID)
88 | }
89 | }
90 | }
91 |
92 | if len(tenantIDs) == 0 {
93 | return "", fmt.Errorf("no tenant ID found in JWT")
94 | }
95 | if len(tenantIDs) > 1 {
96 | return "", fmt.Errorf("multiple tenant IDs found in JWT: %v", tenantIDs)
97 | }
98 | return tenantIDs[0], nil
99 | }
100 |
101 | func removeFile(path string) {
102 | err := os.Remove(path)
103 | if err != nil {
104 | zlog.Error().Err(err).Msgf("failed to remove %v", path)
105 | }
106 | }
107 |
--------------------------------------------------------------------------------
/edge-node-simulator/pkg/en/onboard/artifacts_test.go:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | package onboard_test
5 |
6 | import (
7 | "testing"
8 |
9 | "github.com/stretchr/testify/assert"
10 |
11 | "github.com/open-edge-platform/virtual-edge-node/edge-node-simulator/pkg/en/defs"
12 | "github.com/open-edge-platform/virtual-edge-node/edge-node-simulator/pkg/en/onboard"
13 | )
14 |
15 | var (
16 | // Test data for the test cases.
17 | urlFilesRS = "registry-rs.edgeorchestration.intel.com"
18 | tinkerActionsVersion = "1.17.1"
19 | agentsManifestVersion = "1.0.0"
20 | orchFQDN = "kind.internal"
21 | tiberOSVersion = "1.0.0"
22 | )
23 |
24 | func TestGetArtifacts(t *testing.T) {
25 | cfg := &defs.Settings{
26 | OrchFQDN: orchFQDN,
27 | BaseFolder: "/tmp",
28 | EnableDownloads: true,
29 | URLFilesRS: urlFilesRS,
30 | TinkerActionsVersion: tinkerActionsVersion,
31 | AgentsManifestVersion: agentsManifestVersion,
32 | TiberOSVersion: tiberOSVersion,
33 | }
34 |
35 | err := onboard.GetArtifacts(cfg)
36 | assert.NoError(t, err)
37 | }
38 |
--------------------------------------------------------------------------------
/edge-node-simulator/pkg/en/onboard/onboard.go:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | package onboard
5 |
6 | import (
7 | "github.com/open-edge-platform/infra-core/inventory/v2/pkg/logging"
8 | )
9 |
10 | var zlog = logging.GetLogger("onboard")
11 |
--------------------------------------------------------------------------------
/edge-node-simulator/pkg/en/onboard/proto/README.md:
--------------------------------------------------------------------------------
1 | # NOTICE
2 |
3 | The proto folder was copied from the repository: `https://github.com/tinkerbell/tink/tree/main/internal/proto`.
4 |
--------------------------------------------------------------------------------
/edge-node-simulator/pkg/en/onboard/proto/doc.go:
--------------------------------------------------------------------------------
1 | /*
2 | Package proto contains generated gRPC and Protobuf types for use in Tink Worker - Tink
3 | Server communication. Maintaining the generated code separately from core packages helps maintain
4 | a smaller surface area for those packages.
5 | */
6 | package proto
7 |
--------------------------------------------------------------------------------
/edge-node-simulator/pkg/en/onboard/proto/workflow/v2/workflow.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 |
3 | package internal.proto.workflow.v2;
4 |
5 | option go_package = "github.com/tinkerbell/tink/internal/proto/workflow/v2;workflow";
6 |
7 | // WorkflowService is responsible for retrieving workflows to be executed by the agent and
8 | // publishing events as a workflow executes.
9 | service WorkflowService {
10 | // GetWorkflows creates a stream that will receive workflows intended for the agent identified
11 | // by the GetWorkflowsRequest.agent_id.
12 | rpc GetWorkflows(GetWorkflowsRequest) returns (stream GetWorkflowsResponse) {}
13 |
14 | // PublishEvent publishes a workflow event.
15 | rpc PublishEvent(PublishEventRequest) returns (PublishEventResponse) {}
16 | }
17 |
18 | message GetWorkflowsRequest {
19 | string agent_id = 1;
20 | }
21 |
22 | message GetWorkflowsResponse {
23 | oneof cmd {
24 | StartWorkflow start_workflow = 1;
25 | StopWorkflow stop_workflow = 2;
26 | }
27 |
28 | message StartWorkflow {
29 | Workflow workflow = 1;
30 | }
31 |
32 | message StopWorkflow {
33 | string workflow_id = 1;
34 | }
35 | }
36 |
37 | message PublishEventRequest {
38 | Event event = 1;
39 | }
40 |
41 | message PublishEventResponse {}
42 |
43 | message Workflow {
44 | // A unique identifier for a workflow.
45 | string workflow_id = 1;
46 |
47 | // The actions that make up the workflow.
48 | repeated Action actions = 2;
49 |
50 | message Action {
51 | // A unique identifier for an action in the context of a workflow.
52 | string id = 1;
53 |
54 | // The name of the action. This can be used to identify actions in logging.
55 | string name = 2;
56 |
57 | // The image to run.
58 | string image = 3;
59 |
60 | // The command to execute when launching the image. When using Docker as the action runtime
61 | // it is used as the entrypoint.
62 | optional string cmd = 4;
63 |
64 | // Arguments to pass to the container.
65 | repeated string args = 5;
66 |
67 | // Environment variables to configure when launching the container.
68 | map<string, string> env = 6;
69 |
70 | // Volumes to mount when launching the container.
71 | repeated string volumes = 7;
72 |
73 | // The network namespace to launch the container in.
74 | optional string network_namespace = 8;
75 | }
76 | }
77 |
78 | message Event {
79 | // A unique identifier for a workflow.
80 | string workflow_id = 1;
81 |
82 | oneof event {
83 | ActionStarted action_started = 2;
84 | ActionSucceeded action_succeeded = 3;
85 | ActionFailed action_failed = 4;
86 | WorkflowRejected workflow_rejected = 5;
87 | }
88 |
89 | message ActionStarted {
90 | // A unique identifier for an action in the context of a workflow.
91 | string action_id = 1;
92 | }
93 |
94 | message ActionSucceeded {
95 | // A unique identifier for an action in the context of a workflow.
96 | string action_id = 1;
97 | }
98 |
99 | message ActionFailed {
100 | // A unique identifier for an action in the context of a workflow.
101 | string action_id = 1;
102 |
103 | // A UpperCamelCase word or phrase concisely describing why an action failed. It is typically
104 | // provided by the action itself.
105 | optional string failure_reason = 2;
106 |
107 | // A free-form human readable string elaborating on the reason for failure. It is typically
108 | // provided by the action itself.
109 | optional string failure_message = 3;
110 |
111 | }
112 |
113 | message WorkflowRejected {
114 | // A message describing why the workflow was rejected.
115 | string message = 2;
116 | }
117 | }
--------------------------------------------------------------------------------
/edge-node-simulator/pkg/en/utils/utils_test.go:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | package utils_test
5 |
6 | import (
7 | "testing"
8 |
9 | "github.com/stretchr/testify/assert"
10 |
11 | "github.com/open-edge-platform/virtual-edge-node/edge-node-simulator/pkg/en/utils"
12 | )
13 |
14 | func Test_GetAddresses(t *testing.T) {
15 | mac1, err := utils.GetRandomMACAddress()
16 | assert.NoError(t, err)
17 | assert.NotNil(t, mac1)
18 | mac2, err := utils.GetRandomMACAddress()
19 | assert.NoError(t, err)
20 | assert.NotNil(t, mac2)
21 | assert.NotEqual(t, mac1, mac2)
22 | }
23 |
--------------------------------------------------------------------------------
/edge-node-simulator/pkg/sim/cfg.go:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | package sim
5 |
6 | import (
7 | "flag"
8 | "fmt"
9 | "math"
10 | )
11 |
12 | type Config struct {
13 | CAPath string
14 | KeyPath string
15 | CertPath string
16 | GRPCPort int16
17 | OrchFQDN string
18 | OrchIP string
19 | OrchCAPath string
20 | BaseFolder string
21 | OamServerAddr string
22 | EnableDownloads bool
23 | URLFilesRS string
24 | TinkerActionsVersion string
25 | AgentsManifestVersion string
26 | TiberOSVersion string
27 | }
28 |
29 | var (
30 | caPath = flag.String(
31 | "caPath",
32 | "",
33 | "CA path for gRPC server",
34 | )
35 | keyPath = flag.String(
36 | "keyPath",
37 | "",
38 | "keyPath for gRPC server",
39 | )
40 | certPath = flag.String(
41 | "certPath",
42 | "",
43 | "certPath for gRPC server",
44 | )
45 | gRPCPort = flag.Int(
46 | "gRPCPort",
47 | defaultgRPCPort,
48 | "gRPCPort for server",
49 | )
50 |
51 | orchFQDN = flag.String(
52 | "orchFQDN",
53 | defaultOrchFQDN,
54 | "orchestrator FQDN",
55 | )
56 |
57 | orchIP = flag.String(
58 | "orchIP",
59 | "",
60 | "IP of orch",
61 | )
62 | orchCAPath = flag.String(
63 | "orchCAPath",
64 | "",
65 | "Path of orch CA file",
66 | )
67 | baseFolder = flag.String(
68 | "baseFolder",
69 | defaultBaseFolder,
70 | "Path of folder to store edge node credentials/tokens",
71 | )
72 | oamServerAddr = flag.String(
73 | "oamServerAddress",
74 | defaultoamServerAddr,
75 | "default OAM server address",
76 | )
77 | enableDownloads = flag.Bool(
78 | "enableDownloads",
79 | false,
80 | "enable downloads of artifacts in the simulator",
81 | )
82 | urlFilesRS = flag.String(
83 | "urlFilesRS",
84 | defaultURLFilesRS,
85 | "URL of files for RS",
86 | )
87 | tinkerActionsVersion = flag.String(
88 | "tinkerActionsVersion",
89 | defaultTinkerActionsVersion,
90 | "Version of tinker actions",
91 | )
92 | agentsManifestVersion = flag.String(
93 | "agentsManifestVersion",
94 | defaultAgentsManifestVersion,
95 | "Version of agents manifest",
96 | )
97 | tiberOSVersion = flag.String(
98 | "tiberOSVersion",
99 | defaultTiberOSVersion,
100 | "Version of TiberOS",
101 | )
102 | )
103 |
104 | // IntToInt16 safely converts int to int16. This is needed for 64bit systems where int is defined as a 64bit integer.
105 | // Returns an error when the value is out of the range.
106 | func IntToInt16(i int) (int16, error) {
107 | if i < math.MinInt16 || i > math.MaxInt16 {
108 | return 0, fmt.Errorf("int value exceeds int16 range")
109 | }
110 | res := int16(i)
111 | if int(res) != i {
112 | zlog.InfraSec().InfraError("%#v of type int is out of range for int16", i).Msg("")
113 | return 0, fmt.Errorf("%#v of type int is out of range for int16", i)
114 | }
115 | return res, nil
116 | }
117 |
118 | func Cfg() (*Config, error) {
119 | flag.Parse()
120 |
121 | grpcPort, err := IntToInt16(*gRPCPort)
122 | if err != nil {
123 | zlog.InfraSec().InfraError("failed to convert gRPCPort to int16").Err(err).Msg("")
124 | return nil, err
125 | }
126 |
127 | cfg := &Config{
128 | CAPath: *caPath,
129 | KeyPath: *keyPath,
130 | CertPath: *certPath,
131 | GRPCPort: grpcPort,
132 | OrchFQDN: *orchFQDN,
133 | OrchIP: *orchIP,
134 | OrchCAPath: *orchCAPath,
135 | BaseFolder: *baseFolder,
136 | OamServerAddr: *oamServerAddr,
137 | EnableDownloads: *enableDownloads,
138 | URLFilesRS: *urlFilesRS,
139 | TinkerActionsVersion: *tinkerActionsVersion,
140 | AgentsManifestVersion: *agentsManifestVersion,
141 | TiberOSVersion: *tiberOSVersion,
142 | }
143 | zlog.Info().Msgf("Loaded cfg: %v", cfg)
144 | return cfg, nil
145 | }
146 |
--------------------------------------------------------------------------------
/edge-node-simulator/pkg/sim/defs.go:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | package sim
5 |
6 | var (
7 | defaultoamServerAddr = "0.0.0.0:2379"
8 | defaultgRPCPort = 5001
9 | defaultBaseFolder = "/tmp/scale"
10 | defaultOrchFQDN = "kind.internal"
11 |
12 | defaultURLFilesRS = "registry-rs.edgeorchestration.intel.com/"
13 | defaultTinkerActionsVersion = "1.0.0"
14 | defaultAgentsManifestVersion = "1.0.0"
15 | defaultTiberOSVersion = "1.0.0"
16 | )
17 |
--------------------------------------------------------------------------------
/edge-node-simulator/pkg/sim/manager.go:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | package sim
5 |
6 | import (
7 | "github.com/open-edge-platform/infra-core/inventory/v2/pkg/logging"
8 | "github.com/open-edge-platform/orch-library/go/pkg/northbound"
9 | )
10 |
11 | var zlog = logging.GetLogger("ifmsim")
12 |
13 | type Manager interface {
14 | Start() error
15 | Stop() error
16 | }
17 |
18 | type manager struct {
19 | store Store
20 | cfg *Config
21 | server *northbound.Server
22 | }
23 |
24 | func NewManager(cfg *Config) (Manager, error) {
25 | store := NewStore()
26 |
27 | return &manager{
28 | cfg: cfg,
29 | store: store,
30 | server: nil,
31 | }, nil
32 | }
33 |
34 | func (m *manager) Start() error {
35 | zlog.Info().Msg("Starting Manager")
36 | err := m.startNorthbound(m.cfg)
37 | if err != nil {
38 | return err
39 | }
40 |
41 | return nil
42 | }
43 |
44 | func (m *manager) Stop() error {
45 | zlog.Info().Msg("Stopping Manager")
46 | m.server.Stop()
47 | return nil
48 | }
49 |
50 | func (m *manager) startNorthbound(cfg *Config) error {
51 | m.server = northbound.NewServer(northbound.NewServerCfg(
52 | cfg.CAPath,
53 | cfg.KeyPath,
54 | cfg.CertPath,
55 | cfg.GRPCPort,
56 | true,
57 | northbound.SecurityConfig{}))
58 |
59 | m.server.AddService(NewIFMSimService(m.store, cfg))
60 |
61 | doneCh := make(chan error)
62 | go func() {
63 | err := m.server.Serve(func(started string) {
64 | zlog.Info().Msgf("Started NBI on %s", started)
65 | close(doneCh)
66 | })
67 | if err != nil {
68 | doneCh <- err
69 | }
70 | }()
71 | return <-doneCh
72 | }
73 |
--------------------------------------------------------------------------------
/edge-node-simulator/pkg/sim/utils.go:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | package sim
5 |
6 | import (
7 | "context"
8 | "fmt"
9 | "os"
10 | "path/filepath"
11 |
12 | "google.golang.org/grpc"
13 | "google.golang.org/grpc/credentials/insecure"
14 |
15 | "github.com/open-edge-platform/infra-core/inventory/v2/pkg/cert"
16 | )
17 |
18 | func LoadFile(filePath string) (string, error) {
19 | dirFile, err := filepath.Abs(filePath)
20 | if err != nil {
21 | zlog.Err(err).Msgf("failed LoadFile, nonexistent filepath %s", filePath)
22 | return "", err
23 | }
24 |
25 | dataBytes, err := os.ReadFile(dirFile)
26 | if err != nil {
27 | zlog.Err(err).Msgf("failed to read file %s", dirFile)
28 | return "", err
29 | }
30 |
31 | dataStr := string(dataBytes)
32 | return dataStr, nil
33 | }
34 |
35 | // Connect creates a gRPC connection to a server.
36 | func Connect(
37 | _ context.Context,
38 | address string,
39 | caPath, certPath, keyPath string,
40 | insec bool,
41 | opts ...grpc.DialOption,
42 | ) (*grpc.ClientConn, error) {
43 | var conn *grpc.ClientConn
44 |
45 | if insec {
46 | dialOpt := grpc.WithTransportCredentials(insecure.NewCredentials())
47 | opts = append(opts, dialOpt)
48 | } else {
49 | if caPath == "" || certPath == "" || keyPath == "" {
50 | err := fmt.Errorf("CaCertPath %s or TlsCerPath %s or TlsKeyPath %s were not provided",
51 | caPath, certPath, keyPath,
52 | )
53 | zlog.Fatal().Err(err).Msgf("CaCertPath %s or TlsCerPath %s or TlsKeyPath %s were not provided\n",
54 | caPath, certPath, keyPath,
55 | )
56 | return nil, err
57 | }
58 | // setting secure gRPC connection
59 | creds, err := cert.HandleCertPaths(caPath, keyPath, certPath, true)
60 | if err != nil {
61 | zlog.Fatal().Err(err).Msgf("an error occurred while loading credentials to server %v, %v, %v: %v\n",
62 | caPath, certPath, keyPath, err,
63 | )
64 | return nil, err
65 | }
66 | opts = append(opts, grpc.WithTransportCredentials(creds))
67 | }
68 |
69 | // if testing, use a bufconn, otherwise TCP
70 | var err error
71 | if address == "bufconn" {
72 | conn, err = grpc.NewClient("", opts...)
73 | } else {
74 | conn, err = grpc.NewClient(address, opts...)
75 | }
76 | if err != nil {
77 | zlog.InfraSec().InfraErr(err).Msgf("Unable to dial connection to inventory client address %s", address)
78 | return nil, err
79 | }
80 | return conn, nil
81 | }
82 |
--------------------------------------------------------------------------------
/edge-node-simulator/pkg/sim/watcher.go:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | package sim
5 |
6 | import (
7 | "sync"
8 |
9 | "github.com/google/uuid"
10 | )
11 |
12 | // EventChannel is a channel which can accept an Event.
13 | type EventChannel chan Event
14 |
15 | // Watchers stores the information about watchers.
16 | type Watchers struct {
17 | watchers map[uuid.UUID]Watcher
18 | rm sync.RWMutex
19 | }
20 |
21 | // Watcher event watcher.
22 | type Watcher struct {
23 | id uuid.UUID
24 | ch chan<- Event
25 | }
26 |
27 | // NewWatchers creates watchers.
28 | func NewWatchers() *Watchers {
29 | return &Watchers{
30 | watchers: make(map[uuid.UUID]Watcher),
31 | }
32 | }
33 |
34 | // Send sends an event for all registered watchers.
35 | func (ws *Watchers) Send(event Event) {
36 | ws.rm.RLock()
37 | go func() {
38 | for _, watcher := range ws.watchers {
39 | watcher.ch <- event
40 | }
41 | }()
42 | ws.rm.RUnlock()
43 | }
44 |
45 | // AddWatcher adds a watcher.
46 | func (ws *Watchers) AddWatcher(id uuid.UUID, ch chan<- Event) error {
47 | ws.rm.Lock()
48 | watcher := Watcher{
49 | id: id,
50 | ch: ch,
51 | }
52 | ws.watchers[id] = watcher
53 | ws.rm.Unlock()
54 | return nil
55 | }
56 |
57 | // RemoveWatcher removes a watcher.
58 | func (ws *Watchers) RemoveWatcher(id uuid.UUID) error {
59 | ws.rm.Lock()
60 | watchers := make(map[uuid.UUID]Watcher, len(ws.watchers))
61 | for _, watcher := range ws.watchers {
62 | if watcher.id != id {
63 | watchers[watcher.id] = watcher
64 | }
65 | }
66 | ws.watchers = watchers
67 | ws.rm.Unlock()
68 | return nil
69 | }
70 |
--------------------------------------------------------------------------------
/edge-node-simulator/requirements.txt:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | # lint yaml
5 | yamllint~=1.35.1
6 |
7 | # license check
8 | reuse~=5.0.2
9 |
--------------------------------------------------------------------------------
/edge-node-simulator/test/README.md:
--------------------------------------------------------------------------------
1 | # Integration Tests
2 |
3 | The following steps describe how to run the day 0, day 1, and day 2 integration tests using
4 | the Edge Node simulator.
5 | Each test case is documented in its respective test file/definition.
6 |
7 | ## Requirements
8 |
9 | The following environment variables must be defined for all day0, day1 and day2 tests (an example export snippet is provided at the end of this document).
10 |
11 | ```bash
12 | ORCH_FQDN="" # The FQDN of the target orchestrator cluster
13 | ENSIM_ADDR="localhost:3196" # The gRPC server address of the Edge Node simulator (if/when needed) - e.g., localhost:3196
14 | CA_PATH="" # The file path of the CA certificate of the target orchestrator cluster
15 | ONBUSER="" # The orch Keycloak user used to retrieve a token for Infrastructure Manager SBI interactions of ENSIM
16 | ONBPASS="" # The password of ONBUSER, used for Infrastructure Manager SBI interactions of ENSIM
17 | APIUSER="" # The orch Keycloak user used to retrieve a token for Infrastructure Manager REST API interactions; falls back to the default if not specified
18 | APIPASS="" # The password of APIUSER; falls back to the default if not specified
19 | PROJECT="" # The name of the project to which ONBUSER and APIUSER belong
20 | ```
21 |
22 | ## Edge Node Simulator Deployment
23 |
24 | Deploy the edge node simulator in the same namespace as orch-infra (Edge Infrastructure Manager).
25 |
26 | ```bash
27 | helm upgrade --install -n orch-infra ensim \
28 | oci://registry-rs.edgeorchestration.intel.com/edge-orch/infra/charts/ensim \
29 | --set global.registry.name=registry-rs.edgeorchestration.intel.com/edge-orch/ \
30 | --set configArgs.server.orchFQDN=kind.internal \
31 | --set tlsSecretName=gateway-ca-cert
32 |
33 | sleep 5
34 | kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=ensim -n orch-infra --timeout=5m
35 | ```
36 |
37 | ## Run Integration Tests
38 |
39 | Set up port-forwarding to the following targets:
40 |
41 | ```bash
42 | kubectl port-forward svc/ensim -n orch-infra --address 0.0.0.0 3196:3196 &
43 | kubectl port-forward svc/api -n orch-infra --address 0.0.0.0 8080:8080 &
44 | ```
45 |
46 | ### Run day0 integration tests
47 |
48 | ```bash
49 | ginkgo -v -r --fail-fast --race --json-report infra-tests-day0.json --output-dir . --label-filter="infra-tests-day0" ./test/infra -- \
50 | -project=${PROJECT} -projectID=${PROJECTID} -caFilepath=${CA_PATH} -simAddress=${ENSIM_ADDR} \
51 | -orchFQDN=${ORCH_FQDN} \
52 | -edgeAPIUser=${APIUSER} -edgeAPIPass=${APIPASS} \
53 | -edgeOnboardUser=${ONBUSER} -edgeOnboardPass=${ONBPASS}
54 | ```
55 |
56 | ### Run day1 integration tests
57 |
58 | ```bash
59 | ginkgo -v -r --fail-fast --race --json-report infra-tests-day1.json --output-dir . --label-filter="infra-tests-day1" ./test/infra -- \
60 | -project=${PROJECT} -projectID=${PROJECTID} -caFilepath=${CA_PATH} -simAddress=${ENSIM_ADDR} \
61 | -orchFQDN=${ORCH_FQDN} \
62 | -edgeAPIUser=${APIUSER} -edgeAPIPass=${APIPASS} \
63 | -edgeOnboardUser=${ONBUSER} -edgeOnboardPass=${ONBPASS}
64 | ```
65 |
66 | ### Run day2 integration tests
67 |
68 | ```bash
69 | ginkgo -v -r --fail-fast --race --json-report infra-tests-day2.json --output-dir . --label-filter="infra-tests-day2" ./test/infra -- \
70 | -project=${PROJECT} -projectID=${PROJECTID} -caFilepath=${CA_PATH} -simAddress=${ENSIM_ADDR} \
71 | -orchFQDN=${ORCH_FQDN} \
72 | -edgeAPIUser=${APIUSER} -edgeAPIPass=${APIPASS} \
73 | -edgeOnboardUser=${ONBUSER} -edgeOnboardPass=${ONBPASS}
74 | ```
75 |
76 | ## Run hosts/locations cleanup
77 |
78 | ```bash
79 | ginkgo -v -r --fail-fast --race --label-filter="cleanup" ./test/infra -- \
80 | -project=${PROJECT} -projectID=${PROJECTID} -caFilepath=${CA_PATH} -simAddress=${ENSIM_ADDR} \
81 | -orchFQDN=${ORCH_FQDN} \
82 | -edgeAPIUser=${APIUSER} -edgeAPIPass=${APIPASS} \
83 | -edgeOnboardUser=${ONBUSER} -edgeOnboardPass=${ONBPASS}
84 | ```
85 |
86 | ## Kill port-forward to ensim/api
87 |
88 | ```bash
89 | kill $(ps -eaf | grep 'kubectl' | grep 'port-forward svc/ensim' | awk '{print $2}')
90 | kill $(ps -eaf | grep 'kubectl' | grep 'port-forward svc/api' | awk '{print $2}')
91 | ```
92 |
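93 | ## Example environment setup
94 |
95 | The snippet below is a minimal, illustrative example of exporting the variables listed in the
96 | Requirements section; all values are placeholders and must be adjusted to your environment.
97 | Note that the ginkgo commands above also reference `${PROJECTID}`, the identifier of the
98 | project named by `PROJECT`.
99 |
100 | ```bash
101 | export ORCH_FQDN="kind.internal"
102 | export ENSIM_ADDR="localhost:3196"
103 | export CA_PATH="${HOME}/orch-ca.crt"
104 | export ONBUSER="onboarding-user"
105 | export ONBPASS="onboarding-password"
106 | export APIUSER="api-user"
107 | export APIPASS="api-password"
108 | export PROJECT="sample-project"
109 | export PROJECTID="sample-project-id"
110 | ```
111 |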
--------------------------------------------------------------------------------
/edge-node-simulator/test/ensim/stats_test.go:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | package ensim_test
5 |
6 | import (
7 | "context"
8 | "testing"
9 |
10 | "github.com/stretchr/testify/assert"
11 | "github.com/stretchr/testify/require"
12 |
13 | ensimapi "github.com/open-edge-platform/virtual-edge-node/edge-node-simulator/pkg/api/ensim/v1"
14 | ensim "github.com/open-edge-platform/virtual-edge-node/edge-node-simulator/pkg/sim"
15 | flags_test "github.com/open-edge-platform/virtual-edge-node/edge-node-simulator/test/flags"
16 | )
17 |
18 | func TestENSim_Stats(t *testing.T) {
19 | zlog.Info().Msg("TestENSim_Stats Started")
20 |
21 | cfg := flags_test.GetConfig()
22 | require.NotNil(t, cfg)
23 |
24 | ctx, cancel := context.WithCancel(context.Background())
25 | defer cancel()
26 |
27 | simClient, err := ensim.NewClient(ctx, cfg.ENSimAddress)
28 | require.NoError(t, err)
29 | defer simClient.Close()
30 |
31 | listNodes, err := simClient.List(ctx)
32 | require.NoError(t, err)
33 | assert.NotEqual(t, 0, len(listNodes))
34 |
35 | setupFaulty := func(node *ensimapi.Node) ([]*ensimapi.NodeStatus, bool) {
36 | faults := []*ensimapi.NodeStatus{}
37 | status := node.GetStatus()
38 | for _, stat := range status {
39 | if stat.GetMode() != ensimapi.StatusMode_STATUS_MODE_OK {
40 | faults = append(faults, stat)
41 | }
42 | }
43 | if len(faults) > 0 {
44 | return faults, true
45 | }
46 | return nil, false
47 | }
48 |
49 | agentsOff := func(node *ensimapi.Node) ([]string, bool) {
50 | statesOff := []string{}
51 | states := node.GetAgentsStates()
52 | for _, state := range states {
53 | if state.GetCurrentState() == ensimapi.AgentState_AGENT_STATE_OFF {
54 | statesOff = append(statesOff, state.GetAgentType().String())
55 | }
56 | }
57 |
58 | if len(statesOff) > 0 {
59 | return statesOff, true
60 | }
61 | return nil, false
62 | }
63 |
64 | faultyNodes := 0
65 | faultyAgents := 0
66 |
67 | faultsStats := map[string]int{}
68 | faultsReasons := map[string]int{}
69 | processFaults := func(faults []*ensimapi.NodeStatus) {
70 | for _, fault := range faults {
71 | faultsStats[fault.GetSource().String()]++
72 | faultsReasons[fault.GetSource().String()+"-"+fault.GetDetails()]++
73 | }
74 | }
75 |
76 | agentsOffStats := map[string]int{}
77 | processAgentsOff := func(faults []string) {
78 | for _, fault := range faults {
79 | agentsOffStats[fault]++
80 | }
81 | }
82 |
83 | for _, node := range listNodes {
84 | faults, hasFaults := setupFaulty(node)
85 | agents, hasAgentsOff := agentsOff(node)
86 |
87 | if hasFaults {
88 | faultyNodes++
89 | processFaults(faults)
90 | }
91 | if hasAgentsOff {
92 | faultyAgents++
93 | processAgentsOff(agents)
94 | }
95 | }
96 |
97 | zlog.Info().Msgf("TestENSim_Stats summary: faulty %d nodes, %d agents, total %d", faultyNodes, faultyAgents, len(listNodes))
98 | zlog.Info().Msgf("Faults stats: %v", faultsStats)
99 | zlog.Info().Msgf("Faults details (total %d): %v", len(faultsReasons), faultsReasons)
100 | zlog.Info().Msgf("Agents Off: %v", agentsOffStats)
101 | zlog.Info().Msg("TestENSim_Stats Finished")
102 | }
103 |
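104 | // Example (illustrative) invocation of this test against a port-forwarded simulator; the
105 | // values are placeholders:
106 | //
107 | // go test ./test/ensim -run TestENSim_Stats -v -args -simAddress=localhost:3196 \
108 | //   -project=sample-project -caFilepath=ca-cert.pem -orchFQDN=kind.internal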
--------------------------------------------------------------------------------
/edge-node-simulator/test/flags/flags.go:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | package flags
5 |
6 | import (
7 | "flag"
8 |
9 | "github.com/open-edge-platform/infra-core/inventory/v2/pkg/logging"
10 | )
11 |
12 | var zlog = logging.GetLogger("flags")
13 |
14 | type TestConfig struct {
15 | OrchFQDN string
16 | EdgeAPIUser string
17 | EdgeAPIPass string
18 | EdgeOnboardUser string
19 | EdgeOnboardPass string
20 | CAPath string
21 | ENSimAddress string
22 | Project string
23 | AmountEdgeNodes int
24 | DeployEdgeNodes bool
25 | CreateOrgProject bool
26 | Cleanup bool
27 | }
28 |
29 | func GetDefaultConfig() *TestConfig {
30 | return &TestConfig{
31 | OrchFQDN: "kind.internal",
32 | EdgeAPIUser: "", // update
33 | EdgeAPIPass: "", // update
34 | EdgeOnboardUser: "", // update
35 | EdgeOnboardPass: "", // update
36 | CAPath: "",
37 | ENSimAddress: "localhost:5001",
38 | Project: "",
39 | AmountEdgeNodes: 1,
40 | DeployEdgeNodes: false,
41 | CreateOrgProject: false,
42 | Cleanup: false,
43 | }
44 | }
45 |
46 | var (
47 | defaultCfg = GetDefaultConfig()
48 |
49 | flagOrchestratorFQDN = flag.String(
50 | "orchFQDN", defaultCfg.OrchFQDN,
51 | "The orch cluster FQDN",
52 | )
53 |
54 | flagEdgeAPIUser = flag.String(
55 | "edgeAPIUser", defaultCfg.EdgeAPIUser,
56 | "The orch cluster EdgeAPIUser",
57 | )
58 |
59 | flagEdgeAPIPass = flag.String(
60 | "edgeAPIPass", defaultCfg.EdgeAPIPass,
61 | "The orch cluster EdgeAPIPass",
62 | )
63 |
64 | flagEdgeOnboardUser = flag.String(
65 | "edgeOnboardUser", defaultCfg.EdgeOnboardUser,
66 | "The orch cluster EdgeOnboardUser",
67 | )
68 |
69 | flagEdgeOnboardPass = flag.String(
70 | "edgeOnboardPass", defaultCfg.EdgeOnboardPass,
71 | "The orch cluster EdgeOnboardPass",
72 | )
73 |
74 | simAddress = flag.String(
75 | "simAddress",
76 | defaultCfg.ENSimAddress, "The gRPC address of the Infrastructure Manager simulator",
77 | )
78 |
79 | caPath = flag.String(
80 | "caFilepath",
81 | "", "The Infrastructure Manager cert CA file path",
82 | )
83 |
84 | project = flag.String(
85 | "project",
86 | defaultCfg.Project, "The project name",
87 | )
88 |
89 | amountEdgeNodes = flag.Int(
90 | "amountEdgeNodes",
91 | defaultCfg.AmountEdgeNodes, "The number of edge nodes to be used in the tests",
92 | )
93 |
94 | deployEdgeNodes = flag.Bool(
95 | "deployEdgeNodes",
96 | defaultCfg.DeployEdgeNodes, "Flag to deploy edge nodes to execute tests",
97 | )
98 |
99 | createOrgProject = flag.Bool(
100 | "createOrgProject",
101 | defaultCfg.CreateOrgProject, "Flag to create org/project to execute tests",
102 | )
103 |
104 | cleanup = flag.Bool(
105 | "cleanup",
106 | defaultCfg.Cleanup, "Flag to perform cleanup of hosts/instances/schedules in Infrastructure Manager",
107 | )
108 | )
109 |
110 | func GetConfig() *TestConfig {
111 | flag.Parse()
112 |
113 | cfg := &TestConfig{
114 | OrchFQDN: *flagOrchestratorFQDN,
115 | EdgeAPIUser: *flagEdgeAPIUser,
116 | EdgeAPIPass: *flagEdgeAPIPass,
117 | EdgeOnboardUser: *flagEdgeOnboardUser,
118 | EdgeOnboardPass: *flagEdgeOnboardPass,
119 | ENSimAddress: *simAddress,
120 | CAPath: *caPath,
121 | Project: *project,
122 | AmountEdgeNodes: *amountEdgeNodes,
123 | DeployEdgeNodes: *deployEdgeNodes,
124 | CreateOrgProject: *createOrgProject,
125 | Cleanup: *cleanup,
126 | }
127 | zlog.Info().Msgf("GetConfig %v", cfg)
128 | return cfg
129 | }
130 |
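131 | // Example (illustrative): these flags are passed to the Ginkgo suites after the "--"
132 | // separator, for instance:
133 | //
134 | // ginkgo -v ./test/infra -- -project=sample-project -simAddress=localhost:3196 \
135 | //   -caFilepath=ca-cert.pem -orchFQDN=kind.internal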
--------------------------------------------------------------------------------
/edge-node-simulator/test/infra/cleanup_test.go:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | package infra_test
5 |
6 | import (
7 | "net/http"
8 |
9 | . "github.com/onsi/ginkgo/v2"
10 | . "github.com/onsi/gomega"
11 |
12 | flags_test "github.com/open-edge-platform/virtual-edge-node/edge-node-simulator/test/flags"
13 | utils_test "github.com/open-edge-platform/virtual-edge-node/edge-node-simulator/test/utils"
14 | )
15 |
16 | var _ = Describe("Infrastructure Manager integration tests", Label(cleanupLabel), func() {
17 | var cfg *flags_test.TestConfig
18 | var httpClient *http.Client
19 |
20 | BeforeEach(func() {
21 | cfg = flags_test.GetConfig()
22 | Expect(cfg).NotTo(BeNil())
23 |
24 | certCA, err := utils_test.LoadFile(cfg.CAPath)
25 | Expect(err).To(BeNil())
26 |
27 | httpClient, err = utils_test.GetClientWithCA(certCA)
28 | Expect(err).To(BeNil())
29 | })
30 |
31 | Describe("Infrastructure Manager cleanup", Label(cleanupLabel), func() {
32 | It("should cleanup all hosts and locations in Infrastructure Manager", func(ctx SpecContext) {
33 | errCleanup := utils_test.HelperCleanupHostsAPI(ctx, httpClient, cfg)
34 | Expect(errCleanup).To(BeNil())
35 | errCleanup = utils_test.HelperCleanupSchedulesAPI(ctx, httpClient, cfg)
36 | Expect(errCleanup).To(BeNil())
37 | errCleanup = utils_test.HelperCleanupLocationsAPI(ctx, httpClient, cfg)
38 | Expect(errCleanup).To(BeNil())
39 | })
40 | })
41 | })
42 |
--------------------------------------------------------------------------------
/edge-node-simulator/test/infra/common_test.go:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | package infra_test
5 |
6 | import (
7 | "context"
8 | "fmt"
9 | "time"
10 |
11 | "github.com/google/uuid"
12 |
13 | "github.com/open-edge-platform/infra-core/inventory/v2/pkg/logging"
14 | ensimapi "github.com/open-edge-platform/virtual-edge-node/edge-node-simulator/pkg/api/ensim/v1"
15 | ensim "github.com/open-edge-platform/virtual-edge-node/edge-node-simulator/pkg/sim"
16 | flags_test "github.com/open-edge-platform/virtual-edge-node/edge-node-simulator/test/flags"
17 | )
18 |
19 | var zlog = logging.GetLogger("en-test")
20 |
21 | var (
22 | e2eLabel = "infra-e2e"
23 | day2Label = "infra-tests-day2"
24 | day1Label = "infra-tests-day1"
25 | day0Label = "infra-tests-day0"
26 | cleanupLabel = "cleanup"
27 | )
28 |
29 | var (
30 | waitUntilHostsRunning = time.Second * 5
31 | waitHostsRunning = time.Minute * 5
32 | waitHostsConnectionLost = time.Minute * 5
33 | waitHostsMaintenance = time.Minute * 1
34 |
35 | TimeNow = int(time.Now().UTC().Unix())
36 | SafeTimeDelay = 600
37 | )
38 |
39 | var (
40 | filterRunning = fmt.Sprintf(`%s = %q`, "host_status", "Running")
41 | filterNoConnection = fmt.Sprintf(`%s = %q`, "host_status", "No Connection")
42 | filterInstanceStatusError = fmt.Sprintf(`%s = %q`, "instance_status", "Error")
43 | )
44 |
45 | func GenerateUUIDs(cfg *flags_test.TestConfig) []string {
46 | // Generate the UUIDs of the edge nodes to be created in the Infrastructure Manager simulator
47 | enUUIDs := []string{}
48 | for i := 0; i < cfg.AmountEdgeNodes; i++ {
49 | hostUUID := uuid.New()
50 | enUUID := hostUUID.String()
51 | enUUIDs = append(enUUIDs, enUUID)
52 | }
53 | return enUUIDs
54 | }
55 |
56 | func GetENSimClient(ctx context.Context, cfg *flags_test.TestConfig) (ensim.Client, error) {
57 | simClient, err := ensim.NewClient(ctx, cfg.ENSimAddress)
58 | return simClient, err
59 | }
60 |
61 | func ENSIMCheckNodes(ctx context.Context, simClient ensim.Client, amount int) error {
62 | listNodes, err := simClient.List(ctx)
63 | if err == nil && amount != len(listNodes) {
64 | err = fmt.Errorf("expected %d simulated nodes, found %d", amount, len(listNodes))
65 | }
66 | return err
67 | }
68 |
69 | func ENSIMCreateNodes(ctx context.Context,
70 | cfg *flags_test.TestConfig,
71 | simClient ensim.Client,
72 | enUUIDs []string,
73 | ) error {
74 | enCredentials := &ensimapi.NodeCredentials{
75 | Project: cfg.Project,
76 | OnboardUsername: cfg.EdgeOnboardUser,
77 | OnboardPassword: cfg.EdgeOnboardPass,
78 | ApiUsername: cfg.EdgeAPIUser,
79 | ApiPassword: cfg.EdgeAPIPass,
80 | }
81 | for _, enUUID := range enUUIDs {
82 | zlog.Info().Msgf("Creating node %v", enUUID)
83 | err := simClient.Create(ctx, enUUID, enCredentials, true)
84 | if err != nil {
85 | return err
86 | }
87 | }
88 | return nil
89 | }
90 |
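91 | // Example (illustrative): provision a batch of simulated edge nodes for a test run
92 | // using the helpers above.
93 | //
94 | // cfg := flags_test.GetConfig()
95 | // uuids := GenerateUUIDs(cfg)
96 | // simClient, err := GetENSimClient(ctx, cfg)
97 | // // handle err, then:
98 | // err = ENSIMCreateNodes(ctx, cfg, simClient, uuids)
99 | // err = ENSIMCheckNodes(ctx, simClient, cfg.AmountEdgeNodes)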
--------------------------------------------------------------------------------
/edge-node-simulator/test/infra/e2e_test.go:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | package infra_test
5 |
6 | import (
7 | "testing"
8 |
9 | . "github.com/onsi/ginkgo/v2"
10 | . "github.com/onsi/gomega"
11 | )
12 |
13 | func TestInfrastructureManager(t *testing.T) {
14 | RegisterFailHandler(Fail)
15 | RunSpecs(t, "Infrastructure Manager E2E Integration Suite")
16 | }
17 |
--------------------------------------------------------------------------------
/pico/.editorconfig:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: 2025 Intel Corporation
2 | #
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | # EditorConfig is awesome: https://editorconfig.org
6 |
7 | # top-most EditorConfig file
8 | root = true
9 |
10 | [*]
11 | end_of_line = lf
12 | insert_final_newline = true
13 | charset = utf-8
14 | trim_trailing_whitespace = true
15 | max_line_length = 120
16 | indent_style = space
17 | indent_size = 2
18 |
19 | [*.go]
20 | indent_style = tab
21 | indent_size = 4
22 |
23 | [*.{tf,tfvars}]
24 | indent_style = space
25 | indent_size = 2
26 |
27 | [Makefile]
28 | indent_style = tab
29 | indent_size = 4
30 |
--------------------------------------------------------------------------------
/pico/.gitignore:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: 2025 Intel Corporation
2 | #
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | # Terraform files
6 | *.tfstate
7 | *.tfstate.*
8 | *.tfvars
9 |
10 | # Crash log files
11 | crash.log
12 |
13 | # Ignore override files
14 | override.tf
15 | override.tf.json
16 | *_override.tf
17 | *_override.tf.json
18 |
19 | # Ignore CLI configuration files
20 | .terraformrc
21 | terraform.rc
22 |
23 | # Ignore Terraform working directory
24 | .terraform/
25 |
26 | # Ignore output files
27 | output/
28 |
--------------------------------------------------------------------------------
/pico/.tool-versions:
--------------------------------------------------------------------------------
1 | tflint 0.51.2
2 | terraform 1.12.1
3 |
--------------------------------------------------------------------------------
/pico/.tool-versions.license:
--------------------------------------------------------------------------------
1 | SPDX-FileCopyrightText: 2025 Intel Corporation
2 |
3 | SPDX-License-Identifier: Apache-2.0
4 |
--------------------------------------------------------------------------------
/pico/Makefile:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: 2025 Intel Corporation
2 | #
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | all: lint
6 |
7 | lint:
8 | tflint --init --recursive
9 | tflint --recursive
10 |
11 | dependency-check:
12 | @# Help: Runs dependency-check stage
13 | @echo "---MAKEFILE BUILD---"
14 | echo $@
15 | @echo "---END MAKEFILE Build---"
16 |
17 | build:
18 | @# Help: Runs build stage
19 | @echo "---MAKEFILE BUILD---"
20 | echo $@
21 | @echo "---END MAKEFILE Build---"
22 |
23 | docker-build:
24 | @# Help: Runs docker-build stage
25 | @echo "---MAKEFILE BUILD---"
26 | echo $@
27 | @echo "---END MAKEFILE Build---"
--------------------------------------------------------------------------------
/pico/VERSION:
--------------------------------------------------------------------------------
1 | v1.5.0-dev
2 |
--------------------------------------------------------------------------------
/pico/VERSION.license:
--------------------------------------------------------------------------------
1 | SPDX-FileCopyrightText: 2025 Intel Corporation
2 |
3 | SPDX-License-Identifier: Apache-2.0
4 |
--------------------------------------------------------------------------------
/pico/modules/common/main.tf:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: 2025 Intel Corporation
2 | #
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | resource "null_resource" "generate_uefi_boot_image" {
6 | provisioner "local-exec" {
7 | command = < /dev/null
72 |
73 | # Clean up
74 | case "$OSTYPE" in
75 | darwin*)
76 | hdiutil detach ${path.module}/mnt ;;
77 | *)
78 | sudo umount ${path.module}/mnt ;;
79 | esac
80 | rmdir ${path.module}/mnt
81 | sudo losetup -d $${loop_device}
82 | EOT
83 | }
84 | }
85 |
--------------------------------------------------------------------------------
/pico/modules/common/terraform.tf:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: 2025 Intel Corporation
2 | #
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | terraform {
6 | required_version = ">= 1.9.5"
7 |
8 | required_providers {
9 | kubectl = {
10 | source = "gavinbunney/kubectl"
11 | version = "1.19.0"
12 | }
13 |
14 | null = {
15 | source = "hashicorp/null"
16 | version = "~> 3.2.3"
17 | }
18 |
19 | random = {
20 | source = "hashicorp/random"
21 | version = "~> 3.7.1"
22 | }
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/pico/modules/common/variables.tf:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: 2025 Intel Corporation
2 | #
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | variable "tinkerbell_nginx_domain" {
6 | description = "The domain of the Tinkerbell Nginx server"
7 | type = string
8 | }
9 |
10 | variable "boot_image_name" {
11 | description = "The name of the boot image file to be generated."
12 | type = string
13 | }
14 |
--------------------------------------------------------------------------------
/pico/modules/pico-vm-kubevirt/.terraform.lock.hcl:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: 2025 Intel Corporation
2 | #
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | # This file is maintained automatically by "terraform init".
6 | # Manual edits may be lost in future updates.
7 |
8 | provider "registry.terraform.io/gavinbunney/kubectl" {
9 | version = "1.19.0"
10 | constraints = "~> 1.19.0"
11 | hashes = [
12 | "h1:9QkxPjp0x5FZFfJbE+B7hBOoads9gmdfj9aYu5N4Sfc=",
13 | "zh:1dec8766336ac5b00b3d8f62e3fff6390f5f60699c9299920fc9861a76f00c71",
14 | "zh:43f101b56b58d7fead6a511728b4e09f7c41dc2e3963f59cf1c146c4767c6cb7",
15 | "zh:4c4fbaa44f60e722f25cc05ee11dfaec282893c5c0ffa27bc88c382dbfbaa35c",
16 | "zh:51dd23238b7b677b8a1abbfcc7deec53ffa5ec79e58e3b54d6be334d3d01bc0e",
17 | "zh:5afc2ebc75b9d708730dbabdc8f94dd559d7f2fc5a31c5101358bd8d016916ba",
18 | "zh:6be6e72d4663776390a82a37e34f7359f726d0120df622f4a2b46619338a168e",
19 | "zh:72642d5fcf1e3febb6e5d4ae7b592bb9ff3cb220af041dbda893588e4bf30c0c",
20 | "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425",
21 | "zh:a1da03e3239867b35812ee031a1060fed6e8d8e458e2eaca48b5dd51b35f56f7",
22 | "zh:b98b6a6728fe277fcd133bdfa7237bd733eae233f09653523f14460f608f8ba2",
23 | "zh:bb8b071d0437f4767695c6158a3cb70df9f52e377c67019971d888b99147511f",
24 | "zh:dc89ce4b63bfef708ec29c17e85ad0232a1794336dc54dd88c3ba0b77e764f71",
25 | "zh:dd7dd18f1f8218c6cd19592288fde32dccc743cde05b9feeb2883f37c2ff4b4e",
26 | "zh:ec4bd5ab3872dedb39fe528319b4bba609306e12ee90971495f109e142d66310",
27 | "zh:f610ead42f724c82f5463e0e71fa735a11ffb6101880665d93f48b4a67b9ad82",
28 | ]
29 | }
30 |
31 | provider "registry.terraform.io/hashicorp/null" {
32 | version = "3.2.4"
33 | constraints = "~> 3.2.3"
34 | hashes = [
35 | "h1:hkf5w5B6q8e2A42ND2CjAvgvSN3puAosDmOJb3zCVQM=",
36 | "zh:59f6b52ab4ff35739647f9509ee6d93d7c032985d9f8c6237d1f8a59471bbbe2",
37 | "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
38 | "zh:795c897119ff082133150121d39ff26cb5f89a730a2c8c26f3a9c1abf81a9c43",
39 | "zh:7b9c7b16f118fbc2b05a983817b8ce2f86df125857966ad356353baf4bff5c0a",
40 | "zh:85e33ab43e0e1726e5f97a874b8e24820b6565ff8076523cc2922ba671492991",
41 | "zh:9d32ac3619cfc93eb3c4f423492a8e0f79db05fec58e449dee9b2d5873d5f69f",
42 | "zh:9e15c3c9dd8e0d1e3731841d44c34571b6c97f5b95e8296a45318b94e5287a6e",
43 | "zh:b4c2ab35d1b7696c30b64bf2c0f3a62329107bd1a9121ce70683dec58af19615",
44 | "zh:c43723e8cc65bcdf5e0c92581dcbbdcbdcf18b8d2037406a5f2033b1e22de442",
45 | "zh:ceb5495d9c31bfb299d246ab333f08c7fb0d67a4f82681fbf47f2a21c3e11ab5",
46 | "zh:e171026b3659305c558d9804062762d168f50ba02b88b231d20ec99578a6233f",
47 | "zh:ed0fe2acdb61330b01841fa790be00ec6beaac91d41f311fb8254f74eb6a711f",
48 | ]
49 | }
50 |
51 | provider "registry.terraform.io/hashicorp/random" {
52 | version = "3.7.2"
53 | constraints = "~> 3.7.1"
54 | hashes = [
55 | "h1:356j/3XnXEKr9nyicLUufzoF4Yr6hRy481KIxRVpK0c=",
56 | "zh:14829603a32e4bc4d05062f059e545a91e27ff033756b48afbae6b3c835f508f",
57 | "zh:1527fb07d9fea400d70e9e6eb4a2b918d5060d604749b6f1c361518e7da546dc",
58 | "zh:1e86bcd7ebec85ba336b423ba1db046aeaa3c0e5f921039b3f1a6fc2f978feab",
59 | "zh:24536dec8bde66753f4b4030b8f3ef43c196d69cccbea1c382d01b222478c7a3",
60 | "zh:29f1786486759fad9b0ce4fdfbbfece9343ad47cd50119045075e05afe49d212",
61 | "zh:4d701e978c2dd8604ba1ce962b047607701e65c078cb22e97171513e9e57491f",
62 | "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
63 | "zh:7b8434212eef0f8c83f5a90c6d76feaf850f6502b61b53c329e85b3b281cba34",
64 | "zh:ac8a23c212258b7976e1621275e3af7099e7e4a3d4478cf8d5d2a27f3bc3e967",
65 | "zh:b516ca74431f3df4c6cf90ddcdb4042c626e026317a33c53f0b445a3d93b720d",
66 | "zh:dc76e4326aec2490c1600d6871a95e78f9050f9ce427c71707ea412a2f2f1a62",
67 | "zh:eac7b63e86c749c7d48f527671c7aee5b4e26c10be6ad7232d6860167f99dbb0",
68 | ]
69 | }
70 |
--------------------------------------------------------------------------------
/pico/modules/pico-vm-kubevirt/main.tf:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: 2025 Intel Corporation
2 | #
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | resource "random_integer" "vm_name_suffix" {
6 | min = 1000
7 | max = 100000
8 | }
9 |
10 | locals {
11 | full_vm_name = "${var.vm_name}-${random_integer.vm_name_suffix.result}"
12 | boot_image_name = "${local.full_vm_name}-uefi-boot.img"
13 | }
14 |
15 | module "common" {
16 | source = "../common"
17 | boot_image_name = local.boot_image_name
18 | tinkerbell_nginx_domain = var.tinkerbell_nginx_domain
19 | }
20 |
21 | resource "null_resource" "upload_uefi_boot_image" {
22 | depends_on = [
23 | module.common
24 | ]
25 |
26 | provisioner "local-exec" {
27 | command = <
--------------------------------------------------------------------------------
/pico/modules/pico-vm-libvirt/customize_domain.xsl.tftpl:
--------------------------------------------------------------------------------
1 | [XML/XSLT markup not preserved; the surviving template values below are the SMBIOS fields injected into the libvirt domain definition]
2 | ${smbios_product}
3 | VEN V1
4 | ${smbios_serial}
5 | ${vm_uuid}
--------------------------------------------------------------------------------
/pico/modules/pico-vm-libvirt/customize_domain.xsl.tftpl.license:
--------------------------------------------------------------------------------
1 | SPDX-FileCopyrightText: 2025 Intel Corporation
2 |
3 | SPDX-License-Identifier: Apache-2.0
4 |
--------------------------------------------------------------------------------
/pico/modules/pico-vm-libvirt/main.tf:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: 2025 Intel Corporation
2 | #
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | locals {
6 | boot_image_name = "${var.vm_name}-uefi-boot.img"
7 | vm_uuid = length(var.smbios_uuid) > 0 ? var.smbios_uuid : random_uuid.vm_uuid.result
8 | vm_serial = length(var.smbios_serial) > 0 ? var.smbios_serial : upper(random_id.vm_serial.hex)
9 | }
10 |
11 | resource "random_uuid" "vm_uuid" {}
12 |
13 | resource "random_id" "vm_serial" {
14 | byte_length = 5
15 | }
16 |
17 | module "common" {
18 | source = "../common"
19 | boot_image_name = local.boot_image_name
20 | tinkerbell_nginx_domain = var.tinkerbell_nginx_domain
21 | }
22 |
23 | # Ensure default storage pool exists before provisioning
24 | resource "null_resource" "ensure_libvirt_pool" {
25 | provisioner "local-exec" {
26 | command = <<-EOT
27 | set -e
28 | if ! virsh pool-info ${var.libvirt_pool_name} > /dev/null 2>&1; then
29 | echo "Creating libvirt pool: ${var.libvirt_pool_name}"
30 | virsh pool-define-as ${var.libvirt_pool_name} dir --target /var/lib/libvirt/images
31 | virsh pool-build ${var.libvirt_pool_name}
32 | virsh pool-start ${var.libvirt_pool_name}
33 | virsh pool-autostart ${var.libvirt_pool_name}
34 | else
35 | echo "Libvirt pool ${var.libvirt_pool_name} already exists."
36 | fi
37 | EOT
38 | }
39 | }
40 |
41 | resource "libvirt_volume" "uefi_boot_image" {
42 | depends_on = [null_resource.ensure_libvirt_pool, module.common]
43 | name = "${var.vm_name}-vol"
44 | pool = var.libvirt_pool_name
45 | source = "../common/output/${local.boot_image_name}"
46 | format = "raw"
47 | }
48 |
49 | resource "libvirt_domain" "node_vm" {
50 | name = var.vm_name
51 | memory = var.memory
52 | vcpu = var.cpu_cores
53 | running = false
54 |
55 | firmware = var.libvirt_firmware
56 |
57 | cpu {
58 | mode = "host-model" # Use host-model to match the host CPU as closely as possible
59 | }
60 |
61 | disk {
62 | volume_id = libvirt_volume.uefi_boot_image.id
63 | }
64 |
65 | network_interface {
66 | network_name = var.libvirt_network_name
67 | }
68 |
69 | graphics {
70 | type = "vnc"
71 | }
72 |
73 | boot_device {
74 | dev = ["hd"]
75 | }
76 |
77 | tpm {
78 | model = var.tpm_enable ? "tpm-tis" : ""
79 | }
80 |
81 | xml {
82 | xslt = templatefile("${path.module}/customize_domain.xsl.tftpl", {
83 | smbios_product = var.smbios_product,
84 | smbios_serial = local.vm_serial,
85 | vm_name = var.vm_name,
86 | vm_uuid = local.vm_uuid,
87 | vm_console = var.vm_console
88 | })
89 | }
90 | }
91 |
92 | resource "null_resource" "update_libvirtvm_and_restart" {
93 | provisioner "local-exec" {
94 | command = <<-EOT
95 | # Resize the boot disk
96 | virsh vol-resize ${libvirt_volume.uefi_boot_image.name} --pool ${libvirt_volume.uefi_boot_image.pool} ${var.disk_size}G
97 |
98 | # Start the VM
99 | virsh start "${libvirt_domain.node_vm.name}"
100 | EOT
101 | }
102 | }
103 |
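104 | # Example (illustrative) invocation of this module from a root configuration; the values
105 | # are placeholders, and only tinkerbell_nginx_domain has no default:
106 | #
107 | # module "pico_node" {
108 | #   source                  = "./modules/pico-vm-libvirt"
109 | #   vm_name                 = "pico-node-01"
110 | #   tinkerbell_nginx_domain = "tinkerbell-nginx.kind.internal"
111 | # }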
--------------------------------------------------------------------------------
/pico/modules/pico-vm-libvirt/outputs.tf:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: 2025 Intel Corporation
2 | #
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | output "vm_name_and_serial" {
6 | description = "VM name and serial number associated with the node"
7 | value = {
8 | name = libvirt_domain.node_vm.name
9 | serial = local.vm_serial
10 | uuid = local.vm_uuid
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/pico/modules/pico-vm-libvirt/terraform.tf:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: 2025 Intel Corporation
2 | #
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | terraform {
6 | required_version = ">= 1.9.5"
7 |
8 | required_providers {
9 |
10 | libvirt = {
11 | source = "dmacvicar/libvirt"
12 | version = "~> 0.8.3"
13 | }
14 |
15 | null = {
16 | source = "hashicorp/null"
17 | version = "~> 3.2.3"
18 | }
19 |
20 | random = {
21 | source = "hashicorp/random"
22 | version = "~> 3.7.1"
23 | }
24 | }
25 | }
26 |
27 |
28 |
--------------------------------------------------------------------------------
/pico/modules/pico-vm-libvirt/variables.tf:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: 2025 Intel Corporation
2 | #
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | variable "cpu_cores" {
6 | description = "Number of CPU cores for the VM"
7 | type = number
8 | default = 8
9 | }
10 |
11 | variable "memory" {
12 | description = "Dedicated memory for the VM in MB"
13 | type = number
14 | default = 8192
15 | }
16 |
17 | variable "disk_size" {
18 | description = "Disk size for the VM in GB"
19 | type = number
20 | default = 128
21 | }
22 |
23 | variable "smbios_serial" {
24 | description = "SMBIOS serial number for the VM. If blank, it will be auto-generated."
25 | type = string
26 | default = ""
27 | }
28 |
29 | variable "smbios_uuid" {
30 | description = "SMBIOS UUID for the VM. If blank, it will be auto-generated."
31 | type = string
32 | default = ""
33 | }
34 |
35 | variable "smbios_product" {
36 | description = "SMBIOS product name for the VM"
37 | type = string
38 | default = "Pico Node"
39 | }
40 |
41 | variable "vm_name" {
42 | description = "Name of the virtual machine"
43 | type = string
44 | default = "pico-node-libvirt"
45 | }
46 |
47 | variable "vm_console" {
48 | description = "Console output target: 'pty' for interactive console access or 'file' to save console output to a file"
49 | type = string
50 | default = "pty"
51 | validation {
52 | condition = contains(["pty", "file"], var.vm_console)
53 | error_message = "The vm_console variable must be either 'pty' or 'file'."
54 | }
55 | }
56 |
57 | variable "tinkerbell_nginx_domain" {
58 | description = "The domain of the Tinkerbell Nginx server"
59 | type = string
60 | }
61 |
62 | variable "tpm_enable" {
63 | description = "Enable TPM for the VM"
64 | type = bool
65 | default = true
66 | }
67 |
68 | variable "libvirt_pool_name" {
69 | description = "The name of the libvirt storage pool"
70 | type = string
71 | default = "default"
72 | }
73 |
74 | variable "libvirt_network_name" {
75 | description = "The name of the libvirt network"
76 | type = string
77 | default = "default"
78 | }
79 |
80 | variable "libvirt_firmware" {
81 | description = "The firmware to use for the VM"
82 | type = string
83 | default = "/usr/share/OVMF/OVMF_CODE.fd"
84 | }
85 |
--------------------------------------------------------------------------------
/pico/modules/pico-vm-proxmox/.terraform.lock.hcl:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: 2025 Intel Corporation
2 | #
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | # This file is maintained automatically by "terraform init".
6 | # Manual edits may be lost in future updates.
7 |
8 | provider "registry.terraform.io/bpg/proxmox" {
9 | version = "0.73.2"
10 | constraints = "~> 0.73.2"
11 | hashes = [
12 | "h1:P3TxLxAqcPoHdJ+ea8RQRIRzbB51fPMq4ZY6v0RVz/o=",
13 | "zh:134f2094eee81a810ce5ee1ffde44dd3dd8285497b7c8083b58bb459735d3763",
14 | "zh:2fc437f4c72353d28e27650529f0d52a8af42c3fa1d990cf9298caba22935907",
15 | "zh:33d703ad335676bb426344cfb0c9b8883ad3f565a2108f76a0ef103204e0ad5b",
16 | "zh:50f226aef62ff661dba5c317ffbc17c86c8ba82356c1752669c557ff7ff47c31",
17 | "zh:5ba3260e05e2ec00d1fadd6096f54d5d9c6f5fb7205d6da521970c48635d9633",
18 | "zh:6cddb69c4f3d499f06a42a21d7a4ced624ab69f772c6ab633aa27f5bc9187f6f",
19 | "zh:7aa32f333ac43d783d24d6d6a6b33cd81a657f9f15c272e8a2dc191701b5e346",
20 | "zh:895c92353ec1fcd87a1331a1ed34fc94327027cee3c19fbbacd7e9fbf348c5d1",
21 | "zh:8db04a53ded97607cf6dc47a7a5e84427839e6e708643f5dc80a1105ba2bf673",
22 | "zh:8fc0319b68099153394f90b6d65b63ade774fe86dd93140e37dfb08abf0b54aa",
23 | "zh:977774a06e5adbe958adb237c90bc54b9011b782a8b2d346d4a4f3bd5cd1189d",
24 | "zh:dbb17f2e30eee02e6aee9b753b0a45479d1775db72d595643077899ae2a56dd2",
25 | "zh:e12585f3fa493a2df4752b5d94949c35ea4fce5582b54b4b4f374e17b7f3a13a",
26 | "zh:f26e0763dbe6a6b2195c94b44696f2110f7f55433dc142839be16b9697fa5597",
27 | "zh:fb3bc137b2b7b98e88957fb32b568079d51f8ef5037b25ea9f8f981f5a1e5547",
28 | ]
29 | }
30 |
31 | provider "registry.terraform.io/hashicorp/null" {
32 | version = "3.2.4"
33 | constraints = "~> 3.2.3"
34 | hashes = [
35 | "h1:hkf5w5B6q8e2A42ND2CjAvgvSN3puAosDmOJb3zCVQM=",
36 | "zh:59f6b52ab4ff35739647f9509ee6d93d7c032985d9f8c6237d1f8a59471bbbe2",
37 | "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
38 | "zh:795c897119ff082133150121d39ff26cb5f89a730a2c8c26f3a9c1abf81a9c43",
39 | "zh:7b9c7b16f118fbc2b05a983817b8ce2f86df125857966ad356353baf4bff5c0a",
40 | "zh:85e33ab43e0e1726e5f97a874b8e24820b6565ff8076523cc2922ba671492991",
41 | "zh:9d32ac3619cfc93eb3c4f423492a8e0f79db05fec58e449dee9b2d5873d5f69f",
42 | "zh:9e15c3c9dd8e0d1e3731841d44c34571b6c97f5b95e8296a45318b94e5287a6e",
43 | "zh:b4c2ab35d1b7696c30b64bf2c0f3a62329107bd1a9121ce70683dec58af19615",
44 | "zh:c43723e8cc65bcdf5e0c92581dcbbdcbdcf18b8d2037406a5f2033b1e22de442",
45 | "zh:ceb5495d9c31bfb299d246ab333f08c7fb0d67a4f82681fbf47f2a21c3e11ab5",
46 | "zh:e171026b3659305c558d9804062762d168f50ba02b88b231d20ec99578a6233f",
47 | "zh:ed0fe2acdb61330b01841fa790be00ec6beaac91d41f311fb8254f74eb6a711f",
48 | ]
49 | }
50 |
51 | provider "registry.terraform.io/hashicorp/random" {
52 | version = "3.7.2"
53 | constraints = "~> 3.7.1"
54 | hashes = [
55 | "h1:356j/3XnXEKr9nyicLUufzoF4Yr6hRy481KIxRVpK0c=",
56 | "zh:14829603a32e4bc4d05062f059e545a91e27ff033756b48afbae6b3c835f508f",
57 | "zh:1527fb07d9fea400d70e9e6eb4a2b918d5060d604749b6f1c361518e7da546dc",
58 | "zh:1e86bcd7ebec85ba336b423ba1db046aeaa3c0e5f921039b3f1a6fc2f978feab",
59 | "zh:24536dec8bde66753f4b4030b8f3ef43c196d69cccbea1c382d01b222478c7a3",
60 | "zh:29f1786486759fad9b0ce4fdfbbfece9343ad47cd50119045075e05afe49d212",
61 | "zh:4d701e978c2dd8604ba1ce962b047607701e65c078cb22e97171513e9e57491f",
62 | "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
63 | "zh:7b8434212eef0f8c83f5a90c6d76feaf850f6502b61b53c329e85b3b281cba34",
64 | "zh:ac8a23c212258b7976e1621275e3af7099e7e4a3d4478cf8d5d2a27f3bc3e967",
65 | "zh:b516ca74431f3df4c6cf90ddcdb4042c626e026317a33c53f0b445a3d93b720d",
66 | "zh:dc76e4326aec2490c1600d6871a95e78f9050f9ce427c71707ea412a2f2f1a62",
67 | "zh:eac7b63e86c749c7d48f527671c7aee5b4e26c10be6ad7232d6860167f99dbb0",
68 | ]
69 | }
70 |
--------------------------------------------------------------------------------
/pico/modules/pico-vm-proxmox/main.tf:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: 2025 Intel Corporation
2 | #
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | resource "random_integer" "vm_name_suffix" {
6 | min = 1000
7 | max = 100000
8 | }
9 |
10 | locals {
11 | full_vm_name = "${var.vm_name}-${random_integer.vm_name_suffix.result}"
12 | boot_image_name = "${local.full_vm_name}-uefi-boot.img"
13 | }
14 |
15 | module "common" {
16 | source = "../common"
17 | boot_image_name = local.boot_image_name
18 | tinkerbell_nginx_domain = var.tinkerbell_nginx_domain
19 | }
20 |
21 | resource "proxmox_virtual_environment_file" "upload_uefi_boot_image" {
22 | depends_on = [module.common]
23 | content_type = "iso"
24 | datastore_id = var.datastore_id
25 | node_name = var.proxmox_node_name
26 |
27 | source_file {
28 | path = "../common/output/${local.boot_image_name}"
29 | }
30 | }
31 |
32 | resource "proxmox_virtual_environment_vm" "node_vm" {
33 | depends_on = [
34 | random_integer.vm_name_suffix,
35 | proxmox_virtual_environment_file.upload_uefi_boot_image,
36 | ]
37 |
38 | node_name = var.proxmox_node_name
39 |
40 | name = local.full_vm_name
41 | description = var.vm_description
42 | tags = var.vm_tags
43 | agent {
44 | enabled = false
45 | }
46 | stop_on_destroy = true
47 | startup {
48 | up_delay = var.vm_startup.up_delay
49 | down_delay = var.vm_startup.down_delay
50 | }
51 |
52 | bios = "ovmf"
53 | smbios {
54 | serial = var.smbios_serial
55 | uuid = var.smbios_uuid
56 | product = var.smbios_product
57 | }
58 | operating_system {
59 | type = var.vm_operating_type
60 | }
61 |
62 | vga {
63 | type = var.vga_display_type
64 | }
65 |
66 | cpu {
67 | cores = var.cpu_cores
68 | type = var.cpu_type
69 | }
70 |
71 | memory {
72 | dedicated = var.memory_dedicated
73 | floating = var.memory_minimum
74 | }
75 |
76 | scsi_hardware = var.scsi_hardware
77 |
78 | disk {
79 | datastore_id = var.vm_datastore_id
80 | file_id = proxmox_virtual_environment_file.upload_uefi_boot_image.id
81 | interface = var.disk_interface
82 | size = var.disk_size
83 | aio = var.disk_aio
84 | cache = var.disk_cache_type
85 | iothread = var.disk_iothread
86 | backup = var.disk_backup
87 | replicate = var.disk_replicate
88 | }
89 |
90 | efi_disk {
91 | datastore_id = var.vm_datastore_id
92 | type = "4m"
93 | pre_enrolled_keys = false
94 | }
95 |
96 | boot_order = var.boot_order
97 |
98 | network_device {
99 | bridge = var.network_bridge
100 | enabled = true
101 | model = var.network_model
102 | vlan_id = var.network_vlan_id
103 | }
104 |
105 | dynamic "tpm_state" {
106 | for_each = var.tpm_enable ? [1] : []
107 |
108 | content {
109 | datastore_id = var.vm_datastore_id
110 | version = var.tpm_version
111 | }
112 | }
113 |
114 | kvm_arguments = "-chardev file,id=char0,path=/tmp/serial.${var.vm_name}.log -serial chardev:char0"
115 | }
116 |
--------------------------------------------------------------------------------
/pico/modules/pico-vm-proxmox/outputs.tf:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: 2025 Intel Corporation
2 | #
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | output "vm_name" {
6 | value = proxmox_virtual_environment_vm.node_vm.name
7 | }
8 |
9 | output "vm_id" {
10 | value = proxmox_virtual_environment_vm.node_vm.id
11 | }
12 |
13 | output "vm_serial" {
14 | value = proxmox_virtual_environment_vm.node_vm.smbios[0].serial
15 | }
16 |
17 | output "vm_uuid" {
18 | value = proxmox_virtual_environment_vm.node_vm.smbios[0].uuid
19 | }
20 |
21 | output "tinkerbell_nginx_domain" {
22 | value = var.tinkerbell_nginx_domain
23 | }
24 |
--------------------------------------------------------------------------------
/pico/modules/pico-vm-proxmox/terraform.tf:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: 2025 Intel Corporation
2 | #
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | terraform {
6 | required_version = ">= 1.9.5"
7 |
8 | required_providers {
9 | proxmox = {
10 | source = "bpg/proxmox"
11 | version = "~> 0.73.2"
12 | }
13 |
14 | null = {
15 | source = "hashicorp/null"
16 | version = "~> 3.2.3"
17 | }
18 |
19 | random = {
20 | source = "hashicorp/random"
21 | version = "~> 3.7.1"
22 | }
23 | }
24 | }
25 |
26 | provider "proxmox" {
27 | endpoint = var.proxmox_endpoint
28 | username = var.proxmox_username
29 | password = var.proxmox_password
30 | insecure = var.proxmox_insecure
31 |
32 | random_vm_id_start = var.proxmox_random_vm_id_start
33 | random_vm_id_end = var.proxmox_random_vm_id_end
34 | random_vm_ids = var.proxmox_random_vm_ids
35 |
36 | ssh {
37 | agent = true
38 | dynamic "node" {
39 | for_each = var.proxmox_endpoint_ssh != "" ? [1] : []
40 | content {
41 | name = var.proxmox_node_name
42 | address = var.proxmox_endpoint_ssh
43 | }
44 | }
45 | }
46 | }
47 |
--------------------------------------------------------------------------------
/pico/static/node_details.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-edge-platform/virtual-edge-node/e14cf1632d88da9bb8fa3f499c4c025d7b634efd/pico/static/node_details.png
--------------------------------------------------------------------------------
/pico/static/node_details.png.license:
--------------------------------------------------------------------------------
1 | SPDX-FileCopyrightText: 2025 Intel Corporation
2 |
3 | SPDX-License-Identifier: Apache-2.0
4 |
--------------------------------------------------------------------------------
/placeholder.txt:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 | Just a placeholder file so the Checkmarx scan passes; the scan requires at least one file to scan.
4 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | # lint yaml
5 | yamllint~=1.35.1
6 |
7 | # license check
8 | reuse~=5.0.2
9 |
--------------------------------------------------------------------------------
/tools/helmbuild.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
4 | # SPDX-License-Identifier: Apache-2.0
5 |
6 | # helmbuild.sh
7 | # build helm charts based on change folders
8 |
9 | set -eu -o pipefail
10 |
11 | echo "# helmbuild.sh, using git: $(git --version) #"
12 |
13 | # when not running under Jenkins, use current dir as workspace
14 | WORKSPACE=${WORKSPACE:-.}
15 |
16 | # Label to add Helm CI meta-data
17 | LABEL_REVISION=$(git rev-parse HEAD)
18 | LABEL_CREATED=$(date -u "+%Y-%m-%dT%H:%M:%SZ")
19 |
20 | # Get the changed file name from the latest commit and then get the root folder name.
21 | # shellcheck disable=SC1001
22 | changed_dirs=$(git show --pretty="" --name-only | xargs dirname \$\1 | cut -d "/" -f1 | sort | uniq)
23 |
24 | # Print lists of files that are changed/untracked
25 | if [ -z "$changed_dirs" ]
26 | then
27 | echo "# chart_version_check.sh - No changes, Success! #"
28 | exit 0
29 | fi
30 |
31 | for dir in ${changed_dirs};
32 | do
33 | if [ ! -f "$dir/Chart.yaml" ]; then
34 | continue
35 | fi
36 | echo "---------$dir-------------"
37 | echo "--download helm dependency"
38 | helm dep build "$dir"
39 | echo "--add annotations"
40 | yq eval -i ".annotations.revision = \"${LABEL_REVISION}\"" "$dir"/Chart.yaml
41 | yq eval -i ".annotations.created = \"${LABEL_CREATED}\"" "$dir"/Chart.yaml
42 | echo "--package helm"
43 | helm package "$dir"
44 | done
45 |
46 |
47 | echo "# helmbuild.sh Success! - all charts have updated packaged#"
48 | exit 0
49 |
50 |
--------------------------------------------------------------------------------
/tools/helmlint.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # SPDX-FileCopyrightText: (C) 2018-present Open Networking Foundation
4 | # SPDX-License-Identifier: Apache-2.0
5 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
6 | # SPDX-License-Identifier: Apache-2.0
7 |
8 | # helmlint.sh
9 | # run `helm lint` on all helm charts that are found
10 |
11 | set +e -o pipefail
12 |
13 | # verify that we have helm installed
14 | command -v helm >/dev/null 2>&1 || { echo "helm not found, please install it" >&2; exit 1; }
15 |
16 | echo "# helmlint.sh, using helm version: $(helm version -c --short) #"
17 |
18 | # Collect success/failure, and list/types of failures
19 | fail_lint=0
20 | failed_lint=""
21 | failed_req=""
22 |
23 | # when not running under Jenkins, use current dir as workspace
24 | WORKSPACE=${WORKSPACE:-.}
25 |
26 | # cleanup repos if `clean` option passed as parameter
27 | if [ "$1" = "clean" ]
28 | then
29 | echo "Removing any downloaded charts"
30 | find "${WORKSPACE}" -type d -name 'charts' -exec rm -rf {} \;
31 | fi
32 |
33 | # now that $1 is checked, error on undefined vars
34 | set -u
35 |
36 | # loop on result of 'find -name Chart.yaml'
37 | while IFS= read -r -d '' chart
38 | do
39 | chartdir=$(dirname "${chart}")
40 |
41 | echo "Checking chart: $chartdir"
42 |
43 | # update dependencies (if any)
44 | helm dependency update "${chartdir}"
45 |
46 | # lint the chart (with values.yaml if it exists)
47 | if [ -f "${chartdir}/values.yaml" ]; then
48 | helm lint --strict --values "${chartdir}/values.yaml" "${chartdir}"
49 | else
50 | helm lint --strict "${chartdir}"
51 | fi
52 |
53 | rc=$?
54 | if [[ $rc != 0 ]]; then
55 | fail_lint=1
56 | failed_lint+="${chartdir} "
57 | fi
58 |
59 | # check that requirements are available if they're specified
60 | if [ -f "${chartdir}/requirements.yaml" ]
61 | then
62 | echo "Chart has requirements.yaml, checking availability"
63 | helm dependency update "${chartdir}"
64 | rc=$?
65 | if [[ $rc != 0 ]]; then
66 | fail_lint=1
67 | failed_req+="${chartdir} "
68 | fi
69 |
70 | # remove charts dir after checking for availability, as this chart might be
71 | # required by other charts in the next loop
72 | rm -rf "${chartdir}/charts"
73 | fi
74 |
75 | done < <(find "${WORKSPACE}" -name Chart.yaml -print0)
76 |
77 | if [[ $fail_lint != 0 ]]; then
78 | echo "# helmlint.sh Failure! #"
79 | echo "Charts that failed to lint: $failed_lint"
80 | echo "Charts with failures in requirements.yaml: $failed_req"
81 | exit 1
82 | fi
83 |
84 | echo "# helmlint.sh Success! - all charts linted and have valid requirements.yaml #"
85 |
86 | exit 0
87 |
88 |
--------------------------------------------------------------------------------
/tools/helmpush.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
4 | # SPDX-License-Identifier: Apache-2.0
5 |
6 | # helmpush.sh
7 | # search all packages with *.tgz name and then push to remote Helm server
8 |
9 | set -u -o pipefail
10 |
11 | echo "# helmpush.sh, using git: $(git --version) #"
12 |
13 | # when not running under Jenkins, use current dir as workspace
14 | WORKSPACE=${WORKSPACE:-.}
15 | HELM_CM_NAME=${HELM_CM_NAME:-oie}
16 |
17 | # Filter packages named $name-$version.tgz, where version must be in $major.$minor.$patch format
18 | pkg_list=$(find "${WORKSPACE}" -maxdepth 1 -type f -regex ".*tgz" | grep -E ".*[0-9]+\.[0-9]+\.[0-9]+\.tgz")
19 | if [ -z "$pkg_list" ];
20 | then
21 | echo "No Packages found, exit"
22 | exit 0
23 | fi
24 |
25 | for pkg in $pkg_list
26 | do
27 | echo "------$pkg------"
28 | helm cm-push "$pkg" "$HELM_CM_NAME"
29 | done
30 |
31 | echo "# helmpush.sh Success! - all charts have been pushed"
32 | exit 0
33 |
34 |
--------------------------------------------------------------------------------
/tools/requirements.txt:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | # lint yaml
5 | yamllint~=1.27.1
6 |
7 | # license check
8 | python-debian==0.1.44
9 | reuse~=1.0.0
10 |
--------------------------------------------------------------------------------
/vm-provisioning/Makefile:
--------------------------------------------------------------------------------
1 | # VM Provisioning project
2 |
3 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
4 | # SPDX-License-Identifier: Apache-2.0
5 |
6 | .DEFAULT_GOAL := help
7 | .PHONY: lint license help
8 |
9 | all: lint
10 |
11 | # Optionally include tool version checks, not used in Docker builds
12 | TOOL_VERSION_CHECK ?= 0
13 |
14 | ##### Variables #####
15 |
16 | # Project variables
17 | PROJECT_NAME := virtualedgenode
18 | BINARY_NAME := $(PROJECT_NAME)
19 |
20 | # Code versions, tags, and so on
21 | VERSION := $(shell cat VERSION)
22 | VERSION_MAJOR := $(shell cut -c 1 VERSION)
23 | IMG_NAME := ${PROJECT_NAME}
24 | IMG_VERSION ?= $(shell git branch --show-current)
25 | GIT_COMMIT ?= $(shell git rev-parse HEAD)
26 |
27 | # Yamllint variables
28 | YAML_FILES := $(shell find . \( -path './venv_virtualedgenode' -o -path './vendor' \) -prune -o -type f \( -name '*.yaml' -o -name '*.yml' \) -print )
29 | YAML_IGNORE := vendor, .github/workflows
30 |
31 | # Include shared makefile
32 | include ../common.mk
33 |
34 | ##### CI Targets #####
35 |
36 | lint: license shellcheck yamllint mdlint
37 | @$(MAKE) yaml-syntax-lint YAML_FILES="$(YAML_FILES)" YAML_IGNORE="$(YAML_IGNORE)"
38 | @$(MAKE) rx-yaml-input-validate
39 | dependency-check:
40 | chmod +x ./install_packages.sh
41 | ./install_packages.sh
42 | chmod -x ./install_packages.sh
43 |
44 | build:
45 | @# Help: Runs build stage
46 | @echo "---MAKEFILE BUILD---"
47 | echo $@
48 | @echo "---END MAKEFILE Build---"
49 |
50 | docker-build:
51 | @# Help: Runs docker-build stage
52 | @echo "---MAKEFILE BUILD---"
53 | echo $@
54 | @echo "---END MAKEFILE Build---"
55 |
56 | #### Sub-targets ####
57 |
58 | clean: common-clean # Clean generated files
59 | rm -rf ${OUT_DIR}
60 |
--------------------------------------------------------------------------------
/vm-provisioning/REUSE.toml:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | version = 1
5 |
6 | [[annotations]]
7 | path = [
8 | "VERSION",
9 | "go.sum",
10 | "**.md",
11 | "placeholder.txt",
12 | "images/*",
13 | ".vagrant/*",
14 | "docs/*",
15 | "out/*",
16 | "virbr*",
17 | "tools/yaml_validator/__pycache__/*",
18 | ]
19 |
20 | precedence = "aggregate"
21 | SPDX-FileCopyrightText = "(C) 2025 Intel Corporation"
22 | SPDX-License-Identifier = "Apache-2.0"
23 |
--------------------------------------------------------------------------------
/vm-provisioning/VERSION:
--------------------------------------------------------------------------------
1 | 1.1.0-dev
2 |
--------------------------------------------------------------------------------
/vm-provisioning/ansible/calculate_max_vms.yml:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | ---
5 | - name: Install specific packages, update apt cache, and verify applications
6 | hosts: all
7 | become: yes
8 | vars_files:
9 | - "{{ ansible_secret_file_path }}"
10 | tasks:
11 | - name: Copy the calculation script to the remote host
12 | copy:
13 | content: |
14 | #!/bin/bash
15 | # VM parameters
16 | VM_MEMORY=2048 # MB (2 GB)
17 | VM_CPUS=2 # Number of vCPUs assigned to each VM
18 | VM_DISK=45 # GB
19 | # Reservations for the host
20 | RESERVE_MEMORY_FOR_HOST=2048 # MB (2 GB)
21 | RESERVE_CPUS_FOR_HOST=2 # Number of CPU cores reserved for the host
22 | # Retrieve available resources
23 | TOTAL_CPUS=$(grep -c ^processor /proc/cpuinfo)
24 | TOTAL_RAM=$(free -m | awk '/^Mem:/{print $2}')
25 | FREE_DISK=$(df --output=avail --block-size=1G / | tail -n 1)
26 | # Calculate usable resources
27 | USABLE_CPUS=$((TOTAL_CPUS - RESERVE_CPUS_FOR_HOST))
28 | USABLE_RAM=$((TOTAL_RAM - RESERVE_MEMORY_FOR_HOST))
29 | # Calculate the maximum number of VMs
30 | MAX_VMS_CPU=$((USABLE_CPUS / VM_CPUS))
31 | MAX_VMS_RAM=$((USABLE_RAM / VM_MEMORY))
32 | MAX_VMS_DISK=$((FREE_DISK / VM_DISK))
33 | # Find the smallest value
34 | MAX_VMS=$((MAX_VMS_CPU < MAX_VMS_RAM ? MAX_VMS_CPU : MAX_VMS_RAM))
35 | MAX_VMS=$((MAX_VMS < MAX_VMS_DISK ? MAX_VMS : MAX_VMS_DISK))
36 | # Display the result
37 | echo "Total number of CPU cores (physical and virtual): $TOTAL_CPUS"
38 | echo "Available RAM (MB): $TOTAL_RAM"
39 | echo "Free Disk Space (GB): $FREE_DISK"
40 | echo "Usable CPU cores after reserving for host: $USABLE_CPUS"
41 | echo "Usable RAM (MB) after reserving for host: $USABLE_RAM"
42 | echo "Maximum VMs based on CPU: $MAX_VMS_CPU"
43 | echo "Maximum VMs based on RAM: $MAX_VMS_RAM"
44 | echo "Maximum VMs based on Disk: $MAX_VMS_DISK"
45 | echo "Overall Maximum Number of VMs: $MAX_VMS"
46 | dest: /tmp/calculate_max_vms.sh
47 | mode: '0755'
48 |
49 | - name: Execute the calculation script
50 | command: /tmp/calculate_max_vms.sh
51 | register: result
52 |
53 | - name: Display the results
54 | debug:
55 | msg: "{{ result.stdout_lines }}"
56 |
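57 | # Worked example for the calculation in /tmp/calculate_max_vms.sh above (illustrative):
58 | # on a host with 16 CPUs, 65536 MB of RAM and 500 GB of free disk, the usable resources are
59 | # 14 CPUs and 63488 MB of RAM, giving 7 VMs by CPU, 31 by RAM and 11 by disk, so the overall
60 | # maximum is 7 VMs.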
--------------------------------------------------------------------------------
/vm-provisioning/ansible/install_ansible.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 |
6 | # This script will install Ansible on an Ubuntu server.
7 |
8 | # Ensure the locale is set to UTF-8
9 | export LANG=C.UTF-8
10 | export LC_ALL=C.UTF-8
11 |
12 | # Update the system and install required packages
13 | echo "Updating the system and installing required packages..."
14 | sudo apt-get update
15 | sudo apt-get install -y software-properties-common
16 |
17 | # Add Ansible's official PPA (Personal Package Archive)
18 | echo "Adding Ansible's official PPA..."
19 | sudo apt-add-repository --yes --update ppa:ansible/ansible
20 |
21 | # Install Ansible
22 | echo "Installing Ansible..."
23 | sudo apt-get install -y ansible
24 |
25 | # Verify the installation
26 | ansible --version
27 |
28 | # Check if the locale is set to UTF-8
29 | locale
30 |
31 | echo "Ansible has been installed successfully and is ready to use."
32 |
--------------------------------------------------------------------------------
/vm-provisioning/ansible/install_vm_dependencies.yml:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | ---
5 | - name: Rsync
6 | hosts: localhost
7 | become: yes
8 | vars_files:
9 | - "{{ ansible_secret_file_path }}"
10 | tasks:
11 | - name: Remove directory on remote hosts
12 | ansible.builtin.file:
13 | path: "{{ hostvars[item].copy_path }}"
14 | state: absent
15 | loop: "{{ groups['all'] }}"
16 | when:
17 | - hostvars[item].ansible_host is defined
18 | - hostvars[item].ansible_user is defined
19 | - hostvars[item].copy_path is defined
20 | - item != 'localhost'
21 | delegate_to: "{{ item }}"
22 |
23 | - name: Synchronize directory from source to target machine using rsync
24 | shell: rsync -avzP -e ssh {{ ansible_vm_deploy_scripts }} "{{ hostvars[item].ansible_user }}@{{ hostvars[item].ansible_host }}:{{ hostvars[item].copy_path }}"
25 | loop: "{{ groups['all'] }}"
26 | when:
27 | - hostvars[item].ansible_host is defined
28 | - hostvars[item].ansible_user is defined
29 | - hostvars[item].copy_path is defined
30 | - item != 'localhost'
31 | delegate_to: localhost
32 |
33 | - name: Install specific packages, update apt cache, and verify applications
34 | hosts: all
35 | become: no
36 | vars_files:
37 | - "{{ ansible_secret_file_path }}"
38 | tasks:
39 |
40 | - name: Check if install_packages is set to zero for each host
41 | debug:
42 | msg: "install_packages is set to zero on {{ inventory_hostname }}. VM dependencies will not be installed on this host."
43 | when: install_packages == 0
44 |
45 | - name: Register hosts where install_packages is not zero
46 | set_fact:
47 | install_packages_valid: true
48 | when: install_packages != 0
49 |
50 | - name: End the play if install_packages is zero for all hosts
51 | meta: end_play
52 | when: groups['all'] | map('extract', hostvars, 'install_packages_valid') | select('defined') | length == 0
53 | ignore_errors: false
54 |
55 | - name: Execute the installation script on the remote host and save the log
56 | become: no
57 | expect:
58 | command: "sh -c 'cd {{ copy_path }} && ./install_packages.sh | tee {{ copy_path }}/installation_log.txt'"
59 | responses:
60 | '\[sudo\] password for .*:': "{{ ansible_become_pass }}"
61 | timeout: "{{ ansible_timeout_for_install_vm_dependencies }}"
62 | register: create_vms_output
63 | when: install_packages > 0
64 |
65 | - name: Display the installation log directly from the remote host
66 | command: cat "{{ copy_path }}/installation_log.txt"
67 | register: installation_log
68 | changed_when: false
69 | when: install_packages > 0
70 |
71 | - name: Show the installation log
72 | debug:
73 | var: installation_log.stdout_lines
74 | when: install_packages > 0
75 |
76 | - name: Verify installations and display versions
77 | shell: dpkg -l | grep -E 'qemu|libvirt-daemon-system|ebtables|libguestfs-tools|libxslt-dev|libxml2-dev'
78 | register: installed_apps
79 | ignore_errors: yes
80 | when: install_packages > 0
81 |
82 | - name: Display the list of installed applications and their versions
83 | debug:
84 | msg: "{{ installed_apps.stdout_lines }}"
85 | when: install_packages > 0
86 |
--------------------------------------------------------------------------------
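A minimal sketch of how this playbook might be invoked from the ansible/ directory, assuming the inventory and secret files shown below are used; the deploy-script path is illustrative and should point at the directories/files to rsync to each host's copy_path.

```bash
ansible-playbook -i inventory.yml install_vm_dependencies.yml \
  -e ansible_secret_file_path="$PWD/secret.yml" \
  -e ansible_vm_deploy_scripts="$PWD/.."   # illustrative: sources rsynced to each host
```
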
/vm-provisioning/ansible/inventory.yml:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | ---
5 |
6 | all:
7 | vars:
8 | # add the path before running the ansible playbook, e.g. ansible_vm_deploy_scripts: /home/intel/multivm_spawn_v7/
9 | # add the paths of all dependent files or folders, space separated, e.g. ansible_vm_deploy_scripts: /home/intel/folder1 /home/intel/folder2 /home/intel/file1 /home/intel/file2
10 | ansible_vm_deploy_scripts: ""
11 | ansible_secret_file_path: "" # add path before run ansible playbook e.g. /home/intel/ansible/secret.yml
12 | ansible_timeout_for_create_vm_script: 14400 # The value is provided in seconds
13 | ansible_timeout_for_install_vm_dependencies: 6000 # The value is provided in seconds
14 | hosts:
15 | host1:
16 | ansible_host: # add correct ip address e.g. 10.49.76.113
17 | ansible_user: # add the correct SSH user e.g. intel
18 | ansible_password: "{{ host1_sudo_password }}"
19 | ansible_become: yes
20 | ansible_become_pass: "{{ host1_sudo_password }}"
21 | ansible_become_method: sudo
22 | ansible_become_user: root
23 | copy_path: "/home/{{ ansible_user }}/ansible_scripts"
24 | number_of_vms: 0
25 | install_packages: 0 # non-zero value installs packages on the remote host
26 | nio_flow: # true for NIO flow, false for IO_flow
27 | host2:
28 | ansible_host: # add correct ip address e.g. 10.49.76.157
29 | ansible_user: # add the correct SSH user e.g. intel
30 | ansible_password: "{{ host2_sudo_password }}"
31 | ansible_become: yes
32 | ansible_become_pass: "{{ host2_sudo_password }}"
33 | ansible_become_method: sudo
34 | ansible_become_user: root
35 | copy_path: "/home/{{ ansible_user }}/ansible_scripts"
36 | number_of_vms: 0
37 | install_packages: 0 # non-zero value installs packages on the remote host
38 | nio_flow: # true for NIO flow, false for IO_flow
39 | host3:
40 | ansible_host: # add correct ip address e.g. 10.49.76.140
41 | ansible_user: # add the correct SSH user e.g. intel
42 | ansible_password: "{{ host3_sudo_password }}"
43 | ansible_become: yes
44 | ansible_become_pass: "{{ host3_sudo_password }}"
45 | ansible_become_method: sudo
46 | ansible_become_user: root
47 | copy_path: "/home/{{ ansible_user }}/ansible_scripts"
48 | number_of_vms: 0
49 | install_packages: 0 # non-zero value installs packages on the remote host
50 | nio_flow: # true for NIO flow, false for IO_flow
51 | host4:
52 | ansible_host: # add correct ip address e.g. 10.49.76.159
53 | ansible_user: # add the correct SSH user e.g. intel
54 | ansible_password: "{{ host4_sudo_password }}"
55 | ansible_become: yes
56 | ansible_become_pass: "{{ host4_sudo_password }}"
57 | ansible_become_method: sudo
58 | ansible_become_user: root
59 | copy_path: "/home/{{ ansible_user }}/ansible_scripts"
60 | number_of_vms: 0
61 | install_packages: 0 # non-zero value installs packages on the remote host
62 | nio_flow: # true for NIO flow, false for IO_flow
63 | host5:
64 | ansible_host: # add correct ip address e.g. 10.49.76.160
65 | ansible_user: # add the correct SSH user e.g. intel
66 | ansible_password: "{{ host5_sudo_password }}"
67 | ansible_become: yes
68 | ansible_become_pass: "{{ host5_sudo_password }}"
69 | ansible_become_method: sudo
70 | ansible_become_user: root
71 | copy_path: "/home/{{ ansible_user }}/ansible_scripts"
72 | number_of_vms: 0
73 | install_packages: 0 # non-zero value installs packages on the remote host
74 | nio_flow: # true for NIO flow, false for IO_flow
75 |
--------------------------------------------------------------------------------
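Once the host entries are filled in, a quick sanity check of the inventory and host reachability can be done with the standard Ansible CLI; this is only a suggested check (it assumes sshpass or key-based access is already in place), not part of the repository.

```bash
# Render the resolved inventory (confirms structure and per-host vars).
ansible-inventory -i inventory.yml --list

# Ping every host using the passwords defined in secret.yml.
ansible all -i inventory.yml -e @secret.yml -m ping
```
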
/vm-provisioning/ansible/secret.yml:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | ---
5 |
6 | host1_sudo_password: "" # add sudo password for host1
7 | host2_sudo_password: "" # add sudo password for host2
8 | host3_sudo_password: "" # add sudo password for host3
9 | host4_sudo_password: "" # add sudo password for host4
10 | host5_sudo_password: "" # add sudo password for host5
11 |
12 | # IO Configurations
13 | ONBOARDING_USERNAME: "actual_onboard_user"
14 | ONBOARDING_PASSWORD: "actual_onboard_password"
15 |
16 | # NIO Configurations
17 | PROJECT_NAME: "your-project-name"
18 | PROJECT_API_USER: "actual_api_user"
19 | PROJECT_API_PASSWORD: "actual_api_password"
20 |
--------------------------------------------------------------------------------
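Because this file holds sudo and API credentials in plain text, one option (a suggestion, not something the playbooks enforce) is to keep it encrypted with Ansible Vault and decrypt it only at run time:

```bash
ansible-vault encrypt secret.yml                     # encrypt the secrets at rest
ansible-playbook -i inventory.yml ssh_key_setup.yml \
  -e ansible_secret_file_path="$PWD/secret.yml" --ask-vault-pass
```
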
/vm-provisioning/ansible/show_vms_data.yml:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | ---
5 |
6 | - name: Run Show_vms_data script on Ansible controller
7 | hosts: localhost
8 | become: yes
9 | vars_files:
10 | - "{{ ansible_secret_file_path }}"
11 | tasks:
12 | - name: Gather list of servers
13 | set_fact:
14 | servers: >
15 | {%- set server_list = [] -%}
16 | {%- for host in groups['all'] -%}
17 | {%- set _ = server_list.append({'user': hostvars[host]['ansible_user'], 'host': hostvars[host]['ansible_host'], 'copy_path': hostvars[host]['copy_path']}) -%}
18 | {%- endfor -%}
19 | {{ server_list }}
20 |
21 | - name: Copy Show_vms_data script to controller
22 | copy:
23 | content: |
24 | #!/bin/bash
25 |
26 | # Define the SSH connections for each server
27 | servers=(
28 | {% for server in servers %}
29 | "{{ server.user }}@{{ server.host }} {{ server.copy_path }}"
30 | {% endfor %}
31 | )
32 |
33 | # Create the temporary log directory if it doesn't exist
34 | mkdir -p {{ playbook_dir }}/logs
35 |
36 | # Remove older log files before collecting new logs
37 | rm -f {{ playbook_dir }}/logs/logfile_*.log
38 | rm -f {{ playbook_dir }}/logs/merged_log.log
39 |
40 | # Start background jobs to collect logs
41 | for server in "${servers[@]}"; do
42 | user=$(echo $server | cut -d' ' -f1 | cut -d'@' -f1)
43 | hostname=$(echo $server | cut -d' ' -f1 | cut -d'@' -f2)
44 | copy_path=$(echo $server | cut -d' ' -f2)
45 | octet=$(echo $hostname | awk -F. '{print $4}')
46 | logFile="$copy_path/logs/master_log_$octet.log"
47 | tempLog="{{ playbook_dir }}/logs/logfile_$octet.log"
48 |
49 | ssh $user@$hostname "tail -f $logFile" > $tempLog &
50 | done
51 |
52 | # Continuously merge the collected logs into a single output
53 | while true; do
54 | cat {{ playbook_dir }}/logs/logfile_*.log > {{ playbook_dir }}/logs/merged_log.log
55 | sleep 1
56 | done
57 | dest: /tmp/Show_vms_data.sh
58 | mode: '0755'
59 |
60 | - name: Run Show_vms_data script
61 | shell: /tmp/Show_vms_data.sh
62 | async: 3600  # must be non-zero so the task runs in the background and exposes ansible_job_id
63 | poll: 0
64 | register: show_vms_data_job
65 |
66 | - name: Wait for Show_vms_data script to finish
67 | async_status:
68 | jid: "{{ show_vms_data_job.ansible_job_id }}"
69 | register: job_result
70 | until: job_result.finished
71 | retries: 30
72 | delay: 10
73 |
--------------------------------------------------------------------------------
/vm-provisioning/ansible/ssh_key_setup.yml:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | ---
5 | - name: Set up SSH keys on localhost and copy them to remote hosts
6 | hosts: localhost
7 | gather_facts: yes
8 | vars_files:
9 | - "{{ ansible_secret_file_path }}"
10 | tasks:
11 | - name: Generate SSH key on localhost (if not exists)
12 | ansible.builtin.openssh_keypair:
13 | path: "~/.ssh/id_rsa"
14 | size: 2048
15 | state: present
16 | type: rsa
17 |
18 | - name: Install sshpass (required for ssh-copy-id with password)
19 | ansible.builtin.apt:
20 | name: sshpass
21 | state: present
22 | when: ansible_os_family == "Debian"
23 |
24 | - name: Display the results
25 | debug:
26 | msg: "{{ lookup('vars', item + '_sudo_password') }}"
27 | loop: "{{ groups['all'] }}"
28 |
29 | - name: Copy SSH key to remote hosts
30 | ansible.builtin.shell: |
31 | sshpass -p "{{ lookup('vars', item + '_sudo_password') }}" ssh-copy-id -i ~/.ssh/id_rsa.pub -o StrictHostKeyChecking=no "{{ hostvars[item].ansible_user }}"@"{{ hostvars[item].ansible_host }}"
32 | loop: "{{ groups['all'] }}"
33 | no_log: true
34 |
--------------------------------------------------------------------------------
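A minimal sketch of running the key-setup play and verifying the result, assuming the inventory and secret files shown above; the address and user below are the illustrative values from the inventory comments.

```bash
ansible-playbook -i inventory.yml ssh_key_setup.yml \
  -e ansible_secret_file_path="$PWD/secret.yml"

# Verify password-less login to one of the hosts (illustrative address/user).
ssh -o BatchMode=yes intel@10.49.76.113 true && echo "key-based login works"
```
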
/vm-provisioning/certs/Full_server.crt:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 | -----BEGIN CERTIFICATE-----
4 | YOUR CERTIFICATE
5 | -----END CERTIFICATE-----
6 |
--------------------------------------------------------------------------------
/vm-provisioning/config:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | # Standalone mode
5 | # The default value is set to 1. If it is set to 0, the user has control over
6 | # the POOL_NAME and BRIDGE_NAME (Libvirt network interface).
7 | STANDALONE=1
8 |
9 | CLUSTER="kind.internal"
10 |
11 | # VM Resources
12 | RAM_SIZE=8192
13 | NO_OF_CPUS=4
14 | SDA_DISK_SIZE="110G"
15 | LIBVIRT_DRIVER="kvm"
16 |
17 | # Provisioned VM User credentials
18 | USERNAME="user"
19 | PASSWORD="user"
20 | CI_CONFIG="false"
21 |
22 | # Optional: Advanced Settings
23 | INTF_NAME=""
24 | VM_NAME=""
25 | # The following configurations apply only if STANDALONE is set to 0
26 | # If STANDALONE is set to 1, POOL_NAME will be changed to 'default'
27 | POOL_NAME="default"
28 | # The name of the libvirt network interface
29 | BRIDGE_NAME=""
30 |
--------------------------------------------------------------------------------
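As a sketch of the non-standalone case described in the comments above, the same file could be edited to hand control of the pool and bridge to the user; the names below are illustrative and must match resources created on the host (for example via scripts/ci_network_bridge.sh).

```bash
# config overrides for the non-standalone case (illustrative values)
STANDALONE=0
POOL_NAME="ven-pool"      # libvirt storage pool managed by the user
BRIDGE_NAME="ven-br0"     # existing libvirt network, e.g. created with ci_network_bridge.sh create
```
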
/vm-provisioning/docs/02-ven-design.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-edge-platform/virtual-edge-node/e14cf1632d88da9bb8fa3f499c4c025d7b634efd/vm-provisioning/docs/02-ven-design.png
--------------------------------------------------------------------------------
/vm-provisioning/docs/03-NW-Diagram1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-edge-platform/virtual-edge-node/e14cf1632d88da9bb8fa3f499c4c025d7b634efd/vm-provisioning/docs/03-NW-Diagram1.png
--------------------------------------------------------------------------------
/vm-provisioning/docs/04-NW-Diagram2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-edge-platform/virtual-edge-node/e14cf1632d88da9bb8fa3f499c4c025d7b634efd/vm-provisioning/docs/04-NW-Diagram2.png
--------------------------------------------------------------------------------
/vm-provisioning/docs/Ansible-calculate-max-vms.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-edge-platform/virtual-edge-node/e14cf1632d88da9bb8fa3f499c4c025d7b634efd/vm-provisioning/docs/Ansible-calculate-max-vms.png
--------------------------------------------------------------------------------
/vm-provisioning/docs/Ansible-nio-flow-flag.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-edge-platform/virtual-edge-node/e14cf1632d88da9bb8fa3f499c4c025d7b634efd/vm-provisioning/docs/Ansible-nio-flow-flag.png
--------------------------------------------------------------------------------
/vm-provisioning/docs/Ansible-ssh-dir.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-edge-platform/virtual-edge-node/e14cf1632d88da9bb8fa3f499c4c025d7b634efd/vm-provisioning/docs/Ansible-ssh-dir.png
--------------------------------------------------------------------------------
/vm-provisioning/docs/Ansible-ssh-key-setup.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-edge-platform/virtual-edge-node/e14cf1632d88da9bb8fa3f499c4c025d7b634efd/vm-provisioning/docs/Ansible-ssh-key-setup.png
--------------------------------------------------------------------------------
/vm-provisioning/docs/Ansible-user-logs.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-edge-platform/virtual-edge-node/e14cf1632d88da9bb8fa3f499c4c025d7b634efd/vm-provisioning/docs/Ansible-user-logs.png
--------------------------------------------------------------------------------
/vm-provisioning/docs/Microvisor_Provision.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-edge-platform/virtual-edge-node/e14cf1632d88da9bb8fa3f499c4c025d7b634efd/vm-provisioning/docs/Microvisor_Provision.png
--------------------------------------------------------------------------------
/vm-provisioning/docs/Security_feature.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-edge-platform/virtual-edge-node/e14cf1632d88da9bb8fa3f499c4c025d7b634efd/vm-provisioning/docs/Security_feature.png
--------------------------------------------------------------------------------
/vm-provisioning/docs/UbuntuOS_Provision.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-edge-platform/virtual-edge-node/e14cf1632d88da9bb8fa3f499c4c025d7b634efd/vm-provisioning/docs/UbuntuOS_Provision.png
--------------------------------------------------------------------------------
/vm-provisioning/docs/provider_config.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-edge-platform/virtual-edge-node/e14cf1632d88da9bb8fa3f499c4c025d7b634efd/vm-provisioning/docs/provider_config.png
--------------------------------------------------------------------------------
/vm-provisioning/images/arch_simplified.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-edge-platform/virtual-edge-node/e14cf1632d88da9bb8fa3f499c4c025d7b634efd/vm-provisioning/images/arch_simplified.jpg
--------------------------------------------------------------------------------
/vm-provisioning/images/kvm_check.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-edge-platform/virtual-edge-node/e14cf1632d88da9bb8fa3f499c4c025d7b634efd/vm-provisioning/images/kvm_check.png
--------------------------------------------------------------------------------
/vm-provisioning/images/rate_limit_argo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-edge-platform/virtual-edge-node/e14cf1632d88da9bb8fa3f499c4c025d7b634efd/vm-provisioning/images/rate_limit_argo.png
--------------------------------------------------------------------------------
/vm-provisioning/images/vm_arch.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-edge-platform/virtual-edge-node/e14cf1632d88da9bb8fa3f499c4c025d7b634efd/vm-provisioning/images/vm_arch.png
--------------------------------------------------------------------------------
/vm-provisioning/requirements.txt:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | # YAML linter
5 | yamllint~=1.35.1
6 |
7 | # license checking
8 | python-debian==0.1.44
9 |
10 | # license check
11 | reuse~=5.0.2
12 |
13 | PyYAML>=5.3
14 |
15 |
--------------------------------------------------------------------------------
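These are lint and license-check dependencies; a typical local run (a suggestion, not a documented Make target) would look like this:

```bash
python3 -m venv .venv && . .venv/bin/activate
pip install -r requirements.txt
yamllint .        # YAML style checks
reuse lint        # SPDX/license compliance checks
```
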
/vm-provisioning/scripts/ci_network_bridge.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | source "${PWD}/config"
6 |
7 | # Define the storage pool path
8 | STORAGE_POOL_PATH="/var/lib/libvirt/images/${POOL_NAME}"
9 |
10 | # Function to create network and storage pool
11 | create_resources() {
12 | # Create the network XML configuration
13 | cat < "${BRIDGE_NAME}.xml"
14 |
15 | ${BRIDGE_NAME}
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 | EOF
34 |
35 | # Define and start the new network
36 | virsh net-define "${BRIDGE_NAME}.xml"
37 | virsh net-start "${BRIDGE_NAME}"
38 | virsh net-autostart "${BRIDGE_NAME}"
39 |
40 | # Clean up the network XML file
41 | rm -f "${BRIDGE_NAME}.xml"
42 |
43 | # Create the storage pool directory
44 | sudo mkdir -p "${STORAGE_POOL_PATH}"
45 |
46 | # Create the storage pool XML configuration
47 | cat < "${POOL_NAME}.xml"
48 |
49 | ${POOL_NAME}
50 |
51 | ${STORAGE_POOL_PATH}
52 |
53 |
54 | EOF
55 |
56 | # Define, build, and start the storage pool
57 | virsh pool-define "${POOL_NAME}.xml"
58 | virsh pool-build "${POOL_NAME}"
59 | virsh pool-start "${POOL_NAME}"
60 | virsh pool-autostart "${POOL_NAME}"
61 |
62 | # Clean up the storage pool XML file
63 | rm -f "${POOL_NAME}.xml"
64 |
65 | # List all networks and storage pools
66 | virsh net-list --all
67 | virsh pool-list --all
68 |
69 | echo "Network '${BRIDGE_NAME}' and storage pool '${POOL_NAME}' created and started successfully."
70 | }
71 |
72 | # Function to destroy network and storage pool
73 | destroy_resources() {
74 | # Destroy and undefine the network
75 | virsh net-destroy "${BRIDGE_NAME}" 2>/dev/null
76 | virsh net-undefine "${BRIDGE_NAME}" 2>/dev/null
77 |
78 | # Destroy and undefine the storage pool
79 | virsh pool-destroy "${POOL_NAME}" 2>/dev/null
80 | virsh pool-undefine "${POOL_NAME}" 2>/dev/null
81 |
82 | # Remove the storage pool directory
83 | sudo rm -rf "${STORAGE_POOL_PATH}"
84 |
85 | # List all networks and storage pools
86 | virsh net-list --all
87 | virsh pool-list --all
88 |
89 | echo "Network '${BRIDGE_NAME}' and storage pool '${POOL_NAME}' destroyed successfully."
90 | }
91 |
92 | # Main script logic
93 | case "$1" in
94 | create)
95 | create_resources
96 | ;;
97 | destroy)
98 | destroy_resources
99 | ;;
100 | *)
101 | echo "Usage: $0 {create|destroy}"
102 | exit 1
103 | ;;
104 | esac
105 |
--------------------------------------------------------------------------------
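Usage follows the case statement at the end of the script; with STANDALONE=0 and POOL_NAME/BRIDGE_NAME set in the config file, the resources can be created and torn down like this:

```bash
./scripts/ci_network_bridge.sh create    # define and start the network and storage pool
virsh net-list --all && virsh pool-list --all
./scripts/ci_network_bridge.sh destroy   # remove them again
```
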
/vm-provisioning/scripts/common_vars.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | # Assign value to common variables
6 | export network_xml_file="${PWD}/orch_network.xml"
7 | export BOOT_PATH="/var/lib/libvirt/boot"
8 | export BOOT_IMAGE="/var/lib/libvirt/images"
9 | export OVMF_PATH="/usr/share/OVMF"
10 | export log_file="out/logs/console.log"
--------------------------------------------------------------------------------
/vm-provisioning/scripts/create_new_user.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | # Function to create a new user
6 | create_user() {
7 | local username=$1
8 | local password=$2
9 |
10 | # Create the user with a home directory and bash shell
11 | sudo useradd -m -s /bin/bash "$username"
12 |
13 | # Set the user's password
14 | echo "$username:$password" | sudo chpasswd
15 |
16 | # Add the user to the specified groups
17 | sudo usermod -aG sudo,kvm,docker,libvirt "$username"
18 |
19 | # Verify the user's group membership
20 | groups "$username"
21 |
22 | echo "User $username has been created and added to the specified groups."
23 | }
24 |
25 | # Function to run a script independently using nohup and disown
26 | run_script_independently() {
27 | local username=$1
28 | local script_path=$2
29 |
30 | # Switch to the user's home directory
31 | sudo -u "$username" bash -c "cd ~ && nohup $script_path & disown"
32 | }
33 |
34 | # Main script
35 | read -r -p "Enter the username to create: " username
36 | read -r -sp "Enter the password for the new user: " password
37 | echo
38 |
39 | # Create the user
40 | create_user "$username" "$password"
41 | echo "User creation and setup complete."
42 |
--------------------------------------------------------------------------------
/vm-provisioning/scripts/destroy_vm.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | source "${PWD}/config"
6 | # Assign arguments to variables
7 | source "${PWD}/scripts/common_vars.sh"
8 | source "${PWD}/scripts/network_file_backup_restore.sh"
9 |
10 | # List all VMs and filter those starting with "vm-provisioning"
11 | VM_PREFIX="vm-provisioning"
12 | VM_LIST=$(virsh list --all --name | grep "^${VM_PREFIX}")
13 |
14 | # Check if any VMs were found
15 | if [ -z "$VM_LIST" ]; then
16 | echo "No VMs found with prefix '${VM_PREFIX}'."
17 | exit 0
18 | fi
19 | pkill -9 minicom || true
20 | # Iterate over each VM and delete it
21 | for vm_name in $VM_LIST; do
22 | nw_name=$(virsh domiflist "$vm_name" | sed -n '3p' | awk '{print $3}')
23 | nw_names+=("$nw_name")
24 | echo "Processing VM: $vm_name"
25 | # Destroy the VM if it is running
26 | if virsh list --name | grep -q "^${vm_name}$"; then
27 | echo "Destroying VM: $vm_name"
28 | virsh destroy "$vm_name"
29 | fi
30 | # Undefine the VM, including NVRAM if applicable
31 | echo "Undefining VM: $vm_name"
32 | virsh undefine "$vm_name" --nvram
33 | done
34 | if [ -n "$STANDALONE" ]; then
35 | echo "standalone mode $STANDALONE"
36 | restore_network_file "${nw_names[@]}"
37 | fi
38 | sudo rm -rf /tmp/console*.sock
39 | sudo find "${BOOT_IMAGE}"/ -name 'vm-provisioning*' -exec rm -rf {} +
40 | sudo ls -l "${BOOT_IMAGE}"/
41 | virsh list --all
42 | virsh net-list --all
43 |
--------------------------------------------------------------------------------
/vm-provisioning/scripts/host_status_check.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | set -eu
6 | # Load configuration
7 | source "${PWD}/config"
8 | source "${PWD}/scripts/nio_configs.sh"
9 |
10 | # Function to display usage information
11 | usage() {
12 | echo "Usage: $0 "
13 | echo "Check the status of hosts with the specified serial number."
14 | echo
15 | echo "Arguments:"
16 | echo " The serial number of the host to check."
17 | exit 1
18 | }
19 |
20 | # Check if serial number is provided
21 | EN_SERIAL_NO=${1:-}  # default to empty so 'set -u' does not abort before the usage check
22 | if [ -z "$EN_SERIAL_NO" ]; then
23 | echo "ERROR: Serial number argument is required."
24 | usage
25 | fi
26 |
27 | # Obtain JWT token
28 | JWT_TOKEN=$(curl -s -k -X POST \
29 | "https://keycloak.${CLUSTER}/realms/master/protocol/openid-connect/token" \
30 | -H "Content-Type: application/x-www-form-urlencoded" \
31 | --data-urlencode "username=${PROJECT_API_USER}" \
32 | --data-urlencode "password=${PROJECT_API_PASSWORD}" \
33 | --data-urlencode "grant_type=password" \
34 | --data-urlencode "client_id=system-client" \
35 | --data-urlencode "scope=openid" \
36 | --fail-with-body | jq -r '.access_token')
37 |
38 | if [ -z "$JWT_TOKEN" ] || [ "$JWT_TOKEN" == "null" ]; then
39 | echo "Error: Failed to retrieve JWT token"
40 | exit 1
41 | else
42 | echo "JWT token retrieved successfully"
43 | fi
44 |
45 | # Function to check host status
46 | function host_status() {
47 | while true; do
48 | curl --noproxy "*" --location \
49 | "https://api.${CLUSTER}/v1/projects/${PROJECT_NAME}/compute/hosts" \
50 | -H 'Accept: application/json' -H "Authorization: Bearer $JWT_TOKEN" | jq '.' > host.json || true
51 | index_len=$(jq '.hosts[].uuid' host.json | wc -l)
52 | index_len=$((index_len - 1))
53 | rm -rf host-list;touch host-list
54 | for i in $(seq 0 $index_len); do
55 | if jq -r "[.hosts[]][${i}].serialNumber" host.json | grep -q "$EN_SERIAL_NO"; then
56 | host_id=$(jq -r "[.hosts[]][${i}].resourceId" host.json)
57 | instance_id=$(jq -r "[.hosts[]][${i}].instance.instanceID" host.json)
58 | get_guid=$(jq -r "[.hosts[]][${i}].uuid" host.json)
59 | sn_no=$(jq -r "[.hosts[]][${i}].serialNumber" host.json)
60 | host_status=$(jq -r "[.hosts[]][${i}].hostStatus" host.json)
61 | os_name=$(jq -r "[.hosts[]][${i}].instance.desiredOs.name" host.json)
62 | image_url=$(jq -r "[.hosts[]][${i}].instance.desiredOs.imageUrl" host.json)
63 | echo "$host_id,$os_name,$image_url,$instance_id,$sn_no,$get_guid,$host_status" >> host-list
64 | fi
65 | done
66 |
67 | host_running=$(grep -c "Running" host-list || true)
68 | total_host=$(wc -l < host-list)
69 |
70 | if [ "$host_running" -eq 0 ]; then
71 | echo "ERROR: No hosts with 'Running' status found for serial no $EN_SERIAL_NO."
72 | cat host-list
73 | else
74 | echo "Total No of onboarded hosts starting with serial no $EN_SERIAL_NO = $total_host"
75 | grep "Running" host-list || true
76 | grep -v "Running" host-list || true
77 | {
78 | echo "VEN_OS_NAME=$os_name"
79 | echo "VEN_IMAGE_URL=$image_url"
80 | echo "VEN_EN_SERIAL_NO=$sn_no"
81 | echo "VEN_EN_UUID=$get_guid"
82 | echo "VEN_EN_STATUS=$host_status"
83 | } > VEN_EN_INFO
84 | cat VEN_EN_INFO
85 | exit 0
86 | fi
87 | sleep 10
88 | done
89 | }
90 |
91 | # Execute the host status function
92 | host_status
93 |
--------------------------------------------------------------------------------
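A sketch of how the status check might be run, assuming the PROJECT_* values are supplied via nio_configs.sh (it prompts for anything unset); the serial number below is illustrative and follows the pattern used in the Vagrant template.

```bash
./scripts/host_status_check.sh VH000N000M01
# On success the script writes a VEN_EN_INFO file containing VEN_OS_NAME, VEN_IMAGE_URL,
# VEN_EN_SERIAL_NO, VEN_EN_UUID and VEN_EN_STATUS, then exits 0.
```
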
/vm-provisioning/scripts/io_configs.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | # IO Flow Configurations
6 | # Check if ONBOARDING_USERNAME is set, otherwise prompt the user
7 | if [ -z "${ONBOARDING_USERNAME}" ]; then
8 | read -rp "Enter onboarding username: " ONBOARDING_USERNAME
9 | fi
10 |
11 | # Check if ONBOARDING_PASSWORD is set, otherwise prompt the user
12 | if [ -z "${ONBOARDING_PASSWORD}" ]; then
13 | read -rsp "Enter onboarding password: " ONBOARDING_PASSWORD
14 | echo
15 | fi
16 |
17 | # Export the variables for use in the script
18 | export USERNAME_HOOK="${ONBOARDING_USERNAME}"
19 | export PASSWORD_HOOK="${ONBOARDING_PASSWORD}"
20 |
--------------------------------------------------------------------------------
/vm-provisioning/scripts/network_file_backup_restore.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | #set -x
6 |
7 | # Assign arguments to variables
8 | source "${PWD}/config"
9 |
10 | function backup_network_file() {
11 | if [ -n "$BRIDGE_NAME" ]; then
12 | virsh net-list --all
13 | check_nw_int=$(sudo virsh net-list --all | awk '{print $1}' | grep -w "$BRIDGE_NAME")
14 | if [ -n "$check_nw_int" ]; then
15 | # Export the network configuration to an XML file
16 | virsh net-dumpxml "$BRIDGE_NAME" > "${BRIDGE_NAME}.xml"
17 | sudo cp "${BRIDGE_NAME}.xml" "${BRIDGE_NAME}".xml_bkp
18 | echo "Network file $BRIDGE_NAME copied to ${PWD}/${BRIDGE_NAME}.xml"
19 | # This variable is declared in create_vm.sh
20 | # shellcheck disable=SC2034
21 | network_xml_file="${PWD}/${BRIDGE_NAME}.xml"
22 | else
23 | echo "Network $BRIDGE_NAME does not exist, create the network with name $BRIDGE_NAME"
24 | exit 1
25 | fi
26 | fi
27 | }
28 |
29 | function restore_network_file() {
30 | if [ -n "$BRIDGE_NAME" ]; then
31 | sudo virsh net-destroy "$BRIDGE_NAME"
32 | sudo virsh net-undefine "$BRIDGE_NAME"
33 |
34 | sudo virsh net-define "${BRIDGE_NAME}".xml_bkp
35 | sudo virsh net-start "${BRIDGE_NAME}"
36 | sudo systemctl restart libvirtd
37 | sudo systemctl daemon-reload
38 | echo "Successfully reset the $BRIDGE_NAME with backup file"
39 | else
40 | # shellcheck disable=SC2119
41 | nw_names=("$@")
42 | for nw in "${nw_names[@]}"; do
43 | virsh net-destroy "$nw"
44 | virsh net-undefine "$nw"
45 | done
46 |
47 | fi
48 | }
49 |
50 |
--------------------------------------------------------------------------------
/vm-provisioning/scripts/nio_configs.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | # # NIO Flow Configurations
6 |
7 | # Check if PROJECT_API_USER is set, otherwise prompt the user
8 | if [ -z "${PROJECT_API_USER}" ]; then
9 | read -rp "Enter Project API Username: " PROJECT_API_USER
10 | fi
11 |
12 | # Check if PROJECT_API_PASSWORD is set, otherwise prompt the user
13 | if [ -z "${PROJECT_API_PASSWORD}" ]; then
14 | read -rsp "Enter Project API Password: " PROJECT_API_PASSWORD
15 | echo
16 | fi
17 |
18 | # Prompt for PROJECT_NAME, use default if not provided
19 | if [ -z "${PROJECT_NAME}" ]; then
20 | read -rp "Enter Project Name (default: infra-proj-1): " PROJECT_NAME
21 | fi
22 |
23 | # Export the variables for use in the script
24 | export PROJECT_API_USER="${PROJECT_API_USER}"
25 | export PROJECT_API_PASSWORD="${PROJECT_API_PASSWORD}"
26 | export PROJECT_NAME="${PROJECT_NAME:-infra-proj-1}"  # fall back to the default named in the prompt
--------------------------------------------------------------------------------
/vm-provisioning/scripts/nio_flow_host_config.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | source "${PWD}/scripts/nio_flow_validation.sh"
6 | source "${PWD}/scripts/common_vars.sh"
7 |
8 | cluster_fqdn=$CLUSTER
9 | project_name=${PROJECT_NAME:-default_project_name}
10 | num_vms=$1
11 | # Initialize the counter
12 | count=0
13 |
14 | serial_number=$2
15 |
16 | echo "Checking for serial number in $log_file..."
17 |
18 | # Get jwt token
19 | JWT_TOKEN=$(get_jwt_token)
20 | if [ -z "$JWT_TOKEN" ]; then
21 | echo 'FAIL="ERROR: JWT Token is required!"' >> "${log_file}"
22 | fi
23 |
24 | function process_serial_number()
25 | {
26 | serial_number=$1
27 | host_data=$(curl -X POST -H 'Accept: application/json' -H "Authorization: Bearer ${JWT_TOKEN}" --data "{\"name\":\"${serial_number}\",\"serialNumber\":\"${serial_number}\",\"autoOnboard\": true}" --header "Content-Type: application/json" "https://api.${cluster_fqdn}/v1/projects/${project_name}/compute/hosts/register" --insecure)
28 |
29 | # echo "host_data: $host_data"
30 | host_status=$(echo "$host_data" | jq -r '.hostStatus')
31 | resource_id=$(echo "$host_data" | jq -r '.resourceId')
32 |
33 | if [ -z "$host_status" ]; then
34 | echo "INFO: Host is created with Resource ID: $resource_id" >> "${log_file}"
35 | else
36 | echo "FAIL=\"ERROR: $host_data\"" >> "${log_file}"
37 | fi
38 | }
39 |
40 | function validate_serial() {
41 | local serial="$1"
42 | if [[ ! "$serial" =~ ^[A-Za-z0-9]{5,20}$ ]]; then
43 | echo "Error: Invalid serial '$serial'. Must be 5-20 alphanumeric chars."
44 | return 1
45 | fi
46 | return 0
47 | }
48 |
49 | if [[ $serial_number ]]; then
50 | # Split into an array
51 | IFS=',' read -ra serial_array <<< "$serial_number"
52 |
53 | for serial in "${serial_array[@]}"; do
54 | validate_serial "$serial"
55 | done
56 |
57 | # Loop through each serial number
58 | for serial in "${serial_array[@]}"; do
59 | process_serial_number "$serial"
60 | done
61 | else
62 | # Check if the log file exists
63 | tail -f "$log_file" | while read -r line; do
64 | # Extract serial numbers from the line
65 | if [[ $line =~ serial=([^,]+), ]]; then
66 | serial_number="${BASH_REMATCH[1]}"
67 | count=$((count + 1))
68 | echo "seial number found #${count}: ${serial_number}"
69 | process_serial_number "$serial_number"
70 | if [ "$count" -eq "$num_vms" ]; then
71 | echo "serial number generated for all VMs"
72 | exit 0
73 | fi
74 | fi
75 | done
76 | fi
77 |
--------------------------------------------------------------------------------
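The script takes the number of VMs as its first argument and, optionally, a comma-separated list of serial numbers as its second; a sketch of both modes (the serial values are illustrative):

```bash
# Register two explicitly provided serial numbers
./scripts/nio_flow_host_config.sh 2 VH000N000M01,VH000N000M02

# Or let the script tail the console log and register serials as they appear
./scripts/nio_flow_host_config.sh 2
```
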
/vm-provisioning/scripts/nio_flow_validation.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
3 | # SPDX-License-Identifier: Apache-2.0
4 | #set -x
5 | source ./config
6 | source "${PWD}/scripts/nio_configs.sh"
7 |
8 | cluster_fqdn=$CLUSTER
9 | project_name=${PROJECT_NAME:-default_project_name}
10 | api_user=${PROJECT_API_USER:-api_user}
11 | api_password=${PROJECT_API_PASSWORD:-default_api_password}
12 |
13 | function get_jwt_token() {
14 | JWT_TOKEN=$(curl --location --insecure --request POST "https://keycloak.${cluster_fqdn}/realms/master/protocol/openid-connect/token" \
15 | --header 'Content-Type: application/x-www-form-urlencoded' \
16 | --data-urlencode 'grant_type=password' \
17 | --data-urlencode 'client_id=system-client' \
18 | --data-urlencode "username=${api_user}" \
19 | --data-urlencode "password=${api_password}" \
20 | --data-urlencode 'scope=openid profile email groups' | jq -r '.access_token')
21 |
22 | if [ -z "$JWT_TOKEN" ] || [ "$JWT_TOKEN" == "null" ]; then
23 | echo "ERROR: Failed to obtain JWT Token"
24 | exit 1
25 | fi
26 |
27 | echo "$JWT_TOKEN"
28 | }
29 |
30 | function does_project_exist() {
31 | JWT_TOKEN=$(get_jwt_token)
32 | echo "JWT Token: ${JWT_TOKEN}"
33 | proj_name=$(curl -X GET -H 'Accept: application/json' -H "Authorization: Bearer ${JWT_TOKEN}" --header "Content-Type: application/json" "https://api.${cluster_fqdn}/v1/projects/${project_name}" | jq -r .spec.description)
34 |
35 | echo "Project name:$project_name"
36 | if [ -z "$proj_name" ] || [ "$proj_name" == "null" ]; then
37 | echo "ERROR: Provided Project name does not exist"
38 | exit 1
39 | else
40 | echo "Project name $proj_name exist"
41 | fi
42 | }
43 |
44 | #does_project_exist
45 | #set +x
46 |
--------------------------------------------------------------------------------
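A minimal sketch of reusing these helpers from another script, assuming the config file and NIO credentials are already in place and the command is run from the vm-provisioning directory:

```bash
source ./scripts/nio_flow_validation.sh     # also sources ./config and scripts/nio_configs.sh
JWT_TOKEN=$(get_jwt_token)                  # Keycloak password-grant token
curl -sk -H "Authorization: Bearer ${JWT_TOKEN}" \
  "https://api.${CLUSTER}/v1/projects/${PROJECT_NAME}/compute/hosts" | jq '.hosts | length'
```
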
/vm-provisioning/scripts/remove_all_packages.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 |
6 | sudo apt remove --purge libvirt-daemon-system libvirt-clients vagrant virt-manager ovmf expect minicom socat xterm -y
7 | sudo apt remove --purge qemu qemu-kvm libvirt-dev -y
8 | sudo apt-get purge docker-ce docker-ce-cli containerd.io -y
9 | sudo rm -rf /var/lib/docker
10 | sudo rm -rf /var/lib/containerd
11 | sudo rm /etc/apt/sources.list.d/docker.list
12 | sudo apt-get autoremove -y
13 |
14 | sudo unlink /etc/apparmor.d/disable/usr.lib.libvirt.virt-aa-helper
15 | sudo unlink /etc/apparmor.d/disable/usr.sbin.libvirtd
--------------------------------------------------------------------------------
/vm-provisioning/scripts/vm_network_cleanup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | #set -x
6 |
7 | # source variables from common variable file
8 | source "${PWD}/scripts/common_vars.sh"
9 |
10 | # Function to get the IP address of a VM
11 | get_vm_ip() {
12 | local vm_name=$1
13 | local ip
14 | ip=$(virsh domifaddr "$vm_name" --source agent --interface --full | grep -oP '(\d{1,3}\.){3}\d{1,3}')
15 | echo "$ip"
16 | }
17 |
18 | # Function to delete a specific VM
19 | delete_vm() {
20 | local vm_name=$1
21 |
22 | # Check if the specified VM exists
23 | if ! virsh dominfo "$vm_name" &> /dev/null; then
24 | echo "VM '$vm_name' does not exist."
25 | return
26 | fi
27 |
28 | # Get the IP address of the VM
29 | ip=$(get_vm_ip "$vm_name")
30 | echo "Destroying VM: $vm_name (IP: $ip)"
31 | virsh destroy "$vm_name"
32 |
33 | # Check for and delete snapshots
34 | snapshots=$(virsh snapshot-list "$vm_name" --name)
35 | for snapshot in $snapshots; do
36 | if [ -n "$snapshot" ]; then
37 | echo "Deleting snapshot: $snapshot for VM: $vm_name"
38 | virsh snapshot-delete "$vm_name" "$snapshot"
39 | fi
40 | done
41 |
42 | # Remove NVRAM file if it exists
43 | nvram_file=$(virsh dumpxml "$vm_name" | grep -oP '(?<=<nvram>).*?(?=</nvram>)')
44 | if [ -n "$nvram_file" ]; then
45 | echo "Removing NVRAM file: $nvram_file for VM: $vm_name"
46 | rm -f "$nvram_file"
47 | fi
48 |
49 | # Undefine the VM and remove all associated storage
50 | echo "Undefining VM: $vm_name"
51 | virsh undefine "$vm_name" --remove-all-storage
52 |
53 | echo "VM '$vm_name' has been cleaned up."
54 | }
55 |
56 | # Check if VM names are provided as arguments
57 | if [ "$#" -gt 0 ]; then
58 | # Loop through each provided VM name and delete it
59 | for vm_name in "$@"; do
60 | delete_vm "$vm_name"
61 | done
62 | else
63 | # No VM names provided, delete all VMs
64 | echo "Cleaning up all VMs."
65 | vms=$(virsh list --all --name)
66 |
67 | for vm in $vms; do
68 | if [ -n "$vm" ]; then
69 | delete_vm "$vm"
70 | fi
71 | done
72 |
73 | echo "All VMs have been cleaned up."
74 | fi
75 |
76 | # Get a list of all inactive networks starting with "orchvm-net-"
77 | networks=$(virsh net-list --all | grep 'orchvm-net-' | grep -v ' active ' | awk '{print $1}')
78 |
79 | # Loop through the list and remove each network
80 | for net in $networks; do
81 | echo "Deleting inactive network: $net"
82 | virsh net-destroy "$net"
83 | virsh net-undefine "$net"
84 | done
85 | echo "All inactive networks starting with 'orchvm-net-' have been removed."
86 |
87 | sudo bash -c "rm -rf ${BOOT_PATH}/*}_ca.der"
88 | sudo bash -c "rm -rf ${OVMF_PATH}/OVMF_*-vm*.fd"
89 | sudo bash -c "rm -rf ${BOOT_IMAGE}/*-vm*.qcow2"
90 | sudo bash -c "rm -rf ${BOOT_IMAGE}/*-vm*.raw"
91 |
92 | echo "All Vhdd and certs got cleaned up."
93 |
--------------------------------------------------------------------------------
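Cleanup can target specific VMs or everything, per the argument handling above (VM names below are illustrative):

```bash
./scripts/vm_network_cleanup.sh vm-provisioning-vm1 vm-provisioning-vm2   # delete specific VMs
./scripts/vm_network_cleanup.sh   # delete all VMs, inactive orchvm-net-* networks, disks and certs
```
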
/vm-provisioning/templates/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 |
4 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
5 | # SPDX-License-Identifier: Apache-2.0
6 |
7 | # Load the configuration from the custom config file
8 | load './config'
9 |
10 | Vagrant.configure("2") do |config|
11 | # Define the number of VMs
12 | num_vms ||= orchvm-num-vms
13 |
14 | # Set a default management network name if not provided
15 | BRIDGE_NAME ||= "orchvm-net-000"
16 |
17 | VM_NAME = "orchvm-net-000-vm" if VM_NAME.nil? || VM_NAME.empty?
18 |
19 | static_config_serials = ""
20 |
21 | # Loop to create multiple VMs
22 | (1..num_vms).each do |i|
23 | padded_i = sprintf("%02d", i)
24 |
25 | serialnum = ""
26 |
27 | if !static_config_serials.empty?
28 | # Split the string into an array
29 | serials_array = static_config_serials.split(',')
30 | serialnum = serials_array[i-1]
31 | else
32 | serialnum = "VH000N000M#{padded_i}"
33 | end
34 |
35 |
36 | # Define the management network name
37 | config.vm.define "#{VM_NAME}#{i}" do |vm_config|
38 |
39 | vm_config.vm.network :forwarded_port, guest: 22, host: (2200 + i - 1), host_ip: "0.0.0.0"
40 |
41 | vm_config.vm.provider "libvirt" do |libvirt|
42 | libvirt.title = "orchvm-net-000-vm#{i}"
43 |
44 | if STANDALONE != 0
45 | libvirt.storage_pool_name = "default"
46 | else
47 | libvirt.storage_pool_name = POOL_NAME
48 | end # Corrected from 'fi' to 'end'
49 |
50 | libvirt.driver = LIBVIRT_DRIVER
51 |
52 | # Network configuration to use the virbr0 bridge
53 | libvirt.management_network_name = "orchvm-net-000"
54 | libvirt.tpm_model = "tpm-tis"
55 | libvirt.tpm_type = "emulator"
56 | libvirt.tpm_version = "2.0"
57 | libvirt.tpm_path = '/dev/tpm0'
58 | libvirt.memory = RAM_SIZE
59 | libvirt.cpus = NO_OF_CPUS
60 | libvirt.loader = "/usr/share/OVMF/OVMF_CODE_orchvm-net-000-vm#{i}.fd"
61 | libvirt.nvram = "/usr/share/OVMF/OVMF_VARS_orchvm-net-000-vm#{i}.fd"
62 |
63 | libvirt.qemu_args = [
64 | { value: '-chardev' },
65 | { value: "socket,id=serial0,path=/tmp/console0_orchvm-net-000-vm#{i}.sock,server=on,wait=off" },
66 | { value: '-serial' },
67 | { value: 'chardev:serial0' },
68 | { value: '-chardev' },
69 | { value: "socket,id=serial1,path=/tmp/console1_orchvm-net-000-vm#{i}.sock,server=on,wait=off" },
70 | { value: '-serial' },
71 | { value: 'chardev:serial1' },
72 | { value: '-fw_cfg' },
73 | { value: 'name=etc/edk2/https/cacerts,file=/var/lib/libvirt/boot/orchvm-net-000_ca.der'},
74 | { value: '-smbios' },
75 | { value: "type=1,serial=#{serialnum}"}
76 | ]
77 | libvirt.disk_bus = "virtio"
78 | libvirt.storage :file, :size => SDA_DISK_SIZE, :type => 'qcow2', :device => 'sda', :bus => 'sata', :detect_zeroes => 'on'
79 |
80 | libvirt.boot "hd"
81 | libvirt.boot "network"
82 | # libvirt.boot "cdrom"
83 | end
84 | end
85 | end
86 | end
87 |
--------------------------------------------------------------------------------
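The tokens `orchvm-num-vms` and `orchvm-net-000` in this template are placeholders that are expected to be substituted before `vagrant up`; the create_vm flow is not shown here, so the pre-processing below is only an assumed sketch with illustrative values.

```bash
# Assumed template substitution before bringing VMs up with the libvirt provider;
# the resulting Vagrantfile must sit next to the config file it loads.
sed -e 's/orchvm-num-vms/4/g' -e 's/orchvm-net-000/orchvm-net-001/g' \
  templates/Vagrantfile > Vagrantfile
vagrant up --provider=libvirt
```
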
/vm-provisioning/templates/orch_network.xml:
--------------------------------------------------------------------------------
1 | <!--
2 | SPDX-FileCopyrightText: (C) 2025 Intel Corporation
3 | SPDX-License-Identifier: Apache-2.0
4 | -->
5 | <network>
6 |   <name>orchvm-net-000</name>
7 |   <!-- NOTE: the rest of this template (forward mode, bridge, IP/DHCP and boot settings) -->
8 |   <!--       was lost during text extraction; restore it from the original repository.   -->
9 | </network>
--------------------------------------------------------------------------------
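Once filled in, a libvirt network definition like this one is typically loaded and started with virsh (a suggested sequence, mirroring ci_network_bridge.sh):

```bash
virsh net-define templates/orch_network.xml
virsh net-start orchvm-net-000
virsh net-autostart orchvm-net-000
virsh net-list --all
```
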
/vm-provisioning/tools/yaml-syntax-check.py:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | import os
5 | import sys
6 | import yaml
7 | import argparse
8 |
9 | def is_unquoted_boolean_like(value):
10 | if isinstance(value, str):
11 | stripped_value = value.strip()
12 | is_quoted = (stripped_value.startswith('"') and stripped_value.endswith('"')) or \
13 | (stripped_value.startswith("'") and stripped_value.endswith("'"))
14 | return not is_quoted and stripped_value.lower() in ['true', 'false', 'yes', 'no', 'on', 'off']
15 | return False
16 |
17 | def check_yaml_best_practices(data):
18 | """
19 | Check if the YAML data follows best practices.
20 | """
21 | issues_found = False
22 | if isinstance(data, dict):
23 | for key, value in data.items():
24 | if isinstance(value, dict):
25 | if check_yaml_best_practices(value):
26 | issues_found = True
27 | elif isinstance(value, list):
28 | for item in value:
29 | if check_yaml_best_practices(item):
30 | issues_found = True
31 | elif isinstance(value, str):
32 | if is_unquoted_boolean_like(value):
33 | print(f"Warning: The value '{value}' for '{key}' may be interpreted as a boolean. Consider quoting it.")
34 | issues_found = True
35 | elif isinstance(data, list):
36 | for item in data:
37 | if check_yaml_best_practices(item):
38 | issues_found = True
39 | return issues_found
40 |
41 | def test_yaml_file(file_path):
42 | """
43 | Test a single YAML file for safe loading and parsing.
44 | """
45 | try:
46 | if os.path.getsize(file_path) > 10 * 1024 * 1024: # 10 MB limit
47 | print(f"Warning: The file {file_path} is too large to process safely.")
48 | return
49 |
50 | with open(file_path, 'r') as stream:
51 | documents = list(yaml.safe_load_all(stream))
52 | issues_found = False
53 | for doc in documents:
54 | if check_yaml_best_practices(doc):
55 | issues_found = True
56 | if not issues_found:
57 | print(f"YAML file {file_path} loaded successfully and follows best practices.")
58 | else:
59 | print(f"YAML file {file_path} has issues that need to be addressed.")
60 | except yaml.YAMLError as e:
61 | print(f"YAML error in file {file_path}: {e}")
62 | except Exception as e:
63 | print(f"Unexpected error in file {file_path}: {e}")
64 |
65 | def parse_arguments():
66 | parser = argparse.ArgumentParser(description='Validate YAML files.')
67 | parser.add_argument('yaml_files', nargs='+', help='List of YAML files to validate')
68 | parser.add_argument('--ignore', nargs='*', default=[], help='List of directories to ignore')
69 | return parser.parse_args()
70 |
71 | def main():
72 | args = parse_arguments()
73 | for file_path in args.yaml_files:
74 | if not any(file_path.startswith(ignore_dir) for ignore_dir in args.ignore):
75 | print(f"Testing YAML file: {file_path}")
76 | test_yaml_file(file_path)
77 |
78 | if __name__ == "__main__":
79 | main()
80 |
--------------------------------------------------------------------------------
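The argparse interface accepts a list of YAML files plus optional ignore prefixes; a typical invocation (paths illustrative) would be:

```bash
# Check the Ansible YAML files, skipping anything under out/.
python3 tools/yaml-syntax-check.py ansible/*.yml --ignore out/
```
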
/vm-provisioning/tools/yaml_validator/README.md:
--------------------------------------------------------------------------------
1 | # How to Add More Files in the Future
2 |
3 | To add a new YAML file (for example `new_file.yml`) for validation with a schema similar to `array_of_plays`,
4 | add the following line to the `yaml_validator.py` file.
5 |
6 | ```python
7 | "new_file.yml": array_of_plays,
8 | ```
9 |
10 | For inventory files or other specific YAML structures, you can create a custom schema by defining a function like:
11 |
12 | ```python
13 | def get_special_schema():
14 | # Define custom schema logic
15 | return custom_schema
16 | ```
17 |
18 | and use this schema instead of array_of_plays.
19 |
--------------------------------------------------------------------------------