\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
135 |
136 |
137 |
--------------------------------------------------------------------------------
/tests/auth/jwt_test.go:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | package auth
5 |
6 | import (
7 | "testing"
8 | "time"
9 | )
10 |
11 | func TestNewTestJWTGenerator(t *testing.T) {
12 | generator, err := NewTestJWTGenerator()
13 | if err != nil {
14 | t.Fatalf("Failed to create JWT generator: %v", err)
15 | }
16 |
17 | if generator.privateKey == nil {
18 | t.Error("Private key should not be nil")
19 | }
20 |
21 | if generator.publicKey == nil {
22 | t.Error("Public key should not be nil")
23 | }
24 | }
25 |
26 | func TestGenerateClusterManagerToken(t *testing.T) {
27 | generator, err := NewTestJWTGenerator()
28 | if err != nil {
29 | t.Fatalf("Failed to create JWT generator: %v", err)
30 | }
31 |
32 | subject := "test-user"
33 | projectUUID := "test-project-123"
34 | tokenString, err := generator.GenerateClusterManagerToken(subject, projectUUID, time.Hour)
35 | if err != nil {
36 | t.Fatalf("Failed to generate token: %v", err)
37 | }
38 |
39 | if tokenString == "" {
40 | t.Error("Token string should not be empty")
41 | }
42 |
43 | // Validate the token
44 | claims, err := generator.ValidateToken(tokenString)
45 | if err != nil {
46 | t.Fatalf("Failed to validate token: %v", err)
47 | }
48 |
49 | // Check claims
50 | if claims["sub"] != subject {
51 | t.Errorf("Expected subject %s, got %s", subject, claims["sub"])
52 | }
53 |
54 | if claims["iss"] != IssuerURL {
55 | t.Errorf("Expected issuer %s, got %s", IssuerURL, claims["iss"])
56 | }
57 | }
58 |
59 | func TestGenerateTokenWithCustomClaims(t *testing.T) {
60 | generator, err := NewTestJWTGenerator()
61 | if err != nil {
62 | t.Fatalf("Failed to create JWT generator: %v", err)
63 | }
64 |
65 | subject := "test-user"
66 | audience := []string{"test-service"}
67 | customClaims := map[string]interface{}{
68 | "role": "admin",
69 | "permissions": []string{"read", "write"},
70 | }
71 |
72 | tokenString, err := generator.GenerateToken(subject, audience, customClaims)
73 | if err != nil {
74 | t.Fatalf("Failed to generate token: %v", err)
75 | }
76 |
77 | // Validate the token
78 | claims, err := generator.ValidateToken(tokenString)
79 | if err != nil {
80 | t.Fatalf("Failed to validate token: %v", err)
81 | }
82 |
83 | // Check custom claims
84 | if claims["role"] != "admin" {
85 | t.Errorf("Expected role 'admin', got %s", claims["role"])
86 | }
87 |
88 | // Check permissions claim
89 | perms, ok := claims["permissions"].([]interface{})
90 | if !ok {
91 | t.Errorf("Expected permissions to be []interface{}, got %T", claims["permissions"])
92 | } else if len(perms) != 2 || perms[0] != "read" || perms[1] != "write" {
93 | t.Errorf("Expected permissions ['read', 'write'], got %v", claims["permissions"])
94 | }
95 | }
96 |
97 | func TestTokenExpiration(t *testing.T) {
98 | generator, err := NewTestJWTGenerator()
99 | if err != nil {
100 | t.Fatalf("Failed to create JWT generator: %v", err)
101 | }
102 |
103 | // Generate a token that expires in 1 millisecond
104 | tokenString, err := generator.GenerateShortLivedToken("test-user", 1*time.Millisecond)
105 | if err != nil {
106 | t.Fatalf("Failed to generate token: %v", err)
107 | }
108 |
109 | // Wait for token to expire
110 | time.Sleep(10 * time.Millisecond)
111 |
112 | // Try to validate expired token
113 | _, err = generator.ValidateToken(tokenString)
114 | if err == nil {
115 | t.Error("Expected validation to fail for expired token")
116 | }
117 | }
118 |
119 | func TestGetPublicKeyPEM(t *testing.T) {
120 | generator, err := NewTestJWTGenerator()
121 | if err != nil {
122 | t.Fatalf("Failed to create JWT generator: %v", err)
123 | }
124 |
125 | publicKeyPEM, err := generator.GetPublicKeyPEM()
126 | if err != nil {
127 | t.Fatalf("Failed to get public key PEM: %v", err)
128 | }
129 |
130 | if len(publicKeyPEM) == 0 {
131 | t.Error("Public key PEM should not be empty")
132 | }
133 |
134 | // Check that it's valid PEM format
135 | if string(publicKeyPEM[:11]) != "-----BEGIN " {
136 | t.Error("Public key PEM should start with '-----BEGIN '")
137 | }
138 | }
139 |
140 | func TestGetPrivateKeyPEM(t *testing.T) {
141 | generator, err := NewTestJWTGenerator()
142 | if err != nil {
143 | t.Fatalf("Failed to create JWT generator: %v", err)
144 | }
145 |
146 | privateKeyPEM, err := generator.GetPrivateKeyPEM()
147 | if err != nil {
148 | t.Fatalf("Failed to get private key PEM: %v", err)
149 | }
150 |
151 | if len(privateKeyPEM) == 0 {
152 | t.Error("Private key PEM should not be empty")
153 | }
154 |
155 | // Check that it's valid PEM format
156 | if string(privateKeyPEM[:11]) != "-----BEGIN " {
157 | t.Error("Private key PEM should start with '-----BEGIN '")
158 | }
159 | }
160 |
161 | func TestInvalidTokenValidation(t *testing.T) {
162 | generator, err := NewTestJWTGenerator()
163 | if err != nil {
164 | t.Fatalf("Failed to create JWT generator: %v", err)
165 | }
166 |
167 | // Test cases for invalid tokens
168 | testCases := []struct {
169 | name string
170 | token string
171 | }{
172 | {"invalid token string", "invalid.token.string"},
173 | {"malformed JWT", "header.payload.signature"},
174 | {"empty token", ""},
175 | {"incomplete JWT", "header.payload"},
176 | {"random string", "not-a-jwt-at-all"},
177 | }
178 |
179 | for _, tc := range testCases {
180 | t.Run(tc.name, func(t *testing.T) {
181 | _, err := generator.ValidateToken(tc.token)
182 | if err == nil {
183 | t.Errorf("Expected validation to fail for %s: %q", tc.name, tc.token)
184 | }
185 | })
186 | }
187 | }
188 |
189 | func TestTokenSignedWithDifferentKey(t *testing.T) {
190 | generator1, err := NewTestJWTGenerator()
191 | if err != nil {
192 | t.Fatalf("Failed to create first JWT generator: %v", err)
193 | }
194 |
195 | generator2, err := NewTestJWTGenerator()
196 | if err != nil {
197 | t.Fatalf("Failed to create second JWT generator: %v", err)
198 | }
199 |
200 | // Generate token with first generator
201 | tokenString, err := generator1.GenerateClusterManagerToken("test-user", "test-project", time.Hour)
202 | if err != nil {
203 | t.Fatalf("Failed to generate token: %v", err)
204 | }
205 |
206 | // Try to validate with second generator (different key)
207 | _, err = generator2.ValidateToken(tokenString)
208 | if err == nil {
209 | t.Error("Expected validation to fail when using different key")
210 | }
211 | }
212 |
--------------------------------------------------------------------------------
/tests/template-api-test/template_api_test.go:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | package template_api_test
5 |
6 | import (
7 | "fmt"
8 | . "github.com/onsi/ginkgo/v2"
9 | . "github.com/onsi/gomega"
10 | "github.com/open-edge-platform/cluster-tests/tests/utils"
11 | "os/exec"
12 | "testing"
13 | "time"
14 | )
15 |
16 | func TestTemplateApiTests(t *testing.T) {
17 | RegisterFailHandler(Fail)
18 | _, _ = fmt.Fprintf(GinkgoWriter, "Starting template api tests\n")
19 | RunSpecs(t, "template api test suite")
20 | }
21 |
// Ordered suite exercising the cluster-template surface of the cluster
// manager API: import, retrieval, default-template handling, and
// filtered listing. Specs share the port-forward and namespace set up
// in BeforeAll, so ordering matters (the import spec runs first).
var _ = Describe("Template API Tests", Ordered, func() {
	var (
		namespace string
		portForwardCmd *exec.Cmd
	)
	// BeforeAll: resolve the target namespace, port-forward to the
	// cluster manager service, and start from a clean slate.
	BeforeAll(func() {
		namespace = utils.GetEnv(utils.NamespaceEnvVar, utils.DefaultNamespace)

		By("Ensuring the namespace exists")
		err := utils.EnsureNamespaceExists(namespace)
		Expect(err).NotTo(HaveOccurred())

		By("Port forwarding to the cluster manager service")
		portForwardCmd = exec.Command("kubectl", "port-forward", utils.PortForwardService, fmt.Sprintf("%s:%s", utils.PortForwardLocalPort, utils.PortForwardRemotePort), "--address", utils.PortForwardAddress)
		err = portForwardCmd.Start()
		Expect(err).NotTo(HaveOccurred())
		// NOTE(review): fixed sleep gives the port-forward time to come
		// up; there is no readiness probe here.
		time.Sleep(5 * time.Second)

		By("Deleting all templates in the namespace")
		err = utils.DeleteAllTemplate(namespace)
		Expect(err).NotTo(HaveOccurred())
	})

	// AfterAll: delete templates created by the specs; the deferred
	// func kills the port-forward even if cleanup assertions fail.
	AfterAll(func() {
		defer func() {
			if portForwardCmd != nil && portForwardCmd.Process != nil {
				portForwardCmd.Process.Kill()
			}
		}()

		By("Deleting all templates in the namespace")
		err := utils.DeleteAllTemplate(namespace)
		Expect(err).NotTo(HaveOccurred())
	})

	// Imports both baseline templates (rke2 and k3s) and waits for each
	// to report ready; later specs depend on these two existing.
	It("should validate the template import success", Label(utils.ClusterOrchTemplateApiSmokeTest, utils.ClusterOrchTemplateApiAllTest), func() {
		By("Importing the cluster template rke2 baseline")
		err := utils.ImportClusterTemplate(namespace, utils.TemplateTypeRke2Baseline)
		Expect(err).NotTo(HaveOccurred())

		By("Waiting for the cluster template to be ready")
		Eventually(func() bool {
			return utils.IsClusterTemplateReady(namespace, utils.Rke2TemplateName)
		}, 1*time.Minute, 2*time.Second).Should(BeTrue())

		By("Importing the cluster template k3s baseline")
		err = utils.ImportClusterTemplate(namespace, utils.TemplateTypeK3sBaseline)
		Expect(err).NotTo(HaveOccurred())

		By("Waiting for the cluster template to be ready")
		Eventually(func() bool {
			return utils.IsClusterTemplateReady(namespace, utils.K3sTemplateName)
		}, 1*time.Minute, 2*time.Second).Should(BeTrue())
	})

	// GET by (name, version) must return a template whose name-version
	// concatenation matches the full template name constant.
	It("Should be able to retrieve a template", Label(utils.ClusterOrchTemplateApiSmokeTest, utils.ClusterOrchTemplateApiAllTest), func() {
		By("Retrieving the K3s template")
		template, err := utils.GetClusterTemplate(namespace, utils.K3sTemplateOnlyName, utils.K3sTemplateOnlyVersion)
		Expect(err).NotTo(HaveOccurred())
		Expect(template.Name + "-" + template.Version).To(Equal(utils.K3sTemplateName))

		By("Retrieving the Rke2 template")
		template, err = utils.GetClusterTemplate(namespace, utils.Rke2TemplateOnlyName, utils.Rke2TemplateOnlyVersion)
		Expect(err).NotTo(HaveOccurred())
		Expect(template.Name + "-" + template.Version).To(Equal(utils.Rke2TemplateName))
	})

	// Before any default is configured, the API returns nil info.
	It("Should not find a default template when non has been set", Label(utils.ClusterOrchTemplateApiAllTest), func() {
		By("Getting Default template when none has been set")
		defaultTemplateInfo, err := utils.GetDefaultTemplate(namespace)
		Expect(err).NotTo(HaveOccurred())
		Expect(defaultTemplateInfo).To(BeNil(), "Default template should be nil when none has been set")
	})

	// Exercises default-template setting three ways: name only (version
	// resolved server-side), name+version, and an idempotent re-set.
	It("Should be able to set a default template", Label(utils.ClusterOrchTemplateApiSmokeTest, utils.ClusterOrchTemplateApiAllTest), func() {

		By("Set the default template by providing only template name without version")
		err := utils.SetDefaultTemplate(namespace, utils.K3sTemplateOnlyName, "")
		Expect(err).NotTo(HaveOccurred())

		By("Getting Default template after setting it")
		defaultTemplateInfo, err := utils.GetDefaultTemplate(namespace)
		Expect(err).NotTo(HaveOccurred())
		Expect(*defaultTemplateInfo.Name).To(Equal(utils.K3sTemplateOnlyName), "Default template name should match the set template name")
		Expect(defaultTemplateInfo.Version).To(Equal(utils.K3sTemplateOnlyVersion), "Default template version should match the set template version")

		By("Set the default template by providing both template name and version")
		err = utils.SetDefaultTemplate(namespace, utils.Rke2TemplateOnlyName, utils.Rke2TemplateOnlyVersion)
		Expect(err).NotTo(HaveOccurred())

		By("Getting Default template after setting it")
		defaultTemplateInfo, err = utils.GetDefaultTemplate(namespace)
		Expect(err).NotTo(HaveOccurred())
		Expect(*defaultTemplateInfo.Name).To(Equal(utils.Rke2TemplateOnlyName), "Default template name should match the set template name")
		Expect(defaultTemplateInfo.Version).To(Equal(utils.Rke2TemplateOnlyVersion), "Default template version should match the set template version")

		By("Setting default template again after it has been set, should not error")
		err = utils.SetDefaultTemplate(namespace, utils.Rke2TemplateOnlyName, utils.Rke2TemplateOnlyVersion)
		Expect(err).NotTo(HaveOccurred())

		By("Getting Default template after setting it again")
		defaultTemplateInfo, err = utils.GetDefaultTemplate(namespace)
		Expect(err).NotTo(HaveOccurred())
		Expect(*defaultTemplateInfo.Name).To(Equal(utils.Rke2TemplateOnlyName), "Default template name should match the set template name")
		Expect(defaultTemplateInfo.Version).To(Equal(utils.Rke2TemplateOnlyVersion), "Default template version should match the set template version")

	})

	// Setting a default that references a non-existent template must be
	// rejected by the API.
	It("Should error out when setting a default template with an invalid name", Label(utils.ClusterOrchTemplateApiAllTest), func() {
		By("Setting default template to a non-existing template should error")
		err := utils.SetDefaultTemplate(namespace, "non-existing-template", "v1.0.0")
		Expect(err).To(HaveOccurred(), "Setting default template to a non-existing template should return an error")

	})

	// Both imported templates share version v0.0.1, so the filter must
	// match exactly two entries.
	It("Should return templates matching a filter", Label(utils.ClusterOrchTemplateApiAllTest), func() {
		By("Retrieving templates with a filter")
		templates, err := utils.GetClusterTemplatesWithFilter(namespace, "version=v0.0.1")
		Expect(err).NotTo(HaveOccurred())
		Expect(templates).ToNot(BeNil(), "Templates should not be nil")
		Expect(templates.TemplateInfoList).ToNot(BeNil())
		Expect(*templates.TemplateInfoList).To(HaveLen(2), "There should be two templates matching the filter - one rke2 and another k3s")
	})
})
146 |
--------------------------------------------------------------------------------
/test-plan/test-plan.md:
--------------------------------------------------------------------------------
1 | # Test Plan for Cluster Orchestration sub-system in Intel® Open Edge Platform
2 |
3 | ## 1. Introduction
4 |
5 | ### 1.1 Purpose
6 |
7 | The purpose of the test plan document is to outline the testing strategy for the Cluster Orchestration (CO) sub-system in
8 | Intel® Open Edge platform. The document also provides the scope, objectives, and approach for testing the CO.
9 | The document also provides the list of test cases that will be executed to validate the CO.
10 |
11 | ### 1.2 Scope
12 |
13 | The scope is primarily to validate the CO by mocking external dependencies as much as possible.
14 | The CO is responsible for managing the life-cycle of the edge node cluster.
The diagram below represents the high-level design of the CO:
16 | 
17 |
18 | The scope of the test plan is to validate the CO sub-system by executing the test cases defined in this document.
19 | It is to be noted that other sub-systems may also get validated as part of the CO testing, but it is not the
20 | primary objective of this document.
21 |
22 | ### 1.3 Objectives
23 |
24 | The objectives of this document are as follows:
25 |
26 | - To outline the testing approach for the CO.
27 | - To define the test environment for testing the CO.
28 | - To define the test criteria for testing the CO.
29 | - To provide the list of test cases that will be executed to validate the CO.
30 |
31 | ## 2. Test Approach
32 |
33 | The test approach for the CO is to validate the CO by mocking external dependencies as much as possible. Below diagram
34 | represents the high-level test approach for the CO:
35 | 
36 |
37 | ## 3. Test Environment
38 |
39 | The test environment will use a system that is similar to `t3.xlarge` (or better) in configuration, i.e., 4vCPUs, 16 GiB
40 | memory and at least 50GiB of storage with Ubuntu 22.04 LTS OS to run the tests. The required tools and their versions
41 | for the test will be managed by `asdf`.
42 |
## 4. Test Categories
44 |
45 | At a very high level, the tests can be classified as Functional and Non-Functional. These categories of tests are further
46 | classified into test types.
47 |
48 | The functional tests can be
49 |
50 | - Component level - COMP (Edge Cluster Manager, Intel Cluster Provider, ECM SB Handler etc)
51 | - Integration - INT (Eg: CO Subsystem)
52 | - System level - SYS (Eg: Test all of Intel® Open Edge Platform)
53 |
54 | Non-functional tests can be
55 |
56 | - Scalability (SCB)
57 | - Stress (STR)
58 | - Stability (STB)
59 | - Chaos (CHAOS)
60 | - Performance (PERF)
61 | - High Availability (HA)
62 | - Security (SEC)
63 | - etc.
64 |
The initial goal of the test plan and execution is focused on Functional Integration tests. However,
the framework itself shall be extensible to include other types of tests in the future.
67 |
68 | ## 5. Test Cases
69 |
70 | ### 5.1 Test Case Format
71 |
72 | Test Case format shall look like below:
73 |
74 | 1. Test Case ID: A unique identifier for the test case. This can be a combination of the test category and a sequential
75 | number suffixed to `TC-CO-`. Ex: `TC-CO-INT-001`
76 | 1. Test Case Name: A brief, descriptive name for the test case.
77 | 1. Objective: The purpose of the test case.
78 | 1. Preconditions: Any conditions that must be met before the test can be executed.
79 | 1. Test Steps: A detailed, step-by-step description of the actions to be performed.
80 | 1. Test Data: Specific data to be used in the test.
81 | 1. Expected Result: The expected outcome of the test.
82 |
83 | ### 5.2 List of Test Cases
84 |
85 | ### Test Case ID: TC-CO-INT-001
86 |
87 | - **Test Description:** Verify Single Node RKE2 Cluster creation and deletion using Cluster Manager APIs
88 | - **Preconditions:**
89 | - Ensure the namespace exists or create it if it does not.
90 | - Port forward to the cluster manager service.
91 | - Import the cluster template and ensure it is ready.
92 | - **Test Steps:**
93 | 1. Authenticate with KeyCloak and obtain a token with the right roles and permissions to access the ECM /v1/clusters POST API.
94 | 1. Send a POST request to create a new cluster using the available ClusterTemplate.
95 | 1. Verify the Cluster CR is created in the Kubernetes API server.
96 | 1. Verify the associated resources (RKE2ControlPlane, IntelCluster, etc.) are created.
97 | 1. Check the status of the Cluster CR to ensure it is marked as ready.
98 | 1. Verify that the machine infrastructure is ready after successful cluster creation.
99 | 1. Delete the cluster if SKIP_DELETE_CLUSTER is not set to true.
100 | 1. Verify that the cluster is deleted.
101 | - **Expected Results:**
102 | - The Cluster CR is created successfully.
103 | - Associated resources are created and linked correctly.
104 | - The Cluster CR status is marked as ready.
105 | - The machine infrastructure is ready.
106 | - The cluster is deleted successfully if SKIP_DELETE_CLUSTER is not set to true.
107 |
108 | ### Test Case ID: TC-CO-INT-002
109 |
110 | - **Test Description:** Should successfully import RKE2 Single Node cluster template
111 | - **Preconditions:**
112 | - Ensure the namespace exists or create it if it does not.
113 | - Port forward to the cluster manager service.
114 | - **Test Steps:**
115 | 1. Import the cluster template.
116 | 1. Wait for the cluster template to be ready.
117 | - **Expected Results:**
118 | - The cluster template is imported successfully.
119 | - The cluster template is marked as ready.
120 |
121 | ### Test Case ID: TC-CO-INT-003
122 |
123 | - **Test Description:** Should verify that cluster create API should succeed
124 | - **Preconditions:**
125 | - Ensure the namespace exists or create it if it does not.
126 | - Port forward to the cluster manager service.
127 | - Import the cluster template and ensure it is ready.
128 | - **Test Steps:**
129 | 1. Record the start time before creating the cluster.
1. Send a POST request to create a new cluster using the available ClusterTemplate.
131 | - **Expected Results:**
132 | - The cluster is created successfully.
133 |
134 | ### Test Case ID: TC-CO-INT-004
135 |
136 | - **Test Description:** Should verify that the cluster is fully active
137 | - **Preconditions:**
138 | - Ensure the namespace exists or create it if it does not.
139 | - Port forward to the cluster manager service.
140 | - Import the cluster template and ensure it is ready.
141 | - Create the cluster.
142 | - **Test Steps:**
143 | 1. Wait for IntelMachine to exist.
1. Wait for all components to be ready.
145 | - **Expected Results:**
146 | - IntelMachine exists.
147 | - All components are ready.
148 |
149 | ### Test Case ID: TC-CO-INT-005
150 |
151 | - **Test Description:** Should verify that the cluster information can be queried
152 | - **Preconditions:**
153 | - Ensure the namespace exists or create it if it does not.
154 | - Port forward to the cluster manager service.
155 | - Import the cluster template and ensure it is ready.
156 | - Create the cluster.
157 | - **Test Steps:**
158 | 1. Send a GET request to retrieve the cluster information.
159 | - **Expected Results:**
160 | - The HTTP response status code is 200 (OK).
161 | - The cluster information is retrieved successfully.
162 |
163 | ### Test Case ID: TC-CO-INT-006
164 |
165 | - **Test Description:** Should verify that the cluster label can be queried
166 | - **Preconditions:**
167 | - Ensure the namespace exists or create it if it does not.
168 | - Port forward to the cluster manager service.
169 | - Import the cluster template and ensure it is ready.
170 | - Create the cluster.
171 | - **Test Steps:**
172 | 1. Send a GET request to retrieve the cluster label.
173 | - **Expected Results:**
174 | - The cluster label is retrieved successfully.
175 |
176 | ### Test Case ID: TC-CO-INT-007
177 |
178 | - **Test Description:** Should verify that the cluster label can be updated
179 | - **Preconditions:**
180 | - Ensure the namespace exists or create it if it does not.
181 | - Port forward to the cluster manager service.
182 | - Import the cluster template and ensure it is ready.
183 | - Create the cluster.
184 | - **Test Steps:**
185 | 1. Send a PUT request to update the cluster label.
186 | - **Expected Results:**
187 | - The cluster label is updated successfully.
188 |
189 | ### Test Case ID: TC-CO-INT-008
190 |
- **Test Description:** Should verify that the connect gateway allows access to the k8s API
192 | - **Preconditions:**
193 | - Port forward to the cluster gateway service.
194 | - **Test Steps:**
195 | 1. Get kubeconfig using clusterctl
196 | 1. Set server in kubeconfig to cluster connect gateway
197 | 1. Use kubeconfig to fetch list of pods
198 | - **Expected Results:**
199 | - The pod list information is retrieved successfully.
200 |
201 | ### Test Case ID: TC-CO-INT-009
202 |
203 | - **Test Description:** Should verify that a cluster template cannot be deleted if there is a cluster using it.
204 | - **Preconditions:**
205 | - Ensure the namespace exists or create it if it does not.
206 | - Port forward to the cluster manager service.
207 | - Import the cluster template and ensure it is ready.
208 | - Create a cluster using the imported cluster template.
209 | - **Test Steps:**
210 | 1. Attempt to delete the cluster template using the DELETE API.
211 | - **Expected Results:**
212 | - The DELETE request fails with an error message indicating that the cluster template is in use.
213 |
--------------------------------------------------------------------------------
/mage/test.go:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | package mage
5 |
6 | import (
7 | "encoding/json"
8 | "fmt"
9 | "os"
10 | "os/exec"
11 | "path/filepath"
12 | "regexp"
13 | "strings"
14 |
15 | "github.com/open-edge-platform/cluster-tests/tests/utils"
16 |
17 | "github.com/magefile/mage/sh"
18 | "gopkg.in/yaml.v3"
19 | )
20 |
const (
	// gitCommitHashRegex matches a git commit hash (min 5, max 40 lowercase
	// hex characters). NOTE(review): any 5-40 character hex word matches,
	// so a branch or tag name made solely of hex digits would be treated
	// as a commit hash by processComponent.
	gitCommitHashRegex = `\b[0-9a-f]{5,40}\b` // Matches a git commit hash (min 5, max 40 characters)
)
24 |
// HelmRepo describes one helm chart to install for a component: where the
// chart lives (URL + Package), how to install it (ReleaseName, Namespace),
// optional version pinning (Version adds --version, UseDevel adds --devel),
// and Overrides, which is appended verbatim to the helm install command.
type HelmRepo struct {
	URL string `yaml:"url" json:"url"`
	ReleaseName string `yaml:"release-name" json:"release-name"`
	Package string `yaml:"package" json:"package"`
	Namespace string `yaml:"namespace" json:"namespace"`
	Version string `yaml:"version" json:"version"`
	UseDevel bool `yaml:"use-devel" json:"use-devel"`
	Overrides string `yaml:"overrides" json:"overrides"`
}
34 |
// GitRepo identifies a git source to clone for a locally-built component.
// Version may be a branch/tag name or a commit hash; processComponent
// detects hashes via gitCommitHashRegex and checks them out after cloning.
type GitRepo struct {
	URL string `yaml:"url" json:"url"`
	Version string `yaml:"version" json:"version"`
}
39 |
// Component describes one test dependency and how to install it.
// SkipComponent skips the component entirely. With SkipLocalBuild set,
// the HelmRepo charts are installed directly; otherwise GitRepo is cloned
// and the MakeTargets are run (from MakeDirectory, with MakeVariables
// prepended to the make invocation). Pre/PostInstallCommands are shell
// commands executed in the component workspace before/after installation.
type Component struct {
	Name string `yaml:"name" json:"name"`
	SkipComponent bool `yaml:"skip-component" json:"skip-component"`
	SkipLocalBuild bool `yaml:"skip-local-build" json:"skip-local-build"`
	HelmRepo []HelmRepo `yaml:"helm-repo" json:"helm-repo"`
	GitRepo GitRepo `yaml:"git-repo" json:"git-repo"`
	PreInstallCommands []string `yaml:"pre-install-commands" json:"pre-install-commands"`
	MakeDirectory string `yaml:"make-directory" json:"make-directory"`
	MakeVariables []string `yaml:"make-variables" json:"make-variables"`
	MakeTargets []string `yaml:"make-targets" json:"make-targets"`
	PostInstallCommands []string `yaml:"post-install-commands" json:"post-install-commands"`
}
52 |
// Config is the root of the test-dependency configuration
// (.test-dependencies.yaml, optionally overlaid by ADDITIONAL_CONFIG):
// the kind cluster config file and the components to install.
type Config struct {
	KindClusterConfig string `yaml:"kind-cluster-config" json:"kind-cluster-config"`
	Components []Component `yaml:"components" json:"components"`
}
57 |
58 | func (Test) bootstrap() error {
59 | defaultConfig, err := parseConfig(".test-dependencies.yaml")
60 | if err != nil {
61 | return err
62 | }
63 |
64 | additionalConfigStr := os.Getenv("ADDITIONAL_CONFIG")
65 | fmt.Printf("Additional config: %s\n", additionalConfigStr)
66 | if additionalConfigStr != "" {
67 | var additionalConfig Config
68 | if err := json.Unmarshal([]byte(additionalConfigStr), &additionalConfig); err != nil {
69 | return err
70 | }
71 | fmt.Printf("Additional config after unmarshal: %+v\n", additionalConfig)
72 |
73 | mergeConfigs(defaultConfig, &additionalConfig)
74 | }
75 |
76 | if err := createKindCluster(defaultConfig.KindClusterConfig); err != nil {
77 | return err
78 | }
79 |
80 | for _, component := range defaultConfig.Components {
81 | if err := processComponent(component); err != nil {
82 | return err
83 | }
84 | }
85 |
86 | return nil
87 | }
88 |
89 | func (Test) cleanup() error {
90 | cmd := "kind delete cluster"
91 | return runCommand(cmd)
92 | }
93 |
// createCluster is a placeholder mage target; cluster creation currently
// happens as part of bootstrap.
// nolint: unused
func (Test) createCluster() error {
	return nil
}
98 |
99 | // Test Runs cluster orch smoke test by creating locations, configuring host, creating a cluster and then finally cleanup
100 | func (Test) clusterOrchClusterApiSmokeTest() error {
101 | return sh.RunV(
102 | "ginkgo",
103 | "-v",
104 | "-r",
105 | "--fail-fast",
106 | "--race",
107 | fmt.Sprintf("--label-filter=%s", utils.ClusterOrchClusterApiSmokeTest),
108 | "./tests/cluster-api-test",
109 | )
110 | }
111 |
112 | // Test Runs cluster orch template api test
113 | func (Test) clusterOrchTemplateApiSmokeTest() error {
114 | return sh.RunV(
115 | "ginkgo",
116 | "-v",
117 | "-r",
118 | "--fail-fast",
119 | "--race",
120 | fmt.Sprintf("--label-filter=%s", utils.ClusterOrchTemplateApiSmokeTest),
121 | "./tests/template-api-test",
122 | )
123 | }
124 |
125 | // Test Runs cluster orch template api all tests
126 | func (Test) clusterOrchTemplateApiAllTest() error {
127 | return sh.RunV(
128 | "ginkgo",
129 | "-v",
130 | "-r",
131 | "--fail-fast",
132 | "--race",
133 | fmt.Sprintf("--label-filter=%s", utils.ClusterOrchTemplateApiAllTest),
134 | "./tests/template-api-test",
135 | )
136 | }
137 |
138 | // Test Runs cluster orch cluster api all tests
139 | func (Test) clusterOrchClusterApiAllTest() error {
140 | return sh.RunV(
141 | "ginkgo",
142 | "-v",
143 | "-r",
144 | "--fail-fast",
145 | "--race",
146 | fmt.Sprintf("--label-filter=%s", utils.ClusterOrchClusterApiAllTest),
147 | "./tests/cluster-api-test",
148 | )
149 | }
150 |
151 | // Test Runs cluster orch roubstness test
152 | func (Test) clusterOrchRobustness() error {
153 | return sh.RunV(
154 | "ginkgo",
155 | "-v",
156 | "-r",
157 | "--fail-fast",
158 | "--race",
159 | fmt.Sprintf("--label-filter=%s", utils.ClusterOrchRobustnessTest),
160 | "./tests/robustness-test",
161 | )
162 | }
163 |
164 | /////// Helper functions ///////
165 |
166 | func mergeConfigs(defaultConfig, additionalConfig *Config) {
167 | if additionalConfig.KindClusterConfig != "" {
168 | defaultConfig.KindClusterConfig = additionalConfig.KindClusterConfig
169 | }
170 |
171 | for _, additionalComponent := range additionalConfig.Components {
172 | found := false
173 | for i, defaultComponent := range defaultConfig.Components {
174 | if defaultComponent.Name == additionalComponent.Name {
175 | fmt.Printf("Overriding config for component: %s, overriding config: %+v\n", defaultComponent.Name, additionalComponent)
176 | defaultConfig.Components[i] = mergeComponent(defaultComponent, additionalComponent)
177 | found = true
178 | break
179 | }
180 | }
181 | if !found {
182 | defaultConfig.Components = append(defaultConfig.Components, additionalComponent)
183 | }
184 | }
185 | }
186 |
187 | func mergeComponent(defaultComponent, additionalComponent Component) Component {
188 | defaultComponent.SkipComponent = additionalComponent.SkipComponent
189 | defaultComponent.SkipLocalBuild = additionalComponent.SkipLocalBuild
190 |
191 | if len(additionalComponent.HelmRepo) > 0 {
192 | defaultComponent.HelmRepo = append(defaultComponent.HelmRepo, additionalComponent.HelmRepo...)
193 | }
194 | if additionalComponent.GitRepo.URL != "" {
195 | defaultComponent.GitRepo.URL = additionalComponent.GitRepo.URL
196 | }
197 | if additionalComponent.GitRepo.Version != "" {
198 | defaultComponent.GitRepo.Version = additionalComponent.GitRepo.Version
199 | }
200 | if len(additionalComponent.PreInstallCommands) > 0 {
201 | defaultComponent.PreInstallCommands = additionalComponent.PreInstallCommands
202 | }
203 | if additionalComponent.MakeDirectory != "" {
204 | defaultComponent.MakeDirectory = additionalComponent.MakeDirectory
205 | }
206 | if len(additionalComponent.MakeVariables) > 0 {
207 | defaultComponent.MakeVariables = additionalComponent.MakeVariables
208 | }
209 | if len(additionalComponent.MakeTargets) > 0 {
210 | defaultComponent.MakeTargets = additionalComponent.MakeTargets
211 | }
212 | if len(additionalComponent.PostInstallCommands) > 0 {
213 | defaultComponent.PostInstallCommands = additionalComponent.PostInstallCommands
214 | }
215 | return defaultComponent
216 | }
217 |
218 | func parseConfig(file string) (*Config, error) {
219 | data, err := os.ReadFile(file)
220 | if err != nil {
221 | return nil, err
222 | }
223 | var config Config
224 | err = yaml.Unmarshal(data, &config)
225 | if err != nil {
226 | return nil, err
227 | }
228 | return &config, nil
229 | }
230 |
// runCommand echoes cmd and executes it via `bash -c`, streaming the
// child's stdout and stderr to the current process.
func runCommand(cmd string) error {
	fmt.Println("Running command:", cmd)
	shell := exec.Command("bash", "-c", cmd)
	shell.Stdout, shell.Stderr = os.Stdout, os.Stderr
	return shell.Run()
}
238 |
239 | func createKindCluster(configFile string) error {
240 | cmd := fmt.Sprintf("kind create cluster --config %s", configFile)
241 | return runCommand(cmd)
242 | }
243 |
// processComponent installs one component: either from its Helm repo
// (when SkipLocalBuild is set) or by cloning its git repo and running
// make targets, with optional shell hooks before and after either path.
//
// Returns the first error from any shell command; a component flagged
// SkipComponent returns nil immediately.
func processComponent(component Component) error {
	// Honor the per-component skip flag.
	if component.SkipComponent {
		fmt.Printf("Skipping component: %s\n", component.Name)
		return nil
	}

	// Each component gets a freshly recreated scratch directory
	// under _workspace/ so prior runs cannot leak state.
	workspaceDir := filepath.Join("_workspace", component.Name)

	if err := os.RemoveAll(workspaceDir); err != nil {
		return err
	}
	if err := os.MkdirAll(workspaceDir, os.ModePerm); err != nil {
		return err
	}

	// Pre-install hooks run from inside the workspace directory.
	for _, cmd := range component.PreInstallCommands {
		cmd = fmt.Sprintf("cd %s && %s", workspaceDir, cmd)
		if err := runCommand(cmd); err != nil {
			return err
		}
	}

	if component.SkipLocalBuild {
		// Helm path: install every chart listed for the component.
		for _, helm := range component.HelmRepo {
			chart := fmt.Sprintf("%s/%s", helm.URL, helm.Package)
			cmd := fmt.Sprintf("helm install %s %s --namespace %s", helm.ReleaseName, chart, helm.Namespace)
			if helm.Version != "" {
				cmd = fmt.Sprintf("%s --version %s", cmd, helm.Version)
			}
			if helm.UseDevel {
				// --devel lets helm select pre-release chart versions.
				cmd = fmt.Sprintf("%s --devel", cmd)
			}
			if helm.Overrides != "" {
				// Overrides are appended verbatim to the helm command line.
				cmd = fmt.Sprintf("%s %s", cmd, helm.Overrides)
			}
			if err := runCommand(cmd); err != nil {
				return err
			}
		}
	} else {
		// Source path: clone the repo at the requested version, then
		// run the component's make targets.
		// Check if the version is a commit hash
		commitHashRegex := regexp.MustCompile(gitCommitHashRegex)
		version := component.GitRepo.Version
		var cloneCmd string
		if commitHashRegex.MatchString(version) {
			// A commit hash cannot be passed to --branch; clone the
			// default branch first, then check out the hash.
			cloneCmd = fmt.Sprintf("git clone %s %s && cd %s && git checkout %s", component.GitRepo.URL, workspaceDir, workspaceDir, version)
		} else {
			// Branch and tag names can be cloned in a single step.
			cloneCmd = fmt.Sprintf("git clone --branch %s %s %s", version, component.GitRepo.URL, workspaceDir)
		}
		// NOTE(review): cloning into workspaceDir assumes the pre-install
		// commands left it empty — git refuses a non-empty target; confirm.
		if err := runCommand(cloneCmd); err != nil {
			return err
		}

		for _, target := range component.MakeTargets {
			makeDir := filepath.Join(workspaceDir, component.MakeDirectory)
			makeCmd := fmt.Sprintf("cd %s && make %s", makeDir, target)
			if len(component.MakeVariables) > 0 {
				// MakeVariables (e.g. "FOO=bar") are placed inline
				// ahead of the make invocation.
				makeCmd = fmt.Sprintf("cd %s && %s make %s", makeDir, strings.Join(component.MakeVariables, " "), target)
			}
			if err := runCommand(makeCmd); err != nil {
				return err
			}
		}
	}

	// Post-install hooks run from inside the workspace directory.
	for _, cmd := range component.PostInstallCommands {
		cmd = fmt.Sprintf("cd %s && %s", workspaceDir, cmd)
		if err := runCommand(cmd); err != nil {
			return err
		}
	}

	return nil
}
318 |
--------------------------------------------------------------------------------
/go.sum:
--------------------------------------------------------------------------------
1 | github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
2 | github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
3 | github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk=
4 | github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ=
5 | github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk=
6 | github.com/bitfield/script v0.24.1 h1:D4ZWu72qWL/at0rXFF+9xgs17VwyrpT6PkkBTdEz9xU=
7 | github.com/bitfield/script v0.24.1/go.mod h1:fv+6x4OzVsRs6qAlc7wiGq8fq1b5orhtQdtW0dwjUHI=
8 | github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w=
9 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
10 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
11 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
12 | github.com/getkin/kin-openapi v0.133.0 h1:pJdmNohVIJ97r4AUFtEXRXwESr8b0bD721u/Tz6k8PQ=
13 | github.com/getkin/kin-openapi v0.133.0/go.mod h1:boAciF6cXk5FhPqe/NQeBTeenbjqU4LhWBf09ILVvWE=
14 | github.com/gkampitakis/ciinfo v0.3.2 h1:JcuOPk8ZU7nZQjdUhctuhQofk7BGHuIy0c9Ez8BNhXs=
15 | github.com/gkampitakis/ciinfo v0.3.2/go.mod h1:1NIwaOcFChN4fa/B0hEBdAb6npDlFL8Bwx4dfRLRqAo=
16 | github.com/gkampitakis/go-diff v1.3.2 h1:Qyn0J9XJSDTgnsgHRdz9Zp24RaJeKMUHg2+PDZZdC4M=
17 | github.com/gkampitakis/go-diff v1.3.2/go.mod h1:LLgOrpqleQe26cte8s36HTWcTmMEur6OPYerdAAS9tk=
18 | github.com/gkampitakis/go-snaps v0.5.15 h1:amyJrvM1D33cPHwVrjo9jQxX8g/7E2wYdZ+01KS3zGE=
19 | github.com/gkampitakis/go-snaps v0.5.15/go.mod h1:HNpx/9GoKisdhw9AFOBT1N7DBs9DiHo/hGheFGBZ+mc=
20 | github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
21 | github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
22 | github.com/go-openapi/jsonpointer v0.22.1 h1:sHYI1He3b9NqJ4wXLoJDKmUmHkWy/L7rtEo92JUxBNk=
23 | github.com/go-openapi/jsonpointer v0.22.1/go.mod h1:pQT9OsLkfz1yWoMgYFy4x3U5GY5nUlsOn1qSBH5MkCM=
24 | github.com/go-openapi/swag/jsonname v0.25.1 h1:Sgx+qbwa4ej6AomWC6pEfXrA6uP2RkaNjA9BR8a1RJU=
25 | github.com/go-openapi/swag/jsonname v0.25.1/go.mod h1:71Tekow6UOLBD3wS7XhdT98g5J5GR13NOTQ9/6Q11Zo=
26 | github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI=
27 | github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow=
28 | github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
29 | github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
30 | github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM=
31 | github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
32 | github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw=
33 | github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
34 | github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
35 | github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
36 | github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
37 | github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
38 | github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6 h1:EEHtgt9IwisQ2AZ4pIsMjahcegHh6rmhqxzIRQIyepY=
39 | github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U=
40 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
41 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
42 | github.com/itchyny/gojq v0.12.17 h1:8av8eGduDb5+rvEdaOO+zQUjA04MS0m3Ps8HiD+fceg=
43 | github.com/itchyny/gojq v0.12.17/go.mod h1:WBrEMkgAfAGO1LUcGOckBl5O726KPp+OlkKug0I/FEY=
44 | github.com/itchyny/timefmt-go v0.1.7 h1:xyftit9Tbw+Dc/huSSPJaEmX1TVL8lw5vxjJLK4GMMA=
45 | github.com/itchyny/timefmt-go v0.1.7/go.mod h1:5E46Q+zj7vbTgWY8o5YkMeYb4I6GeWLFnetPy5oBrAI=
46 | github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
47 | github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
48 | github.com/joshdk/go-junit v1.0.0 h1:S86cUKIdwBHWwA6xCmFlf3RTLfVXYQfvanM5Uh+K6GE=
49 | github.com/joshdk/go-junit v1.0.0/go.mod h1:TiiV0PqkaNfFXjEiyjWM3XXrhVyCa1K4Zfga6W52ung=
50 | github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE=
51 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
52 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
53 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
54 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
55 | github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg=
56 | github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
57 | github.com/mailru/easyjson v0.9.1 h1:LbtsOm5WAswyWbvTEOqhypdPeZzHavpZx96/n553mR8=
58 | github.com/mailru/easyjson v0.9.1/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
59 | github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo=
60 | github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg=
61 | github.com/mfridman/tparse v0.18.0 h1:wh6dzOKaIwkUGyKgOntDW4liXSo37qg5AXbIhkMV3vE=
62 | github.com/mfridman/tparse v0.18.0/go.mod h1:gEvqZTuCgEhPbYk/2lS3Kcxg1GmTxxU7kTC8DvP0i/A=
63 | github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=
64 | github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
65 | github.com/oapi-codegen/runtime v1.1.2 h1:P2+CubHq8fO4Q6fV1tqDBZHCwpVpvPg7oKiYzQgXIyI=
66 | github.com/oapi-codegen/runtime v1.1.2/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg=
67 | github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 h1:G7ERwszslrBzRxj//JalHPu/3yz+De2J+4aLtSRlHiY=
68 | github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037/go.mod h1:2bpvgLBZEtENV5scfDFEtB/5+1M4hkQhDQrccEJ/qGw=
69 | github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 h1:bQx3WeLcUWy+RletIKwUIt4x3t8n2SxavmoclizMb8c=
70 | github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90/go.mod h1:y5+oSEHCPT/DGrS++Wc/479ERge0zTFxaF8PbGKcg2o=
71 | github.com/onsi/ginkgo/v2 v2.27.3 h1:ICsZJ8JoYafeXFFlFAG75a7CxMsJHwgKwtO+82SE9L8=
72 | github.com/onsi/ginkgo/v2 v2.27.3/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
73 | github.com/onsi/gomega v1.38.3 h1:eTX+W6dobAYfFeGC2PV6RwXRu/MyT+cQguijutvkpSM=
74 | github.com/onsi/gomega v1.38.3/go.mod h1:ZCU1pkQcXDO5Sl9/VVEGlDyp+zm0m1cmeG5TOzLgdh4=
75 | github.com/open-edge-platform/cluster-manager/v2 v2.2.6 h1:2nFN41zlMi2xvXW0t/WivSnn/9GW6BvaAJDon+SMips=
76 | github.com/open-edge-platform/cluster-manager/v2 v2.2.6/go.mod h1:tVbajDf4bOWo9dfn4l/Om4r6BoTD5O7hHdKtpIXVqOY=
77 | github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s=
78 | github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw=
79 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
80 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
81 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
82 | github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
83 | github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
84 | github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0=
85 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
86 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
87 | github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
88 | github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
89 | github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
90 | github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
91 | github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
92 | github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
93 | github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
94 | github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
95 | github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
96 | github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
97 | github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
98 | github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
99 | github.com/woodsbury/decimal128 v1.4.0 h1:xJATj7lLu4f2oObouMt2tgGiElE5gO6mSWUjQsBgUlc=
100 | github.com/woodsbury/decimal128 v1.4.0/go.mod h1:BP46FUrVjVhdTbKT+XuQh2xfQaGki9LMIRJSFuh6THU=
101 | go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
102 | go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
103 | golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
104 | golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
105 | golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
106 | golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
107 | golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
108 | golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
109 | golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
110 | golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
111 | golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
112 | golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
113 | golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
114 | golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
115 | google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
116 | google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
117 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
118 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
119 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
120 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
121 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
122 | mvdan.cc/sh/v3 v3.12.0 h1:ejKUR7ONP5bb+UGHGEG/k9V5+pRVIyD+LsZz7o8KHrI=
123 | mvdan.cc/sh/v3 v3.12.0/go.mod h1:Se6Cj17eYSn+sNooLZiEUnNNmNxg0imoYlTu4CyaGyg=
124 |
--------------------------------------------------------------------------------
/tests/utils/auth_utils.go:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | package utils
5 |
6 | import (
7 | "bytes"
8 | "encoding/json"
9 | "fmt"
10 | "io"
11 | "net/http"
12 | "os"
13 | "os/exec"
14 | "strings"
15 | "text/template"
16 | "time"
17 |
18 | "github.com/open-edge-platform/cluster-tests/tests/auth"
19 | )
20 |
21 | // Constants for downstream cluster access
22 | const (
23 | LocalGatewayAddress = "http://localhost:8081"
24 | ConnectGatewayInternalAddress = "https://connect-gateway.kind.internal:443"
25 | TempKubeconfigPattern = "kubeconfig-*.yaml"
26 | LocalKubeconfigPattern = "kubeconfig-local-*.yaml"
27 | ConnectGatewayPort = 8081
28 | PortForwardStartupDelay = 2 * time.Second
29 | )
30 |
31 | // SetupTestAuthentication initializes JWT generation and returns auth context
32 | func SetupTestAuthentication(subject string) (*auth.TestAuthContext, error) {
33 | // Use the simple SetupTestAuthentication from auth package
34 | return auth.SetupTestAuthentication(subject)
35 | }
36 |
37 | // AuthenticatedHTTPClient creates an HTTP client with JWT authentication
38 | func AuthenticatedHTTPClient(authContext *auth.TestAuthContext) *http.Client {
39 | client := &http.Client{
40 | Timeout: 30 * time.Second,
41 | }
42 |
43 | // Add JWT token to requests
44 | originalTransport := client.Transport
45 | if originalTransport == nil {
46 | originalTransport = http.DefaultTransport
47 | }
48 |
49 | client.Transport = &AuthTransport{
50 | Transport: originalTransport,
51 | Token: authContext.Token,
52 | }
53 |
54 | return client
55 | }
56 |
// AuthTransport adds JWT authentication to HTTP requests. It wraps an
// inner RoundTripper and injects the bearer Token plus JSON content
// negotiation headers on every outgoing request (see RoundTrip).
type AuthTransport struct {
	Transport http.RoundTripper // underlying transport that actually performs the request
	Token     string            // raw JWT sent as "Authorization: Bearer <Token>"
}
62 |
63 | func (t *AuthTransport) RoundTrip(req *http.Request) (*http.Response, error) {
64 | // Clone the request to avoid modifying the original
65 | clonedReq := req.Clone(req.Context())
66 | clonedReq.Header.Set("Authorization", "Bearer "+t.Token)
67 | clonedReq.Header.Set("Content-Type", "application/json")
68 | clonedReq.Header.Set("Accept", "application/json")
69 |
70 | return t.Transport.RoundTrip(clonedReq)
71 | }
72 |
73 | // CallClusterManagerAPI makes an authenticated API call to cluster-manager
74 | func CallClusterManagerAPI(authContext *auth.TestAuthContext, method, endpoint string, body interface{}) (*http.Response, error) {
75 | client := AuthenticatedHTTPClient(authContext)
76 |
77 | var bodyReader *bytes.Reader
78 | if body != nil {
79 | bodyBytes, err := json.Marshal(body)
80 | if err != nil {
81 | return nil, fmt.Errorf("failed to marshal request body: %w", err)
82 | }
83 | bodyReader = bytes.NewReader(bodyBytes)
84 | }
85 |
86 | var req *http.Request
87 | var err error
88 | if bodyReader != nil {
89 | req, err = http.NewRequest(method, endpoint, bodyReader)
90 | } else {
91 | req, err = http.NewRequest(method, endpoint, nil)
92 | }
93 |
94 | if err != nil {
95 | return nil, fmt.Errorf("failed to create request: %w", err)
96 | }
97 |
98 | return client.Do(req)
99 | }
100 |
101 | // GetClusterManagerEndpoint returns the cluster-manager API endpoint
102 | func GetClusterManagerEndpoint() string {
103 | return fmt.Sprintf("http://127.0.0.1:%s", PortForwardLocalPort)
104 | }
105 |
106 | // GetClusterKubeconfigFromAPI retrieves kubeconfig from cluster-manager API
107 | func GetClusterKubeconfigFromAPI(authContext *auth.TestAuthContext, namespace, clusterName string) (*http.Response, error) {
108 | endpoint := fmt.Sprintf("%s/v2/clusters/%s/kubeconfigs", GetClusterManagerEndpoint(), clusterName)
109 |
110 | req, err := http.NewRequest("GET", endpoint, nil)
111 | if err != nil {
112 | return nil, fmt.Errorf("failed to create request: %w", err)
113 | }
114 |
115 | // Add namespace header as used by cluster-manager
116 | req.Header.Set("Activeprojectid", namespace)
117 |
118 | client := AuthenticatedHTTPClient(authContext)
119 | return client.Do(req)
120 | }
121 |
122 | // TestClusterManagerAuthentication tests if cluster-manager API accepts JWT authentication
123 | func TestClusterManagerAuthentication(authContext *auth.TestAuthContext) error {
124 | endpoint := fmt.Sprintf("%s/v2/healthz", GetClusterManagerEndpoint())
125 |
126 | resp, err := CallClusterManagerAPI(authContext, "GET", endpoint, nil)
127 | if err != nil {
128 | return fmt.Errorf("failed to call cluster-manager healthz endpoint: %w", err)
129 | }
130 | defer resp.Body.Close()
131 |
132 | switch resp.StatusCode {
133 | case http.StatusOK:
134 | fmt.Println(" JWT authentication successful")
135 | return nil
136 | case http.StatusUnauthorized:
137 | return fmt.Errorf("JWT authentication failed: token invalid or expired")
138 | case http.StatusForbidden:
139 | return fmt.Errorf("JWT valid but insufficient RBAC permissions")
140 | default:
141 | return fmt.Errorf("unexpected response status: %d", resp.StatusCode)
142 | }
143 | }
144 |
145 | // GetClusterInfoWithAuth retrieves cluster information using authenticated API call
146 | func GetClusterInfoWithAuth(authContext *auth.TestAuthContext, namespace, clusterName string) (*http.Response, error) {
147 | endpoint := fmt.Sprintf("%s/v2/clusters/%s", GetClusterManagerEndpoint(), clusterName)
148 |
149 | req, err := http.NewRequest("GET", endpoint, nil)
150 | if err != nil {
151 | return nil, fmt.Errorf("failed to create request: %w", err)
152 | }
153 |
154 | req.Header.Set("Activeprojectid", namespace)
155 |
156 | client := AuthenticatedHTTPClient(authContext)
157 | return client.Do(req)
158 | }
159 |
160 | // ImportClusterTemplateAuthenticated imports a cluster template using JWT authentication
161 | func ImportClusterTemplateAuthenticated(authContext *auth.TestAuthContext, namespace string, templateType string) error {
162 | var data []byte
163 | var err error
164 | switch templateType {
165 | case TemplateTypeK3sBaseline:
166 | data, err = os.ReadFile(BaselineClusterTemplatePathK3s)
167 | case TemplateTypeRke2Baseline:
168 | data, err = os.ReadFile(BaselineClusterTemplatePathRke2)
169 | default:
170 | return fmt.Errorf("unsupported template type: %s", templateType)
171 | }
172 |
173 | if err != nil {
174 | return err
175 | }
176 |
177 | client := AuthenticatedHTTPClient(authContext)
178 |
179 | req, err := http.NewRequest("POST", ClusterTemplateURL, bytes.NewBuffer(data))
180 | if err != nil {
181 | return err
182 | }
183 |
184 | req.Header.Set("Activeprojectid", namespace)
185 | req.Header.Set("Content-Type", "application/json")
186 | req.Header.Set("Accept", "application/json")
187 |
188 | resp, err := client.Do(req)
189 | if err != nil {
190 | return err
191 | }
192 | defer resp.Body.Close()
193 |
194 | if resp.StatusCode != http.StatusCreated && resp.StatusCode != http.StatusConflict {
195 | body, _ := io.ReadAll(resp.Body)
196 | return fmt.Errorf("failed to import cluster template: %s", string(body))
197 | }
198 |
199 | return nil
200 | }
201 |
202 | // CreateClusterAuthenticated creates a cluster using JWT authentication
203 | func CreateClusterAuthenticated(authContext *auth.TestAuthContext, namespace, nodeGUID, templateName string) error {
204 | templateData, err := os.ReadFile(ClusterConfigTemplatePath)
205 | if err != nil {
206 | return err
207 | }
208 |
209 | tmpl, err := template.New("clusterConfig").Parse(string(templateData))
210 | if err != nil {
211 | return err
212 | }
213 |
214 | var configBuffer bytes.Buffer
215 | err = tmpl.Execute(&configBuffer, struct {
216 | ClusterName string
217 | TemplateName string
218 | NodeGUID string
219 | }{
220 | NodeGUID: nodeGUID,
221 | TemplateName: templateName,
222 | ClusterName: ClusterName,
223 | })
224 | if err != nil {
225 | return err
226 | }
227 |
228 | client := AuthenticatedHTTPClient(authContext)
229 |
230 | req, err := http.NewRequest("POST", ClusterCreateURL, &configBuffer)
231 | if err != nil {
232 | return err
233 | }
234 |
235 | req.Header.Set("Activeprojectid", namespace)
236 | req.Header.Set("Content-Type", "application/json")
237 | req.Header.Set("Accept", "application/json")
238 |
239 | resp, err := client.Do(req)
240 | if err != nil {
241 | return err
242 | }
243 | defer resp.Body.Close()
244 |
245 | if resp.StatusCode != http.StatusCreated && resp.StatusCode != http.StatusConflict {
246 | body, _ := io.ReadAll(resp.Body)
247 | return fmt.Errorf("failed to create cluster: %s", string(body))
248 | }
249 |
250 | return nil
251 | }
252 |
253 | // TestDownstreamClusterAccess tests accessing the downstream cluster using the provided kubeconfig
254 | func TestDownstreamClusterAccess(kubeconfigContent string) error {
255 | // Write kubeconfig to a temporary file
256 | tmpFile, err := os.CreateTemp("", TempKubeconfigPattern)
257 | if err != nil {
258 | return fmt.Errorf("failed to create temp file: %w", err)
259 | }
260 | defer os.Remove(tmpFile.Name())
261 |
262 | if _, err := tmpFile.WriteString(kubeconfigContent); err != nil {
263 | return fmt.Errorf("failed to write kubeconfig: %w", err)
264 | }
265 | tmpFile.Close()
266 |
267 | // Modify kubeconfig to use local port-forward for connect-gateway
268 | modifiedKubeconfig := strings.ReplaceAll(kubeconfigContent,
269 | ConnectGatewayInternalAddress,
270 | LocalGatewayAddress)
271 |
272 | tmpFileModified, err := os.CreateTemp("", LocalKubeconfigPattern)
273 | if err != nil {
274 | return fmt.Errorf("failed to create modified temp file: %w", err)
275 | }
276 | defer os.Remove(tmpFileModified.Name())
277 |
278 | if _, err := tmpFileModified.WriteString(modifiedKubeconfig); err != nil {
279 | return fmt.Errorf("failed to write modified kubeconfig: %w", err)
280 | }
281 | tmpFileModified.Close()
282 |
283 | // Set up port-forward to connect-gateway if not already running
284 | if !isPortForwardRunning(ConnectGatewayPort) {
285 | cmd := exec.Command("kubectl", "port-forward", "svc/cluster-connect-gateway", fmt.Sprintf("%d:8080", ConnectGatewayPort))
286 | err := cmd.Start()
287 | if err != nil {
288 | return fmt.Errorf("failed to start port-forward to connect-gateway: %w", err)
289 | }
290 | // Give port-forward a moment to establish
291 | time.Sleep(PortForwardStartupDelay)
292 | }
293 |
294 | // Test accessing the downstream cluster - get nodes
295 | cmd := exec.Command("kubectl", "--kubeconfig", tmpFileModified.Name(), "get", "nodes", "-o", "wide")
296 | nodeOutput, err := cmd.Output()
297 | if err != nil {
298 | return fmt.Errorf("failed to access downstream cluster nodes: %w", err)
299 | }
300 |
301 | if len(nodeOutput) == 0 {
302 | return fmt.Errorf("no nodes found in downstream cluster")
303 | }
304 |
305 | // Test accessing the downstream cluster - get all pods
306 | cmd = exec.Command("kubectl", "--kubeconfig", tmpFileModified.Name(), "get", "pods", "-A", "-o", "wide")
307 | podOutput, err := cmd.Output()
308 | if err != nil {
309 | return fmt.Errorf("failed to get pods from downstream cluster: %w", err)
310 | }
311 |
312 | // Display the complete downstream cluster information
313 | fmt.Printf("\n✅ DOWNSTREAM K3S CLUSTER ACCESS SUCCESSFUL!\n")
314 | fmt.Printf("==========================================\n")
315 | fmt.Printf("NODES:\n%s\n", string(nodeOutput))
316 | fmt.Printf("PODS (ALL NAMESPACES):\n%s\n", string(podOutput))
317 | fmt.Printf("==========================================\n")
318 |
319 | return nil
320 | }
321 |
// isPortForwardRunning checks if a port-forward is already running on the
// specified port. It relies on lsof exiting zero exactly when something
// holds the port open.
func isPortForwardRunning(port int) bool {
	return exec.Command("lsof", "-i", fmt.Sprintf(":%d", port)).Run() == nil
}
328 |
--------------------------------------------------------------------------------
/LICENSES/Apache-2.0.txt:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 | END OF TERMS AND CONDITIONS
178 |
179 | APPENDIX: How to apply the Apache License to your work.
180 |
181 | To apply the Apache License to your work, attach the following
182 | boilerplate notice, with the fields enclosed by brackets "[]"
183 | replaced with your own identifying information. (Don't include
184 | the brackets!) The text should be enclosed in the appropriate
185 | comment syntax for the file format. We also recommend that a
186 | file or class name and description of purpose be included on the
187 | same "printed page" as the copyright notice for easier
188 | identification within third-party archives.
189 |
190 | Copyright [yyyy] [name of copyright owner]
191 |
192 | Licensed under the Apache License, Version 2.0 (the "License");
193 | you may not use this file except in compliance with the License.
194 | You may obtain a copy of the License at
195 |
196 | http://www.apache.org/licenses/LICENSE-2.0
197 |
198 | Unless required by applicable law or agreed to in writing, software
199 | distributed under the License is distributed on an "AS IS" BASIS,
200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 | See the License for the specific language governing permissions and
202 | limitations under the License.
203 |
--------------------------------------------------------------------------------
/tests/robustness-test/cluster_orch_robustness_test.go:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | package functional_test
5 |
6 | import (
7 | "encoding/json"
8 | "fmt"
9 | "net/http"
10 | "os"
11 | "os/exec"
12 | "strings"
13 | "testing"
14 | "time"
15 |
16 | "github.com/open-edge-platform/cluster-tests/tests/utils"
17 |
18 | . "github.com/onsi/ginkgo/v2"
19 | . "github.com/onsi/gomega"
20 | )
21 |
22 | func TestClusterOrchRobustnessTest(t *testing.T) {
23 | RegisterFailHandler(Fail)
24 | _, _ = fmt.Fprintf(GinkgoWriter, "Starting cluster orch robustness tests\n")
25 | RunSpecs(t, "cluster orch robustness test suite")
26 | }
27 |
28 | var _ = Describe("Cluster Orch Robustness tests", Ordered, Label(utils.ClusterOrchRobustnessTest), func() {
29 | var (
30 | namespace string
31 | nodeGUID string
32 | portForwardCmd *exec.Cmd
33 | gatewayPortForward *exec.Cmd
34 | clusterCreateStartTime time.Time
35 | clusterCreateEndTime time.Time
36 | )
37 |
38 | BeforeAll(func() {
39 | namespace = utils.GetEnv(utils.NamespaceEnvVar, utils.DefaultNamespace)
40 | nodeGUID = utils.GetEnv(utils.NodeGUIDEnvVar, utils.DefaultNodeGUID)
41 |
42 | // create namespace for the project
43 | By("Ensuring the namespace exists")
44 | err := utils.EnsureNamespaceExists(namespace)
45 | Expect(err).NotTo(HaveOccurred())
46 |
47 | By("Port forwarding to the cluster manager service")
48 | portForwardCmd = exec.Command("kubectl", "port-forward", utils.PortForwardService, fmt.Sprintf("%s:%s", utils.PortForwardLocalPort, utils.PortForwardRemotePort), "--address", utils.PortForwardAddress)
49 | err = portForwardCmd.Start()
50 | Expect(err).NotTo(HaveOccurred())
51 | time.Sleep(5 * time.Second) // Give some time for port-forwarding to establish
52 |
53 | By("Port forwarding to the cluster gateway service")
54 | gatewayPortForward = exec.Command("kubectl", "port-forward", utils.PortForwardGatewayService, fmt.Sprintf("%s:%s", utils.PortForwardGatewayLocalPort, utils.PortForwardGatewayRemotePort), "--address", utils.PortForwardAddress)
55 | err = gatewayPortForward.Start()
56 | Expect(err).NotTo(HaveOccurred())
57 | time.Sleep(5 * time.Second) // Give some time for port-forwarding to establish
58 |
59 | })
60 |
61 | AfterAll(func() {
62 | defer func() {
63 | if portForwardCmd != nil && portForwardCmd.Process != nil {
64 | portForwardCmd.Process.Kill()
65 | }
66 | }()
67 |
68 | if !utils.SkipDeleteCluster {
69 | By("Deleting the cluster")
70 | err := utils.DeleteCluster(namespace)
71 | Expect(err).NotTo(HaveOccurred())
72 |
73 | By("Verifying that the cluster is deleted")
74 | Eventually(func() bool {
75 | cmd := exec.Command("kubectl", "-n", namespace, "get", "cluster", utils.ClusterName)
76 | err := cmd.Run()
77 | return err != nil
78 | }, 1*time.Minute, 5*time.Second).Should(BeTrue())
79 | }
80 | })
81 |
82 | It("Test prerequisite: Should successfully import RKE2 Single Node cluster template", func() {
83 | By("Importing the cluster template")
84 | err := utils.ImportClusterTemplate(namespace, utils.TemplateTypeRke2Baseline)
85 | Expect(err).NotTo(HaveOccurred())
86 |
87 | By("Waiting for the cluster template to be ready")
88 | Eventually(func() bool {
89 | return utils.IsClusterTemplateReady(namespace, utils.Rke2TemplateName)
90 | }, 1*time.Minute, 2*time.Second).Should(BeTrue())
91 | })
92 |
93 | It("Test prerequisite: Should verify that cluster create API should succeed for rke2 cluster", func() {
94 | // Record the start time before creating the cluster
95 | clusterCreateStartTime = time.Now()
96 |
97 | By("Creating the cluster")
98 | err := utils.CreateCluster(namespace, nodeGUID, utils.Rke2TemplateName)
99 | Expect(err).NotTo(HaveOccurred())
100 | })
101 |
102 | It("Test prerequisite: Should verify that the cluster is fully active", func() {
103 | By("Waiting for IntelMachine to exist")
104 | Eventually(func() bool {
105 | cmd := exec.Command("kubectl", "-n", namespace, "get", "intelmachine", "-o", "jsonpath={.items[*].metadata.name}")
106 | output, err := cmd.Output()
107 | if err != nil {
108 | return false
109 | }
110 | count := 0
111 | if len(output) > 0 {
112 | count = len(strings.Fields(string(output)))
113 | }
114 | return count > 0
115 | }, 1*time.Minute, 5*time.Second).Should(BeTrue())
116 |
117 | By("Waiting for all components to be ready")
118 | Eventually(func() bool {
119 | cmd := exec.Command("clusterctl", "describe", "cluster", utils.ClusterName, "-n", namespace)
120 | output, err := cmd.Output()
121 | if err != nil {
122 | return false
123 | }
124 | fmt.Printf("Cluster components status:\n%s\n", string(output))
125 | return utils.CheckAllComponentsReady(string(output))
126 | }, 10*time.Minute, 10*time.Second).Should(BeTrue())
127 | // Record the end time after the cluster is fully active
128 | clusterCreateEndTime = time.Now()
129 |
130 | // Calculate and print the total time taken
131 | totalTime := clusterCreateEndTime.Sub(clusterCreateStartTime)
132 | fmt.Printf("\033[32mTotal time from cluster creation to fully active: %v 🚀 ✅\033[0m\n", totalTime)
133 | })
134 |
135 | It("Test prerequisite: Should verify that the cluster information can be queried ", func() {
136 | By("Getting the cluster information")
137 | resp, err := utils.GetClusterInfo(namespace, utils.ClusterName)
138 | Expect(err).NotTo(HaveOccurred())
139 | defer resp.Body.Close()
140 |
141 | Expect(resp.StatusCode).To(Equal(http.StatusOK))
142 | // TODO: Verify the cluster details are correct
143 | })
144 |
145 | It("Test prerequisite: Should verify that the connect gateway allow access to k8s api", func() {
146 | // cmd := exec.Command("curl", "-X", "GET", fmt.Sprintf("127.0.0.1:%v/kubernetes/%v-%v/api/v1/namespaces/default/pods", portForwardGatewayLocalPort, namespace, clusterName))
147 | By("Getting kubeconfig")
148 | fmt.Println(utils.ClusterName)
149 | cmd := exec.Command("clusterctl", "get", "kubeconfig", utils.ClusterName, "--namespace", utils.DefaultNamespace) // ">", "kubeconfig.yaml")
150 | output, err := cmd.Output()
151 | Expect(err).NotTo(HaveOccurred())
152 |
153 | kubeConfigName := "kubeconfig.yaml"
154 | err = os.WriteFile(kubeConfigName, output, 0644)
155 | Expect(err).NotTo(HaveOccurred())
156 |
157 | By("Setting in kubeconfig server to cluster connect gateway")
158 | cmd = exec.Command("sed", "-i", "s|http://[[:alnum:].-]*:8080/|http://127.0.0.1:8081/|", "kubeconfig.yaml")
159 | _, err = cmd.Output()
160 | Expect(err).NotTo(HaveOccurred())
161 |
162 | By("Getting list of pods")
163 | cmd = exec.Command("kubectl", "--kubeconfig", "kubeconfig.yaml", "get", "pods")
164 | _, err = cmd.Output()
165 | Expect(err).NotTo(HaveOccurred())
166 |
167 | // Exec into one of the pods in the kube-system namespace on the edge node cluster
168 | By("Executing command in kube-scheduler-cluster-agent-0 pod")
169 | cmd = exec.Command("kubectl", "exec", "--kubeconfig", "kubeconfig.yaml", "-it", "-n", "kube-system", "kube-scheduler-cluster-agent-0", "--", "ls")
170 | output, err = cmd.Output()
171 | Expect(err).NotTo(HaveOccurred())
172 | By("Printing the output of the command")
173 | fmt.Printf("Output of `ls` command:\n%s\n", string(output))
174 | })
175 |
176 | It("Should verify that clusterConnect gateway probes the connection to cluster", func() {
177 | By("Checking the clusterConnect's LastProbeSuccessTimestamp is not zero")
178 | Eventually(func() bool {
179 | // get all clusterconnects - there should be only one, pick its name
180 | cmd := exec.Command("kubectl", "get", "clusterconnect", "-o", "jsonpath={.items[0].metadata.name}")
181 | output, err := cmd.Output()
182 | if err != nil {
183 | return false
184 | }
185 | clusterConnectName := string(output)
186 | fmt.Printf("ClusterConnect Name: %s\n", clusterConnectName)
187 |
188 | cmd = exec.Command("kubectl", "get", "clusterconnect", clusterConnectName, "-o", "jsonpath={.status.connectionProbe.lastProbeSuccessTimestamp}")
189 | output, err = cmd.Output()
190 | if err != nil {
191 | return false
192 | }
193 | lastProbeSuccessTimestamp := string(output)
194 | if lastProbeSuccessTimestamp == "" {
195 | fmt.Println("LastProbeSuccessTimestamp is not set yet")
196 | return false
197 | }
198 | fmt.Printf("LastProbeSuccessTimestamp: %s\n", lastProbeSuccessTimestamp)
199 | return lastProbeSuccessTimestamp != ""
200 | }, 5*time.Minute, 10*time.Second).Should(BeTrue())
201 | })
202 |
203 | It("Should verify that a cluster shows connection lost status when connect agent stops working", func() {
204 | By("Breaking the connect agent by changing its image name in the pod manifest")
205 | // kubectl exec -n default cluster-agent-0 -- sed -i 's/connect-agent/connectx-agent/g' /var/lib/rancher/rke2/agent/pod-manifests/connect-agent.yaml
206 | breakConnectAgentCommand := exec.Command("kubectl", "exec", "-n", "default", "cluster-agent-0", "--", "sed", "-i", "s/connect-agent/connectx-agent/g", "/var/lib/rancher/rke2/agent/pod-manifests/connect-agent.yaml")
207 | err := breakConnectAgentCommand.Run()
208 | Expect(err).NotTo(HaveOccurred())
209 | connectionLostStartTime := time.Now()
210 |
211 | By("Waiting for intel infra provider to detect connection lost")
212 | Eventually(func() bool {
213 | cmd := exec.Command("clusterctl", "describe", "cluster", utils.ClusterName, "-n", namespace)
214 | output, err := cmd.Output()
215 | if err != nil {
216 | return false
217 | }
218 | fmt.Printf("Cluster components status:\n%s\n", string(output))
219 | return utils.CheckLostConnection(string(output))
220 | }, 10*time.Minute, 10*time.Second).Should(BeTrue())
221 | // Record the end time after the cluster is fully active
222 | connectionLostEndTime := time.Now()
223 |
224 | // Calculate and print the total time taken to detect connection lost
225 | totalTime := connectionLostEndTime.Sub(connectionLostStartTime)
226 | fmt.Printf("\033[32mTotal time from breaking connect-agent to detect connection lost: %v 🚨🛜\033[0m\n", totalTime)
227 |
228 | By("Getting the cluster information about lost connection")
229 | resp, err := utils.GetClusterInfo(namespace, utils.ClusterName)
230 | Expect(err).NotTo(HaveOccurred())
231 | defer resp.Body.Close()
232 | decoder := json.NewDecoder(resp.Body)
233 | var clusterInfo map[string]interface{}
234 | err = decoder.Decode(&clusterInfo)
235 | Expect(err).NotTo(HaveOccurred())
236 | Expect(resp.StatusCode).To(Equal(http.StatusOK))
237 |
238 | By("Verifying the providerStatus.message is 'connect agent is disconnected'")
239 | providerStatus, ok := clusterInfo["providerStatus"].(map[string]interface{})
240 | Expect(ok).To(BeTrue(), "providerStatus field is missing or not a map")
241 |
242 | message, ok := providerStatus["message"].(string)
243 | Expect(ok).To(BeTrue(), "message field is missing or not a string")
244 | Expect(message).To(ContainSubstring("connect agent is disconnected"), "providerStatus.message does not contain 'connect agent is disconnected'")
245 |
246 | })
247 |
248 | It("Should verify that cluster mark infrastructure as ready when connect-agent is fixed", func() {
249 | By("Fixing the connect agent by changing its image name in the pod manifest to the right one")
250 | // kubectl exec -n default cluster-agent-0 -- sed -i 's/connectx-agent/connect-agent/g' /var/lib/rancher/rke2/agent/pod-manifests/connect-agent.yaml
251 | fixConnectAgentCommand := exec.Command("kubectl", "exec", "-n", "default", "cluster-agent-0", "--", "sed", "-i", "s/connectx-agent/connect-agent/g", "/var/lib/rancher/rke2/agent/pod-manifests/connect-agent.yaml")
252 | err := fixConnectAgentCommand.Run()
253 | Expect(err).NotTo(HaveOccurred())
254 | connectionRecoveredStartTime := time.Now()
255 |
256 | By("Waiting for all components to be ready again")
257 | Eventually(func() bool {
258 | cmd := exec.Command("clusterctl", "describe", "cluster", utils.ClusterName, "-n", namespace)
259 | output, err := cmd.Output()
260 | if err != nil {
261 | return false
262 | }
263 | fmt.Printf("Cluster components status:\n%s\n", string(output))
264 | return utils.CheckAllComponentsReady(string(output))
265 | }, 5*time.Minute, 10*time.Second).Should(BeTrue())
266 |
267 | connectionRecoveredEndTime := time.Now()
268 |
269 | // Calculate and print the total time taken to recover from connection lost
270 | totalTime := connectionRecoveredEndTime.Sub(connectionRecoveredStartTime)
271 | fmt.Printf("\033[32mTotal time from breaking connect-agent to recover from connection lost: %v 🚨🛜 ✅\033[0m\n", totalTime)
272 |
273 | })
274 | })
275 |
--------------------------------------------------------------------------------
/.test-dependencies.yaml:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | # .test-dependencies.yaml
5 | # This YAML file defines the dependencies for the test bootstrap step. It specifies build steps for various dependencies
6 | # required for the test environment. The file contains the following fields:
7 | #
8 | # Fields:
9 | # - kind-cluster-config: Specifies the configuration file for the kind cluster.
10 | #
11 | # - components: A list of components, each with its own configuration:
12 | # - name: The name of the component.
13 | # - skip-component: A flag to skip the component during the build process (true/false).
14 | # - skip-local-build: A flag to skip the local build of the component (true/false).
15 | # - pre-install-commands: Commands to run before installing the component.
16 | # - helm-repo: Details for the Helm repositories, including:
17 | # - url: The URL of the Helm repository.
18 | # release-name: The release name for the Helm chart.
19 | # package: The Helm chart package name.
20 | # namespace: The Kubernetes namespace for the Helm release.
21 | # version: The version of the Helm chart.
22 | # use-devel: A flag to enable (or not) usage of developer versions of the chart
23 | # overrides: The Helm chart overrides.
24 | # - git-repo:
25 | # url: The Git URL of the component's repository.
26 | # version: The Git branch/tag/commit of the component to use.
27 | # - make-directory: The directory containing the Makefile.
28 | # - make-variables: Variables to pass to the `make` command.
29 | # - make-targets: `make` targets to build the component.
30 | # - post-install-commands: Commands to run after installing the component.
31 | ---
32 | kind-cluster-config: configs/kind-cluster-with-extramounts.yaml
33 |
34 | components:
35 | # Cluster API Provider Intel
36 | - name: cluster-api-provider-intel
37 | skip-component: false
38 | skip-local-build: true
39 | pre-install-commands:
40 | - echo "Installing Prometheus Operator CRDs to get Service Monitor CRD"
41 | - kubectl apply -f https://github.com/prometheus-operator/prometheus-operator/releases/download/v0.81.0/stripped-down-crds.yaml
42 | helm-repo:
43 | - url: "oci://registry-rs.edgeorchestration.intel.com"
44 | release-name: "intel-infra-provider"
45 | package: "edge-orch/cluster/charts/intel-infra-provider"
46 | namespace: "default"
47 | version: "" # Use the latest version when nil
48 | use-devel: false # Use development version of the chart
49 | overrides: "--set metrics.serviceMonitor.enabled=false --set manager.extraArgs.use-inv-stub=true
50 | --set southboundApi.extraArgs.useGrpcStubMiddleware=true"
51 | - url: "oci://registry-rs.edgeorchestration.intel.com"
52 | release-name: "intel-infra-provider-crds"
53 | package: "edge-orch/cluster/charts/intel-infra-provider-crds"
54 | namespace: "default"
55 | version: "" # Use the latest version when nil
56 | use-devel: false # Use development version of the chart
57 | overrides: ""
58 | git-repo:
59 | url: https://github.com/open-edge-platform/cluster-api-provider-intel.git
60 | version: main
61 | make-directory: ""
62 | make-variables:
63 | - VERSION=v0.0.0
64 | - HELM_VERSION=v0.0.0
65 | - USE_GRPC_MIDDLEWARE_STUB=true # Enable this flag to use the gRPC middleware stub. Skips jwt auth on SB-API
66 | - USE_INV_STUB=true # Enable this flag to use the Inventory stub. Inventory stub is used when we are not installing Inventory
67 | make-targets:
68 | - kind-load
69 | - helm-install
70 | post-install-commands:
71 | ##################################################################################
72 | # Install the CAPI operator and default control plane and infra providers -- start
73 | ##################################################################################
74 | # Install cert-manager
75 | - kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.16.0/cert-manager.yaml
76 | # Wait for cert-manager to be ready
77 | - kubectl wait --for=condition=Available --timeout=300s deployment.apps/cert-manager-webhook -n cert-manager
78 | - kubectl wait --for=condition=Available --timeout=300s deployment.apps/cert-manager-cainjector -n cert-manager
79 | - kubectl wait --for=condition=Available --timeout=300s deployment.apps/cert-manager -n cert-manager
80 | # Install the CAPI operator and default control plane and infra providers
81 | - helm repo add capi-operator https://kubernetes-sigs.github.io/cluster-api-operator
82 | - helm repo add jetstack https://charts.jetstack.io
83 | - helm repo update
84 | - kubectl apply -f ../../configs/capi-variables.yaml --force
85 | # Use envsubst to substitute the environment variables in the YAML file
86 | - envsubst < ../../configs/capi-operator.yaml > /tmp/capi-operator.yaml
87 | - helm install capi-operator capi-operator/cluster-api-operator --create-namespace -n capi-operator-system -f /tmp/capi-operator.yaml --wait --timeout 5m --version ${CAPI_OPERATOR_HELM_VERSION}
88 | # Wait for the CAPI operator to be ready
89 | - until kubectl get -n capi-operator-system deployment/capi-operator-cluster-api-operator >/dev/null 2>&1; do sleep 1; done && kubectl wait --namespace capi-operator-system deployment/capi-operator-cluster-api-operator --for=condition=available --timeout=5m
90 | - until kubectl get -n capi-system deployment/capi-controller-manager >/dev/null 2>&1; do sleep 1; done && kubectl wait --namespace capi-system deployment/capi-controller-manager --for=condition=available --timeout=5m
91 | # Install the default control plane and infra providers
92 | - until kubectl get -n docker-infrastructure-system deployment/capd-controller-manager >/dev/null 2>&1; do sleep 1; done && kubectl wait --namespace docker-infrastructure-system deployment/capd-controller-manager --for=condition=available --timeout=5m
93 | - until kubectl get -n kubeadm-bootstrap-system deployment/capi-kubeadm-bootstrap-controller-manager >/dev/null 2>&1; do sleep 1; done && kubectl wait --namespace kubeadm-bootstrap-system deployment/capi-kubeadm-bootstrap-controller-manager --for=condition=available --timeout=5m
94 | - until kubectl get -n kubeadm-control-plane-system deployment/capi-kubeadm-control-plane-controller-manager >/dev/null 2>&1; do sleep 1; done && kubectl wait --namespace kubeadm-control-plane-system deployment/capi-kubeadm-control-plane-controller-manager --for=condition=available --timeout=5m
95 | - until kubectl get -n capr-system deployment/rke2-bootstrap-controller-manager >/dev/null 2>&1; do sleep 1; done && kubectl wait --namespace capr-system deployment/rke2-bootstrap-controller-manager --for=condition=available --timeout=5m
96 | - until kubectl get -n capr-system deployment/rke2-control-plane-controller-manager >/dev/null 2>&1; do sleep 1; done && kubectl wait --namespace capr-system deployment/rke2-control-plane-controller-manager --for=condition=available --timeout=5m
97 | # Patch the K3S Providers so that they can be initialized and wait for the components to be ready
      # Note: The patch is required to set the fetchConfig URL for the K3S providers. The CAPI providerURL mechanism does not appear to be supported for K3s as it is for RKE2 and Kubeadm; the providerURL field, however, is still required.
99 | - until kubectl get -n capk-system controlplaneprovider/k3s >/dev/null 2>&1; do sleep 1; done && kubectl patch -n capk-system controlplaneprovider k3s --type=merge -p '{"spec":{"fetchConfig":{"url":"'"${CAPI_K3S_CONTROLPLANE_URL}"'"}}}'
100 | - until kubectl get -n capk-system bootstrapprovider/k3s >/dev/null 2>&1; do sleep 1; done && kubectl patch -n capk-system bootstrapprovider k3s --type=merge -p '{"spec":{"fetchConfig":{"url":"'"${CAPI_K3S_BOOTSTRAP_URL}"'"}}}'
101 | - until kubectl get -n capk-system deployment/capi-k3s-bootstrap-controller-manager >/dev/null 2>&1; do sleep 1; done && kubectl wait --namespace capk-system deployment/capi-k3s-bootstrap-controller-manager --for=condition=available --timeout=5m
102 | - until kubectl get -n capk-system deployment/capi-k3s-control-plane-controller-manager >/dev/null 2>&1; do sleep 1; done && kubectl wait --namespace capk-system deployment/capi-k3s-control-plane-controller-manager --for=condition=available --timeout=5m
103 | ##################################################################################
104 | # Install the CAPI operator and default control plane and infra providers -- end
105 | ##################################################################################
106 | - kubectl delete -f config/crd/deps/cluster.edge-orchestrator.intel.com_clusterconnects.yaml || true
107 |
108 | # Cluster Connect Gateway
109 | - name: cluster-connect-gateway
110 | skip-component: false
111 | skip-local-build: true
112 | pre-install-commands: []
113 | helm-repo:
114 | - url: "oci://registry-rs.edgeorchestration.intel.com"
115 | release-name: "cluster-connect-gateway"
116 | package: "edge-orch/cluster/charts/cluster-connect-gateway"
117 | namespace: "default"
118 | version: "" # Use the latest version when nil
119 | use-devel: false # Use development version of the chart
120 | overrides: "--set controller.privateCA.enabled=false"
121 | - url: "oci://registry-rs.edgeorchestration.intel.com"
122 | release-name: "cluster-connect-gateway-crd"
123 | package: "edge-orch/cluster/charts/cluster-connect-gateway-crd"
124 | namespace: "default"
125 | version: "" # Use the latest version when nil
126 | use-devel: false # Use development version of the chart
127 | overrides: ""
128 | git-repo:
129 | url: https://github.com/open-edge-platform/cluster-connect-gateway.git
130 | version: main
131 | make-directory: ""
132 | make-variables:
133 | - VERSION=v0.0.0
134 | - HELM_VERSION=v0.0.0
135 | - KIND_CLUSTER=kind
136 | - NAMESPACE=default
137 | - HELM_VARS="--set controller.privateCA.enabled=false --set agent.image.tag=latest --set controller.connectionProbeTimeout=1m --set gateway.connectionProbeInterval=20s"
138 | make-targets:
139 | - docker-build
140 | - docker-load
141 | - helm-install
142 | post-install-commands:
143 | - CONNECT_GATEWAY_IP=$(kubectl get svc cluster-connect-gateway -o go-template="{{ .spec.clusterIP }}") envsubst < ../../configs/conredns-config.yaml | kubectl apply -f -
144 |
145 | # Cluster Manager
146 | - name: cluster-manager
147 | skip-component: false
148 | skip-local-build: true
149 | pre-install-commands: []
150 | helm-repo:
151 | - url: "oci://registry-rs.edgeorchestration.intel.com"
152 | release-name: "cluster-manager"
153 | package: "edge-orch/cluster/charts/cluster-manager"
154 | namespace: "default"
155 | version: "" # Use the latest version when nil
156 | use-devel: false # Use development version of the chart
157 | overrides: "--set clusterManager.extraArgs.disable-multi-tenancy=true --set clusterManager.extraArgs.disable-auth=true --set clusterManager.extraArgs.disable-inventory=true --set templateController.extraArgs[0]='--webhook-enabled=true' --set webhookService.enabled=true"
158 | - url: "oci://registry-rs.edgeorchestration.intel.com"
159 | release-name: "cluster-template-crd"
160 | package: "edge-orch/cluster/charts/cluster-template-crd"
161 | namespace: "default"
162 | version: "" # Use the latest version when nil
163 | use-devel: false # Use development version of the chart
164 | overrides: ""
165 | git-repo:
166 | url: https://github.com/open-edge-platform/cluster-manager.git
167 | version: main
168 | make-variables:
169 | - VERSION=v0.0.0
170 | - HELM_VERSION=v0.0.0
171 | - KIND_CLUSTER=kind
172 | - DISABLE_MT=true # Enable this flag to disable the multi-tenancy feature. This is required for the test environment where no MT controllers are installed
173 | - DISABLE_AUTH=true # Should be true for CO subsystem integration tests if keycloak is not deployed
174 | - DISABLE_INV=true # Should be true for CO subsystem integration tests if inventory is not deployed
175 | make-targets:
176 | - helm-install
177 | make-directory: ""
178 | post-install-commands: []
179 |
180 | # OIDC Mock Server for JWT Authentication Testing (conditionally deployed via Makefile)
181 | - name: oidc-mock-server
182 | skip-component: true # Controlled by Makefile environment variables
183 | skip-local-build: true
184 | pre-install-commands: []
185 | helm-repo: []
186 | git-repo:
187 | url: ""
188 | version: ""
189 | make-directory: ""
190 | make-variables: []
191 | make-targets: []
192 | post-install-commands: []
193 |
194 | # Cluster Agent on LW-ENiC
195 | - name: cluster-agent
196 | skip-component: false
197 | skip-local-build: false
198 | pre-install-commands: []
199 | helm-repo:
200 | - url: ""
201 | release-name: ""
202 | package: ""
203 | namespace: ""
204 | version: ""
205 | use-devel: false # Use development version of the chart
206 | overrides: ""
207 | git-repo:
208 | url: https://github.com/open-edge-platform/edge-node-agents.git
209 | version: lw-enic
210 | make-directory: "cluster-agent/enic/"
211 | make-variables: []
212 | make-targets:
213 | - build-enic-docker # Enable this if you want to build the enic docker image. This usually is a time-consuming process
214 | - load-enic-docker # Enable this if you want to load the enic docker image that you build to the kind cluster
215 | - run-enic-pod
216 | post-install-commands: []
217 |
--------------------------------------------------------------------------------
/tests/auth/jwt.go:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | package auth
5 |
import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/base64"
	"encoding/json"
	"encoding/pem"
	"fmt"
	"math/big"
	"os"
	"path/filepath"
	"sync"
	"time"

	"github.com/golang-jwt/jwt/v5"
)
21 |
// Constants for JWT configuration
const (
	// KeyID is the key identifier ("kid") used for the test signing key.
	KeyID = "cluster-tests-key"
	// IssuerURL is the issuer claim value, matching the platform Keycloak
	// realm URL used by the test environment.
	IssuerURL = "http://platform-keycloak.orch-platform.svc/realms/master"
)

// runtime-generated keys
var (
	dynamicPrivateKey *rsa.PrivateKey // lazily generated (or loaded) RSA private key
	dynamicPublicKey  *rsa.PublicKey  // public half of dynamicPrivateKey
	keyGenerationOnce sync.Once       // ensures the key pair is created exactly once
	keyGenerationErr  error           // records a failure from the one-time key generation
)
35 |
// keyFilePath returns the path where keys should be stored
// (a fixed location under /tmp shared by all test processes).
func keyFilePath() string {
	const path = "/tmp/cluster-tests-dynamic-keys.pem"
	return path
}
40 |
41 | // loadKeysFromFile attempts to load existing keys from file
42 | func loadKeysFromFile() (*rsa.PrivateKey, error) {
43 | keyPath := keyFilePath()
44 | if _, err := os.Stat(keyPath); os.IsNotExist(err) {
45 | return nil, nil // File doesn't exist, will generate new keys
46 | }
47 |
48 | keyData, err := os.ReadFile(keyPath)
49 | if err != nil {
50 | return nil, fmt.Errorf("failed to read key file: %w", err)
51 | }
52 |
53 | block, _ := pem.Decode(keyData)
54 | if block == nil {
55 | return nil, fmt.Errorf("failed to decode PEM block")
56 | }
57 |
58 | privateKey, err := x509.ParsePKCS1PrivateKey(block.Bytes)
59 | if err != nil {
60 | return nil, fmt.Errorf("failed to parse private key: %w", err)
61 | }
62 |
63 | return privateKey, nil
64 | }
65 |
66 | // saveKeysToFile saves the generated keys to file for reuse
67 | func saveKeysToFile(privateKey *rsa.PrivateKey) error {
68 | keyPath := keyFilePath()
69 | privateKeyBytes := x509.MarshalPKCS1PrivateKey(privateKey)
70 | privateKeyPEM := pem.EncodeToMemory(&pem.Block{
71 | Type: "RSA PRIVATE KEY",
72 | Bytes: privateKeyBytes,
73 | })
74 |
75 | return os.WriteFile(keyPath, privateKeyPEM, 0600)
76 | }
77 |
78 | // generateRuntimeKeys creates a new RSA key pair at runtime or loads existing ones
79 | func generateRuntimeKeys() {
80 | // First try to load existing keys
81 | if existingKey, err := loadKeysFromFile(); err == nil && existingKey != nil {
82 | dynamicPrivateKey = existingKey
83 | dynamicPublicKey = &existingKey.PublicKey
84 | return
85 | }
86 |
87 | // Generate new 2048-bit RSA key pair
88 | privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
89 | if err != nil {
90 | keyGenerationErr = fmt.Errorf("failed to generate RSA key pair: %w", err)
91 | return
92 | }
93 |
94 | if saveErr := saveKeysToFile(privateKey); saveErr != nil {
95 | keyGenerationErr = fmt.Errorf("failed to save keys to file: %w", saveErr)
96 | return
97 | }
98 |
99 | dynamicPrivateKey = privateKey
100 | dynamicPublicKey = &privateKey.PublicKey
101 | }
102 |
103 | // getOrGenerateKeys ensures we have a key pair, generating it if needed
104 | func getOrGenerateKeys() (*rsa.PrivateKey, *rsa.PublicKey, error) {
105 | keyGenerationOnce.Do(generateRuntimeKeys)
106 | if keyGenerationErr != nil {
107 | return nil, nil, keyGenerationErr
108 | }
109 | return dynamicPrivateKey, dynamicPublicKey, nil
110 | }
111 |
112 | // encodeBase64URLBigInt encodes a big integer as a base64url string (for JWKS)
113 | func encodeBase64URLBigInt(i *big.Int) string {
114 | return base64.RawURLEncoding.EncodeToString(i.Bytes())
115 | }
116 |
117 | // getPublicKeyPEM returns the public key in PEM format for OIDC mock server
118 | func getPublicKeyPEM() (string, error) {
119 | _, publicKey, err := getOrGenerateKeys()
120 | if err != nil {
121 | return "", err
122 | }
123 |
124 | pubKeyBytes, err := x509.MarshalPKIXPublicKey(publicKey)
125 | if err != nil {
126 | return "", fmt.Errorf("failed to marshal public key: %w", err)
127 | }
128 |
129 | pubKeyPEM := pem.EncodeToMemory(&pem.Block{
130 | Type: "PUBLIC KEY",
131 | Bytes: pubKeyBytes,
132 | })
133 |
134 | return string(pubKeyPEM), nil
135 | }
136 |
137 | // getJWKS returns the public key in JWKS format for OIDC discovery
138 | func getJWKS() (string, error) {
139 | _, publicKey, err := getOrGenerateKeys()
140 | if err != nil {
141 | return "", err
142 | }
143 |
144 | jwks := map[string]interface{}{
145 | "keys": []map[string]interface{}{
146 | {
147 | "kty": "RSA",
148 | "use": "sig",
149 | "kid": KeyID,
150 | "alg": "PS512",
151 | "n": encodeBase64URLBigInt(publicKey.N),
152 | "e": encodeBase64URLBigInt(big.NewInt(int64(publicKey.E))),
153 | },
154 | },
155 | }
156 |
157 | jwksBytes, err := json.Marshal(jwks)
158 | if err != nil {
159 | return "", fmt.Errorf("failed to marshal JWKS: %w", err)
160 | }
161 |
162 | return string(jwksBytes), nil
163 | }
164 |
// TestJWTGenerator provides backward compatibility for tests
// This struct maintains the interface used by legacy test code while
// leveraging the new dynamic key generation system internally.
type TestJWTGenerator struct {
	privateKey *rsa.PrivateKey // per-instance signing key (distinct from the package-level runtime keys)
	publicKey  *rsa.PublicKey  // verification half of privateKey
}
172 |
173 | // createToken is a helper function to reduce code duplication in token generation
174 | func (g *TestJWTGenerator) createToken(claims jwt.MapClaims) (string, error) {
175 | token := jwt.NewWithClaims(jwt.SigningMethodPS512, claims)
176 | token.Header["kid"] = KeyID // Use constant instead of hardcoded value
177 | return token.SignedString(g.privateKey)
178 | }
179 |
180 | // NewTestJWTGenerator creates a new JWT generator with dynamic keys (backward compatibility)
181 | func NewTestJWTGenerator() (*TestJWTGenerator, error) {
182 | // Generate unique keys for each generator instance (not shared)
183 | privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
184 | if err != nil {
185 | return nil, fmt.Errorf("failed to generate RSA key pair: %w", err)
186 | }
187 |
188 | return &TestJWTGenerator{
189 | privateKey: privateKey,
190 | publicKey: &privateKey.PublicKey,
191 | }, nil
192 | }
193 |
194 | // GenerateClusterManagerToken generates a token for cluster-manager (backward compatibility)
195 | func (g *TestJWTGenerator) GenerateClusterManagerToken(subject, projectUUID string, expiry time.Duration) (string, error) {
196 | // Set issuer and audience to match unit test expectations
197 | now := time.Now()
198 | clusterNamespace := "53cd37b9-66b2-4cc8-b080-3722ed7af64a" // Default namespace from cluster_utils.go
199 | claims := jwt.MapClaims{
200 | "sub": subject,
201 | "iss": IssuerURL,
202 | "aud": []string{"cluster-manager"},
203 | "scope": "openid email roles profile", // Match working JWT scope
204 | "exp": now.Add(expiry).Unix(),
205 | "iat": now.Unix(),
206 | "typ": "Bearer",
207 | "azp": "system-client",
208 | "realm_access": map[string]interface{}{ // Complete Keycloak-style roles structure
209 | "roles": []string{
210 | "account/view-profile",
211 | clusterNamespace + "_cl-tpl-r",
212 | clusterNamespace + "_cl-tpl-rw",
213 | "default-roles-master",
214 | clusterNamespace + "_im-r",
215 | clusterNamespace + "_reg-r",
216 | clusterNamespace + "_cat-r",
217 | clusterNamespace + "_alrt-r",
218 | clusterNamespace + "_tc-r",
219 | clusterNamespace + "_ao-rw",
220 | "offline_access",
221 | "uma_authorization",
222 | clusterNamespace + "_cl-r",
223 | clusterNamespace + "_cl-rw",
224 | "account/manage-account",
225 | "63764aaf-1527-46a0-b921-c5f32dba1ddb_" + clusterNamespace + "_m",
226 | },
227 | },
228 | "resource_access": map[string]interface{}{ // Resource-specific roles
229 | "cluster-manager": map[string]interface{}{
230 | "roles": []string{"admin", "manager"},
231 | },
232 | },
233 | "preferred_username": subject,
234 | }
235 |
236 | return g.createToken(claims)
237 | }
238 |
239 | // GenerateToken generates a general JWT token (backward compatibility)
240 | func (g *TestJWTGenerator) GenerateToken(subject string, audience []string, customClaims map[string]interface{}) (string, error) {
241 | now := time.Now()
242 | claims := jwt.MapClaims{
243 | "sub": subject,
244 | "iss": IssuerURL,
245 | "aud": audience,
246 | "exp": now.Add(time.Hour).Unix(),
247 | "iat": now.Unix(),
248 | "typ": "Bearer",
249 | }
250 |
251 | // Add custom claims
252 | for k, v := range customClaims {
253 | claims[k] = v
254 | }
255 |
256 | return g.createToken(claims)
257 | }
258 |
259 | // GenerateShortLivedToken generates a token with short expiry (backward compatibility)
260 | func (g *TestJWTGenerator) GenerateShortLivedToken(subject string, expiry time.Duration) (string, error) {
261 | now := time.Now()
262 | claims := jwt.MapClaims{
263 | "sub": subject,
264 | "iss": IssuerURL,
265 | "aud": []string{"cluster-manager"},
266 | "exp": now.Add(expiry).Unix(),
267 | "iat": now.Unix(),
268 | "typ": "Bearer",
269 | }
270 |
271 | return g.createToken(claims)
272 | }
273 |
274 | // ValidateToken validates a JWT token (backward compatibility)
275 | func (g *TestJWTGenerator) ValidateToken(tokenString string) (jwt.MapClaims, error) {
276 | token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {
277 | if _, ok := token.Method.(*jwt.SigningMethodRSAPSS); !ok {
278 | return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
279 | }
280 | return g.publicKey, nil
281 | })
282 |
283 | if err != nil {
284 | return nil, err
285 | }
286 |
287 | if claims, ok := token.Claims.(jwt.MapClaims); ok && token.Valid {
288 | return claims, nil
289 | }
290 |
291 | return nil, fmt.Errorf("invalid token")
292 | }
293 |
294 | // GetPublicKeyJWKS returns the public key in JWKS format (backward compatibility)
295 | func (g *TestJWTGenerator) GetPublicKeyJWKS() (string, error) {
296 | return getJWKS()
297 | }
298 |
299 | // GetPublicKeyPEM returns the public key in PEM format (backward compatibility)
300 | func (g *TestJWTGenerator) GetPublicKeyPEM() (string, error) {
301 | return getPublicKeyPEM()
302 | }
303 |
304 | // GetPrivateKeyPEM returns the private key in PEM format (backward compatibility)
305 | func (g *TestJWTGenerator) GetPrivateKeyPEM() (string, error) {
306 | privateKeyBytes := x509.MarshalPKCS1PrivateKey(g.privateKey)
307 | privateKeyPEM := pem.EncodeToMemory(&pem.Block{
308 | Type: "RSA PRIVATE KEY",
309 | Bytes: privateKeyBytes,
310 | })
311 | return string(privateKeyPEM), nil
312 | }
313 |
314 | // SetupTestAuthentication creates authentication context for the given username
315 | func SetupTestAuthentication(username string) (*TestAuthContext, error) {
316 | token, err := GenerateTestJWT(username)
317 | if err != nil {
318 | return nil, fmt.Errorf("failed to generate test JWT: %w", err)
319 | }
320 |
321 | return &TestAuthContext{
322 | Token: token,
323 | Subject: username,
324 | Issuer: "cluster-tests",
325 | Audience: []string{"cluster-manager"},
326 | }, nil
327 | }
328 |
329 | // GenerateTestJWT creates a JWT token for testing with the given username using PS512
330 | func GenerateTestJWT(username string) (string, error) {
331 | // Get the dynamically generated private key
332 | privateKey, _, err := getOrGenerateKeys()
333 | if err != nil {
334 | return "", fmt.Errorf("failed to get private key: %w", err)
335 | }
336 |
337 | // Set issuer and audience to match unit test expectations
338 | now := time.Now()
339 | clusterNamespace := "53cd37b9-66b2-4cc8-b080-3722ed7af64a" // Default namespace from cluster_utils.go
340 | claims := jwt.MapClaims{
341 | "sub": username,
342 | "iss": IssuerURL, // Use constant instead of hardcoded value
343 | "aud": []string{"cluster-manager"}, // Unit tests expect this audience
344 | "scope": "openid email roles profile", // Match working JWT scope
345 | "exp": now.Add(time.Hour).Unix(),
346 | "iat": now.Unix(),
347 | "typ": "Bearer", // Token type
348 | "azp": "system-client", // Authorized party
349 | "realm_access": map[string]interface{}{
350 | "roles": []string{
351 | "account/view-profile",
352 | clusterNamespace + "_cl-tpl-r",
353 | clusterNamespace + "_cl-tpl-rw",
354 | "default-roles-master",
355 | clusterNamespace + "_im-r",
356 | clusterNamespace + "_reg-r",
357 | clusterNamespace + "_cat-r",
358 | clusterNamespace + "_alrt-r",
359 | clusterNamespace + "_tc-r",
360 | clusterNamespace + "_ao-rw",
361 | "offline_access",
362 | "uma_authorization",
363 | clusterNamespace + "_cl-r",
364 | clusterNamespace + "_cl-rw",
365 | "account/manage-account",
366 | "63764aaf-1527-46a0-b921-c5f32dba1ddb_" + clusterNamespace + "_m",
367 | },
368 | },
369 | "resource_access": map[string]interface{}{ // Resource-specific roles
370 | "cluster-manager": map[string]interface{}{
371 | "roles": []string{"admin", "manager"},
372 | },
373 | },
374 | "preferred_username": username,
375 | }
376 |
377 | // Create token using PS512 as required by cluster-manager v2.1.15
378 | token := jwt.NewWithClaims(jwt.SigningMethodPS512, claims)
379 | token.Header["kid"] = KeyID // Use constant instead of hardcoded value
380 |
381 | tokenString, err := token.SignedString(privateKey)
382 | if err != nil {
383 | return "", fmt.Errorf("failed to sign JWT token: %w", err)
384 | }
385 |
386 | return tokenString, nil
387 | }
388 |
// GenerateOIDCMockConfig generates a Kubernetes YAML configuration for OIDC mock server
// with runtime-generated JWKS, replacing the bash script implementation.
//
// The manifest contains: an nginx Deployment serving the mock endpoints, an
// ExternalName Service that aliases platform-keycloak to the mock, the mock's
// own Service, and two ConfigMaps (nginx config and static content). The
// template embeds IssuerURL at compile time and carries two %s placeholders
// that are filled with the JWKS below.
func GenerateOIDCMockConfig() (string, error) {
	// Generate dynamic JWKS using the same auth package as JWT tests, so
	// tokens signed with the runtime keys verify against this mock.
	jwks, err := getJWKS()
	if err != nil {
		return "", fmt.Errorf("failed to generate JWKS: %w", err)
	}

	const template = `# SPDX-FileCopyrightText: (C) 2025 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

# Generated OIDC Mock Server Configuration (Dynamic Keys)
# This configuration provides a mock OIDC server with runtime-generated RSA keys

apiVersion: apps/v1
kind: Deployment
metadata:
  name: oidc-mock
  namespace: default
  labels:
    app: oidc-mock
spec:
  replicas: 1
  selector:
    matchLabels:
      app: oidc-mock
  template:
    metadata:
      labels:
        app: oidc-mock
    spec:
      containers:
      - name: nginx
        image: nginx:alpine
        ports:
        - containerPort: 80
        volumeMounts:
        - name: config
          mountPath: /etc/nginx/conf.d
        - name: content
          mountPath: /usr/share/nginx/html
      volumes:
      - name: config
        configMap:
          name: oidc-mock-nginx-config
      - name: content
        configMap:
          name: oidc-mock-content
---
apiVersion: v1
kind: Service
metadata:
  name: platform-keycloak
  namespace: orch-platform
spec:
  selector:
    app: oidc-mock
  ports:
  - port: 80
    targetPort: 80
    name: http
  type: ExternalName
  externalName: oidc-mock.default.svc.cluster.local
---
apiVersion: v1
kind: Service
metadata:
  name: oidc-mock
  namespace: default
spec:
  selector:
    app: oidc-mock
  ports:
  - port: 80
    targetPort: 80
    name: http
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: oidc-mock-nginx-config
  namespace: default
data:
  default.conf: |
    server {
        listen 80;
        server_name localhost;

        location /realms/master/.well-known/openid-configuration {
            return 200 '{
              "issuer": "` + IssuerURL + `",
              "authorization_endpoint": "` + IssuerURL + `/protocol/openid-connect/auth",
              "token_endpoint": "` + IssuerURL + `/protocol/openid-connect/token",
              "jwks_uri": "` + IssuerURL + `/keys",
              "userinfo_endpoint": "` + IssuerURL + `/protocol/openid-connect/userinfo",
              "response_types_supported": ["code", "token", "id_token", "code token", "code id_token", "token id_token", "code token id_token"],
              "subject_types_supported": ["public"],
              "id_token_signing_alg_values_supported": ["PS512", "RS256"]
            }';
            add_header Content-Type application/json;
        }

        location /realms/master/keys {
            return 200 '%s';
            add_header Content-Type application/json;
        }

        location / {
            return 200 'OIDC Mock Server (Dynamic Keys)\nAvailable endpoints:\n /realms/master/.well-known/openid-configuration\n /realms/master/keys\n';
        }
    }
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: oidc-mock-content
  namespace: default
data:
  jwks.json: |
    %s
  index.html: |


OIDC Mock Server (Dynamic Keys)

OIDC Mock Server
Using Runtime-Generated Keys
Available endpoints:



`

	// Replace placeholders with actual JWKS
	// (first %s: nginx /realms/master/keys response; second %s: jwks.json).
	config := fmt.Sprintf(template, jwks, jwks)

	return config, nil
}
531 |
--------------------------------------------------------------------------------
/tests/utils/cluster_utils.go:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | package utils
5 |
6 | import (
7 | "bufio"
8 | "bytes"
9 | "encoding/json"
10 | "fmt"
11 | "io"
12 | "net/http"
13 | "os"
14 | "os/exec"
15 | "strings"
16 | "text/template"
17 |
18 | "github.com/open-edge-platform/cluster-manager/v2/pkg/api"
19 | "github.com/open-edge-platform/cluster-tests/tests/auth"
20 | )
21 |
const (
	// Default project/namespace and node identity, used when the NAMESPACE /
	// NODEGUID environment variables are not set.
	DefaultNamespace = "53cd37b9-66b2-4cc8-b080-3722ed7af64a"
	DefaultNodeGUID  = "12345678-1234-1234-1234-123456789012"
	NamespaceEnvVar  = "NAMESPACE"
	NodeGUIDEnvVar   = "NODEGUID"
	ClusterName      = "demo-cluster"

	// Identifiers for the different test suites.
	ClusterOrchFunctionalTest       = "cluster-orch-functional-test"
	ClusterOrchSmokeTest            = "cluster-orch-smoke-test"
	ClusterOrchRobustnessTest       = "cluster-orch-robustness-test"
	ClusterOrchClusterApiAllTest    = "cluster-orch-cluster-api-all-test"
	ClusterOrchClusterApiSmokeTest  = "cluster-orch-cluster-api-smoke-test"
	ClusterOrchTemplateApiSmokeTest = "cluster-orch-template-api-smoke-test"
	ClusterOrchTemplateApiAllTest   = "cluster-orch-template-api-all-test"

	// kubectl port-forward settings: cluster-manager on local 8080 and the
	// connect-gateway on local 8081 (both remote port 8080).
	PortForwardAddress           = "0.0.0.0"
	PortForwardService           = "svc/cluster-manager"
	PortForwardGatewayService    = "svc/cluster-connect-gateway"
	PortForwardLocalPort         = "8080"
	PortForwardRemotePort        = "8080"
	PortForwardGatewayLocalPort  = "8081"
	PortForwardGatewayRemotePort = "8080"

	// Baseline template names and versions for the RKE2 and K3s distributions.
	Rke2TemplateOnlyName    = "baseline-rke2"
	Rke2TemplateOnlyVersion = "v0.0.1"

	K3sTemplateOnlyName    = "baseline-k3s"
	K3sTemplateOnlyVersion = "v0.0.1"

	Rke2TemplateName = "baseline-rke2-v0.0.1"
	K3sTemplateName  = "baseline-k3s-v0.0.1"

	// REST endpoints reached through the local port-forward above.
	ClusterTemplateURL = "http://127.0.0.1:8080/v2/templates"
	ClusterCreateURL   = "http://127.0.0.1:8080/v2/clusters"

	// Paths (relative to the test binary) of JSON request payload templates.
	ClusterConfigTemplatePath = "../../configs/cluster-config.json"

	BaselineClusterTemplatePathRke2 = "../../configs/baseline-cluster-template-rke2.json"
	BaselineClusterTemplatePathK3s  = "../../configs/baseline-cluster-template-k3s.json"
)
62 |
const (
	// Template type selectors accepted by ImportClusterTemplate.
	TemplateTypeK3sBaseline  = "k3s-baseline"
	TemplateTypeRke2Baseline = "rke2-baseline"
	// Add more template types as needed
)
68 |
var (
	// SkipDeleteCluster disables cluster teardown when SKIP_DELETE_CLUSTER=true
	// (useful for inspecting state after a failed run).
	SkipDeleteCluster = os.Getenv("SKIP_DELETE_CLUSTER") == "true"
)
72 |
// GetEnv retrieves the value of the environment variable or returns the
// default value if not set. An empty-but-set variable is returned as-is.
func GetEnv(key, defaultValue string) string {
	value, ok := os.LookupEnv(key)
	if !ok {
		return defaultValue
	}
	return value
}
80 |
// EnsureNamespaceExists ensures that the specified namespace exists in the
// cluster, creating it via kubectl when the lookup fails.
func EnsureNamespaceExists(namespace string) error {
	if err := exec.Command("kubectl", "get", "namespace", namespace).Run(); err == nil {
		return nil // already present
	}
	// Lookup failed (most likely not found): try to create it.
	return exec.Command("kubectl", "create", "namespace", namespace).Run()
}
92 |
93 | // ImportClusterTemplate imports a cluster template into the specified namespace.
94 | func ImportClusterTemplate(namespace string, templateType string) error {
95 | var data []byte
96 | var err error
97 | switch templateType {
98 | case TemplateTypeK3sBaseline:
99 | data, err = os.ReadFile(BaselineClusterTemplatePathK3s)
100 | case TemplateTypeRke2Baseline:
101 | data, err = os.ReadFile(BaselineClusterTemplatePathRke2)
102 | default:
103 | return fmt.Errorf("unsupported template type: %s", templateType)
104 | }
105 |
106 | if err != nil {
107 | return err
108 | }
109 |
110 | req, err := http.NewRequest("POST", ClusterTemplateURL, bytes.NewBuffer(data))
111 | if err != nil {
112 | return err
113 | }
114 |
115 | req.Header.Set("Activeprojectid", namespace)
116 | req.Header.Set("Content-Type", "application/json")
117 | req.Header.Set("Accept", "application/json")
118 |
119 | client := &http.Client{}
120 | resp, err := client.Do(req)
121 | if err != nil {
122 | return err
123 | }
124 | defer resp.Body.Close()
125 |
126 | if resp.StatusCode != http.StatusCreated && resp.StatusCode != http.StatusConflict {
127 | body, _ := io.ReadAll(resp.Body)
128 | return fmt.Errorf("failed to import cluster template: %s", string(body))
129 | }
130 |
131 | return nil
132 | }
133 |
134 | func GetClusterTemplate(namespace, templateName, templateVersion string) (*api.TemplateInfo, error) {
135 |
136 | url := fmt.Sprintf("%s/%s/%s", ClusterTemplateURL, templateName, templateVersion)
137 |
138 | req, err := http.NewRequest("GET", url, nil)
139 | if err != nil {
140 | return nil, err
141 | }
142 |
143 | req.Header.Set("Activeprojectid", namespace)
144 | req.Header.Set("Content-Type", "application/json")
145 | req.Header.Set("Accept", "application/json")
146 |
147 | client := &http.Client{}
148 | resp, err := client.Do(req)
149 | if err != nil {
150 | return nil, err
151 | }
152 | defer resp.Body.Close()
153 |
154 | if resp.StatusCode != http.StatusOK {
155 | body, _ := io.ReadAll(resp.Body)
156 | return nil, fmt.Errorf("failed to get template: %s", string(body))
157 | }
158 |
159 | var templateInfo api.TemplateInfo
160 | if err = json.NewDecoder(resp.Body).Decode(&templateInfo); err != nil {
161 | return nil, fmt.Errorf("failed to decode template info: %v", err)
162 | }
163 |
164 | return &templateInfo, nil
165 | }
166 |
167 | func GetClusterTemplatesWithFilter(namespace, filter string) (*api.TemplateInfoList, error) {
168 | ClusterTemplateURLWithFilter := fmt.Sprintf("%s?filter=%s", ClusterTemplateURL, filter)
169 | req, err := http.NewRequest("GET", ClusterTemplateURLWithFilter, nil)
170 | if err != nil {
171 | return nil, err
172 | }
173 |
174 | req.Header.Set("Activeprojectid", namespace)
175 | req.Header.Set("Content-Type", "application/json")
176 | req.Header.Set("Accept", "application/json")
177 |
178 | client := &http.Client{}
179 | resp, err := client.Do(req)
180 | if err != nil {
181 | return nil, err
182 | }
183 | defer resp.Body.Close()
184 |
185 | if resp.StatusCode != http.StatusOK {
186 | body, _ := io.ReadAll(resp.Body)
187 | return nil, fmt.Errorf("failed to get templates: %s", string(body))
188 | }
189 | var templateInfoList api.TemplateInfoList
190 | if err := json.NewDecoder(resp.Body).Decode(&templateInfoList); err != nil {
191 | return nil, fmt.Errorf("failed to decode template info list: %v", err)
192 | }
193 | return &templateInfoList, nil
194 | }
195 |
196 | func DeleteTemplate(namespace, templateName, templateVersion string) error {
197 | url := fmt.Sprintf("%s/%s/%s", ClusterTemplateURL, templateName, templateVersion)
198 |
199 | req, err := http.NewRequest("DELETE", url, nil)
200 | if err != nil {
201 | return err
202 | }
203 |
204 | req.Header.Set("Activeprojectid", namespace)
205 | req.Header.Set("Content-Type", "application/json")
206 | req.Header.Set("Accept", "application/json")
207 |
208 | client := &http.Client{}
209 | resp, err := client.Do(req)
210 | if err != nil {
211 | return err
212 | }
213 | defer resp.Body.Close()
214 |
215 | if resp.StatusCode != http.StatusNoContent {
216 | body, _ := io.ReadAll(resp.Body)
217 | return fmt.Errorf("failed to delete template: %s", string(body))
218 | }
219 |
220 | return nil
221 | }
222 |
223 | func DeleteAllTemplate(namespace string) error {
224 | req, err := http.NewRequest("GET", ClusterTemplateURL, nil)
225 | if err != nil {
226 | return err
227 | }
228 |
229 | req.Header.Set("Activeprojectid", namespace)
230 | req.Header.Set("Content-Type", "application/json")
231 | req.Header.Set("Accept", "application/json")
232 |
233 | client := &http.Client{}
234 | resp, err := client.Do(req)
235 | if err != nil {
236 | return err
237 | }
238 | defer resp.Body.Close()
239 |
240 | if resp.StatusCode != http.StatusOK {
241 | body, _ := io.ReadAll(resp.Body)
242 | return fmt.Errorf("failed to get templates: %s", string(body))
243 | }
244 | var templateInfoList api.TemplateInfoList
245 | if err := json.NewDecoder(resp.Body).Decode(&templateInfoList); err != nil {
246 | return fmt.Errorf("failed to decode template info list: %v", err)
247 | }
248 | if templateInfoList.TemplateInfoList != nil && len(*templateInfoList.TemplateInfoList) != 0 {
249 | for _, templateInfo := range *templateInfoList.TemplateInfoList {
250 | fmt.Printf("Deleting template: %s \n", templateInfo.Name+"-"+templateInfo.Version)
251 | err := DeleteTemplate(namespace, templateInfo.Name, templateInfo.Version)
252 | if err != nil {
253 | return fmt.Errorf("failed to delete template %s: %v", templateInfo.Name+"-"+templateInfo.Version, err)
254 | }
255 | }
256 | }
257 |
258 | return nil
259 | }
260 |
261 | func GetDefaultTemplate(namespace string) (*api.DefaultTemplateInfo, error) {
262 | req, err := http.NewRequest("GET", ClusterTemplateURL+"?default=true", nil)
263 | if err != nil {
264 | return nil, err
265 | }
266 |
267 | req.Header.Set("Activeprojectid", namespace)
268 | req.Header.Set("Content-Type", "application/json")
269 | req.Header.Set("Accept", "application/json")
270 |
271 | client := &http.Client{}
272 | resp, err := client.Do(req)
273 | if err != nil {
274 | return nil, err
275 | }
276 | defer resp.Body.Close()
277 |
278 | if resp.StatusCode != http.StatusOK {
279 | body, _ := io.ReadAll(resp.Body)
280 | return nil, fmt.Errorf("failed to get templates: %s", string(body))
281 | }
282 | var templateInfoList api.TemplateInfoList
283 | if err := json.NewDecoder(resp.Body).Decode(&templateInfoList); err != nil {
284 | return nil, fmt.Errorf("failed to decode template info list: %v", err)
285 | }
286 | return templateInfoList.DefaultTemplateInfo, nil
287 | }
288 |
289 | func SetDefaultTemplate(namespace, name, version string) error {
290 | url := fmt.Sprintf("%s/%s/default", ClusterTemplateURL, name)
291 | var err error
292 | var req *http.Request
293 | var data []byte
294 | var defaultTemplateInfo api.DefaultTemplateInfo
295 |
296 | if version != "" {
297 | defaultTemplateInfo.Version = version
298 | }
299 |
300 | data, err = json.Marshal(defaultTemplateInfo)
301 | if err != nil {
302 | return fmt.Errorf("failed to marshal default template info: %v", err)
303 | }
304 |
305 | req, err = http.NewRequest("PUT", url, bytes.NewBuffer(data))
306 | if err != nil {
307 | return err
308 | }
309 |
310 | req.Header.Set("Activeprojectid", namespace)
311 | req.Header.Set("Content-Type", "application/json")
312 | req.Header.Set("Accept", "application/json")
313 |
314 | client := &http.Client{}
315 | resp, err := client.Do(req)
316 | if err != nil {
317 | return err
318 | }
319 | defer resp.Body.Close()
320 |
321 | if resp.StatusCode != http.StatusOK {
322 | body, _ := io.ReadAll(resp.Body)
323 | return fmt.Errorf("failed to set default template: %s, code: %v", string(body), resp.StatusCode)
324 | }
325 |
326 | return nil
327 |
328 | }
329 |
// IsClusterTemplateReady checks if the cluster template is ready by reading
// the CR with kubectl and extracting .status.ready with yq. Any command
// failure is reported as "not ready".
func IsClusterTemplateReady(namespace, templateName string) bool {
	getCmd := exec.Command("kubectl", "get", "clustertemplates.edge-orchestrator.intel.com", templateName, "-n", namespace, "-o", "yaml")
	manifest, err := getCmd.Output()
	if err != nil {
		return false
	}

	yqCmd := exec.Command("yq", "eval", ".status.ready", "-")
	yqCmd.Stdin = bytes.NewReader(manifest)
	ready, err := yqCmd.Output()
	if err != nil {
		return false
	}

	return strings.TrimSpace(string(ready)) == "true"
}
349 |
// CheckLostConnection verifies if ControlPlane reports connection lost.
// It scans kubectl-style tabular output for a ClusterInfrastructure row whose
// READY column is "False" with reason "ConnectAgentDisconnected".
func CheckLostConnection(output string) bool {
	for _, line := range strings.Split(output, "\n") {
		// Skip the header line
		if strings.Contains(line, "NAME") && strings.Contains(line, "READY") {
			continue
		}
		fields := strings.Fields(line)
		// Fix: the original indexed fields[3] and fields[5] after only
		// checking len(fields) > 1, which panics on rows with fewer than
		// six columns (blank or truncated lines). Require six fields.
		if len(fields) >= 6 &&
			strings.Contains(fields[0], "ClusterInfrastructure") &&
			fields[3] == "False" &&
			fields[5] == "ConnectAgentDisconnected" {
			return true
		}
	}
	return false
}
366 |
367 | // CreateCluster creates a cluster using the provided configuration.
368 | func CreateCluster(namespace, nodeGUID, templateName string) error {
369 | templateData, err := os.ReadFile(ClusterConfigTemplatePath)
370 | if err != nil {
371 | return err
372 | }
373 |
374 | tmpl, err := template.New("clusterConfig").Parse(string(templateData))
375 | if err != nil {
376 | return err
377 | }
378 |
379 | var configBuffer bytes.Buffer
380 | err = tmpl.Execute(&configBuffer, struct {
381 | ClusterName string
382 | TemplateName string
383 | NodeGUID string
384 | }{
385 | NodeGUID: nodeGUID,
386 | TemplateName: templateName,
387 | ClusterName: ClusterName,
388 | })
389 | if err != nil {
390 | return err
391 | }
392 |
393 | req, err := http.NewRequest("POST", ClusterCreateURL, &configBuffer)
394 | if err != nil {
395 | return err
396 | }
397 |
398 | req.Header.Set("Activeprojectid", namespace)
399 | req.Header.Set("Content-Type", "application/json")
400 | req.Header.Set("Accept", "application/json")
401 |
402 | client := &http.Client{}
403 | resp, err := client.Do(req)
404 | if err != nil {
405 | return err
406 | }
407 | defer resp.Body.Close()
408 |
409 | if resp.StatusCode != http.StatusCreated {
410 | body, _ := io.ReadAll(resp.Body)
411 | return fmt.Errorf("failed to create cluster: %s", string(body))
412 | }
413 |
414 | return nil
415 | }
416 |
417 | // DeleteCluster deletes a cluster by name.
418 | func DeleteCluster(namespace string) error {
419 | url := fmt.Sprintf("%s/%s", ClusterCreateURL, ClusterName)
420 |
421 | req, err := http.NewRequest("DELETE", url, nil)
422 | if err != nil {
423 | return err
424 | }
425 |
426 | req.Header.Set("Activeprojectid", namespace)
427 | req.Header.Set("Content-Type", "application/json")
428 | req.Header.Set("Accept", "application/json")
429 |
430 | client := &http.Client{}
431 | resp, err := client.Do(req)
432 | if err != nil {
433 | return err
434 | }
435 | defer resp.Body.Close()
436 |
437 | if resp.StatusCode != http.StatusNoContent {
438 | body, _ := io.ReadAll(resp.Body)
439 | return fmt.Errorf("failed to delete cluster: %s", string(body))
440 | }
441 |
442 | return nil
443 | }
444 |
445 | // DeleteClusterAuthenticated deletes a cluster by name using JWT authentication.
446 | func DeleteClusterAuthenticated(authContext *auth.TestAuthContext, namespace string) error {
447 | url := fmt.Sprintf("%s/%s", ClusterCreateURL, ClusterName)
448 |
449 | req, err := http.NewRequest("DELETE", url, nil)
450 | if err != nil {
451 | return err
452 | }
453 |
454 | req.Header.Set("Activeprojectid", namespace)
455 | req.Header.Set("Content-Type", "application/json")
456 | req.Header.Set("Accept", "application/json")
457 | req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", authContext.Token))
458 |
459 | client := &http.Client{}
460 | resp, err := client.Do(req)
461 | if err != nil {
462 | return err
463 | }
464 | defer resp.Body.Close()
465 |
466 | if resp.StatusCode != http.StatusNoContent {
467 | body, _ := io.ReadAll(resp.Body)
468 | return fmt.Errorf("failed to delete cluster with JWT authentication: %s", string(body))
469 | }
470 |
471 | return nil
472 | }
473 |
474 | func GetClusterInfo(namespace, clusterName string) (*http.Response, error) {
475 | url := fmt.Sprintf("%s/%s", ClusterCreateURL, clusterName)
476 | req, err := http.NewRequest("GET", url, nil)
477 | if err != nil {
478 | return nil, err
479 | }
480 |
481 | req.Header.Set("Activeprojectid", namespace)
482 | req.Header.Set("Content-Type", "application/json")
483 | req.Header.Set("Accept", "application/json")
484 |
485 | client := &http.Client{}
486 | return client.Do(req)
487 | }
488 |
// CheckAllComponentsReady scans `clusterctl describe` output and reports
// whether every listed component is ready. The header line (containing both
// NAME and READY) is ignored; any row whose READY column reads "False", or
// any row with only a single field (no READY column at all), counts as not
// ready.
func CheckAllComponentsReady(output string) bool {
	for _, line := range strings.Split(output, "\n") {
		// Skip the table header.
		if strings.Contains(line, "NAME") && strings.Contains(line, "READY") {
			continue
		}
		fields := strings.Fields(line)
		switch {
		case len(fields) == 1:
			// A lone token means the READY column is missing entirely.
			return false
		case len(fields) > 1 && fields[1] == "False":
			return false
		}
	}
	return true
}
505 |
// FetchMetrics fetches the metrics from the locally port-forwarded /metrics
// endpoint and returns the raw response body. The caller owns the returned
// ReadCloser and must Close it.
func FetchMetrics() (io.ReadCloser, error) {
	resp, err := http.Get("http://127.0.0.1:8081/metrics")
	if err != nil {
		// Wrap with %w so callers can unwrap with errors.Is/errors.As
		// (the previous %v verb discarded the error chain).
		return nil, fmt.Errorf("error fetching metrics: %w", err)
	}
	return resp.Body, nil
}
514 |
515 | // ParseMetrics checks if the metric websocket_connections_total with status="succeeded" is 1.
516 | func ParseMetrics(metrics io.Reader) (bool, error) {
517 | scanner := bufio.NewScanner(metrics)
518 | for scanner.Scan() {
519 | line := scanner.Text()
520 | if strings.Contains(line, `websocket_connections_total{status="succeeded"}`) {
521 | fmt.Printf("\tfound metric: %s\n", line)
522 | parts := strings.Fields(line)
523 | if len(parts) == 2 && parts[1] != "0" {
524 | return true, nil
525 | }
526 | }
527 | }
528 |
529 | if err := scanner.Err(); err != nil {
530 | return false, fmt.Errorf("error reading metrics: %v", err)
531 | }
532 |
533 | return false, nil
534 | }
535 |
// LogCommandOutput runs the given command with args and prints its combined
// stdout/stderr to standard output. Execution errors are reported but do not
// stop the (best-effort, diagnostic) output from being printed.
func LogCommandOutput(command string, args []string) {
	output, err := exec.Command(command, args...).CombinedOutput()
	if err != nil {
		fmt.Printf("Error executing command: %v\n", err)
	}
	fmt.Printf("Command output:\n%s\n", string(output))
}
544 |
--------------------------------------------------------------------------------
/tests/cluster-api-test/cluster_api_test.go:
--------------------------------------------------------------------------------
1 | // SPDX-FileCopyrightText: (C) 2025 Intel Corporation
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | package cluster_api_test_test
5 |
import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
	"os/exec"
	"strconv"
	"strings"
	"testing"
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	"github.com/open-edge-platform/cluster-tests/tests/auth"
	"github.com/open-edge-platform/cluster-tests/tests/utils"
)
22 |
// Constants for commonly used values
const (
	// TempKubeconfigPattern is a fmt pattern producing a per-cluster temporary kubeconfig path.
	TempKubeconfigPattern = "/tmp/%s-kubeconfig.yaml"
	// KubeconfigFileName is the kubeconfig written into the working directory by the tests.
	KubeconfigFileName = "kubeconfig.yaml"
	// LocalGatewayURL is the locally port-forwarded cluster connect gateway address.
	LocalGatewayURL = "http://127.0.0.1:8081/"
	// Timeouts and poll intervals used by the Eventually blocks below.
	ClusterReadinessTimeout  = 10 * time.Minute
	ClusterReadinessInterval = 10 * time.Second
	PodReadinessTimeout      = 5 * time.Minute
	PodReadinessInterval     = 10 * time.Second
	PortForwardTimeout       = 1 * time.Minute
	PortForwardInterval      = 5 * time.Second
	// PortForwardDelay is how long to wait after starting kubectl port-forward
	// before assuming the tunnel is established.
	PortForwardDelay = 5 * time.Second
)
36 |
37 | // function to check if cluster components are ready
38 | func checkClusterComponentsReady(namespace string) bool {
39 | cmd := exec.Command("clusterctl", "describe", "cluster", utils.ClusterName, "-n", namespace)
40 | output, err := cmd.Output()
41 | if err != nil {
42 | return false
43 | }
44 | fmt.Printf("Cluster components status:\n%s\n", string(output))
45 | return utils.CheckAllComponentsReady(string(output))
46 | }
47 |
48 | // function to wait for Intel machines to exist
49 | func waitForIntelMachines(namespace string) {
50 | By("Waiting for IntelMachine to exist")
51 | Eventually(func() bool {
52 | cmd := exec.Command("sh", "-c", fmt.Sprintf("kubectl -n %s get intelmachine -o yaml | yq '.items | length'", namespace))
53 | output, err := cmd.Output()
54 | if err != nil {
55 | return false
56 | }
57 | return string(output) > "0"
58 | }, PortForwardTimeout, PortForwardInterval).Should(BeTrue())
59 | }
60 |
61 | // function to wait for cluster components to be ready
62 | func waitForClusterComponentsReady(namespace string) {
63 | By("Waiting for all components to be ready")
64 | Eventually(func() bool {
65 | return checkClusterComponentsReady(namespace)
66 | }, ClusterReadinessTimeout, ClusterReadinessInterval).Should(BeTrue())
67 | }
68 |
69 | func TestClusterApiTest(t *testing.T) {
70 | RegisterFailHandler(Fail)
71 | _, _ = fmt.Fprintf(GinkgoWriter, "Starting cluster orch api tests\n")
72 | RunSpecs(t, "cluster orch api test suite")
73 | }
74 |
75 | // setupPortForwarding sets up port forwarding for any service
76 | func setupPortForwarding(serviceName, serviceIdentifier, localPort, remotePort string) (*exec.Cmd, error) {
77 | By(fmt.Sprintf("Port forwarding to the %s service", serviceName))
78 | portForwardCmd := exec.Command("kubectl", "port-forward", serviceIdentifier,
79 | fmt.Sprintf("%s:%s", localPort, remotePort), "--address", utils.PortForwardAddress)
80 | err := portForwardCmd.Start()
81 | if err != nil {
82 | return nil, err
83 | }
84 | time.Sleep(PortForwardDelay)
85 | return portForwardCmd, nil
86 | }
87 |
88 | // cleanupPortForwarding safely kills port forwarding processes
89 | func cleanupPortForwarding(portForwardCmd, gatewayPortForward *exec.Cmd) {
90 | if portForwardCmd != nil && portForwardCmd.Process != nil {
91 | portForwardCmd.Process.Kill()
92 | }
93 | if gatewayPortForward != nil && gatewayPortForward.Process != nil {
94 | gatewayPortForward.Process.Kill()
95 | }
96 | }
97 |
98 | // performClusterOperation executes a cluster operation with conditional authentication
99 | func performClusterOperation(operationType string, authDisabled bool, authContext *auth.TestAuthContext,
100 | namespace, nodeGUID, templateName string) error {
101 |
102 | if !authDisabled {
103 | fmt.Printf(" Using JWT authentication for cluster %s\n", operationType)
104 | switch operationType {
105 | case "import":
106 | By("Importing the cluster template")
107 | return utils.ImportClusterTemplateAuthenticated(authContext, namespace, templateName)
108 | case "create":
109 | By("Creating the k3s cluster")
110 | return utils.CreateClusterAuthenticated(authContext, namespace, nodeGUID, templateName)
111 | case "delete":
112 | By("Deleting the cluster")
113 | return utils.DeleteClusterAuthenticated(authContext, namespace)
114 | default:
115 | return fmt.Errorf("unknown operation type: %s", operationType)
116 | }
117 | } else {
118 | fmt.Printf(" Using non-authenticated cluster %s\n", operationType)
119 | switch operationType {
120 | case "import":
121 | By("Importing the cluster template")
122 | return utils.ImportClusterTemplate(namespace, templateName)
123 | case "create":
124 | By("Creating the k3s cluster")
125 | return utils.CreateCluster(namespace, nodeGUID, templateName)
126 | case "delete":
127 | By("Deleting the cluster")
128 | return utils.DeleteCluster(namespace)
129 | default:
130 | return fmt.Errorf("unknown operation type: %s", operationType)
131 | }
132 | }
133 | }
134 |
// validateJWTWorkflow performs comprehensive JWT authentication validation:
// it checks the token's structural shape and claims, probes cluster-manager
// authentication, and exercises the kubeconfig retrieval endpoint end-to-end.
// Fails the enclosing spec via Gomega on any hard violation.
func validateJWTWorkflow(authContext *auth.TestAuthContext, namespace string) {
	By("Testing JWT-authenticated kubeconfig API endpoint (primary workflow validation)")
	Expect(authContext).NotTo(BeNil())

	By("Confirming JWT authentication usage for cluster operations")
	// NOTE(review): Token[:20] assumes the token is at least 20 bytes long —
	// true for real JWTs, but this would panic on a degenerate token.
	fmt.Printf("  JWT Token confirmed for cluster operations: %s...\n"+
		"  JWT authentication confirmed for:\n"+
		"  - Cluster template import\n"+
		"  - Cluster creation\n"+
		"  - Cluster management APIs\n"+
		"  - Kubeconfig retrieval\n"+
		"  - Cluster deletion (in AfterEach)\n", authContext.Token[:20])

	By("Verifying JWT token structure and claims")
	// Token should be a JWT with header.payload.signature format
	parts := strings.Split(authContext.Token, ".")
	Expect(parts).To(HaveLen(3), "JWT should have 3 parts separated by dots")

	// Check auth context claims
	Expect(authContext.Subject).To(Equal("test-user"))
	Expect(authContext.Issuer).To(Equal("cluster-tests"))
	Expect(authContext.Audience).To(ContainElement("cluster-manager"))

	By("Testing cluster-manager API authentication")
	// An authentication error here is diagnostic, not fatal: fall back to a
	// plain connectivity probe so the failure mode is visible in the logs.
	err := utils.TestClusterManagerAuthentication(authContext)
	if err != nil {
		fmt.Printf("  Authentication test result: %v\n", err)
		testConnectivity()
	} else {
		fmt.Println("  JWT authentication successful")
	}

	By("Testing kubeconfig retrieval via JWT workflow (no fallback)")
	testKubeconfigRetrieval(authContext, namespace)
}
171 |
172 | // testConnectivity performs basic connectivity diagnostics
173 | func testConnectivity() {
174 | By("Attempting basic connectivity test")
175 | endpoint := fmt.Sprintf("%s/v2/healthz", utils.GetClusterManagerEndpoint())
176 | resp, connErr := http.Get(endpoint)
177 | if connErr != nil {
178 | fmt.Printf(" Cluster-manager API not accessible: %v\n", connErr)
179 | return
180 | }
181 | if resp != nil {
182 | defer resp.Body.Close()
183 | switch resp.StatusCode {
184 | case http.StatusOK:
185 | fmt.Println(" Cluster-manager API is accessible without authentication")
186 | case http.StatusUnauthorized:
187 | fmt.Println(" Cluster-manager API requires authentication (expected)")
188 | default:
189 | fmt.Printf(" Unexpected response from cluster-manager: %d\n", resp.StatusCode)
190 | }
191 | }
192 | }
193 |
// testKubeconfigRetrieval tests kubeconfig API endpoint (JWT workflow validation).
// It fetches the kubeconfig for utils.ClusterName via the authenticated API and
// delegates status-code handling to handleKubeconfigResponse; any transport
// error fails the spec outright.
func testKubeconfigRetrieval(authContext *auth.TestAuthContext, namespace string) {
	resp, err := utils.GetClusterKubeconfigFromAPI(authContext, namespace, utils.ClusterName)
	Expect(err).NotTo(HaveOccurred(), "Kubeconfig API call should succeed for JWT workflow validation")

	Expect(resp).NotTo(BeNil(), "API response should not be nil")
	defer resp.Body.Close()
	handleKubeconfigResponse(resp, namespace)
}
203 |
// handleKubeconfigResponse processes the kubeconfig API response: 200 OK
// continues into content validation, while every other status fails the spec
// with a message identifying which part of the JWT workflow broke.
func handleKubeconfigResponse(resp *http.Response, namespace string) {
	switch resp.StatusCode {
	case http.StatusOK:
		fmt.Println("  Successfully retrieved kubeconfig via cluster-manager API")
		processSuccessfulKubeconfigResponse(resp)
	case http.StatusNotFound:
		Fail(fmt.Sprintf("Cluster '%s' not found in namespace '%s' - JWT workflow validation failed", utils.ClusterName, namespace))
	case http.StatusUnauthorized:
		Fail("JWT authentication failed for kubeconfig endpoint")
	case http.StatusForbidden:
		Fail("JWT token lacks permissions for kubeconfig endpoint")
	default:
		Fail(fmt.Sprintf("Unexpected response from kubeconfig API: %d - JWT workflow validation failed", resp.StatusCode))
	}
}
220 |
// processSuccessfulKubeconfigResponse handles successful kubeconfig retrieval:
// it decodes the JSON body, asserts a non-empty "kubeconfig" field, and uses
// that kubeconfig to verify downstream cluster access.
func processSuccessfulKubeconfigResponse(resp *http.Response) {
	By("Validating the kubeconfig content")
	body, err := io.ReadAll(resp.Body)
	Expect(err).NotTo(HaveOccurred())

	// The API returns a JSON object; only the "kubeconfig" field is consumed here.
	var kubeconfigResponse map[string]interface{}
	err = json.Unmarshal(body, &kubeconfigResponse)
	Expect(err).NotTo(HaveOccurred())

	kubeconfig, exists := kubeconfigResponse["kubeconfig"]
	Expect(exists).To(BeTrue(), "Response should contain kubeconfig field")
	Expect(kubeconfig).NotTo(BeEmpty(), "Kubeconfig should not be empty")

	By("Testing downstream cluster access with retrieved kubeconfig")
	// The type assertion is safe only after the emptiness check above;
	// a non-string kubeconfig field would panic here.
	err = utils.TestDownstreamClusterAccess(kubeconfig.(string))
	Expect(err).NotTo(HaveOccurred(), "Downstream cluster access should work with JWT-retrieved kubeconfig")

	fmt.Printf("COMPLETE JWT WORKFLOW SUCCESSFUL: Token → API → Kubeconfig → Downstream K3s Cluster Access\n")
}
241 |
242 | // waitForClusterReady performs common cluster readiness validation
243 | func waitForClusterReady(namespace string, clusterCreateStartTime time.Time) time.Time {
244 | waitForIntelMachines(namespace)
245 | waitForClusterComponentsReady(namespace)
246 |
247 | By("Checking that connect agent metric shows a successful connection")
248 | metrics, err := utils.FetchMetrics()
249 | Expect(err).NotTo(HaveOccurred())
250 | defer metrics.Close()
251 | connectionSucceeded, err := utils.ParseMetrics(metrics)
252 | Expect(err).NotTo(HaveOccurred())
253 | Eventually(connectionSucceeded).Should(BeTrue())
254 |
255 | clusterCreateEndTime := time.Now()
256 | totalTime := clusterCreateEndTime.Sub(clusterCreateStartTime)
257 | fmt.Printf("\033[32mTotal time from cluster creation to fully active: %v 🚀 ✅\033[0m\n", totalTime)
258 |
259 | return clusterCreateEndTime
260 | }
261 |
// validateKubeconfigAndClusterAccess performs kubeconfig validation and cluster
// access testing: it fetches the workload cluster kubeconfig via clusterctl,
// repoints it at the local connect-gateway tunnel, and then exercises the
// downstream cluster (pod list, version, pod readiness, exec into a pod).
func validateKubeconfigAndClusterAccess() {
	By("Getting kubeconfig")
	cmd := exec.Command("clusterctl", "get", "kubeconfig", utils.ClusterName, "--namespace", utils.DefaultNamespace)
	output, err := cmd.Output()
	Expect(err).NotTo(HaveOccurred())

	kubeConfigName := KubeconfigFileName
	err = os.WriteFile(kubeConfigName, output, 0644)
	Expect(err).NotTo(HaveOccurred())

	By("Setting in kubeconfig server to cluster connect gateway")
	// Rewrite the server URL in-place so kubectl traffic goes through the
	// locally port-forwarded gateway (LocalGatewayURL).
	// NOTE(review): `sed -i` without a suffix is GNU-sed syntax — this step
	// presumably only runs on Linux CI; confirm if macOS support is needed.
	cmd = exec.Command("sed", "-i", fmt.Sprintf("s|http://[[:alnum:].-]*:8080/|%s|", LocalGatewayURL), kubeConfigName)
	_, err = cmd.Output()
	Expect(err).NotTo(HaveOccurred())

	By("Getting list of pods")
	cmd = exec.Command("kubectl", "--kubeconfig", kubeConfigName, "get", "pods")
	_, err = cmd.Output()
	Expect(err).NotTo(HaveOccurred())

	By("Dumping kubectl client and server version")
	cmd = exec.Command("kubectl", "version", "--kubeconfig", kubeConfigName)
	output, err = cmd.Output()
	Expect(err).NotTo(HaveOccurred())
	fmt.Printf("kubectl client and server version:\n%s\n", string(output))

	// Wait for all pods to be running
	By("Waiting for all pods to be running")
	Eventually(func() bool {
		cmd := exec.Command("kubectl", "--kubeconfig", kubeConfigName, "get", "pods", "-A", "-o", "jsonpath={.items[*].status.phase}")
		output, err := cmd.Output()
		if err != nil {
			return false
		}
		// Every pod phase must be Running/Completed/Succeeded.
		podStatuses := strings.Fields(string(output))
		for _, status := range podStatuses {
			if status != "Running" && status != "Completed" && status != "Succeeded" {
				return false
			}
		}
		return true
	}, PodReadinessTimeout, PodReadinessInterval).Should(BeTrue(), "Not all pods are in Running or Completed state")

	By("Getting the local-path-provisioner pod name")
	cmd = exec.Command("kubectl", "get", "pods", "-n", "kube-system", "-l", "app=local-path-provisioner",
		"-o", "jsonpath={.items[0].metadata.name}", "--kubeconfig", kubeConfigName)
	output, err = cmd.Output()
	Expect(err).NotTo(HaveOccurred(), "Failed to get the local-path-provisioner pod name")
	fmt.Printf("Local-path-provisioner pod name: %s\n", string(output))

	podName := strings.TrimSpace(string(output))
	Expect(podName).NotTo(BeEmpty(), "Pod name should not be empty")

	By("Executing the `ls` command in the local-path-provisioner pod")
	// Proves end-to-end exec access through the gateway, not just API reads.
	cmd = exec.Command("kubectl", "exec", "-it", podName, "-n", "kube-system", "--kubeconfig", kubeConfigName, "--", "ls")
	output, err = cmd.Output()
	Expect(err).NotTo(HaveOccurred(), "Failed to execute the `ls` command in the pod")

	fmt.Printf("Output of `ls` command:\n%s\n", string(output))
}
323 |
// Spec: full lifecycle of a single-node K3s cluster through the Cluster
// Manager APIs, optionally JWT-authenticated (disabled via DISABLE_AUTH=true).
// BeforeEach provisions auth, namespace, port-forwards, template and cluster;
// AfterEach tears the cluster down unless utils.SkipDeleteCluster is set.
var _ = Describe("Single Node K3S Cluster Create and Delete using Cluster Manager APIs with baseline template",
	Ordered, Label(utils.ClusterOrchClusterApiSmokeTest, utils.ClusterOrchClusterApiAllTest), func() {
		var (
			authContext            *auth.TestAuthContext
			gatewayPortForward     *exec.Cmd
			namespace              string
			nodeGUID               string
			portForwardCmd         *exec.Cmd
			clusterCreateStartTime time.Time
			authDisabled           bool
		)

		BeforeEach(func() {
			namespace = utils.GetEnv(utils.NamespaceEnvVar, utils.DefaultNamespace)
			nodeGUID = utils.GetEnv(utils.NodeGUIDEnvVar, utils.DefaultNodeGUID)

			// Check if authentication is disabled via environment variable
			authDisabled = os.Getenv("DISABLE_AUTH") == "true"

			if !authDisabled {
				By("Setting up JWT authentication")
				var err error
				authContext, err = utils.SetupTestAuthentication("test-user")
				Expect(err).NotTo(HaveOccurred())
				Expect(authContext).NotTo(BeNil())
				Expect(authContext.Token).NotTo(BeEmpty())
			} else {
				By("Authentication disabled - skipping JWT setup")
				fmt.Printf(" Authentication disabled (DISABLE_AUTH=true)\n")
			}

			By("Ensuring the namespace exists")
			var err error
			err = utils.EnsureNamespaceExists(namespace)
			Expect(err).NotTo(HaveOccurred())

			// Setup port forwarding using helper function
			portForwardCmd, err = setupPortForwarding("cluster manager", utils.PortForwardService,
				utils.PortForwardLocalPort, utils.PortForwardRemotePort)
			Expect(err).NotTo(HaveOccurred())

			// Import cluster template using helper function
			err = performClusterOperation("import", authDisabled, authContext, namespace, "", utils.TemplateTypeK3sBaseline)
			Expect(err).NotTo(HaveOccurred())

			By("Waiting for the cluster template to be ready")
			Eventually(func() bool {
				return utils.IsClusterTemplateReady(namespace, utils.K3sTemplateName)
			}, 1*time.Minute, 2*time.Second).Should(BeTrue())

			// Start timing here so the reported duration covers create-to-active only.
			clusterCreateStartTime = time.Now()

			// Create cluster using helper function
			err = performClusterOperation("create", authDisabled, authContext, namespace, nodeGUID, utils.K3sTemplateName)
			Expect(err).NotTo(HaveOccurred())

			// Setup gateway port forwarding using helper function
			gatewayPortForward, err = setupPortForwarding("cluster gateway", utils.PortForwardGatewayService,
				utils.PortForwardGatewayLocalPort, utils.PortForwardGatewayRemotePort)
			Expect(err).NotTo(HaveOccurred())
		})

		AfterEach(func() {
			// Cleanup port forwarding using helper function.
			// Deferred so the tunnels stay up while the delete below runs.
			defer cleanupPortForwarding(portForwardCmd, gatewayPortForward)

			if !utils.SkipDeleteCluster {
				// Delete cluster using helper function
				var err error
				err = performClusterOperation("delete", authDisabled, authContext, namespace, "", "")
				Expect(err).NotTo(HaveOccurred())

				By("Verifying that the cluster is deleted")
				// kubectl get exits non-zero once the Cluster object is gone.
				Eventually(func() bool {
					cmd := exec.Command("kubectl", "-n", namespace, "get", "cluster", utils.ClusterName)
					err := cmd.Run()
					return err != nil
				}, PortForwardTimeout, PortForwardInterval).Should(BeTrue())
			}
		})

		It("should verify that the cluster is fully active", func() {
			// Wait for cluster to be ready using helper function
			waitForClusterReady(namespace, clusterCreateStartTime)

			// Validate kubeconfig and cluster access using helper function
			validateKubeconfigAndClusterAccess()

			// JWT Kubeconfig API Test - integrated after cluster is ready
			if !authDisabled {
				validateJWTWorkflow(authContext, namespace)
			} else {
				By("Authentication disabled - skipping JWT-specific tests")
				fmt.Printf(" DISABLE_AUTH=true - JWT kubeconfig API test skipped\n")
			}
		})

		JustAfterEach(func() {
			// On failure, dump downstream pod state from inside the agent
			// container for post-mortem diagnosis.
			if CurrentSpecReport().Failed() {
				utils.LogCommandOutput("kubectl", []string{"exec", "cluster-agent-0", "--",
					"/usr/local/bin/k3s", "kubectl", "--kubeconfig", "/etc/rancher/k3s/k3s.yaml", "get", "pods", "-A"})
				utils.LogCommandOutput("kubectl", []string{"exec", "cluster-agent-0", "--",
					"/usr/local/bin/k3s", "kubectl", "--kubeconfig", "/etc/rancher/k3s/k3s.yaml", "describe", "pod", "-n", "kube-system", "connect-agent-cluster-agent-0"})
			}
		})
	})
430 |
// Spec: lifecycle of a single-node RKE2 cluster through the Cluster Manager
// APIs (no JWT variant). BeforeAll sets up the namespace and port-forwards
// once for the whole Ordered container; AfterAll deletes the cluster unless
// utils.SkipDeleteCluster is set.
var _ = Describe("Single Node RKE2 Cluster Create and Delete using Cluster Manager APIs with baseline template",
	Ordered, Label(utils.ClusterOrchClusterApiAllTest), func() {
		var (
			namespace              string
			nodeGUID               string
			portForwardCmd         *exec.Cmd
			gatewayPortForward     *exec.Cmd
			clusterCreateStartTime time.Time
			clusterCreateEndTime   time.Time
		)

		BeforeAll(func() {
			namespace = utils.GetEnv(utils.NamespaceEnvVar, utils.DefaultNamespace)
			nodeGUID = utils.GetEnv(utils.NodeGUIDEnvVar, utils.DefaultNodeGUID)

			// create namespace for the project
			By("Ensuring the namespace exists")
			err := utils.EnsureNamespaceExists(namespace)
			Expect(err).NotTo(HaveOccurred())

			By("Port forwarding to the cluster manager service")
			portForwardCmd = exec.Command("kubectl", "port-forward", utils.PortForwardService,
				fmt.Sprintf("%s:%s", utils.PortForwardLocalPort, utils.PortForwardRemotePort), "--address", utils.PortForwardAddress)
			err = portForwardCmd.Start()
			Expect(err).NotTo(HaveOccurred())
			time.Sleep(PortForwardDelay) // Give some time for port-forwarding to establish

			By("Port forwarding to the cluster gateway service")
			gatewayPortForward = exec.Command("kubectl", "port-forward", utils.PortForwardGatewayService,
				fmt.Sprintf("%s:%s", utils.PortForwardGatewayLocalPort, utils.PortForwardGatewayRemotePort), "--address", utils.PortForwardAddress)
			err = gatewayPortForward.Start()
			Expect(err).NotTo(HaveOccurred())
			time.Sleep(PortForwardDelay) // Give some time for port-forwarding to establish

		})

		AfterAll(func() {
			// Deferred so the tunnels stay alive while the delete below runs.
			defer func() {
				if portForwardCmd != nil && portForwardCmd.Process != nil {
					portForwardCmd.Process.Kill()
				}
				if gatewayPortForward != nil && gatewayPortForward.Process != nil {
					gatewayPortForward.Process.Kill()
				}
			}()

			if !utils.SkipDeleteCluster {
				By("Deleting the cluster")
				err := utils.DeleteCluster(namespace)
				Expect(err).NotTo(HaveOccurred())

				By("Verifying that the cluster is deleted")
				// kubectl get exits non-zero once the Cluster object is gone.
				Eventually(func() bool {
					cmd := exec.Command("kubectl", "-n", namespace, "get", "cluster", utils.ClusterName)
					err := cmd.Run()
					return err != nil
				}, PortForwardTimeout, PortForwardInterval).Should(BeTrue())
			}
		})

		It("Should successfully import RKE2 Single Node cluster template", func() {
			By("Importing the cluster template")
			err := utils.ImportClusterTemplate(namespace, utils.TemplateTypeRke2Baseline)
			Expect(err).NotTo(HaveOccurred())

			By("Waiting for the cluster template to be ready")
			Eventually(func() bool {
				return utils.IsClusterTemplateReady(namespace, utils.Rke2TemplateName)
			}, 1*time.Minute, 2*time.Second).Should(BeTrue())
		})

		It("Should verify that cluster create API should succeed for rke2 cluster", func() {
			// Record the start time before creating the cluster
			clusterCreateStartTime = time.Now()

			By("Creating the cluster")
			err := utils.CreateCluster(namespace, nodeGUID, utils.Rke2TemplateName)
			Expect(err).NotTo(HaveOccurred())
		})

		It("Should verify that the cluster is fully active", func() {
			By("Waiting for IntelMachine to exist")
			Eventually(func() bool {
				cmd := exec.Command("sh", "-c", fmt.Sprintf("kubectl -n %s get intelmachine -o yaml | yq '.items | length'", namespace))
				output, err := cmd.Output()
				if err != nil {
					return false
				}
				// NOTE(review): lexicographic string compare — "0\n" > "0" is
				// true, so a zero count reads as success; consider parsing the
				// count with strconv.Atoi as waitForIntelMachines should.
				return string(output) > "0"
			}, PortForwardTimeout, PortForwardInterval).Should(BeTrue())

			By("Waiting for all components to be ready")
			Eventually(func() bool {
				cmd := exec.Command("clusterctl", "describe", "cluster", utils.ClusterName, "-n", namespace)
				output, err := cmd.Output()
				if err != nil {
					return false
				}
				fmt.Printf("Cluster components status:\n%s\n", string(output))
				return utils.CheckAllComponentsReady(string(output))
			}, ClusterReadinessTimeout, ClusterReadinessInterval).Should(BeTrue())
			// Record the end time after the cluster is fully active
			clusterCreateEndTime = time.Now()

			// Calculate and print the total time taken
			totalTime := clusterCreateEndTime.Sub(clusterCreateStartTime)
			fmt.Printf("\033[32mTotal time from cluster creation to fully active: %v 🚀 ✅\033[0m\n", totalTime)
		})

		It("Should verify that the cluster information can be queried ", func() {
			By("Getting the cluster information")
			resp, err := utils.GetClusterInfo(namespace, utils.ClusterName)
			Expect(err).NotTo(HaveOccurred())
			defer resp.Body.Close()

			Expect(resp.StatusCode).To(Equal(http.StatusOK))
			// TODO: Verify the cluster details are correct
		})

		It("TC-CO-INT-006: Should verify that the cluster label can be queried", func() {
			fmt.Printf("TODO: Implement this test\n")
		})

		It("TC-CO-INT-007: Should verify that the cluster label can be updated", func() {
			fmt.Printf("TODO: Implement this test\n")
		})

		It("Should verify that the connect gateway allow access to k8s api", func() {
			// cmd := exec.Command("curl", "-X", "GET", fmt.Sprintf("127.0.0.1:%v/kubernetes/%v-%v/api/v1/namespaces/default/pods", portForwardGatewayLocalPort, namespace, clusterName))
			By("Getting kubeconfig")
			fmt.Println(utils.ClusterName)
			cmd := exec.Command("clusterctl", "get", "kubeconfig", utils.ClusterName, "--namespace", utils.DefaultNamespace) // ">", "kubeconfig.yaml")
			output, err := cmd.Output()
			Expect(err).NotTo(HaveOccurred())

			kubeConfigName := KubeconfigFileName
			err = os.WriteFile(kubeConfigName, output, 0644)
			Expect(err).NotTo(HaveOccurred())

			By("Setting in kubeconfig server to cluster connect gateway")
			// Rewrite the server URL so kubectl goes through the local gateway tunnel.
			cmd = exec.Command("sed", "-i", fmt.Sprintf("s|http://[[:alnum:].-]*:8080/|%s|", LocalGatewayURL), KubeconfigFileName)
			_, err = cmd.Output()
			Expect(err).NotTo(HaveOccurred())

			By("Getting list of pods")
			cmd = exec.Command("kubectl", "--kubeconfig", KubeconfigFileName, "get", "pods")
			_, err = cmd.Output()
			Expect(err).NotTo(HaveOccurred())

			// Exec into one of the pods in the kube-system namespace on the edge node cluster
			By("Executing command in kube-scheduler-cluster-agent-0 pod")
			cmd = exec.Command("kubectl", "exec", "--kubeconfig", KubeconfigFileName, "-it", "-n",
				"kube-system", "kube-scheduler-cluster-agent-0", "--", "ls")
			output, err = cmd.Output()
			Expect(err).NotTo(HaveOccurred())
			By("Printing the output of the command")
			fmt.Printf("Output of `ls` command:\n%s\n", string(output))
		})
		It("Should verify that a cluster template cannot be deleted if there is a cluster using it", func() {
			By("Trying to delete the cluster template")
			// The API must refuse to delete a template that an existing cluster references.
			err := utils.DeleteTemplate(namespace, utils.Rke2TemplateOnlyName, utils.Rke2TemplateOnlyVersion)
			Expect(err).To(HaveOccurred())
			Expect(err.Error()).To(ContainSubstring("clusterTemplate is in use"))
		})
		// TODO: Add more functional tests
	})
597 |
--------------------------------------------------------------------------------